| author    | Jiri Kosina <jkosina@suse.cz>            | 2010-04-22 20:08:44 -0400 |
|-----------|------------------------------------------|---------------------------|
| committer | Jiri Kosina <jkosina@suse.cz>            | 2010-04-22 20:08:44 -0400 |
| commit    | 6c9468e9eb1252eaefd94ce7f06e1be9b0b641b1 | (patch)                   |
| tree      | 797676a336b050bfa1ef879377c07e541b9075d6 | /arch/x86                 |
| parent    | 4cb3ca7cd7e2cae8d1daf5345ec99a1e8502cf3f | (diff)                    |
| parent    | c81eddb0e3728661d1585fbc564449c94165cc36 | (diff)                    |
Merge branch 'master' into for-next
Diffstat (limited to 'arch/x86')
120 files changed, 838 insertions, 574 deletions
```diff
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 93936de67796..9458685902bd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -662,7 +662,7 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
-	depends on X86_64 && PCI
+	depends on X86_64 && PCI && K8_NB
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
 	  on systems with more than 3GB. This is usually needed for USB,
@@ -1216,8 +1216,8 @@ config NUMA_EMU
 
 config NODES_SHIFT
 	int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
-	range 1 9
-	default "9" if MAXSMP
+	range 1 10
+	default "10" if MAXSMP
 	default "6" if X86_64
 	default "4" if X86_NUMAQ
 	default "3"
@@ -2061,7 +2061,7 @@ endif # X86_32
 
 config K8_NB
 	def_bool y
-	depends on AGP_AMD64 || (X86_64 && (GART_IOMMU || (PCI && NUMA)))
+	depends on CPU_SUP_AMD && PCI
 
 source "drivers/pcmcia/Kconfig"
 
```
```diff
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index daef6cd2b45d..1a8f8649c035 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <asm/i387.h>
 
 struct crypto_fpu_ctx {
```
```diff
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 280c019cfad8..0350311906ae 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -21,7 +21,6 @@
 #include <linux/fcntl.h>
 #include <linux/ptrace.h>
 #include <linux/user.h>
-#include <linux/slab.h>
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 #include <linux/init.h>
```
```diff
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 59b4556a5b92..e790bc1fbfa3 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -626,7 +626,7 @@ ia32_sys_call_table:
 	.quad stub32_sigreturn
 	.quad stub32_clone		/* 120 */
 	.quad sys_setdomainname
-	.quad sys_uname
+	.quad sys_newuname
 	.quad sys_modify_ldt
 	.quad compat_sys_adjtimex
 	.quad sys32_mprotect		/* 125 */
```
```diff
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 74c35431b7d8..626be156d88d 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -40,6 +40,7 @@
 #include <linux/ptrace.h>
 #include <linux/highuid.h>
 #include <linux/sysctl.h>
+#include <linux/slab.h>
 #include <asm/mman.h>
 #include <asm/types.h>
 #include <asm/uaccess.h>
```
```diff
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index ba19ad4c47d0..86a0ff0aeac7 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -21,6 +21,7 @@
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
@@ -140,6 +141,7 @@
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE    8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
@@ -237,6 +239,7 @@ struct protection_domain {
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
+	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
```
```diff
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 635f03bb4995..d07b44f7d1dc 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -82,6 +82,9 @@ enum fixed_addresses {
 #endif
 	FIX_DBGP_BASE,
 	FIX_EARLYCON_MEM_BASE,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+	FIX_OHCI1394_BASE,
+#endif
 #ifdef CONFIG_X86_LOCAL_APIC
 	FIX_APIC_BASE,	/* local (CPU) APIC) -- required for SMP or not */
 #endif
@@ -132,9 +135,6 @@ enum fixed_addresses {
 	  (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
 	  : __end_of_permanent_fixed_addresses,
 	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-	FIX_OHCI1394_BASE,
-#endif
 #ifdef CONFIG_X86_32
 	FIX_WP_TEST,
 #endif
```
```diff
diff --git a/arch/x86/include/asm/hw_breakpoint.h b/arch/x86/include/asm/hw_breakpoint.h
index 0675a7c4c20e..2a1bd8f4f23a 100644
--- a/arch/x86/include/asm/hw_breakpoint.h
+++ b/arch/x86/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
  * (display/resolving)
  */
 struct arch_hw_breakpoint {
-	char		*name; /* Contains name of the symbol to set bkpt */
 	unsigned long	address;
 	u8		len;
 	u8		type;
```
```diff
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index a929c9ede33d..46c0fe05f230 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -133,6 +133,7 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
 
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void setup_vector_irq(int cpu);
 
 #ifdef CONFIG_X86_IO_APIC
 extern void lock_vector_lock(void);
```
```diff
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index ba0eed8aa1a6..b60f2924c413 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -28,22 +28,39 @@
 
 #ifndef __ASSEMBLY__
 #include <asm/hw_irq.h>
-#include <asm/kvm_para.h>
 
 /*G:030
  * But first, how does our Guest contact the Host to ask for privileged
  * operations?  There are two ways: the direct way is to make a "hypercall",
  * to make requests of the Host Itself.
  *
- * We use the KVM hypercall mechanism, though completely different hypercall
- * numbers. Seventeen hypercalls are available: the hypercall number is put in
- * the %eax register, and the arguments (when required) are placed in %ebx,
- * %ecx, %edx and %esi.  If a return value makes sense, it's returned in %eax.
+ * Our hypercall mechanism uses the highest unused trap code (traps 32 and
+ * above are used by real hardware interrupts).  Seventeen hypercalls are
+ * available: the hypercall number is put in the %eax register, and the
+ * arguments (when required) are placed in %ebx, %ecx, %edx and %esi.
+ * If a return value makes sense, it's returned in %eax.
  *
  * Grossly invalid calls result in Sudden Death at the hands of the vengeful
  * Host, rather than returning failure.  This reflects Winston Churchill's
  * definition of a gentleman: "someone who is only rude intentionally".
-:*/
+ */
+static inline unsigned long
+hcall(unsigned long call,
+      unsigned long arg1, unsigned long arg2, unsigned long arg3,
+      unsigned long arg4)
+{
+	/* "int" is the Intel instruction to trigger a trap. */
+	asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
+		     /* The call in %eax (aka "a") might be overwritten */
+		     : "=a"(call)
+		       /* The arguments are in %eax, %ebx, %ecx, %edx & %esi */
+		     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3), "S"(arg4)
+		       /* "memory" means this might write somewhere in memory.
+			* This isn't true for all calls, but it's safe to tell
+			* gcc that it might happen so it doesn't get clever. */
+		     : "memory");
+	return call;
+}
 
 /* Can't use our min() macro here: needs to be a constant */
 #define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
```
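The hunk above replaces lguest's reuse of the KVM hypercall machinery with a plain software interrupt. As a quick illustration of the guest-side calling convention (a sketch, not part of this patch; `LHCALL_FLUSH_ASYNC` is one of the hypercall numbers defined elsewhere in this same header):

```c
/* Sketch only: invoking the new hcall() helper from guest code.
 * LHCALL_FLUSH_ASYNC comes from lguest_hcall.h; any of the seventeen
 * hypercall numbers is passed the same way. */
static void example_flush_async(void)
{
	/* Number in %eax, unused arguments zeroed; result returns in %eax. */
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
}
```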
```diff
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1cd58cdbc03f..4604e6a54d36 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -105,6 +105,8 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_NB_CFG		0xc001001f
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
+#define MSR_AMD64_OSVW_STATUS		0xc0010141
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
 #define MSR_AMD64_IBSFETCHPHYSAD	0xc0011032
```
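The two new definitions expose AMD's OS Visible Workaround (OSVW) registers: `MSR_AMD64_OSVW_ID_LENGTH` reports how many erratum status bits are valid, and `MSR_AMD64_OSVW_STATUS` carries the bits themselves. A hedged sketch of how a caller might test one erratum bit (the id is a placeholder; bits beyond 63 live in subsequent status MSRs, which this sketch ignores):

```c
#include <asm/msr.h>

/* Sketch: probe one OSVW erratum bit on an AMD CPU.  The id value is
 * illustrative; real callers take it from the erratum documentation. */
static bool osvw_erratum_present(unsigned int id)
{
	u64 len, status;

	rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, len);
	if (id >= len)
		return true;	/* not covered by OSVW: assume affected */

	rdmsrl(MSR_AMD64_OSVW_STATUS, status);
	return status & (1ULL << id);
}
```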
```diff
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index befd172c82ad..db6109a885a7 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -18,7 +18,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL0	0x186
 #define MSR_ARCH_PERFMON_EVENTSEL1	0x187
 
-#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ENABLE	(1 << 22)
 #define ARCH_PERFMON_EVENTSEL_ANY	(1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
@@ -50,7 +50,7 @@
 	 INTEL_ARCH_INV_MASK| \
 	 INTEL_ARCH_EDGE_MASK|\
 	 INTEL_ARCH_UNIT_MASK|\
-	 INTEL_ARCH_EVTSEL_MASK)
+	 INTEL_ARCH_EVENT_MASK)
 
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	0x3c
 #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
@@ -117,6 +117,18 @@ union cpuid10_edx {
  */
 #define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)
 
+/* IbsFetchCtl bits/masks */
+#define IBS_FETCH_RAND_EN	(1ULL<<57)
+#define IBS_FETCH_VAL		(1ULL<<49)
+#define IBS_FETCH_ENABLE	(1ULL<<48)
+#define IBS_FETCH_CNT		0xFFFF0000ULL
+#define IBS_FETCH_MAX_CNT	0x0000FFFFULL
+
+/* IbsOpCtl bits */
+#define IBS_OP_CNT_CTL		(1ULL<<19)
+#define IBS_OP_VAL		(1ULL<<18)
+#define IBS_OP_ENABLE		(1ULL<<17)
+#define IBS_OP_MAX_CNT		0x0000FFFFULL
 
 #ifdef CONFIG_PERF_EVENTS
 extern void init_hw_perf_events(void);
```
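The IBS definitions give symbolic names for programming AMD's Instruction-Based Sampling control MSRs. A rough sketch of arming fetch sampling with them (it assumes the `MSR_AMD64_IBSFETCHCTL` definition from msr-index.h above; the period is an arbitrary example, and the 16-fetch granularity of the max-count field follows AMD's documentation rather than anything in this patch):

```c
#include <asm/msr.h>

/* Sketch: start IBS fetch sampling.  IBS_FETCH_MAX_CNT holds the sample
 * period divided by 16; the enable and randomization bits come from the
 * definitions added above. */
static void ibs_fetch_start(u64 period)
{
	u64 ctl = (period >> 4) & IBS_FETCH_MAX_CNT;

	ctl |= IBS_FETCH_ENABLE;	/* begin counting fetched ops */
	ctl |= IBS_FETCH_RAND_EN;	/* randomize low counter bits */
	wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
}
```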
```diff
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 47339a1ac7b6..2984a25ff383 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -19,7 +19,6 @@
 #include <asm/paravirt.h>
 
 #include <linux/bitops.h>
-#include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
```
```diff
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a54d714545ff..cd40aba6aa95 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -31,6 +31,7 @@
 #include <linux/module.h>
 #include <linux/dmi.h>
 #include <linux/irq.h>
+#include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
@@ -490,6 +491,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 * ACPI based hotplug support for CPU
 */
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
+#include <acpi/processor.h>
 
 static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
@@ -567,6 +569,8 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 		goto free_new_map;
 	}
 
+	acpi_processor_set_pdc(handle);
+
 	cpu = cpumask_first(new_map);
 	acpi_map_cpu2node(handle, cpu, physid);
 
@@ -1293,23 +1297,6 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
 }
 
 /*
- * Limit ACPI to CPU enumeration for HT
- */
-static int __init force_acpi_ht(const struct dmi_system_id *d)
-{
-	if (!acpi_force) {
-		printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
-		       d->ident);
-		disable_acpi();
-		acpi_ht = 1;
-	} else {
-		printk(KERN_NOTICE
-		       "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
-	}
-	return 0;
-}
-
-/*
  * Force ignoring BIOS IRQ0 pin2 override
  */
 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
@@ -1345,82 +1332,6 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
 	},
 
 	/*
-	 * Boxes that need acpi=ht
-	 */
-	{
-		.callback = force_acpi_ht,
-		.ident = "FSC Primergy T850",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "HP VISUALIZE NT Workstation",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "Compaq Workstation W8000",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "ASUS CUR-DLS",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
-			DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "ABIT i440BX-W83977",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
-			DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "IBM Bladecenter",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-			DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "IBM eServer xSeries 360",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-			DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "IBM eserver xSeries 330",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-			DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
-		},
-	},
-	{
-		.callback = force_acpi_ht,
-		.ident = "IBM eserver xSeries 440",
-		.matches = {
-			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
-		},
-	},
-
-	/*
 	 * Boxes that need ACPI PCI IRQ routing disabled
 	 */
 	{
@@ -1652,8 +1563,10 @@ static int __init parse_acpi(char *arg)
 	}
 	/* Limit ACPI just to boot-time to enable HT */
 	else if (strcmp(arg, "ht") == 0) {
-		if (!acpi_force)
+		if (!acpi_force) {
+			printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n");
 			disable_acpi();
+		}
 		acpi_ht = 1;
 	}
 	/* acpi=rsdt use RSDT instead of XSDT */
```
```diff
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3a4bf35c179b..1a160d5d44d0 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -8,6 +8,7 @@
 #include <linux/vmalloc.h>
 #include <linux/memory.h>
 #include <linux/stop_machine.h>
+#include <linux/slab.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 #include <asm/pgtable.h>
```
```diff
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index adb0ba025702..f854d89b7edf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -18,8 +18,8 @@
 */
 
 #include <linux/pci.h>
-#include <linux/gfp.h>
 #include <linux/bitmap.h>
+#include <linux/slab.h>
 #include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
 		return false;
 
 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;
 
 	devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u32 tail, head;
 	u8 *target;
 
+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
 	struct dma_ops_domain *dma_dom;
 	u16 devid;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {
 
 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 		paddr += PAGE_SIZE;
 	}
 
+	mutex_unlock(&domain->api_lock);
+
 	return 0;
 }
 
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 
 	iova  &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}
 
 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
```
```diff
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9dc91b431470..6360abf993d4 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -19,8 +19,8 @@
 
 #include <linux/pci.h>
 #include <linux/acpi.h>
-#include <linux/gfp.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
@@ -138,9 +138,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
 */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
 * List of protection domains - used during resume
@@ -391,9 +391,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0)
+	if (checksum != 0) {
 		/* ACPI table corrupt */
-		return -ENODEV;
+		amd_iommu_init_err = -ENODEV;
+		return 0;
+	}
 
 	p += IVRS_HEADER_LENGTH;
 
@@ -436,7 +438,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	if (cmd_buf == NULL)
 		return NULL;
 
-	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
 	return cmd_buf;
 }
@@ -472,12 +474,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 		    &entry, sizeof(entry));
 
 	amd_iommu_reset_cmd_buffer(iommu);
+	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
 	free_pages((unsigned long)iommu->cmd_buf,
-		   get_order(iommu->cmd_buf_size));
+		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +923,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 			    h->mmio_phys);
 
 		iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-		if (iommu == NULL)
-			return -ENOMEM;
+		if (iommu == NULL) {
+			amd_iommu_init_err = -ENOMEM;
+			return 0;
+		}
+
 		ret = init_iommu_one(iommu, h);
-		if (ret)
-			return ret;
+		if (ret) {
+			amd_iommu_init_err = ret;
+			return 0;
+		}
 		break;
 	default:
 		break;
@@ -934,8 +942,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
-	amd_iommu_initialized = true;
-
 	return 0;
 }
 
@@ -1211,6 +1217,10 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
 		return -ENODEV;
 
+	ret = amd_iommu_init_err;
+	if (ret)
+		goto out;
+
 	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
 	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1280,19 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
-	if (!amd_iommu_initialized)
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
 		goto free;
+	}
 
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
+		goto free;
+	}
+
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;
@@ -1288,6 +1305,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
@@ -1300,8 +1319,6 @@ static int __init amd_iommu_init(void)
 
 	amd_iommu_init_notifier();
 
-	enable_iommus();
-
 	if (iommu_pass_through)
 		goto out;
 
@@ -1315,6 +1332,7 @@ out:
 	return ret;
 
 free:
+	disable_iommus();
 
 	amd_iommu_uninit_devices();
 
```
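A detail worth noting in the hunks above: `CMD_BUFFER_UNINITIALIZED` is folded into `cmd_buf_size` itself. Because the buffer size is a power of two well above 1, bit 0 is never a size bit and can carry the "command buffer not yet initialized" marker that `__iommu_queue_command()` now WARNs on. A sketch of the encoding (the helper names are mine, not the patch's):

```c
/* Sketch: the flag-in-size encoding used by the patch.  CMD_BUFFER_SIZE
 * is 8192, so bit 0 (CMD_BUFFER_UNINITIALIZED == 1) never collides with
 * a real size bit. */
static inline bool cmd_buf_uninitialized(u32 cmd_buf_size)
{
	return cmd_buf_size & CMD_BUFFER_UNINITIALIZED;
}

static inline u32 cmd_buf_bytes(u32 cmd_buf_size)
{
	return cmd_buf_size & ~CMD_BUFFER_UNINITIALIZED;
}
```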
```diff
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 4b7099526d2c..ff469e470059 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -33,6 +33,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/sysdev.h>
+#include <linux/slab.h>
 #include <linux/pm.h>
 #include <linux/pci.h>
 #include <linux/sfi.h>
```
```diff
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index f147a95fd84a..b5d8b0bcf235 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,7 +31,6 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
-EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
@@ -394,6 +393,7 @@ void __init gart_iommu_hole_init(void)
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
+		u32 ctl;
 
 		bus = bus_dev_ranges[i].bus;
 		dev_base = bus_dev_ranges[i].dev_base;
@@ -407,7 +407,19 @@ void __init gart_iommu_hole_init(void)
 		gart_iommu_aperture = 1;
 		x86_init.iommu.iommu_init = gart_iommu_init;
 
-		aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+		ctl = read_pci_config(bus, slot, 3,
+				      AMD64_GARTAPERTURECTL);
+
+		/*
+		 * Before we do anything else disable the GART. It may
+		 * still be enabled if we boot into a crash-kernel here.
+		 * Reconfiguring the GART while it is enabled could have
+		 * unknown side-effects.
+		 */
+		ctl &= ~GARTEN;
+		write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+		aper_order = (ctl >> 1) & 7;
 		aper_size = (32 * 1024 * 1024) << aper_order;
 		aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
 		aper_base <<= 25;
```
```diff
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 00187f1fcfb7..e5a4a1e01618 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1640,8 +1640,10 @@ int __init APIC_init_uniprocessor(void)
 	}
 #endif
 
+#ifndef CONFIG_SMP
 	enable_IR_x2apic();
 	default_setup_apic_routing();
+#endif
 
 	verify_local_APIC();
 	connect_bsp_APIC();
```
```diff
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index dd2b5f264643..03ba1b895f5e 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -42,6 +42,7 @@
 #include <linux/errno.h>
 #include <linux/acpi.h>
 #include <linux/init.h>
+#include <linux/gfp.h>
 #include <linux/nmi.h>
 #include <linux/smp.h>
 #include <linux/io.h>
```
```diff
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e4e0ddcb1546..127b8718abfb 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -36,6 +36,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>	/* time_after() */
+#include <linux/slab.h>
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
 #endif
@@ -1268,6 +1269,14 @@ void __setup_vector_irq(int cpu)
 	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
 		cfg = desc->chip_data;
+
+		/*
+		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
+		 * will be part of the irq_cfg's domain.
+		 */
+		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
+			cpumask_set_cpu(cpu, cfg->domain);
+
 		if (!cpumask_test_cpu(cpu, cfg->domain))
 			continue;
 		vector = cfg->vector;
```
```diff
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 8aa65adbd25d..1edaf15c0b8e 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/sysdev.h>
 #include <linux/sysctl.h>
 #include <linux/percpu.h>
```
```diff
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 3740c8a4eae7..c085d52dbaf2 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -17,6 +17,7 @@
 #include <linux/ctype.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -120,11 +121,9 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
 static const struct cpumask *uv_target_cpus(void)
 {
-	return cpumask_of(0);
+	return cpu_online_mask;
 }
 
 static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
```
```diff
diff --git a/arch/x86/kernel/bootflag.c b/arch/x86/kernel/bootflag.c
index 30f25a75fe28..5de7f4c56971 100644
--- a/arch/x86/kernel/bootflag.c
+++ b/arch/x86/kernel/bootflag.c
@@ -5,7 +5,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/acpi.h>
 #include <asm/io.h>
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 1b1920fa7c80..459168083b77 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
 #include <linux/dmi.h>
+#include <linux/slab.h>
 #include <trace/events/power.h>
 
 #include <linux/acpi.h>
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 006b278b0d5d..c587db472a75 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -20,7 +20,6 @@
 #include <linux/module.h>
 #include <linux/init.h>
 
-#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/cpufreq.h>
 
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
index ac27ec2264d5..16e3483be9e3 100644
--- a/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
@@ -80,6 +80,7 @@
 #include <linux/cpufreq.h>
 #include <linux/pci.h>
 #include <linux/errno.h>
+#include <linux/slab.h>
 
 #include <asm/processor-cyrix.h>
 
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index da5f70fcb766..e7b559d74c52 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -9,7 +9,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/slab.h>
 #include <linux/cpufreq.h>
 #include <linux/timex.h>
 
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 869615193720..7b8a8ba67b07 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -25,7 +25,6 @@
 #include <linux/init.h>
 #include <linux/smp.h>
 #include <linux/cpufreq.h>
-#include <linux/slab.h>
 #include <linux/cpumask.h>
 #include <linux/timex.h>
 
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index ff36d2979a90..ce7cde713e71 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -30,6 +30,7 @@
 #include <linux/sched.h>
 #include <linux/cpufreq.h>
 #include <linux/compiler.h>
+#include <linux/slab.h>
 
 #include <linux/acpi.h>
 #include <linux/io.h>
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index cb01dac267d3..b3379d6a5c57 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -13,7 +13,6 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 #include <linux/ioport.h>
-#include <linux/slab.h>
 #include <linux/timex.h>
 #include <linux/io.h>
 
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 8d672ef162ce..9b1ff37de46a 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -20,6 +20,7 @@
 #include <linux/sched.h>	/* current */
 #include <linux/delay.h>
 #include <linux/compiler.h>
+#include <linux/gfp.h>
 
 #include <asm/msr.h>
 #include <asm/processor.h>
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 2ce8e0b5cc54..561758e95180 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 #include <linux/pci.h>
-#include <linux/slab.h>
 #include <linux/sched.h>
 
 #include "speedstep-lib.h"
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index ad0083abfa23..a94ec6be69fa 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -13,7 +13,6 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
-#include <linux/slab.h>
 
 #include <asm/msr.h>
 #include <asm/tsc.h>
```
```diff
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
index 04d73c114e49..8abd869baabf 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-smi.c
@@ -17,7 +17,6 @@
 #include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/cpufreq.h>
-#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/io.h>
 #include <asm/ist.h>
```
```diff
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 879666f4d871..7e1cca13af35 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -70,7 +70,8 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		sched_clock_stable = 1;
+		if (!check_tsc_unstable())
+			sched_clock_stable = 1;
 	}
 
 	/*
```
```diff
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 73734baa50f2..e7dbde7bfedb 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -22,6 +22,7 @@
 #include <linux/kdebug.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/gfp.h>
 #include <asm/mce.h>
 #include <asm/apic.h>
 
```
```diff
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 28cba46bf32c..8a6f0afa767e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/kmod.h>
 #include <linux/poll.h>
@@ -46,6 +47,13 @@
 
 #include "mce-internal.h"
 
+static DEFINE_MUTEX(mce_read_mutex);
+
+#define rcu_dereference_check_mce(p) \
+	rcu_dereference_check((p), \
+			      rcu_read_lock_sched_held() || \
+			      lockdep_is_held(&mce_read_mutex))
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -158,7 +166,7 @@ void mce_log(struct mce *mce)
 	mce->finished = 0;
 	wmb();
 	for (;;) {
-		entry = rcu_dereference(mcelog.next);
+		entry = rcu_dereference_check_mce(mcelog.next);
 		for (;;) {
 			/*
 			 * When the buffer fills up discard new entries.
@@ -1485,8 +1493,6 @@ static void collect_tscs(void *data)
 	rdtscll(cpu_tsc[smp_processor_id()]);
 }
 
-static DEFINE_MUTEX(mce_read_mutex);
-
 static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 			loff_t *off)
 {
@@ -1500,7 +1506,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 		return -ENOMEM;
 
 	mutex_lock(&mce_read_mutex);
-	next = rcu_dereference(mcelog.next);
+	next = rcu_dereference_check_mce(mcelog.next);
 
 	/* Only supports full reads right now */
 	if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) {
@@ -1565,7 +1571,7 @@ timeout:
 static unsigned int mce_poll(struct file *file, poll_table *wait)
 {
 	poll_wait(file, &mce_wait, wait);
	if (rcu_dereference(mcelog.next))
-	if (rcu_dereference(mcelog.next))
+	if (rcu_dereference_check_mce(mcelog.next))
 		return POLLIN | POLLRDNORM;
 	return 0;
 }
```
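`rcu_dereference_check()` takes the RCU-protected pointer plus a boolean expression naming every context in which the access is legal; lockdep complains only if none of them holds. Here `mcelog.next` may legitimately be read either inside an RCU read-side section or with `mce_read_mutex` held. A sketch of the same pattern on a hypothetical pointer (the names are illustrative, not from this patch):

```c
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_cfg { int value; };

static DEFINE_MUTEX(cfg_mutex);
static struct my_cfg *cfg;

/* Sketch: readers may run inside rcu_read_lock() or under cfg_mutex;
 * either context satisfies the lockdep condition. */
static struct my_cfg *cfg_access(void)
{
	return rcu_dereference_check(cfg,
				     rcu_read_lock_held() ||
				     lockdep_is_held(&cfg_mutex));
}
```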
```diff
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index cda932ca3ade..224392d8fe8c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -21,6 +21,7 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/sysfs.h>
+#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
```
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c index 7c785634af2b..62b48e40920a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author: Andi Kleen | 5 | * Author: Andi Kleen |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/gfp.h> | ||
8 | #include <linux/init.h> | 9 | #include <linux/init.h> |
9 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
10 | #include <linux/percpu.h> | 11 | #include <linux/percpu.h> |
@@ -95,7 +96,7 @@ static void cmci_discover(int banks, int boot) | |||
95 | 96 | ||
96 | /* Already owned by someone else? */ | 97 | /* Already owned by someone else? */ |
97 | if (val & CMCI_EN) { | 98 | if (val & CMCI_EN) { |
98 | if (test_and_clear_bit(i, owned) || boot) | 99 | if (test_and_clear_bit(i, owned) && !boot) |
99 | print_update("SHD", &hdr, i); | 100 | print_update("SHD", &hdr, i); |
100 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | 101 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); |
101 | continue; | 102 | continue; |
@@ -107,7 +108,7 @@ static void cmci_discover(int banks, int boot) | |||
107 | 108 | ||
108 | /* Did the enable bit stick? -- the bank supports CMCI */ | 109 | /* Did the enable bit stick? -- the bank supports CMCI */ |
109 | if (val & CMCI_EN) { | 110 | if (val & CMCI_EN) { |
110 | if (!test_and_set_bit(i, owned) || boot) | 111 | if (!test_and_set_bit(i, owned) && !boot) |
111 | print_update("CMCI", &hdr, i); | 112 | print_update("CMCI", &hdr, i); |
112 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | 113 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); |
113 | } else { | 114 | } else { |
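The cmci_discover() fix above flips `|| boot` to `&& !boot` in both ownership tests, so the SHD/CMCI update lines are printed only when a bank's ownership bit actually changed outside the boot-time rescan. A small standalone C program makes the truth table of the corrected condition explicit:

#include <stdbool.h>
#include <stdio.h>

/* Corrected predicate: report only real ownership transitions, and stay
 * quiet during the boot-time rescan. */
static bool should_print(bool bit_changed, bool boot)
{
    return bit_changed && !boot;    /* previously: bit_changed || boot */
}

int main(void)
{
    for (int changed = 0; changed <= 1; changed++)
        for (int boot = 0; boot <= 1; boot++)
            printf("changed=%d boot=%d -> print=%d\n",
                   changed, boot, should_print(changed, boot));
    return 0;
}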
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 9aa5dc76ff4a..fd31a441c61c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -6,7 +6,6 @@ | |||
6 | 6 | ||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/slab.h> | ||
10 | #include <linux/io.h> | 9 | #include <linux/io.h> |
11 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
12 | 11 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index e006e56f699c..79289632cb27 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/module.h> | 5 | #include <linux/module.h> |
6 | #include <linux/ctype.h> | 6 | #include <linux/ctype.h> |
7 | #include <linux/string.h> | 7 | #include <linux/string.h> |
8 | #include <linux/slab.h> | ||
8 | #include <linux/init.h> | 9 | #include <linux/init.h> |
9 | 10 | ||
10 | #define LINE_SIZE 80 | 11 | #define LINE_SIZE 80 |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b1fbdeecf6c9..db5bdc8addf8 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kdebug.h> | 21 | #include <linux/kdebug.h> |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/slab.h> | ||
24 | #include <linux/highmem.h> | 25 | #include <linux/highmem.h> |
25 | #include <linux/cpu.h> | 26 | #include <linux/cpu.h> |
26 | #include <linux/bitops.h> | 27 | #include <linux/bitops.h> |
@@ -28,6 +29,7 @@ | |||
28 | #include <asm/apic.h> | 29 | #include <asm/apic.h> |
29 | #include <asm/stacktrace.h> | 30 | #include <asm/stacktrace.h> |
30 | #include <asm/nmi.h> | 31 | #include <asm/nmi.h> |
32 | #include <asm/compat.h> | ||
31 | 33 | ||
32 | static u64 perf_event_mask __read_mostly; | 34 | static u64 perf_event_mask __read_mostly; |
33 | 35 | ||
@@ -73,10 +75,10 @@ struct debug_store { | |||
73 | struct event_constraint { | 75 | struct event_constraint { |
74 | union { | 76 | union { |
75 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | 77 | unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; |
76 | u64 idxmsk64[1]; | 78 | u64 idxmsk64; |
77 | }; | 79 | }; |
78 | int code; | 80 | u64 code; |
79 | int cmask; | 81 | u64 cmask; |
80 | int weight; | 82 | int weight; |
81 | }; | 83 | }; |
82 | 84 | ||
@@ -103,7 +105,7 @@ struct cpu_hw_events { | |||
103 | }; | 105 | }; |
104 | 106 | ||
105 | #define __EVENT_CONSTRAINT(c, n, m, w) {\ | 107 | #define __EVENT_CONSTRAINT(c, n, m, w) {\ |
106 | { .idxmsk64[0] = (n) }, \ | 108 | { .idxmsk64 = (n) }, \ |
107 | .code = (c), \ | 109 | .code = (c), \ |
108 | .cmask = (m), \ | 110 | .cmask = (m), \ |
109 | .weight = (w), \ | 111 | .weight = (w), \ |
@@ -116,7 +118,7 @@ struct cpu_hw_events { | |||
116 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) | 118 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK) |
117 | 119 | ||
118 | #define FIXED_EVENT_CONSTRAINT(c, n) \ | 120 | #define FIXED_EVENT_CONSTRAINT(c, n) \ |
119 | EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK) | 121 | EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK) |
120 | 122 | ||
121 | #define EVENT_CONSTRAINT_END \ | 123 | #define EVENT_CONSTRAINT_END \ |
122 | EVENT_CONSTRAINT(0, 0, 0) | 124 | EVENT_CONSTRAINT(0, 0, 0) |
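The reworked FIXED_EVENT_CONSTRAINT above takes the fixed counter's index rather than a premixed mask: fixed-purpose counters start at X86_PMC_IDX_FIXED (32) in the combined index space, so fixed counter n contributes bit 32+n to idxmsk64. A tiny, runnable illustration:

#include <stdio.h>

#define X86_PMC_IDX_FIXED 32   /* first fixed-purpose counter index */

static unsigned long long fixed_constraint_mask(int n)
{
    return 1ULL << (X86_PMC_IDX_FIXED + n);
}

int main(void)
{
    for (int n = 0; n < 3; n++)
        printf("fixed counter %d -> idxmsk64 %#llx\n",
               n, fixed_constraint_mask(n));
    return 0;
}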
@@ -133,8 +135,8 @@ struct x86_pmu { | |||
133 | int (*handle_irq)(struct pt_regs *); | 135 | int (*handle_irq)(struct pt_regs *); |
134 | void (*disable_all)(void); | 136 | void (*disable_all)(void); |
135 | void (*enable_all)(void); | 137 | void (*enable_all)(void); |
136 | void (*enable)(struct hw_perf_event *, int); | 138 | void (*enable)(struct perf_event *); |
137 | void (*disable)(struct hw_perf_event *, int); | 139 | void (*disable)(struct perf_event *); |
138 | unsigned eventsel; | 140 | unsigned eventsel; |
139 | unsigned perfctr; | 141 | unsigned perfctr; |
140 | u64 (*event_map)(int); | 142 | u64 (*event_map)(int); |
@@ -157,6 +159,11 @@ struct x86_pmu { | |||
157 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, | 159 | void (*put_event_constraints)(struct cpu_hw_events *cpuc, |
158 | struct perf_event *event); | 160 | struct perf_event *event); |
159 | struct event_constraint *event_constraints; | 161 | struct event_constraint *event_constraints; |
162 | |||
163 | int (*cpu_prepare)(int cpu); | ||
164 | void (*cpu_starting)(int cpu); | ||
165 | void (*cpu_dying)(int cpu); | ||
166 | void (*cpu_dead)(int cpu); | ||
160 | }; | 167 | }; |
161 | 168 | ||
162 | static struct x86_pmu x86_pmu __read_mostly; | 169 | static struct x86_pmu x86_pmu __read_mostly; |
@@ -165,8 +172,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | |||
165 | .enabled = 1, | 172 | .enabled = 1, |
166 | }; | 173 | }; |
167 | 174 | ||
168 | static int x86_perf_event_set_period(struct perf_event *event, | 175 | static int x86_perf_event_set_period(struct perf_event *event); |
169 | struct hw_perf_event *hwc, int idx); | ||
170 | 176 | ||
171 | /* | 177 | /* |
172 | * Generalized hw caching related hw_event table, filled | 178 | * Generalized hw caching related hw_event table, filled |
@@ -189,11 +195,12 @@ static u64 __read_mostly hw_cache_event_ids | |||
189 | * Returns the delta events processed. | 195 | * Returns the delta events processed. |
190 | */ | 196 | */ |
191 | static u64 | 197 | static u64 |
192 | x86_perf_event_update(struct perf_event *event, | 198 | x86_perf_event_update(struct perf_event *event) |
193 | struct hw_perf_event *hwc, int idx) | ||
194 | { | 199 | { |
200 | struct hw_perf_event *hwc = &event->hw; | ||
195 | int shift = 64 - x86_pmu.event_bits; | 201 | int shift = 64 - x86_pmu.event_bits; |
196 | u64 prev_raw_count, new_raw_count; | 202 | u64 prev_raw_count, new_raw_count; |
203 | int idx = hwc->idx; | ||
197 | s64 delta; | 204 | s64 delta; |
198 | 205 | ||
199 | if (idx == X86_PMC_IDX_FIXED_BTS) | 206 | if (idx == X86_PMC_IDX_FIXED_BTS) |
@@ -293,7 +300,7 @@ static inline bool bts_available(void) | |||
293 | return x86_pmu.enable_bts != NULL; | 300 | return x86_pmu.enable_bts != NULL; |
294 | } | 301 | } |
295 | 302 | ||
296 | static inline void init_debug_store_on_cpu(int cpu) | 303 | static void init_debug_store_on_cpu(int cpu) |
297 | { | 304 | { |
298 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; | 305 | struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; |
299 | 306 | ||
@@ -305,7 +312,7 @@ static inline void init_debug_store_on_cpu(int cpu) | |||
305 | (u32)((u64)(unsigned long)ds >> 32)); | 312 | (u32)((u64)(unsigned long)ds >> 32)); |
306 | } | 313 | } |
307 | 314 | ||
308 | static inline void fini_debug_store_on_cpu(int cpu) | 315 | static void fini_debug_store_on_cpu(int cpu) |
309 | { | 316 | { |
310 | if (!per_cpu(cpu_hw_events, cpu).ds) | 317 | if (!per_cpu(cpu_hw_events, cpu).ds) |
311 | return; | 318 | return; |
@@ -503,6 +510,9 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
503 | */ | 510 | */ |
504 | if (attr->type == PERF_TYPE_RAW) { | 511 | if (attr->type == PERF_TYPE_RAW) { |
505 | hwc->config |= x86_pmu.raw_event(attr->config); | 512 | hwc->config |= x86_pmu.raw_event(attr->config); |
513 | if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) && | ||
514 | perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | ||
515 | return -EACCES; | ||
506 | return 0; | 516 | return 0; |
507 | } | 517 | } |
508 | 518 | ||
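The __hw_perf_event_init() hunk above rejects raw events that set the AnyThread bit unless the caller is privileged, since counting the sibling hyperthread leaks information across contexts. A compact userspace model of the gate (bit 21 is the architectural AnyThread bit of IA32_PERFEVTSELx; -13 stands in for -EACCES):

#include <stdbool.h>
#include <stdio.h>

#define ARCH_PERFMON_EVENTSEL_ANY (1ULL << 21)   /* AnyThread bit */

static int check_raw_config(unsigned long long config,
                            bool paranoid, bool cap_sys_admin)
{
    if ((config & ARCH_PERFMON_EVENTSEL_ANY) && paranoid && !cap_sys_admin)
        return -13;                              /* -EACCES */
    return 0;
}

int main(void)
{
    printf("%d\n", check_raw_config(ARCH_PERFMON_EVENTSEL_ANY, true, false));
    printf("%d\n", check_raw_config(0x00c0, true, false));
    return 0;
}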
@@ -553,9 +563,9 @@ static void x86_pmu_disable_all(void) | |||
553 | if (!test_bit(idx, cpuc->active_mask)) | 563 | if (!test_bit(idx, cpuc->active_mask)) |
554 | continue; | 564 | continue; |
555 | rdmsrl(x86_pmu.eventsel + idx, val); | 565 | rdmsrl(x86_pmu.eventsel + idx, val); |
556 | if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) | 566 | if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE)) |
557 | continue; | 567 | continue; |
558 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | 568 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; |
559 | wrmsrl(x86_pmu.eventsel + idx, val); | 569 | wrmsrl(x86_pmu.eventsel + idx, val); |
560 | } | 570 | } |
561 | } | 571 | } |
@@ -590,7 +600,7 @@ static void x86_pmu_enable_all(void) | |||
590 | continue; | 600 | continue; |
591 | 601 | ||
592 | val = event->hw.config; | 602 | val = event->hw.config; |
593 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 603 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
594 | wrmsrl(x86_pmu.eventsel + idx, val); | 604 | wrmsrl(x86_pmu.eventsel + idx, val); |
595 | } | 605 | } |
596 | } | 606 | } |
@@ -612,8 +622,8 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
612 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); | 622 | bitmap_zero(used_mask, X86_PMC_IDX_MAX); |
613 | 623 | ||
614 | for (i = 0; i < n; i++) { | 624 | for (i = 0; i < n; i++) { |
615 | constraints[i] = | 625 | c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); |
616 | x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]); | 626 | constraints[i] = c; |
617 | } | 627 | } |
618 | 628 | ||
619 | /* | 629 | /* |
@@ -635,7 +645,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
635 | if (test_bit(hwc->idx, used_mask)) | 645 | if (test_bit(hwc->idx, used_mask)) |
636 | break; | 646 | break; |
637 | 647 | ||
638 | set_bit(hwc->idx, used_mask); | 648 | __set_bit(hwc->idx, used_mask); |
639 | if (assign) | 649 | if (assign) |
640 | assign[i] = hwc->idx; | 650 | assign[i] = hwc->idx; |
641 | } | 651 | } |
@@ -684,7 +694,7 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) | |||
684 | if (j == X86_PMC_IDX_MAX) | 694 | if (j == X86_PMC_IDX_MAX) |
685 | break; | 695 | break; |
686 | 696 | ||
687 | set_bit(j, used_mask); | 697 | __set_bit(j, used_mask); |
688 | 698 | ||
689 | if (assign) | 699 | if (assign) |
690 | assign[i] = j; | 700 | assign[i] = j; |
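Both set_bit() calls in the scheduler above become __set_bit(): used_mask is an on-stack bitmap private to x86_schedule_events(), so the lock-prefixed atomic variant is pure overhead. The non-atomic operation is just a word-sized read-modify-write, as this small model shows:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Non-atomic set-bit, like the kernel's __set_bit(): a plain RMW on one
 * word, safe because the bitmap is private to the caller. */
static inline void nonatomic_set_bit(int nr, unsigned long *addr)
{
    addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
    unsigned long used_mask[2] = { 0, 0 };

    nonatomic_set_bit(3, used_mask);
    nonatomic_set_bit(40, used_mask);
    printf("%#lx %#lx\n", used_mask[0], used_mask[1]);
    return 0;
}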
@@ -777,6 +787,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc, | |||
777 | hwc->last_tag == cpuc->tags[i]; | 787 | hwc->last_tag == cpuc->tags[i]; |
778 | } | 788 | } |
779 | 789 | ||
790 | static int x86_pmu_start(struct perf_event *event); | ||
780 | static void x86_pmu_stop(struct perf_event *event); | 791 | static void x86_pmu_stop(struct perf_event *event); |
781 | 792 | ||
782 | void hw_perf_enable(void) | 793 | void hw_perf_enable(void) |
@@ -793,6 +804,7 @@ void hw_perf_enable(void) | |||
793 | return; | 804 | return; |
794 | 805 | ||
795 | if (cpuc->n_added) { | 806 | if (cpuc->n_added) { |
807 | int n_running = cpuc->n_events - cpuc->n_added; | ||
796 | /* | 808 | /* |
797 | * apply assignment obtained either from | 809 | * apply assignment obtained either from |
798 | * hw_perf_group_sched_in() or x86_pmu_enable() | 810 | * hw_perf_group_sched_in() or x86_pmu_enable() |
@@ -800,8 +812,7 @@ void hw_perf_enable(void) | |||
800 | * step1: save events moving to new counters | 812 | * step1: save events moving to new counters |
801 | * step2: reprogram moved events into new counters | 813 | * step2: reprogram moved events into new counters |
802 | */ | 814 | */ |
803 | for (i = 0; i < cpuc->n_events; i++) { | 815 | for (i = 0; i < n_running; i++) { |
804 | |||
805 | event = cpuc->event_list[i]; | 816 | event = cpuc->event_list[i]; |
806 | hwc = &event->hw; | 817 | hwc = &event->hw; |
807 | 818 | ||
@@ -816,29 +827,18 @@ void hw_perf_enable(void) | |||
816 | continue; | 827 | continue; |
817 | 828 | ||
818 | x86_pmu_stop(event); | 829 | x86_pmu_stop(event); |
819 | |||
820 | hwc->idx = -1; | ||
821 | } | 830 | } |
822 | 831 | ||
823 | for (i = 0; i < cpuc->n_events; i++) { | 832 | for (i = 0; i < cpuc->n_events; i++) { |
824 | |||
825 | event = cpuc->event_list[i]; | 833 | event = cpuc->event_list[i]; |
826 | hwc = &event->hw; | 834 | hwc = &event->hw; |
827 | 835 | ||
828 | if (hwc->idx == -1) { | 836 | if (!match_prev_assignment(hwc, cpuc, i)) |
829 | x86_assign_hw_event(event, cpuc, i); | 837 | x86_assign_hw_event(event, cpuc, i); |
830 | x86_perf_event_set_period(event, hwc, hwc->idx); | 838 | else if (i < n_running) |
831 | } | 839 | continue; |
832 | /* | ||
833 | * need to mark as active because x86_pmu_disable() | ||
834 | * clear active_mask and events[] yet it preserves | ||
835 | * idx | ||
836 | */ | ||
837 | set_bit(hwc->idx, cpuc->active_mask); | ||
838 | cpuc->events[hwc->idx] = event; | ||
839 | 840 | ||
840 | x86_pmu.enable(hwc, hwc->idx); | 841 | x86_pmu_start(event); |
841 | perf_event_update_userpage(event); | ||
842 | } | 842 | } |
843 | cpuc->n_added = 0; | 843 | cpuc->n_added = 0; |
844 | perf_events_lapic_init(); | 844 | perf_events_lapic_init(); |
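The hw_perf_enable() rework splits events into the n_running that were already live and the newly added tail, then reschedules in two passes: stop only the live events whose counter assignment changed, then (re)start everything that is new or was moved. A toy simulation of that shape (stop_ev/start_ev stand in for x86_pmu_stop()/x86_pmu_start(), and new_idx models the assignment from x86_schedule_events()):

#include <stdbool.h>
#include <stdio.h>

struct ev { int idx, new_idx; bool running; };

static void stop_ev(struct ev *e)  { e->running = false; printf("stop  idx %d\n", e->idx); }
static void start_ev(struct ev *e) { e->idx = e->new_idx; e->running = true; printf("start idx %d\n", e->idx); }

int main(void)
{
    /* two events already running, one just added; event 1 must move */
    struct ev ev[3] = { { 0, 0, true }, { 1, 2, true }, { -1, 1, false } };
    int n_events = 3, n_running = 2;

    for (int i = 0; i < n_running; i++)      /* step 1: stop movers */
        if (ev[i].idx != ev[i].new_idx)
            stop_ev(&ev[i]);

    for (int i = 0; i < n_events; i++) {     /* step 2: start new/moved */
        if (ev[i].idx == ev[i].new_idx && i < n_running)
            continue;                        /* untouched, keep running */
        start_ev(&ev[i]);
    }
    return 0;
}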
@@ -850,15 +850,16 @@ void hw_perf_enable(void) | |||
850 | x86_pmu.enable_all(); | 850 | x86_pmu.enable_all(); |
851 | } | 851 | } |
852 | 852 | ||
853 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 853 | static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc) |
854 | { | 854 | { |
855 | (void)checking_wrmsrl(hwc->config_base + idx, | 855 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, |
856 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); | 856 | hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE); |
857 | } | 857 | } |
858 | 858 | ||
859 | static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 859 | static inline void x86_pmu_disable_event(struct perf_event *event) |
860 | { | 860 | { |
861 | (void)checking_wrmsrl(hwc->config_base + idx, hwc->config); | 861 | struct hw_perf_event *hwc = &event->hw; |
862 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config); | ||
862 | } | 863 | } |
863 | 864 | ||
864 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | 865 | static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); |
@@ -868,12 +869,12 @@ static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left); | |||
868 | * To be called with the event disabled in hw: | 869 | * To be called with the event disabled in hw: |
869 | */ | 870 | */ |
870 | static int | 871 | static int |
871 | x86_perf_event_set_period(struct perf_event *event, | 872 | x86_perf_event_set_period(struct perf_event *event) |
872 | struct hw_perf_event *hwc, int idx) | ||
873 | { | 873 | { |
874 | struct hw_perf_event *hwc = &event->hw; | ||
874 | s64 left = atomic64_read(&hwc->period_left); | 875 | s64 left = atomic64_read(&hwc->period_left); |
875 | s64 period = hwc->sample_period; | 876 | s64 period = hwc->sample_period; |
876 | int err, ret = 0; | 877 | int err, ret = 0, idx = hwc->idx; |
877 | 878 | ||
878 | if (idx == X86_PMC_IDX_FIXED_BTS) | 879 | if (idx == X86_PMC_IDX_FIXED_BTS) |
879 | return 0; | 880 | return 0; |
@@ -919,11 +920,11 @@ x86_perf_event_set_period(struct perf_event *event, | |||
919 | return ret; | 920 | return ret; |
920 | } | 921 | } |
921 | 922 | ||
922 | static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 923 | static void x86_pmu_enable_event(struct perf_event *event) |
923 | { | 924 | { |
924 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 925 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
925 | if (cpuc->enabled) | 926 | if (cpuc->enabled) |
926 | __x86_pmu_enable_event(hwc, idx); | 927 | __x86_pmu_enable_event(&event->hw); |
927 | } | 928 | } |
928 | 929 | ||
929 | /* | 930 | /* |
@@ -959,34 +960,32 @@ static int x86_pmu_enable(struct perf_event *event) | |||
959 | memcpy(cpuc->assign, assign, n*sizeof(int)); | 960 | memcpy(cpuc->assign, assign, n*sizeof(int)); |
960 | 961 | ||
961 | cpuc->n_events = n; | 962 | cpuc->n_events = n; |
962 | cpuc->n_added = n - n0; | 963 | cpuc->n_added += n - n0; |
963 | 964 | ||
964 | return 0; | 965 | return 0; |
965 | } | 966 | } |
966 | 967 | ||
967 | static int x86_pmu_start(struct perf_event *event) | 968 | static int x86_pmu_start(struct perf_event *event) |
968 | { | 969 | { |
969 | struct hw_perf_event *hwc = &event->hw; | 970 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
971 | int idx = event->hw.idx; | ||
970 | 972 | ||
971 | if (hwc->idx == -1) | 973 | if (idx == -1) |
972 | return -EAGAIN; | 974 | return -EAGAIN; |
973 | 975 | ||
974 | x86_perf_event_set_period(event, hwc, hwc->idx); | 976 | x86_perf_event_set_period(event); |
975 | x86_pmu.enable(hwc, hwc->idx); | 977 | cpuc->events[idx] = event; |
978 | __set_bit(idx, cpuc->active_mask); | ||
979 | x86_pmu.enable(event); | ||
980 | perf_event_update_userpage(event); | ||
976 | 981 | ||
977 | return 0; | 982 | return 0; |
978 | } | 983 | } |
979 | 984 | ||
980 | static void x86_pmu_unthrottle(struct perf_event *event) | 985 | static void x86_pmu_unthrottle(struct perf_event *event) |
981 | { | 986 | { |
982 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 987 | int ret = x86_pmu_start(event); |
983 | struct hw_perf_event *hwc = &event->hw; | 988 | WARN_ON_ONCE(ret); |
984 | |||
985 | if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || | ||
986 | cpuc->events[hwc->idx] != event)) | ||
987 | return; | ||
988 | |||
989 | x86_pmu.enable(hwc, hwc->idx); | ||
990 | } | 989 | } |
991 | 990 | ||
992 | void perf_event_print_debug(void) | 991 | void perf_event_print_debug(void) |
@@ -1046,18 +1045,16 @@ static void x86_pmu_stop(struct perf_event *event) | |||
1046 | struct hw_perf_event *hwc = &event->hw; | 1045 | struct hw_perf_event *hwc = &event->hw; |
1047 | int idx = hwc->idx; | 1046 | int idx = hwc->idx; |
1048 | 1047 | ||
1049 | /* | 1048 | if (!__test_and_clear_bit(idx, cpuc->active_mask)) |
1050 | * Must be done before we disable, otherwise the nmi handler | 1049 | return; |
1051 | * could reenable again: | 1050 | |
1052 | */ | 1051 | x86_pmu.disable(event); |
1053 | clear_bit(idx, cpuc->active_mask); | ||
1054 | x86_pmu.disable(hwc, idx); | ||
1055 | 1052 | ||
1056 | /* | 1053 | /* |
1057 | * Drain the remaining delta count out of a event | 1054 | * Drain the remaining delta count out of a event |
1058 | * that we are disabling: | 1055 | * that we are disabling: |
1059 | */ | 1056 | */ |
1060 | x86_perf_event_update(event, hwc, idx); | 1057 | x86_perf_event_update(event); |
1061 | 1058 | ||
1062 | cpuc->events[idx] = NULL; | 1059 | cpuc->events[idx] = NULL; |
1063 | } | 1060 | } |
@@ -1094,8 +1091,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1094 | int idx, handled = 0; | 1091 | int idx, handled = 0; |
1095 | u64 val; | 1092 | u64 val; |
1096 | 1093 | ||
1097 | data.addr = 0; | 1094 | perf_sample_data_init(&data, 0); |
1098 | data.raw = NULL; | ||
1099 | 1095 | ||
1100 | cpuc = &__get_cpu_var(cpu_hw_events); | 1096 | cpuc = &__get_cpu_var(cpu_hw_events); |
1101 | 1097 | ||
@@ -1106,7 +1102,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1106 | event = cpuc->events[idx]; | 1102 | event = cpuc->events[idx]; |
1107 | hwc = &event->hw; | 1103 | hwc = &event->hw; |
1108 | 1104 | ||
1109 | val = x86_perf_event_update(event, hwc, idx); | 1105 | val = x86_perf_event_update(event); |
1110 | if (val & (1ULL << (x86_pmu.event_bits - 1))) | 1106 | if (val & (1ULL << (x86_pmu.event_bits - 1))) |
1111 | continue; | 1107 | continue; |
1112 | 1108 | ||
@@ -1116,11 +1112,11 @@ static int x86_pmu_handle_irq(struct pt_regs *regs) | |||
1116 | handled = 1; | 1112 | handled = 1; |
1117 | data.period = event->hw.last_period; | 1113 | data.period = event->hw.last_period; |
1118 | 1114 | ||
1119 | if (!x86_perf_event_set_period(event, hwc, idx)) | 1115 | if (!x86_perf_event_set_period(event)) |
1120 | continue; | 1116 | continue; |
1121 | 1117 | ||
1122 | if (perf_event_overflow(event, 1, &data, regs)) | 1118 | if (perf_event_overflow(event, 1, &data, regs)) |
1123 | x86_pmu.disable(hwc, idx); | 1119 | x86_pmu_stop(event); |
1124 | } | 1120 | } |
1125 | 1121 | ||
1126 | if (handled) | 1122 | if (handled) |
@@ -1307,7 +1303,7 @@ int hw_perf_group_sched_in(struct perf_event *leader, | |||
1307 | memcpy(cpuc->assign, assign, n0*sizeof(int)); | 1303 | memcpy(cpuc->assign, assign, n0*sizeof(int)); |
1308 | 1304 | ||
1309 | cpuc->n_events = n0; | 1305 | cpuc->n_events = n0; |
1310 | cpuc->n_added = n1; | 1306 | cpuc->n_added += n1; |
1311 | ctx->nr_active += n1; | 1307 | ctx->nr_active += n1; |
1312 | 1308 | ||
1313 | /* | 1309 | /* |
@@ -1335,6 +1331,41 @@ undo: | |||
1335 | #include "perf_event_p6.c" | 1331 | #include "perf_event_p6.c" |
1336 | #include "perf_event_intel.c" | 1332 | #include "perf_event_intel.c" |
1337 | 1333 | ||
1334 | static int __cpuinit | ||
1335 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | ||
1336 | { | ||
1337 | unsigned int cpu = (long)hcpu; | ||
1338 | int ret = NOTIFY_OK; | ||
1339 | |||
1340 | switch (action & ~CPU_TASKS_FROZEN) { | ||
1341 | case CPU_UP_PREPARE: | ||
1342 | if (x86_pmu.cpu_prepare) | ||
1343 | ret = x86_pmu.cpu_prepare(cpu); | ||
1344 | break; | ||
1345 | |||
1346 | case CPU_STARTING: | ||
1347 | if (x86_pmu.cpu_starting) | ||
1348 | x86_pmu.cpu_starting(cpu); | ||
1349 | break; | ||
1350 | |||
1351 | case CPU_DYING: | ||
1352 | if (x86_pmu.cpu_dying) | ||
1353 | x86_pmu.cpu_dying(cpu); | ||
1354 | break; | ||
1355 | |||
1356 | case CPU_UP_CANCELED: | ||
1357 | case CPU_DEAD: | ||
1358 | if (x86_pmu.cpu_dead) | ||
1359 | x86_pmu.cpu_dead(cpu); | ||
1360 | break; | ||
1361 | |||
1362 | default: | ||
1363 | break; | ||
1364 | } | ||
1365 | |||
1366 | return ret; | ||
1367 | } | ||
1368 | |||
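The new x86_pmu_notifier() above masks off CPU_TASKS_FROZEN so suspend/resume transitions take the same branches as ordinary hotplug, then dispatches to whichever per-vendor callbacks are populated. A runnable userspace model of that dispatch (the constants carry the values used by contemporary kernels and are shown only to make the masking concrete):

#include <stdio.h>

#define CPU_UP_PREPARE   0x0003
#define CPU_TASKS_FROZEN 0x0010

struct pmu_ops { int (*cpu_prepare)(int cpu); };

static int amd_prepare(int cpu) { printf("prepare cpu %d\n", cpu); return 0; }

static int notifier(const struct pmu_ops *ops, unsigned long action, int cpu)
{
    switch (action & ~CPU_TASKS_FROZEN) {    /* frozen variant, same branch */
    case CPU_UP_PREPARE:
        return ops->cpu_prepare ? ops->cpu_prepare(cpu) : 0;
    default:
        return 0;
    }
}

int main(void)
{
    struct pmu_ops amd = { .cpu_prepare = amd_prepare };

    notifier(&amd, CPU_UP_PREPARE, 1);                    /* normal hotplug */
    notifier(&amd, CPU_UP_PREPARE | CPU_TASKS_FROZEN, 2); /* resume path */
    return 0;
}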
1338 | static void __init pmu_check_apic(void) | 1369 | static void __init pmu_check_apic(void) |
1339 | { | 1370 | { |
1340 | if (cpu_has_apic) | 1371 | if (cpu_has_apic) |
@@ -1347,6 +1378,7 @@ static void __init pmu_check_apic(void) | |||
1347 | 1378 | ||
1348 | void __init init_hw_perf_events(void) | 1379 | void __init init_hw_perf_events(void) |
1349 | { | 1380 | { |
1381 | struct event_constraint *c; | ||
1350 | int err; | 1382 | int err; |
1351 | 1383 | ||
1352 | pr_info("Performance Events: "); | 1384 | pr_info("Performance Events: "); |
@@ -1395,6 +1427,16 @@ void __init init_hw_perf_events(void) | |||
1395 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, | 1427 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, |
1396 | 0, x86_pmu.num_events); | 1428 | 0, x86_pmu.num_events); |
1397 | 1429 | ||
1430 | if (x86_pmu.event_constraints) { | ||
1431 | for_each_event_constraint(c, x86_pmu.event_constraints) { | ||
1432 | if (c->cmask != INTEL_ARCH_FIXED_MASK) | ||
1433 | continue; | ||
1434 | |||
1435 | c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1; | ||
1436 | c->weight += x86_pmu.num_events; | ||
1437 | } | ||
1438 | } | ||
1439 | |||
1398 | pr_info("... version: %d\n", x86_pmu.version); | 1440 | pr_info("... version: %d\n", x86_pmu.version); |
1399 | pr_info("... bit width: %d\n", x86_pmu.event_bits); | 1441 | pr_info("... bit width: %d\n", x86_pmu.event_bits); |
1400 | pr_info("... generic registers: %d\n", x86_pmu.num_events); | 1442 | pr_info("... generic registers: %d\n", x86_pmu.num_events); |
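The init-time fixup loop above widens every fixed-counter constraint so the event can also fall back onto a generic counter: the generic-counter mask is OR-ed into idxmsk64 and the weight grows accordingly. With 4 generic counters and fixed counter 0, for example:

#include <stdio.h>

int main(void)
{
    int num_events = 4;                     /* generic counters 0..3 */
    unsigned long long idxmsk = 1ULL << 32; /* fixed counter 0 only */
    int weight = 1;

    idxmsk |= (1ULL << num_events) - 1;     /* ...or any generic counter */
    weight += num_events;

    printf("idxmsk64 = %#llx, weight = %d\n", idxmsk, weight);
    return 0;                               /* 0x10000000f, weight 5 */
}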
@@ -1402,11 +1444,13 @@ void __init init_hw_perf_events(void) | |||
1402 | pr_info("... max period: %016Lx\n", x86_pmu.max_period); | 1444 | pr_info("... max period: %016Lx\n", x86_pmu.max_period); |
1403 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); | 1445 | pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed); |
1404 | pr_info("... event mask: %016Lx\n", perf_event_mask); | 1446 | pr_info("... event mask: %016Lx\n", perf_event_mask); |
1447 | |||
1448 | perf_cpu_notifier(x86_pmu_notifier); | ||
1405 | } | 1449 | } |
1406 | 1450 | ||
1407 | static inline void x86_pmu_read(struct perf_event *event) | 1451 | static inline void x86_pmu_read(struct perf_event *event) |
1408 | { | 1452 | { |
1409 | x86_perf_event_update(event, &event->hw, event->hw.idx); | 1453 | x86_perf_event_update(event); |
1410 | } | 1454 | } |
1411 | 1455 | ||
1412 | static const struct pmu pmu = { | 1456 | static const struct pmu pmu = { |
@@ -1588,14 +1632,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) | |||
1588 | return len; | 1632 | return len; |
1589 | } | 1633 | } |
1590 | 1634 | ||
1591 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) | 1635 | #ifdef CONFIG_COMPAT |
1636 | static inline int | ||
1637 | perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1592 | { | 1638 | { |
1593 | unsigned long bytes; | 1639 | /* 32-bit process in 64-bit kernel. */ |
1640 | struct stack_frame_ia32 frame; | ||
1641 | const void __user *fp; | ||
1594 | 1642 | ||
1595 | bytes = copy_from_user_nmi(frame, fp, sizeof(*frame)); | 1643 | if (!test_thread_flag(TIF_IA32)) |
1644 | return 0; | ||
1596 | 1645 | ||
1597 | return bytes == sizeof(*frame); | 1646 | fp = compat_ptr(regs->bp); |
1647 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | ||
1648 | unsigned long bytes; | ||
1649 | frame.next_frame = 0; | ||
1650 | frame.return_address = 0; | ||
1651 | |||
1652 | bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); | ||
1653 | if (bytes != sizeof(frame)) | ||
1654 | break; | ||
1655 | |||
1656 | if (fp < compat_ptr(regs->sp)) | ||
1657 | break; | ||
1658 | |||
1659 | callchain_store(entry, frame.return_address); | ||
1660 | fp = compat_ptr(frame.next_frame); | ||
1661 | } | ||
1662 | return 1; | ||
1663 | } | ||
1664 | #else | ||
1665 | static inline int | ||
1666 | perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1667 | { | ||
1668 | return 0; | ||
1598 | } | 1669 | } |
1670 | #endif | ||
1599 | 1671 | ||
1600 | static void | 1672 | static void |
1601 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | 1673 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) |
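perf_callchain_user32() above walks a compat process's stack using the 32-bit frame layout: each frame is a pair of u32s, saved frame pointer then return address, fetched one frame at a time with copy_from_user_nmi() until a short copy or a frame below regs->sp. A self-contained model that links frames by array index instead of user pointers:

#include <inttypes.h>
#include <stdio.h>

struct stack_frame_ia32 {
    uint32_t next_frame;        /* "pointer" to the caller's frame */
    uint32_t return_address;
};

int main(void)
{
    /* three fake stacked frames, linked by index; 0 terminates */
    struct stack_frame_ia32 stack[4] = {
        { 0, 0 },               /* unused so index 0 can mean "end" */
        { 2, 0x1111 },
        { 3, 0x2222 },
        { 0, 0x3333 },
    };

    for (uint32_t fp = 1; fp != 0; fp = stack[fp].next_frame)
        printf("return address %#" PRIx32 "\n", stack[fp].return_address);
    return 0;
}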
@@ -1611,11 +1683,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | |||
1611 | callchain_store(entry, PERF_CONTEXT_USER); | 1683 | callchain_store(entry, PERF_CONTEXT_USER); |
1612 | callchain_store(entry, regs->ip); | 1684 | callchain_store(entry, regs->ip); |
1613 | 1685 | ||
1686 | if (perf_callchain_user32(regs, entry)) | ||
1687 | return; | ||
1688 | |||
1614 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | 1689 | while (entry->nr < PERF_MAX_STACK_DEPTH) { |
1690 | unsigned long bytes; | ||
1615 | frame.next_frame = NULL; | 1691 | frame.next_frame = NULL; |
1616 | frame.return_address = 0; | 1692 | frame.return_address = 0; |
1617 | 1693 | ||
1618 | if (!copy_stack_frame(fp, &frame)) | 1694 | bytes = copy_from_user_nmi(&frame, fp, sizeof(frame)); |
1695 | if (bytes != sizeof(frame)) | ||
1619 | break; | 1696 | break; |
1620 | 1697 | ||
1621 | if ((unsigned long)fp < regs->sp) | 1698 | if ((unsigned long)fp < regs->sp) |
@@ -1662,28 +1739,14 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
1662 | return entry; | 1739 | return entry; |
1663 | } | 1740 | } |
1664 | 1741 | ||
1665 | void hw_perf_event_setup_online(int cpu) | 1742 | void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) |
1666 | { | ||
1667 | init_debug_store_on_cpu(cpu); | ||
1668 | |||
1669 | switch (boot_cpu_data.x86_vendor) { | ||
1670 | case X86_VENDOR_AMD: | ||
1671 | amd_pmu_cpu_online(cpu); | ||
1672 | break; | ||
1673 | default: | ||
1674 | return; | ||
1675 | } | ||
1676 | } | ||
1677 | |||
1678 | void hw_perf_event_setup_offline(int cpu) | ||
1679 | { | 1743 | { |
1680 | init_debug_store_on_cpu(cpu); | 1744 | regs->ip = ip; |
1681 | 1745 | /* | |
1682 | switch (boot_cpu_data.x86_vendor) { | 1746 | * perf_arch_fetch_caller_regs adds another call, we need to increment |
1683 | case X86_VENDOR_AMD: | 1747 | * the skip level |
1684 | amd_pmu_cpu_offline(cpu); | 1748 | */ |
1685 | break; | 1749 | regs->bp = rewind_frame_pointer(skip + 1); |
1686 | default: | 1750 | regs->cs = __KERNEL_CS; |
1687 | return; | 1751 | local_save_flags(regs->flags); |
1688 | } | ||
1689 | } | 1752 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 8f3dbfda3c4f..db6f7d4056e1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc) | |||
137 | return (hwc->config & 0xe0) == 0xe0; | 137 | return (hwc->config & 0xe0) == 0xe0; |
138 | } | 138 | } |
139 | 139 | ||
140 | static inline int amd_has_nb(struct cpu_hw_events *cpuc) | ||
141 | { | ||
142 | struct amd_nb *nb = cpuc->amd_nb; | ||
143 | |||
144 | return nb && nb->nb_id != -1; | ||
145 | } | ||
146 | |||
140 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | 147 | static void amd_put_event_constraints(struct cpu_hw_events *cpuc, |
141 | struct perf_event *event) | 148 | struct perf_event *event) |
142 | { | 149 | { |
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc, | |||
147 | /* | 154 | /* |
148 | * only care about NB events | 155 | * only care about NB events |
149 | */ | 156 | */ |
150 | if (!(nb && amd_is_nb_event(hwc))) | 157 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) |
151 | return; | 158 | return; |
152 | 159 | ||
153 | /* | 160 | /* |
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) | |||
214 | /* | 221 | /* |
215 | * if not NB event or no NB, then no constraints | 222 | * if not NB event or no NB, then no constraints |
216 | */ | 223 | */ |
217 | if (!(nb && amd_is_nb_event(hwc))) | 224 | if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) |
218 | return &unconstrained; | 225 | return &unconstrained; |
219 | 226 | ||
220 | /* | 227 | /* |
@@ -271,28 +278,6 @@ done: | |||
271 | return &emptyconstraint; | 278 | return &emptyconstraint; |
272 | } | 279 | } |
273 | 280 | ||
274 | static __initconst struct x86_pmu amd_pmu = { | ||
275 | .name = "AMD", | ||
276 | .handle_irq = x86_pmu_handle_irq, | ||
277 | .disable_all = x86_pmu_disable_all, | ||
278 | .enable_all = x86_pmu_enable_all, | ||
279 | .enable = x86_pmu_enable_event, | ||
280 | .disable = x86_pmu_disable_event, | ||
281 | .eventsel = MSR_K7_EVNTSEL0, | ||
282 | .perfctr = MSR_K7_PERFCTR0, | ||
283 | .event_map = amd_pmu_event_map, | ||
284 | .raw_event = amd_pmu_raw_event, | ||
285 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
286 | .num_events = 4, | ||
287 | .event_bits = 48, | ||
288 | .event_mask = (1ULL << 48) - 1, | ||
289 | .apic = 1, | ||
290 | /* use highest bit to detect overflow */ | ||
291 | .max_period = (1ULL << 47) - 1, | ||
292 | .get_event_constraints = amd_get_event_constraints, | ||
293 | .put_event_constraints = amd_put_event_constraints | ||
294 | }; | ||
295 | |||
296 | static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) | 281 | static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) |
297 | { | 282 | { |
298 | struct amd_nb *nb; | 283 | struct amd_nb *nb; |
@@ -309,57 +294,61 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id) | |||
309 | * initialize all possible NB constraints | 294 | * initialize all possible NB constraints |
310 | */ | 295 | */ |
311 | for (i = 0; i < x86_pmu.num_events; i++) { | 296 | for (i = 0; i < x86_pmu.num_events; i++) { |
312 | set_bit(i, nb->event_constraints[i].idxmsk); | 297 | __set_bit(i, nb->event_constraints[i].idxmsk); |
313 | nb->event_constraints[i].weight = 1; | 298 | nb->event_constraints[i].weight = 1; |
314 | } | 299 | } |
315 | return nb; | 300 | return nb; |
316 | } | 301 | } |
317 | 302 | ||
318 | static void amd_pmu_cpu_online(int cpu) | 303 | static int amd_pmu_cpu_prepare(int cpu) |
319 | { | 304 | { |
320 | struct cpu_hw_events *cpu1, *cpu2; | 305 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
321 | struct amd_nb *nb = NULL; | 306 | |
307 | WARN_ON_ONCE(cpuc->amd_nb); | ||
308 | |||
309 | if (boot_cpu_data.x86_max_cores < 2) | ||
310 | return NOTIFY_OK; | ||
311 | |||
312 | cpuc->amd_nb = amd_alloc_nb(cpu, -1); | ||
313 | if (!cpuc->amd_nb) | ||
314 | return NOTIFY_BAD; | ||
315 | |||
316 | return NOTIFY_OK; | ||
317 | } | ||
318 | |||
319 | static void amd_pmu_cpu_starting(int cpu) | ||
320 | { | ||
321 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | ||
322 | struct amd_nb *nb; | ||
322 | int i, nb_id; | 323 | int i, nb_id; |
323 | 324 | ||
324 | if (boot_cpu_data.x86_max_cores < 2) | 325 | if (boot_cpu_data.x86_max_cores < 2) |
325 | return; | 326 | return; |
326 | 327 | ||
327 | /* | ||
328 | * function may be called too early in the | ||
329 | * boot process, in which case nb_id is bogus | ||
330 | */ | ||
331 | nb_id = amd_get_nb_id(cpu); | 328 | nb_id = amd_get_nb_id(cpu); |
332 | if (nb_id == BAD_APICID) | 329 | WARN_ON_ONCE(nb_id == BAD_APICID); |
333 | return; | ||
334 | |||
335 | cpu1 = &per_cpu(cpu_hw_events, cpu); | ||
336 | cpu1->amd_nb = NULL; | ||
337 | 330 | ||
338 | raw_spin_lock(&amd_nb_lock); | 331 | raw_spin_lock(&amd_nb_lock); |
339 | 332 | ||
340 | for_each_online_cpu(i) { | 333 | for_each_online_cpu(i) { |
341 | cpu2 = &per_cpu(cpu_hw_events, i); | 334 | nb = per_cpu(cpu_hw_events, i).amd_nb; |
342 | nb = cpu2->amd_nb; | 335 | if (WARN_ON_ONCE(!nb)) |
343 | if (!nb) | ||
344 | continue; | 336 | continue; |
345 | if (nb->nb_id == nb_id) | ||
346 | goto found; | ||
347 | } | ||
348 | 337 | ||
349 | nb = amd_alloc_nb(cpu, nb_id); | 338 | if (nb->nb_id == nb_id) { |
350 | if (!nb) { | 339 | kfree(cpuc->amd_nb); |
351 | pr_err("perf_events: failed NB allocation for CPU%d\n", cpu); | 340 | cpuc->amd_nb = nb; |
352 | raw_spin_unlock(&amd_nb_lock); | 341 | break; |
353 | return; | 342 | } |
354 | } | 343 | } |
355 | found: | 344 | |
356 | nb->refcnt++; | 345 | cpuc->amd_nb->nb_id = nb_id; |
357 | cpu1->amd_nb = nb; | 346 | cpuc->amd_nb->refcnt++; |
358 | 347 | ||
359 | raw_spin_unlock(&amd_nb_lock); | 348 | raw_spin_unlock(&amd_nb_lock); |
360 | } | 349 | } |
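The split into amd_pmu_cpu_prepare()/amd_pmu_cpu_starting() above implements a share-or-keep protocol for the per-northbridge structure: every CPU preallocates an amd_nb while sleeping is still allowed, and the first CPU of a node to reach cpu_starting keeps its copy while later ones free theirs and take a reference on the existing one. A userspace model of the protocol (a per-node lookup table replaces the for_each_online_cpu() scan):

#include <stdio.h>
#include <stdlib.h>

struct amd_nb { int nb_id; int refcnt; };

static struct amd_nb *node_nb[2];           /* one slot per node */

static struct amd_nb *cpu_starting(struct amd_nb *mine, int nb_id)
{
    if (node_nb[nb_id]) {                   /* someone got here first: share */
        free(mine);
        node_nb[nb_id]->refcnt++;
        return node_nb[nb_id];
    }
    mine->nb_id = nb_id;                    /* first CPU of the node: keep */
    mine->refcnt = 1;
    node_nb[nb_id] = mine;
    return mine;
}

int main(void)
{
    struct amd_nb *a = cpu_starting(calloc(1, sizeof(struct amd_nb)), 0);
    struct amd_nb *b = cpu_starting(calloc(1, sizeof(struct amd_nb)), 0);

    printf("shared=%d refcnt=%d\n", a == b, a->refcnt);  /* shared=1 refcnt=2 */
    free(a);
    return 0;
}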
361 | 350 | ||
362 | static void amd_pmu_cpu_offline(int cpu) | 351 | static void amd_pmu_cpu_dead(int cpu) |
363 | { | 352 | { |
364 | struct cpu_hw_events *cpuhw; | 353 | struct cpu_hw_events *cpuhw; |
365 | 354 | ||
@@ -370,14 +359,44 @@ static void amd_pmu_cpu_offline(int cpu) | |||
370 | 359 | ||
371 | raw_spin_lock(&amd_nb_lock); | 360 | raw_spin_lock(&amd_nb_lock); |
372 | 361 | ||
373 | if (--cpuhw->amd_nb->refcnt == 0) | 362 | if (cpuhw->amd_nb) { |
374 | kfree(cpuhw->amd_nb); | 363 | struct amd_nb *nb = cpuhw->amd_nb; |
364 | |||
365 | if (nb->nb_id == -1 || --nb->refcnt == 0) | ||
366 | kfree(nb); | ||
375 | 367 | ||
376 | cpuhw->amd_nb = NULL; | 368 | cpuhw->amd_nb = NULL; |
369 | } | ||
377 | 370 | ||
378 | raw_spin_unlock(&amd_nb_lock); | 371 | raw_spin_unlock(&amd_nb_lock); |
379 | } | 372 | } |
380 | 373 | ||
374 | static __initconst struct x86_pmu amd_pmu = { | ||
375 | .name = "AMD", | ||
376 | .handle_irq = x86_pmu_handle_irq, | ||
377 | .disable_all = x86_pmu_disable_all, | ||
378 | .enable_all = x86_pmu_enable_all, | ||
379 | .enable = x86_pmu_enable_event, | ||
380 | .disable = x86_pmu_disable_event, | ||
381 | .eventsel = MSR_K7_EVNTSEL0, | ||
382 | .perfctr = MSR_K7_PERFCTR0, | ||
383 | .event_map = amd_pmu_event_map, | ||
384 | .raw_event = amd_pmu_raw_event, | ||
385 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
386 | .num_events = 4, | ||
387 | .event_bits = 48, | ||
388 | .event_mask = (1ULL << 48) - 1, | ||
389 | .apic = 1, | ||
390 | /* use highest bit to detect overflow */ | ||
391 | .max_period = (1ULL << 47) - 1, | ||
392 | .get_event_constraints = amd_get_event_constraints, | ||
393 | .put_event_constraints = amd_put_event_constraints, | ||
394 | |||
395 | .cpu_prepare = amd_pmu_cpu_prepare, | ||
396 | .cpu_starting = amd_pmu_cpu_starting, | ||
397 | .cpu_dead = amd_pmu_cpu_dead, | ||
398 | }; | ||
399 | |||
381 | static __init int amd_pmu_init(void) | 400 | static __init int amd_pmu_init(void) |
382 | { | 401 | { |
383 | /* Performance-monitoring supported from K7 and later: */ | 402 | /* Performance-monitoring supported from K7 and later: */ |
@@ -390,11 +409,6 @@ static __init int amd_pmu_init(void) | |||
390 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, | 409 | memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, |
391 | sizeof(hw_cache_event_ids)); | 410 | sizeof(hw_cache_event_ids)); |
392 | 411 | ||
393 | /* | ||
394 | * explicitly initialize the boot cpu, other cpus will get | ||
395 | * the cpu hotplug callbacks from smp_init() | ||
396 | */ | ||
397 | amd_pmu_cpu_online(smp_processor_id()); | ||
398 | return 0; | 412 | return 0; |
399 | } | 413 | } |
400 | 414 | ||
@@ -405,12 +419,4 @@ static int amd_pmu_init(void) | |||
405 | return 0; | 419 | return 0; |
406 | } | 420 | } |
407 | 421 | ||
408 | static void amd_pmu_cpu_online(int cpu) | ||
409 | { | ||
410 | } | ||
411 | |||
412 | static void amd_pmu_cpu_offline(int cpu) | ||
413 | { | ||
414 | } | ||
415 | |||
416 | #endif | 422 | #endif |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 977e7544738c..9c794ac87837 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1,7 +1,7 @@ | |||
1 | #ifdef CONFIG_CPU_SUP_INTEL | 1 | #ifdef CONFIG_CPU_SUP_INTEL |
2 | 2 | ||
3 | /* | 3 | /* |
4 | * Intel PerfMon v3. Used on Core2 and later. | 4 | * Intel PerfMon, used on Core and later. |
5 | */ | 5 | */ |
6 | static const u64 intel_perfmon_event_map[] = | 6 | static const u64 intel_perfmon_event_map[] = |
7 | { | 7 | { |
@@ -27,8 +27,14 @@ static struct event_constraint intel_core_event_constraints[] = | |||
27 | 27 | ||
28 | static struct event_constraint intel_core2_event_constraints[] = | 28 | static struct event_constraint intel_core2_event_constraints[] = |
29 | { | 29 | { |
30 | FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | 30 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
31 | FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | 31 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
32 | /* | ||
33 | * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event | ||
34 | * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed | ||
35 | * ratio between these counters. | ||
36 | */ | ||
37 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
32 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ | 38 | INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */ |
33 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ | 39 | INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */ |
34 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ | 40 | INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */ |
@@ -37,14 +43,16 @@ static struct event_constraint intel_core2_event_constraints[] = | |||
37 | INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ | 43 | INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */ |
38 | INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ | 44 | INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */ |
39 | INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ | 45 | INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */ |
46 | INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */ | ||
40 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ | 47 | INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */ |
41 | EVENT_CONSTRAINT_END | 48 | EVENT_CONSTRAINT_END |
42 | }; | 49 | }; |
43 | 50 | ||
44 | static struct event_constraint intel_nehalem_event_constraints[] = | 51 | static struct event_constraint intel_nehalem_event_constraints[] = |
45 | { | 52 | { |
46 | FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | 53 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
47 | FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | 54 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
55 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
48 | INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ | 56 | INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */ |
49 | INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ | 57 | INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */ |
50 | INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ | 58 | INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */ |
@@ -58,8 +66,9 @@ static struct event_constraint intel_nehalem_event_constraints[] = | |||
58 | 66 | ||
59 | static struct event_constraint intel_westmere_event_constraints[] = | 67 | static struct event_constraint intel_westmere_event_constraints[] = |
60 | { | 68 | { |
61 | FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | 69 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
62 | FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | 70 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
71 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
63 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ | 72 | INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */ |
64 | INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ | 73 | INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */ |
65 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ | 74 | INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */ |
@@ -68,8 +77,9 @@ static struct event_constraint intel_westmere_event_constraints[] = | |||
68 | 77 | ||
69 | static struct event_constraint intel_gen_event_constraints[] = | 78 | static struct event_constraint intel_gen_event_constraints[] = |
70 | { | 79 | { |
71 | FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */ | 80 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
72 | FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */ | 81 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
82 | /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */ | ||
73 | EVENT_CONSTRAINT_END | 83 | EVENT_CONSTRAINT_END |
74 | }; | 84 | }; |
75 | 85 | ||
@@ -538,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack) | |||
538 | } | 548 | } |
539 | 549 | ||
540 | static inline void | 550 | static inline void |
541 | intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx) | 551 | intel_pmu_disable_fixed(struct hw_perf_event *hwc) |
542 | { | 552 | { |
543 | int idx = __idx - X86_PMC_IDX_FIXED; | 553 | int idx = hwc->idx - X86_PMC_IDX_FIXED; |
544 | u64 ctrl_val, mask; | 554 | u64 ctrl_val, mask; |
545 | 555 | ||
546 | mask = 0xfULL << (idx * 4); | 556 | mask = 0xfULL << (idx * 4); |
@@ -580,10 +590,9 @@ static void intel_pmu_drain_bts_buffer(void) | |||
580 | 590 | ||
581 | ds->bts_index = ds->bts_buffer_base; | 591 | ds->bts_index = ds->bts_buffer_base; |
582 | 592 | ||
593 | perf_sample_data_init(&data, 0); | ||
583 | 594 | ||
584 | data.period = event->hw.last_period; | 595 | data.period = event->hw.last_period; |
585 | data.addr = 0; | ||
586 | data.raw = NULL; | ||
587 | regs.ip = 0; | 596 | regs.ip = 0; |
588 | 597 | ||
589 | /* | 598 | /* |
@@ -612,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void) | |||
612 | } | 621 | } |
613 | 622 | ||
614 | static inline void | 623 | static inline void |
615 | intel_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 624 | intel_pmu_disable_event(struct perf_event *event) |
616 | { | 625 | { |
617 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | 626 | struct hw_perf_event *hwc = &event->hw; |
627 | |||
628 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | ||
618 | intel_pmu_disable_bts(); | 629 | intel_pmu_disable_bts(); |
619 | intel_pmu_drain_bts_buffer(); | 630 | intel_pmu_drain_bts_buffer(); |
620 | return; | 631 | return; |
621 | } | 632 | } |
622 | 633 | ||
623 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 634 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
624 | intel_pmu_disable_fixed(hwc, idx); | 635 | intel_pmu_disable_fixed(hwc); |
625 | return; | 636 | return; |
626 | } | 637 | } |
627 | 638 | ||
628 | x86_pmu_disable_event(hwc, idx); | 639 | x86_pmu_disable_event(event); |
629 | } | 640 | } |
630 | 641 | ||
631 | static inline void | 642 | static inline void |
632 | intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) | 643 | intel_pmu_enable_fixed(struct hw_perf_event *hwc) |
633 | { | 644 | { |
634 | int idx = __idx - X86_PMC_IDX_FIXED; | 645 | int idx = hwc->idx - X86_PMC_IDX_FIXED; |
635 | u64 ctrl_val, bits, mask; | 646 | u64 ctrl_val, bits, mask; |
636 | int err; | 647 | int err; |
637 | 648 | ||
@@ -661,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx) | |||
661 | err = checking_wrmsrl(hwc->config_base, ctrl_val); | 672 | err = checking_wrmsrl(hwc->config_base, ctrl_val); |
662 | } | 673 | } |
663 | 674 | ||
664 | static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 675 | static void intel_pmu_enable_event(struct perf_event *event) |
665 | { | 676 | { |
666 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | 677 | struct hw_perf_event *hwc = &event->hw; |
678 | |||
679 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | ||
667 | if (!__get_cpu_var(cpu_hw_events).enabled) | 680 | if (!__get_cpu_var(cpu_hw_events).enabled) |
668 | return; | 681 | return; |
669 | 682 | ||
@@ -672,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
672 | } | 685 | } |
673 | 686 | ||
674 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 687 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
675 | intel_pmu_enable_fixed(hwc, idx); | 688 | intel_pmu_enable_fixed(hwc); |
676 | return; | 689 | return; |
677 | } | 690 | } |
678 | 691 | ||
679 | __x86_pmu_enable_event(hwc, idx); | 692 | __x86_pmu_enable_event(hwc); |
680 | } | 693 | } |
681 | 694 | ||
682 | /* | 695 | /* |
@@ -685,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
685 | */ | 698 | */ |
686 | static int intel_pmu_save_and_restart(struct perf_event *event) | 699 | static int intel_pmu_save_and_restart(struct perf_event *event) |
687 | { | 700 | { |
688 | struct hw_perf_event *hwc = &event->hw; | 701 | x86_perf_event_update(event); |
689 | int idx = hwc->idx; | 702 | return x86_perf_event_set_period(event); |
690 | int ret; | ||
691 | |||
692 | x86_perf_event_update(event, hwc, idx); | ||
693 | ret = x86_perf_event_set_period(event, hwc, idx); | ||
694 | |||
695 | return ret; | ||
696 | } | 703 | } |
697 | 704 | ||
698 | static void intel_pmu_reset(void) | 705 | static void intel_pmu_reset(void) |
@@ -732,16 +739,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
732 | int bit, loops; | 739 | int bit, loops; |
733 | u64 ack, status; | 740 | u64 ack, status; |
734 | 741 | ||
735 | data.addr = 0; | 742 | perf_sample_data_init(&data, 0); |
736 | data.raw = NULL; | ||
737 | 743 | ||
738 | cpuc = &__get_cpu_var(cpu_hw_events); | 744 | cpuc = &__get_cpu_var(cpu_hw_events); |
739 | 745 | ||
740 | perf_disable(); | 746 | intel_pmu_disable_all(); |
741 | intel_pmu_drain_bts_buffer(); | 747 | intel_pmu_drain_bts_buffer(); |
742 | status = intel_pmu_get_status(); | 748 | status = intel_pmu_get_status(); |
743 | if (!status) { | 749 | if (!status) { |
744 | perf_enable(); | 750 | intel_pmu_enable_all(); |
745 | return 0; | 751 | return 0; |
746 | } | 752 | } |
747 | 753 | ||
@@ -751,8 +757,7 @@ again: | |||
751 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); | 757 | WARN_ONCE(1, "perfevents: irq loop stuck!\n"); |
752 | perf_event_print_debug(); | 758 | perf_event_print_debug(); |
753 | intel_pmu_reset(); | 759 | intel_pmu_reset(); |
754 | perf_enable(); | 760 | goto done; |
755 | return 1; | ||
756 | } | 761 | } |
757 | 762 | ||
758 | inc_irq_stat(apic_perf_irqs); | 763 | inc_irq_stat(apic_perf_irqs); |
@@ -760,7 +765,6 @@ again: | |||
760 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | 765 | for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { |
761 | struct perf_event *event = cpuc->events[bit]; | 766 | struct perf_event *event = cpuc->events[bit]; |
762 | 767 | ||
763 | clear_bit(bit, (unsigned long *) &status); | ||
764 | if (!test_bit(bit, cpuc->active_mask)) | 768 | if (!test_bit(bit, cpuc->active_mask)) |
765 | continue; | 769 | continue; |
766 | 770 | ||
@@ -770,7 +774,7 @@ again: | |||
770 | data.period = event->hw.last_period; | 774 | data.period = event->hw.last_period; |
771 | 775 | ||
772 | if (perf_event_overflow(event, 1, &data, regs)) | 776 | if (perf_event_overflow(event, 1, &data, regs)) |
773 | intel_pmu_disable_event(&event->hw, bit); | 777 | x86_pmu_stop(event); |
774 | } | 778 | } |
775 | 779 | ||
776 | intel_pmu_ack_status(ack); | 780 | intel_pmu_ack_status(ack); |
@@ -782,8 +786,8 @@ again: | |||
782 | if (status) | 786 | if (status) |
783 | goto again; | 787 | goto again; |
784 | 788 | ||
785 | perf_enable(); | 789 | done: |
786 | 790 | intel_pmu_enable_all(); | |
787 | return 1; | 791 | return 1; |
788 | } | 792 | } |
789 | 793 | ||
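intel_pmu_handle_irq() above now brackets its work with intel_pmu_disable_all()/intel_pmu_enable_all() directly and funnels the stuck-loop reset through a single done: exit, so the PMU is re-enabled on every path. A toy loop with the same control shape (get_status() fakes two rounds of pending overflows):

#include <stdio.h>

static int get_status(void)
{
    static int pending = 2;
    return pending-- > 0;
}

int main(void)
{
    int loops = 0, handled = 0;

    /* disable_all(); */
again:
    if (get_status()) {
        if (++loops > 100) {                /* runaway guard */
            printf("irq loop stuck, resetting\n");
            goto done;
        }
        handled = 1;
        printf("handled one round of overflows\n");
        goto again;                         /* re-check status after ack */
    }
done:
    /* enable_all(); -- reached on every path */
    printf("handled=%d\n", handled);
    return 0;
}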
@@ -862,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = { | |||
862 | .max_period = (1ULL << 31) - 1, | 866 | .max_period = (1ULL << 31) - 1, |
863 | .enable_bts = intel_pmu_enable_bts, | 867 | .enable_bts = intel_pmu_enable_bts, |
864 | .disable_bts = intel_pmu_disable_bts, | 868 | .disable_bts = intel_pmu_disable_bts, |
865 | .get_event_constraints = intel_get_event_constraints | 869 | .get_event_constraints = intel_get_event_constraints, |
870 | |||
871 | .cpu_starting = init_debug_store_on_cpu, | ||
872 | .cpu_dying = fini_debug_store_on_cpu, | ||
866 | }; | 873 | }; |
867 | 874 | ||
868 | static __init int intel_pmu_init(void) | 875 | static __init int intel_pmu_init(void) |
@@ -929,13 +936,14 @@ static __init int intel_pmu_init(void) | |||
929 | 936 | ||
930 | case 26: /* 45 nm nehalem, "Bloomfield" */ | 937 | case 26: /* 45 nm nehalem, "Bloomfield" */ |
931 | case 30: /* 45 nm nehalem, "Lynnfield" */ | 938 | case 30: /* 45 nm nehalem, "Lynnfield" */ |
939 | case 46: /* 45 nm nehalem-ex, "Beckton" */ | ||
932 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | 940 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, |
933 | sizeof(hw_cache_event_ids)); | 941 | sizeof(hw_cache_event_ids)); |
934 | 942 | ||
935 | x86_pmu.event_constraints = intel_nehalem_event_constraints; | 943 | x86_pmu.event_constraints = intel_nehalem_event_constraints; |
936 | pr_cont("Nehalem/Corei7 events, "); | 944 | pr_cont("Nehalem/Corei7 events, "); |
937 | break; | 945 | break; |
938 | case 28: | 946 | case 28: /* Atom */ |
939 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, | 947 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, |
940 | sizeof(hw_cache_event_ids)); | 948 | sizeof(hw_cache_event_ids)); |
941 | 949 | ||
@@ -951,6 +959,7 @@ static __init int intel_pmu_init(void) | |||
951 | x86_pmu.event_constraints = intel_westmere_event_constraints; | 959 | x86_pmu.event_constraints = intel_westmere_event_constraints; |
952 | pr_cont("Westmere events, "); | 960 | pr_cont("Westmere events, "); |
953 | break; | 961 | break; |
962 | |||
954 | default: | 963 | default: |
955 | /* | 964 | /* |
956 | * default constraints for v2 and up | 965 | * default constraints for v2 and up |
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 1ca5ba078afd..a330485d14da 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -62,7 +62,7 @@ static void p6_pmu_disable_all(void) | |||
62 | 62 | ||
63 | /* p6 only has one enable register */ | 63 | /* p6 only has one enable register */ |
64 | rdmsrl(MSR_P6_EVNTSEL0, val); | 64 | rdmsrl(MSR_P6_EVNTSEL0, val); |
65 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | 65 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; |
66 | wrmsrl(MSR_P6_EVNTSEL0, val); | 66 | wrmsrl(MSR_P6_EVNTSEL0, val); |
67 | } | 67 | } |
68 | 68 | ||
@@ -72,32 +72,34 @@ static void p6_pmu_enable_all(void) | |||
72 | 72 | ||
73 | /* p6 only has one enable register */ | 73 | /* p6 only has one enable register */ |
74 | rdmsrl(MSR_P6_EVNTSEL0, val); | 74 | rdmsrl(MSR_P6_EVNTSEL0, val); |
75 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 75 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
76 | wrmsrl(MSR_P6_EVNTSEL0, val); | 76 | wrmsrl(MSR_P6_EVNTSEL0, val); |
77 | } | 77 | } |
78 | 78 | ||
79 | static inline void | 79 | static inline void |
80 | p6_pmu_disable_event(struct hw_perf_event *hwc, int idx) | 80 | p6_pmu_disable_event(struct perf_event *event) |
81 | { | 81 | { |
82 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 82 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
83 | struct hw_perf_event *hwc = &event->hw; | ||
83 | u64 val = P6_NOP_EVENT; | 84 | u64 val = P6_NOP_EVENT; |
84 | 85 | ||
85 | if (cpuc->enabled) | 86 | if (cpuc->enabled) |
86 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 87 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
87 | 88 | ||
88 | (void)checking_wrmsrl(hwc->config_base + idx, val); | 89 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); |
89 | } | 90 | } |
90 | 91 | ||
91 | static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx) | 92 | static void p6_pmu_enable_event(struct perf_event *event) |
92 | { | 93 | { |
93 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 94 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
95 | struct hw_perf_event *hwc = &event->hw; | ||
94 | u64 val; | 96 | u64 val; |
95 | 97 | ||
96 | val = hwc->config; | 98 | val = hwc->config; |
97 | if (cpuc->enabled) | 99 | if (cpuc->enabled) |
98 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 100 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
99 | 101 | ||
100 | (void)checking_wrmsrl(hwc->config_base + idx, val); | 102 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, val); |
101 | } | 103 | } |
102 | 104 | ||
103 | static __initconst struct x86_pmu p6_pmu = { | 105 | static __initconst struct x86_pmu p6_pmu = { |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 74f4e85a5727..fb329e9f8494 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -680,7 +680,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz) | |||
680 | cpu_nmi_set_wd_enabled(); | 680 | cpu_nmi_set_wd_enabled(); |
681 | 681 | ||
682 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 682 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
683 | evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 683 | evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE; |
684 | wrmsr(evntsel_msr, evntsel, 0); | 684 | wrmsr(evntsel_msr, evntsel, 0); |
685 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); | 685 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); |
686 | return 1; | 686 | return 1; |
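The rename from ARCH_PERFMON_EVENTSEL0_ENABLE to ARCH_PERFMON_EVENTSEL_ENABLE running through the p6 and watchdog files above reflects that the enable bit is bit 22 of every IA32_PERFEVTSELx MSR, not something specific to counter 0:

#include <stdio.h>

/* Bit 22 of IA32_PERFEVTSELx is the per-counter enable; it is identical
 * for every counter, hence dropping the "0" from the macro name. */
#define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22)

int main(void)
{
    unsigned long long evntsel = 0x0100c0;  /* USR | event 0xc0 (example) */

    evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE;
    printf("evntsel = %#llx\n", evntsel);   /* 0x4100c0 */
    return 0;
}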
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 83e5e628de73..8b862d5900fe 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/cpu.h> | 40 | #include <linux/cpu.h> |
41 | #include <linux/notifier.h> | 41 | #include <linux/notifier.h> |
42 | #include <linux/uaccess.h> | 42 | #include <linux/uaccess.h> |
43 | #include <linux/gfp.h> | ||
43 | 44 | ||
44 | #include <asm/processor.h> | 45 | #include <asm/processor.h> |
45 | #include <asm/msr.h> | 46 | #include <asm/msr.h> |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a4849c10a77e..ebd4c51d096a 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/cpu.h> | 27 | #include <asm/cpu.h> |
28 | #include <asm/reboot.h> | 28 | #include <asm/reboot.h> |
29 | #include <asm/virtext.h> | 29 | #include <asm/virtext.h> |
30 | #include <asm/x86_init.h> | ||
31 | 30 | ||
32 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 31 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
33 | 32 | ||
@@ -103,10 +102,5 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
103 | #ifdef CONFIG_HPET_TIMER | 102 | #ifdef CONFIG_HPET_TIMER |
104 | hpet_disable(); | 103 | hpet_disable(); |
105 | #endif | 104 | #endif |
106 | |||
107 | #ifdef CONFIG_X86_64 | ||
108 | x86_platform.iommu_shutdown(); | ||
109 | #endif | ||
110 | |||
111 | crash_save_cpu(regs, safe_smp_processor_id()); | 105 | crash_save_cpu(regs, safe_smp_processor_id()); |
112 | } | 106 | } |
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index cd97ce18c29d..67414550c3cc 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -5,6 +5,7 @@ | |||
5 | * Copyright (C) IBM Corporation, 2004. All rights reserved | 5 | * Copyright (C) IBM Corporation, 2004. All rights reserved |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/slab.h> | ||
8 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
9 | #include <linux/highmem.h> | 10 | #include <linux/highmem.h> |
10 | #include <linux/crash_dump.h> | 11 | #include <linux/crash_dump.h> |
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h index 4fd1420faffa..e1a93be4fd44 100644 --- a/arch/x86/kernel/dumpstack.h +++ b/arch/x86/kernel/dumpstack.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) | 14 | #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :) |
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | #include <linux/uaccess.h> | ||
18 | |||
17 | extern void | 19 | extern void |
18 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | 20 | show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, |
19 | unsigned long *stack, unsigned long bp, char *log_lvl); | 21 | unsigned long *stack, unsigned long bp, char *log_lvl); |
@@ -29,4 +31,26 @@ struct stack_frame { | |||
29 | struct stack_frame *next_frame; | 31 | struct stack_frame *next_frame; |
30 | unsigned long return_address; | 32 | unsigned long return_address; |
31 | }; | 33 | }; |
34 | |||
35 | struct stack_frame_ia32 { | ||
36 | u32 next_frame; | ||
37 | u32 return_address; | ||
38 | }; | ||
39 | |||
40 | static inline unsigned long rewind_frame_pointer(int n) | ||
41 | { | ||
42 | struct stack_frame *frame; | ||
43 | |||
44 | get_bp(frame); | ||
45 | |||
46 | #ifdef CONFIG_FRAME_POINTER | ||
47 | while (n--) { | ||
48 | if (probe_kernel_address(&frame->next_frame, frame)) | ||
49 | break; | ||
50 | } | ||
32 | #endif | 51 | #endif |
52 | |||
53 | return (unsigned long)frame; | ||
54 | } | ||
55 | |||
56 | #endif /* DUMPSTACK_H */ | ||
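rewind_frame_pointer() above walks n saved frame pointers up from the current frame so perf callchains can skip their own helpers; probe_kernel_address() makes each link dereference fault-tolerant. A user-space sketch of the same walk using GCC's __builtin_frame_address; it assumes frame pointers are kept (compile with -fno-omit-frame-pointer) and has no equivalent of the kernel's faulting-read protection:

    #include <stdio.h>

    struct stack_frame {
        struct stack_frame *next_frame;
        unsigned long return_address;
    };

    static unsigned long rewind_fp(int n)
    {
        struct stack_frame *frame = __builtin_frame_address(0);

        while (n-- && frame)
            frame = frame->next_frame;   /* follow the saved frame-pointer chain */
        return (unsigned long)frame;
    }

    int main(void)
    {
        printf("frame 0: %#lx\n", rewind_fp(0));
        printf("frame 1: %#lx\n", rewind_fp(1));
        return 0;
    }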
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index dce99abb4496..272c9f1f05f3 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
@@ -120,9 +120,15 @@ fixup_bp_irq_link(unsigned long bp, unsigned long *stack, | |||
120 | { | 120 | { |
121 | #ifdef CONFIG_FRAME_POINTER | 121 | #ifdef CONFIG_FRAME_POINTER |
122 | struct stack_frame *frame = (struct stack_frame *)bp; | 122 | struct stack_frame *frame = (struct stack_frame *)bp; |
123 | unsigned long next; | ||
123 | 124 | ||
124 | if (!in_irq_stack(stack, irq_stack, irq_stack_end)) | 125 | if (!in_irq_stack(stack, irq_stack, irq_stack_end)) { |
125 | return (unsigned long)frame->next_frame; | 126 | if (!probe_kernel_address(&frame->next_frame, next)) |
127 | return next; | ||
128 | else | ||
129 | WARN_ONCE(1, "Perf: bad frame pointer = %p in " | ||
130 | "callchain\n", &frame->next_frame); | ||
131 | } | ||
126 | #endif | 132 | #endif |
127 | return bp; | 133 | return bp; |
128 | } | 134 | } |
@@ -202,7 +208,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
202 | if (in_irq_stack(stack, irq_stack, irq_stack_end)) { | 208 | if (in_irq_stack(stack, irq_stack, irq_stack_end)) { |
203 | if (ops->stack(data, "IRQ") < 0) | 209 | if (ops->stack(data, "IRQ") < 0) |
204 | break; | 210 | break; |
205 | bp = print_context_stack(tinfo, stack, bp, | 211 | bp = ops->walk_stack(tinfo, stack, bp, |
206 | ops, data, irq_stack_end, &graph); | 212 | ops, data, irq_stack_end, &graph); |
207 | /* | 213 | /* |
208 | * We link to the next stack (which would be | 214 | * We link to the next stack (which would be |
@@ -223,7 +229,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
223 | /* | 229 | /* |
224 | * This handles the process stack: | 230 | * This handles the process stack: |
225 | */ | 231 | */ |
226 | bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph); | 232 | bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); |
227 | put_cpu(); | 233 | put_cpu(); |
228 | } | 234 | } |
229 | EXPORT_SYMBOL(dump_trace); | 235 | EXPORT_SYMBOL(dump_trace); |
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 740b440fbd73..7bca3c6a02fb 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c | |||
@@ -519,29 +519,45 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type, | |||
519 | printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", | 519 | printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", |
520 | (unsigned long long) start, | 520 | (unsigned long long) start, |
521 | (unsigned long long) end); | 521 | (unsigned long long) end); |
522 | e820_print_type(old_type); | 522 | if (checktype) |
523 | e820_print_type(old_type); | ||
523 | printk(KERN_CONT "\n"); | 524 | printk(KERN_CONT "\n"); |
524 | 525 | ||
525 | for (i = 0; i < e820.nr_map; i++) { | 526 | for (i = 0; i < e820.nr_map; i++) { |
526 | struct e820entry *ei = &e820.map[i]; | 527 | struct e820entry *ei = &e820.map[i]; |
527 | u64 final_start, final_end; | 528 | u64 final_start, final_end; |
529 | u64 ei_end; | ||
528 | 530 | ||
529 | if (checktype && ei->type != old_type) | 531 | if (checktype && ei->type != old_type) |
530 | continue; | 532 | continue; |
533 | |||
534 | ei_end = ei->addr + ei->size; | ||
531 | /* totally covered? */ | 535 | /* totally covered? */ |
532 | if (ei->addr >= start && | 536 | if (ei->addr >= start && ei_end <= end) { |
533 | (ei->addr + ei->size) <= (start + size)) { | ||
534 | real_removed_size += ei->size; | 537 | real_removed_size += ei->size; |
535 | memset(ei, 0, sizeof(struct e820entry)); | 538 | memset(ei, 0, sizeof(struct e820entry)); |
536 | continue; | 539 | continue; |
537 | } | 540 | } |
541 | |||
542 | /* new range is totally covered? */ | ||
543 | if (ei->addr < start && ei_end > end) { | ||
544 | e820_add_region(end, ei_end - end, ei->type); | ||
545 | ei->size = start - ei->addr; | ||
546 | real_removed_size += size; | ||
547 | continue; | ||
548 | } | ||
549 | |||
538 | /* partially covered */ | 550 | /* partially covered */ |
539 | final_start = max(start, ei->addr); | 551 | final_start = max(start, ei->addr); |
540 | final_end = min(start + size, ei->addr + ei->size); | 552 | final_end = min(end, ei_end); |
541 | if (final_start >= final_end) | 553 | if (final_start >= final_end) |
542 | continue; | 554 | continue; |
543 | real_removed_size += final_end - final_start; | 555 | real_removed_size += final_end - final_start; |
544 | 556 | ||
557 | /* | ||
558 | * The leftover range could be the head or the tail, so the | ||
559 | * size needs to be updated first. | ||
560 | */ | ||
545 | ei->size -= final_end - final_start; | 561 | ei->size -= final_end - final_start; |
546 | if (ei->addr < final_start) | 562 | if (ei->addr < final_start) |
547 | continue; | 563 | continue; |
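The rework above adds a case the old loop could not express: the removed range lies strictly inside one e820 entry, so the entry must be split into a head and a tail. A stand-alone sketch of the three overlap outcomes for a single [addr, addr+size) entry; the types and helper are made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    struct range { uint64_t addr, size; };

    /* Remove [start, end) from e; returns how many pieces survive (0-2). */
    static int range_remove(struct range e, uint64_t start, uint64_t end,
                            struct range out[2])
    {
        uint64_t e_end = e.addr + e.size;

        if (end <= e.addr || start >= e_end) {   /* no overlap */
            out[0] = e;
            return 1;
        }
        if (start <= e.addr && end >= e_end)     /* entry totally covered */
            return 0;
        if (start > e.addr && end < e_end) {     /* removal splits the entry */
            out[0] = (struct range){ e.addr, start - e.addr };
            out[1] = (struct range){ end, e_end - end };
            return 2;
        }
        if (start <= e.addr)                     /* head clipped */
            out[0] = (struct range){ end, e_end - end };
        else                                     /* tail clipped */
            out[0] = (struct range){ e.addr, start - e.addr };
        return 1;
    }

    int main(void)
    {
        struct range out[2];
        int n = range_remove((struct range){ 0x1000, 0x4000 },
                             0x2000, 0x3000, out);

        for (int i = 0; i < n; i++)
            printf("piece %d: [%#llx, %#llx)\n", i,
                   (unsigned long long)out[i].addr,
                   (unsigned long long)(out[i].addr + out[i].size));
        return 0;
    }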
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index adedeef1dedc..b2e246037392 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/start_kernel.h> | 9 | #include <linux/start_kernel.h> |
10 | #include <linux/mm.h> | ||
10 | 11 | ||
11 | #include <asm/setup.h> | 12 | #include <asm/setup.h> |
12 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void) | |||
44 | #ifdef CONFIG_BLK_DEV_INITRD | 45 | #ifdef CONFIG_BLK_DEV_INITRD |
45 | /* Reserve INITRD */ | 46 | /* Reserve INITRD */ |
46 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { | 47 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { |
48 | /* Assume only end is not page aligned */ | ||
47 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 49 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
48 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 50 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
49 | u64 ramdisk_end = ramdisk_image + ramdisk_size; | 51 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
50 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 52 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); |
51 | } | 53 | } |
52 | #endif | 54 | #endif |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index b5a9896ca1e7..7147143fd614 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
103 | #ifdef CONFIG_BLK_DEV_INITRD | 103 | #ifdef CONFIG_BLK_DEV_INITRD |
104 | /* Reserve INITRD */ | 104 | /* Reserve INITRD */ |
105 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { | 105 | if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) { |
106 | /* Assume only end is not page aligned */ | ||
106 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; | 107 | unsigned long ramdisk_image = boot_params.hdr.ramdisk_image; |
107 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; | 108 | unsigned long ramdisk_size = boot_params.hdr.ramdisk_size; |
108 | unsigned long ramdisk_end = ramdisk_image + ramdisk_size; | 109 | unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
109 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); | 110 | reserve_early(ramdisk_image, ramdisk_end, "RAMDISK"); |
110 | } | 111 | } |
111 | #endif | 112 | #endif |
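Both ramdisk hunks above round only the end of the reserved region up to a page boundary, on the stated assumption that bootloaders page-align the start but not necessarily the size. A quick worked example of the rounding, with made-up addresses:

    #include <stdio.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long image = 0x37c00000;   /* page-aligned load address */
        unsigned long size  = 0x001ffa31;   /* ends mid-page */
        unsigned long end   = PAGE_ALIGN(image + size);

        /* 0x37dffa31 rounds up to 0x37e00000: the partial last page is
         * reserved too instead of leaking into the early allocator */
        printf("reserve [%#lx, %#lx)\n", image, end);
        return 0;
    }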
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index ee4fa1bfcb33..23b4ecdffa9b 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/sysdev.h> | 4 | #include <linux/sysdev.h> |
5 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
6 | #include <linux/errno.h> | 6 | #include <linux/errno.h> |
7 | #include <linux/slab.h> | ||
7 | #include <linux/hpet.h> | 8 | #include <linux/hpet.h> |
8 | #include <linux/init.h> | 9 | #include <linux/init.h> |
9 | #include <linux/cpu.h> | 10 | #include <linux/cpu.h> |
@@ -399,9 +400,15 @@ static int hpet_next_event(unsigned long delta, | |||
399 | * then we might have a real hardware problem. We cannot do | 400 | * then we might have a real hardware problem. We cannot do |
400 | * much about it here, but at least alert the user/admin with | 401 | * much about it here, but at least alert the user/admin with |
401 | * a prominent warning. | 402 | * a prominent warning. |
403 | * An erratum on some chipsets (ICH9, ...) results in the comparator | ||
404 | * read immediately following a write returning the old value. The | ||
405 | * workaround is to read the comparator a second time when the | ||
406 | * first read returns the old value. | ||
402 | */ | 407 | */ |
403 | WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, | 408 | if (unlikely((u32)hpet_readl(HPET_Tn_CMP(timer)) != cnt)) { |
409 | WARN_ONCE(hpet_readl(HPET_Tn_CMP(timer)) != cnt, | ||
404 | KERN_WARNING "hpet: compare register read back failed.\n"); | 410 | KERN_WARNING "hpet: compare register read back failed.\n"); |
411 | } | ||
405 | 412 | ||
406 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; | 413 | return (s32)(hpet_readl(HPET_COUNTER) - cnt) >= 0 ? -ETIME : 0; |
407 | } | 414 | } |
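The new branch warns only after a second mismatching read, absorbing the chipset's stale first read described in the comment. A stand-alone sketch of that read-twice pattern against a mock comparator that models the erratum; hpet_readl() and WARN_ONCE() are replaced by illustrative stand-ins:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t cmp_reg, old_reg;
    static int stale;   /* models the ICH9-style erratum */

    static void hpet_write(uint32_t v) { old_reg = cmp_reg; cmp_reg = v; stale = 1; }

    static uint32_t hpet_read(void)   /* first read after a write returns the old value */
    {
        if (stale) {
            stale = 0;
            return old_reg;
        }
        return cmp_reg;
    }

    int main(void)
    {
        hpet_write(0x1234);
        if (hpet_read() != 0x1234) {         /* probably the erratum: read again */
            if (hpet_read() != 0x1234)       /* still wrong: a real problem */
                fprintf(stderr, "hpet: compare register read back failed\n");
            else
                printf("stale first read absorbed by the workaround\n");
        }
        return 0;
    }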
@@ -1143,6 +1150,7 @@ int hpet_set_periodic_freq(unsigned long freq) | |||
1143 | do_div(clc, freq); | 1150 | do_div(clc, freq); |
1144 | clc >>= hpet_clockevent.shift; | 1151 | clc >>= hpet_clockevent.shift; |
1145 | hpet_pie_delta = clc; | 1152 | hpet_pie_delta = clc; |
1153 | hpet_pie_limit = 0; | ||
1146 | } | 1154 | } |
1147 | return 1; | 1155 | return 1; |
1148 | } | 1156 | } |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index dca2802c666f..d6cc065f519f 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -344,13 +344,6 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp, | |||
344 | } | 344 | } |
345 | 345 | ||
346 | /* | 346 | /* |
347 | * For kernel-addresses, either the address or symbol name can be | ||
348 | * specified. | ||
349 | */ | ||
350 | if (info->name) | ||
351 | info->address = (unsigned long) | ||
352 | kallsyms_lookup_name(info->name); | ||
353 | /* | ||
354 | * Check that the low-order bits of the address are appropriate | 347 | * Check that the low-order bits of the address are appropriate |
355 | * for the alignment implied by len. | 348 | * for the alignment implied by len. |
356 | */ | 349 | */ |
@@ -535,8 +528,3 @@ void hw_breakpoint_pmu_read(struct perf_event *bp) | |||
535 | { | 528 | { |
536 | /* TODO */ | 529 | /* TODO */ |
537 | } | 530 | } |
538 | |||
539 | void hw_breakpoint_pmu_unthrottle(struct perf_event *bp) | ||
540 | { | ||
541 | /* TODO */ | ||
542 | } | ||
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index c01a2b846d47..54c31c285488 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/regset.h> | 9 | #include <linux/regset.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/slab.h> | ||
11 | 12 | ||
12 | #include <asm/sigcontext.h> | 13 | #include <asm/sigcontext.h> |
13 | #include <asm/processor.h> | 14 | #include <asm/processor.h> |
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index fb725ee15f55..7c9f02c130f3 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/ioport.h> | 5 | #include <linux/ioport.h> |
6 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
7 | #include <linux/timex.h> | 7 | #include <linux/timex.h> |
8 | #include <linux/slab.h> | ||
9 | #include <linux/random.h> | 8 | #include <linux/random.h> |
10 | #include <linux/init.h> | 9 | #include <linux/init.h> |
11 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index ef257fc2921b..0ed2d300cd46 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/ioport.h> | 5 | #include <linux/ioport.h> |
6 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
7 | #include <linux/timex.h> | 7 | #include <linux/timex.h> |
8 | #include <linux/slab.h> | ||
9 | #include <linux/random.h> | 8 | #include <linux/random.h> |
10 | #include <linux/kprobes.h> | 9 | #include <linux/kprobes.h> |
11 | #include <linux/init.h> | 10 | #include <linux/init.h> |
@@ -141,6 +140,28 @@ void __init init_IRQ(void) | |||
141 | x86_init.irqs.intr_init(); | 140 | x86_init.irqs.intr_init(); |
142 | } | 141 | } |
143 | 142 | ||
143 | /* | ||
144 | * Set up the vector to irq mappings. | ||
145 | */ | ||
146 | void setup_vector_irq(int cpu) | ||
147 | { | ||
148 | #ifndef CONFIG_X86_IO_APIC | ||
149 | int irq; | ||
150 | |||
151 | /* | ||
152 | * On most platforms the legacy PIC delivers interrupts on the boot | ||
153 | * cpu, but on certain platforms PIC interrupts are delivered to | ||
154 | * multiple cpus. If the legacy IRQ is handled by the legacy PIC, | ||
155 | * then for a new cpu that is coming online, set up the static | ||
156 | * legacy vector to irq mapping: | ||
157 | */ | ||
158 | for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++) | ||
159 | per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq; | ||
160 | #endif | ||
161 | |||
162 | __setup_vector_irq(cpu); | ||
163 | } | ||
164 | |||
144 | static void __init smp_intr_init(void) | 165 | static void __init smp_intr_init(void) |
145 | { | 166 | { |
146 | #ifdef CONFIG_SMP | 167 | #ifdef CONFIG_SMP |
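At interrupt time the kernel translates the vector back to an irq through the same per-cpu table this helper fills in. A toy model of both sides, populating at cpu bring-up and looking up in the handler; the base vector and the single flat array are stand-ins for the arch-defined IRQ0_VECTOR and the real per-cpu vector_irq:

    #include <stdio.h>

    #define NR_VECTORS     256
    #define IRQ0_VECTOR    0x30   /* illustrative; the real base is arch-defined */
    #define NR_LEGACY_IRQS 16

    static int vector_irq[NR_VECTORS];   /* one copy per cpu in the kernel */

    static void setup_vector_irq_toy(void)
    {
        for (int v = 0; v < NR_VECTORS; v++)
            vector_irq[v] = -1;
        for (int irq = 0; irq < NR_LEGACY_IRQS; irq++)
            vector_irq[IRQ0_VECTOR + irq] = irq;   /* static legacy mapping */
    }

    int main(void)
    {
        setup_vector_irq_toy();
        int vector = IRQ0_VECTOR + 1;   /* e.g. the keyboard, IRQ 1 */
        printf("vector %#x -> irq %d\n", vector, vector_irq[vector]);
        return 0;
    }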
diff --git a/arch/x86/kernel/k8.c b/arch/x86/kernel/k8.c index cbc4332a77b2..0f7bc20cfcde 100644 --- a/arch/x86/kernel/k8.c +++ b/arch/x86/kernel/k8.c | |||
@@ -2,8 +2,8 @@ | |||
2 | * Shared support code for AMD K8 northbridges and derivatives. | 2 | * Shared support code for AMD K8 northbridges and derivatives. |
3 | * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. | 3 | * Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2. |
4 | */ | 4 | */ |
5 | #include <linux/gfp.h> | ||
6 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/slab.h> | ||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
9 | #include <linux/module.h> | 9 | #include <linux/module.h> |
@@ -121,3 +121,17 @@ void k8_flush_garts(void) | |||
121 | } | 121 | } |
122 | EXPORT_SYMBOL_GPL(k8_flush_garts); | 122 | EXPORT_SYMBOL_GPL(k8_flush_garts); |
123 | 123 | ||
124 | static __init int init_k8_nbs(void) | ||
125 | { | ||
126 | int err = 0; | ||
127 | |||
128 | err = cache_k8_northbridges(); | ||
129 | |||
130 | if (err < 0) | ||
131 | printk(KERN_NOTICE "K8 NB: Cannot enumerate AMD northbridges.\n"); | ||
132 | |||
133 | return err; | ||
134 | } | ||
135 | |||
136 | /* This has to go after the PCI subsystem */ | ||
137 | fs_initcall(init_k8_nbs); | ||
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index e444357375ce..8afd9f321f10 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
10 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | ||
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/stat.h> | 14 | #include <linux/stat.h> |
14 | #include <linux/io.h> | 15 | #include <linux/io.h> |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index bfba6019d762..b2258ca91003 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -618,8 +618,8 @@ int kgdb_arch_init(void) | |||
618 | * portion of kgdb because this operation requires mutexes to | 618 | * portion of kgdb because this operation requires mutexes to |
619 | * complete. | 619 | * complete. |
620 | */ | 620 | */ |
621 | hw_breakpoint_init(&attr); | ||
621 | attr.bp_addr = (unsigned long)kgdb_arch_init; | 622 | attr.bp_addr = (unsigned long)kgdb_arch_init; |
622 | attr.type = PERF_TYPE_BREAKPOINT; | ||
623 | attr.bp_len = HW_BREAKPOINT_LEN_1; | 623 | attr.bp_len = HW_BREAKPOINT_LEN_1; |
624 | attr.bp_type = HW_BREAKPOINT_W; | 624 | attr.bp_type = HW_BREAKPOINT_W; |
625 | attr.disabled = 1; | 625 | attr.disabled = 1; |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ec6ef60cbd17..ea697263b373 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/gfp.h> | ||
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/string.h> | 12 | #include <linux/string.h> |
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 4a8bb82248ae..035c8c529181 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/kexec.h> | 10 | #include <linux/kexec.h> |
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/gfp.h> | ||
12 | #include <linux/reboot.h> | 13 | #include <linux/reboot.h> |
13 | #include <linux/numa.h> | 14 | #include <linux/numa.h> |
14 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
diff --git a/arch/x86/kernel/mca_32.c b/arch/x86/kernel/mca_32.c index 845d80ce1ef1..63eaf6596233 100644 --- a/arch/x86/kernel/mca_32.c +++ b/arch/x86/kernel/mca_32.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/kernel.h> | 42 | #include <linux/kernel.h> |
43 | #include <linux/mca.h> | 43 | #include <linux/mca.h> |
44 | #include <linux/kprobes.h> | 44 | #include <linux/kprobes.h> |
45 | #include <linux/slab.h> | ||
45 | #include <asm/system.h> | 46 | #include <asm/system.h> |
46 | #include <asm/io.h> | 47 | #include <asm/io.h> |
47 | #include <linux/proc_fs.h> | 48 | #include <linux/proc_fs.h> |
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index 89f386f044e4..e0bc186d7501 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/gfp.h> | ||
26 | 27 | ||
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/page.h> | 29 | #include <asm/page.h> |
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c index a2c1edd2d3ac..e81030f71a8f 100644 --- a/arch/x86/kernel/mpparse.c +++ b/arch/x86/kernel/mpparse.c | |||
@@ -664,7 +664,7 @@ static void __init smp_reserve_memory(struct mpf_intel *mpf) | |||
664 | { | 664 | { |
665 | unsigned long size = get_mpc_size(mpf->physptr); | 665 | unsigned long size = get_mpc_size(mpf->physptr); |
666 | 666 | ||
667 | reserve_early(mpf->physptr, mpf->physptr+size, "MP-table mpc"); | 667 | reserve_early_overlap_ok(mpf->physptr, mpf->physptr+size, "MP-table mpc"); |
668 | } | 668 | } |
669 | 669 | ||
670 | static int __init smp_scan_config(unsigned long base, unsigned long length) | 670 | static int __init smp_scan_config(unsigned long base, unsigned long length) |
@@ -693,7 +693,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length) | |||
693 | mpf, (u64)virt_to_phys(mpf)); | 693 | mpf, (u64)virt_to_phys(mpf)); |
694 | 694 | ||
695 | mem = virt_to_phys(mpf); | 695 | mem = virt_to_phys(mpf); |
696 | reserve_early(mem, mem + sizeof(*mpf), "MP-table mpf"); | 696 | reserve_early_overlap_ok(mem, mem + sizeof(*mpf), "MP-table mpf"); |
697 | if (mpf->physptr) | 697 | if (mpf->physptr) |
698 | smp_reserve_memory(mpf); | 698 | smp_reserve_memory(mpf); |
699 | 699 | ||
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index 206735ac8cbd..4d4468e9f47c 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/cpu.h> | 37 | #include <linux/cpu.h> |
38 | #include <linux/notifier.h> | 38 | #include <linux/notifier.h> |
39 | #include <linux/uaccess.h> | 39 | #include <linux/uaccess.h> |
40 | #include <linux/gfp.h> | ||
40 | 41 | ||
41 | #include <asm/processor.h> | 42 | #include <asm/processor.h> |
42 | #include <asm/msr.h> | 43 | #include <asm/msr.h> |
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index a4ac764a6880..4b7e3d8b01dd 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <linux/dma-debug.h> | 2 | #include <linux/dma-debug.h> |
3 | #include <linux/dmar.h> | 3 | #include <linux/dmar.h> |
4 | #include <linux/bootmem.h> | 4 | #include <linux/bootmem.h> |
5 | #include <linux/gfp.h> | ||
5 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
6 | #include <linux/kmemleak.h> | 7 | #include <linux/kmemleak.h> |
7 | 8 | ||
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c index 34de53b46f87..0f7f130caa67 100644 --- a/arch/x86/kernel/pci-gart_64.c +++ b/arch/x86/kernel/pci-gart_64.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/iommu-helper.h> | 29 | #include <linux/iommu-helper.h> |
30 | #include <linux/sysdev.h> | 30 | #include <linux/sysdev.h> |
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/gfp.h> | ||
32 | #include <asm/atomic.h> | 33 | #include <asm/atomic.h> |
33 | #include <asm/mtrr.h> | 34 | #include <asm/mtrr.h> |
34 | #include <asm/pgtable.h> | 35 | #include <asm/pgtable.h> |
@@ -564,6 +565,9 @@ static void enable_gart_translations(void) | |||
564 | 565 | ||
565 | enable_gart_translation(dev, __pa(agp_gatt_table)); | 566 | enable_gart_translation(dev, __pa(agp_gatt_table)); |
566 | } | 567 | } |
568 | |||
569 | /* Flush the GART-TLB to remove stale entries */ | ||
570 | k8_flush_garts(); | ||
567 | } | 571 | } |
568 | 572 | ||
569 | /* | 573 | /* |
@@ -735,7 +739,7 @@ int __init gart_iommu_init(void) | |||
735 | unsigned long scratch; | 739 | unsigned long scratch; |
736 | long i; | 740 | long i; |
737 | 741 | ||
738 | if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) | 742 | if (num_k8_northbridges == 0) |
739 | return 0; | 743 | return 0; |
740 | 744 | ||
741 | #ifndef CONFIG_AGP_AMD64 | 745 | #ifndef CONFIG_AGP_AMD64 |
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c index 22be12b60a8f..3af4af810c07 100644 --- a/arch/x86/kernel/pci-nommu.c +++ b/arch/x86/kernel/pci-nommu.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/scatterlist.h> | 4 | #include <linux/scatterlist.h> |
5 | #include <linux/string.h> | 5 | #include <linux/string.h> |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/gfp.h> | ||
7 | #include <linux/pci.h> | 8 | #include <linux/pci.h> |
8 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
9 | 10 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 02d678065d7d..28ad9f4d8b94 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -526,21 +526,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) | |||
526 | } | 526 | } |
527 | 527 | ||
528 | /* | 528 | /* |
529 | * Check for AMD CPUs, which have potentially C1E support | 529 | * Check for AMD CPUs, where the APIC timer interrupt does not wake the CPU from C1e. |
530 | * For more information see | ||
531 | * - Erratum #400 for NPT family 0xf and family 0x10 CPUs | ||
532 | * - Erratum #365 for family 0x11 (not affected because C1e not in use) | ||
530 | */ | 533 | */ |
531 | static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) | 534 | static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c) |
532 | { | 535 | { |
536 | u64 val; | ||
533 | if (c->x86_vendor != X86_VENDOR_AMD) | 537 | if (c->x86_vendor != X86_VENDOR_AMD) |
534 | return 0; | 538 | goto no_c1e_idle; |
535 | |||
536 | if (c->x86 < 0x0F) | ||
537 | return 0; | ||
538 | 539 | ||
539 | /* Family 0x0f models < rev F do not have C1E */ | 540 | /* Family 0x0f models < rev F do not have C1E */ |
540 | if (c->x86 == 0x0f && c->x86_model < 0x40) | 541 | if (c->x86 == 0x0F && c->x86_model >= 0x40) |
541 | return 0; | 542 | return 1; |
542 | 543 | ||
543 | return 1; | 544 | if (c->x86 == 0x10) { |
545 | /* | ||
546 | * check OSVW bit for CPUs that are not affected | ||
547 | * by erratum #400 | ||
548 | */ | ||
549 | rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val); | ||
550 | if (val >= 2) { | ||
551 | rdmsrl(MSR_AMD64_OSVW_STATUS, val); | ||
552 | if (!(val & BIT(1))) | ||
553 | goto no_c1e_idle; | ||
554 | } | ||
555 | return 1; | ||
556 | } | ||
557 | |||
558 | no_c1e_idle: | ||
559 | return 0; | ||
544 | } | 560 | } |
545 | 561 | ||
546 | static cpumask_var_t c1e_mask; | 562 | static cpumask_var_t c1e_mask; |
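The family 0x10 branch consults AMD's OS-Visible Workaround MSRs: when OSVW_ID_LENGTH says erratum #400's status bit is valid (length >= 2) and that bit is clear, the CPU is not affected. A side-effect-free sketch of just that decision, taking the two MSR values as parameters instead of reading them with rdmsrl():

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1ULL << (n))

    /* Erratum #400 is OSVW id 1, i.e. bit 1 of OSVW_STATUS; the bit is only
     * meaningful when OSVW_ID_LENGTH covers it (length >= 2). */
    static int c1e_affected(uint64_t osvw_id_length, uint64_t osvw_status)
    {
        if (osvw_id_length >= 2 && !(osvw_status & BIT(1)))
            return 0;   /* firmware says: not affected */
        return 1;       /* affected, or OSVW gives no answer */
    }

    int main(void)
    {
        printf("%d\n", c1e_affected(2, 0));        /* 0: erratum handled */
        printf("%d\n", c1e_affected(2, BIT(1)));   /* 1: erratum present */
        printf("%d\n", c1e_affected(0, 0));        /* 1: no OSVW info */
        return 0;
    }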
@@ -607,7 +623,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | |||
607 | { | 623 | { |
608 | #ifdef CONFIG_SMP | 624 | #ifdef CONFIG_SMP |
609 | if (pm_idle == poll_idle && smp_num_siblings > 1) { | 625 | if (pm_idle == poll_idle && smp_num_siblings > 1) { |
610 | printk(KERN_WARNING "WARNING: polling idle and HT enabled," | 626 | printk_once(KERN_WARNING "WARNING: polling idle and HT enabled," |
611 | " performance may degrade.\n"); | 627 | " performance may degrade.\n"); |
612 | } | 628 | } |
613 | #endif | 629 | #endif |
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c index a503b1fd04e5..2e9b55027b7e 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
15 | #include <linux/slab.h> | ||
15 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
16 | #include <linux/regset.h> | 17 | #include <linux/regset.h> |
17 | #include <linux/tracehook.h> | 18 | #include <linux/tracehook.h> |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 5d7ba1a449bd..c4851eff57b3 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -55,7 +55,6 @@ | |||
55 | #include <linux/stddef.h> | 55 | #include <linux/stddef.h> |
56 | #include <linux/unistd.h> | 56 | #include <linux/unistd.h> |
57 | #include <linux/ptrace.h> | 57 | #include <linux/ptrace.h> |
58 | #include <linux/slab.h> | ||
59 | #include <linux/user.h> | 58 | #include <linux/user.h> |
60 | #include <linux/delay.h> | 59 | #include <linux/delay.h> |
61 | 60 | ||
@@ -314,16 +313,17 @@ static void __init reserve_brk(void) | |||
314 | #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) | 313 | #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) |
315 | static void __init relocate_initrd(void) | 314 | static void __init relocate_initrd(void) |
316 | { | 315 | { |
317 | 316 | /* Assume only end is not page aligned */ | |
318 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 317 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
319 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 318 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
319 | u64 area_size = PAGE_ALIGN(ramdisk_size); | ||
320 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; | 320 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; |
321 | u64 ramdisk_here; | 321 | u64 ramdisk_here; |
322 | unsigned long slop, clen, mapaddr; | 322 | unsigned long slop, clen, mapaddr; |
323 | char *p, *q; | 323 | char *p, *q; |
324 | 324 | ||
325 | /* We need to move the initrd down into lowmem */ | 325 | /* We need to move the initrd down into lowmem */ |
326 | ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size, | 326 | ramdisk_here = find_e820_area(0, end_of_lowmem, area_size, |
327 | PAGE_SIZE); | 327 | PAGE_SIZE); |
328 | 328 | ||
329 | if (ramdisk_here == -1ULL) | 329 | if (ramdisk_here == -1ULL) |
@@ -332,7 +332,7 @@ static void __init relocate_initrd(void) | |||
332 | 332 | ||
333 | /* Note: this includes all the lowmem currently occupied by | 333 | /* Note: this includes all the lowmem currently occupied by |
334 | the initrd; we rely on that fact to keep the data intact. */ | 334 | the initrd; we rely on that fact to keep the data intact. */ |
335 | reserve_early(ramdisk_here, ramdisk_here + ramdisk_size, | 335 | reserve_early(ramdisk_here, ramdisk_here + area_size, |
336 | "NEW RAMDISK"); | 336 | "NEW RAMDISK"); |
337 | initrd_start = ramdisk_here + PAGE_OFFSET; | 337 | initrd_start = ramdisk_here + PAGE_OFFSET; |
338 | initrd_end = initrd_start + ramdisk_size; | 338 | initrd_end = initrd_start + ramdisk_size; |
@@ -376,9 +376,10 @@ static void __init relocate_initrd(void) | |||
376 | 376 | ||
377 | static void __init reserve_initrd(void) | 377 | static void __init reserve_initrd(void) |
378 | { | 378 | { |
379 | /* Assume only end is not page aligned */ | ||
379 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; | 380 | u64 ramdisk_image = boot_params.hdr.ramdisk_image; |
380 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; | 381 | u64 ramdisk_size = boot_params.hdr.ramdisk_size; |
381 | u64 ramdisk_end = ramdisk_image + ramdisk_size; | 382 | u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size); |
382 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; | 383 | u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; |
383 | 384 | ||
384 | if (!boot_params.hdr.type_of_loader || | 385 | if (!boot_params.hdr.type_of_loader || |
@@ -606,6 +607,16 @@ static int __init setup_elfcorehdr(char *arg) | |||
606 | early_param("elfcorehdr", setup_elfcorehdr); | 607 | early_param("elfcorehdr", setup_elfcorehdr); |
607 | #endif | 608 | #endif |
608 | 609 | ||
610 | static __init void reserve_ibft_region(void) | ||
611 | { | ||
612 | unsigned long addr, size = 0; | ||
613 | |||
614 | addr = find_ibft_region(&size); | ||
615 | |||
616 | if (size) | ||
617 | reserve_early_overlap_ok(addr, addr + size, "ibft"); | ||
618 | } | ||
619 | |||
609 | #ifdef CONFIG_X86_RESERVE_LOW_64K | 620 | #ifdef CONFIG_X86_RESERVE_LOW_64K |
610 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) | 621 | static int __init dmi_low_memory_corruption(const struct dmi_system_id *d) |
611 | { | 622 | { |
@@ -908,6 +919,8 @@ void __init setup_arch(char **cmdline_p) | |||
908 | */ | 919 | */ |
909 | find_smp_config(); | 920 | find_smp_config(); |
910 | 921 | ||
922 | reserve_ibft_region(); | ||
923 | |||
911 | reserve_trampoline_memory(); | 924 | reserve_trampoline_memory(); |
912 | 925 | ||
913 | #ifdef CONFIG_ACPI_SLEEP | 926 | #ifdef CONFIG_ACPI_SLEEP |
@@ -975,8 +988,6 @@ void __init setup_arch(char **cmdline_p) | |||
975 | 988 | ||
976 | dma32_reserve_bootmem(); | 989 | dma32_reserve_bootmem(); |
977 | 990 | ||
978 | reserve_ibft_region(); | ||
979 | |||
980 | #ifdef CONFIG_KVM_CLOCK | 991 | #ifdef CONFIG_KVM_CLOCK |
981 | kvmclock_init(); | 992 | kvmclock_init(); |
982 | #endif | 993 | #endif |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index ec1de97600e7..d801210945d6 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/cache.h> | 21 | #include <linux/cache.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
24 | #include <linux/gfp.h> | ||
24 | 25 | ||
25 | #include <asm/mtrr.h> | 26 | #include <asm/mtrr.h> |
26 | #include <asm/tlbflush.h> | 27 | #include <asm/tlbflush.h> |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index a02e80c3c54b..763d815e27a0 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/nmi.h> | 49 | #include <linux/nmi.h> |
50 | #include <linux/tboot.h> | 50 | #include <linux/tboot.h> |
51 | #include <linux/stackprotector.h> | 51 | #include <linux/stackprotector.h> |
52 | #include <linux/gfp.h> | ||
52 | 53 | ||
53 | #include <asm/acpi.h> | 54 | #include <asm/acpi.h> |
54 | #include <asm/desc.h> | 55 | #include <asm/desc.h> |
@@ -242,12 +243,10 @@ static void __cpuinit smp_callin(void) | |||
242 | end_local_APIC_setup(); | 243 | end_local_APIC_setup(); |
243 | map_cpu_to_logical_apicid(); | 244 | map_cpu_to_logical_apicid(); |
244 | 245 | ||
245 | notify_cpu_starting(cpuid); | ||
246 | |||
247 | /* | 246 | /* |
248 | * Need to setup vector mappings before we enable interrupts. | 247 | * Need to setup vector mappings before we enable interrupts. |
249 | */ | 248 | */ |
250 | __setup_vector_irq(smp_processor_id()); | 249 | setup_vector_irq(smp_processor_id()); |
251 | /* | 250 | /* |
252 | * Get our bogomips. | 251 | * Get our bogomips. |
253 | * | 252 | * |
@@ -264,6 +263,8 @@ static void __cpuinit smp_callin(void) | |||
264 | */ | 263 | */ |
265 | smp_store_cpu_info(cpuid); | 264 | smp_store_cpu_info(cpuid); |
266 | 265 | ||
266 | notify_cpu_starting(cpuid); | ||
267 | |||
267 | /* | 268 | /* |
268 | * Allow the master to continue. | 269 | * Allow the master to continue. |
269 | */ | 270 | */ |
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c index 364d015efebc..17b03dd3a6b5 100644 --- a/arch/x86/kernel/tlb_uv.c +++ b/arch/x86/kernel/tlb_uv.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/seq_file.h> | 9 | #include <linux/seq_file.h> |
10 | #include <linux/proc_fs.h> | 10 | #include <linux/proc_fs.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/slab.h> | ||
12 | 13 | ||
13 | #include <asm/mmu_context.h> | 14 | #include <asm/mmu_context.h> |
14 | #include <asm/uv/uv.h> | 15 | #include <asm/uv/uv.h> |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index ece73d8e3240..1d40336b030a 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/rbtree.h> | 12 | #include <linux/rbtree.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/irq.h> | 14 | #include <linux/irq.h> |
14 | 15 | ||
15 | #include <asm/apic.h> | 16 | #include <asm/apic.h> |
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c index 2b75ef638dbc..56e421bc379b 100644 --- a/arch/x86/kernel/uv_time.c +++ b/arch/x86/kernel/uv_time.c | |||
@@ -19,6 +19,7 @@ | |||
19 | * Copyright (c) Dimitri Sivanich | 19 | * Copyright (c) Dimitri Sivanich |
20 | */ | 20 | */ |
21 | #include <linux/clockchips.h> | 21 | #include <linux/clockchips.h> |
22 | #include <linux/slab.h> | ||
22 | 23 | ||
23 | #include <asm/uv/uv_mmrs.h> | 24 | #include <asm/uv/uv_mmrs.h> |
24 | #include <asm/uv/uv_hub.h> | 25 | #include <asm/uv/uv_hub.h> |
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c index 7dd599deca4a..ce9fbacb7526 100644 --- a/arch/x86/kernel/vmi_32.c +++ b/arch/x86/kernel/vmi_32.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
30 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
31 | #include <linux/gfp.h> | ||
31 | #include <asm/vmi.h> | 32 | #include <asm/vmi.h> |
32 | #include <asm/io.h> | 33 | #include <asm/io.h> |
33 | #include <asm/fixmap.h> | 34 | #include <asm/fixmap.h> |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 44879df55696..2cc249718c46 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -291,8 +291,8 @@ SECTIONS | |||
291 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { | 291 | .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { |
292 | __smp_locks = .; | 292 | __smp_locks = .; |
293 | *(.smp_locks) | 293 | *(.smp_locks) |
294 | __smp_locks_end = .; | ||
295 | . = ALIGN(PAGE_SIZE); | 294 | . = ALIGN(PAGE_SIZE); |
295 | __smp_locks_end = .; | ||
296 | } | 296 | } |
297 | 297 | ||
298 | #ifdef CONFIG_X86_64 | 298 | #ifdef CONFIG_X86_64 |
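Moving __smp_locks_end below the ALIGN(PAGE_SIZE) makes the symbol mark the padded, page-aligned end of the section, so code that later frees the .smp_locks pages operates on whole pages. The arithmetic, with made-up addresses:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define ALIGN_UP(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long start = 0x100000, data_end = 0x1007f8;

        /* symbol placed before the ALIGN: region is not a whole page */
        printf("end before ALIGN: %#lx (%lu bytes)\n",
               data_end, data_end - start);
        /* symbol placed after the ALIGN: region is exactly one page */
        printf("end after  ALIGN: %#lx (%lu bytes)\n",
               ALIGN_UP(data_end), ALIGN_UP(data_end) - start);
        return 0;
    }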
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c index 294698b6daff..0150affad25d 100644 --- a/arch/x86/kvm/i8254.c +++ b/arch/x86/kvm/i8254.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #define pr_fmt(fmt) "pit: " fmt | 32 | #define pr_fmt(fmt) "pit: " fmt |
33 | 33 | ||
34 | #include <linux/kvm_host.h> | 34 | #include <linux/kvm_host.h> |
35 | #include <linux/slab.h> | ||
35 | 36 | ||
36 | #include "irq.h" | 37 | #include "irq.h" |
37 | #include "i8254.h" | 38 | #include "i8254.h" |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 07771da85de5..a790fa128a9f 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -26,6 +26,7 @@ | |||
26 | * Port from Qemu. | 26 | * Port from Qemu. |
27 | */ | 27 | */ |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/slab.h> | ||
29 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
30 | #include "irq.h" | 31 | #include "irq.h" |
31 | 32 | ||
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4b224f90087b..1eb7a4ae0c9c 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/io.h> | 26 | #include <linux/io.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/math64.h> | 28 | #include <linux/math64.h> |
29 | #include <linux/slab.h> | ||
29 | #include <asm/processor.h> | 30 | #include <asm/processor.h> |
30 | #include <asm/msr.h> | 31 | #include <asm/msr.h> |
31 | #include <asm/page.h> | 32 | #include <asm/page.h> |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 741373e8ca77..19a8906bcaa2 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/hugetlb.h> | 31 | #include <linux/hugetlb.h> |
32 | #include <linux/compiler.h> | 32 | #include <linux/compiler.h> |
33 | #include <linux/srcu.h> | 33 | #include <linux/srcu.h> |
34 | #include <linux/slab.h> | ||
34 | 35 | ||
35 | #include <asm/page.h> | 36 | #include <asm/page.h> |
36 | #include <asm/cmpxchg.h> | 37 | #include <asm/cmpxchg.h> |
@@ -1489,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm, | |||
1489 | for_each_sp(pages, sp, parents, i) { | 1490 | for_each_sp(pages, sp, parents, i) { |
1490 | kvm_mmu_zap_page(kvm, sp); | 1491 | kvm_mmu_zap_page(kvm, sp); |
1491 | mmu_pages_clear_parents(&parents); | 1492 | mmu_pages_clear_parents(&parents); |
1493 | zapped++; | ||
1492 | } | 1494 | } |
1493 | zapped += pages.nr; | ||
1494 | kvm_mmu_pages_init(parent, &parents, &pages); | 1495 | kvm_mmu_pages_init(parent, &parents, &pages); |
1495 | } | 1496 | } |
1496 | 1497 | ||
@@ -1541,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages) | |||
1541 | */ | 1542 | */ |
1542 | 1543 | ||
1543 | if (used_pages > kvm_nr_mmu_pages) { | 1544 | if (used_pages > kvm_nr_mmu_pages) { |
1544 | while (used_pages > kvm_nr_mmu_pages) { | 1545 | while (used_pages > kvm_nr_mmu_pages && |
1546 | !list_empty(&kvm->arch.active_mmu_pages)) { | ||
1545 | struct kvm_mmu_page *page; | 1547 | struct kvm_mmu_page *page; |
1546 | 1548 | ||
1547 | page = container_of(kvm->arch.active_mmu_pages.prev, | 1549 | page = container_of(kvm->arch.active_mmu_pages.prev, |
1548 | struct kvm_mmu_page, link); | 1550 | struct kvm_mmu_page, link); |
1549 | kvm_mmu_zap_page(kvm, page); | 1551 | used_pages -= kvm_mmu_zap_page(kvm, page); |
1550 | used_pages--; | 1552 | used_pages--; |
1551 | } | 1553 | } |
1554 | kvm_nr_mmu_pages = used_pages; | ||
1552 | kvm->arch.n_free_mmu_pages = 0; | 1555 | kvm->arch.n_free_mmu_pages = 0; |
1553 | } | 1556 | } |
1554 | else | 1557 | else |
@@ -1595,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) | |||
1595 | && !sp->role.invalid) { | 1598 | && !sp->role.invalid) { |
1596 | pgprintk("%s: zap %lx %x\n", | 1599 | pgprintk("%s: zap %lx %x\n", |
1597 | __func__, gfn, sp->role.word); | 1600 | __func__, gfn, sp->role.word); |
1598 | kvm_mmu_zap_page(kvm, sp); | 1601 | if (kvm_mmu_zap_page(kvm, sp)) |
1602 | nn = bucket->first; | ||
1599 | } | 1603 | } |
1600 | } | 1604 | } |
1601 | } | 1605 | } |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 52f78dd03010..2ba58206812a 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/highmem.h> | 26 | #include <linux/highmem.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/ftrace_event.h> | 28 | #include <linux/ftrace_event.h> |
29 | #include <linux/slab.h> | ||
29 | 30 | ||
30 | #include <asm/desc.h> | 31 | #include <asm/desc.h> |
31 | 32 | ||
@@ -705,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
705 | if (err) | 706 | if (err) |
706 | goto free_svm; | 707 | goto free_svm; |
707 | 708 | ||
709 | err = -ENOMEM; | ||
708 | page = alloc_page(GFP_KERNEL); | 710 | page = alloc_page(GFP_KERNEL); |
709 | if (!page) { | 711 | if (!page) |
710 | err = -ENOMEM; | ||
711 | goto uninit; | 712 | goto uninit; |
712 | } | ||
713 | 713 | ||
714 | err = -ENOMEM; | ||
715 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | 714 | msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); |
716 | if (!msrpm_pages) | 715 | if (!msrpm_pages) |
717 | goto uninit; | 716 | goto free_page1; |
718 | 717 | ||
719 | nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); | 718 | nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER); |
720 | if (!nested_msrpm_pages) | 719 | if (!nested_msrpm_pages) |
721 | goto uninit; | 720 | goto free_page2; |
722 | |||
723 | svm->msrpm = page_address(msrpm_pages); | ||
724 | svm_vcpu_init_msrpm(svm->msrpm); | ||
725 | 721 | ||
726 | hsave_page = alloc_page(GFP_KERNEL); | 722 | hsave_page = alloc_page(GFP_KERNEL); |
727 | if (!hsave_page) | 723 | if (!hsave_page) |
728 | goto uninit; | 724 | goto free_page3; |
725 | |||
729 | svm->nested.hsave = page_address(hsave_page); | 726 | svm->nested.hsave = page_address(hsave_page); |
730 | 727 | ||
728 | svm->msrpm = page_address(msrpm_pages); | ||
729 | svm_vcpu_init_msrpm(svm->msrpm); | ||
730 | |||
731 | svm->nested.msrpm = page_address(nested_msrpm_pages); | 731 | svm->nested.msrpm = page_address(nested_msrpm_pages); |
732 | 732 | ||
733 | svm->vmcb = page_address(page); | 733 | svm->vmcb = page_address(page); |
@@ -743,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id) | |||
743 | 743 | ||
744 | return &svm->vcpu; | 744 | return &svm->vcpu; |
745 | 745 | ||
746 | free_page3: | ||
747 | __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER); | ||
748 | free_page2: | ||
749 | __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER); | ||
750 | free_page1: | ||
751 | __free_page(page); | ||
746 | uninit: | 752 | uninit: |
747 | kvm_vcpu_uninit(&svm->vcpu); | 753 | kvm_vcpu_uninit(&svm->vcpu); |
748 | free_svm: | 754 | free_svm: |
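The reworked failure handling above is the standard kernel goto-unwind ladder: each allocation gets a label that releases exactly what was already allocated, so a failure midway leaks nothing, where the old code jumped straight to uninit and leaked the earlier pages. A compact stand-alone illustration of the pattern using malloc/free:

    #include <stdio.h>
    #include <stdlib.h>

    static int create(void)
    {
        int err = -1;   /* stand-in for -ENOMEM */
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
            goto out;
        b = malloc(16);
        if (!b)
            goto free_a;   /* undo only what already succeeded */
        c = malloc(16);
        if (!c)
            goto free_b;

        printf("all allocations succeeded\n");
        free(c);
        free(b);
        free(a);
        return 0;

    free_b:
        free(b);
    free_a:
        free(a);
    out:
        return err;
    }

    int main(void) { return create() ? 1 : 0; }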
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 14873b9f8430..bc933cfb4e66 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/ftrace_event.h> | 28 | #include <linux/ftrace_event.h> |
29 | #include <linux/slab.h> | ||
29 | #include "kvm_cache_regs.h" | 30 | #include "kvm_cache_regs.h" |
30 | #include "x86.h" | 31 | #include "x86.h" |
31 | 32 | ||
@@ -76,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO); | |||
76 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) | 77 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) |
77 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) | 78 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) |
78 | 79 | ||
80 | #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) | ||
81 | |||
79 | /* | 82 | /* |
80 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: | 83 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: |
81 | * ple_gap: upper bound on the amount of time between two successive | 84 | * ple_gap: upper bound on the amount of time between two successive |
@@ -130,7 +133,7 @@ struct vcpu_vmx { | |||
130 | } host_state; | 133 | } host_state; |
131 | struct { | 134 | struct { |
132 | int vm86_active; | 135 | int vm86_active; |
133 | u8 save_iopl; | 136 | ulong save_rflags; |
134 | struct kvm_save_segment { | 137 | struct kvm_save_segment { |
135 | u16 selector; | 138 | u16 selector; |
136 | unsigned long base; | 139 | unsigned long base; |
@@ -817,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) | |||
817 | 820 | ||
818 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) | 821 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) |
819 | { | 822 | { |
820 | unsigned long rflags; | 823 | unsigned long rflags, save_rflags; |
821 | 824 | ||
822 | rflags = vmcs_readl(GUEST_RFLAGS); | 825 | rflags = vmcs_readl(GUEST_RFLAGS); |
823 | if (to_vmx(vcpu)->rmode.vm86_active) | 826 | if (to_vmx(vcpu)->rmode.vm86_active) { |
824 | rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | 827 | rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
828 | save_rflags = to_vmx(vcpu)->rmode.save_rflags; | ||
829 | rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; | ||
830 | } | ||
825 | return rflags; | 831 | return rflags; |
826 | } | 832 | } |
827 | 833 | ||
828 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | 834 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
829 | { | 835 | { |
830 | if (to_vmx(vcpu)->rmode.vm86_active) | 836 | if (to_vmx(vcpu)->rmode.vm86_active) { |
837 | to_vmx(vcpu)->rmode.save_rflags = rflags; | ||
831 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | 838 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
839 | } | ||
832 | vmcs_writel(GUEST_RFLAGS, rflags); | 840 | vmcs_writel(GUEST_RFLAGS, rflags); |
833 | } | 841 | } |
834 | 842 | ||
@@ -1482,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu) | |||
1482 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); | 1490 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); |
1483 | 1491 | ||
1484 | flags = vmcs_readl(GUEST_RFLAGS); | 1492 | flags = vmcs_readl(GUEST_RFLAGS); |
1485 | flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); | 1493 | flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
1486 | flags |= (vmx->rmode.save_iopl << IOPL_SHIFT); | 1494 | flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
1487 | vmcs_writel(GUEST_RFLAGS, flags); | 1495 | vmcs_writel(GUEST_RFLAGS, flags); |
1488 | 1496 | ||
1489 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | | 1497 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | |
@@ -1556,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu) | |||
1556 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); | 1564 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
1557 | 1565 | ||
1558 | flags = vmcs_readl(GUEST_RFLAGS); | 1566 | flags = vmcs_readl(GUEST_RFLAGS); |
1559 | vmx->rmode.save_iopl | 1567 | vmx->rmode.save_rflags = flags; |
1560 | = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | ||
1561 | 1568 | ||
1562 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; | 1569 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
1563 | 1570 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e46282a56565..3c4ca98ad27f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/cpufreq.h> | 39 | #include <linux/cpufreq.h> |
40 | #include <linux/user-return-notifier.h> | 40 | #include <linux/user-return-notifier.h> |
41 | #include <linux/srcu.h> | 41 | #include <linux/srcu.h> |
42 | #include <linux/slab.h> | ||
42 | #include <trace/events/kvm.h> | 43 | #include <trace/events/kvm.h> |
43 | #undef TRACE_INCLUDE_FILE | 44 | #undef TRACE_INCLUDE_FILE |
44 | #define CREATE_TRACE_POINTS | 45 | #define CREATE_TRACE_POINTS |
@@ -432,8 +433,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
432 | 433 | ||
433 | #ifdef CONFIG_X86_64 | 434 | #ifdef CONFIG_X86_64 |
434 | if (cr0 & 0xffffffff00000000UL) { | 435 | if (cr0 & 0xffffffff00000000UL) { |
435 | printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", | ||
436 | cr0, kvm_read_cr0(vcpu)); | ||
437 | kvm_inject_gp(vcpu, 0); | 436 | kvm_inject_gp(vcpu, 0); |
438 | return; | 437 | return; |
439 | } | 438 | } |
@@ -442,14 +441,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
442 | cr0 &= ~CR0_RESERVED_BITS; | 441 | cr0 &= ~CR0_RESERVED_BITS; |
443 | 442 | ||
444 | if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { | 443 | if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { |
445 | printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); | ||
446 | kvm_inject_gp(vcpu, 0); | 444 | kvm_inject_gp(vcpu, 0); |
447 | return; | 445 | return; |
448 | } | 446 | } |
449 | 447 | ||
450 | if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { | 448 | if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { |
451 | printk(KERN_DEBUG "set_cr0: #GP, set PG flag " | ||
452 | "and a clear PE flag\n"); | ||
453 | kvm_inject_gp(vcpu, 0); | 449 | kvm_inject_gp(vcpu, 0); |
454 | return; | 450 | return; |
455 | } | 451 | } |
@@ -460,15 +456,11 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
460 | int cs_db, cs_l; | 456 | int cs_db, cs_l; |
461 | 457 | ||
462 | if (!is_pae(vcpu)) { | 458 | if (!is_pae(vcpu)) { |
463 | printk(KERN_DEBUG "set_cr0: #GP, start paging " | ||
464 | "in long mode while PAE is disabled\n"); | ||
465 | kvm_inject_gp(vcpu, 0); | 459 | kvm_inject_gp(vcpu, 0); |
466 | return; | 460 | return; |
467 | } | 461 | } |
468 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 462 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); |
469 | if (cs_l) { | 463 | if (cs_l) { |
470 | printk(KERN_DEBUG "set_cr0: #GP, start paging " | ||
471 | "in long mode while CS.L == 1\n"); | ||
472 | kvm_inject_gp(vcpu, 0); | 464 | kvm_inject_gp(vcpu, 0); |
473 | return; | 465 | return; |
474 | 466 | ||
@@ -476,8 +468,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
476 | } else | 468 | } else |
477 | #endif | 469 | #endif |
478 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { | 470 | if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) { |
479 | printk(KERN_DEBUG "set_cr0: #GP, pdptrs " | ||
480 | "reserved bits\n"); | ||
481 | kvm_inject_gp(vcpu, 0); | 471 | kvm_inject_gp(vcpu, 0); |
482 | return; | 472 | return; |
483 | } | 473 | } |
@@ -504,28 +494,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) | |||
504 | unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; | 494 | unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE; |
505 | 495 | ||
506 | if (cr4 & CR4_RESERVED_BITS) { | 496 | if (cr4 & CR4_RESERVED_BITS) { |
507 | printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); | ||
508 | kvm_inject_gp(vcpu, 0); | 497 | kvm_inject_gp(vcpu, 0); |
509 | return; | 498 | return; |
510 | } | 499 | } |
511 | 500 | ||
512 | if (is_long_mode(vcpu)) { | 501 | if (is_long_mode(vcpu)) { |
513 | if (!(cr4 & X86_CR4_PAE)) { | 502 | if (!(cr4 & X86_CR4_PAE)) { |
514 | printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " | ||
515 | "in long mode\n"); | ||
516 | kvm_inject_gp(vcpu, 0); | 503 | kvm_inject_gp(vcpu, 0); |
517 | return; | 504 | return; |
518 | } | 505 | } |
519 | } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) | 506 | } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) |
520 | && ((cr4 ^ old_cr4) & pdptr_bits) | 507 | && ((cr4 ^ old_cr4) & pdptr_bits) |
521 | && !load_pdptrs(vcpu, vcpu->arch.cr3)) { | 508 | && !load_pdptrs(vcpu, vcpu->arch.cr3)) { |
522 | printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); | ||
523 | kvm_inject_gp(vcpu, 0); | 509 | kvm_inject_gp(vcpu, 0); |
524 | return; | 510 | return; |
525 | } | 511 | } |
526 | 512 | ||
527 | if (cr4 & X86_CR4_VMXE) { | 513 | if (cr4 & X86_CR4_VMXE) { |
528 | printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); | ||
529 | kvm_inject_gp(vcpu, 0); | 514 | kvm_inject_gp(vcpu, 0); |
530 | return; | 515 | return; |
531 | } | 516 | } |
@@ -546,21 +531,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) | |||
546 | 531 | ||
547 | if (is_long_mode(vcpu)) { | 532 | if (is_long_mode(vcpu)) { |
548 | if (cr3 & CR3_L_MODE_RESERVED_BITS) { | 533 | if (cr3 & CR3_L_MODE_RESERVED_BITS) { |
549 | printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); | ||
550 | kvm_inject_gp(vcpu, 0); | 534 | kvm_inject_gp(vcpu, 0); |
551 | return; | 535 | return; |
552 | } | 536 | } |
553 | } else { | 537 | } else { |
554 | if (is_pae(vcpu)) { | 538 | if (is_pae(vcpu)) { |
555 | if (cr3 & CR3_PAE_RESERVED_BITS) { | 539 | if (cr3 & CR3_PAE_RESERVED_BITS) { |
556 | printk(KERN_DEBUG | ||
557 | "set_cr3: #GP, reserved bits\n"); | ||
558 | kvm_inject_gp(vcpu, 0); | 540 | kvm_inject_gp(vcpu, 0); |
559 | return; | 541 | return; |
560 | } | 542 | } |
561 | if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { | 543 | if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { |
562 | printk(KERN_DEBUG "set_cr3: #GP, pdptrs " | ||
563 | "reserved bits\n"); | ||
564 | kvm_inject_gp(vcpu, 0); | 544 | kvm_inject_gp(vcpu, 0); |
565 | return; | 545 | return; |
566 | } | 546 | } |
@@ -592,7 +572,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3); | |||
592 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) | 572 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) |
593 | { | 573 | { |
594 | if (cr8 & CR8_RESERVED_BITS) { | 574 | if (cr8 & CR8_RESERVED_BITS) { |
595 | printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8); | ||
596 | kvm_inject_gp(vcpu, 0); | 575 | kvm_inject_gp(vcpu, 0); |
597 | return; | 576 | return; |
598 | } | 577 | } |
@@ -648,15 +627,12 @@ static u32 emulated_msrs[] = { | |||
648 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | 627 | static void set_efer(struct kvm_vcpu *vcpu, u64 efer) |
649 | { | 628 | { |
650 | if (efer & efer_reserved_bits) { | 629 | if (efer & efer_reserved_bits) { |
651 | printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n", | ||
652 | efer); | ||
653 | kvm_inject_gp(vcpu, 0); | 630 | kvm_inject_gp(vcpu, 0); |
654 | return; | 631 | return; |
655 | } | 632 | } |
656 | 633 | ||
657 | if (is_paging(vcpu) | 634 | if (is_paging(vcpu) |
658 | && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { | 635 | && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { |
659 | printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n"); | ||
660 | kvm_inject_gp(vcpu, 0); | 636 | kvm_inject_gp(vcpu, 0); |
661 | return; | 637 | return; |
662 | } | 638 | } |
@@ -666,7 +642,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
666 | 642 | ||
667 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 643 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
668 | if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { | 644 | if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { |
669 | printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n"); | ||
670 | kvm_inject_gp(vcpu, 0); | 645 | kvm_inject_gp(vcpu, 0); |
671 | return; | 646 | return; |
672 | } | 647 | } |
@@ -677,7 +652,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer) | |||
677 | 652 | ||
678 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 653 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
679 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { | 654 | if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { |
680 | printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n"); | ||
681 | kvm_inject_gp(vcpu, 0); | 655 | kvm_inject_gp(vcpu, 0); |
682 | return; | 656 | return; |
683 | } | 657 | } |
@@ -966,9 +940,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data) | |||
966 | if (msr >= MSR_IA32_MC0_CTL && | 940 | if (msr >= MSR_IA32_MC0_CTL && |
967 | msr < MSR_IA32_MC0_CTL + 4 * bank_num) { | 941 | msr < MSR_IA32_MC0_CTL + 4 * bank_num) { |
968 | u32 offset = msr - MSR_IA32_MC0_CTL; | 942 | u32 offset = msr - MSR_IA32_MC0_CTL; |
969 | /* only 0 or all 1s can be written to IA32_MCi_CTL */ | 943 | /* only 0 or all 1s can be written to IA32_MCi_CTL |
944 | * some Linux kernels, though, clear bit 10 in bank 4 to | ||
945 | * work around a BIOS/GART TBL issue on AMD K8s; ignore | ||
946 | * this to avoid an uncaught #GP in the guest | ||
947 | */ | ||
970 | if ((offset & 0x3) == 0 && | 948 | if ((offset & 0x3) == 0 && |
971 | data != 0 && data != ~(u64)0) | 949 | data != 0 && (data | (1 << 10)) != ~(u64)0) |
972 | return -1; | 950 | return -1; |
973 | vcpu->arch.mce_banks[offset] = data; | 951 | vcpu->arch.mce_banks[offset] = data; |
974 | break; | 952 | break; |
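
    The relaxed check above accepts exactly three shapes of data for
    IA32_MCi_CTL: zero, all ones, and all ones with bit 10 cleared (the
    K8 GART quirk). A minimal stand-alone sketch of that acceptance test
    (the helper name is illustrative, not part of the patch):

        #include <stdbool.h>
        #include <stdint.h>

        /* illustrative: true if a write to IA32_MCi_CTL should be
         * accepted under the relaxed rule above */
        static bool mci_ctl_write_ok(uint64_t data)
        {
                if (data == 0)
                        return true;
                /* OR-ing bit 10 back in maps both ~0 and ~0-minus-bit-10
                 * onto all ones, so one comparison covers both */
                return (data | (1ULL << 10)) == ~(uint64_t)0;
        }
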
@@ -2634,8 +2612,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm, | |||
2634 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | 2612 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, |
2635 | struct kvm_dirty_log *log) | 2613 | struct kvm_dirty_log *log) |
2636 | { | 2614 | { |
2637 | int r, n, i; | 2615 | int r, i; |
2638 | struct kvm_memory_slot *memslot; | 2616 | struct kvm_memory_slot *memslot; |
2617 | unsigned long n; | ||
2639 | unsigned long is_dirty = 0; | 2618 | unsigned long is_dirty = 0; |
2640 | unsigned long *dirty_bitmap = NULL; | 2619 | unsigned long *dirty_bitmap = NULL; |
2641 | 2620 | ||
@@ -2650,7 +2629,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
2650 | if (!memslot->dirty_bitmap) | 2629 | if (!memslot->dirty_bitmap) |
2651 | goto out; | 2630 | goto out; |
2652 | 2631 | ||
2653 | n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; | 2632 | n = kvm_dirty_bitmap_bytes(memslot); |
2654 | 2633 | ||
2655 | r = -ENOMEM; | 2634 | r = -ENOMEM; |
2656 | dirty_bitmap = vmalloc(n); | 2635 | dirty_bitmap = vmalloc(n); |
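
    kvm_dirty_bitmap_bytes() centralizes the size computation it replaces
    here; a plausible definition, mirroring the old open-coded expression
    (a sketch, not quoted from the patch):

        /* bytes needed to hold one dirty bit per page, rounded up to
         * whole longs so bitmap operations stay word-aligned */
        static unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
        {
                return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
        }
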
@@ -4482,7 +4461,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
4482 | kvm_set_cr8(vcpu, kvm_run->cr8); | 4461 | kvm_set_cr8(vcpu, kvm_run->cr8); |
4483 | 4462 | ||
4484 | if (vcpu->arch.pio.cur_count) { | 4463 | if (vcpu->arch.pio.cur_count) { |
4464 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
4485 | r = complete_pio(vcpu); | 4465 | r = complete_pio(vcpu); |
4466 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | ||
4486 | if (r) | 4467 | if (r) |
4487 | goto out; | 4468 | goto out; |
4488 | } | 4469 | } |
@@ -5145,6 +5126,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
5145 | int ret = 0; | 5126 | int ret = 0; |
5146 | u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); | 5127 | u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR); |
5147 | u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); | 5128 | u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR); |
5129 | u32 desc_limit; | ||
5148 | 5130 | ||
5149 | old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); | 5131 | old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL); |
5150 | 5132 | ||
@@ -5167,7 +5149,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason) | |||
5167 | } | 5149 | } |
5168 | } | 5150 | } |
5169 | 5151 | ||
5170 | if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) { | 5152 | desc_limit = get_desc_limit(&nseg_desc); |
5153 | if (!nseg_desc.p || | ||
5154 | ((desc_limit < 0x67 && (nseg_desc.type & 8)) || | ||
5155 | desc_limit < 0x2b)) { | ||
5171 | kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); | 5156 | kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); |
5172 | return 1; | 5157 | return 1; |
5173 | } | 5158 | } |
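
    The new limit test encodes the architectural TSS minimums: a 32-bit
    TSS descriptor (type has bit 3 set) needs a limit of at least 0x67
    (0x68 bytes), a 16-bit TSS at least 0x2b (0x2c bytes). Rewritten as a
    stand-alone predicate for clarity (illustrative only):

        #include <stdbool.h>

        /* true if the descriptor limit is large enough for its TSS type */
        static bool tss_limit_ok(unsigned int desc_limit, unsigned int type)
        {
                if (type & 8)                   /* 32-bit TSS descriptor */
                        return desc_limit >= 0x67;
                return desc_limit >= 0x2b;      /* 16-bit TSS descriptor */
        }
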
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 7e59dc1d3fc2..2bdf628066bd 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c | |||
@@ -115,7 +115,7 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
115 | local_irq_save(flags); | 115 | local_irq_save(flags); |
116 | if (lguest_data.hcall_status[next_call] != 0xFF) { | 116 | if (lguest_data.hcall_status[next_call] != 0xFF) { |
117 | /* Table full, so do normal hcall which will flush table. */ | 117 | /* Table full, so do normal hcall which will flush table. */ |
118 | kvm_hypercall4(call, arg1, arg2, arg3, arg4); | 118 | hcall(call, arg1, arg2, arg3, arg4); |
119 | } else { | 119 | } else { |
120 | lguest_data.hcalls[next_call].arg0 = call; | 120 | lguest_data.hcalls[next_call].arg0 = call; |
121 | lguest_data.hcalls[next_call].arg1 = arg1; | 121 | lguest_data.hcalls[next_call].arg1 = arg1; |
@@ -145,46 +145,45 @@ static void async_hcall(unsigned long call, unsigned long arg1, | |||
145 | * So, when we're in lazy mode, we call async_hcall() to store the call for | 145 | * So, when we're in lazy mode, we call async_hcall() to store the call for |
146 | * future processing: | 146 | * future processing: |
147 | */ | 147 | */ |
148 | static void lazy_hcall1(unsigned long call, | 148 | static void lazy_hcall1(unsigned long call, unsigned long arg1) |
149 | unsigned long arg1) | ||
150 | { | 149 | { |
151 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 150 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
152 | kvm_hypercall1(call, arg1); | 151 | hcall(call, arg1, 0, 0, 0); |
153 | else | 152 | else |
154 | async_hcall(call, arg1, 0, 0, 0); | 153 | async_hcall(call, arg1, 0, 0, 0); |
155 | } | 154 | } |
156 | 155 | ||
157 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ | 156 | /* You can imagine what lazy_hcall2, 3 and 4 look like. :*/ |
158 | static void lazy_hcall2(unsigned long call, | 157 | static void lazy_hcall2(unsigned long call, |
159 | unsigned long arg1, | 158 | unsigned long arg1, |
160 | unsigned long arg2) | 159 | unsigned long arg2) |
161 | { | 160 | { |
162 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 161 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
163 | kvm_hypercall2(call, arg1, arg2); | 162 | hcall(call, arg1, arg2, 0, 0); |
164 | else | 163 | else |
165 | async_hcall(call, arg1, arg2, 0, 0); | 164 | async_hcall(call, arg1, arg2, 0, 0); |
166 | } | 165 | } |
167 | 166 | ||
168 | static void lazy_hcall3(unsigned long call, | 167 | static void lazy_hcall3(unsigned long call, |
169 | unsigned long arg1, | 168 | unsigned long arg1, |
170 | unsigned long arg2, | 169 | unsigned long arg2, |
171 | unsigned long arg3) | 170 | unsigned long arg3) |
172 | { | 171 | { |
173 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 172 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
174 | kvm_hypercall3(call, arg1, arg2, arg3); | 173 | hcall(call, arg1, arg2, arg3, 0); |
175 | else | 174 | else |
176 | async_hcall(call, arg1, arg2, arg3, 0); | 175 | async_hcall(call, arg1, arg2, arg3, 0); |
177 | } | 176 | } |
178 | 177 | ||
179 | #ifdef CONFIG_X86_PAE | 178 | #ifdef CONFIG_X86_PAE |
180 | static void lazy_hcall4(unsigned long call, | 179 | static void lazy_hcall4(unsigned long call, |
181 | unsigned long arg1, | 180 | unsigned long arg1, |
182 | unsigned long arg2, | 181 | unsigned long arg2, |
183 | unsigned long arg3, | 182 | unsigned long arg3, |
184 | unsigned long arg4) | 183 | unsigned long arg4) |
185 | { | 184 | { |
186 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) | 185 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) |
187 | kvm_hypercall4(call, arg1, arg2, arg3, arg4); | 186 | hcall(call, arg1, arg2, arg3, arg4); |
188 | else | 187 | else |
189 | async_hcall(call, arg1, arg2, arg3, arg4); | 188 | async_hcall(call, arg1, arg2, arg3, arg4); |
190 | } | 189 | } |
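
    All four lazy_hcall variants follow the same shape; the pattern
    reduces to a two-way dispatch (a sketch with an invented name, not
    lguest code):

        /* issue immediately outside lazy mode, otherwise queue the call
         * in lguest_data.hcalls[] to be flushed by LHCALL_FLUSH_ASYNC */
        static void lazy_dispatch(unsigned long call, unsigned long a1,
                                  unsigned long a2, unsigned long a3,
                                  unsigned long a4)
        {
                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
                        hcall(call, a1, a2, a3, a4);
                else
                        async_hcall(call, a1, a2, a3, a4);
        }
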
@@ -196,13 +195,13 @@ static void lazy_hcall4(unsigned long call, | |||
196 | :*/ | 195 | :*/ |
197 | static void lguest_leave_lazy_mmu_mode(void) | 196 | static void lguest_leave_lazy_mmu_mode(void) |
198 | { | 197 | { |
199 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 198 | hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); |
200 | paravirt_leave_lazy_mmu(); | 199 | paravirt_leave_lazy_mmu(); |
201 | } | 200 | } |
202 | 201 | ||
203 | static void lguest_end_context_switch(struct task_struct *next) | 202 | static void lguest_end_context_switch(struct task_struct *next) |
204 | { | 203 | { |
205 | kvm_hypercall0(LHCALL_FLUSH_ASYNC); | 204 | hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0); |
206 | paravirt_end_context_switch(next); | 205 | paravirt_end_context_switch(next); |
207 | } | 206 | } |
208 | 207 | ||
@@ -286,7 +285,7 @@ static void lguest_write_idt_entry(gate_desc *dt, | |||
286 | /* Keep the local copy up to date. */ | 285 | /* Keep the local copy up to date. */ |
287 | native_write_idt_entry(dt, entrynum, g); | 286 | native_write_idt_entry(dt, entrynum, g); |
288 | /* Tell Host about this new entry. */ | 287 | /* Tell Host about this new entry. */ |
289 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]); | 288 | hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1], 0); |
290 | } | 289 | } |
291 | 290 | ||
292 | /* | 291 | /* |
@@ -300,7 +299,7 @@ static void lguest_load_idt(const struct desc_ptr *desc) | |||
300 | struct desc_struct *idt = (void *)desc->address; | 299 | struct desc_struct *idt = (void *)desc->address; |
301 | 300 | ||
302 | for (i = 0; i < (desc->size+1)/8; i++) | 301 | for (i = 0; i < (desc->size+1)/8; i++) |
303 | kvm_hypercall3(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b); | 302 | hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b, 0); |
304 | } | 303 | } |
305 | 304 | ||
306 | /* | 305 | /* |
@@ -321,7 +320,7 @@ static void lguest_load_gdt(const struct desc_ptr *desc) | |||
321 | struct desc_struct *gdt = (void *)desc->address; | 320 | struct desc_struct *gdt = (void *)desc->address; |
322 | 321 | ||
323 | for (i = 0; i < (desc->size+1)/8; i++) | 322 | for (i = 0; i < (desc->size+1)/8; i++) |
324 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b); | 323 | hcall(LHCALL_LOAD_GDT_ENTRY, i, gdt[i].a, gdt[i].b, 0); |
325 | } | 324 | } |
326 | 325 | ||
327 | /* | 326 | /* |
@@ -334,8 +333,8 @@ static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum, | |||
334 | { | 333 | { |
335 | native_write_gdt_entry(dt, entrynum, desc, type); | 334 | native_write_gdt_entry(dt, entrynum, desc, type); |
336 | /* Tell Host about this new entry. */ | 335 | /* Tell Host about this new entry. */ |
337 | kvm_hypercall3(LHCALL_LOAD_GDT_ENTRY, entrynum, | 336 | hcall(LHCALL_LOAD_GDT_ENTRY, entrynum, |
338 | dt[entrynum].a, dt[entrynum].b); | 337 | dt[entrynum].a, dt[entrynum].b, 0); |
339 | } | 338 | } |
340 | 339 | ||
341 | /* | 340 | /* |
@@ -931,7 +930,7 @@ static int lguest_clockevent_set_next_event(unsigned long delta, | |||
931 | } | 930 | } |
932 | 931 | ||
933 | /* Please wake us this far in the future. */ | 932 | /* Please wake us this far in the future. */ |
934 | kvm_hypercall1(LHCALL_SET_CLOCKEVENT, delta); | 933 | hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0, 0); |
935 | return 0; | 934 | return 0; |
936 | } | 935 | } |
937 | 936 | ||
@@ -942,7 +941,7 @@ static void lguest_clockevent_set_mode(enum clock_event_mode mode, | |||
942 | case CLOCK_EVT_MODE_UNUSED: | 941 | case CLOCK_EVT_MODE_UNUSED: |
943 | case CLOCK_EVT_MODE_SHUTDOWN: | 942 | case CLOCK_EVT_MODE_SHUTDOWN: |
944 | /* A 0 argument shuts the clock down. */ | 943 | /* A 0 argument shuts the clock down. */ |
945 | kvm_hypercall0(LHCALL_SET_CLOCKEVENT); | 944 | hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0, 0); |
946 | break; | 945 | break; |
947 | case CLOCK_EVT_MODE_ONESHOT: | 946 | case CLOCK_EVT_MODE_ONESHOT: |
948 | /* This is what we expect. */ | 947 | /* This is what we expect. */ |
@@ -1100,7 +1099,7 @@ static void set_lguest_basic_apic_ops(void) | |||
1100 | /* STOP! Until an interrupt comes in. */ | 1099 | /* STOP! Until an interrupt comes in. */ |
1101 | static void lguest_safe_halt(void) | 1100 | static void lguest_safe_halt(void) |
1102 | { | 1101 | { |
1103 | kvm_hypercall0(LHCALL_HALT); | 1102 | hcall(LHCALL_HALT, 0, 0, 0, 0); |
1104 | } | 1103 | } |
1105 | 1104 | ||
1106 | /* | 1105 | /* |
@@ -1112,8 +1111,8 @@ static void lguest_safe_halt(void) | |||
1112 | */ | 1111 | */ |
1113 | static void lguest_power_off(void) | 1112 | static void lguest_power_off(void) |
1114 | { | 1113 | { |
1115 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa("Power down"), | 1114 | hcall(LHCALL_SHUTDOWN, __pa("Power down"), |
1116 | LGUEST_SHUTDOWN_POWEROFF); | 1115 | LGUEST_SHUTDOWN_POWEROFF, 0, 0); |
1117 | } | 1116 | } |
1118 | 1117 | ||
1119 | /* | 1118 | /* |
@@ -1123,7 +1122,7 @@ static void lguest_power_off(void) | |||
1123 | */ | 1122 | */ |
1124 | static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) | 1123 | static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p) |
1125 | { | 1124 | { |
1126 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF); | 1125 | hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0, 0); |
1127 | /* The hcall won't return, but to keep gcc happy, we're "done". */ | 1126 | /* The hcall won't return, but to keep gcc happy, we're "done". */ |
1128 | return NOTIFY_DONE; | 1127 | return NOTIFY_DONE; |
1129 | } | 1128 | } |
@@ -1162,7 +1161,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
1162 | len = sizeof(scratch) - 1; | 1161 | len = sizeof(scratch) - 1; |
1163 | scratch[len] = '\0'; | 1162 | scratch[len] = '\0'; |
1164 | memcpy(scratch, buf, len); | 1163 | memcpy(scratch, buf, len); |
1165 | kvm_hypercall1(LHCALL_NOTIFY, __pa(scratch)); | 1164 | hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0); |
1166 | 1165 | ||
1167 | /* This routine returns the number of bytes actually written. */ | 1166 | /* This routine returns the number of bytes actually written. */ |
1168 | return len; | 1167 | return len; |
@@ -1174,7 +1173,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count) | |||
1174 | */ | 1173 | */ |
1175 | static void lguest_restart(char *reason) | 1174 | static void lguest_restart(char *reason) |
1176 | { | 1175 | { |
1177 | kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART); | 1176 | hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0); |
1178 | } | 1177 | } |
1179 | 1178 | ||
1180 | /*G:050 | 1179 | /*G:050 |
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S index 27eac0faee48..4f420c2f2d55 100644 --- a/arch/x86/lguest/i386_head.S +++ b/arch/x86/lguest/i386_head.S | |||
@@ -32,7 +32,7 @@ ENTRY(lguest_entry) | |||
32 | */ | 32 | */ |
33 | movl $LHCALL_LGUEST_INIT, %eax | 33 | movl $LHCALL_LGUEST_INIT, %eax |
34 | movl $lguest_data - __PAGE_OFFSET, %ebx | 34 | movl $lguest_data - __PAGE_OFFSET, %ebx |
35 | .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ | 35 | int $LGUEST_TRAP_ENTRY |
36 | 36 | ||
37 | /* Set up the initial stack so we can run C code. */ | 37 | /* Set up the initial stack so we can run C code. */ |
38 | movl $(init_thread_union+THREAD_SIZE),%esp | 38 | movl $(init_thread_union+THREAD_SIZE),%esp |
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index f46c340727b8..069ce7c37c01 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | #include <linux/hugetlb.h> | 10 | #include <linux/hugetlb.h> |
11 | #include <linux/pagemap.h> | 11 | #include <linux/pagemap.h> |
12 | #include <linux/slab.h> | ||
13 | #include <linux/err.h> | 12 | #include <linux/err.h> |
14 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
15 | #include <asm/mman.h> | 14 | #include <asm/mman.h> |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index e71c5cbc8f35..b278535b14aa 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/gfp.h> | ||
1 | #include <linux/initrd.h> | 2 | #include <linux/initrd.h> |
2 | #include <linux/ioport.h> | 3 | #include <linux/ioport.h> |
3 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
@@ -331,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr) | |||
331 | 332 | ||
332 | void free_init_pages(char *what, unsigned long begin, unsigned long end) | 333 | void free_init_pages(char *what, unsigned long begin, unsigned long end) |
333 | { | 334 | { |
334 | unsigned long addr = begin; | 335 | unsigned long addr; |
336 | unsigned long begin_aligned, end_aligned; | ||
335 | 337 | ||
336 | if (addr >= end) | 338 | /* Make sure boundaries are page aligned */ |
339 | begin_aligned = PAGE_ALIGN(begin); | ||
340 | end_aligned = end & PAGE_MASK; | ||
341 | |||
342 | if (WARN_ON(begin_aligned != begin || end_aligned != end)) { | ||
343 | begin = begin_aligned; | ||
344 | end = end_aligned; | ||
345 | } | ||
346 | |||
347 | if (begin >= end) | ||
337 | return; | 348 | return; |
338 | 349 | ||
350 | addr = begin; | ||
351 | |||
339 | /* | 352 | /* |
340 | * If debugging page accesses then do not free this memory but | 353 | * If debugging page accesses then do not free this memory but |
341 | * mark them not present - any buggy init-section access will | 354 | * mark them not present - any buggy init-section access will |
@@ -343,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
343 | */ | 356 | */ |
344 | #ifdef CONFIG_DEBUG_PAGEALLOC | 357 | #ifdef CONFIG_DEBUG_PAGEALLOC |
345 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", | 358 | printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", |
346 | begin, PAGE_ALIGN(end)); | 359 | begin, end); |
347 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); | 360 | set_memory_np(begin, (end - begin) >> PAGE_SHIFT); |
348 | #else | 361 | #else |
349 | /* | 362 | /* |
@@ -358,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) | |||
358 | for (; addr < end; addr += PAGE_SIZE) { | 371 | for (; addr < end; addr += PAGE_SIZE) { |
359 | ClearPageReserved(virt_to_page(addr)); | 372 | ClearPageReserved(virt_to_page(addr)); |
360 | init_page_count(virt_to_page(addr)); | 373 | init_page_count(virt_to_page(addr)); |
361 | memset((void *)(addr & ~(PAGE_SIZE-1)), | 374 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
362 | POISON_FREE_INITMEM, PAGE_SIZE); | ||
363 | free_page(addr); | 375 | free_page(addr); |
364 | totalram_pages++; | 376 | totalram_pages++; |
365 | } | 377 | } |
@@ -376,6 +388,15 @@ void free_initmem(void) | |||
376 | #ifdef CONFIG_BLK_DEV_INITRD | 388 | #ifdef CONFIG_BLK_DEV_INITRD |
377 | void free_initrd_mem(unsigned long start, unsigned long end) | 389 | void free_initrd_mem(unsigned long start, unsigned long end) |
378 | { | 390 | { |
379 | free_init_pages("initrd memory", start, end); | 391 | /* |
392 | * end may be unaligned, and we cannot align it here: the | ||
393 | * decompressor could be confused by an aligned initrd_end. | ||
394 | * We already reserved the trailing partial page in | ||
395 | * - i386_start_kernel() | ||
396 | * - x86_64_start_kernel() | ||
397 | * - relocate_initrd() | ||
398 | * so here we can safely PAGE_ALIGN() to free that partial page | ||
399 | */ | ||
400 | free_init_pages("initrd memory", start, PAGE_ALIGN(end)); | ||
380 | } | 401 | } |
381 | #endif | 402 | #endif |
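
    The PAGE_ALIGN()/PAGE_MASK pair used above rounds in opposite
    directions; a worked example with 4 KiB pages (values invented):

        /* PAGE_SIZE = 0x1000, PAGE_MASK = ~0xFFF
         * begin = 0x1234  ->  PAGE_ALIGN(begin) = 0x2000   (round up)
         * end   = 0x5678  ->  end & PAGE_MASK   = 0x5000   (round down)
         * so only the whole pages in [0x2000, 0x5000) are freed, and a
         * misaligned caller trips the WARN_ON() instead of freeing a
         * partial page. */
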
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 5cb3f0f54f47..bca79091b9d6 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -25,11 +25,11 @@ | |||
25 | #include <linux/pfn.h> | 25 | #include <linux/pfn.h> |
26 | #include <linux/poison.h> | 26 | #include <linux/poison.h> |
27 | #include <linux/bootmem.h> | 27 | #include <linux/bootmem.h> |
28 | #include <linux/slab.h> | ||
29 | #include <linux/proc_fs.h> | 28 | #include <linux/proc_fs.h> |
30 | #include <linux/memory_hotplug.h> | 29 | #include <linux/memory_hotplug.h> |
31 | #include <linux/initrd.h> | 30 | #include <linux/initrd.h> |
32 | #include <linux/cpumask.h> | 31 | #include <linux/cpumask.h> |
32 | #include <linux/gfp.h> | ||
33 | 33 | ||
34 | #include <asm/asm.h> | 34 | #include <asm/asm.h> |
35 | #include <asm/bios_ebda.h> | 35 | #include <asm/bios_ebda.h> |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index e9b040e1cde5..ee41bba315d1 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/memory_hotplug.h> | 30 | #include <linux/memory_hotplug.h> |
31 | #include <linux/nmi.h> | 31 | #include <linux/nmi.h> |
32 | #include <linux/gfp.h> | ||
32 | 33 | ||
33 | #include <asm/processor.h> | 34 | #include <asm/processor.h> |
34 | #include <asm/bios_ebda.h> | 35 | #include <asm/bios_ebda.h> |
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index 536fb6823366..5d0e67fff1a6 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kdebug.h> | 21 | #include <linux/kdebug.h> |
22 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | #include <linux/slab.h> | ||
24 | #include <asm/cacheflush.h> | 25 | #include <asm/cacheflush.h> |
25 | #include <asm/tlbflush.h> | 26 | #include <asm/tlbflush.h> |
26 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index 34a3291ca103..3adff7dcc148 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/debugfs.h> | 28 | #include <linux/debugfs.h> |
29 | #include <linux/slab.h> | ||
29 | #include <linux/uaccess.h> | 30 | #include <linux/uaccess.h> |
30 | #include <linux/io.h> | 31 | #include <linux/io.h> |
31 | #include <linux/version.h> | 32 | #include <linux/version.h> |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1d4eb93d333c..28195c350b97 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -6,13 +6,13 @@ | |||
6 | #include <linux/bootmem.h> | 6 | #include <linux/bootmem.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/slab.h> | ||
10 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
11 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
12 | #include <linux/seq_file.h> | 11 | #include <linux/seq_file.h> |
13 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
14 | #include <linux/pfn.h> | 13 | #include <linux/pfn.h> |
15 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
15 | #include <linux/gfp.h> | ||
16 | 16 | ||
17 | #include <asm/e820.h> | 17 | #include <asm/e820.h> |
18 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
@@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, | |||
291 | */ | 291 | */ |
292 | if (kernel_set_to_readonly && | 292 | if (kernel_set_to_readonly && |
293 | within(address, (unsigned long)_text, | 293 | within(address, (unsigned long)_text, |
294 | (unsigned long)__end_rodata_hpage_align)) | 294 | (unsigned long)__end_rodata_hpage_align)) { |
295 | pgprot_val(forbidden) |= _PAGE_RW; | 295 | unsigned int level; |
296 | |||
297 | /* | ||
298 | * Don't enforce the !RW mapping for the kernel text mapping | ||
299 | * if the current mapping is already using small pages. | ||
300 | * No need to work hard to preserve large page mappings in this | ||
301 | * case. | ||
302 | * | ||
303 | * This also fixes the Linux Xen paravirt guest boot failure | ||
304 | * (because of unexpected read-only mappings for kernel identity | ||
305 | * mappings). In this paravirt guest case, the kernel text | ||
306 | * mapping and the kernel identity mapping share the same | ||
307 | * page-table pages. Thus we can't really use different | ||
308 | * protections for the kernel text and identity mappings. Also, | ||
309 | * these shared mappings are made of small page mappings. | ||
310 | * Thus, not enforcing the !RW mapping for small-page kernel | ||
311 | * text mappings helps the Linux Xen paravirt guest boot as | ||
312 | * well. | ||
313 | */ | ||
314 | if (lookup_address(address, &level) && (level != PG_LEVEL_4K)) | ||
315 | pgprot_val(forbidden) |= _PAGE_RW; | ||
316 | } | ||
296 | #endif | 317 | #endif |
297 | 318 | ||
298 | prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); | 319 | prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); |
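
    lookup_address() reports the level of the mapping it finds; the new
    check keeps the read-only enforcement only for mappings larger than
    4K. A sketch of the classification (enum values as in the kernel
    headers; the helper name is invented):

        /* from asm/pgtable_types.h: */
        enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M,
                        PG_LEVEL_1G, PG_LEVEL_NUM };

        /* illustrative: true when the address is still covered by a
         * 2M/1G mapping; only then is the RO enforcement kept, so the
         * large page does not have to be split */
        static int is_large_mapping(unsigned long address)
        {
                unsigned int level;

                return lookup_address(address, &level) && level != PG_LEVEL_4K;
        }
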
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c index ae9648eb1c7f..edc8b95afc1a 100644 --- a/arch/x86/mm/pat.c +++ b/arch/x86/mm/pat.c | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/gfp.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/mm.h> | 16 | #include <linux/mm.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/rbtree.h> | 18 | #include <linux/rbtree.h> |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index c9ba9deafe83..5c4ee422590e 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -1,4 +1,5 @@ | |||
1 | #include <linux/mm.h> | 1 | #include <linux/mm.h> |
2 | #include <linux/gfp.h> | ||
2 | #include <asm/pgalloc.h> | 3 | #include <asm/pgalloc.h> |
3 | #include <asm/pgtable.h> | 4 | #include <asm/pgtable.h> |
4 | #include <asm/tlb.h> | 5 | #include <asm/tlb.h> |
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c index 46c8834aedc0..1a8faf09afed 100644 --- a/arch/x86/mm/pgtable_32.c +++ b/arch/x86/mm/pgtable_32.c | |||
@@ -6,7 +6,6 @@ | |||
6 | #include <linux/swap.h> | 6 | #include <linux/swap.h> |
7 | #include <linux/smp.h> | 7 | #include <linux/smp.h> |
8 | #include <linux/highmem.h> | 8 | #include <linux/highmem.h> |
9 | #include <linux/slab.h> | ||
10 | #include <linux/pagemap.h> | 9 | #include <linux/pagemap.h> |
11 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
12 | #include <linux/module.h> | 11 | #include <linux/module.h> |
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index 6a58256dce9f..090cbbec7dbd 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -46,17 +46,6 @@ | |||
46 | 46 | ||
47 | static unsigned long reset_value[NUM_VIRT_COUNTERS]; | 47 | static unsigned long reset_value[NUM_VIRT_COUNTERS]; |
48 | 48 | ||
49 | /* IbsFetchCtl bits/masks */ | ||
50 | #define IBS_FETCH_RAND_EN (1ULL<<57) | ||
51 | #define IBS_FETCH_VAL (1ULL<<49) | ||
52 | #define IBS_FETCH_ENABLE (1ULL<<48) | ||
53 | #define IBS_FETCH_CNT_MASK 0xFFFF0000ULL | ||
54 | |||
55 | /* IbsOpCtl bits */ | ||
56 | #define IBS_OP_CNT_CTL (1ULL<<19) | ||
57 | #define IBS_OP_VAL (1ULL<<18) | ||
58 | #define IBS_OP_ENABLE (1ULL<<17) | ||
59 | |||
60 | #define IBS_FETCH_SIZE 6 | 49 | #define IBS_FETCH_SIZE 6 |
61 | #define IBS_OP_SIZE 12 | 50 | #define IBS_OP_SIZE 12 |
62 | 51 | ||
@@ -182,7 +171,7 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model, | |||
182 | continue; | 171 | continue; |
183 | } | 172 | } |
184 | rdmsrl(msrs->controls[i].addr, val); | 173 | rdmsrl(msrs->controls[i].addr, val); |
185 | if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) | 174 | if (val & ARCH_PERFMON_EVENTSEL_ENABLE) |
186 | op_x86_warn_in_use(i); | 175 | op_x86_warn_in_use(i); |
187 | val &= model->reserved; | 176 | val &= model->reserved; |
188 | wrmsrl(msrs->controls[i].addr, val); | 177 | wrmsrl(msrs->controls[i].addr, val); |
@@ -290,7 +279,7 @@ op_amd_handle_ibs(struct pt_regs * const regs, | |||
290 | oprofile_write_commit(&entry); | 279 | oprofile_write_commit(&entry); |
291 | 280 | ||
292 | /* reenable the IRQ */ | 281 | /* reenable the IRQ */ |
293 | ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT_MASK); | 282 | ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT); |
294 | ctl |= IBS_FETCH_ENABLE; | 283 | ctl |= IBS_FETCH_ENABLE; |
295 | wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); | 284 | wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl); |
296 | } | 285 | } |
@@ -330,7 +319,7 @@ static inline void op_amd_start_ibs(void) | |||
330 | return; | 319 | return; |
331 | 320 | ||
332 | if (ibs_config.fetch_enabled) { | 321 | if (ibs_config.fetch_enabled) { |
333 | val = (ibs_config.max_cnt_fetch >> 4) & 0xFFFF; | 322 | val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT; |
334 | val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; | 323 | val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0; |
335 | val |= IBS_FETCH_ENABLE; | 324 | val |= IBS_FETCH_ENABLE; |
336 | wrmsrl(MSR_AMD64_IBSFETCHCTL, val); | 325 | wrmsrl(MSR_AMD64_IBSFETCHCTL, val); |
@@ -352,7 +341,7 @@ static inline void op_amd_start_ibs(void) | |||
352 | * avoid underflows. | 341 | * avoid underflows. |
353 | */ | 342 | */ |
354 | ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, | 343 | ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET, |
355 | 0xFFFFULL); | 344 | IBS_OP_MAX_CNT); |
356 | } | 345 | } |
357 | if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops) | 346 | if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops) |
358 | ibs_op_ctl |= IBS_OP_CNT_CTL; | 347 | ibs_op_ctl |= IBS_OP_CNT_CTL; |
@@ -409,7 +398,7 @@ static void op_amd_start(struct op_msrs const * const msrs) | |||
409 | if (!reset_value[op_x86_phys_to_virt(i)]) | 398 | if (!reset_value[op_x86_phys_to_virt(i)]) |
410 | continue; | 399 | continue; |
411 | rdmsrl(msrs->controls[i].addr, val); | 400 | rdmsrl(msrs->controls[i].addr, val); |
412 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 401 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
413 | wrmsrl(msrs->controls[i].addr, val); | 402 | wrmsrl(msrs->controls[i].addr, val); |
414 | } | 403 | } |
415 | 404 | ||
@@ -429,7 +418,7 @@ static void op_amd_stop(struct op_msrs const * const msrs) | |||
429 | if (!reset_value[op_x86_phys_to_virt(i)]) | 418 | if (!reset_value[op_x86_phys_to_virt(i)]) |
430 | continue; | 419 | continue; |
431 | rdmsrl(msrs->controls[i].addr, val); | 420 | rdmsrl(msrs->controls[i].addr, val); |
432 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | 421 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; |
433 | wrmsrl(msrs->controls[i].addr, val); | 422 | wrmsrl(msrs->controls[i].addr, val); |
434 | } | 423 | } |
435 | 424 | ||
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 5d1727ba409e..2bf90fafa7b5 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -88,7 +88,7 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model, | |||
88 | continue; | 88 | continue; |
89 | } | 89 | } |
90 | rdmsrl(msrs->controls[i].addr, val); | 90 | rdmsrl(msrs->controls[i].addr, val); |
91 | if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) | 91 | if (val & ARCH_PERFMON_EVENTSEL_ENABLE) |
92 | op_x86_warn_in_use(i); | 92 | op_x86_warn_in_use(i); |
93 | val &= model->reserved; | 93 | val &= model->reserved; |
94 | wrmsrl(msrs->controls[i].addr, val); | 94 | wrmsrl(msrs->controls[i].addr, val); |
@@ -166,7 +166,7 @@ static void ppro_start(struct op_msrs const * const msrs) | |||
166 | for (i = 0; i < num_counters; ++i) { | 166 | for (i = 0; i < num_counters; ++i) { |
167 | if (reset_value[i]) { | 167 | if (reset_value[i]) { |
168 | rdmsrl(msrs->controls[i].addr, val); | 168 | rdmsrl(msrs->controls[i].addr, val); |
169 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | 169 | val |= ARCH_PERFMON_EVENTSEL_ENABLE; |
170 | wrmsrl(msrs->controls[i].addr, val); | 170 | wrmsrl(msrs->controls[i].addr, val); |
171 | } | 171 | } |
172 | } | 172 | } |
@@ -184,7 +184,7 @@ static void ppro_stop(struct op_msrs const * const msrs) | |||
184 | if (!reset_value[i]) | 184 | if (!reset_value[i]) |
185 | continue; | 185 | continue; |
186 | rdmsrl(msrs->controls[i].addr, val); | 186 | rdmsrl(msrs->controls[i].addr, val); |
187 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | 187 | val &= ~ARCH_PERFMON_EVENTSEL_ENABLE; |
188 | wrmsrl(msrs->controls[i].addr, val); | 188 | wrmsrl(msrs->controls[i].addr, val); |
189 | } | 189 | } |
190 | } | 190 | } |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 6e22454bfaa6..c7b1ebfb7da7 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/irq.h> | 4 | #include <linux/irq.h> |
5 | #include <linux/dmi.h> | 5 | #include <linux/dmi.h> |
6 | #include <linux/slab.h> | ||
6 | #include <asm/numa.h> | 7 | #include <asm/numa.h> |
7 | #include <asm/pci_x86.h> | 8 | #include <asm/pci_x86.h> |
8 | 9 | ||
@@ -122,8 +123,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
122 | struct acpi_resource_address64 addr; | 123 | struct acpi_resource_address64 addr; |
123 | acpi_status status; | 124 | acpi_status status; |
124 | unsigned long flags; | 125 | unsigned long flags; |
125 | struct resource *root; | 126 | struct resource *root, *conflict; |
126 | u64 start, end; | 127 | u64 start, end, max_len; |
127 | 128 | ||
128 | status = resource_to_addr(acpi_res, &addr); | 129 | status = resource_to_addr(acpi_res, &addr); |
129 | if (!ACPI_SUCCESS(status)) | 130 | if (!ACPI_SUCCESS(status)) |
@@ -140,6 +141,17 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
140 | } else | 141 | } else |
141 | return AE_OK; | 142 | return AE_OK; |
142 | 143 | ||
144 | max_len = addr.maximum - addr.minimum + 1; | ||
145 | if (addr.address_length > max_len) { | ||
146 | dev_printk(KERN_DEBUG, &info->bridge->dev, | ||
147 | "host bridge window length %#llx doesn't fit in " | ||
148 | "%#llx-%#llx, trimming\n", | ||
149 | (unsigned long long) addr.address_length, | ||
150 | (unsigned long long) addr.minimum, | ||
151 | (unsigned long long) addr.maximum); | ||
152 | addr.address_length = max_len; | ||
153 | } | ||
154 | |||
143 | start = addr.minimum + addr.translation_offset; | 155 | start = addr.minimum + addr.translation_offset; |
144 | end = start + addr.address_length - 1; | 156 | end = start + addr.address_length - 1; |
145 | 157 | ||
@@ -157,9 +169,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
157 | return AE_OK; | 169 | return AE_OK; |
158 | } | 170 | } |
159 | 171 | ||
160 | if (insert_resource(root, res)) { | 172 | conflict = insert_resource_conflict(root, res); |
173 | if (conflict) { | ||
161 | dev_err(&info->bridge->dev, | 174 | dev_err(&info->bridge->dev, |
162 | "can't allocate host bridge window %pR\n", res); | 175 | "address space collision: host bridge window %pR " |
176 | "conflicts with %s %pR\n", | ||
177 | res, conflict->name, conflict); | ||
163 | } else { | 178 | } else { |
164 | pci_bus_add_resource(info->bus, res, 0); | 179 | pci_bus_add_resource(info->bus, res, 0); |
165 | info->res_num++; | 180 | info->res_num++; |
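
    The trimming above clamps a BIOS-reported window length to what the
    [minimum, maximum] range can actually decode; a worked example with
    invented values:

        /* addr.minimum        = 0xd0000000
         * addr.maximum        = 0xd7ffffff
         * max_len             = 0xd7ffffff - 0xd0000000 + 1 = 0x08000000
         * addr.address_length = 0x10000000  (bogus, exceeds max_len)
         * after trimming: address_length = 0x08000000, so the window
         * stays within [0xd0000000, 0xd7ffffff] instead of spilling
         * past maximum. */
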
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 294e10cb11e1..cf2e93869c48 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/ioport.h> | 9 | #include <linux/ioport.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/dmi.h> | 11 | #include <linux/dmi.h> |
12 | #include <linux/slab.h> | ||
12 | 13 | ||
13 | #include <asm/acpi.h> | 14 | #include <asm/acpi.h> |
14 | #include <asm/segment.h> | 15 | #include <asm/segment.h> |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index dece3eb9c906..46fd43f79103 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -127,9 +127,6 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list) | |||
127 | continue; | 127 | continue; |
128 | if (!r->start || | 128 | if (!r->start || |
129 | pci_claim_resource(dev, idx) < 0) { | 129 | pci_claim_resource(dev, idx) < 0) { |
130 | dev_info(&dev->dev, | ||
131 | "can't reserve window %pR\n", | ||
132 | r); | ||
133 | /* | 130 | /* |
134 | * Something is wrong with the region. | 131 | * Something is wrong with the region. |
135 | * Invalidate the resource to prevent | 132 | * Invalidate the resource to prevent |
@@ -181,8 +178,6 @@ static void __init pcibios_allocate_resources(int pass) | |||
181 | "BAR %d: reserving %pr (d=%d, p=%d)\n", | 178 | "BAR %d: reserving %pr (d=%d, p=%d)\n", |
182 | idx, r, disabled, pass); | 179 | idx, r, disabled, pass); |
183 | if (pci_claim_resource(dev, idx) < 0) { | 180 | if (pci_claim_resource(dev, idx) < 0) { |
184 | dev_info(&dev->dev, | ||
185 | "can't reserve %pR\n", r); | ||
186 | /* We'll assign a new address later */ | 181 | /* We'll assign a new address later */ |
187 | r->end -= r->start; | 182 | r->end -= r->start; |
188 | r->start = 0; | 183 | r->start = 0; |
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 8b107521d24e..5d362b5ba06f 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/pci.h> | 9 | #include <linux/pci.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/slab.h> | ||
12 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
13 | #include <linux/dmi.h> | 12 | #include <linux/dmi.h> |
14 | #include <linux/io.h> | 13 | #include <linux/io.h> |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 8f3f9a50b1e0..39b9ebe8f886 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/sfi_acpi.h> | 16 | #include <linux/sfi_acpi.h> |
17 | #include <linux/bitmap.h> | 17 | #include <linux/bitmap.h> |
18 | #include <linux/dmi.h> | 18 | #include <linux/dmi.h> |
19 | #include <linux/slab.h> | ||
19 | #include <asm/e820.h> | 20 | #include <asm/e820.h> |
20 | #include <asm/pci_x86.h> | 21 | #include <asm/pci_x86.h> |
21 | #include <asm/acpi.h> | 22 | #include <asm/acpi.h> |
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c index 1c975cc9839e..59a225c17b84 100644 --- a/arch/x86/pci/pcbios.c +++ b/arch/x86/pci/pcbios.c | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | #include <linux/init.h> | 6 | #include <linux/init.h> |
7 | #include <linux/slab.h> | ||
7 | #include <linux/module.h> | 8 | #include <linux/module.h> |
8 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
9 | #include <asm/pci_x86.h> | 10 | #include <asm/pci_x86.h> |
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 81197c62d5b3..3769079874d8 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl> | 6 | * Copyright (c) 2006 Rafael J. Wysocki <rjw@sisk.pl> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/gfp.h> | ||
9 | #include <linux/suspend.h> | 10 | #include <linux/suspend.h> |
10 | #include <linux/bootmem.h> | 11 | #include <linux/bootmem.h> |
11 | 12 | ||
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 65fdc86e923f..d24f983ba1e5 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> | 8 | * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/gfp.h> | ||
11 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
12 | #include <linux/suspend.h> | 13 | #include <linux/suspend.h> |
13 | #include <asm/proto.h> | 14 | #include <asm/proto.h> |
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S index b641388d8286..ad47daeafa4e 100644 --- a/arch/x86/power/hibernate_asm_32.S +++ b/arch/x86/power/hibernate_asm_32.S | |||
@@ -27,10 +27,17 @@ ENTRY(swsusp_arch_suspend) | |||
27 | ret | 27 | ret |
28 | 28 | ||
29 | ENTRY(restore_image) | 29 | ENTRY(restore_image) |
30 | movl mmu_cr4_features, %ecx | ||
30 | movl resume_pg_dir, %eax | 31 | movl resume_pg_dir, %eax |
31 | subl $__PAGE_OFFSET, %eax | 32 | subl $__PAGE_OFFSET, %eax |
32 | movl %eax, %cr3 | 33 | movl %eax, %cr3 |
33 | 34 | ||
35 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
36 | andl $~(X86_CR4_PGE), %ecx | ||
37 | movl %ecx, %cr4; # turn off PGE | ||
38 | movl %cr3, %eax; # flush TLB | ||
39 | movl %eax, %cr3 | ||
40 | 1: | ||
34 | movl restore_pblist, %edx | 41 | movl restore_pblist, %edx |
35 | .p2align 4,,7 | 42 | .p2align 4,,7 |
36 | 43 | ||
@@ -54,16 +61,8 @@ done: | |||
54 | movl $swapper_pg_dir, %eax | 61 | movl $swapper_pg_dir, %eax |
55 | subl $__PAGE_OFFSET, %eax | 62 | subl $__PAGE_OFFSET, %eax |
56 | movl %eax, %cr3 | 63 | movl %eax, %cr3 |
57 | /* Flush TLB, including "global" things (vmalloc) */ | ||
58 | movl mmu_cr4_features, %ecx | 64 | movl mmu_cr4_features, %ecx |
59 | jecxz 1f # cr4 Pentium and higher, skip if zero | 65 | jecxz 1f # cr4 Pentium and higher, skip if zero |
60 | movl %ecx, %edx | ||
61 | andl $~(X86_CR4_PGE), %edx | ||
62 | movl %edx, %cr4; # turn off PGE | ||
63 | 1: | ||
64 | movl %cr3, %eax; # flush TLB | ||
65 | movl %eax, %cr3 | ||
66 | jecxz 1f # cr4 Pentium and higher, skip if zero | ||
67 | movl %ecx, %cr4; # turn PGE back on | 66 | movl %ecx, %cr4; # turn PGE back on |
68 | 1: | 67 | 1: |
69 | 68 | ||
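
    The moved assembly is the classic global-TLB-flush idiom: clearing
    CR4.PGE invalidates global entries, reloading CR3 flushes the rest,
    and restoring CR4 turns global pages back on. The same idiom in C,
    roughly as the kernel's __native_flush_tlb_global() does it (a
    sketch, not the exact source):

        unsigned long cr4 = native_read_cr4();

        native_write_cr4(cr4 & ~X86_CR4_PGE);   /* clears global TLB entries */
        native_write_cr4(cr4);                  /* restore PGE */
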
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 21e1aeb9f3ea..ac74869b8140 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/err.h> | 7 | #include <linux/err.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/slab.h> | ||
9 | #include <linux/init.h> | 10 | #include <linux/init.h> |
10 | #include <linux/random.h> | 11 | #include <linux/random.h> |
11 | #include <linux/elf.h> | 12 | #include <linux/elf.h> |
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c index e133ce25e290..1304bcec8ee5 100644 --- a/arch/x86/xen/debugfs.c +++ b/arch/x86/xen/debugfs.c | |||
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/init.h> | 1 | #include <linux/init.h> |
2 | #include <linux/debugfs.h> | 2 | #include <linux/debugfs.h> |
3 | #include <linux/slab.h> | ||
3 | #include <linux/module.h> | 4 | #include <linux/module.h> |
4 | 5 | ||
5 | #include "debugfs.h" | 6 | #include "debugfs.h" |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index b607239c1ba8..65d8d79b46a8 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/highmem.h> | 28 | #include <linux/highmem.h> |
29 | #include <linux/console.h> | 29 | #include <linux/console.h> |
30 | #include <linux/pci.h> | 30 | #include <linux/pci.h> |
31 | #include <linux/gfp.h> | ||
31 | 32 | ||
32 | #include <xen/xen.h> | 33 | #include <xen/xen.h> |
33 | #include <xen/interface/xen.h> | 34 | #include <xen/interface/xen.h> |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index f9eb7de74f42..914f04695ce5 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/debugfs.h> | 43 | #include <linux/debugfs.h> |
44 | #include <linux/bug.h> | 44 | #include <linux/bug.h> |
45 | #include <linux/module.h> | 45 | #include <linux/module.h> |
46 | #include <linux/gfp.h> | ||
46 | 47 | ||
47 | #include <asm/pgtable.h> | 48 | #include <asm/pgtable.h> |
48 | #include <asm/tlbflush.h> | 49 | #include <asm/tlbflush.h> |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index deafb65ef44e..a29693fd3138 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -14,6 +14,7 @@ | |||
14 | */ | 14 | */ |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/slab.h> | ||
17 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
18 | 19 | ||
19 | #include <asm/paravirt.h> | 20 | #include <asm/paravirt.h> |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 24ded31b5aec..e0500646585d 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/spinlock.h> | 6 | #include <linux/spinlock.h> |
7 | #include <linux/debugfs.h> | 7 | #include <linux/debugfs.h> |
8 | #include <linux/log2.h> | 8 | #include <linux/log2.h> |
9 | #include <linux/gfp.h> | ||
9 | 10 | ||
10 | #include <asm/paravirt.h> | 11 | #include <asm/paravirt.h> |
11 | 12 | ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 0d3f07cd1b5f..32764b8880b5 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/clockchips.h> | 13 | #include <linux/clockchips.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/math64.h> | 15 | #include <linux/math64.h> |
16 | #include <linux/gfp.h> | ||
16 | 17 | ||
17 | #include <asm/pvclock.h> | 18 | #include <asm/pvclock.h> |
18 | #include <asm/xen/hypervisor.h> | 19 | #include <asm/xen/hypervisor.h> |