Diffstat (limited to 'arch/x86')
 25 files changed, 161 insertions(+), 175 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 3b2a5aca4edb..cbcbfdee3ee0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -49,7 +49,10 @@ config X86
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_LZO
 	select HAVE_HW_BREAKPOINT
+	select PERF_EVENTS
+	select ANON_INODES
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_USER_RETURN_NOTIFIER
 
@@ -1244,6 +1247,11 @@ config ARCH_MEMORY_PROBE
 	def_bool X86_64
 	depends on MEMORY_HOTPLUG
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0 if X86_32
+	default 0xdead000000000000 if X86_64
+
 source "mm/Kconfig"
 
 config HIGHPTE
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index f25bbd37765a..fbb47daf2459 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -49,10 +49,13 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,lzo)
 
 suffix-$(CONFIG_KERNEL_GZIP) := gz
 suffix-$(CONFIG_KERNEL_BZIP2) := bz2
 suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
 
 quiet_cmd_mkpiggy = MKPIGGY $@
       cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 842b2a36174a..3b22fe8ab91b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -162,6 +162,10 @@ static int lines, cols;
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
 static void scroll(void)
 {
 	int i;
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 60d2b2db0bc5..56f462cf22d2 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -142,6 +142,32 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
 	return max_cstate;
 }
 
+static inline bool arch_has_acpi_pdc(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	return (c->x86_vendor == X86_VENDOR_INTEL ||
+		c->x86_vendor == X86_VENDOR_CENTAUR);
+}
+
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	buf[2] |= ACPI_PDC_C_CAPABILITY_SMP;
+
+	if (cpu_has(c, X86_FEATURE_EST))
+		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
+
+	if (cpu_has(c, X86_FEATURE_ACPI))
+		buf[2] |= ACPI_PDC_T_FFH;
+
+	/*
+	 * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
+	 */
+	if (!cpu_has(c, X86_FEATURE_MWAIT))
+		buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
+}
+
 #else /* !CONFIG_ACPI */
 
 #define acpi_lapic 0
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 950df434763f..f46b79f6c16c 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
 	__u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index cf86a5e73815..35e89122a42f 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -5,6 +5,29 @@ extern int kstack_depth_to_print;
 
 int x86_is_stack_id(int id, char *name);
 
+struct thread_info;
+struct stacktrace_ops;
+
+typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+				      unsigned long *stack,
+				      unsigned long bp,
+				      const struct stacktrace_ops *ops,
+				      void *data,
+				      unsigned long *end,
+				      int *graph);
+
+extern unsigned long
+print_context_stack(struct thread_info *tinfo,
+		    unsigned long *stack, unsigned long bp,
+		    const struct stacktrace_ops *ops, void *data,
+		    unsigned long *end, int *graph);
+
+extern unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+		       unsigned long *stack, unsigned long bp,
+		       const struct stacktrace_ops *ops, void *data,
+		       unsigned long *end, int *graph);
+
 /* Generic stack tracer with callbacks */
 
 struct stacktrace_ops {
@@ -14,6 +37,7 @@ struct stacktrace_ops {
 	void (*address)(void *data, unsigned long address, int reliable);
 	/* On negative return stop dumping */
 	int (*stack)(void *data, char *name);
+	walk_stack_t		walk_stack;
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index fd5ca97a2ad5..6f35260bb3ef 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_ACPI) += boot.o
 obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += cstate.o processor.o
+obj-y += cstate.o
 endif
 
 $(obj)/wakeup_rm.o:    $(obj)/realmode/wakeup.bin
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
deleted file mode 100644
index d85d1b2432ba..000000000000
--- a/arch/x86/kernel/acpi/processor.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
-
-	/*
-	 * The default of PDC_SMP_T_SWCOORD bit is set for intel x86 cpu so
-	 * that OSPM is capable of native ACPI throttling software
-	 * coordination using BIOS supplied _TSD info.
-	 */
-	buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
-	if (cpu_has(c, X86_FEATURE_EST))
-		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
-
-	if (cpu_has(c, X86_FEATURE_ACPI))
-		buf[2] |= ACPI_PDC_T_FFH;
-
-	/*
-	 * If mwait/monitor is unsupported, C2/C3_FFH will be disabled
-	 */
-	if (!cpu_has(c, X86_FEATURE_MWAIT))
-		buf[2] &= ~(ACPI_PDC_C_C2C3_FFH);
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pr->pdc = obj_list;
-
-	return;
-}
-
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
-	struct cpuinfo_x86 *c = &cpu_data(pr->id);
-
-	pr->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL ||
-	    c->x86_vendor == X86_VENDOR_CENTAUR)
-		init_intel_pdc(pr, c);
-
-	return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
-
-void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
-{
-	if (pr->pdc) {
-		kfree(pr->pdc->pointer->buffer.pointer);
-		kfree(pr->pdc->pointer);
-		kfree(pr->pdc);
-		pr->pdc = NULL;
-	}
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 82e508677b91..f9961034e557 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -162,6 +162,8 @@ static int __init acpi_sleep_setup(char *str)
 #endif
 		if (strncmp(str, "old_ordering", 12) == 0)
 			acpi_old_suspend_ordering();
+		if (strncmp(str, "sci_force_enable", 16) == 0)
+			acpi_set_sci_en_on_resume();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aa57c079c98f..e80f291472a4 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -62,7 +62,7 @@ unsigned int boot_cpu_physical_apicid = -1U;
 /*
  * The highest APIC ID seen during enumeration.
  *
- * On AMD, this determines the messaging protocol we can use: if all APIC IDs
+ * This determines the messaging protocol we can use: if all APIC IDs
  * are in the 0 ... 7 range, then we can use logical addressing which
  * has some performance advantages (better broadcasting).
  *
@@ -1898,14 +1898,24 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_INTEL:
-		if (num_processors > 8)
-			def_to_bigsmp = 1;
-		break;
-	case X86_VENDOR_AMD:
-		if (max_physical_apicid >= 8)
+	/*
+	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+	 * but we need to work other dependencies like SMP_SUSPEND etc
+	 * before this can be done without some confusion.
+	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
+	 *       - Ashok Raj <ashok.raj@intel.com>
+	 */
+	if (max_physical_apicid >= 8) {
+		switch (boot_cpu_data.x86_vendor) {
+		case X86_VENDOR_INTEL:
+			if (!APIC_XAPIC(version)) {
+				def_to_bigsmp = 0;
+				break;
+			}
+			/* If P4 and above fall through */
+		case X86_VENDOR_AMD:
 			def_to_bigsmp = 1;
+		}
 	}
 #endif
 
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index c4cbd3080c1c..65edc180fc82 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -64,23 +64,16 @@ void __init default_setup_apic_routing(void)
 			apic = &apic_x2apic_phys;
 		else
 			apic = &apic_x2apic_cluster;
+		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 	}
 #endif
 
 	if (apic == &apic_flat) {
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
-			if (num_processors > 8)
-				apic = &apic_physflat;
-			break;
-		case X86_VENDOR_AMD:
-			if (max_physical_apicid >= 8)
-				apic = &apic_physflat;
-		}
+		if (max_physical_apicid >= 8)
+			apic = &apic_physflat;
+		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 	}
 
-	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
-
 	if (is_vsmp_box()) {
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index f28decf8dde3..1b1920fa7c80 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -190,9 +190,11 @@ static void do_drv_write(void *_cmd)
 
 static void drv_read(struct drv_cmd *cmd)
 {
+	int err;
 	cmd->val = 0;
 
-	smp_call_function_single(cpumask_any(cmd->mask), do_drv_read, cmd, 1);
+	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
+	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
 }
 
 static void drv_write(struct drv_cmd *cmd)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 45506d5dd8df..d616c06e99b4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2336,6 +2336,7 @@ static const struct stacktrace_ops backtrace_ops = {
 	.warning_symbol		= backtrace_warning_symbol,
 	.stack			= backtrace_stack,
 	.address		= backtrace_address,
+	.walk_stack		= print_context_stack_bp,
 };
 
 #include "../dumpstack.h"
@@ -2346,7 +2347,7 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
 	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	callchain_store(entry, regs->ip);
 
-	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
+	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
 /*
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 0a0aa1cec8f1..6d817554780a 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -109,6 +109,32 @@ print_context_stack(struct thread_info *tinfo,
 	}
 	return bp;
 }
+EXPORT_SYMBOL_GPL(print_context_stack);
+
+unsigned long
+print_context_stack_bp(struct thread_info *tinfo,
+		       unsigned long *stack, unsigned long bp,
+		       const struct stacktrace_ops *ops, void *data,
+		       unsigned long *end, int *graph)
+{
+	struct stack_frame *frame = (struct stack_frame *)bp;
+	unsigned long *ret_addr = &frame->return_address;
+
+	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+		unsigned long addr = *ret_addr;
+
+		if (!__kernel_text_address(addr))
+			break;
+
+		ops->address(data, addr, 1);
+		frame = frame->next_frame;
+		ret_addr = &frame->return_address;
+		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+	}
+
+	return (unsigned long)frame;
+}
+EXPORT_SYMBOL_GPL(print_context_stack_bp);
 
 
 static void
@@ -141,10 +167,11 @@ static void print_trace_address(void *data, unsigned long addr, int reliable)
 }
 
 static const struct stacktrace_ops print_trace_ops = {
-	.warning = print_trace_warning,
-	.warning_symbol = print_trace_warning_symbol,
-	.stack = print_trace_stack,
-	.address = print_trace_address,
+	.warning		= print_trace_warning,
+	.warning_symbol		= print_trace_warning_symbol,
+	.stack			= print_trace_stack,
+	.address		= print_trace_address,
+	.walk_stack		= print_context_stack,
 };
 
 void
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 81086c227ab7..4fd1420faffa 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -14,12 +14,6 @@
 #define get_bp(bp) asm("movq %%rbp, %0" : "=r" (bp) :)
 #endif
 
-extern unsigned long
-print_context_stack(struct thread_info *tinfo,
-		    unsigned long *stack, unsigned long bp,
-		    const struct stacktrace_ops *ops, void *data,
-		    unsigned long *end, int *graph);
-
 extern void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl);
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index e0ed4c7abb62..ae775ca47b25 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -58,7 +58,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
 		context = (struct thread_info *)
 			((unsigned long)stack & (~(THREAD_SIZE - 1)));
-		bp = print_context_stack(context, stack, bp, ops, data, NULL, &graph);
+		bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
 
 		stack = (unsigned long *)context->previous_esp;
 		if (!stack)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index b13af53883aa..0ad9597073f5 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -188,8 +188,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 			if (ops->stack(data, id) < 0)
 				break;
 
-			bp = print_context_stack(tinfo, stack, bp, ops,
-						 data, estack_end, &graph);
+			bp = ops->walk_stack(tinfo, stack, bp, ops,
+					     data, estack_end, &graph);
 			ops->stack(data, "<EOE>");
 			/*
 			 * We link to the next stack via the
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 844c02c65fcb..0c8632433090 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -394,7 +394,7 @@ static enum ucode_state microcode_update_cpu(int cpu)
 	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 	enum ucode_state ustate;
 
-	if (uci->valid && uci->mc)
+	if (uci->valid)
 		ustate = microcode_resume_cpu(cpu);
 	else
 		ustate = microcode_init_cpu(cpu);
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index c3eb207181fe..922eefbb3f6c 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -53,17 +53,19 @@ save_stack_address_nosched(void *data, unsigned long addr, int reliable)
 }
 
 static const struct stacktrace_ops save_stack_ops = {
-	.warning = save_stack_warning,
-	.warning_symbol = save_stack_warning_symbol,
-	.stack = save_stack_stack,
-	.address = save_stack_address,
+	.warning	= save_stack_warning,
+	.warning_symbol	= save_stack_warning_symbol,
+	.stack		= save_stack_stack,
+	.address	= save_stack_address,
+	.walk_stack	= print_context_stack,
 };
 
 static const struct stacktrace_ops save_stack_ops_nosched = {
-	.warning = save_stack_warning,
-	.warning_symbol = save_stack_warning_symbol,
-	.stack = save_stack_stack,
-	.address = save_stack_address_nosched,
+	.warning	= save_stack_warning,
+	.warning_symbol	= save_stack_warning_symbol,
+	.stack		= save_stack_stack,
+	.address	= save_stack_address_nosched,
+	.walk_stack	= print_context_stack,
 };
 
 /*
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b32..3063a0c4858b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba8..58a0f1e88596 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2a..6651dbf58675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
 
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c973f8e2a6cf..9a0c258a86be 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -892,8 +892,7 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10,
-		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
-		);
+		totalhigh_pages << (PAGE_SHIFT-10));
 
 	printk(KERN_INFO "virtual kernel memory layout:\n"
 		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index 044897be021f..3855096c59b8 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -41,10 +41,11 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 }
 
 static struct stacktrace_ops backtrace_ops = {
-	.warning = backtrace_warning,
-	.warning_symbol = backtrace_warning_symbol,
-	.stack = backtrace_stack,
-	.address = backtrace_address,
+	.warning	= backtrace_warning,
+	.warning_symbol	= backtrace_warning_symbol,
+	.stack		= backtrace_stack,
+	.address	= backtrace_address,
+	.walk_stack	= print_context_stack,
 };
 
 struct frame_head {
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 145df00e0387..f939d603adfa 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -51,7 +51,7 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
 	}
 }
 
-void __init update_res(struct pci_root_info *info, size_t start,
+void __devinit update_res(struct pci_root_info *info, size_t start,
 	size_t end, unsigned long flags, int merge)
 {
 	int i;