Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/Makefile        4
-rw-r--r--  arch/x86_64/kernel/aperture.c      2
-rw-r--r--  arch/x86_64/kernel/e820.c         36
-rw-r--r--  arch/x86_64/kernel/entry.S        28
-rw-r--r--  arch/x86_64/kernel/kprobes.c      10
-rw-r--r--  arch/x86_64/kernel/mce.c           8
-rw-r--r--  arch/x86_64/kernel/nmi.c           7
-rw-r--r--  arch/x86_64/kernel/pci-dma.c       2
-rw-r--r--  arch/x86_64/kernel/process.c      14
-rw-r--r--  arch/x86_64/kernel/ptrace.c        5
-rw-r--r--  arch/x86_64/kernel/setup.c         8
-rw-r--r--  arch/x86_64/kernel/time.c          4
-rw-r--r--  arch/x86_64/kernel/traps.c         5
-rw-r--r--  arch/x86_64/kernel/vmlinux.lds.S   2
-rw-r--r--  arch/x86_64/kernel/x8664_ksyms.c   3
15 files changed, 95 insertions, 43 deletions
diff --git a/arch/x86_64/kernel/Makefile b/arch/x86_64/kernel/Makefile
index a098a11e7755..059c88313f4e 100644
--- a/arch/x86_64/kernel/Makefile
+++ b/arch/x86_64/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
 		ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
 		x8664_ksyms.o i387.o syscall.o vsyscall.o \
 		setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o \
-		dmi_scan.o pci-dma.o pci-nommu.o
+		pci-dma.o pci-nommu.o
 
 obj-$(CONFIG_X86_MCE) += mce.o
 obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
@@ -49,5 +49,3 @@ intel_cacheinfo-y += ../../i386/kernel/cpu/intel_cacheinfo.o
 quirks-y += ../../i386/kernel/quirks.o
 i8237-y += ../../i386/kernel/i8237.o
 msr-$(subst m,y,$(CONFIG_X86_MSR)) += ../../i386/kernel/msr.o
-dmi_scan-y += ../../i386/kernel/dmi_scan.o
-
diff --git a/arch/x86_64/kernel/aperture.c b/arch/x86_64/kernel/aperture.c
index fffd6b0a2fab..70b9d21ed675 100644
--- a/arch/x86_64/kernel/aperture.c
+++ b/arch/x86_64/kernel/aperture.c
@@ -80,7 +80,7 @@ static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size)
80 printk("Aperture from %s beyond 4GB. Ignoring.\n",name); 80 printk("Aperture from %s beyond 4GB. Ignoring.\n",name);
81 return 0; 81 return 0;
82 } 82 }
83 if (e820_mapped(aper_base, aper_base + aper_size, E820_RAM)) { 83 if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
84 printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); 84 printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name);
85 return 0; 85 return 0;
86 } 86 }
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 293cd71a266a..62776c07cff1 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -80,7 +80,12 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 	return 0;
 }
 
-int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
+/*
+ * This function checks if any part of the range <start,end> is mapped
+ * with type.
+ */
+int __meminit
+e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
 {
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
@@ -94,6 +99,35 @@ int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
 	return 0;
 }
 
+/*
+ * This function checks if the entire range <start,end> is mapped with type.
+ *
+ * Note: this function only works correct if the e820 table is sorted and
+ * not-overlapping, which is the case
+ */
+int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+{
+	int i;
+	for (i = 0; i < e820.nr_map; i++) {
+		struct e820entry *ei = &e820.map[i];
+		if (type && ei->type != type)
+			continue;
+		/* is the region (part) in overlap with the current region ?*/
+		if (ei->addr >= end || ei->addr + ei->size <= start)
+			continue;
+
+		/* if the region is at the beginning of <start,end> we move
+		 * start to the end of the region since it's ok until there
+		 */
+		if (ei->addr <= start)
+			start = ei->addr + ei->size;
+		/* if start is now at or beyond end, we're done, full coverage */
+		if (start >= end)
+			return 1; /* we're done */
+	}
+	return 0;
+}
+
 /*
  * Find a free area in a specific range.
  */
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 8538bfea30e6..c946e4fe67a7 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -180,6 +180,10 @@ rff_trace:
  *
  * XXX if we had a free scratch register we could save the RSP into the stack frame
  * and report it properly in ps. Unfortunately we haven't.
+ *
+ * When user can change the frames always force IRET. That is because
+ * it deals with uncanonical addresses better. SYSRET has trouble
+ * with them due to bugs in both AMD and Intel CPUs.
  */
 
 ENTRY(system_call)
@@ -254,7 +258,10 @@ sysret_signal:
 	xorl %esi,%esi # oldset -> arg2
 	call ptregscall_common
 1:	movl $_TIF_NEED_RESCHED,%edi
-	jmp sysret_check
+	/* Use IRET because user could have changed frame. This
+	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
+	cli
+	jmp int_with_check
 
 badsys:
 	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
@@ -280,7 +287,8 @@ tracesys:
 	call syscall_trace_leave
 	RESTORE_TOP_OF_STACK %rbx
 	RESTORE_REST
-	jmp ret_from_sys_call
+	/* Use IRET because user could have changed frame */
+	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 
 /*
@@ -408,25 +416,9 @@ ENTRY(stub_execve)
 	CFI_ADJUST_CFA_OFFSET -8
 	CFI_REGISTER rip, r11
 	SAVE_REST
-	movq %r11, %r15
-	CFI_REGISTER rip, r15
 	FIXUP_TOP_OF_STACK %r11
 	call sys_execve
-	GET_THREAD_INFO(%rcx)
-	bt $TIF_IA32,threadinfo_flags(%rcx)
-	CFI_REMEMBER_STATE
-	jc exec_32bit
 	RESTORE_TOP_OF_STACK %r11
-	movq %r15, %r11
-	CFI_REGISTER rip, r11
-	RESTORE_REST
-	pushq %r11
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rip, 0
-	ret
-
-exec_32bit:
-	CFI_RESTORE_STATE
 	movq %rax,RAX(%rsp)
 	RESTORE_REST
 	jmp int_ret_from_sys_call
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index accbff3fec49..1eaa5dae6174 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -53,7 +53,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t *insn)
+static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	switch (*insn) {
 	case 0xfa:		/* cli */
@@ -84,7 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
  * If it does, return the address of the 32-bit displacement word.
  * If not, return null.
  */
-static inline s32 *is_riprel(u8 *insn)
+static s32 __kprobes *is_riprel(u8 *insn)
 {
 #define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
 	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
@@ -229,7 +229,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -237,7 +237,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -245,7 +245,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 10b3e348fc99..6f0790e8b6d3 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -29,6 +29,8 @@
 #define MISC_MCELOG_MINOR 227
 #define NR_BANKS 6
 
+atomic_t mce_entry;
+
 static int mce_dont_init;
 
 /* 0: always panic, 1: panic if deadlock possible, 2: try to avoid panic,
@@ -172,10 +174,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	int i;
 	int panicm_found = 0;
 
+	atomic_inc(&mce_entry);
+
 	if (regs)
 		notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL);
 	if (!banks)
-		return;
+		goto out2;
 
 	memset(&m, 0, sizeof(struct mce));
 	m.cpu = safe_smp_processor_id();
@@ -266,6 +270,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  out:
 	/* Last thing done in the machine check exception to clear state. */
 	wrmsrl(MSR_IA32_MCG_STATUS, 0);
+ out2:
+	atomic_dec(&mce_entry);
 }
 
 /*
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index d9e4067faf05..4e6357fe0ec3 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -34,6 +34,7 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/local.h>
+#include <asm/mce.h>
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -480,6 +481,12 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
 	}
+#ifdef CONFIG_X86_MCE
+	/* Could check oops_in_progress here too, but it's safer
+	   not too */
+	if (atomic_read(&mce_entry) > 0)
+		touched = 1;
+#endif
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 		/*
 		 * Ayiee, looks like this CPU is stuck ...
diff --git a/arch/x86_64/kernel/pci-dma.c b/arch/x86_64/kernel/pci-dma.c
index 03c9eeedb0f3..af035ede70cd 100644
--- a/arch/x86_64/kernel/pci-dma.c
+++ b/arch/x86_64/kernel/pci-dma.c
@@ -48,9 +48,11 @@ dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
 {
 	struct page *page;
 	int node;
+#ifdef CONFIG_PCI
 	if (dev->bus == &pci_bus_type)
 		node = pcibus_to_node(to_pci_dev(dev)->bus);
 	else
+#endif
 		node = numa_node_id();
 	page = alloc_pages_node(node, gfp, order);
 	return page ? page_address(page) : NULL;
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 70dd8e5c6889..fb903e65e079 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -575,8 +575,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	prev->userrsp = read_pda(oldrsp);
 	write_pda(oldrsp, next->userrsp);
 	write_pda(pcurrent, next_p);
+
 	/* This must be here to ensure both math_state_restore() and
-	   kernel_fpu_begin() work consistently. */
+	   kernel_fpu_begin() work consistently.
+	   And the AMD workaround requires it to be after DS reload. */
 	unlazy_fpu(prev_p);
 	write_pda(kernelstack,
 		  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
@@ -781,10 +783,16 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 	}
 	case ARCH_GET_GS: {
 		unsigned long base;
+		unsigned gsindex;
 		if (task->thread.gsindex == GS_TLS_SEL)
 			base = read_32bit_tls(task, GS_TLS);
-		else if (doit)
-			rdmsrl(MSR_KERNEL_GS_BASE, base);
+		else if (doit) {
+			asm("movl %%gs,%0" : "=r" (gsindex));
+			if (gsindex)
+				rdmsrl(MSR_KERNEL_GS_BASE, base);
+			else
+				base = task->thread.gs;
+		}
 		else
 			base = task->thread.gs;
 		ret = put_user(base, (unsigned long __user *)addr);
diff --git a/arch/x86_64/kernel/ptrace.c b/arch/x86_64/kernel/ptrace.c
index d44b2c1e63a6..da8e7903d817 100644
--- a/arch/x86_64/kernel/ptrace.c
+++ b/arch/x86_64/kernel/ptrace.c
@@ -274,11 +274,6 @@ static int putreg(struct task_struct *child,
 				return -EIO;
 			value &= 0xffff;
 			break;
-		case offsetof(struct user_regs_struct, rip):
-			/* Check if the new RIP address is canonical */
-			if (value >= TASK_SIZE_OF(child))
-				return -EIO;
-			break;
 	}
 	put_stack_long(child, regno - sizeof(struct pt_regs), value);
 	return 0;
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 0856ad444f90..759070c82751 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -353,8 +353,10 @@ static __init void parse_cmdline_early (char ** cmdline_p)
 	if (fullarg(from, "enable_timer_pin_1"))
 		disable_timer_pin_1 = -1;
 
-	if (fullarg(from, "nolapic") || fullarg(from, "disableapic"))
+	if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
+		clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
 		disable_apic = 1;
+	}
 
 	if (fullarg(from, "noapic"))
 		skip_ioapic_setup = 1;
@@ -928,6 +930,10 @@ static int __init init_amd(struct cpuinfo_x86 *c)
 	if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
 		set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
 
+	/* Enable workaround for FXSAVE leak */
+	if (c->x86 >= 6)
+		set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
+
 	r = get_model_name(c);
 	if (!r) {
 		switch (c->x86) {
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index ef8bc46dc140..7392570f975d 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -726,7 +726,7 @@ static __init int late_hpet_init(void)
 	unsigned int ntimer;
 
 	if (!vxtime.hpet_address)
-		return -1;
+		return 0;
 
 	memset(&hd, 0, sizeof (hd));
 
@@ -917,6 +917,8 @@ void __init time_init(void)
 		vxtime.hpet_address = 0;
 
 	if (hpet_use_timer) {
+		/* set tick_nsec to use the proper rate for HPET */
+		tick_nsec = TICK_NSEC_HPET;
 		cpu_khz = hpet_calibrate_tsc();
 		timename = "HPET";
 #ifdef CONFIG_X86_PM_TIMER
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c
index 6bda322d3caf..2700b1375c1f 100644
--- a/arch/x86_64/kernel/traps.c
+++ b/arch/x86_64/kernel/traps.c
@@ -30,6 +30,7 @@
 #include <linux/moduleparam.h>
 #include <linux/nmi.h>
 #include <linux/kprobes.h>
+#include <linux/kexec.h>
 
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -433,6 +434,8 @@ void __kprobes __die(const char * str, struct pt_regs * regs, long err)
 	printk(KERN_ALERT "RIP ");
 	printk_address(regs->rip);
 	printk(" RSP <%016lx>\n", regs->rsp);
+	if (kexec_should_crash(current))
+		crash_kexec(regs);
 }
 
 void die(const char * str, struct pt_regs * regs, long err)
@@ -455,6 +458,8 @@ void __kprobes die_nmi(char *str, struct pt_regs *regs)
 	 */
 	printk(str, safe_smp_processor_id());
 	show_registers(regs);
+	if (kexec_should_crash(current))
+		crash_kexec(regs);
 	if (panic_on_timeout || panic_on_oops)
 		panic("nmi watchdog");
 	printk("console shuts up ...\n");
diff --git a/arch/x86_64/kernel/vmlinux.lds.S b/arch/x86_64/kernel/vmlinux.lds.S
index 39ff0708f803..b81f473c4a19 100644
--- a/arch/x86_64/kernel/vmlinux.lds.S
+++ b/arch/x86_64/kernel/vmlinux.lds.S
@@ -65,7 +65,7 @@ SECTIONS
   .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
 	*(.data.cacheline_aligned)
   }
-  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
+  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
   .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
 	*(.data.read_mostly)
   }
diff --git a/arch/x86_64/kernel/x8664_ksyms.c b/arch/x86_64/kernel/x8664_ksyms.c
index d78f46056bda..1def21c9f7cd 100644
--- a/arch/x86_64/kernel/x8664_ksyms.c
+++ b/arch/x86_64/kernel/x8664_ksyms.c
@@ -112,7 +112,6 @@ EXPORT_SYMBOL_GPL(unset_nmi_callback);
 #undef memcpy
 #undef memset
 #undef memmove
-#undef strlen
 
 extern void * memset(void *,int,__kernel_size_t);
 extern size_t strlen(const char *);
@@ -121,8 +120,6 @@ extern void * memcpy(void *,const void *,__kernel_size_t);
 extern void * __memcpy(void *,const void *,__kernel_size_t);
 
 EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strpbrk);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__memcpy);