Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                 |  1 -
-rw-r--r--  arch/x86/boot/compressed/aslr.c  |  9 ++++++++-
-rw-r--r--  arch/x86/include/asm/irq.h       |  2 +-
-rw-r--r--  arch/x86/kernel/apic/hw_nmi.c    | 18 ++++++++++++++----
-rw-r--r--  arch/x86/kernel/traps.c          |  7 ++++---
-rw-r--r--  arch/x86/xen/enlighten.c         |  5 ++++-
-rw-r--r--  arch/x86/xen/setup.c             | 60 ++++++++++++++++++++++++++++++++----------------------------
-rw-r--r--  arch/x86/xen/xen-ops.h           |  1 +
8 files changed, 64 insertions(+), 39 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fcefdda5136d..a8f749ef0fdc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1672,7 +1672,6 @@ config RELOCATABLE
 config RANDOMIZE_BASE
 	bool "Randomize the address of the kernel image"
 	depends on RELOCATABLE
-	depends on !HIBERNATION
 	default n
 	---help---
 	  Randomizes the physical and virtual address at which the
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 4dbf967da50d..fc6091abedb7 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -289,10 +289,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
 	unsigned long choice = (unsigned long)output;
 	unsigned long random;
 
+#ifdef CONFIG_HIBERNATION
+	if (!cmdline_find_option_bool("kaslr")) {
+		debug_putstr("KASLR disabled by default...\n");
+		goto out;
+	}
+#else
 	if (cmdline_find_option_bool("nokaslr")) {
-		debug_putstr("KASLR disabled...\n");
+		debug_putstr("KASLR disabled by cmdline...\n");
 		goto out;
 	}
+#endif
 
 	/* Record the various known unsafe memory ranges. */
 	mem_avoid_init((unsigned long)input, input_size,
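
Note: with CONFIG_HIBERNATION set, KASLR is now off by default and must be requested with "kaslr" on the command line (a resumed hibernation image expects the kernel layout it was hibernated with); without hibernation, the old opt-out "nokaslr" behaviour is unchanged. A minimal sketch of the resulting decision in plain userspace C, where cmdline_has() is a crude stand-in for the boot stub's cmdline_find_option_bool():

/*
 * Hedged sketch, not boot-stub code: the decision table the aslr.c
 * hunk implements. A real command-line parser matches whole tokens,
 * so "nokaslr" would not also match "kaslr" the way strstr() can.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool cmdline_has(const char *cmdline, const char *opt)
{
	return strstr(cmdline, opt) != NULL;	/* crude stand-in */
}

static bool kaslr_enabled(const char *cmdline, bool hibernation)
{
	if (hibernation)
		return cmdline_has(cmdline, "kaslr");	/* opt-in */
	return !cmdline_has(cmdline, "nokaslr");	/* opt-out */
}

int main(void)
{
	printf("%d\n", kaslr_enabled("quiet kaslr", true));	/* 1 */
	printf("%d\n", kaslr_enabled("quiet", true));		/* 0 */
	printf("%d\n", kaslr_enabled("quiet nokaslr", false));	/* 0 */
	return 0;
}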
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index cb6cfcd034cf..a80cbb88ea91 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c3fcb5de5083..6a1e71bde323 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
 static unsigned long backtrace_flag;
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
 	int i;
+	int cpu = get_cpu();
 
-	if (test_and_set_bit(0, &backtrace_flag))
+	if (test_and_set_bit(0, &backtrace_flag)) {
 		/*
 		 * If there is already a trigger_all_cpu_backtrace() in progress
 		 * (backtrace_flag == 1), don't output double cpu dump infos.
 		 */
+		put_cpu();
 		return;
+	}
 
 	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+	if (!include_self)
+		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
-	printk(KERN_INFO "sending NMI to all CPUs:\n");
-	apic->send_IPI_all(NMI_VECTOR);
+	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+		pr_info("sending NMI to %s CPUs:\n",
+			(include_self ? "all" : "other"));
+		apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
+	}
 
 	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
 	for (i = 0; i < 10 * 1000; i++) {
 		if (cpumask_empty(to_cpumask(backtrace_mask)))
 			break;
 		mdelay(1);
+		touch_softlockup_watchdog();
 	}
 
 	clear_bit(0, &backtrace_flag);
 	smp_mb__after_atomic();
+	put_cpu();
 }
 
 static int
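
The new bool parameter exists for the generic helpers layered on top of this function. A sketch of those wrappers, modeled on include/linux/nmi.h from this era (the exact guard and names may differ by tree):

/*
 * Sketch of the generic wrappers served by the signature change;
 * modeled on include/linux/nmi.h, details may vary by kernel version.
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);	/* NMI the calling CPU too */
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);	/* caller dumps its own stack */
	return true;
}
#endif

The get_cpu()/put_cpu() pair pins the caller so the CPU recorded at entry stays valid while the mask is built, and touch_softlockup_watchdog() keeps the up-to-10-second mdelay() poll from tripping the soft-lockup detector.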
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c6eb418c5627..0d0e922fafc1 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -343,6 +343,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 	if (poke_int3_handler(regs))
 		return;
 
+	prev_state = exception_enter();
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 				SIGTRAP) == NOTIFY_STOP)
@@ -351,9 +352,8 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 
 #ifdef CONFIG_KPROBES
 	if (kprobe_int3_handler(regs))
-		return;
+		goto exit;
 #endif
-	prev_state = exception_enter();
 
 	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
 			SIGTRAP) == NOTIFY_STOP)
@@ -433,6 +433,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 	unsigned long dr6;
 	int si_code;
 
+	prev_state = exception_enter();
+
 	get_debugreg(dr6, 6);
 
 	/* Filter out all the reserved bits which are preset to 1 */
@@ -465,7 +467,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
 	if (kprobe_debug_handler(regs))
 		goto exit;
 #endif
-	prev_state = exception_enter();
 
 	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
 			SIGTRAP) == NOTIFY_STOP)
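
Both handlers now call exception_enter() up front and route the early returns that follow it through the exit label, so exception_exit() always runs (the initial poke_int3_handler() check still returns before entering, deliberately). A hedged sketch of the invariant; do_example_trap() and early_handler_consumed() are made-up names, while the enter/exit pairing is the real context-tracking API:

/*
 * Illustrative only: the pairing rule the traps.c hunks enforce.
 * do_example_trap() and early_handler_consumed() are hypothetical.
 */
dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();	/* first, before any sub-handler */

	if (early_handler_consumed(regs))
		goto exit;	/* a bare "return" here would leak the state */

	/* ... normal trap handling ... */
exit:
	exception_exit(prev_state);
}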
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index f17b29210ac4..ffb101e45731 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1537,7 +1537,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
 	if (!xen_pvh_domain())
 		pv_cpu_ops = xen_cpu_ops;
 
-	x86_init.resources.memory_setup = xen_memory_setup;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
+	else
+		x86_init.resources.memory_setup = xen_memory_setup;
 	x86_init.oem.arch_setup = xen_arch_setup;
 	x86_init.oem.banner = xen_banner;
 
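
Whichever callback is installed here runs exactly once during early boot. Roughly, the consumer looks like the following sketch, modeled on setup_memory_map() in arch/x86/kernel/e820.c of this era (details may differ by tree): the selected hook must populate the e820 map and return a label for the boot log.

/*
 * Sketch of the hook's consumer (modeled on arch/x86/kernel/e820.c).
 */
void __init setup_memory_map(void)
{
	char *who;

	who = x86_init.resources.memory_setup();	/* Xen hook fires here */
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}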
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 821a11ada590..2e555163c2fe 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -27,7 +27,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-#include "mmu.h"
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -82,9 +81,6 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 
 	memblock_reserve(start, size);
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return;
-
 	xen_max_p2m_pfn = PFN_DOWN(start + size);
 	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
 		unsigned long mfn = pfn_to_mfn(pfn);
@@ -107,7 +103,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 		.domid = DOMID_SELF
 	};
 	unsigned long len = 0;
-	int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
 	unsigned long pfn;
 	int ret;
 
@@ -121,7 +116,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 				continue;
 			frame = mfn;
 		} else {
-			if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
+			if (mfn != INVALID_P2M_ENTRY)
 				continue;
 			frame = pfn;
 		}
@@ -159,13 +154,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 static unsigned long __init xen_release_chunk(unsigned long start,
 					      unsigned long end)
 {
-	/*
-	 * Xen already ballooned out the E820 non RAM regions for us
-	 * and set them up properly in EPT.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return end - start;
-
 	return xen_do_chunk(start, end, true);
 }
 
@@ -234,13 +222,7 @@ static void __init xen_set_identity_and_release_chunk(
 	 * (except for the ISA region which must be 1:1 mapped) to
 	 * release the refcounts (in Xen) on the original frames.
 	 */
-
-	/*
-	 * PVH E820 matches the hypervisor's P2M which means we need to
-	 * account for the proper values of *release and *identity.
-	 */
-	for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
-	     pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
 		pte_t pte = __pte_ma(0);
 
 		if (pfn < PFN_UP(ISA_END_ADDRESS))
@@ -518,6 +500,35 @@ char * __init xen_memory_setup(void)
 }
 
 /*
+ * Machine specific memory setup for auto-translated guests.
+ */
+char * __init xen_auto_xlated_memory_setup(void)
+{
+	static struct e820entry map[E820MAX] __initdata;
+
+	struct xen_memory_map memmap;
+	int i;
+	int rc;
+
+	memmap.nr_entries = E820MAX;
+	set_xen_guest_handle(memmap.buffer, map);
+
+	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+	if (rc < 0)
+		panic("No memory map (%d)\n", rc);
+
+	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
+
+	for (i = 0; i < memmap.nr_entries; i++)
+		e820_add_region(map[i].addr, map[i].size, map[i].type);
+
+	memblock_reserve(__pa(xen_start_info->mfn_list),
+			 xen_start_info->pt_base - xen_start_info->mfn_list);
+
+	return "Xen";
+}
+
+/*
  * Set the bit indicating "nosegneg" library variants should be used.
  * We only need to bother in pure 32-bit mode; compat 32-bit processes
  * can have un-truncated segments, so wrapping around is allowed.
@@ -590,13 +601,7 @@ void xen_enable_syscall(void)
 	}
 #endif /* CONFIG_X86_64 */
 }
-void xen_enable_nmi(void)
-{
-#ifdef CONFIG_X86_64
-	if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
-		BUG();
-#endif
-}
+
 void __init xen_pvmmu_arch_setup(void)
 {
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -611,7 +616,6 @@ void __init xen_pvmmu_arch_setup(void)
 
 	xen_enable_sysenter();
 	xen_enable_syscall();
-	xen_enable_nmi();
 }
 
 /* This function is not called for HVM domains */
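
One non-obvious line in the new xen_auto_xlated_memory_setup() is the memblock_reserve() call: per the start_info fields it uses, the toolstack-built bootstrap P2M list sits between mfn_list and pt_base, and that span must stay out of the general allocator. A hedged restatement of just that step; reserve_xen_start_of_day() is a made-up helper name:

/*
 * Illustrative restatement of the reservation made inside
 * xen_auto_xlated_memory_setup(); the helper name is hypothetical.
 */
static void __init reserve_xen_start_of_day(void)
{
	phys_addr_t start = __pa(xen_start_info->mfn_list);
	phys_addr_t size = xen_start_info->pt_base -
			   xen_start_info->mfn_list;

	/* keep the bootstrap P2M list pages out of the allocator */
	memblock_reserve(start, size);
}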
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c834d4b231f0..97d87659f779 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -36,6 +36,7 @@ void xen_mm_unpin_all(void);
 void xen_set_pat(u64);
 
 char * __init xen_memory_setup(void);
+char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);