author     Ingo Molnar <mingo@elte.hu>   2010-01-29 03:24:57 -0500
committer  Ingo Molnar <mingo@elte.hu>   2010-01-29 04:36:22 -0500
commit     ae7f6711d6231c9ba54feb5ba9856c3775e482f8 (patch)
tree       89070c82204b2503348e4fd6c51d25a169375545 /arch/x86
parent     64abebf731df87e6f4ae7d9ffc340bdf0c033e44 (diff)
parent     b23ff0e9330e4b11e18af984d50573598e10e7f9 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: We want to queue up a dependent patch. Also update to
later -rc's.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/mce.h           |  3
-rw-r--r--  arch/x86/include/asm/perf_event.h    |  1
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h     | 12
-rw-r--r--  arch/x86/kernel/acpi/boot.c          | 22
-rw-r--r--  arch/x86/kernel/aperture_64.c        |  1
-rw-r--r--  arch/x86/kernel/apic/apic.c          | 15
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c  |  5
-rw-r--r--  arch/x86/kernel/apic/probe_64.c      |  8
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c   | 26
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c     |  7
-rw-r--r--  arch/x86/kernel/process.c            |  2
-rw-r--r--  arch/x86/kvm/lapic.c                 | 11
-rw-r--r--  arch/x86/kvm/mmu.c                   |  6
-rw-r--r--  arch/x86/kvm/paging_tmpl.h           |  4
-rw-r--r--  arch/x86/kvm/x86.c                   |  6
-rw-r--r--  arch/x86/mm/kmmio.c                  |  7
-rw-r--r--  arch/x86/xen/enlighten.c             |  4
17 files changed, 81 insertions, 59 deletions
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 858baa061cfc..6c3fdd631ed3 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -108,10 +108,11 @@ struct mce_log {
 #define K8_MCE_THRESHOLD_BANK_5    (MCE_THRESHOLD_BASE + 5 * 9)
 #define K8_MCE_THRESHOLD_DRAM_ECC  (MCE_THRESHOLD_BANK_4 + 0)
 
-extern struct atomic_notifier_head x86_mce_decoder_chain;
 
 #ifdef __KERNEL__
 
+extern struct atomic_notifier_head x86_mce_decoder_chain;
+
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <asm/atomic.h>
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index ff5ede128bae..befd172c82ad 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -19,6 +19,7 @@
 #define MSR_ARCH_PERFMON_EVENTSEL1     0x187
 
 #define ARCH_PERFMON_EVENTSEL0_ENABLE  (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_ANY      (1 << 21)
 #define ARCH_PERFMON_EVENTSEL_INT      (1 << 20)
 #define ARCH_PERFMON_EVENTSEL_OS       (1 << 17)
 #define ARCH_PERFMON_EVENTSEL_USR      (1 << 16)
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index bc54fa965af3..40be813fefb1 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -495,5 +495,17 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
 	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
 }
 
+/*
+ * Get the minimum revision number of the hub chips within the partition.
+ * 1 - initial rev 1.0 silicon
+ * 2 - rev 2.0 production silicon
+ */
+static inline int uv_get_min_hub_revision_id(void)
+{
+	extern int uv_min_hub_revision_id;
+
+	return uv_min_hub_revision_id;
+}
+
 #endif /* CONFIG_X86_64 */
 #endif /* _ASM_X86_UV_UV_HUB_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index fb1035cd9a6a..036d28adf59d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -1529,16 +1529,10 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
  * if acpi_blacklisted() acpi_disabled = 1;
  * acpi_irq_model=...
  * ...
- *
- * return value: (currently ignored)
- * 0: success
- * !0: failure
  */
 
-int __init acpi_boot_table_init(void)
+void __init acpi_boot_table_init(void)
 {
-	int error;
-
 	dmi_check_system(acpi_dmi_table);
 
 	/*
@@ -1546,15 +1540,14 @@ int __init acpi_boot_table_init(void)
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs
 	 */
 	if (acpi_disabled && !acpi_ht)
-		return 1;
+		return;
 
 	/*
 	 * Initialize the ACPI boot-time table parser.
 	 */
-	error = acpi_table_init();
-	if (error) {
+	if (acpi_table_init()) {
 		disable_acpi();
-		return error;
+		return;
 	}
 
 	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1562,18 +1555,15 @@ int __init acpi_boot_table_init(void)
 	/*
 	 * blacklist may disable ACPI entirely
 	 */
-	error = acpi_blacklisted();
-	if (error) {
+	if (acpi_blacklisted()) {
 		if (acpi_force) {
 			printk(KERN_WARNING PREFIX "acpi=force override\n");
 		} else {
 			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
 			disable_acpi();
-			return error;
+			return;
 		}
 	}
-
-	return 0;
 }
 
 int __init early_acpi_boot_init(void)
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 3704997e8b25..f147a95fd84a 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -31,6 +31,7 @@
 #include <asm/x86_init.h>
 
 int gart_iommu_aperture;
+EXPORT_SYMBOL_GPL(gart_iommu_aperture);
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index e80f291472a4..3987e4408f75 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -61,12 +61,6 @@ unsigned int boot_cpu_physical_apicid = -1U;
 
 /*
  * The highest APIC ID seen during enumeration.
- *
- * This determines the messaging protocol we can use: if all APIC IDs
- * are in the 0 ... 7 range, then we can use logical addressing which
- * has some performance advantages (better broadcasting).
- *
- * If there's an APIC ID above 8, we use physical addressing.
  */
 unsigned int max_physical_apicid;
 
@@ -1898,14 +1892,7 @@ void __cpuinit generic_processor_info(int apicid, int version)
 		max_physical_apicid = apicid;
 
 #ifdef CONFIG_X86_32
-	/*
-	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
-	 * but we need to work other dependencies like SMP_SUSPEND etc
-	 * before this can be done without some confusion.
-	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-	 *	- Ashok Raj <ashok.raj@intel.com>
-	 */
-	if (max_physical_apicid >= 8) {
+	if (num_processors > 8) {
 		switch (boot_cpu_data.x86_vendor) {
 		case X86_VENDOR_INTEL:
 			if (!APIC_XAPIC(version)) {
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index eacbd2b31d27..e3c3d820c325 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -240,6 +240,11 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 		printk(KERN_DEBUG "system APIC only can use physical flat");
 		return 1;
 	}
+
+	if (!strncmp(oem_id, "IBM", 3) && !strncmp(oem_table_id, "EXA", 3)) {
+		printk(KERN_DEBUG "IBM Summit detected, will use apic physical");
+		return 1;
+	}
 #endif
 
 	return 0;
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 65edc180fc82..450fe2064a14 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -64,15 +64,13 @@ void __init default_setup_apic_routing(void)
 			apic = &apic_x2apic_phys;
 		else
 			apic = &apic_x2apic_cluster;
-		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 	}
 #endif
 
-	if (apic == &apic_flat) {
-		if (max_physical_apicid >= 8)
-			apic = &apic_physflat;
-		printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
-	}
+	if (apic == &apic_flat && num_processors > 8)
+		apic = &apic_physflat;
+
+	printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
 
 	if (is_vsmp_box()) {
 		/* need to update phys_pkg_id */
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 5f92494dab61..21db3cbea7dc 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -36,6 +36,8 @@ DEFINE_PER_CPU(int, x2apic_extra_bits);
 
 static enum uv_system_type uv_system_type;
 static u64 gru_start_paddr, gru_end_paddr;
+int uv_min_hub_revision_id;
+EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
 
 static inline bool is_GRU_range(u64 start, u64 end)
 {
@@ -55,12 +57,19 @@ static int early_get_nodeid(void)
 	mmr = early_ioremap(UV_LOCAL_MMR_BASE | UVH_NODE_ID, sizeof(*mmr));
 	node_id.v = *mmr;
 	early_iounmap(mmr, sizeof(*mmr));
+
+	/* Currently, all blades have same revision number */
+	uv_min_hub_revision_id = node_id.s.revision;
+
 	return node_id.s.node_id;
 }
 
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
+	int nodeid;
+
 	if (!strcmp(oem_id, "SGI")) {
+		nodeid = early_get_nodeid();
 		x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
 		if (!strcmp(oem_table_id, "UVL"))
 			uv_system_type = UV_LEGACY_APIC;
@@ -68,7 +77,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 			uv_system_type = UV_X2APIC;
 		else if (!strcmp(oem_table_id, "UVH")) {
 			__get_cpu_var(x2apic_extra_bits) =
-				early_get_nodeid() << (UV_APIC_PNODE_SHIFT - 1);
+				nodeid << (UV_APIC_PNODE_SHIFT - 1);
 			uv_system_type = UV_NON_UNIQUE_APIC;
 			return 1;
 		}
@@ -374,13 +383,13 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 
 enum map_type {map_wb, map_uc};
 
-static __init void map_high(char *id, unsigned long base, int shift,
-			    int max_pnode, enum map_type map_type)
+static __init void map_high(char *id, unsigned long base, int pshift,
+			    int bshift, int max_pnode, enum map_type map_type)
 {
 	unsigned long bytes, paddr;
 
-	paddr = base << shift;
-	bytes = (1UL << shift) * (max_pnode + 1);
+	paddr = base << pshift;
+	bytes = (1UL << bshift) * (max_pnode + 1);
 	printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
 						paddr + bytes);
 	if (map_type == map_uc)
@@ -396,7 +405,7 @@ static __init void map_gru_high(int max_pnode)
 
 	gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
 	if (gru.s.enable) {
-		map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+		map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
 		gru_start_paddr = ((u64)gru.s.base << shift);
 		gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
 
@@ -410,7 +419,7 @@ static __init void map_mmr_high(int max_pnode)
 
 	mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
 	if (mmr.s.enable)
-		map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+		map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
 }
 
 static __init void map_mmioh_high(int max_pnode)
@@ -420,7 +429,8 @@ static __init void map_mmioh_high(int max_pnode)
 
 	mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
 	if (mmioh.s.enable)
-		map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
+		map_high("MMIOH", mmioh.s.base, shift, mmioh.s.m_io,
+			 max_pnode, map_uc);
 }
 
 static __init void map_low_mmrs(void)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 518eb3e39577..1846ead0576b 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1679,6 +1679,13 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 		bits |= 0x2;
 	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
 		bits |= 0x1;
+
+	/*
+	 * ANY bit is supported in v3 and up
+	 */
+	if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
+		bits |= 0x4;
+
 	bits <<= (idx * 4);
 	mask = 0xfULL << (idx * 4);
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c6ee241c8a98..02c3ee013ccd 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -288,6 +288,8 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 	regs.es = __USER_DS;
 	regs.fs = __KERNEL_PERCPU;
 	regs.gs = __KERNEL_STACK_CANARY;
+#else
+	regs.ss = __KERNEL_DS;
 #endif
 
 	regs.orig_ax = -1;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3063a0c4858b..ba8c045da782 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -373,6 +373,12 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 		if (unlikely(!apic_enabled(apic)))
 			break;
 
+		if (trig_mode) {
+			apic_debug("level trig mode for vector %d", vector);
+			apic_set_vector(vector, apic->regs + APIC_TMR);
+		} else
+			apic_clear_vector(vector, apic->regs + APIC_TMR);
+
 		result = !apic_test_and_set_irr(vector, apic);
 		trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
 					  trig_mode, vector, !result);
@@ -383,11 +389,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			break;
 		}
 
-		if (trig_mode) {
-			apic_debug("level trig mode for vector %d", vector);
-			apic_set_vector(vector, apic->regs + APIC_TMR);
-		} else
-			apic_clear_vector(vector, apic->regs + APIC_TMR);
 		kvm_vcpu_kick(vcpu);
 		break;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4c3e5b2314cb..89a49fb46a27 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -477,7 +477,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
 
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr))
-		return page_size;
+		return PT_PAGE_TABLE_LEVEL;
 
 	down_read(&current->mm->mmap_sem);
 	vma = find_vma(current->mm, addr);
@@ -515,11 +515,9 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 	if (host_level == PT_PAGE_TABLE_LEVEL)
 		return host_level;
 
-	for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
-
+	for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level)
 		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
 			break;
-	}
 
 	return level - 1;
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 58a0f1e88596..ede2131a9225 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -150,7 +150,9 @@ walk:
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
 
-		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
+		if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)))
+			goto not_present;
+
 		trace_kvm_mmu_paging_element(pte, walker->level);
 
 		if (!is_present_gpte(pte))
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6651dbf58675..1ddcad452add 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5072,12 +5072,13 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 				       GFP_KERNEL);
 	if (!vcpu->arch.mce_banks) {
 		r = -ENOMEM;
-		goto fail_mmu_destroy;
+		goto fail_free_lapic;
 	}
 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
 
 	return 0;
-
+fail_free_lapic:
+	kvm_free_lapic(vcpu);
 fail_mmu_destroy:
 	kvm_mmu_destroy(vcpu);
 fail_free_pio_data:
@@ -5088,6 +5089,7 @@ fail:
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+	kfree(vcpu->arch.mce_banks);
 	kvm_free_lapic(vcpu);
 	down_read(&vcpu->kvm->slots_lock);
 	kvm_mmu_destroy(vcpu);
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index c0f6198565eb..536fb6823366 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -538,14 +538,15 @@ static int
 kmmio_die_notifier(struct notifier_block *nb, unsigned long val, void *args)
 {
 	struct die_args *arg = args;
+	unsigned long* dr6_p = (unsigned long *)ERR_PTR(arg->err);
 
-	if (val == DIE_DEBUG && (arg->err & DR_STEP))
-		if (post_kmmio_handler(arg->err, arg->regs) == 1) {
+	if (val == DIE_DEBUG && (*dr6_p & DR_STEP))
+		if (post_kmmio_handler(*dr6_p, arg->regs) == 1) {
 			/*
 			 * Reset the BS bit in dr6 (pointed by args->err) to
 			 * denote completion of processing
 			 */
-			(*(unsigned long *)ERR_PTR(arg->err)) &= ~DR_STEP;
+			*dr6_p &= ~DR_STEP;
 			return NOTIFY_STOP;
 		}
 
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2b26dd5930c6..36daccb68642 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1151,9 +1151,13 @@ asmlinkage void __init xen_start_kernel(void)
 
 	/* keep using Xen gdt for now; no urgent need to change it */
 
+#ifdef CONFIG_X86_32
 	pv_info.kernel_rpl = 1;
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
 		pv_info.kernel_rpl = 0;
+#else
+	pv_info.kernel_rpl = 0;
+#endif
 
 	/* set the limit of our address space */
 	xen_reserve_top();