Diffstat (limited to 'arch/x86')
38 files changed, 481 insertions, 313 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e8327686d3c5..e330da21b84f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
-	select HAVE_PERF_EVENTS if (!M386 && !M486)
+	select HAVE_PERF_EVENTS
 	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 849813f398e7..5852519b2d0f 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -28,7 +28,6 @@
 #include <linux/syscalls.h>
 #include <linux/times.h>
 #include <linux/utsname.h>
-#include <linux/smp_lock.h>
 #include <linux/mm.h>
 #include <linux/uio.h>
 #include <linux/poll.h>
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34b0ed6..f6ce0bda3b98 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@ static inline void native_apic_msr_write(u32 reg, u32 v)
 
 static inline u32 native_apic_msr_read(u32 reg)
 {
-	u32 low, high;
+	u64 msr;
 
 	if (reg == APIC_DFR)
 		return -1;
 
-	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
-	return low;
+	rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+	return (u32)msr;
 }
 
 static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@ extern void enable_x2apic(void);
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
-	int msr, msr2;
+	u64 msr;
 
 	if (!cpu_has_x2apic)
 		return 0;
 
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	rdmsrl(MSR_IA32_APICBASE, msr);
 	if (msr & X2APIC_ENABLE)
 		return 1;
 	return 0;
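Aside on the accessor swap above: rdmsr() reads one MSR as two 32-bit halves, while rdmsrl() does a single 64-bit read, so callers stop juggling an unused 'high' half (and stop storing MSR halves in plain int). A minimal kernel-style sketch of the two patterns, using the real macros from <asm/msr.h>:

        /* Old style: split read, 'high' frequently thrown away. */
        u32 low, high;
        rdmsr(MSR_IA32_APICBASE, low, high);

        /* New style: one 64-bit read; mask out what you need. */
        u64 msr;
        rdmsrl(MSR_IA32_APICBASE, msr);
        if (msr & X2APIC_ENABLE)
                /* x2APIC mode is enabled */;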
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4d293dced62f..9479a037419f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 }
 
 /* Return an pointer with offset calculated */
-static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx,
-					phys_addr_t phys, pgprot_t flags)
+static __always_inline unsigned long
+__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
 {
 	__set_fixmap(idx, phys, flags);
 	return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3ea3dc487047..6b89f5e86021 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -128,7 +128,7 @@
 #define FAM10H_MMIO_CONF_ENABLE		(1<<0)
 #define FAM10H_MMIO_CONF_BUSRANGE_MASK	0xf
 #define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
-#define FAM10H_MMIO_CONF_BASE_MASK	0xfffffff
+#define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
 #define MSR_FAM10H_NODE_ID		0xc001100c
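The ULL suffix matters once the mask is shifted into position: as a plain int constant, 0xfffffff << FAM10H_MMIO_CONF_BASE_SHIFT is evaluated in 32-bit arithmetic and the upper bits of the 48-bit base field are silently lost. A small standalone demonstration (userspace sketch, not kernel code):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* 32-bit arithmetic: the shift wraps, leaving 0xfff00000. */
                uint64_t narrow = (uint32_t)(0xfffffffU << 20);
                /* 64-bit arithmetic: the full mask, 0xfffffff00000. */
                uint64_t wide = 0xfffffffULL << 20;

                printf("narrow=%#llx wide=%#llx\n",
                       (unsigned long long)narrow, (unsigned long long)wide);
                return 0;
        }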
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 18e3b8a8709f..ef9975812c77 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
-static inline void arch_local_irq_restore(unsigned long f)
+static inline notrace void arch_local_irq_restore(unsigned long f)
 {
 	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long f;
 
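For context: notrace expands to __attribute__((no_instrument_function)) in include/linux/compiler.h, which keeps function tracers out of these IRQ-flag helpers. A rough sketch of the recursion it prevents (the call chain in the comment is illustrative, not a real trace):

        #define notrace __attribute__((no_instrument_function))

        /*
         * Without notrace, a tracer that itself saves/restores IRQ flags
         * around every traced call can re-enter itself:
         *
         *      tracer hook
         *        -> arch_local_irq_save()      (itself instrumented)
         *             -> tracer hook           (recursion)
         */
        static inline notrace unsigned long sketch_irq_save(void)
        {
                unsigned long flags = 0;        /* stand-in for the real flag read */
                return flags;
        }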
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index e969f691cbfd..a501741c2335 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -199,6 +199,8 @@ union uvh_apicid {
 #define UVH_APICID		0x002D0E00L
 #define UV_APIC_PNODE_SHIFT	6
 
+#define UV_APICID_HIBIT_MASK	0xffff0000
+
 /* Local Bus from cpu's perspective */
 #define LOCAL_BUS_BASE		0x1c00000
 #define LOCAL_BUS_SIZE		(4 * 1024 * 1024)
@@ -491,8 +493,10 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 	}
 }
 
+extern unsigned int uv_apicid_hibits;
 static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
 {
+	apicid |= uv_apicid_hibits;
 	return (1UL << UVH_IPI_INT_SEND_SHFT) |
 			((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
 			(mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
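A rough illustration of what the hi-bits change does to a destination id (the mask value below is invented; the real one comes from the BIOS via the UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK MMR, read in x2apic_uv_x.c further down):

        unsigned int uv_apicid_hibits = 0x00010000;     /* hypothetical BIOS value */

        /* Every APIC id programmed into the hub must carry the enabled
         * high bits, otherwise interrupts routed through the hub can
         * deadlock against in-flight IO port operations. */
        static unsigned long example_dest(int apicid)
        {
                apicid |= uv_apicid_hibits;     /* e.g. 0x12 -> 0x10012 */
                return (unsigned long)apicid << UVH_IPI_INT_APIC_ID_SHFT;
        }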
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e05cec..20cafeac7455 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
  *
  * SGI UV MMR definitions
  *
- * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_X86_UV_UV_MMRS_H
@@ -754,6 +754,23 @@ union uvh_lb_bau_sb_descriptor_base_u {
 };
 
 /* ========================================================================= */
+/* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */
+/* ========================================================================= */
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0
+
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
+#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
+
+union uvh_lb_target_physical_apic_id_mask_u {
+	unsigned long v;
+	struct uvh_lb_target_physical_apic_id_mask_s {
+		unsigned long bit_enables : 32;  /* RW */
+		unsigned long rsvd_32_63  : 32;  /*    */
+	} s;
+};
+
+/* ========================================================================= */
 /* UVH_NODE_ID */
 /* ========================================================================= */
 #define UVH_NODE_ID 0x0UL
@@ -806,6 +823,78 @@ union uvh_node_present_table_u {
 };
 
 /* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+	unsigned long v;
+	struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+		unsigned long rsvd_0_23 : 24;  /*    */
+		unsigned long base      :  8;  /* RW */
+		unsigned long rsvd_32_47: 16;  /*    */
+		unsigned long m_alias   :  5;  /* RW */
+		unsigned long rsvd_53_62: 10;  /*    */
+		unsigned long enable    :  1;  /* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+	unsigned long v;
+	struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+		unsigned long rsvd_0_23 : 24;  /*    */
+		unsigned long base      :  8;  /* RW */
+		unsigned long rsvd_32_47: 16;  /*    */
+		unsigned long m_alias   :  5;  /* RW */
+		unsigned long rsvd_53_62: 10;  /*    */
+		unsigned long enable    :  1;  /* RW */
+	} s;
+};
+
+/* ========================================================================= */
+/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+	unsigned long v;
+	struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+		unsigned long rsvd_0_23 : 24;  /*    */
+		unsigned long base      :  8;  /* RW */
+		unsigned long rsvd_32_47: 16;  /*    */
+		unsigned long m_alias   :  5;  /* RW */
+		unsigned long rsvd_53_62: 10;  /*    */
+		unsigned long enable    :  1;  /* RW */
+	} s;
+};
+
+/* ========================================================================= */
 /* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
 /* ========================================================================= */
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +946,29 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
 };
 
 /* ========================================================================= */
+/* UVH_RH_GAM_CONFIG_MMR */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+	unsigned long v;
+	struct uvh_rh_gam_config_mmr_s {
+		unsigned long m_skt     :  6;  /* RW */
+		unsigned long n_skt     :  4;  /* RW */
+		unsigned long rsvd_10_11:  2;  /*    */
+		unsigned long mmiol_cfg :  1;  /* RW */
+		unsigned long rsvd_13_63: 51;  /*    */
+	} s;
+};
+
+/* ========================================================================= */
 /* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
 /* ========================================================================= */
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1099,5 @@ union uvh_rtc1_int_config_u {
 	} s;
 };
 
-/* ========================================================================= */
-/* UVH_SI_ADDR_MAP_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
-
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
-	unsigned long v;
-	struct uvh_si_addr_map_config_s {
-		unsigned long m_skt     :  6;  /* RW */
-		unsigned long rsvd_6_7  :  2;  /*    */
-		unsigned long n_skt     :  4;  /* RW */
-		unsigned long rsvd_12_63: 52;  /*    */
-	} s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS0_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
-	unsigned long v;
-	struct uvh_si_alias0_overlay_config_s {
-		unsigned long rsvd_0_23 : 24;  /*    */
-		unsigned long base      :  8;  /* RW */
-		unsigned long rsvd_32_47: 16;  /*    */
-		unsigned long m_alias   :  5;  /* RW */
-		unsigned long rsvd_53_62: 10;  /*    */
-		unsigned long enable    :  1;  /* RW */
-	} s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS1_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
-	unsigned long v;
-	struct uvh_si_alias1_overlay_config_s {
-		unsigned long rsvd_0_23 : 24;  /*    */
-		unsigned long base      :  8;  /* RW */
-		unsigned long rsvd_32_47: 16;  /*    */
-		unsigned long m_alias   :  5;  /* RW */
-		unsigned long rsvd_53_62: 10;  /*    */
-		unsigned long enable    :  1;  /* RW */
-	} s;
-};
-
-/* ========================================================================= */
-/* UVH_SI_ALIAS2_OVERLAY_CONFIG */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
-	unsigned long v;
-	struct uvh_si_alias2_overlay_config_s {
-		unsigned long rsvd_0_23 : 24;  /*    */
-		unsigned long base      :  8;  /* RW */
-		unsigned long rsvd_32_47: 16;  /*    */
-		unsigned long m_alias   :  5;  /* RW */
-		unsigned long rsvd_53_62: 10;  /*    */
-		unsigned long enable    :  1;  /* RW */
-	} s;
-};
-
 
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
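All of these register descriptions follow the same pattern: read the whole 64-bit MMR into the union's .v member, then pick fields out of the bitfield struct. A minimal sketch using the new config register (the value source is illustrative; the real read happens in uv_system_init() below):

        static void decode_gam_config(unsigned long mmr) /* e.g. from uv_read_local_mmr() */
        {
                union uvh_rh_gam_config_mmr_u cfg;

                cfg.v = mmr;
                /* m_skt/n_skt give the socket address-map geometry;
                 * mmiol_cfg flags the MMIOL layout. */
                pr_info("m_skt=%lu n_skt=%lu mmiol_cfg=%lu\n",
                        cfg.s.m_skt, cfg.s.n_skt, cfg.s.mmiol_cfg);
        }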
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index e8506c1f0c55..1c10c88ee4e1 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void);
 #define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
 #endif
 
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
+#define MACH2PHYS_VIRT_END   mk_unsigned_long(__MACH2PHYS_VIRT_END)
+#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
 
 /* Maximum number of virtual CPUs in multi-processor guests. */
 #define MAX_VIRT_CPUS 32
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h
index 42a7e004ae5c..8413688b2571 100644
--- a/arch/x86/include/asm/xen/interface_32.h
+++ b/arch/x86/include/asm/xen/interface_32.h
@@ -32,6 +32,11 @@
 /* And the trap vector is... */
 #define TRAP_INSTR "int $0x82"
 
+#define __MACH2PHYS_VIRT_START	0xF5800000
+#define __MACH2PHYS_VIRT_END	0xF6800000
+
+#define __MACH2PHYS_SHIFT	2
+
 /*
  * Virtual addresses beyond this are not modifiable by guest OSes. The
  * machine->physical mapping table starts at this address, read-only.
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index 100d2662b97c..839a4811cf98 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -39,18 +39,7 @@
 #define __HYPERVISOR_VIRT_END   0xFFFF880000000000
 #define __MACH2PHYS_VIRT_START  0xFFFF800000000000
 #define __MACH2PHYS_VIRT_END    0xFFFF804000000000
-
-#ifndef HYPERVISOR_VIRT_START
-#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
-#define HYPERVISOR_VIRT_END   mk_unsigned_long(__HYPERVISOR_VIRT_END)
-#endif
-
-#define MACH2PHYS_VIRT_START  mk_unsigned_long(__MACH2PHYS_VIRT_START)
-#define MACH2PHYS_VIRT_END    mk_unsigned_long(__MACH2PHYS_VIRT_END)
-#define MACH2PHYS_NR_ENTRIES  ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
-#ifndef machine_to_phys_mapping
-#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
-#endif
+#define __MACH2PHYS_SHIFT       3
 
 /*
  * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
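The per-width __MACH2PHYS_SHIFT is just log2(sizeof(unsigned long)) for the guest, so the single MACH2PHYS_NR_ENTRIES definition in interface.h now works for both word sizes. Worked out (comment only):

        /* 64-bit: (0xFFFF804000000000 - 0xFFFF800000000000) >> 3
         *       = 0x4000000000 >> 3 = 0x800000000 entries of 8 bytes
         * 32-bit: (0xF6800000 - 0xF5800000) >> 2
         *       = 0x01000000 >> 2 = 0x400000 entries of 4 bytes
         */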
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index dd8c1414b3d5..8760cc60a21c 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/pfn.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -35,6 +36,8 @@ typedef struct xpaddr {
 #define MAX_DOMAIN_PAGES						\
     ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
 
+extern unsigned long *machine_to_phys_mapping;
+extern unsigned int   machine_to_phys_order;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
@@ -69,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return mfn;
 
-#if 0
 	if (unlikely((mfn >> machine_to_phys_order) != 0))
-		return max_mapnr;
-#endif
+		return ~0;
 
 	pfn = 0;
 	/*
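Re-enabling this bounds check is what the new machine_to_phys_mapping/machine_to_phys_order exports are for; ~0 (all-ones) serves as an impossible PFN. Simplified shape of the check (a sketch, not the full mfn_to_pfn()):

        /* An mfn with bits at or above machine_to_phys_order lies outside
         * the m2p table, so bail out with an invalid pfn instead of
         * indexing past the end of machine_to_phys_mapping[]. */
        static unsigned long m2p_lookup(unsigned long mfn)
        {
                if (unlikely((mfn >> machine_to_phys_order) != 0))
                        return ~0UL;
                return machine_to_phys_mapping[mfn];
        }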
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d1b0ed..3f838d537392 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
-#include <asm/atomic.h>
 
 unsigned int num_processors;
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index cefd6942f0e9..62f6e1e55b90 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -17,15 +17,16 @@
 #include <linux/nmi.h>
 #include <linux/module.h>
 
-/* For reliability, we're prepared to waste bits here. */
-static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
-
 u64 hw_nmi_get_sample_period(void)
 {
 	return (u64)(cpu_khz) * 1000 * 60;
 }
 
 #ifdef ARCH_HAS_NMI_WATCHDOG
+
+/* For reliability, we're prepared to waste bits here. */
+static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
+
 void arch_trigger_all_cpu_backtrace(void)
 {
 	int i;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ed4118de249e..c1c52c341f40 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -44,6 +44,8 @@ static u64 gru_start_paddr, gru_end_paddr;
 static union uvh_apicid uvh_apicid;
 int uv_min_hub_revision_id;
 EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
+unsigned int uv_apicid_hibits;
+EXPORT_SYMBOL_GPL(uv_apicid_hibits);
 static DEFINE_SPINLOCK(uv_nmi_lock);
 
 static inline bool is_GRU_range(u64 start, u64 end)
@@ -85,6 +87,23 @@ static void __init early_get_apic_pnode_shift(void)
 		uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
 }
 
+/*
+ * Add an extra bit as dictated by bios to the destination apicid of
+ * interrupts potentially passing through the UV HUB.  This prevents
+ * a deadlock between interrupts and IO port operations.
+ */
+static void __init uv_set_apicid_hibit(void)
+{
+	union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
+	unsigned long *mmr;
+
+	mmr = early_ioremap(UV_LOCAL_MMR_BASE |
+		UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr));
+	apicid_mask.v = *mmr;
+	early_iounmap(mmr, sizeof(*mmr));
+	uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
+}
+
 static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	int nodeid;
@@ -102,6 +121,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 			__get_cpu_var(x2apic_extra_bits) =
 				nodeid << (uvh_apicid.s.pnode_shift - 1);
 			uv_system_type = UV_NON_UNIQUE_APIC;
+			uv_set_apicid_hibit();
 			return 1;
 		}
 	}
@@ -155,6 +175,7 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri
 	int pnode;
 
 	pnode = uv_apicid_to_pnode(phys_apicid);
+	phys_apicid |= uv_apicid_hibits;
 	val = (1UL << UVH_IPI_INT_SEND_SHFT) |
 	    (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
 	    ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
@@ -236,7 +257,7 @@ static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
 	int cpu = cpumask_first(cpumask);
 
 	if ((unsigned)cpu < nr_cpu_ids)
-		return per_cpu(x86_cpu_to_apicid, cpu);
+		return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
 	else
 		return BAD_APICID;
 }
@@ -255,7 +276,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
 			break;
 	}
-	return per_cpu(x86_cpu_to_apicid, cpu);
+	return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
 }
 
 static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -379,14 +400,14 @@ struct redir_addr {
 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
 
 static __initdata struct redir_addr redir_addrs[] = {
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
 };
 
 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 {
-	union uvh_si_alias0_overlay_config_u alias;
+	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
 	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
 	int i;
 
@@ -660,7 +681,7 @@ void uv_nmi_init(void)
 
 void __init uv_system_init(void)
 {
-	union uvh_si_addr_map_config_u m_n_config;
+	union uvh_rh_gam_config_mmr_u m_n_config;
 	union uvh_node_id_u node_id;
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +691,7 @@ void __init uv_system_init(void)
 
 	map_low_mmrs();
 
-	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
 	m_val = m_n_config.s.m_skt;
 	n_val = m_n_config.s.n_skt;
 	mmr_base =
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index ed6310183efb..6d75b9145b13 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -381,6 +381,20 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
+static bool check_hw_exists(void)
+{
+	u64 val, val_new = 0;
+	int ret = 0;
+
+	val = 0xabcdUL;
+	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	if (ret || val != val_new)
+		return false;
+
+	return true;
+}
+
 static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
@@ -1372,6 +1386,12 @@ void __init init_hw_perf_events(void)
 
 	pmu_check_apic();
 
+	/* sanity check that the hardware exists or is emulated */
+	if (!check_hw_exists()) {
+		pr_cont("Broken PMU hardware detected, software events only.\n");
+		return;
+	}
+
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
 	if (x86_pmu.quirks)
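The probe works because checking_wrmsrl() and rdmsrl_safe() trap the #GP a missing MSR raises and return non-zero instead of oopsing, which is the typical situation on virtual machines whose hypervisor does not emulate the PMU. The write/read-back pattern in isolation (a generic sketch, not the patch's exact code):

        /* Either accessor returning non-zero (the MSR faulted) or a
         * value mismatch means there is no usable counter behind it. */
        static bool msr_probe(unsigned int msr, u64 probe)
        {
                u64 readback = 0;
                int err = 0;

                err |= checking_wrmsrl(msr, probe);
                err |= rdmsrl_safe(msr, &readback);
                return !err && readback == probe;
        }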
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d58448c3af..e421b8cd6944 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	struct amd_nb *nb;
 	int i;
 
-	nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+			  cpu_to_node(cpu));
 	if (!nb)
 		return NULL;
 
-	memset(nb, 0, sizeof(*nb));
 	nb->nb_id = nb_id;
 
 	/*
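Two things change at once here: the allocation becomes NUMA-local to the CPU's node, and __GFP_ZERO replaces the separate memset(). The same result can be written with the dedicated helper (a hedged equivalent, not the patch's wording):

        /* kzalloc_node() == kmalloc_node() with __GFP_ZERO folded in */
        struct amd_nb *nb = kzalloc_node(sizeof(*nb), GFP_KERNEL,
                                         cpu_to_node(cpu));
        if (!nb)
                return NULL;
        nb->nb_id = nb_id;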
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1b7b31ab7d86..212a6a42527c 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -33,7 +33,6 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/device.h>
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 59e175e89599..591e60104278 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -395,7 +395,7 @@ sysenter_past_esp:
 	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
 	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
 	 */
-	pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp)
+	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
 	CFI_REL_OFFSET eip, 0
 
 	pushl_cfi %eax
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fe2690d71c0c..e3ba417e8697 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64)
 	.endm
 
 /* save partial stack frame */
+	.pushsection .kprobes.text, "ax"
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
@@ -334,6 +335,7 @@ ENTRY(save_args)
 	ret
 	CFI_ENDPROC
 END(save_args)
+	.popsection
 
 ENTRY(save_rest)
 	PARTIAL_FRAME 1 REST_SKIP+8
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9dcc25d..42c594254507 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
 	dr6_p = (unsigned long *)ERR_PTR(args->err);
 	dr6 = *dr6_p;
 
+	/* If it's a single step, TRAP bits are random */
+	if (dr6 & DR_STEP)
+		return NOTIFY_DONE;
+
 	/* Do an early return if no trap bits are set in DR6 */
 	if ((dr6 & DR_TRAP_BITS) == 0)
 		return NOTIFY_DONE;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ec592caac4b4..cd21b654dec6 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -315,14 +315,18 @@ static void kgdb_remove_all_hw_break(void)
 		if (!breakinfo[i].enabled)
 			continue;
 		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
-		if (bp->attr.disabled == 1)
+		if (!bp->attr.disabled) {
+			arch_uninstall_hw_breakpoint(bp);
+			bp->attr.disabled = 1;
 			continue;
+		}
 		if (dbg_is_early)
 			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
 						 breakinfo[i].type);
-		else
-			arch_uninstall_hw_breakpoint(bp);
-		bp->attr.disabled = 1;
+		else if (hw_break_release_slot(i))
+			printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
+			       breakinfo[i].addr);
+		breakinfo[i].enabled = 0;
 	}
 }
 
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c055c7d..ce0cb4721c9a 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@ static int install_equiv_cpu_table(const u8 *buf)
 		return 0;
 	}
 
-	equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+	equiv_cpu_table = vmalloc(size);
 	if (!equiv_cpu_table) {
 		pr_err("failed to allocate equivalent CPU table\n");
 		return 0;
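The dropped cast is pure noise: vmalloc() returns void *, which C converts implicitly to any object pointer type, so the assignment is already well-formed without it:

        struct equiv_cpu_entry *table;

        table = vmalloc(size);  /* implicit conversion from void * */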
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 71825806cd44..ac861b8348e2 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -25,7 +25,6 @@ struct pci_hostbridge_probe {
 };
 
 static u64 __cpuinitdata fam10h_pci_mmconf_base;
-static int __cpuinitdata fam10h_pci_mmconf_base_status;
 
 static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
 	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
@@ -44,10 +43,12 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
 	return start1 - start2;
 }
 
-/*[47:0] */
-/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */
+#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
+#define MMCONF_MASK (~(MMCONF_UNIT - 1))
+#define MMCONF_SIZE (MMCONF_UNIT << 8)
+/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
 #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
-#define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32)))
+#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
 static void __cpuinit get_fam10h_pci_mmconf_base(void)
 {
 	int i;
@@ -64,12 +65,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
 	struct range range[8];
 
 	/* only try to get setting from BSP */
-	/* -1 or 1 */
-	if (fam10h_pci_mmconf_base_status)
+	if (fam10h_pci_mmconf_base)
 		return;
 
 	if (!early_pci_allowed())
-		goto fail;
+		return;
 
 	found = 0;
 	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
@@ -91,7 +91,7 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
 	}
 
 	if (!found)
-		goto fail;
+		return;
 
 	/* SYS_CFG */
 	address = MSR_K8_SYSCFG;
@@ -99,16 +99,16 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
 
 	/* TOP_MEM2 is not enabled? */
 	if (!(val & (1<<21))) {
-		tom2 = 0;
+		tom2 = 1ULL << 32;
 	} else {
 		/* TOP_MEM2 */
 		address = MSR_K8_TOP_MEM2;
 		rdmsrl(address, val);
-		tom2 = val & (0xffffULL<<32);
+		tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
 	}
 
 	if (base <= tom2)
-		base = tom2 + (1ULL<<32);
+		base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;
 
 	/*
 	 * need to check if the range is in the high mmio range that is
@@ -123,11 +123,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
 		if (!(reg & 3))
 			continue;
 
-		start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+		start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/
 		reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
-		end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+		end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/
 
-		if (!end)
+		if (end < tom2)
 			continue;
 
 		range[hi_mmio_num].start = start;
@@ -143,32 +143,27 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
 
 	if (range[hi_mmio_num - 1].end < base)
 		goto out;
-	if (range[0].start > base)
+	if (range[0].start > base + MMCONF_SIZE)
 		goto out;
 
 	/* need to find one window */
-	base = range[0].start - (1ULL << 32);
+	base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
 	if ((base > tom2) && BASE_VALID(base))
 		goto out;
-	base = range[hi_mmio_num - 1].end + (1ULL << 32);
-	if ((base > tom2) && BASE_VALID(base))
+	base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
+	if (BASE_VALID(base))
 		goto out;
 	/* need to find window between ranges */
-	if (hi_mmio_num > 1)
-		for (i = 0; i < hi_mmio_num - 1; i++) {
-			if (range[i + 1].start > (range[i].end + (1ULL << 32))) {
-				base = range[i].end + (1ULL << 32);
-				if ((base > tom2) && BASE_VALID(base))
-					goto out;
-			}
-		}
-
-fail:
-	fam10h_pci_mmconf_base_status = -1;
+	for (i = 1; i < hi_mmio_num; i++) {
+		base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
+		val = range[i].start & MMCONF_MASK;
+		if (val >= base + MMCONF_SIZE && BASE_VALID(base))
+			goto out;
+	}
 	return;
+
 out:
 	fam10h_pci_mmconf_base = base;
-	fam10h_pci_mmconf_base_status = 1;
 }
 
@@ -190,11 +185,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
 
 	/* only trust the one handle 256 buses, if acpi=off */
 	if (!acpi_pci_disabled || busnbits >= 8) {
-		u64 base;
-		base = val & (0xffffULL << 32);
-		if (fam10h_pci_mmconf_base_status <= 0) {
+		u64 base = val & MMCONF_MASK;
+
+		if (!fam10h_pci_mmconf_base) {
 			fam10h_pci_mmconf_base = base;
-			fam10h_pci_mmconf_base_status = 1;
 			return;
 		} else if (fam10h_pci_mmconf_base == base)
 			return;
@@ -206,8 +200,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
 	 * with 256 buses
 	 */
 	get_fam10h_pci_mmconf_base();
-	if (fam10h_pci_mmconf_base_status <= 0)
+	if (!fam10h_pci_mmconf_base) {
+		pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF;
 		return;
+	}
 
 	printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
 	val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
@@ -217,13 +213,13 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
 	wrmsrl(address, val);
 }
 
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
 {
 	pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
 	return 0;
 }
 
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
 	{
 		.callback = set_check_enable_amd_mmconf,
 		.ident = "Sun Microsystems Machine",
@@ -234,7 +230,8 @@ static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
 	{}
 };
 
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
 {
 	dmi_check_system(mmconf_dmi_table);
 }
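With FAM10H_MMIO_CONF_BASE_SHIFT = 20, the new helper macros work out as follows (a comment-style worked example, not kernel code):

        /* MMCONF_UNIT = 1ULL << 20         = 0x100000   (1 MiB alignment)
         * MMCONF_MASK = ~(MMCONF_UNIT - 1) = ~0xfffff   (round down to 1 MiB)
         * MMCONF_SIZE = MMCONF_UNIT << 8   = 0x10000000 (256 MiB: 256 buses
         *                                    x 1 MiB of config space each)
         *
         * So BASE_VALID(b) now checks that the whole 256 MiB window either
         * fits below the HyperTransport hole at 0xfd00000000 or sits
         * at/above 1 TiB, instead of only dodging two exact base values.
         */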
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7bf2dc4c8f70..12fcbe2c143e 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -30,7 +30,6 @@
 #include <linux/init.h>
 #include <linux/poll.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/device.h>
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e6f66d..008b91eefa18 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@ void pvclock_set_flags(u8 flags)
 	valid_flags = flags;
 }
 
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-	u64 product;
-#ifdef __i386__
-	u32 tmp1, tmp2;
-#endif
-
-	if (shift < 0)
-		delta >>= -shift;
-	else
-		delta <<= shift;
-
-#ifdef __i386__
-	__asm__ (
-		"mul %5 ; "
-		"mov %4,%%eax ; "
-		"mov %%edx,%4 ; "
-		"mul %5 ; "
-		"xor %5,%5 ; "
-		"add %4,%%eax ; "
-		"adc %5,%%edx ; "
-		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
-	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-	return product;
-}
-
 static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
 {
 	u64 delta = native_read_tsc() - shadow->tsc_timestamp;
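For reference, the removed helper computes ((delta << shift) * mul_frac) >> 32, i.e. a 32.32 fixed-point scaling of the TSC delta; the inline asm only exists because the kernel avoids 128-bit types. In portable C with 128-bit support the same math is simply (a hedged sketch, not kernel code):

        #include <stdint.h>

        static inline uint64_t scale_delta_portable(uint64_t delta,
                                                    uint32_t mul_frac, int shift)
        {
                if (shift < 0)
                        delta >>= -shift;
                else
                        delta <<= shift;

                /* 64x32 -> 96-bit multiply, keep bits [95:32] -- exactly
                 * what the "mul; shrd $32" sequence produced. */
                return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
        }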
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a51..fb8b376bf28c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 82e144a4e514..1ca12298ffc7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	load_host_msrs(vcpu);
+	kvm_load_ldt(ldt_selector);
 	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
 	load_gs_index(gs_selector);
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #else
 	loadsegment(gs, gs_selector);
 #endif
-	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 8da0e45ff7c9..ff21fdda0c53 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
821 | #endif | 821 | #endif |
822 | 822 | ||
823 | #ifdef CONFIG_X86_64 | 823 | #ifdef CONFIG_X86_64 |
824 | if (is_long_mode(&vmx->vcpu)) { | 824 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
825 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | 825 | if (is_long_mode(&vmx->vcpu)) |
826 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | 826 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
827 | } | ||
828 | #endif | 827 | #endif |
829 | for (i = 0; i < vmx->save_nmsrs; ++i) | 828 | for (i = 0; i < vmx->save_nmsrs; ++i) |
830 | kvm_set_shared_msr(vmx->guest_msrs[i].index, | 829 | kvm_set_shared_msr(vmx->guest_msrs[i].index, |
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
839 | 838 | ||
840 | ++vmx->vcpu.stat.host_state_reload; | 839 | ++vmx->vcpu.stat.host_state_reload; |
841 | vmx->host_state.loaded = 0; | 840 | vmx->host_state.loaded = 0; |
842 | if (vmx->host_state.fs_reload_needed) | 841 | #ifdef CONFIG_X86_64 |
843 | loadsegment(fs, vmx->host_state.fs_sel); | 842 | if (is_long_mode(&vmx->vcpu)) |
843 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | ||
844 | #endif | ||
844 | if (vmx->host_state.gs_ldt_reload_needed) { | 845 | if (vmx->host_state.gs_ldt_reload_needed) { |
845 | kvm_load_ldt(vmx->host_state.ldt_sel); | 846 | kvm_load_ldt(vmx->host_state.ldt_sel); |
846 | #ifdef CONFIG_X86_64 | 847 | #ifdef CONFIG_X86_64 |
847 | load_gs_index(vmx->host_state.gs_sel); | 848 | load_gs_index(vmx->host_state.gs_sel); |
848 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
849 | #else | 849 | #else |
850 | loadsegment(gs, vmx->host_state.gs_sel); | 850 | loadsegment(gs, vmx->host_state.gs_sel); |
851 | #endif | 851 | #endif |
852 | } | 852 | } |
853 | if (vmx->host_state.fs_reload_needed) | ||
854 | loadsegment(fs, vmx->host_state.fs_sel); | ||
853 | reload_tss(); | 855 | reload_tss(); |
854 | #ifdef CONFIG_X86_64 | 856 | #ifdef CONFIG_X86_64 |
855 | if (is_long_mode(&vmx->vcpu)) { | 857 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
856 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | ||
857 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | ||
858 | } | ||
859 | #endif | 858 | #endif |
860 | if (current_thread_info()->status & TS_USEDFPU) | 859 | if (current_thread_info()->status & TS_USEDFPU) |
861 | clts(); | 860 | clts(); |
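Note on the vmx.c hunks: the host's MSR_KERNEL_GS_BASE is now saved and restored unconditionally, and on the restore path the guest's value is read back before load_gs_index(). The ordering matters because load_gs_index() internally performs swapgs / mov %gs / swapgs, which leaves MSR_KERNEL_GS_BASE holding the freshly loaded base. A toy model of that clobbering (all names invented; this is not the kernel's code):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t gs_base, kernel_gs_base;

    /* swapgs exchanges GS.base with MSR_KERNEL_GS_BASE */
    static void swapgs(void)
    {
        uint64_t tmp = gs_base;
        gs_base = kernel_gs_base;
        kernel_gs_base = tmp;
    }

    static void load_gs_index_model(uint64_t base_from_descriptor)
    {
        swapgs();
        gs_base = base_from_descriptor;  /* mov %gs loads a new base */
        swapgs();                        /* swap back: MSR is clobbered */
    }

    int main(void)
    {
        kernel_gs_base = 0x1111;            /* guest value to preserve */
        uint64_t guest = kernel_gs_base;    /* read it back first...   */
        load_gs_index_model(0x2222);        /* ...because this step    */
        assert(kernel_gs_base == 0x2222);   /* destroys the MSR value  */
        assert(guest == 0x1111);
        return 0;
    }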
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2288ad829b32..cdac9e592aa5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -2560,6 +2560,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
2560 | !kvm_exception_is_soft(vcpu->arch.exception.nr); | 2560 | !kvm_exception_is_soft(vcpu->arch.exception.nr); |
2561 | events->exception.nr = vcpu->arch.exception.nr; | 2561 | events->exception.nr = vcpu->arch.exception.nr; |
2562 | events->exception.has_error_code = vcpu->arch.exception.has_error_code; | 2562 | events->exception.has_error_code = vcpu->arch.exception.has_error_code; |
2563 | events->exception.pad = 0; | ||
2563 | events->exception.error_code = vcpu->arch.exception.error_code; | 2564 | events->exception.error_code = vcpu->arch.exception.error_code; |
2564 | 2565 | ||
2565 | events->interrupt.injected = | 2566 | events->interrupt.injected = |
@@ -2573,12 +2574,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
2573 | events->nmi.injected = vcpu->arch.nmi_injected; | 2574 | events->nmi.injected = vcpu->arch.nmi_injected; |
2574 | events->nmi.pending = vcpu->arch.nmi_pending; | 2575 | events->nmi.pending = vcpu->arch.nmi_pending; |
2575 | events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); | 2576 | events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); |
2577 | events->nmi.pad = 0; | ||
2576 | 2578 | ||
2577 | events->sipi_vector = vcpu->arch.sipi_vector; | 2579 | events->sipi_vector = vcpu->arch.sipi_vector; |
2578 | 2580 | ||
2579 | events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | 2581 | events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING |
2580 | | KVM_VCPUEVENT_VALID_SIPI_VECTOR | 2582 | | KVM_VCPUEVENT_VALID_SIPI_VECTOR |
2581 | | KVM_VCPUEVENT_VALID_SHADOW); | 2583 | | KVM_VCPUEVENT_VALID_SHADOW); |
2584 | memset(&events->reserved, 0, sizeof(events->reserved)); | ||
2582 | } | 2585 | } |
2583 | 2586 | ||
2584 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | 2587 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, |
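Note on the x86.c hunks in this file: they all follow one pattern. Structures about to be copied to userspace get their pad and reserved fields explicitly zeroed, because copy_to_user() copies raw bytes and uninitialized padding would leak kernel stack contents. An illustrative userspace version of the idiom (field names are made up):

    #include <stdint.h>
    #include <string.h>

    struct event {
        uint8_t  injected;
        uint8_t  nr;
        uint8_t  has_error_code;
        uint8_t  pad;         /* explicit pad byte: must be cleared   */
        uint32_t error_code;
        uint32_t reserved[9]; /* reserved tail: must be cleared too   */
    };

    static void fill_event(struct event *e)
    {
        memset(e, 0, sizeof(*e));   /* zero everything first */
        e->injected = 1;
        e->nr = 14;
        e->has_error_code = 1;
        e->error_code = 0x2;
        /* e->pad and e->reserved stay zero: nothing stale escapes */
    }

    int main(void)
    {
        struct event e;
        fill_event(&e);
        return e.pad;   /* 0 */
    }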
@@ -2623,6 +2626,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, | |||
2623 | dbgregs->dr6 = vcpu->arch.dr6; | 2626 | dbgregs->dr6 = vcpu->arch.dr6; |
2624 | dbgregs->dr7 = vcpu->arch.dr7; | 2627 | dbgregs->dr7 = vcpu->arch.dr7; |
2625 | dbgregs->flags = 0; | 2628 | dbgregs->flags = 0; |
2629 | memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); | ||
2626 | } | 2630 | } |
2627 | 2631 | ||
2628 | static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, | 2632 | static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, |
@@ -3106,6 +3110,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) | |||
3106 | sizeof(ps->channels)); | 3110 | sizeof(ps->channels)); |
3107 | ps->flags = kvm->arch.vpit->pit_state.flags; | 3111 | ps->flags = kvm->arch.vpit->pit_state.flags; |
3108 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); | 3112 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); |
3113 | memset(&ps->reserved, 0, sizeof(ps->reserved)); | ||
3109 | return r; | 3114 | return r; |
3110 | } | 3115 | } |
3111 | 3116 | ||
@@ -3169,10 +3174,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
3169 | struct kvm_memslots *slots, *old_slots; | 3174 | struct kvm_memslots *slots, *old_slots; |
3170 | unsigned long *dirty_bitmap; | 3175 | unsigned long *dirty_bitmap; |
3171 | 3176 | ||
3172 | spin_lock(&kvm->mmu_lock); | ||
3173 | kvm_mmu_slot_remove_write_access(kvm, log->slot); | ||
3174 | spin_unlock(&kvm->mmu_lock); | ||
3175 | |||
3176 | r = -ENOMEM; | 3177 | r = -ENOMEM; |
3177 | dirty_bitmap = vmalloc(n); | 3178 | dirty_bitmap = vmalloc(n); |
3178 | if (!dirty_bitmap) | 3179 | if (!dirty_bitmap) |
@@ -3194,6 +3195,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
3194 | dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; | 3195 | dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; |
3195 | kfree(old_slots); | 3196 | kfree(old_slots); |
3196 | 3197 | ||
3198 | spin_lock(&kvm->mmu_lock); | ||
3199 | kvm_mmu_slot_remove_write_access(kvm, log->slot); | ||
3200 | spin_unlock(&kvm->mmu_lock); | ||
3201 | |||
3197 | r = -EFAULT; | 3202 | r = -EFAULT; |
3198 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { | 3203 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { |
3199 | vfree(dirty_bitmap); | 3204 | vfree(dirty_bitmap); |
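Note on the dirty-log reordering: moving kvm_mmu_slot_remove_write_access() to after the memslot swap closes a logging window. With the old order, a guest write occurring between the write-protect and the swap would fault, mark the soon-to-be-reported bitmap, and leave the page writable, so later writes went unlogged in the fresh bitmap. Protecting after the fresh bitmap is published means every subsequent write faults into it. A miniature of the hand-off, with invented names:

    #include <stdlib.h>

    struct slot { unsigned long *dirty_bitmap; size_t words; };

    static unsigned long *swap_in_fresh_bitmap(struct slot *s)
    {
        unsigned long *old = s->dirty_bitmap;
        s->dirty_bitmap = calloc(s->words, sizeof(unsigned long));
        return old;                      /* frozen: safe to report */
    }

    int main(void)
    {
        struct slot s = { calloc(4, sizeof(unsigned long)), 4 };
        s.dirty_bitmap[0] |= 1UL;        /* write logged before the swap */
        unsigned long *report = swap_in_fresh_bitmap(&s);
        /* only now write-protect the slot: a racing write from here on
         * faults and lands in the fresh bitmap, not in `report` */
        s.dirty_bitmap[0] |= 2UL;
        free(report);
        free(s.dirty_bitmap);
        return 0;
    }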
@@ -3486,6 +3491,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
3486 | user_ns.clock = kvm->arch.kvmclock_offset + now_ns; | 3491 | user_ns.clock = kvm->arch.kvmclock_offset + now_ns; |
3487 | local_irq_enable(); | 3492 | local_irq_enable(); |
3488 | user_ns.flags = 0; | 3493 | user_ns.flags = 0; |
3494 | memset(&user_ns.pad, 0, sizeof(user_ns.pad)); | ||
3489 | 3495 | ||
3490 | r = -EFAULT; | 3496 | r = -EFAULT; |
3491 | if (copy_to_user(argp, &user_ns, sizeof(user_ns))) | 3497 | if (copy_to_user(argp, &user_ns, sizeof(user_ns))) |
@@ -3972,8 +3978,10 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) | |||
3972 | return X86EMUL_CONTINUE; | 3978 | return X86EMUL_CONTINUE; |
3973 | 3979 | ||
3974 | if (kvm_x86_ops->has_wbinvd_exit()) { | 3980 | if (kvm_x86_ops->has_wbinvd_exit()) { |
3981 | preempt_disable(); | ||
3975 | smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, | 3982 | smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, |
3976 | wbinvd_ipi, NULL, 1); | 3983 | wbinvd_ipi, NULL, 1); |
3984 | preempt_enable(); | ||
3977 | cpumask_clear(vcpu->arch.wbinvd_dirty_mask); | 3985 | cpumask_clear(vcpu->arch.wbinvd_dirty_mask); |
3978 | } | 3986 | } |
3979 | wbinvd(); | 3987 | wbinvd(); |
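Note on the wbinvd hunk: smp_call_function_many() is now wrapped in preempt_disable()/preempt_enable(), since it operates on the calling CPU's identity and the caller must not migrate while the cross-CPU calls are issued and waited on. The resulting pattern, as a kernel-only sketch (not buildable outside a kernel tree; names invented):

    #include <linux/smp.h>
    #include <linux/preempt.h>
    #include <linux/cpumask.h>

    static void flush_on_cpus(const struct cpumask *mask,
                              void (*fn)(void *))
    {
        preempt_disable();
        smp_call_function_many(mask, fn, NULL, 1);  /* 1 == wait */
        preempt_enable();
    }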
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 49358481c733..6acc724d5d8f 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask, | |||
223 | 223 | ||
224 | static void __cpuinit calculate_tlb_offset(void) | 224 | static void __cpuinit calculate_tlb_offset(void) |
225 | { | 225 | { |
226 | int cpu, node, nr_node_vecs; | 226 | int cpu, node, nr_node_vecs, idx = 0; |
227 | /* | 227 | /* |
228 | * we are changing tlb_vector_offset for each CPU in runtime, but this | 228 | * we are changing tlb_vector_offset for each CPU in runtime, but this |
229 | * will not cause inconsistency, as the write is atomic under X86. we | 229 | * will not cause inconsistency, as the write is atomic under X86. we |
@@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void) | |||
239 | nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; | 239 | nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; |
240 | 240 | ||
241 | for_each_online_node(node) { | 241 | for_each_online_node(node) { |
242 | int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) * | 242 | int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) * |
243 | nr_node_vecs; | 243 | nr_node_vecs; |
244 | int cpu_offset = 0; | 244 | int cpu_offset = 0; |
245 | for_each_cpu(cpu, cpumask_of_node(node)) { | 245 | for_each_cpu(cpu, cpumask_of_node(node)) { |
@@ -248,10 +248,11 @@ static void __cpuinit calculate_tlb_offset(void) | |||
248 | cpu_offset++; | 248 | cpu_offset++; |
249 | cpu_offset = cpu_offset % nr_node_vecs; | 249 | cpu_offset = cpu_offset % nr_node_vecs; |
250 | } | 250 | } |
251 | idx++; | ||
251 | } | 252 | } |
252 | } | 253 | } |
253 | 254 | ||
254 | static int tlb_cpuhp_notify(struct notifier_block *n, | 255 | static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n, |
255 | unsigned long action, void *hcpu) | 256 | unsigned long action, void *hcpu) |
256 | { | 257 | { |
257 | switch (action & 0xf) { | 258 | switch (action & 0xf) { |
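Note on the tlb.c fix: the raw NUMA node number is replaced with a dense running index when slicing the invalidate vectors, because node ids need not be contiguous; indexing by node id can compute offsets beyond NUM_INVALIDATE_TLB_VECTORS and skew the distribution. A runnable illustration with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        int online_nodes[] = {0, 4, 5};  /* sparse node ids */
        int nr_nodes = 3, nr_vectors = 8;
        int per_node = nr_vectors / nr_nodes;  /* 2 */
        int idx = 0;

        for (int i = 0; i < nr_nodes; i++) {
            int node = online_nodes[i];
            int by_node = (node % nr_vectors) * per_node; /* 0, 8, 10 */
            int by_idx  = (idx  % nr_vectors) * per_node; /* 0, 2, 4  */
            printf("node %d: by_node=%d (can exceed %d) by_idx=%d\n",
                   node, by_node, nr_vectors - 1, by_idx);
            idx++;
        }
        return 0;
    }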
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 15466c096ba5..0972315c3860 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -138,7 +138,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
138 | struct acpi_resource_address64 addr; | 138 | struct acpi_resource_address64 addr; |
139 | acpi_status status; | 139 | acpi_status status; |
140 | unsigned long flags; | 140 | unsigned long flags; |
141 | struct resource *root, *conflict; | ||
142 | u64 start, end; | 141 | u64 start, end; |
143 | 142 | ||
144 | status = resource_to_addr(acpi_res, &addr); | 143 | status = resource_to_addr(acpi_res, &addr); |
@@ -146,12 +145,10 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
146 | return AE_OK; | 145 | return AE_OK; |
147 | 146 | ||
148 | if (addr.resource_type == ACPI_MEMORY_RANGE) { | 147 | if (addr.resource_type == ACPI_MEMORY_RANGE) { |
149 | root = &iomem_resource; | ||
150 | flags = IORESOURCE_MEM; | 148 | flags = IORESOURCE_MEM; |
151 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) | 149 | if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY) |
152 | flags |= IORESOURCE_PREFETCH; | 150 | flags |= IORESOURCE_PREFETCH; |
153 | } else if (addr.resource_type == ACPI_IO_RANGE) { | 151 | } else if (addr.resource_type == ACPI_IO_RANGE) { |
154 | root = &ioport_resource; | ||
155 | flags = IORESOURCE_IO; | 152 | flags = IORESOURCE_IO; |
156 | } else | 153 | } else |
157 | return AE_OK; | 154 | return AE_OK; |
@@ -172,25 +169,90 @@ setup_resource(struct acpi_resource *acpi_res, void *data) | |||
172 | return AE_OK; | 169 | return AE_OK; |
173 | } | 170 | } |
174 | 171 | ||
175 | conflict = insert_resource_conflict(root, res); | 172 | info->res_num++; |
176 | if (conflict) { | 173 | if (addr.translation_offset) |
177 | dev_err(&info->bridge->dev, | 174 | dev_info(&info->bridge->dev, "host bridge window %pR " |
178 | "address space collision: host bridge window %pR " | 175 | "(PCI address [%#llx-%#llx])\n", |
179 | "conflicts with %s %pR\n", | 176 | res, res->start - addr.translation_offset, |
180 | res, conflict->name, conflict); | 177 | res->end - addr.translation_offset); |
181 | } else { | 178 | else |
182 | pci_bus_add_resource(info->bus, res, 0); | 179 | dev_info(&info->bridge->dev, "host bridge window %pR\n", res); |
183 | info->res_num++; | 180 | |
184 | if (addr.translation_offset) | 181 | return AE_OK; |
185 | dev_info(&info->bridge->dev, "host bridge window %pR " | 182 | } |
186 | "(PCI address [%#llx-%#llx])\n", | 183 | |
187 | res, res->start - addr.translation_offset, | 184 | static bool resource_contains(struct resource *res, resource_size_t point) |
188 | res->end - addr.translation_offset); | 185 | { |
186 | if (res->start <= point && point <= res->end) | ||
187 | return true; | ||
188 | return false; | ||
189 | } | ||
190 | |||
191 | static void coalesce_windows(struct pci_root_info *info, int type) | ||
192 | { | ||
193 | int i, j; | ||
194 | struct resource *res1, *res2; | ||
195 | |||
196 | for (i = 0; i < info->res_num; i++) { | ||
197 | res1 = &info->res[i]; | ||
198 | if (!(res1->flags & type)) | ||
199 | continue; | ||
200 | |||
201 | for (j = i + 1; j < info->res_num; j++) { | ||
202 | res2 = &info->res[j]; | ||
203 | if (!(res2->flags & type)) | ||
204 | continue; | ||
205 | |||
206 | /* | ||
207 | * I don't like throwing away windows because then | ||
208 | * our resources no longer match the ACPI _CRS, but | ||
209 | * the kernel resource tree doesn't allow overlaps. | ||
210 | */ | ||
211 | if (resource_contains(res1, res2->start) || | ||
212 | resource_contains(res1, res2->end) || | ||
213 | resource_contains(res2, res1->start) || | ||
214 | resource_contains(res2, res1->end)) { | ||
215 | res1->start = min(res1->start, res2->start); | ||
216 | res1->end = max(res1->end, res2->end); | ||
217 | dev_info(&info->bridge->dev, | ||
218 | "host bridge window expanded to %pR; %pR ignored\n", | ||
219 | res1, res2); | ||
220 | res2->flags = 0; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | } | ||
225 | |||
226 | static void add_resources(struct pci_root_info *info) | ||
227 | { | ||
228 | int i; | ||
229 | struct resource *res, *root, *conflict; | ||
230 | |||
231 | if (!pci_use_crs) | ||
232 | return; | ||
233 | |||
234 | coalesce_windows(info, IORESOURCE_MEM); | ||
235 | coalesce_windows(info, IORESOURCE_IO); | ||
236 | |||
237 | for (i = 0; i < info->res_num; i++) { | ||
238 | res = &info->res[i]; | ||
239 | |||
240 | if (res->flags & IORESOURCE_MEM) | ||
241 | root = &iomem_resource; | ||
242 | else if (res->flags & IORESOURCE_IO) | ||
243 | root = &ioport_resource; | ||
189 | else | 244 | else |
190 | dev_info(&info->bridge->dev, | 245 | continue; |
191 | "host bridge window %pR\n", res); | 246 | |
247 | conflict = insert_resource_conflict(root, res); | ||
248 | if (conflict) | ||
249 | dev_err(&info->bridge->dev, | ||
250 | "address space collision: host bridge window %pR " | ||
251 | "conflicts with %s %pR\n", | ||
252 | res, conflict->name, conflict); | ||
253 | else | ||
254 | pci_bus_add_resource(info->bus, res, 0); | ||
192 | } | 255 | } |
193 | return AE_OK; | ||
194 | } | 256 | } |
195 | 257 | ||
196 | static void | 258 | static void |
@@ -224,6 +286,7 @@ get_current_resources(struct acpi_device *device, int busnum, | |||
224 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, | 286 | acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource, |
225 | &info); | 287 | &info); |
226 | 288 | ||
289 | add_resources(&info); | ||
227 | return; | 290 | return; |
228 | 291 | ||
229 | name_alloc_fail: | 292 | name_alloc_fail: |
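Note on the acpi.c refactor: insert_resource_conflict() is deferred until every _CRS window has been parsed, so overlapping host bridge windows can first be coalesced (the kernel resource tree forbids overlaps, and rejecting a window outright would hide usable address space). The four-way containment test used above is equivalent to the standard interval-overlap check; in plain C:

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long start, end; };  /* inclusive */

    static bool overlaps(struct range a, struct range b)
    {
        return a.start <= b.end && b.start <= a.end;
    }

    static struct range merge(struct range a, struct range b)
    {
        struct range r = {
            a.start < b.start ? a.start : b.start,
            a.end   > b.end   ? a.end   : b.end,
        };
        return r;
    }

    int main(void)
    {
        struct range w1 = {0x1000, 0x1fff}, w2 = {0x1800, 0x2fff};
        if (overlaps(w1, w2)) {
            struct range m = merge(w1, w2);
            printf("merged window: [%#lx-%#lx]\n", m.start, m.end);
        }
        return 0;
    }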
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 117f5b8daf75..d7b5109f7a9c 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -147,8 +147,10 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
147 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ | 147 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ |
148 | (type == PCI_CAP_ID_MSIX) ? | 148 | (type == PCI_CAP_ID_MSIX) ? |
149 | "pcifront-msi-x" : "pcifront-msi"); | 149 | "pcifront-msi-x" : "pcifront-msi"); |
150 | if (irq < 0) | 150 | if (irq < 0) { |
151 | return -1; | 151 | ret = -1; |
152 | goto free; | ||
153 | } | ||
152 | 154 | ||
153 | ret = set_irq_msi(irq, msidesc); | 155 | ret = set_irq_msi(irq, msidesc); |
154 | if (ret) | 156 | if (ret) |
@@ -164,7 +166,7 @@ error: | |||
164 | if (ret == -ENODEV) | 166 | if (ret == -ENODEV) |
165 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | 167 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ |
166 | " MSI/MSI-X support!\n"); | 168 | " MSI/MSI-X support!\n"); |
167 | 169 | free: | |
168 | kfree(v); | 170 | kfree(v); |
169 | return ret; | 171 | return ret; |
170 | } | 172 | } |
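Note on the pci/xen.c change: the early `return -1` in xen_setup_msi_irqs() skipped the kfree(v) at the end and leaked the vector array; the new `goto free` routes every failure through the common cleanup label. The same goto-unwind idiom in a standalone userspace sketch:

    #include <stdlib.h>

    static int setup(int n)
    {
        int ret = 0;
        int *v = malloc(n * sizeof(*v));
        if (!v)
            return -1;

        for (int i = 0; i < n; i++) {
            if (i == 3) {        /* stand-in for a mid-loop failure */
                ret = -1;
                goto free;       /* bail out, but still release v */
            }
            v[i] = i;
        }
    free:
        free(v);
        return ret;
    }

    int main(void)
    {
        return setup(8) == -1 ? 0 : 1;
    }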
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 20ea20a39e2a..ba9caa808a9c 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1343,8 +1343,8 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1343 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) | 1343 | * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR) |
1344 | * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub | 1344 | * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub |
1345 | */ | 1345 | */ |
1346 | bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)* | 1346 | bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE |
1347 | UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); | 1347 | * UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node); |
1348 | BUG_ON(!bau_desc); | 1348 | BUG_ON(!bau_desc); |
1349 | 1349 | ||
1350 | pa = uv_gpa(bau_desc); /* need the real nasid*/ | 1350 | pa = uv_gpa(bau_desc); /* need the real nasid*/ |
@@ -1402,9 +1402,9 @@ uv_payload_queue_init(int node, int pnode) | |||
1402 | struct bau_payload_queue_entry *pqp_malloc; | 1402 | struct bau_payload_queue_entry *pqp_malloc; |
1403 | struct bau_control *bcp; | 1403 | struct bau_control *bcp; |
1404 | 1404 | ||
1405 | pqp = (struct bau_payload_queue_entry *) kmalloc_node( | 1405 | pqp = kmalloc_node((DEST_Q_SIZE + 1) |
1406 | (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry), | 1406 | * sizeof(struct bau_payload_queue_entry), |
1407 | GFP_KERNEL, node); | 1407 | GFP_KERNEL, node); |
1408 | BUG_ON(!pqp); | 1408 | BUG_ON(!pqp); |
1409 | pqp_malloc = pqp; | 1409 | pqp_malloc = pqp; |
1410 | 1410 | ||
@@ -1455,7 +1455,7 @@ static void __init uv_init_uvhub(int uvhub, int vector) | |||
1455 | * the below initialization can't be in firmware because the | 1455 | * the below initialization can't be in firmware because the |
1456 | * messaging IRQ will be determined by the OS | 1456 | * messaging IRQ will be determined by the OS |
1457 | */ | 1457 | */ |
1458 | apicid = uvhub_to_first_apicid(uvhub); | 1458 | apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits; |
1459 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, | 1459 | uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, |
1460 | ((apicid << 32) | vector)); | 1460 | ((apicid << 32) | vector)); |
1461 | } | 1461 | } |
@@ -1520,8 +1520,7 @@ static void __init uv_init_per_cpu(int nuvhubs) | |||
1520 | 1520 | ||
1521 | timeout_us = calculate_destination_timeout(); | 1521 | timeout_us = calculate_destination_timeout(); |
1522 | 1522 | ||
1523 | uvhub_descs = (struct uvhub_desc *) | 1523 | uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); |
1524 | kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL); | ||
1525 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); | 1524 | memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc)); |
1526 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); | 1525 | uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL); |
1527 | for_each_present_cpu(cpu) { | 1526 | for_each_present_cpu(cpu) { |
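Note on the tlb_uv.c cleanups: the casts on kmalloc_node()/kmalloc() are dropped because the allocators return void *, which converts implicitly to any object pointer type in C, so the casts are noise (and can mask a missing prototype). The same point in userspace C:

    #include <stdlib.h>

    struct uvhub_desc { int dummy; };  /* trimmed for illustration */

    int main(void)
    {
        /* no cast needed: void * converts implicitly in C (unlike C++) */
        struct uvhub_desc *descs = malloc(4 * sizeof(*descs));
        if (!descs)
            return 1;
        free(descs);
        return 0;
    }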
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index 56e421bc379b..9daf5d1af9f1 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c | |||
@@ -89,6 +89,7 @@ static void uv_rtc_send_IPI(int cpu) | |||
89 | 89 | ||
90 | apicid = cpu_physical_id(cpu); | 90 | apicid = cpu_physical_id(cpu); |
91 | pnode = uv_apicid_to_pnode(apicid); | 91 | pnode = uv_apicid_to_pnode(apicid); |
92 | apicid |= uv_apicid_hibits; | ||
92 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | | 93 | val = (1UL << UVH_IPI_INT_SEND_SHFT) | |
93 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | | 94 | (apicid << UVH_IPI_INT_APIC_ID_SHFT) | |
94 | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); | 95 | (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); |
@@ -107,6 +108,7 @@ static int uv_intr_pending(int pnode) | |||
107 | static int uv_setup_intr(int cpu, u64 expires) | 108 | static int uv_setup_intr(int cpu, u64 expires) |
108 | { | 109 | { |
109 | u64 val; | 110 | u64 val; |
111 | unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits; | ||
110 | int pnode = uv_cpu_to_pnode(cpu); | 112 | int pnode = uv_cpu_to_pnode(cpu); |
111 | 113 | ||
112 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, | 114 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, |
@@ -117,7 +119,7 @@ static int uv_setup_intr(int cpu, u64 expires) | |||
117 | UVH_EVENT_OCCURRED0_RTC1_MASK); | 119 | UVH_EVENT_OCCURRED0_RTC1_MASK); |
118 | 120 | ||
119 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | | 121 | val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | |
120 | ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); | 122 | ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); |
121 | 123 | ||
122 | /* Set configuration */ | 124 | /* Set configuration */ |
123 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val); | 125 | uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val); |
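Note on the two UV hunks: uv_apicid_hibits is OR'd into the destination APIC ID before it is shifted into an interrupt-config MMR; on the affected systems the BIOS programs extra high bits into APIC IDs that the raw cpu_physical_id() value lacks. A sketch of the value composition, mirroring the ((apicid << 32) | vector) write in the tlb_uv.c hunk above (all values invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t apicid = 0x15;    /* cpu_physical_id() result, invented */
        uint64_t hibits = 0x3e00;  /* uv_apicid_hibits, invented value   */
        uint64_t vector = 0xf7;

        uint64_t val = ((apicid | hibits) << 32) | vector;
        printf("mmr = %#llx\n", (unsigned long long)val);
        return 0;
    }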
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 235c0f4d3861..02c710bebf7a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); | |||
75 | enum xen_domain_type xen_domain_type = XEN_NATIVE; | 75 | enum xen_domain_type xen_domain_type = XEN_NATIVE; |
76 | EXPORT_SYMBOL_GPL(xen_domain_type); | 76 | EXPORT_SYMBOL_GPL(xen_domain_type); |
77 | 77 | ||
78 | unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; | ||
79 | EXPORT_SYMBOL(machine_to_phys_mapping); | ||
80 | unsigned int machine_to_phys_order; | ||
81 | EXPORT_SYMBOL(machine_to_phys_order); | ||
82 | |||
78 | struct start_info *xen_start_info; | 83 | struct start_info *xen_start_info; |
79 | EXPORT_SYMBOL_GPL(xen_start_info); | 84 | EXPORT_SYMBOL_GPL(xen_start_info); |
80 | 85 | ||
@@ -1090,6 +1095,8 @@ static void __init xen_setup_stackprotector(void) | |||
1090 | /* First C function to be called on Xen boot */ | 1095 | /* First C function to be called on Xen boot */ |
1091 | asmlinkage void __init xen_start_kernel(void) | 1096 | asmlinkage void __init xen_start_kernel(void) |
1092 | { | 1097 | { |
1098 | struct physdev_set_iopl set_iopl; | ||
1099 | int rc; | ||
1093 | pgd_t *pgd; | 1100 | pgd_t *pgd; |
1094 | 1101 | ||
1095 | if (!xen_start_info) | 1102 | if (!xen_start_info) |
@@ -1097,6 +1104,8 @@ asmlinkage void __init xen_start_kernel(void) | |||
1097 | 1104 | ||
1098 | xen_domain_type = XEN_PV_DOMAIN; | 1105 | xen_domain_type = XEN_PV_DOMAIN; |
1099 | 1106 | ||
1107 | xen_setup_machphys_mapping(); | ||
1108 | |||
1100 | /* Install Xen paravirt ops */ | 1109 | /* Install Xen paravirt ops */ |
1101 | pv_info = xen_info; | 1110 | pv_info = xen_info; |
1102 | pv_init_ops = xen_init_ops; | 1111 | pv_init_ops = xen_init_ops; |
@@ -1191,8 +1200,6 @@ asmlinkage void __init xen_start_kernel(void) | |||
1191 | /* Allocate and initialize top and mid mfn levels for p2m structure */ | 1200 | /* Allocate and initialize top and mid mfn levels for p2m structure */ |
1192 | xen_build_mfn_list_list(); | 1201 | xen_build_mfn_list_list(); |
1193 | 1202 | ||
1194 | init_mm.pgd = pgd; | ||
1195 | |||
1196 | /* keep using Xen gdt for now; no urgent need to change it */ | 1203 | /* keep using Xen gdt for now; no urgent need to change it */ |
1197 | 1204 | ||
1198 | #ifdef CONFIG_X86_32 | 1205 | #ifdef CONFIG_X86_32 |
@@ -1202,10 +1209,18 @@ asmlinkage void __init xen_start_kernel(void) | |||
1202 | #else | 1209 | #else |
1203 | pv_info.kernel_rpl = 0; | 1210 | pv_info.kernel_rpl = 0; |
1204 | #endif | 1211 | #endif |
1205 | |||
1206 | /* set the limit of our address space */ | 1212 | /* set the limit of our address space */ |
1207 | xen_reserve_top(); | 1213 | xen_reserve_top(); |
1208 | 1214 | ||
1215 | /* We used to do this in xen_arch_setup, but that is too late on AMD | ||
1216 | * where early_cpu_init (run before ->arch_setup()) calls early_amd_init | ||
1217 | * which pokes the 0xcf8 port. | ||
1218 | */ | ||
1219 | set_iopl.iopl = 1; | ||
1220 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | ||
1221 | if (rc != 0) | ||
1222 | xen_raw_printk("physdev_op failed %d\n", rc); | ||
1223 | |||
1209 | #ifdef CONFIG_X86_32 | 1224 | #ifdef CONFIG_X86_32 |
1210 | /* set up basic CPUID stuff */ | 1225 | /* set up basic CPUID stuff */ |
1211 | cpu_detect(&new_cpu_data); | 1226 | cpu_detect(&new_cpu_data); |
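Note on the enlighten.c change: the PHYSDEVOP_set_iopl hypercall is hoisted from xen_arch_setup() into xen_start_kernel() because, as the added comment explains, early_cpu_init() runs before ->arch_setup() and on AMD early_amd_init() pokes the PCI config-address port 0xcf8, which faults in a PV guest whose IOPL has not been raised yet. For reference, the conf1 encoding such an access uses (standard PCI, shown here only to make the dependency concrete):

    #include <stdint.h>
    #include <stdio.h>

    /* CONFIG_ADDRESS (port 0xcf8): enable(31) | bus(23:16) |
     * device(15:11) | function(10:8) | register(7:2) */
    static uint32_t pci_conf1_addr(uint8_t bus, uint8_t dev,
                                   uint8_t fn, uint8_t reg)
    {
        return 0x80000000u | (bus << 16) | (dev << 11) | (fn << 8)
                           | (reg & 0xfc);
    }

    int main(void)
    {
        printf("%#x\n", pci_conf1_addr(0, 0x18, 0, 0x60));
        return 0;
    }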
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index c237b810b03f..a1feff9e59b6 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -2034,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | |||
2034 | set_page_prot(pmd, PAGE_KERNEL_RO); | 2034 | set_page_prot(pmd, PAGE_KERNEL_RO); |
2035 | } | 2035 | } |
2036 | 2036 | ||
2037 | void __init xen_setup_machphys_mapping(void) | ||
2038 | { | ||
2039 | struct xen_machphys_mapping mapping; | ||
2040 | unsigned long machine_to_phys_nr_ents; | ||
2041 | |||
2042 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | ||
2043 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | ||
2044 | machine_to_phys_nr_ents = mapping.max_mfn + 1; | ||
2045 | } else { | ||
2046 | machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; | ||
2047 | } | ||
2048 | machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); | ||
2049 | } | ||
2050 | |||
2037 | #ifdef CONFIG_X86_64 | 2051 | #ifdef CONFIG_X86_64 |
2038 | static void convert_pfn_mfn(void *v) | 2052 | static void convert_pfn_mfn(void *v) |
2039 | { | 2053 | { |
@@ -2119,44 +2133,83 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | |||
2119 | return pgd; | 2133 | return pgd; |
2120 | } | 2134 | } |
2121 | #else /* !CONFIG_X86_64 */ | 2135 | #else /* !CONFIG_X86_64 */ |
2122 | static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD); | 2136 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
2137 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | ||
2138 | |||
2139 | static __init void xen_write_cr3_init(unsigned long cr3) | ||
2140 | { | ||
2141 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | ||
2142 | |||
2143 | BUG_ON(read_cr3() != __pa(initial_page_table)); | ||
2144 | BUG_ON(cr3 != __pa(swapper_pg_dir)); | ||
2145 | |||
2146 | /* | ||
2147 | * We are switching to swapper_pg_dir for the first time (from | ||
2148 | * initial_page_table) and therefore need to mark that page | ||
2149 | * read-only and then pin it. | ||
2150 | * | ||
2151 | * Xen disallows sharing of kernel PMDs for PAE | ||
2152 | * guests. Therefore we must copy the kernel PMD from | ||
2153 | * initial_page_table into a new kernel PMD to be used in | ||
2154 | * swapper_pg_dir. | ||
2155 | */ | ||
2156 | swapper_kernel_pmd = | ||
2157 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | ||
2158 | memcpy(swapper_kernel_pmd, initial_kernel_pmd, | ||
2159 | sizeof(pmd_t) * PTRS_PER_PMD); | ||
2160 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = | ||
2161 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); | ||
2162 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); | ||
2163 | |||
2164 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | ||
2165 | xen_write_cr3(cr3); | ||
2166 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); | ||
2167 | |||
2168 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, | ||
2169 | PFN_DOWN(__pa(initial_page_table))); | ||
2170 | set_page_prot(initial_page_table, PAGE_KERNEL); | ||
2171 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); | ||
2172 | |||
2173 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | ||
2174 | } | ||
2123 | 2175 | ||
2124 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, | 2176 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, |
2125 | unsigned long max_pfn) | 2177 | unsigned long max_pfn) |
2126 | { | 2178 | { |
2127 | pmd_t *kernel_pmd; | 2179 | pmd_t *kernel_pmd; |
2128 | 2180 | ||
2129 | level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE); | 2181 | initial_kernel_pmd = |
2182 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | ||
2130 | 2183 | ||
2131 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + | 2184 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + |
2132 | xen_start_info->nr_pt_frames * PAGE_SIZE + | 2185 | xen_start_info->nr_pt_frames * PAGE_SIZE + |
2133 | 512*1024); | 2186 | 512*1024); |
2134 | 2187 | ||
2135 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); | 2188 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); |
2136 | memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); | 2189 | memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); |
2137 | 2190 | ||
2138 | xen_map_identity_early(level2_kernel_pgt, max_pfn); | 2191 | xen_map_identity_early(initial_kernel_pmd, max_pfn); |
2139 | 2192 | ||
2140 | memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); | 2193 | memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD); |
2141 | set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], | 2194 | initial_page_table[KERNEL_PGD_BOUNDARY] = |
2142 | __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); | 2195 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); |
2143 | 2196 | ||
2144 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 2197 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); |
2145 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | 2198 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); |
2146 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); | 2199 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); |
2147 | 2200 | ||
2148 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | 2201 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
2149 | 2202 | ||
2150 | xen_write_cr3(__pa(swapper_pg_dir)); | 2203 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, |
2151 | 2204 | PFN_DOWN(__pa(initial_page_table))); | |
2152 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); | 2205 | xen_write_cr3(__pa(initial_page_table)); |
2153 | 2206 | ||
2154 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), | 2207 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
2155 | __pa(xen_start_info->pt_base + | 2208 | __pa(xen_start_info->pt_base + |
2156 | xen_start_info->nr_pt_frames * PAGE_SIZE), | 2209 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
2157 | "XEN PAGETABLES"); | 2210 | "XEN PAGETABLES"); |
2158 | 2211 | ||
2159 | return swapper_pg_dir; | 2212 | return initial_page_table; |
2160 | } | 2213 | } |
2161 | #endif /* CONFIG_X86_64 */ | 2214 | #endif /* CONFIG_X86_64 */ |
2162 | 2215 | ||
@@ -2290,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = { | |||
2290 | .write_cr2 = xen_write_cr2, | 2343 | .write_cr2 = xen_write_cr2, |
2291 | 2344 | ||
2292 | .read_cr3 = xen_read_cr3, | 2345 | .read_cr3 = xen_read_cr3, |
2346 | #ifdef CONFIG_X86_32 | ||
2347 | .write_cr3 = xen_write_cr3_init, | ||
2348 | #else | ||
2293 | .write_cr3 = xen_write_cr3, | 2349 | .write_cr3 = xen_write_cr3, |
2350 | #endif | ||
2294 | 2351 | ||
2295 | .flush_tlb_user = xen_flush_tlb, | 2352 | .flush_tlb_user = xen_flush_tlb, |
2296 | .flush_tlb_kernel = xen_flush_tlb, | 2353 | .flush_tlb_kernel = xen_flush_tlb, |
@@ -2627,7 +2684,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma, | |||
2627 | 2684 | ||
2628 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); | 2685 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); |
2629 | 2686 | ||
2630 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; | 2687 | BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == |
2688 | (VM_PFNMAP | VM_RESERVED | VM_IO))); | ||
2631 | 2689 | ||
2632 | rmd.mfn = mfn; | 2690 | rmd.mfn = mfn; |
2633 | rmd.prot = prot; | 2691 | rmd.prot = prot; |
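Note on the xen/mmu.c additions: xen_setup_machphys_mapping() asks the hypervisor where the machine-to-physical table lives and derives machine_to_phys_order as fls(nr_ents - 1), the smallest order such that 1 << order covers the table; the 32-bit path likewise defers the swapper_pg_dir switch via xen_write_cr3_init() because Xen forbids shared kernel PMDs for PAE guests. A portable stand-in for the kernel's fls(), runnable in userspace:

    #include <stdio.h>

    /* fls(x): 1-based position of the most significant set bit; 0 for 0 */
    static int fls_portable(unsigned long x)
    {
        int r = 0;
        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        unsigned long nr_ents = 1UL << 24;  /* e.g. 16M M2P entries */
        int order = fls_portable(nr_ents - 1);
        printf("order = %d (1 << order = %lu entries)\n",
               order, 1UL << order);
        return 0;
    }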
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index b1dbdaa23ecc..01afd8a94607 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <xen/interface/callback.h> | 23 | #include <xen/interface/callback.h> |
24 | #include <xen/interface/memory.h> | 24 | #include <xen/interface/memory.h> |
25 | #include <xen/interface/physdev.h> | 25 | #include <xen/interface/physdev.h> |
26 | #include <xen/interface/memory.h> | ||
27 | #include <xen/features.h> | 26 | #include <xen/features.h> |
28 | 27 | ||
29 | #include "xen-ops.h" | 28 | #include "xen-ops.h" |
@@ -118,16 +117,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, | |||
118 | const struct e820map *e820) | 117 | const struct e820map *e820) |
119 | { | 118 | { |
120 | phys_addr_t max_addr = PFN_PHYS(max_pfn); | 119 | phys_addr_t max_addr = PFN_PHYS(max_pfn); |
121 | phys_addr_t last_end = 0; | 120 | phys_addr_t last_end = ISA_END_ADDRESS; |
122 | unsigned long released = 0; | 121 | unsigned long released = 0; |
123 | int i; | 122 | int i; |
124 | 123 | ||
124 | /* Free any unused memory above the low 1Mbyte. */ | ||
125 | for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { | 125 | for (i = 0; i < e820->nr_map && last_end < max_addr; i++) { |
126 | phys_addr_t end = e820->map[i].addr; | 126 | phys_addr_t end = e820->map[i].addr; |
127 | end = min(max_addr, end); | 127 | end = min(max_addr, end); |
128 | 128 | ||
129 | released += xen_release_chunk(last_end, end); | 129 | if (last_end < end) |
130 | last_end = e820->map[i].addr + e820->map[i].size; | 130 | released += xen_release_chunk(last_end, end); |
131 | last_end = max(last_end, e820->map[i].addr + e820->map[i].size); | ||
131 | } | 132 | } |
132 | 133 | ||
133 | if (last_end < max_addr) | 134 | if (last_end < max_addr) |
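Note on the xen_return_unused_memory() fix above: the gap walk now starts at ISA_END_ADDRESS (so the low 1 MB is never released), releases a chunk only when last_end < end (guarding against out-of-order or overlapping map entries), and keeps last_end monotonic with max(). The same walk in miniature, with invented values:

    #include <stdio.h>

    struct entry { unsigned long addr, size; };

    int main(void)
    {
        struct entry map[] = { {0x0, 0x9f000}, {0x100000, 0x300000},
                               {0x380000, 0x80000} };  /* overlaps #2 */
        unsigned long last_end = 0x100000;  /* ISA_END_ADDRESS: 1 MB */
        unsigned long max_addr = 0x800000;

        for (int i = 0; i < 3 && last_end < max_addr; i++) {
            unsigned long end = map[i].addr;
            if (end > max_addr)
                end = max_addr;
            if (last_end < end)             /* only real gaps */
                printf("release [%#lx-%#lx)\n", last_end, end);
            unsigned long this_end = map[i].addr + map[i].size;
            if (this_end > last_end)        /* keep monotonic */
                last_end = this_end;
        }
        if (last_end < max_addr)
            printf("release [%#lx-%#lx)\n", last_end, max_addr);
        return 0;
    }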
@@ -164,6 +165,7 @@ char * __init xen_memory_setup(void) | |||
164 | XENMEM_memory_map; | 165 | XENMEM_memory_map; |
165 | rc = HYPERVISOR_memory_op(op, &memmap); | 166 | rc = HYPERVISOR_memory_op(op, &memmap); |
166 | if (rc == -ENOSYS) { | 167 | if (rc == -ENOSYS) { |
168 | BUG_ON(xen_initial_domain()); | ||
167 | memmap.nr_entries = 1; | 169 | memmap.nr_entries = 1; |
168 | map[0].addr = 0ULL; | 170 | map[0].addr = 0ULL; |
169 | map[0].size = mem_end; | 171 | map[0].size = mem_end; |
@@ -201,12 +203,13 @@ char * __init xen_memory_setup(void) | |||
201 | } | 203 | } |
202 | 204 | ||
203 | /* | 205 | /* |
204 | * Even though this is normal, usable memory under Xen, reserve | 206 | * In domU, the ISA region is normal, usable memory, but we |
205 | * ISA memory anyway because too many things think they can poke | 207 | * reserve ISA memory anyway because too many things poke |
206 | * about in there. | 208 | * about in there. |
207 | * | 209 | * |
208 | * In a dom0 kernel, this region is identity mapped with the | 210 | * In Dom0, the host E820 information can leave gaps in the |
209 | * hardware ISA area, so it really is out of bounds. | 211 | * ISA range, which would cause us to release those pages. To |
212 | * avoid this, we unconditionally reserve them here. | ||
210 | */ | 213 | */ |
211 | e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, | 214 | e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, |
212 | E820_RESERVED); | 215 | E820_RESERVED); |
@@ -244,8 +247,7 @@ char * __init xen_memory_setup(void) | |||
244 | else | 247 | else |
245 | extra_pages = 0; | 248 | extra_pages = 0; |
246 | 249 | ||
247 | if (!xen_initial_domain()) | 250 | xen_add_extra_mem(extra_pages); |
248 | xen_add_extra_mem(extra_pages); | ||
249 | 251 | ||
250 | return "Xen"; | 252 | return "Xen"; |
251 | } | 253 | } |
@@ -333,9 +335,6 @@ void __cpuinit xen_enable_syscall(void) | |||
333 | 335 | ||
334 | void __init xen_arch_setup(void) | 336 | void __init xen_arch_setup(void) |
335 | { | 337 | { |
336 | struct physdev_set_iopl set_iopl; | ||
337 | int rc; | ||
338 | |||
339 | xen_panic_handler_init(); | 338 | xen_panic_handler_init(); |
340 | 339 | ||
341 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); | 340 | HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); |
@@ -352,11 +351,6 @@ void __init xen_arch_setup(void) | |||
352 | xen_enable_sysenter(); | 351 | xen_enable_sysenter(); |
353 | xen_enable_syscall(); | 352 | xen_enable_syscall(); |
354 | 353 | ||
355 | set_iopl.iopl = 1; | ||
356 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); | ||
357 | if (rc != 0) | ||
358 | printk(KERN_INFO "physdev_op failed %d\n", rc); | ||
359 | |||
360 | #ifdef CONFIG_ACPI | 354 | #ifdef CONFIG_ACPI |
361 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { | 355 | if (!(xen_start_info->flags & SIF_INITDOMAIN)) { |
362 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); | 356 | printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); |