author		Ingo Molnar <mingo@elte.hu>	2009-09-19 05:27:32 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-19 05:28:41 -0400
commit		929bf0d0156562ce631728b6fa53d68004d456d2 (patch)
tree		739063990a8077b29ef97e69d73bce94573daae4 /arch/x86
parent		def0a9b2573e00ab0b486cb5382625203ab4c4a6 (diff)
parent		202c4675c55ddf6b442c7e057d2dff6b42ef71aa (diff)
Merge branch 'linus' into perfcounters/core
Merge reason: Bring in tracing changes we depend on.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 75
-rw-r--r--  arch/x86/Kconfig.cpu | 19
-rw-r--r--  arch/x86/Makefile | 8
-rw-r--r--  arch/x86/Makefile_32.cpu | 2
-rw-r--r--  arch/x86/boot/video-vesa.c | 7
-rw-r--r--  arch/x86/boot/video-vga.c | 10
-rw-r--r--  arch/x86/boot/video.c | 5
-rw-r--r--  arch/x86/boot/video.h | 20
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 17
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 14
-rw-r--r--  arch/x86/include/asm/agp.h | 4
-rw-r--r--  arch/x86/include/asm/alternative.h | 7
-rw-r--r--  arch/x86/include/asm/apic.h | 7
-rw-r--r--  arch/x86/include/asm/apicdef.h | 5
-rw-r--r--  arch/x86/include/asm/asm.h | 10
-rw-r--r--  arch/x86/include/asm/bootparam.h | 3
-rw-r--r--  arch/x86/include/asm/cacheflush.h | 54
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/current.h | 2
-rw-r--r--  arch/x86/include/asm/desc.h | 13
-rw-r--r--  arch/x86/include/asm/desc_defs.h | 6
-rw-r--r--  arch/x86/include/asm/device.h | 3
-rw-r--r--  arch/x86/include/asm/dwarf2.h | 18
-rw-r--r--  arch/x86/include/asm/elf.h | 2
-rw-r--r--  arch/x86/include/asm/entry_arch.h | 2
-rw-r--r--  arch/x86/include/asm/fixmap.h | 3
-rw-r--r--  arch/x86/include/asm/i387.h | 9
-rw-r--r--  arch/x86/include/asm/io_apic.h | 13
-rw-r--r--  arch/x86/include/asm/ioctls.h | 95
-rw-r--r--  arch/x86/include/asm/iomap.h | 9
-rw-r--r--  arch/x86/include/asm/ipcbuf.h | 29
-rw-r--r--  arch/x86/include/asm/irqflags.h | 9
-rw-r--r--  arch/x86/include/asm/kvm.h | 10
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h (renamed from arch/x86/include/asm/kvm_x86_emulate.h) | 0
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 60
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 2
-rw-r--r--  arch/x86/include/asm/lguest.h | 5
-rw-r--r--  arch/x86/include/asm/mce.h | 32
-rw-r--r--  arch/x86/include/asm/mman.h | 14
-rw-r--r--  arch/x86/include/asm/module.h | 15
-rw-r--r--  arch/x86/include/asm/msgbuf.h | 40
-rw-r--r--  arch/x86/include/asm/msr-index.h | 12
-rw-r--r--  arch/x86/include/asm/msr.h | 75
-rw-r--r--  arch/x86/include/asm/mtrr.h | 6
-rw-r--r--  arch/x86/include/asm/nops.h | 2
-rw-r--r--  arch/x86/include/asm/param.h | 23
-rw-r--r--  arch/x86/include/asm/paravirt.h | 746
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 721
-rw-r--r--  arch/x86/include/asm/pat.h | 5
-rw-r--r--  arch/x86/include/asm/pci.h | 1
-rw-r--r--  arch/x86/include/asm/percpu.h | 35
-rw-r--r--  arch/x86/include/asm/pgtable.h | 16
-rw-r--r--  arch/x86/include/asm/processor.h | 60
-rw-r--r--  arch/x86/include/asm/scatterlist.h | 27
-rw-r--r--  arch/x86/include/asm/shmbuf.h | 52
-rw-r--r--  arch/x86/include/asm/socket.h | 61
-rw-r--r--  arch/x86/include/asm/sockios.h | 14
-rw-r--r--  arch/x86/include/asm/stackprotector.h | 10
-rw-r--r--  arch/x86/include/asm/system.h | 29
-rw-r--r--  arch/x86/include/asm/termbits.h | 199
-rw-r--r--  arch/x86/include/asm/termios.h | 115
-rw-r--r--  arch/x86/include/asm/thread_info.h | 2
-rw-r--r--  arch/x86/include/asm/topology.h | 14
-rw-r--r--  arch/x86/include/asm/traps.h | 4
-rw-r--r--  arch/x86/include/asm/types.h | 12
-rw-r--r--  arch/x86/include/asm/ucontext.h | 8
-rw-r--r--  arch/x86/include/asm/vmx.h | 8
-rw-r--r--  arch/x86/kernel/Makefile | 1
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 105
-rw-r--r--  arch/x86/kernel/alternative.c | 58
-rw-r--r--  arch/x86/kernel/apic/apic.c | 110
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 2
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 324
-rw-r--r--  arch/x86/kernel/apic/ipi.c | 2
-rw-r--r--  arch/x86/kernel/apic/nmi.c | 2
-rw-r--r--  arch/x86/kernel/apic/probe_64.c | 6
-rw-r--r--  arch/x86/kernel/apm_32.c | 31
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 127
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 10
-rw-r--r--  arch/x86/kernel/cpu/bugs_64.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 70
-rw-r--r--  arch/x86/kernel/cpu/cpu_debug.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 88
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 19
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 5
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 17
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 148
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile | 5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/k7.c | 116
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c | 158
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-internal.h | 15
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-severity.c | 8
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 324
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 10
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c | 10
-rw-r--r--  arch/x86/kernel/cpu/mcheck/non-fatal.c | 94
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p4.c | 163
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p6.c | 127
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 13
-rw-r--r--  arch/x86/kernel/cpu/mtrr/amd.c | 97
-rw-r--r--  arch/x86/kernel/cpu/mtrr/centaur.c | 168
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c | 390
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cyrix.c | 94
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 304
-rw-r--r--  arch/x86/kernel/cpu/mtrr/if.c | 135
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c | 499
-rw-r--r--  arch/x86/kernel/cpu/mtrr/mtrr.h | 19
-rw-r--r--  arch/x86/kernel/cpu/mtrr/state.c | 68
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 14
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 45
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 4
-rw-r--r--  arch/x86/kernel/cpu/sched.c | 55
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 18
-rw-r--r--  arch/x86/kernel/doublefault_32.c | 4
-rw-r--r--  arch/x86/kernel/ds.c | 6
-rw-r--r--  arch/x86/kernel/dumpstack.c | 1
-rw-r--r--  arch/x86/kernel/e820.c | 2
-rw-r--r--  arch/x86/kernel/entry_64.S | 6
-rw-r--r--  arch/x86/kernel/head_32.S | 1
-rw-r--r--  arch/x86/kernel/irq.c | 4
-rw-r--r--  arch/x86/kernel/irq_32.c | 5
-rw-r--r--  arch/x86/kernel/irqinit.c | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 7
-rw-r--r--  arch/x86/kernel/kvmclock.c | 4
-rw-r--r--  arch/x86/kernel/mpparse.c | 10
-rw-r--r--  arch/x86/kernel/msr.c | 61
-rw-r--r--  arch/x86/kernel/paravirt.c | 3
-rw-r--r--  arch/x86/kernel/pci-dma.c | 4
-rw-r--r--  arch/x86/kernel/process_32.c | 30
-rw-r--r--  arch/x86/kernel/process_64.c | 36
-rw-r--r--  arch/x86/kernel/quirks.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 7
-rw-r--r--  arch/x86/kernel/setup.c | 23
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 364
-rw-r--r--  arch/x86/kernel/signal.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 19
-rw-r--r--  arch/x86/kernel/step.c | 9
-rw-r--r--  arch/x86/kernel/tboot.c | 447
-rw-r--r--  arch/x86/kernel/tlb_uv.c | 4
-rw-r--r--  arch/x86/kernel/traps.c | 54
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 11
-rw-r--r--  arch/x86/kvm/Kconfig | 21
-rw-r--r--  arch/x86/kvm/Makefile | 35
-rw-r--r--  arch/x86/kvm/emulate.c (renamed from arch/x86/kvm/x86_emulate.c) | 265
-rw-r--r--  arch/x86/kvm/i8254.c | 160
-rw-r--r--  arch/x86/kvm/i8254.h | 5
-rw-r--r--  arch/x86/kvm/i8259.c | 116
-rw-r--r--  arch/x86/kvm/irq.h | 1
-rw-r--r--  arch/x86/kvm/kvm_cache_regs.h | 9
-rw-r--r--  arch/x86/kvm/kvm_svm.h | 51
-rw-r--r--  arch/x86/kvm/kvm_timer.h | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 334
-rw-r--r--  arch/x86/kvm/lapic.h | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 587
-rw-r--r--  arch/x86/kvm/mmu.h | 4
-rw-r--r--  arch/x86/kvm/mmutrace.h | 220
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 141
-rw-r--r--  arch/x86/kvm/svm.c | 889
-rw-r--r--  arch/x86/kvm/timer.c | 16
-rw-r--r--  arch/x86/kvm/trace.h | 355
-rw-r--r--  arch/x86/kvm/vmx.c | 497
-rw-r--r--  arch/x86/kvm/x86.c | 822
-rw-r--r--  arch/x86/kvm/x86.h | 4
-rw-r--r--  arch/x86/lib/Makefile | 2
-rw-r--r--  arch/x86/lib/msr-reg-export.c | 5
-rw-r--r--  arch/x86/lib/msr-reg.S | 102
-rw-r--r--  arch/x86/lib/msr.c | 49
-rw-r--r--  arch/x86/mm/Makefile | 6
-rw-r--r--  arch/x86/mm/fault.c | 51
-rw-r--r--  arch/x86/mm/highmem_32.c | 3
-rw-r--r--  arch/x86/mm/iomap_32.c | 27
-rw-r--r--  arch/x86/mm/ioremap.c | 90
-rw-r--r--  arch/x86/mm/mmap.c | 17
-rw-r--r--  arch/x86/mm/pageattr.c | 29
-rw-r--r--  arch/x86/mm/pat.c | 355
-rw-r--r--  arch/x86/mm/physaddr.c | 70
-rw-r--r--  arch/x86/mm/physaddr.h | 10
-rw-r--r--  arch/x86/mm/srat_32.c | 4
-rw-r--r--  arch/x86/pci/amd_bus.c | 64
-rw-r--r--  arch/x86/pci/common.c | 69
-rw-r--r--  arch/x86/power/cpu.c | 2
-rw-r--r--  arch/x86/xen/Makefile | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 133
-rw-r--r--  arch/x86/xen/smp.c | 1
-rw-r--r--  arch/x86/xen/spinlock.c | 28
187 files changed, 7813 insertions, 5977 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fc20fdc0f7f2..e5deee2dfcfe 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -150,7 +150,10 @@ config ARCH_HAS_CACHE_LINE_SIZE
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
-config HAVE_DYNAMIC_PER_CPU_AREA
+config NEED_PER_CPU_EMBED_FIRST_CHUNK
+	def_bool y
+
+config NEED_PER_CPU_PAGE_FIRST_CHUNK
 	def_bool y
 
 config HAVE_CPUMASK_OF_CPU_MAP
@@ -179,6 +182,10 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
+config HAVE_INTEL_TXT
+	def_bool y
+	depends on EXPERIMENTAL && DMAR && ACPI
+
 # Use the generic interrupt handling code in kernel/irq/:
 config GENERIC_HARDIRQS
 	bool
@@ -776,41 +783,17 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
 	  increased on these systems.
 
 config X86_MCE
-	bool "Machine Check Exception"
+	bool "Machine Check / overheating reporting"
 	---help---
-	  Machine Check Exception support allows the processor to notify the
-	  kernel if it detects a problem (e.g. overheating, component failure).
+	  Machine Check support allows the processor to notify the
+	  kernel if it detects a problem (e.g. overheating, data corruption).
 	  The action the kernel takes depends on the severity of the problem,
-	  ranging from a warning message on the console, to halting the machine.
-	  Your processor must be a Pentium or newer to support this - check the
-	  flags in /proc/cpuinfo for mce. Note that some older Pentium systems
-	  have a design flaw which leads to false MCE events - hence MCE is
-	  disabled on all P5 processors, unless explicitly enabled with "mce"
-	  as a boot argument. Similarly, if MCE is built in and creates a
-	  problem on some new non-standard machine, you can boot with "nomce"
-	  to disable it. MCE support simply ignores non-MCE processors like
-	  the 386 and 486, so nearly everyone can say Y here.
-
-config X86_OLD_MCE
-	depends on X86_32 && X86_MCE
-	bool "Use legacy machine check code (will go away)"
-	default n
-	select X86_ANCIENT_MCE
-	---help---
-	  Use the old i386 machine check code. This is merely intended for
-	  testing in a transition period. Try this if you run into any machine
-	  check related software problems, but report the problem to
-	  linux-kernel. When in doubt say no.
-
-config X86_NEW_MCE
-	depends on X86_MCE
-	bool
-	default y if (!X86_OLD_MCE && X86_32) || X86_64
+	  ranging from warning messages to halting the machine.
 
 config X86_MCE_INTEL
 	def_bool y
 	prompt "Intel MCE features"
-	depends on X86_NEW_MCE && X86_LOCAL_APIC
+	depends on X86_MCE && X86_LOCAL_APIC
 	---help---
 	  Additional support for intel specific MCE features such as
 	  the thermal monitor.
@@ -818,14 +801,14 @@ config X86_MCE_INTEL
 config X86_MCE_AMD
 	def_bool y
 	prompt "AMD MCE features"
-	depends on X86_NEW_MCE && X86_LOCAL_APIC
+	depends on X86_MCE && X86_LOCAL_APIC
 	---help---
 	  Additional support for AMD specific MCE features such as
 	  the DRAM Error Threshold.
 
 config X86_ANCIENT_MCE
 	def_bool n
-	depends on X86_32
+	depends on X86_32 && X86_MCE
 	prompt "Support for old Pentium 5 / WinChip machine checks"
 	---help---
 	  Include support for machine check handling on old Pentium 5 or WinChip
@@ -838,36 +821,16 @@ config X86_MCE_THRESHOLD
 	default y
 
 config X86_MCE_INJECT
-	depends on X86_NEW_MCE
+	depends on X86_MCE
 	tristate "Machine check injector support"
 	---help---
 	  Provide support for injecting machine checks for testing purposes.
 	  If you don't know what a machine check is and you don't do kernel
 	  QA it is safe to say n.
 
-config X86_MCE_NONFATAL
-	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
-	depends on X86_OLD_MCE
-	---help---
-	  Enabling this feature starts a timer that triggers every 5 seconds which
-	  will look at the machine check registers to see if anything happened.
-	  Non-fatal problems automatically get corrected (but still logged).
-	  Disable this if you don't want to see these messages.
-	  Seeing the messages this option prints out may be indicative of dying
-	  or out-of-spec (ie, overclocked) hardware.
-	  This option only does something on certain CPUs.
-	  (AMD Athlon/Duron and Intel Pentium 4)
-
-config X86_MCE_P4THERMAL
-	bool "check for P4 thermal throttling interrupt."
-	depends on X86_OLD_MCE && X86_MCE && (X86_UP_APIC || SMP)
-	---help---
-	  Enabling this feature will cause a message to be printed when the P4
-	  enters thermal throttling.
-
 config X86_THERMAL_VECTOR
 	def_bool y
-	depends on X86_MCE_P4THERMAL || X86_MCE_INTEL
+	depends on X86_MCE_INTEL
 
 config VM86
 	bool "Enable VM86 support" if EMBEDDED
@@ -1413,6 +1376,10 @@ config X86_PAT
 
 	  If unsure, say Y.
 
+config ARCH_USES_PG_UNCACHED
+	def_bool y
+	depends on X86_PAT
+
 config EFI
 	bool "EFI runtime service support"
 	depends on ACPI
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index 8130334329c0..527519b8a9f9 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -262,6 +262,15 @@ config MCORE2
 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
 	  (not a typo)
 
+config MATOM
+	bool "Intel Atom"
+	---help---
+
+	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+	  in-order pipelining architecture and thus can benefit from
+	  accordingly optimized code. Use a recent GCC with specific Atom
+	  support in order to fully benefit from selecting this option.
+
 config GENERIC_CPU
 	bool "Generic-x86-64"
 	depends on X86_64
@@ -295,7 +304,7 @@ config X86_CPU
 config X86_L1_CACHE_BYTES
 	int
 	default "128" if MPSC
-	default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
+	default "64" if GENERIC_CPU || MK8 || MCORE2 || MATOM || X86_32
 
 config X86_INTERNODE_CACHE_BYTES
 	int
@@ -310,7 +319,7 @@ config X86_L1_CACHE_SHIFT
310 default "7" if MPENTIUM4 || MPSC 319 default "7" if MPENTIUM4 || MPSC
311 default "4" if X86_ELAN || M486 || M386 || MGEODEGX1 320 default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
312 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX 321 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
313 default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU 322 default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
314 323
315config X86_XADD 324config X86_XADD
316 def_bool y 325 def_bool y
@@ -359,7 +368,7 @@ config X86_INTEL_USERCOPY
 
 config X86_USE_PPRO_CHECKSUM
 	def_bool y
-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2
+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
 
 config X86_USE_3DNOW
 	def_bool y
@@ -387,7 +396,7 @@ config X86_P6_NOP
 
 config X86_TSC
 	def_bool y
-	depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
+	depends on ((MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) && !X86_NUMAQ) || X86_64
 
 config X86_CMPXCHG64
 	def_bool y
@@ -397,7 +406,7 @@ config X86_CMPXCHG64
 # generates cmov.
 config X86_CMOV
 	def_bool y
-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64)
+	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
 
 config X86_MINIMUM_CPU_FAMILY
 	int
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1b68659c41b4..7983c420eaf2 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -32,8 +32,8 @@ ifeq ($(CONFIG_X86_32),y)
 
         # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
         # a lot more stack due to the lack of sharing of stacklots:
-        KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
-		echo $(call cc-option,-fno-unit-at-a-time); fi ;)
+        KBUILD_CFLAGS += $(call cc-ifversion, -lt, 0400, \
+			$(call cc-option,-fno-unit-at-a-time))
 
         # CPU-specific tuning. Anything which can be shared with UML should go here.
         include $(srctree)/arch/x86/Makefile_32.cpu
@@ -55,6 +55,8 @@ else
 
         cflags-$(CONFIG_MCORE2) += \
                 $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+                $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
         KBUILD_CFLAGS += $(cflags-y)
 
@@ -72,7 +74,7 @@ endif
 
 ifdef CONFIG_CC_STACKPROTECTOR
 	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
-        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
                 stackp-y := -fstack-protector
                 stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
                 KBUILD_CFLAGS += $(stackp-y)
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 80177ec052f0..30e9a264f69d 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -33,6 +33,8 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-option,-march=c3,-march=i486) $(align)-f
 cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
 cflags-$(CONFIG_MVIAC7)		+= -march=i686
 cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
 
 # AMD Elan support
 cflags-$(CONFIG_X86_ELAN)	+= -march=i486
diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
index 275dd177f198..11e8c6eb80a1 100644
--- a/arch/x86/boot/video-vesa.c
+++ b/arch/x86/boot/video-vesa.c
@@ -31,7 +31,6 @@ static inline void vesa_store_mode_params_graphics(void) {}
 
 static int vesa_probe(void)
 {
-#if defined(CONFIG_VIDEO_VESA) || defined(CONFIG_FIRMWARE_EDID)
 	struct biosregs ireg, oreg;
 	u16 mode;
 	addr_t mode_ptr;
@@ -49,8 +48,7 @@ static int vesa_probe(void)
 	    vginfo.signature != VESA_MAGIC ||
 	    vginfo.version < 0x0102)
 		return 0;	/* Not present */
-#endif /* CONFIG_VIDEO_VESA || CONFIG_FIRMWARE_EDID */
-#ifdef CONFIG_VIDEO_VESA
+
 	set_fs(vginfo.video_mode_ptr.seg);
 	mode_ptr = vginfo.video_mode_ptr.off;
 
@@ -102,9 +100,6 @@ static int vesa_probe(void)
 	}
 
 	return nmodes;
-#else
-	return 0;
-#endif /* CONFIG_VIDEO_VESA */
 }
 
 static int vesa_set_mode(struct mode_info *mode)
diff --git a/arch/x86/boot/video-vga.c b/arch/x86/boot/video-vga.c
index 8f8d827e254d..819caa1f2008 100644
--- a/arch/x86/boot/video-vga.c
+++ b/arch/x86/boot/video-vga.c
@@ -47,14 +47,6 @@ static u8 vga_set_basic_mode(void)
 
 	initregs(&ireg);
 
-#ifdef CONFIG_VIDEO_400_HACK
-	if (adapter >= ADAPTER_VGA) {
-		ireg.ax = 0x1202;
-		ireg.bx = 0x0030;
-		intcall(0x10, &ireg, NULL);
-	}
-#endif
-
 	ax = 0x0f00;
 	intcall(0x10, &ireg, &oreg);
 	mode = oreg.al;
@@ -62,11 +54,9 @@ static u8 vga_set_basic_mode(void)
 	set_fs(0);
 	rows = rdfs8(0x484);	/* rows minus one */
 
-#ifndef CONFIG_VIDEO_400_HACK
 	if ((oreg.ax == 0x5003 || oreg.ax == 0x5007) &&
 	    (rows == 0 || rows == 24))
 		return mode;
-#endif
 
 	if (mode != 3 && mode != 7)
 		mode = 3;
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index bad728b76fc2..d42da3802499 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -221,7 +221,6 @@ static unsigned int mode_menu(void)
 	}
 }
 
-#ifdef CONFIG_VIDEO_RETAIN
 /* Save screen content to the heap */
 static struct saved_screen {
 	int x, y;
@@ -299,10 +298,6 @@ static void restore_screen(void)
 	ireg.dl = saved.curx;
 	intcall(0x10, &ireg, NULL);
 }
-#else
-#define save_screen()		((void)0)
-#define restore_screen()	((void)0)
-#endif
 
 void set_video(void)
 {
diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
index 5bb174a997fc..ff339c5db311 100644
--- a/arch/x86/boot/video.h
+++ b/arch/x86/boot/video.h
@@ -17,19 +17,8 @@
 
 #include <linux/types.h>
 
-/* Enable autodetection of SVGA adapters and modes. */
-#undef CONFIG_VIDEO_SVGA
-
-/* Enable autodetection of VESA modes */
-#define CONFIG_VIDEO_VESA
-
-/* Retain screen contents when switching modes */
-#define CONFIG_VIDEO_RETAIN
-
-/* Force 400 scan lines for standard modes (hack to fix bad BIOS behaviour */
-#undef CONFIG_VIDEO_400_HACK
-
-/* This code uses an extended set of video mode numbers. These include:
+/*
+ * This code uses an extended set of video mode numbers. These include:
  * Aliases for standard modes
  *	NORMAL_VGA (-1)
  *	EXTENDED_VGA (-2)
@@ -67,13 +56,8 @@
67/* The "recalculate timings" flag */ 56/* The "recalculate timings" flag */
68#define VIDEO_RECALC 0x8000 57#define VIDEO_RECALC 0x8000
69 58
70/* Define DO_STORE according to CONFIG_VIDEO_RETAIN */
71#ifdef CONFIG_VIDEO_RETAIN
72void store_screen(void); 59void store_screen(void);
73#define DO_STORE() store_screen() 60#define DO_STORE() store_screen()
74#else
75#define DO_STORE() ((void)0)
76#endif /* CONFIG_VIDEO_RETAIN */
77 61
78/* 62/*
79 * Mode table structures 63 * Mode table structures
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index d3ec8d588d4b..585edebe12cf 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-static inline int kernel_fpu_using(void)
-{
-	if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
-		return 1;
-	return 0;
-}
-
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index e590261ba059..ba331bfd1112 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -537,7 +537,7 @@ ia32_sys_call_table:
 	.quad sys_mkdir
 	.quad sys_rmdir		/* 40 */
 	.quad sys_dup
-	.quad sys32_pipe
+	.quad sys_pipe
 	.quad compat_sys_times
 	.quad quiet_ni_syscall	/* old prof syscall holder */
 	.quad sys_brk		/* 45 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 085a8c35f149..9f5527198825 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -189,20 +189,6 @@ asmlinkage long sys32_mprotect(unsigned long start, size_t len,
 	return sys_mprotect(start, len, prot);
 }
 
-asmlinkage long sys32_pipe(int __user *fd)
-{
-	int retval;
-	int fds[2];
-
-	retval = do_pipe_flags(fds, 0);
-	if (retval)
-		goto out;
-	if (copy_to_user(fd, fds, sizeof(fds)))
-		retval = -EFAULT;
-out:
-	return retval;
-}
-
 asmlinkage long sys32_rt_sigaction(int sig, struct sigaction32 __user *act,
 				   struct sigaction32 __user *oact,
 				   unsigned int sigsetsize)
diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h
index 9825cd64c9b6..eec2a70d4376 100644
--- a/arch/x86/include/asm/agp.h
+++ b/arch/x86/include/asm/agp.h
@@ -22,10 +22,6 @@
  */
 #define flush_agp_cache() wbinvd()
 
-/* Convert a physical address to an address suitable for the GART. */
-#define phys_to_gart(x) (x)
-#define gart_to_phys(x) (x)
-
 /* GATT allocation. Returns/accepts GATT kernel virtual address. */
 #define alloc_gatt_pages(order)		\
 	((char *)__get_free_pages(GFP_KERNEL, (order)))
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 1a37bcdc8606..c240efc74e00 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -73,8 +73,6 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif /* CONFIG_SMP */
 
-const unsigned char *const *find_nop_table(void);
-
 /* alternative assembly primitive: */
 #define ALTERNATIVE(oldinstr, newinstr, feature)			\
 									\
@@ -144,8 +142,6 @@ static inline void apply_paravirt(struct paravirt_patch_site *start,
 #define __parainstructions_end	NULL
 #endif
 
-extern void add_nops(void *insns, unsigned int len);
-
 /*
  * Clear and restore the kernel write-protection flag on the local CPU.
  * Allows the kernel to edit read-only pages.
@@ -161,10 +157,7 @@ extern void add_nops(void *insns, unsigned int len);
  * Intel's errata.
  * On the local CPU you need to be protected again NMI or MCE handlers seeing an
  * inconsistent instruction while you patch.
- * The _early version expects the memory to already be RW.
  */
-
 extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 
 #endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index bb7d47925847..586b7adb8e53 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -183,6 +183,10 @@ static inline int x2apic_enabled(void)
 }
 
 #define x2apic_supported()	(cpu_has_x2apic)
+static inline void x2apic_force_phys(void)
+{
+	x2apic_phys = 1;
+}
 #else
 static inline void check_x2apic(void)
 {
@@ -194,6 +198,9 @@ static inline int x2apic_enabled(void)
 {
 	return 0;
 }
+static inline void x2apic_force_phys(void)
+{
+}
 
 #define x2apic_preenabled 0
 #define x2apic_supported()	0
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 7ddb36ab933b..3b62da926de9 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -8,12 +8,14 @@
  * Ingo Molnar <mingo@redhat.com>, 1999, 2000
  */
 
-#define	APIC_DEFAULT_PHYS_BASE	0xfee00000
+#define	IO_APIC_DEFAULT_PHYS_BASE	0xfec00000
+#define	APIC_DEFAULT_PHYS_BASE		0xfee00000
 
 #define	APIC_ID		0x20
 
 #define	APIC_LVR	0x30
 #define	APIC_LVR_MASK		0xFF00FF
+#define	APIC_LVR_DIRECTED_EOI	(1 << 24)
 #define	GET_APIC_VERSION(x)	((x) & 0xFFu)
 #define	GET_APIC_MAXLVT(x)	(((x) >> 16) & 0xFFu)
 #ifdef CONFIG_X86_32
@@ -40,6 +42,7 @@
 #define	APIC_DFR_CLUSTER	0x0FFFFFFFul
 #define	APIC_DFR_FLAT		0xFFFFFFFFul
 #define	APIC_SPIV	0xF0
+#define	APIC_SPIV_DIRECTED_EOI	(1 << 12)
 #define	APIC_SPIV_FOCUS_DISABLED	(1 << 9)
 #define	APIC_SPIV_APIC_ENABLED	(1 << 8)
 #define	APIC_ISR	0x100
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 56be78f582f0..b3ed1e1460ff 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,7 +3,7 @@
 
 #ifdef __ASSEMBLY__
 # define __ASM_FORM(x)	x
-# define __ASM_EX_SEC	.section __ex_table
+# define __ASM_EX_SEC	.section __ex_table, "a"
 #else
 # define __ASM_FORM(x)	" " #x " "
 # define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
@@ -38,10 +38,18 @@
 #define _ASM_DI		__ASM_REG(di)
 
 /* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE(from,to)	    \
+	__ASM_EX_SEC ;		    \
+	_ASM_ALIGN ;		    \
+	_ASM_PTR from , to ;	    \
+	.previous
+#else
 # define _ASM_EXTABLE(from,to) \
 	__ASM_EX_SEC	\
 	_ASM_ALIGN "\n" \
 	_ASM_PTR #from "," #to "\n" \
 	" .previous\n"
+#endif
 
 #endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/bootparam.h b/arch/x86/include/asm/bootparam.h
index 1724e8de317c..6ca20218dd72 100644
--- a/arch/x86/include/asm/bootparam.h
+++ b/arch/x86/include/asm/bootparam.h
@@ -85,7 +85,8 @@ struct efi_info {
 struct boot_params {
 	struct screen_info screen_info;			/* 0x000 */
 	struct apm_bios_info apm_bios_info;		/* 0x040 */
-	__u8  _pad2[12];				/* 0x054 */
+	__u8  _pad2[4];					/* 0x054 */
+	__u64  tboot_addr;				/* 0x058 */
 	struct ist_info ist_info;			/* 0x060 */
 	__u8  _pad3[16];				/* 0x070 */
 	__u8  hd0_info[16];	/* obsolete! */		/* 0x080 */
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e55dfc1ad453..b54f6afe7ec4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -43,8 +43,58 @@ static inline void copy_from_user_page(struct vm_area_struct *vma,
 	memcpy(dst, src, len);
 }
 
-#define PG_non_WB				PG_arch_1
-PAGEFLAG(NonWB, non_WB)
+#define PG_WC				PG_arch_1
+PAGEFLAG(WC, WC)
+
+#ifdef CONFIG_X86_PAT
+/*
+ * X86 PAT uses page flags WC and Uncached together to keep track of
+ * memory type of pages that have backing page struct. X86 PAT supports 3
+ * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
+ * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
+ * been changed from its default (value of -1 used to denote this).
+ * Note we do not support _PAGE_CACHE_UC here.
+ *
+ * Caller must hold memtype_lock for atomicity.
+ */
+static inline unsigned long get_page_memtype(struct page *pg)
+{
+	if (!PageUncached(pg) && !PageWC(pg))
+		return -1;
+	else if (!PageUncached(pg) && PageWC(pg))
+		return _PAGE_CACHE_WC;
+	else if (PageUncached(pg) && !PageWC(pg))
+		return _PAGE_CACHE_UC_MINUS;
+	else
+		return _PAGE_CACHE_WB;
+}
+
+static inline void set_page_memtype(struct page *pg, unsigned long memtype)
+{
+	switch (memtype) {
+	case _PAGE_CACHE_WC:
+		ClearPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	case _PAGE_CACHE_UC_MINUS:
+		SetPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	case _PAGE_CACHE_WB:
+		SetPageUncached(pg);
+		SetPageWC(pg);
+		break;
+	default:
+	case -1:
+		ClearPageUncached(pg);
+		ClearPageWC(pg);
+		break;
+	}
+}
+#else
+static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
+static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
+#endif
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 4a28d22d4793..9cfc88b97742 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -95,6 +95,8 @@
 #define X86_FEATURE_NONSTOP_TSC	(3*32+24) /* TSC does not stop in C states */
 #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */
 #define X86_FEATURE_EXTD_APICID	(3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM	(3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF	(3*32+28) /* APERFMPERF */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3	(4*32+ 0) /* "pni" SSE-3 */
diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index c68c361697e1..4d447b732d82 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return percpu_read(current_task);
+	return percpu_read_stable(current_task);
 }
 
 #define current get_current()
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index c993e9e0fed4..e8de2f6f5ca5 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -291,11 +291,24 @@ static inline unsigned long get_desc_base(const struct desc_struct *desc)
 	return desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24);
 }
 
+static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
+{
+	desc->base0 = base & 0xffff;
+	desc->base1 = (base >> 16) & 0xff;
+	desc->base2 = (base >> 24) & 0xff;
+}
+
 static inline unsigned long get_desc_limit(const struct desc_struct *desc)
 {
 	return desc->limit0 | (desc->limit << 16);
 }
 
+static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
+{
+	desc->limit0 = limit & 0xffff;
+	desc->limit = (limit >> 16) & 0xf;
+}
+
 static inline void _set_gate(int gate, unsigned type, void *addr,
 			     unsigned dpl, unsigned ist, unsigned seg)
 {
diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
index a6adefa28b94..9d6684849fd9 100644
--- a/arch/x86/include/asm/desc_defs.h
+++ b/arch/x86/include/asm/desc_defs.h
@@ -34,6 +34,12 @@ struct desc_struct {
 	};
 } __attribute__((packed));
 
+#define GDT_ENTRY_INIT(flags, base, limit) { { {		\
+		.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16),	\
+		.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) |	\
+			((limit) & 0xf0000) | ((base) & 0xff000000),	\
+	} } }
+
 enum {
 	GATE_INTERRUPT = 0xE,
 	GATE_TRAP = 0xF,
diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
index 4994a20acbcb..cee34e9ca45b 100644
--- a/arch/x86/include/asm/device.h
+++ b/arch/x86/include/asm/device.h
@@ -13,4 +13,7 @@ struct dma_map_ops *dma_ops;
 #endif
 };
 
+struct pdev_archdata {
+};
+
 #endif /* _ASM_X86_DEVICE_H */
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
index 3afc5e87cfdd..ae6253ab9029 100644
--- a/arch/x86/include/asm/dwarf2.h
+++ b/arch/x86/include/asm/dwarf2.h
@@ -87,9 +87,25 @@
 	CFI_RESTORE \reg
 	.endm
 #else /*!CONFIG_X86_64*/
+	.macro pushl_cfi reg
+	pushl \reg
+	CFI_ADJUST_CFA_OFFSET 4
+	.endm
 
-	/* 32bit defenitions are missed yet */
+	.macro popl_cfi reg
+	popl \reg
+	CFI_ADJUST_CFA_OFFSET -4
+	.endm
 
+	.macro movl_cfi reg offset=0
+	movl %\reg, \offset(%esp)
+	CFI_REL_OFFSET \reg, \offset
+	.endm
+
+	.macro movl_cfi_restore offset reg
+	movl \offset(%esp), %\reg
+	CFI_RESTORE \reg
+	.endm
 #endif /*!CONFIG_X86_64*/
 #endif /*__ASSEMBLY__*/
 
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 83c1bc8d2e8a..456a304b8172 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -299,6 +299,8 @@ do { \
 
 #ifdef CONFIG_X86_32
 
+#define STACK_RND_MASK (0x7ff)
+
 #define VDSO_HIGH_BASE		(__fix_to_virt(FIX_VDSO))
 
 #define ARCH_DLINFO		ARCH_DLINFO_IA32(vdso_enabled)
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index ff8cbfa07851..5e3f2044f0d3 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -61,7 +61,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
 #endif
 
-#ifdef CONFIG_X86_NEW_MCE
+#ifdef CONFIG_X86_MCE
 BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR)
 #endif
 
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 7b2d71df39a6..14f9890eb495 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -132,6 +132,9 @@ enum fixed_addresses {
 #ifdef CONFIG_X86_32
 	FIX_WP_TEST,
 #endif
+#ifdef CONFIG_INTEL_TXT
+	FIX_TBOOT_BASE,
+#endif
 	__end_of_fixed_addresses
 };
 
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 175adf58dd4f..0b20bbb758f2 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -26,6 +26,7 @@ extern void fpu_init(void);
 extern void mxcsr_feature_mask_init(void);
 extern int init_fpu(struct task_struct *child);
 extern asmlinkage void math_state_restore(void);
+extern void __math_state_restore(void);
 extern void init_thread_xstate(void);
 extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
 
@@ -301,6 +302,14 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+static inline bool irq_fpu_usable(void)
+{
+	struct pt_regs *regs;
+
+	return !in_interrupt() || !(regs = get_irq_regs()) || \
+		user_mode(regs) || (read_cr0() & X86_CR0_TS);
+}
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 330ee807f89e..85232d32fcb8 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -150,11 +150,10 @@ extern int timer_through_8259;
 #define io_apic_assign_pci_irqs \
 	(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
-#ifdef CONFIG_ACPI
+extern u8 io_apic_unique_id(u8 id);
 extern int io_apic_get_unique_id(int ioapic, int apic_id);
 extern int io_apic_get_version(int ioapic);
 extern int io_apic_get_redir_entries(int ioapic);
-#endif /* CONFIG_ACPI */
 
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
@@ -177,6 +176,16 @@ extern int setup_ioapic_entry(int apic, int irq,
 			  int polarity, int vector, int pin);
 extern void ioapic_write_entry(int apic, int pin,
 			       struct IO_APIC_route_entry e);
+
+struct mp_ioapic_gsi{
+	int gsi_base;
+	int gsi_end;
+};
+extern struct mp_ioapic_gsi  mp_gsi_routing[];
+int mp_find_ioapic(int gsi);
+int mp_find_ioapic_pin(int ioapic, int gsi);
+void __init mp_register_ioapic(int id, u32 address, u32 gsi_base);
+
 #else  /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
diff --git a/arch/x86/include/asm/ioctls.h b/arch/x86/include/asm/ioctls.h
index 0d5b23b7b06e..ec34c760665e 100644
--- a/arch/x86/include/asm/ioctls.h
+++ b/arch/x86/include/asm/ioctls.h
@@ -1,94 +1 @@
-#ifndef _ASM_X86_IOCTLS_H
-#define _ASM_X86_IOCTLS_H
-
-#include <asm/ioctl.h>
-
-/* 0x54 is just a magic number to make these relatively unique ('T') */
-
-#define TCGETS		0x5401
-#define TCSETS		0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
-#define TCSETSW		0x5403
-#define TCSETSF		0x5404
-#define TCGETA		0x5405
-#define TCSETA		0x5406
-#define TCSETAW		0x5407
-#define TCSETAF		0x5408
-#define TCSBRK		0x5409
-#define TCXONC		0x540A
-#define TCFLSH		0x540B
-#define TIOCEXCL	0x540C
-#define TIOCNXCL	0x540D
-#define TIOCSCTTY	0x540E
-#define TIOCGPGRP	0x540F
-#define TIOCSPGRP	0x5410
-#define TIOCOUTQ	0x5411
-#define TIOCSTI		0x5412
-#define TIOCGWINSZ	0x5413
-#define TIOCSWINSZ	0x5414
-#define TIOCMGET	0x5415
-#define TIOCMBIS	0x5416
-#define TIOCMBIC	0x5417
-#define TIOCMSET	0x5418
-#define TIOCGSOFTCAR	0x5419
-#define TIOCSSOFTCAR	0x541A
-#define FIONREAD	0x541B
-#define TIOCINQ		FIONREAD
-#define TIOCLINUX	0x541C
-#define TIOCCONS	0x541D
-#define TIOCGSERIAL	0x541E
-#define TIOCSSERIAL	0x541F
-#define TIOCPKT		0x5420
-#define FIONBIO		0x5421
-#define TIOCNOTTY	0x5422
-#define TIOCSETD	0x5423
-#define TIOCGETD	0x5424
-#define TCSBRKP		0x5425	/* Needed for POSIX tcsendbreak() */
-/* #define TIOCTTYGSTRUCT 0x5426 - Former debugging-only ioctl */
-#define TIOCSBRK	0x5427	/* BSD compatibility */
-#define TIOCCBRK	0x5428	/* BSD compatibility */
-#define TIOCGSID	0x5429	/* Return the session ID of FD */
-#define TCGETS2		_IOR('T', 0x2A, struct termios2)
-#define TCSETS2		_IOW('T', 0x2B, struct termios2)
-#define TCSETSW2	_IOW('T', 0x2C, struct termios2)
-#define TCSETSF2	_IOW('T', 0x2D, struct termios2)
-#define TIOCGRS485	0x542E
-#define TIOCSRS485	0x542F
-#define TIOCGPTN	_IOR('T', 0x30, unsigned int)
-			/* Get Pty Number (of pty-mux device) */
-#define TIOCSPTLCK	_IOW('T', 0x31, int)	/* Lock/unlock Pty */
-#define TCGETX		0x5432 /* SYS5 TCGETX compatibility */
-#define TCSETX		0x5433
-#define TCSETXF		0x5434
-#define TCSETXW		0x5435
-
-#define FIONCLEX	0x5450
-#define FIOCLEX		0x5451
-#define FIOASYNC	0x5452
-#define TIOCSERCONFIG	0x5453
-#define TIOCSERGWILD	0x5454
-#define TIOCSERSWILD	0x5455
-#define TIOCGLCKTRMIOS	0x5456
-#define TIOCSLCKTRMIOS	0x5457
-#define TIOCSERGSTRUCT	0x5458 /* For debugging only */
-#define TIOCSERGETLSR	0x5459 /* Get line status register */
-#define TIOCSERGETMULTI	0x545A /* Get multiport config */
-#define TIOCSERSETMULTI	0x545B /* Set multiport config */
-
-#define TIOCMIWAIT	0x545C	/* wait for a change on serial input line(s) */
-#define TIOCGICOUNT	0x545D	/* read serial port inline interrupt counts */
-#define TIOCGHAYESESP	0x545E	/* Get Hayes ESP configuration */
-#define TIOCSHAYESESP	0x545F	/* Set Hayes ESP configuration */
-#define FIOQSIZE	0x5460
-
-/* Used for packet mode */
-#define TIOCPKT_DATA		 0
-#define TIOCPKT_FLUSHREAD	 1
-#define TIOCPKT_FLUSHWRITE	 2
-#define TIOCPKT_STOP		 4
-#define TIOCPKT_START		 8
-#define TIOCPKT_NOSTOP		16
-#define TIOCPKT_DOSTOP		32
-
-#define TIOCSER_TEMT	0x01	/* Transmitter physically empty */
-
-#endif /* _ASM_X86_IOCTLS_H */
+#include <asm-generic/ioctls.h>
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index 0e9fe1d9d971..f35eb45d6576 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -26,13 +26,16 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-int
-is_io_mapping_possible(resource_size_t base, unsigned long size);
-
 void *
 iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 
 void
 iounmap_atomic(void *kvaddr, enum km_type type);
 
+int
+iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+void
+iomap_free(resource_size_t base, unsigned long size);
+
 #endif /* _ASM_X86_IOMAP_H */
diff --git a/arch/x86/include/asm/ipcbuf.h b/arch/x86/include/asm/ipcbuf.h
index ee678fd51594..84c7e51cb6d0 100644
--- a/arch/x86/include/asm/ipcbuf.h
+++ b/arch/x86/include/asm/ipcbuf.h
@@ -1,28 +1 @@
-#ifndef _ASM_X86_IPCBUF_H
-#define _ASM_X86_IPCBUF_H
-
-/*
- * The ipc64_perm structure for x86 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 32-bit mode_t and seq
- * - 2 miscellaneous 32-bit values
- */
-
-struct ipc64_perm {
-	__kernel_key_t		key;
-	__kernel_uid32_t	uid;
-	__kernel_gid32_t	gid;
-	__kernel_uid32_t	cuid;
-	__kernel_gid32_t	cgid;
-	__kernel_mode_t		mode;
-	unsigned short		__pad1;
-	unsigned short		seq;
-	unsigned short		__pad2;
-	unsigned long		__unused1;
-	unsigned long		__unused2;
-};
-
-#endif /* _ASM_X86_IPCBUF_H */
+#include <asm-generic/ipcbuf.h>
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index c6ccbe7e81ad..9e2b952f810a 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,14 +13,13 @@ static inline unsigned long native_save_fl(void)
 	unsigned long flags;
 
 	/*
-	 * Note: this needs to be "=r" not "=rm", because we have the
-	 * stack offset from what gcc expects at the time the "pop" is
-	 * executed, and so a memory reference with respect to the stack
-	 * would end up using the wrong address.
+	 * "=rm" is safe here, because "pop" adjusts the stack before
+	 * it evaluates its effective address -- this is part of the
+	 * documented behavior of the "pop" instruction.
 	 */
 	asm volatile("# __raw_save_flags\n\t"
 		     "pushf ; pop %0"
-		     : "=r" (flags)
+		     : "=rm" (flags)
 		     : /* no input */
 		     : "memory");
 
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 125be8b19568..4a5fe914dc59 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -17,6 +17,8 @@
 #define __KVM_HAVE_USER_NMI
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_MSIX
+#define __KVM_HAVE_MCE
+#define __KVM_HAVE_PIT_STATE2
 
 /* Architectural interrupt line count. */
 #define KVM_NR_INTERRUPTS 256
@@ -236,6 +238,14 @@ struct kvm_pit_state {
 	struct kvm_pit_channel_state channels[3];
 };
 
+#define KVM_PIT_FLAGS_HPET_LEGACY  0x00000001
+
+struct kvm_pit_state2 {
+	struct kvm_pit_channel_state channels[3];
+	__u32 flags;
+	__u32 reserved[9];
+};
+
 struct kvm_reinject_control {
 	__u8 pit_reinject;
 	__u8 reserved[31];
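
kvm_pit_state2 extends kvm_pit_state with a flags word so that the HPET-legacy routing state survives save/restore. A hedged userspace sketch of reading it back through the KVM_GET_PIT2 VM ioctl that accompanies __KVM_HAVE_PIT_STATE2 (assumes vm_fd is a KVM VM file descriptor with an in-kernel PIT):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int pit_uses_hpet_legacy(int vm_fd)
{
	struct kvm_pit_state2 ps2;

	if (ioctl(vm_fd, KVM_GET_PIT2, &ps2) < 0)
		return -1;
	return !!(ps2.flags & KVM_PIT_FLAGS_HPET_LEGACY);
}
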
diff --git a/arch/x86/include/asm/kvm_x86_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b7ed2c423116..b7ed2c423116 100644
--- a/arch/x86/include/asm/kvm_x86_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eabdc1cfab5c..3be000435fad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
+#include <linux/tracepoint.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -37,12 +38,14 @@
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |	\
 				  0xFFFFFF0000000000ULL)
 
-#define KVM_GUEST_CR0_MASK				   \
-	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE \
-	 | X86_CR0_NW | X86_CR0_CD)
-#define KVM_VM_CR0_ALWAYS_ON						\
-	(X86_CR0_PG | X86_CR0_PE | X86_CR0_WP | X86_CR0_NE | X86_CR0_TS \
-	 | X86_CR0_MP)
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
+	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK						\
+	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST			\
+	(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+#define KVM_VM_CR0_ALWAYS_ON						\
+	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_GUEST_CR4_MASK						\
 	(X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -51,12 +54,12 @@
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
-/* shadow tables are PAE even on non-PAE hosts */
-#define KVM_HPAGE_SHIFT 21
-#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
-#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
-
-#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+/* KVM Hugepage definitions for x86 */
+#define KVM_NR_PAGE_SIZES	3
+#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
+#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
+#define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
+#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 #define DE_VECTOR 0
 #define DB_VECTOR 1
@@ -120,6 +123,10 @@ enum kvm_reg {
 	NR_VCPU_REGS
 };
 
+enum kvm_reg_ex {
+	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+};
+
 enum {
 	VCPU_SREG_ES,
 	VCPU_SREG_CS,
@@ -131,7 +138,7 @@ enum {
 	VCPU_SREG_LDTR,
 };
 
-#include <asm/kvm_x86_emulate.h>
+#include <asm/kvm_emulate.h>
 
 #define KVM_NR_MEM_OBJS 40
 
@@ -308,7 +315,6 @@ struct kvm_vcpu_arch {
 	struct {
 		gfn_t gfn;	/* presumed gfn during guest pte update */
 		pfn_t pfn;	/* pfn corresponding to that gfn */
-		int largepage;
 		unsigned long mmu_seq;
 	} update_pte;
 
@@ -334,16 +340,6 @@ struct kvm_vcpu_arch {
 		u8 nr;
 	} interrupt;
 
-	struct {
-		int vm86_active;
-		u8 save_iopl;
-		struct kvm_save_segment {
-			u16 selector;
-			unsigned long base;
-			u32 limit;
-			u32 ar;
-		} tr, es, ds, fs, gs;
-	} rmode;
 	int halt_request; /* real mode on Intel only */
 
 	int cpuid_nent;
@@ -366,13 +362,15 @@ struct kvm_vcpu_arch {
 	u32 pat;
 
 	int switch_db_regs;
-	unsigned long host_db[KVM_NR_DB_REGS];
-	unsigned long host_dr6;
-	unsigned long host_dr7;
 	unsigned long db[KVM_NR_DB_REGS];
 	unsigned long dr6;
 	unsigned long dr7;
 	unsigned long eff_db[KVM_NR_DB_REGS];
+
+	u64 mcg_cap;
+	u64 mcg_status;
+	u64 mcg_ctl;
+	u64 *mce_banks;
 };
 
 struct kvm_mem_alias {
@@ -409,6 +407,7 @@ struct kvm_arch{
 
 	struct page *ept_identity_pagetable;
 	bool ept_identity_pagetable_done;
+	gpa_t ept_identity_map_addr;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
@@ -526,6 +525,9 @@ struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	bool (*gb_page_enable)(void);
+
+	const struct trace_print_flags *exit_reasons_str;
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -618,6 +620,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
 			   u32 error_code);
+bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
 
@@ -752,8 +755,6 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 	kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
 }
 
-#define MSR_IA32_TIME_STAMP_COUNTER		0x010
-
 #define TSS_IOPB_BASE_OFFSET 0x66
 #define TSS_BASE_SIZE 0x68
 #define TSS_IOPB_SIZE (65536 / 8)
@@ -796,5 +797,8 @@ asmlinkage void kvm_handle_fault_on_reboot(void);
 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu);
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
+int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 
 #endif /* _ASM_X86_KVM_HOST_H */
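
The reworked hugepage macros index page sizes by level instead of hard-coding the 2 MB case, which is what makes the new gb_page_enable() hook possible. For illustration only, the plain arithmetic the definitions produce with PAGE_SHIFT == 12, compiled stand-alone:

#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define KVM_NR_PAGE_SIZES	3
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	int i;

	/* prints: 4 KiB/1, 2048 KiB/512, 1048576 KiB/262144 */
	for (i = 1; i <= KVM_NR_PAGE_SIZES; i++)
		printf("level %d: %lu KiB, %lu base pages\n", i,
		       KVM_HPAGE_SIZE(i) >> 10, KVM_PAGES_PER_HPAGE(i));
	return 0;
}
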
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index b8a3305ae093..c584076a47f4 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_KVM_PARA_H
 #define _ASM_X86_KVM_PARA_H
 
+#include <linux/types.h>
+
 /* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.  It
  * should be used to determine that a VM is running under KVM.
  */
diff --git a/arch/x86/include/asm/lguest.h b/arch/x86/include/asm/lguest.h
index 5136dad57cbb..0d97deba1e35 100644
--- a/arch/x86/include/asm/lguest.h
+++ b/arch/x86/include/asm/lguest.h
@@ -90,8 +90,9 @@ static inline void lguest_set_ts(void)
 }
 
 /* Full 4G segment descriptors, suitable for CS and DS. */
-#define FULL_EXEC_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9b00} } })
-#define FULL_SEGMENT ((struct desc_struct){ { {0x0000ffff, 0x00cf9300} } })
+#define FULL_EXEC_SEGMENT \
+	((struct desc_struct)GDT_ENTRY_INIT(0xc09b, 0, 0xfffff))
+#define FULL_SEGMENT ((struct desc_struct)GDT_ENTRY_INIT(0xc093, 0, 0xfffff))
 
 #endif /* __ASSEMBLY__ */
 
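
The switch to GDT_ENTRY_INIT() is mechanical: packing the (flags, base, limit) triple per the x86 descriptor layout reproduces the old literals exactly. A stand-alone sanity check (the packing below mirrors what GDT_ENTRY_INIT() in desc_defs.h does; not kernel code):

#include <stdio.h>

static void pack(unsigned flags, unsigned base, unsigned limit)
{
	unsigned a = (limit & 0xffff) | ((base & 0xffff) << 16);
	unsigned b = ((base & 0xff0000) >> 16) | ((flags & 0xf0ff) << 8) |
		     (limit & 0xf0000) | (base & 0xff000000);

	printf("{ 0x%08x, 0x%08x }\n", a, b);
}

int main(void)
{
	pack(0xc09b, 0, 0xfffff);	/* expect { 0x0000ffff, 0x00cf9b00 } */
	pack(0xc093, 0, 0xfffff);	/* expect { 0x0000ffff, 0x00cf9300 } */
	return 0;
}
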
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 5cdd8d100ec9..b608a64c5814 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -9,7 +9,7 @@
  */
 
 #define MCG_BANKCNT_MASK	0xff         /* Number of Banks */
-#define MCG_CTL_P		(1ULL<<8)    /* MCG_CAP register available */
+#define MCG_CTL_P		(1ULL<<8)    /* MCG_CTL register available */
 #define MCG_EXT_P		(1ULL<<9)    /* Extended registers available */
 #define MCG_CMCI_P		(1ULL<<10)   /* CMCI supported */
 #define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */
@@ -38,6 +38,14 @@
 #define MCM_ADDR_MEM	 3	/* memory address */
 #define MCM_ADDR_GENERIC 7	/* generic */
 
+#define MCJ_CTX_MASK		3
+#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM		0    /* inject context: random */
+#define MCJ_CTX_PROCESS		1    /* inject context: process */
+#define MCJ_CTX_IRQ		2    /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST	4    /* do NMI broadcasting */
+#define MCJ_EXCEPTION		8    /* raise as exception */
+
 /* Fields are zero when not available */
 struct mce {
 	__u64 status;
@@ -48,8 +56,8 @@ struct mce {
 	__u64 tsc;	/* cpu time stamp counter */
 	__u64 time;	/* wall time_t when error was detected */
 	__u8  cpuvendor;	/* cpu vendor as encoded in system.h */
-	__u8  pad1;
-	__u16 pad2;
+	__u8  inject_flags;	/* software inject flags */
+	__u16 pad;
 	__u32 cpuid;	/* CPUID 1 EAX */
 	__u8  cs;	/* code segment */
 	__u8  bank;	/* machine check bank */
@@ -115,13 +123,6 @@ void mcheck_init(struct cpuinfo_x86 *c);
 static inline void mcheck_init(struct cpuinfo_x86 *c) {}
 #endif
 
-#ifdef CONFIG_X86_OLD_MCE
-extern int nr_mce_banks;
-void amd_mcheck_init(struct cpuinfo_x86 *c);
-void intel_p4_mcheck_init(struct cpuinfo_x86 *c);
-void intel_p6_mcheck_init(struct cpuinfo_x86 *c);
-#endif
-
 #ifdef CONFIG_X86_ANCIENT_MCE
 void intel_p5_mcheck_init(struct cpuinfo_x86 *c);
 void winchip_mcheck_init(struct cpuinfo_x86 *c);
@@ -137,10 +138,11 @@ void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, mce_dev);
 
 /*
- * To support more than 128 would need to escape the predefined
- * Linux defined extended banks first.
+ * Maximum banks number.
+ * This is the limit of the current register layout on
+ * Intel CPUs.
  */
-#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
+#define MAX_NR_BANKS 32
 
 #ifdef CONFIG_X86_MCE_INTEL
 extern int mce_cmci_disabled;
@@ -208,11 +210,7 @@ extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
 void intel_init_thermal(struct cpuinfo_x86 *c);
 
-#ifdef CONFIG_X86_NEW_MCE
 void mce_log_therm_throt_event(__u64 status);
-#else
-static inline void mce_log_therm_throt_event(__u64 status) {}
-#endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
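
The MCJ_* bits repurpose the old pad1 byte as inject_flags: the low two bits select the context in which an injected MCE is raised, and the remaining bits are modifiers. A small decoding sketch (arithmetic only, the mce-inject tooling itself is not shown):

#include <stdio.h>

#define MCJ_CTX_MASK		3
#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
#define MCJ_CTX_PROCESS		1
#define MCJ_NMI_BROADCAST	4
#define MCJ_EXCEPTION		8

int main(void)
{
	unsigned char inject_flags = MCJ_CTX_PROCESS | MCJ_NMI_BROADCAST;

	/* prints: ctx=1 nmi=1 exc=0 */
	printf("ctx=%u nmi=%u exc=%u\n", MCJ_CTX(inject_flags),
	       !!(inject_flags & MCJ_NMI_BROADCAST),
	       !!(inject_flags & MCJ_EXCEPTION));
	return 0;
}
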
diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
index 751af2550ed9..593e51d4643f 100644
--- a/arch/x86/include/asm/mman.h
+++ b/arch/x86/include/asm/mman.h
@@ -1,20 +1,8 @@
 #ifndef _ASM_X86_MMAN_H
 #define _ASM_X86_MMAN_H
 
-#include <asm-generic/mman-common.h>
-
 #define MAP_32BIT	0x40		/* only give out 32bit addresses */
 
-#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
-#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
-#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
-#define MAP_LOCKED	0x2000		/* pages are locked */
-#define MAP_NORESERVE	0x4000		/* don't check for reservations */
-#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
-#define MAP_NONBLOCK	0x10000		/* do not block on IO */
-#define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
-
-#define MCL_CURRENT	1		/* lock all current mappings */
-#define MCL_FUTURE	2		/* lock all future mappings */
+#include <asm-generic/mman.h>
 
 #endif /* _ASM_X86_MMAN_H */
diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
index 47d62743c4d5..3e2ce58a31a3 100644
--- a/arch/x86/include/asm/module.h
+++ b/arch/x86/include/asm/module.h
@@ -1,18 +1,7 @@
 #ifndef _ASM_X86_MODULE_H
 #define _ASM_X86_MODULE_H
 
-/* x86_32/64 are simple */
-struct mod_arch_specific {};
-
-#ifdef CONFIG_X86_32
-# define Elf_Shdr Elf32_Shdr
-# define Elf_Sym Elf32_Sym
-# define Elf_Ehdr Elf32_Ehdr
-#else
-# define Elf_Shdr Elf64_Shdr
-# define Elf_Sym Elf64_Sym
-# define Elf_Ehdr Elf64_Ehdr
-#endif
+#include <asm-generic/module.h>
 
 #ifdef CONFIG_X86_64
 /* X86_64 does not define MODULE_PROC_FAMILY */
@@ -28,6 +17,8 @@ struct mod_arch_specific {};
 #define MODULE_PROC_FAMILY "586MMX "
 #elif defined CONFIG_MCORE2
 #define MODULE_PROC_FAMILY "CORE2 "
+#elif defined CONFIG_MATOM
+#define MODULE_PROC_FAMILY "ATOM "
 #elif defined CONFIG_M686
 #define MODULE_PROC_FAMILY "686 "
 #elif defined CONFIG_MPENTIUMII
diff --git a/arch/x86/include/asm/msgbuf.h b/arch/x86/include/asm/msgbuf.h
index 7e4e9481f51c..809134c644a6 100644
--- a/arch/x86/include/asm/msgbuf.h
+++ b/arch/x86/include/asm/msgbuf.h
@@ -1,39 +1 @@
-#ifndef _ASM_X86_MSGBUF_H
-#define _ASM_X86_MSGBUF_H
-
-/*
- * The msqid64_ds structure for i386 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space on i386 is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- *
- * Pad space on x8664 is left for:
- * - 2 miscellaneous 64-bit values
- */
-struct msqid64_ds {
-	struct ipc64_perm msg_perm;
-	__kernel_time_t msg_stime;	/* last msgsnd time */
-#ifdef __i386__
-	unsigned long	__unused1;
-#endif
-	__kernel_time_t msg_rtime;	/* last msgrcv time */
-#ifdef __i386__
-	unsigned long	__unused2;
-#endif
-	__kernel_time_t msg_ctime;	/* last change time */
-#ifdef __i386__
-	unsigned long	__unused3;
-#endif
-	unsigned long  msg_cbytes;	/* current number of bytes on queue */
-	unsigned long  msg_qnum;	/* number of messages in queue */
-	unsigned long  msg_qbytes;	/* max number of bytes on queue */
-	__kernel_pid_t msg_lspid;	/* pid of last msgsnd */
-	__kernel_pid_t msg_lrpid;	/* last receive pid */
-	unsigned long  __unused4;
-	unsigned long  __unused5;
-};
-
-#endif /* _ASM_X86_MSGBUF_H */
+#include <asm-generic/msgbuf.h>
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 6be7fc254b59..4ffe09b2ad75 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -81,8 +81,15 @@
 #define MSR_IA32_MC0_ADDR		0x00000402
 #define MSR_IA32_MC0_MISC		0x00000403
 
+#define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
+#define MSR_IA32_MCx_STATUS(x)		(MSR_IA32_MC0_STATUS + 4*(x))
+#define MSR_IA32_MCx_ADDR(x)		(MSR_IA32_MC0_ADDR + 4*(x))
+#define MSR_IA32_MCx_MISC(x)		(MSR_IA32_MC0_MISC + 4*(x))
+
 /* These are consecutive and not in the normal 4er MCE bank block */
 #define MSR_IA32_MC0_CTL2		0x00000280
+#define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))
+
 #define CMCI_EN			(1ULL << 30)
 #define CMCI_THRESHOLD_MASK		0xffffULL
 
@@ -215,6 +222,10 @@
 
 #define THERM_STATUS_PROCHOT		(1 << 0)
 
+#define MSR_THERM2_CTL			0x0000019d
+
+#define MSR_THERM2_CTL_TM_SELECT	(1ULL << 16)
+
 #define MSR_IA32_MISC_ENABLE		0x000001a0
 
 /* MISC_ENABLE bits: architectural */
@@ -374,6 +385,7 @@
 /* AMD-V MSRs */
 
 #define MSR_VM_CR                       0xc0010114
+#define MSR_VM_IGNNE                    0xc0010115
 #define MSR_VM_HSAVE_PA                 0xc0010117
 
 #endif /* _ASM_X86_MSR_INDEX_H */
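
The new MCx accessors encode the bank spacing once: the classic bank block is four MSRs per bank starting at MSR_IA32_MC0_CTL (0x400 in this file), while the CTL2 block is one MSR per bank starting at 0x280. A quick check of the arithmetic, compiled stand-alone:

#include <stdio.h>

#define MSR_IA32_MC0_CTL		0x00000400
#define MSR_IA32_MC0_CTL2		0x00000280
#define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
#define MSR_IA32_MCx_CTL2(x)		(MSR_IA32_MC0_CTL2 + (x))

int main(void)
{
	printf("MC5_CTL  = %#x\n", MSR_IA32_MCx_CTL(5));	/* 0x414 */
	printf("MC5_CTL2 = %#x\n", MSR_IA32_MCx_CTL2(5));	/* 0x285 */
	return 0;
}
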
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 48ad9d29484a..7e2b6ba962ff 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -3,10 +3,16 @@
 
 #include <asm/msr-index.h>
 
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define X86_IOC_RDMSR_REGS	_IOWR('c', 0xA0, __u32[8])
+#define X86_IOC_WRMSR_REGS	_IOWR('c', 0xA1, __u32[8])
+
+#ifdef __KERNEL__
+
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/cpumask.h>
@@ -67,23 +73,7 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
67 ".previous\n\t" 73 ".previous\n\t"
68 _ASM_EXTABLE(2b, 3b) 74 _ASM_EXTABLE(2b, 3b)
69 : [err] "=r" (*err), EAX_EDX_RET(val, low, high) 75 : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
70 : "c" (msr), [fault] "i" (-EFAULT)); 76 : "c" (msr), [fault] "i" (-EIO));
71 return EAX_EDX_VAL(val, low, high);
72}
73
74static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
75 int *err)
76{
77 DECLARE_ARGS(val, low, high);
78
79 asm volatile("2: rdmsr ; xor %0,%0\n"
80 "1:\n\t"
81 ".section .fixup,\"ax\"\n\t"
82 "3: mov %3,%0 ; jmp 1b\n\t"
83 ".previous\n\t"
84 _ASM_EXTABLE(2b, 3b)
85 : "=r" (*err), EAX_EDX_RET(val, low, high)
86 : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
87 return EAX_EDX_VAL(val, low, high); 77 return EAX_EDX_VAL(val, low, high);
88} 78}
89 79
@@ -106,13 +96,16 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=a" (err)
 		     : "c" (msr), "0" (low), "d" (high),
-		       [fault] "i" (-EFAULT)
+		       [fault] "i" (-EIO)
 		     : "memory");
 	return err;
 }
 
 extern unsigned long long native_read_tsc(void);
 
+extern int native_rdmsr_safe_regs(u32 regs[8]);
+extern int native_wrmsr_safe_regs(u32 regs[8]);
+
 static __always_inline unsigned long long __native_read_tsc(void)
 {
 	DECLARE_ARGS(val, low, high);
@@ -181,14 +174,44 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
+	u32 gprs[8] = { 0 };
 	int err;
 
-	*p = native_read_msr_amd_safe(msr, &err);
+	gprs[1] = msr;
+	gprs[7] = 0x9c5a203a;
+
+	err = native_rdmsr_safe_regs(gprs);
+
+	*p = gprs[0] | ((u64)gprs[2] << 32);
+
 	return err;
 }
 
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+	u32 gprs[8] = { 0 };
+
+	gprs[0] = (u32)val;
+	gprs[1] = msr;
+	gprs[2] = val >> 32;
+	gprs[7] = 0x9c5a203a;
+
+	return native_wrmsr_safe_regs(gprs);
+}
+
+static inline int rdmsr_safe_regs(u32 regs[8])
+{
+	return native_rdmsr_safe_regs(regs);
+}
+
+static inline int wrmsr_safe_regs(u32 regs[8])
+{
+	return native_wrmsr_safe_regs(regs);
+}
+
 #define rdtscl(low)						\
 	((low) = (u32)__native_read_tsc())
 
@@ -228,6 +251,8 @@ void rdmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
 void wrmsr_on_cpus(const cpumask_t *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 #else  /*  CONFIG_SMP  */
 static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
@@ -258,7 +283,15 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
 	return wrmsr_safe(msr_no, l, h);
 }
+static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+	return rdmsr_safe_regs(regs);
+}
+static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
+{
+	return wrmsr_safe_regs(regs);
+}
 #endif  /* CONFIG_SMP */
-#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_MSR_H */
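
The regs[8] array appears to mirror the 32-bit general-purpose register file in conventional order (inferred from rdmsrl_amd_safe() above: index 0 is %eax, 1 is %ecx carrying the MSR number, 2 is %edx, 7 is %edi carrying the AMD passcode, with the result coming back in %edx:%eax). Since the new ioctl numbers live outside #ifdef __KERNEL__, userspace can in principle drive the same interface through the msr character device. A hedged sketch, assuming the msr driver in this series wires the ioctl up to /dev/cpu/N/msr:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>

#define X86_IOC_RDMSR_REGS	_IOWR('c', 0xA0, __u32[8])

int main(void)
{
	__u32 regs[8] = { 0 };
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	regs[1] = 0xc0010114;	/* %ecx: MSR_VM_CR, as an example */
	if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) == 0)
		printf("MSR %#x = %#llx\n", regs[1],
		       regs[0] | ((unsigned long long)regs[2] << 32));
	return 0;
}
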
diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index a51ada8467de..4365ffdb461f 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -121,6 +121,9 @@ extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
 extern void mtrr_bp_init(void);
+extern void set_mtrr_aps_delayed_init(void);
+extern void mtrr_aps_init(void);
+extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
 # else
@@ -161,6 +164,9 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
+#define set_mtrr_aps_delayed_init() do {} while (0)
+#define mtrr_aps_init() do {} while (0)
+#define mtrr_bp_restore() do {} while (0)
 # endif
 
 #ifdef CONFIG_COMPAT
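
The three new hooks let the boot CPU defer per-AP MTRR programming and then bring every AP's MTRRs up together, with mtrr_bp_restore() as the resume-path counterpart. The intended ordering, sketched from the names alone (the real call sites live in the smpboot and suspend paths, not here):

void example_smp_bringup(void)
{
	set_mtrr_aps_delayed_init();	/* mtrr_ap_init() now defers */

	/* ... bring up each secondary CPU ... */

	mtrr_aps_init();		/* program all APs in one pass */
}
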
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index ad2668ee1aa7..6d8723a766cc 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -65,6 +65,8 @@
 	6: osp nopl 0x00(%eax,%eax,1)
 	7: nopl 0x00000000(%eax)
 	8: nopl 0x00000000(%eax,%eax,1)
+	Note: All the above are assumed to be a single instruction.
+		There is kernel code that depends on this.
 */
 #define P6_NOP1	GENERIC_NOP1
 #define P6_NOP2	".byte 0x66,0x90\n"
diff --git a/arch/x86/include/asm/param.h b/arch/x86/include/asm/param.h
index 6f0d0422f4ca..965d45427975 100644
--- a/arch/x86/include/asm/param.h
+++ b/arch/x86/include/asm/param.h
@@ -1,22 +1 @@
-#ifndef _ASM_X86_PARAM_H
-#define _ASM_X86_PARAM_H
-
-#ifdef __KERNEL__
-# define HZ		CONFIG_HZ	/* Internal kernel timer frequency */
-# define USER_HZ	100		/* some user interfaces are */
-# define CLOCKS_PER_SEC	(USER_HZ)       /* in "ticks" like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
-#define EXEC_PAGESIZE	4096
-
-#ifndef NOGROUP
-#define NOGROUP		(-1)
-#endif
-
-#define MAXHOSTNAMELEN	64	/* max length of hostname */
-
-#endif /* _ASM_X86_PARAM_H */
+#include <asm-generic/param.h>
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4fb37c8a0832..40d6586af25b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -7,689 +7,11 @@
 #include <asm/pgtable_types.h>
 #include <asm/asm.h>
 
-/* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0
-#define CLBR_EAX  (1 << 0)
-#define CLBR_ECX  (1 << 1)
-#define CLBR_EDX  (1 << 2)
-#define CLBR_EDI  (1 << 3)
-
-#ifdef CONFIG_X86_32
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY  ((1 << 4) - 1)
-
-#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
-#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
-#define CLBR_SCRATCH	(0)
-#else
-#define CLBR_RAX  CLBR_EAX
-#define CLBR_RCX  CLBR_ECX
-#define CLBR_RDX  CLBR_EDX
-#define CLBR_RDI  CLBR_EDI
-#define CLBR_RSI  (1 << 4)
-#define CLBR_R8   (1 << 5)
-#define CLBR_R9   (1 << 6)
-#define CLBR_R10  (1 << 7)
-#define CLBR_R11  (1 << 8)
-
-#define CLBR_ANY  ((1 << 9) - 1)
-
-#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
-			 CLBR_RCX | CLBR_R8 | CLBR_R9)
-#define CLBR_RET_REG	(CLBR_RAX)
-#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)
-
-#include <asm/desc_defs.h>
-#endif /* X86_64 */
-
-#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+#include <asm/paravirt_types.h>
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
-#include <asm/kmap_types.h>
-#include <asm/desc_defs.h>
-
-struct page;
-struct thread_struct;
-struct desc_ptr;
-struct tss_struct;
-struct mm_struct;
-struct desc_struct;
-struct task_struct;
-
-/*
- * Wrapper type for pointers to code which uses the non-standard
- * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
- */
-struct paravirt_callee_save {
-	void *func;
-};
-
-/* general info */
-struct pv_info {
-	unsigned int kernel_rpl;
-	int shared_kernel_pmd;
-	int paravirt_enabled;
-	const char *name;
-};
-
-struct pv_init_ops {
-	/*
-	 * Patch may replace one of the defined code sequences with
-	 * arbitrary code, subject to the same register constraints.
-	 * This generally means the code is not free to clobber any
-	 * registers other than EAX.  The patch function should return
-	 * the number of bytes of code generated, as we nop pad the
-	 * rest in generic code.
-	 */
-	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
-			  unsigned long addr, unsigned len);
-
-	/* Basic arch-specific setup */
-	void (*arch_setup)(void);
-	char *(*memory_setup)(void);
-	void (*post_allocator_init)(void);
-
-	/* Print a banner to identify the environment */
-	void (*banner)(void);
-};
-
-
-struct pv_lazy_ops {
-	/* Set deferred update mode, used for batching operations. */
-	void (*enter)(void);
-	void (*leave)(void);
-};
-
-struct pv_time_ops {
-	void (*time_init)(void);
-
-	/* Get and set time of day */
-	unsigned long (*get_wallclock)(void);
-	int (*set_wallclock)(unsigned long);
-
-	unsigned long long (*sched_clock)(void);
-	unsigned long (*get_tsc_khz)(void);
-};
-
-struct pv_cpu_ops {
-	/* hooks for various privileged instructions */
-	unsigned long (*get_debugreg)(int regno);
-	void (*set_debugreg)(int regno, unsigned long value);
-
-	void (*clts)(void);
-
-	unsigned long (*read_cr0)(void);
-	void (*write_cr0)(unsigned long);
-
-	unsigned long (*read_cr4_safe)(void);
-	unsigned long (*read_cr4)(void);
-	void (*write_cr4)(unsigned long);
-
-#ifdef CONFIG_X86_64
-	unsigned long (*read_cr8)(void);
-	void (*write_cr8)(unsigned long);
-#endif
-
-	/* Segment descriptor handling */
-	void (*load_tr_desc)(void);
-	void (*load_gdt)(const struct desc_ptr *);
-	void (*load_idt)(const struct desc_ptr *);
-	void (*store_gdt)(struct desc_ptr *);
-	void (*store_idt)(struct desc_ptr *);
-	void (*set_ldt)(const void *desc, unsigned entries);
-	unsigned long (*store_tr)(void);
-	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-#ifdef CONFIG_X86_64
-	void (*load_gs_index)(unsigned int idx);
-#endif
-	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
-				const void *desc);
-	void (*write_gdt_entry)(struct desc_struct *,
-				int entrynum, const void *desc, int size);
-	void (*write_idt_entry)(gate_desc *,
-				int entrynum, const gate_desc *gate);
-	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
-	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
-
-	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
-
-	void (*set_iopl_mask)(unsigned mask);
-
-	void (*wbinvd)(void);
-	void (*io_delay)(void);
-
-	/* cpuid emulation, mostly so that caps bits can be disabled */
-	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
-		      unsigned int *ecx, unsigned int *edx);
-
-	/* MSR, PMC and TSC operations.
-	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
-	u64 (*read_msr_amd)(unsigned int msr, int *err);
-	u64 (*read_msr)(unsigned int msr, int *err);
-	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
-
-	u64 (*read_tsc)(void);
-	u64 (*read_pmc)(int counter);
-	unsigned long long (*read_tscp)(unsigned int *aux);
-
-	/*
-	 * Atomically enable interrupts and return to userspace.  This
-	 * is only ever used to return to 32-bit processes; in a
-	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
-	 * never native 64-bit processes.  (Jump, not call.)
-	 */
-	void (*irq_enable_sysexit)(void);
-
-	/*
-	 * Switch to usermode gs and return to 64-bit usermode using
-	 * sysret.  Only used in 64-bit kernels to return to 64-bit
-	 * processes.  Usermode register state, including %rsp, must
-	 * already be restored.
-	 */
-	void (*usergs_sysret64)(void);
-
-	/*
-	 * Switch to usermode gs and return to 32-bit usermode using
-	 * sysret.  Used to return to 32-on-64 compat processes.
-	 * Other usermode register state, including %esp, must already
-	 * be restored.
-	 */
-	void (*usergs_sysret32)(void);
-
-	/* Normal iret.  Jump to this with the standard iret stack
-	   frame set up. */
-	void (*iret)(void);
-
-	void (*swapgs)(void);
-
-	void (*start_context_switch)(struct task_struct *prev);
-	void (*end_context_switch)(struct task_struct *next);
-};
-
-struct pv_irq_ops {
-	void (*init_IRQ)(void);
-
-	/*
-	 * Get/set interrupt state.  save_fl and restore_fl are only
-	 * expected to use X86_EFLAGS_IF; all other bits
-	 * returned from save_fl are undefined, and may be ignored by
-	 * restore_fl.
-	 *
-	 * NOTE: These functions' callers expect the callee to preserve
-	 * more registers than the standard C calling convention.
-	 */
-	struct paravirt_callee_save save_fl;
-	struct paravirt_callee_save restore_fl;
-	struct paravirt_callee_save irq_disable;
-	struct paravirt_callee_save irq_enable;
-
-	void (*safe_halt)(void);
-	void (*halt)(void);
-
-#ifdef CONFIG_X86_64
-	void (*adjust_exception_frame)(void);
-#endif
-};
-
-struct pv_apic_ops {
-#ifdef CONFIG_X86_LOCAL_APIC
-	void (*setup_boot_clock)(void);
-	void (*setup_secondary_clock)(void);
-
-	void (*startup_ipi_hook)(int phys_apicid,
-				 unsigned long start_eip,
-				 unsigned long start_esp);
-#endif
-};
-
-struct pv_mmu_ops {
-	/*
-	 * Called before/after init_mm pagetable setup. setup_start
-	 * may reset %cr3, and may pre-install parts of the pagetable;
-	 * pagetable setup is expected to preserve any existing
-	 * mapping.
-	 */
-	void (*pagetable_setup_start)(pgd_t *pgd_base);
-	void (*pagetable_setup_done)(pgd_t *pgd_base);
-
-	unsigned long (*read_cr2)(void);
-	void (*write_cr2)(unsigned long);
-
-	unsigned long (*read_cr3)(void);
-	void (*write_cr3)(unsigned long);
-
-	/*
-	 * Hooks for intercepting the creation/use/destruction of an
-	 * mm_struct.
-	 */
-	void (*activate_mm)(struct mm_struct *prev,
-			    struct mm_struct *next);
-	void (*dup_mmap)(struct mm_struct *oldmm,
-			 struct mm_struct *mm);
-	void (*exit_mmap)(struct mm_struct *mm);
-
-
-	/* TLB operations */
-	void (*flush_tlb_user)(void);
-	void (*flush_tlb_kernel)(void);
-	void (*flush_tlb_single)(unsigned long addr);
-	void (*flush_tlb_others)(const struct cpumask *cpus,
-				 struct mm_struct *mm,
-				 unsigned long va);
-
-	/* Hooks for allocating and freeing a pagetable top-level */
-	int  (*pgd_alloc)(struct mm_struct *mm);
-	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
-
-	/*
-	 * Hooks for allocating/releasing pagetable pages when they're
-	 * attached to a pagetable
-	 */
-	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
-	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
-	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
-	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
-	void (*release_pte)(unsigned long pfn);
-	void (*release_pmd)(unsigned long pfn);
-	void (*release_pud)(unsigned long pfn);
-
-	/* Pagetable manipulation functions */
-	void (*set_pte)(pte_t *ptep, pte_t pteval);
-	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep, pte_t pteval);
-	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
-	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep);
-	void (*pte_update_defer)(struct mm_struct *mm,
-				 unsigned long addr, pte_t *ptep);
-
-	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
-					pte_t *ptep);
-	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
-					pte_t *ptep, pte_t pte);
-
-	struct paravirt_callee_save pte_val;
-	struct paravirt_callee_save make_pte;
-
-	struct paravirt_callee_save pgd_val;
-	struct paravirt_callee_save make_pgd;
-
-#if PAGETABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
-	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
-			  pte_t *ptep);
-	void (*pmd_clear)(pmd_t *pmdp);
-
-#endif	/* CONFIG_X86_PAE */
-
-	void (*set_pud)(pud_t *pudp, pud_t pudval);
-
-	struct paravirt_callee_save pmd_val;
-	struct paravirt_callee_save make_pmd;
-
-#if PAGETABLE_LEVELS == 4
-	struct paravirt_callee_save pud_val;
-	struct paravirt_callee_save make_pud;
-
-	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
-#endif	/* PAGETABLE_LEVELS == 4 */
-#endif	/* PAGETABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_HIGHPTE
-	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
-	struct pv_lazy_ops lazy_mode;
-
-	/* dom0 ops */
-
-	/* Sometimes the physical address is a pfn, and sometimes it's
-	   an mfn.  We can tell which is which from the index. */
-	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
-			   phys_addr_t phys, pgprot_t flags);
-};
-
-struct raw_spinlock;
-struct pv_lock_ops {
-	int (*spin_is_locked)(struct raw_spinlock *lock);
-	int (*spin_is_contended)(struct raw_spinlock *lock);
-	void (*spin_lock)(struct raw_spinlock *lock);
-	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct raw_spinlock *lock);
-	void (*spin_unlock)(struct raw_spinlock *lock);
-};
-
-/* This contains all the paravirt structures: we get a convenient
- * number for each function using the offset which we use to indicate
- * what to patch. */
-struct paravirt_patch_template {
-	struct pv_init_ops pv_init_ops;
-	struct pv_time_ops pv_time_ops;
-	struct pv_cpu_ops pv_cpu_ops;
-	struct pv_irq_ops pv_irq_ops;
-	struct pv_apic_ops pv_apic_ops;
-	struct pv_mmu_ops pv_mmu_ops;
-	struct pv_lock_ops pv_lock_ops;
-};
-
-extern struct pv_info pv_info;
-extern struct pv_init_ops pv_init_ops;
-extern struct pv_time_ops pv_time_ops;
-extern struct pv_cpu_ops pv_cpu_ops;
-extern struct pv_irq_ops pv_irq_ops;
-extern struct pv_apic_ops pv_apic_ops;
-extern struct pv_mmu_ops pv_mmu_ops;
-extern struct pv_lock_ops pv_lock_ops;
-
-#define PARAVIRT_PATCH(x)					\
-	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
-
-#define paravirt_type(op)				\
-	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
-	[paravirt_opptr] "i" (&(op))
-#define paravirt_clobber(clobber)		\
-	[paravirt_clobber] "i" (clobber)
-
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type, clobber)	\
-	"771:\n\t" insn_string "\n" "772:\n"		\
-	".pushsection .parainstructions,\"a\"\n"	\
-	_ASM_ALIGN "\n"					\
-	_ASM_PTR " 771b\n"				\
-	" .byte " type "\n"				\
-	" .byte 772b-771b\n"				\
-	" .short " clobber "\n"				\
-	".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string)					\
-	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
-
-/* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code)					\
-	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
-	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
-
-unsigned paravirt_patch_nop(void);
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
-unsigned paravirt_patch_ignore(unsigned len);
-unsigned paravirt_patch_call(void *insnbuf,
-			     const void *target, u16 tgt_clobbers,
-			     unsigned long addr, u16 site_clobbers,
-			     unsigned len);
-unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
-			    unsigned long addr, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
-				unsigned long addr, unsigned len);
-
-unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
-			      const char *start, const char *end);
-
-unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
-		      unsigned long addr, unsigned len);
-
-int paravirt_disable_iospace(void);
-
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"
-
-/*
- * These macros are intended to wrap calls through one of the paravirt
- * ops structs, so that they can be later identified and patched at
- * runtime.
- *
- * Normally, a call to a pv_op function is a simple indirect call:
- * (pv_op_struct.operations)(args...).
- *
- * Unfortunately, this is a relatively slow operation for modern CPUs,
- * because it cannot necessarily determine what the destination
- * address is.  In this case, the address is a runtime constant, so at
- * the very least we can patch the call to be a simple direct call, or
- * ideally, patch an inline implementation into the callsite.  (Direct
- * calls are essentially free, because the call and return addresses
- * are completely predictable.)
- *
- * For i386, these macros rely on the standard gcc "regparm(3)" calling
- * convention, in which the first three arguments are placed in %eax,
- * %edx, %ecx (in that order), and the remaining arguments are placed
- * on the stack.  All caller-save registers (eax,edx,ecx) are expected
- * to be modified (either clobbered or used for return values).
- * X86_64, on the other hand, already specifies a register-based calling
- * convention, returning in %rax, with parameters going in %rdi, %rsi,
- * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
- * special handling for dealing with 4 arguments, unlike i386.
- * However, x86_64 also has to clobber all caller saved registers, which
- * unfortunately are quite a few (r8 - r11).
- *
- * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
- * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend pv_init_ops
- * implementation.
- *
- * Unfortunately there's no way to get gcc to generate the args setup
- * for the call, and then allow the call itself to be generated by an
- * inline asm.  Because of this, we must do the complete arg setup and
- * return value handling from within these macros.  This is fairly
- * cumbersome.
- *
- * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
- * It could be extended to more arguments, but there would be little
- * to be gained from that.  For each number of arguments, there are
- * the two VCALL and CALL variants for void and non-void functions.
- *
- * When there is a return value, the invoker of the macro must specify
- * the return type.  The macro then uses sizeof() on that type to
- * determine whether it's a 32 or 64 bit value, and places the return
- * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
- * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
- * the return value size.
- *
- * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
- * i386 also passes 64-bit arguments as a pair of adjacent 32-bit arguments,
- * in low,high order.
- *
- * Small structures are passed and returned in registers.  The macro
- * calling convention can't directly deal with this, so the wrapper
- * functions must do this.
- *
- * These PVOP_* macros are only defined within this header.  This
- * means that all uses must be wrapped in inline functions.  This also
- * makes sure the incoming and outgoing types are always correct.
- */
-#ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS				\
-	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS
-
-#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
-#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
-#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))
-
-#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
-					"=c" (__ecx)
-#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS
-
-#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
-#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS
-
-#define EXTRA_CLOBBERS
-#define VEXTRA_CLOBBERS
-#else  /* CONFIG_X86_64 */
-#define PVOP_VCALL_ARGS					\
-	unsigned long __edi = __edi, __esi = __esi,	\
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
-
-#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
-#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
-#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
-#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))
-
-#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
-				"=S" (__esi), "=d" (__edx),		\
-				"=c" (__ecx)
-#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
-
-#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
-#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
-
-#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
-#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
-#endif	/* CONFIG_X86_32 */
-
-#ifdef CONFIG_PARAVIRT_DEBUG
-#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
-#else
-#define PVOP_TEST_NULL(op)	((void)op)
-#endif
-
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
-		      pre, post, ...)					\
-	({								\
-		rettype __ret;						\
-		PVOP_CALL_ARGS;						\
-		PVOP_TEST_NULL(op);					\
-		/* This is 32-bit specific, but is okay in 64-bit */	\
-		/* since this condition will never hold */		\
-		if (sizeof(rettype) > sizeof(unsigned long)) {		\
-			asm volatile(pre				\
-				     paravirt_alt(PARAVIRT_CALL)	\
-				     post				\
-				     : call_clbr			\
-				     : paravirt_type(op),		\
-				       paravirt_clobber(clbr),		\
-				       ##__VA_ARGS__			\
-				     : "memory", "cc" extra_clbr);	\
-			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
-		} else {						\
-			asm volatile(pre				\
-				     paravirt_alt(PARAVIRT_CALL)	\
-				     post				\
-				     : call_clbr			\
-				     : paravirt_type(op),		\
-				       paravirt_clobber(clbr),		\
-				       ##__VA_ARGS__			\
-				     : "memory", "cc" extra_clbr);	\
-			__ret = (rettype)__eax;				\
-		}							\
-		__ret;							\
-	})
-
-#define __PVOP_CALL(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
-		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
-
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
-		      PVOP_CALLEE_CLOBBERS, ,				\
-		      pre, post, ##__VA_ARGS__)
-
-
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
-	({								\
-		PVOP_VCALL_ARGS;					\
-		PVOP_TEST_NULL(op);					\
-		asm volatile(pre					\
-			     paravirt_alt(PARAVIRT_CALL)		\
-			     post					\
-			     : call_clbr				\
-			     : paravirt_type(op),			\
-			       paravirt_clobber(clbr),			\
-			       ##__VA_ARGS__				\
-			     : "memory", "cc" extra_clbr);		\
-	})
-
-#define __PVOP_VCALL(op, pre, post, ...)				\
-	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
-		       VEXTRA_CLOBBERS,					\
-		       pre, post, ##__VA_ARGS__)
-
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
-		      PVOP_VCALLEE_CLOBBERS, ,				\
-		      pre, post, ##__VA_ARGS__)
-
-
-
-#define PVOP_CALL0(rettype, op)						\
-	__PVOP_CALL(rettype, op, "", "")
-#define PVOP_VCALL0(op)							\
-	__PVOP_VCALL(op, "", "")
-
-#define PVOP_CALLEE0(rettype, op)					\
-	__PVOP_CALLEESAVE(rettype, op, "", "")
-#define PVOP_VCALLEE0(op)						\
-	__PVOP_VCALLEESAVE(op, "", "")
-
-
-#define PVOP_CALL1(rettype, op, arg1)					\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALL1(op, arg1)						\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
-
-#define PVOP_CALLEE1(rettype, op, arg1)					\
-	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALLEE1(op, arg1)						\
-	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
-
-
-#define PVOP_CALL2(rettype, op, arg1, arg2)				\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
-		    PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALL2(op, arg1, arg2)					\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
-		     PVOP_CALL_ARG2(arg2))
-
-#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
-	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
-			  PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2)					\
-	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
-			   PVOP_CALL_ARG2(arg2))
-
-
-#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
-		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
-		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-
-/* This is the only difference in x86_64. We can make it much simpler */
-#ifdef CONFIG_X86_32
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op,					\
-		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
-		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
-		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op,						\
-		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
-		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
-		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
-#else
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op, "", "",				\
-		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
-		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op, "", "",					\
-		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
-		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#endif
 
 static inline int paravirt_enabled(void)
 {
@@ -820,15 +142,22 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
-static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+
+static inline int paravirt_rdmsr_regs(u32 *regs)
 {
-	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+	return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
 }
+
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
 }
 
+static inline int paravirt_wrmsr_regs(u32 *regs)
+{
+	return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
+}
+
 /* These should all do BUG_ON(_err), but our headers are too tangled. */
 #define rdmsr(msr, val1, val2)			\
 do {						\
@@ -862,6 +191,9 @@ do { \
 	_err;					\
 })
 
+#define rdmsr_safe_regs(regs)	paravirt_rdmsr_regs(regs)
+#define wrmsr_safe_regs(regs)	paravirt_wrmsr_regs(regs)
+
 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 {
 	int err;
@@ -871,12 +203,31 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 }
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
+	u32 gprs[8] = { 0 };
 	int err;
 
-	*p = paravirt_read_msr_amd(msr, &err);
+	gprs[1] = msr;
+	gprs[7] = 0x9c5a203a;
+
+	err = paravirt_rdmsr_regs(gprs);
+
+	*p = gprs[0] | ((u64)gprs[2] << 32);
+
 	return err;
 }
 
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+	u32 gprs[8] = { 0 };
+
+	gprs[0] = (u32)val;
+	gprs[1] = msr;
+	gprs[2] = val >> 32;
+	gprs[7] = 0x9c5a203a;
+
+	return paravirt_wrmsr_regs(gprs);
+}
+
 static inline u64 paravirt_read_tsc(void)
 {
 	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
@@ -1393,20 +744,6 @@ static inline void pmd_clear(pmd_t *pmdp)
 }
 #endif	/* CONFIG_X86_PAE */
 
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
-
-enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
-void paravirt_start_context_switch(struct task_struct *prev);
-void paravirt_end_context_switch(struct task_struct *next);
-
-void paravirt_enter_lazy_mmu(void);
-void paravirt_leave_lazy_mmu(void);
-
 #define  __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
 {
1412{ 749{
@@ -1437,12 +774,6 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 	pv_mmu_ops.set_fixmap(idx, phys, flags);
 }
 
-void _paravirt_nop(void);
-u32 _paravirt_ident_32(u32);
-u64 _paravirt_ident_64(u64);
-
-#define paravirt_nop	((void *)_paravirt_nop)
-
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
@@ -1479,17 +810,6 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 #endif
 
-/* These all sit in the .parainstructions section to tell us what to patch. */
-struct paravirt_patch_site {
-	u8 *instr;		/* original instructions */
-	u8 instrtype;		/* type of this instruction */
-	u8 len;			/* length of original instruction */
-	u16 clobbers;		/* what registers you may clobber */
-};
-
-extern struct paravirt_patch_site __parainstructions[],
-	__parainstructions_end[];
-
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
 #define PV_RESTORE_REGS "popl %edx; popl %ecx;"
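
What remains in paravirt.h after this split is essentially the inline wrappers; everything the patching machinery needs to describe a call site moved to the new paravirt_types.h below. The shape the PVOP_* macros implement reduces to an indirect call through an ops table that apply_paravirt() can later rewrite in place. A greatly simplified, user-space toy model of just that shape (none of the real patching or register discipline is shown):

#include <stdio.h>

struct pv_cpu_ops_model {
	unsigned long (*read_cr0)(void);
};

static unsigned long native_read_cr0_model(void)
{
	return 0x80050033;	/* a pretend CR0 value */
}

static struct pv_cpu_ops_model pv_cpu_ops_model = {
	.read_cr0 = native_read_cr0_model,
};

static inline unsigned long read_cr0_model(void)
{
	/* In the kernel this call site would be recorded in
	 * .parainstructions so the patcher can find it; here it
	 * simply stays an indirect call. */
	return pv_cpu_ops_model.read_cr0();
}

int main(void)
{
	printf("cr0 = %#lx\n", read_cr0_model());
	return 0;
}
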
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
new file mode 100644
index 000000000000..25402d0006e7
--- /dev/null
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -0,0 +1,721 @@
1#ifndef _ASM_X86_PARAVIRT_TYPES_H
2#define _ASM_X86_PARAVIRT_TYPES_H
3
4/* Bitmask of what can be clobbered: usually at least eax. */
5#define CLBR_NONE 0
6#define CLBR_EAX (1 << 0)
7#define CLBR_ECX (1 << 1)
8#define CLBR_EDX (1 << 2)
9#define CLBR_EDI (1 << 3)
10
11#ifdef CONFIG_X86_32
12/* CLBR_ANY should match all regs platform has. For i386, that's just it */
13#define CLBR_ANY ((1 << 4) - 1)
14
15#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
16#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
17#define CLBR_SCRATCH (0)
18#else
19#define CLBR_RAX CLBR_EAX
20#define CLBR_RCX CLBR_ECX
21#define CLBR_RDX CLBR_EDX
22#define CLBR_RDI CLBR_EDI
23#define CLBR_RSI (1 << 4)
24#define CLBR_R8 (1 << 5)
25#define CLBR_R9 (1 << 6)
26#define CLBR_R10 (1 << 7)
27#define CLBR_R11 (1 << 8)
28
29#define CLBR_ANY ((1 << 9) - 1)
30
31#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
32 CLBR_RCX | CLBR_R8 | CLBR_R9)
33#define CLBR_RET_REG (CLBR_RAX)
34#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
35
36#endif /* X86_64 */
37
38#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
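A quick worked example may help here (an editor's sketch, not part of the patch): on 32-bit, CLBR_CALLEE_SAVE reduces to just %ecx, since (EAX|EDX|ECX | 0) & ~(EAX|EDX) == ECX. A standalone program can confirm the arithmetic:

#include <stdio.h>

/* Re-derive the 32-bit masks defined above (illustrative copy). */
#define CLBR_EAX      (1 << 0)
#define CLBR_ECX      (1 << 1)
#define CLBR_EDX      (1 << 2)
#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG  (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH  (0)

int main(void)
{
	/* Prints 0x2 == CLBR_ECX: only %ecx needs preserving. */
	printf("%#x\n", (CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG);
	return 0;
}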
39
40#ifndef __ASSEMBLY__
41
42#include <asm/desc_defs.h>
43#include <asm/kmap_types.h>
44
45struct page;
46struct thread_struct;
47struct desc_ptr;
48struct tss_struct;
49struct mm_struct;
50struct desc_struct;
51struct task_struct;
52struct cpumask;
53
54/*
 55 * Wrapper type for pointers to code which uses the non-standard
 56 * calling convention. See PV_CALLEE_SAVE_REGS_THUNK in paravirt.h.
57 */
58struct paravirt_callee_save {
59 void *func;
60};
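A backend typically builds one of these wrappers with a PV_CALLEE_SAVE-style helper; a minimal sketch from memory (the real macro and the register-saving asm thunk live in paravirt.h, not in this header):

/* Sketch: wrap an ordinary function pointer in the callee-save type.
 * The matching asm thunk is what actually preserves the extra regs. */
#define PV_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })

/* e.g.: pv_irq_ops.save_fl = PV_CALLEE_SAVE(native_save_fl); */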
61
62/* general info */
63struct pv_info {
64 unsigned int kernel_rpl;
65 int shared_kernel_pmd;
66 int paravirt_enabled;
67 const char *name;
68};
69
70struct pv_init_ops {
71 /*
72 * Patch may replace one of the defined code sequences with
73 * arbitrary code, subject to the same register constraints.
74 * This generally means the code is not free to clobber any
75 * registers other than EAX. The patch function should return
76 * the number of bytes of code generated, as we nop pad the
77 * rest in generic code.
78 */
79 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
80 unsigned long addr, unsigned len);
81
82 /* Basic arch-specific setup */
83 void (*arch_setup)(void);
84 char *(*memory_setup)(void);
85 void (*post_allocator_init)(void);
86
87 /* Print a banner to identify the environment */
88 void (*banner)(void);
89};
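To make the patch hook's contract concrete, here is a hedged sketch of a backend implementation; paravirt_patch_insns() and paravirt_patch_default() are declared later in this header, while the start_/end_ symbols assume a DEF_NATIVE() definition like the ones shown further below:

/* Sketch: emit a known native sequence for one op, fall back to the
 * generic patcher for everything else. The return value is the number
 * of bytes written; the caller nop-pads the rest of the site. */
static unsigned my_patch(u8 type, u16 clobbers, void *insnbuf,
			 unsigned long addr, unsigned len)
{
	if (type == PARAVIRT_PATCH(pv_irq_ops.irq_disable))
		return paravirt_patch_insns(insnbuf, len,
				start_pv_irq_ops_irq_disable,
				end_pv_irq_ops_irq_disable);

	return paravirt_patch_default(type, clobbers, insnbuf, addr, len);
}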
90
91
92struct pv_lazy_ops {
93 /* Set deferred update mode, used for batching operations. */
94 void (*enter)(void);
95 void (*leave)(void);
96};
97
98struct pv_time_ops {
99 void (*time_init)(void);
100
 101 /* Get and set the time of day */
102 unsigned long (*get_wallclock)(void);
103 int (*set_wallclock)(unsigned long);
104
105 unsigned long long (*sched_clock)(void);
106 unsigned long (*get_tsc_khz)(void);
107};
108
109struct pv_cpu_ops {
110 /* hooks for various privileged instructions */
111 unsigned long (*get_debugreg)(int regno);
112 void (*set_debugreg)(int regno, unsigned long value);
113
114 void (*clts)(void);
115
116 unsigned long (*read_cr0)(void);
117 void (*write_cr0)(unsigned long);
118
119 unsigned long (*read_cr4_safe)(void);
120 unsigned long (*read_cr4)(void);
121 void (*write_cr4)(unsigned long);
122
123#ifdef CONFIG_X86_64
124 unsigned long (*read_cr8)(void);
125 void (*write_cr8)(unsigned long);
126#endif
127
128 /* Segment descriptor handling */
129 void (*load_tr_desc)(void);
130 void (*load_gdt)(const struct desc_ptr *);
131 void (*load_idt)(const struct desc_ptr *);
132 void (*store_gdt)(struct desc_ptr *);
133 void (*store_idt)(struct desc_ptr *);
134 void (*set_ldt)(const void *desc, unsigned entries);
135 unsigned long (*store_tr)(void);
136 void (*load_tls)(struct thread_struct *t, unsigned int cpu);
137#ifdef CONFIG_X86_64
138 void (*load_gs_index)(unsigned int idx);
139#endif
140 void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
141 const void *desc);
142 void (*write_gdt_entry)(struct desc_struct *,
143 int entrynum, const void *desc, int size);
144 void (*write_idt_entry)(gate_desc *,
145 int entrynum, const gate_desc *gate);
146 void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
147 void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
148
149 void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
150
151 void (*set_iopl_mask)(unsigned mask);
152
153 void (*wbinvd)(void);
154 void (*io_delay)(void);
155
156 /* cpuid emulation, mostly so that caps bits can be disabled */
157 void (*cpuid)(unsigned int *eax, unsigned int *ebx,
158 unsigned int *ecx, unsigned int *edx);
159
 160 /* MSR, PMC and TSC operations.
 161 For rdmsr, *err is set to 0 or -EFAULT; wrmsr returns 0 or -EFAULT. */
162 u64 (*read_msr)(unsigned int msr, int *err);
163 int (*rdmsr_regs)(u32 *regs);
164 int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
165 int (*wrmsr_regs)(u32 *regs);
166
167 u64 (*read_tsc)(void);
168 u64 (*read_pmc)(int counter);
169 unsigned long long (*read_tscp)(unsigned int *aux);
170
171 /*
172 * Atomically enable interrupts and return to userspace. This
173 * is only ever used to return to 32-bit processes; in a
174 * 64-bit kernel, it's used for 32-on-64 compat processes, but
175 * never native 64-bit processes. (Jump, not call.)
176 */
177 void (*irq_enable_sysexit)(void);
178
179 /*
180 * Switch to usermode gs and return to 64-bit usermode using
181 * sysret. Only used in 64-bit kernels to return to 64-bit
182 * processes. Usermode register state, including %rsp, must
183 * already be restored.
184 */
185 void (*usergs_sysret64)(void);
186
187 /*
188 * Switch to usermode gs and return to 32-bit usermode using
189 * sysret. Used to return to 32-on-64 compat processes.
190 * Other usermode register state, including %esp, must already
191 * be restored.
192 */
193 void (*usergs_sysret32)(void);
194
195 /* Normal iret. Jump to this with the standard iret stack
196 frame set up. */
197 void (*iret)(void);
198
199 void (*swapgs)(void);
200
201 void (*start_context_switch)(struct task_struct *prev);
202 void (*end_context_switch)(struct task_struct *next);
203};
204
205struct pv_irq_ops {
206 void (*init_IRQ)(void);
207
208 /*
209 * Get/set interrupt state. save_fl and restore_fl are only
210 * expected to use X86_EFLAGS_IF; all other bits
211 * returned from save_fl are undefined, and may be ignored by
212 * restore_fl.
213 *
 214 * NOTE: Callers of these functions expect the callee to preserve
 215 * more registers than the standard C calling convention requires.
216 */
217 struct paravirt_callee_save save_fl;
218 struct paravirt_callee_save restore_fl;
219 struct paravirt_callee_save irq_disable;
220 struct paravirt_callee_save irq_enable;
221
222 void (*safe_halt)(void);
223 void (*halt)(void);
224
225#ifdef CONFIG_X86_64
226 void (*adjust_exception_frame)(void);
227#endif
228};
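For reference, the native save_fl/restore_fl backends are small pushf/popf wrappers; this sketch follows the definitions in arch/x86/include/asm/irqflags.h (reproduced from memory):

static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/* Read EFLAGS; callers only rely on the IF bit. */
	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}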
229
230struct pv_apic_ops {
231#ifdef CONFIG_X86_LOCAL_APIC
232 void (*setup_boot_clock)(void);
233 void (*setup_secondary_clock)(void);
234
235 void (*startup_ipi_hook)(int phys_apicid,
236 unsigned long start_eip,
237 unsigned long start_esp);
238#endif
239};
240
241struct pv_mmu_ops {
242 /*
243 * Called before/after init_mm pagetable setup. setup_start
244 * may reset %cr3, and may pre-install parts of the pagetable;
245 * pagetable setup is expected to preserve any existing
246 * mapping.
247 */
248 void (*pagetable_setup_start)(pgd_t *pgd_base);
249 void (*pagetable_setup_done)(pgd_t *pgd_base);
250
251 unsigned long (*read_cr2)(void);
252 void (*write_cr2)(unsigned long);
253
254 unsigned long (*read_cr3)(void);
255 void (*write_cr3)(unsigned long);
256
257 /*
258 * Hooks for intercepting the creation/use/destruction of an
259 * mm_struct.
260 */
261 void (*activate_mm)(struct mm_struct *prev,
262 struct mm_struct *next);
263 void (*dup_mmap)(struct mm_struct *oldmm,
264 struct mm_struct *mm);
265 void (*exit_mmap)(struct mm_struct *mm);
266
267
268 /* TLB operations */
269 void (*flush_tlb_user)(void);
270 void (*flush_tlb_kernel)(void);
271 void (*flush_tlb_single)(unsigned long addr);
272 void (*flush_tlb_others)(const struct cpumask *cpus,
273 struct mm_struct *mm,
274 unsigned long va);
275
276 /* Hooks for allocating and freeing a pagetable top-level */
277 int (*pgd_alloc)(struct mm_struct *mm);
278 void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
279
280 /*
281 * Hooks for allocating/releasing pagetable pages when they're
282 * attached to a pagetable
283 */
284 void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
285 void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
286 void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
287 void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
288 void (*release_pte)(unsigned long pfn);
289 void (*release_pmd)(unsigned long pfn);
290 void (*release_pud)(unsigned long pfn);
291
292 /* Pagetable manipulation functions */
293 void (*set_pte)(pte_t *ptep, pte_t pteval);
294 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
295 pte_t *ptep, pte_t pteval);
296 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
297 void (*pte_update)(struct mm_struct *mm, unsigned long addr,
298 pte_t *ptep);
299 void (*pte_update_defer)(struct mm_struct *mm,
300 unsigned long addr, pte_t *ptep);
301
302 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
303 pte_t *ptep);
304 void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
305 pte_t *ptep, pte_t pte);
306
307 struct paravirt_callee_save pte_val;
308 struct paravirt_callee_save make_pte;
309
310 struct paravirt_callee_save pgd_val;
311 struct paravirt_callee_save make_pgd;
312
313#if PAGETABLE_LEVELS >= 3
314#ifdef CONFIG_X86_PAE
315 void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
316 void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
317 pte_t *ptep);
318 void (*pmd_clear)(pmd_t *pmdp);
319
320#endif /* CONFIG_X86_PAE */
321
322 void (*set_pud)(pud_t *pudp, pud_t pudval);
323
324 struct paravirt_callee_save pmd_val;
325 struct paravirt_callee_save make_pmd;
326
327#if PAGETABLE_LEVELS == 4
328 struct paravirt_callee_save pud_val;
329 struct paravirt_callee_save make_pud;
330
331 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
332#endif /* PAGETABLE_LEVELS == 4 */
333#endif /* PAGETABLE_LEVELS >= 3 */
334
335#ifdef CONFIG_HIGHPTE
336 void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
337#endif
338
339 struct pv_lazy_ops lazy_mode;
340
341 /* dom0 ops */
342
 343 /* Sometimes the physical address is a pfn, and sometimes it's
 344 an mfn. We can tell which is which from the index. */
345 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
346 phys_addr_t phys, pgprot_t flags);
347};
348
349struct raw_spinlock;
350struct pv_lock_ops {
351 int (*spin_is_locked)(struct raw_spinlock *lock);
352 int (*spin_is_contended)(struct raw_spinlock *lock);
353 void (*spin_lock)(struct raw_spinlock *lock);
354 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
355 int (*spin_trylock)(struct raw_spinlock *lock);
356 void (*spin_unlock)(struct raw_spinlock *lock);
357};
358
 359/* This contains all the paravirt structures: we get a convenient
 360 * number for each function from its offset, which we then use to
 361 * indicate what to patch. */
362struct paravirt_patch_template {
363 struct pv_init_ops pv_init_ops;
364 struct pv_time_ops pv_time_ops;
365 struct pv_cpu_ops pv_cpu_ops;
366 struct pv_irq_ops pv_irq_ops;
367 struct pv_apic_ops pv_apic_ops;
368 struct pv_mmu_ops pv_mmu_ops;
369 struct pv_lock_ops pv_lock_ops;
370};
371
372extern struct pv_info pv_info;
373extern struct pv_init_ops pv_init_ops;
374extern struct pv_time_ops pv_time_ops;
375extern struct pv_cpu_ops pv_cpu_ops;
376extern struct pv_irq_ops pv_irq_ops;
377extern struct pv_apic_ops pv_apic_ops;
378extern struct pv_mmu_ops pv_mmu_ops;
379extern struct pv_lock_ops pv_lock_ops;
380
381#define PARAVIRT_PATCH(x) \
382 (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
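As an illustration of the numbering scheme (editor's note, not part of the patch):

/* Each op's "type number" is its pointer-slot index within
 * paravirt_patch_template, e.g.
 *
 *   PARAVIRT_PATCH(pv_cpu_ops.iret)
 *     == offsetof(struct paravirt_patch_template, pv_cpu_ops.iret)
 *        / sizeof(void *)
 *
 * Multiplying the type number back by sizeof(void *) therefore
 * recovers the structure offset, and with it the op pointer.
 */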
383
384#define paravirt_type(op) \
385 [paravirt_typenum] "i" (PARAVIRT_PATCH(op)), \
386 [paravirt_opptr] "i" (&(op))
387#define paravirt_clobber(clobber) \
388 [paravirt_clobber] "i" (clobber)
389
390/*
391 * Generate some code, and mark it as patchable by the
392 * apply_paravirt() alternate instruction patcher.
393 */
394#define _paravirt_alt(insn_string, type, clobber) \
395 "771:\n\t" insn_string "\n" "772:\n" \
396 ".pushsection .parainstructions,\"a\"\n" \
397 _ASM_ALIGN "\n" \
398 _ASM_PTR " 771b\n" \
399 " .byte " type "\n" \
400 " .byte 772b-771b\n" \
401 " .short " clobber "\n" \
402 ".popsection\n"
403
404/* Generate patchable code, with the default asm parameters. */
405#define paravirt_alt(insn_string) \
406 _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
407
408/* Simple instruction patching code. */
409#define DEF_NATIVE(ops, name, code) \
410 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
411 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
412
413unsigned paravirt_patch_nop(void);
414unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
415unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
416unsigned paravirt_patch_ignore(unsigned len);
417unsigned paravirt_patch_call(void *insnbuf,
418 const void *target, u16 tgt_clobbers,
419 unsigned long addr, u16 site_clobbers,
420 unsigned len);
421unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
422 unsigned long addr, unsigned len);
423unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
424 unsigned long addr, unsigned len);
425
426unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
427 const char *start, const char *end);
428
429unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
430 unsigned long addr, unsigned len);
431
432int paravirt_disable_iospace(void);
433
434/*
435 * This generates an indirect call based on the operation type number.
436 * The type number, computed in PARAVIRT_PATCH, is derived from the
437 * offset into the paravirt_patch_template structure, and can therefore be
438 * freely converted back into a structure offset.
439 */
440#define PARAVIRT_CALL "call *%c[paravirt_opptr];"
441
442/*
443 * These macros are intended to wrap calls through one of the paravirt
444 * ops structs, so that they can be later identified and patched at
445 * runtime.
446 *
447 * Normally, a call to a pv_op function is a simple indirect call:
448 * (pv_op_struct.operations)(args...).
449 *
450 * Unfortunately, this is a relatively slow operation for modern CPUs,
451 * because it cannot necessarily determine what the destination
452 * address is. In this case, the address is a runtime constant, so at
 453 * the very least we can patch the call to be a simple direct call, or
454 * ideally, patch an inline implementation into the callsite. (Direct
455 * calls are essentially free, because the call and return addresses
456 * are completely predictable.)
457 *
458 * For i386, these macros rely on the standard gcc "regparm(3)" calling
459 * convention, in which the first three arguments are placed in %eax,
460 * %edx, %ecx (in that order), and the remaining arguments are placed
461 * on the stack. All caller-save registers (eax,edx,ecx) are expected
462 * to be modified (either clobbered or used for return values).
 463 * x86_64, on the other hand, already specifies a register-based calling
 464 * convention, returning in %rax, with parameters passed in %rdi, %rsi,
 465 * %rdx, and %rcx. Note that for this reason, x86_64 does not need any
 466 * special handling for a fourth argument, unlike i386.
 467 * However, x86_64 also has to clobber all caller-saved registers, of
 468 * which there are unfortunately quite a few (r8 - r11).
469 *
470 * The call instruction itself is marked by placing its start address
471 * and size into the .parainstructions section, so that
472 * apply_paravirt() in arch/i386/kernel/alternative.c can do the
473 * appropriate patching under the control of the backend pv_init_ops
474 * implementation.
475 *
476 * Unfortunately there's no way to get gcc to generate the args setup
477 * for the call, and then allow the call itself to be generated by an
478 * inline asm. Because of this, we must do the complete arg setup and
479 * return value handling from within these macros. This is fairly
480 * cumbersome.
481 *
482 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
483 * It could be extended to more arguments, but there would be little
484 * to be gained from that. For each number of arguments, there are
485 * the two VCALL and CALL variants for void and non-void functions.
486 *
487 * When there is a return value, the invoker of the macro must specify
488 * the return type. The macro then uses sizeof() on that type to
 489 * determine whether it's a 32- or 64-bit value, and places the return
 490 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 491 * 64-bit). For x86_64 machines, it always returns in %rax regardless of
492 * the return value size.
493 *
 494 * On i386, 64-bit arguments are passed as a pair of adjacent
 495 * 32-bit arguments, in low,high order; each half occupies one
 496 * argument slot.
497 *
498 * Small structures are passed and returned in registers. The macro
499 * calling convention can't directly deal with this, so the wrapper
500 * functions must do this.
501 *
502 * These PVOP_* macros are only defined within this header. This
503 * means that all uses must be wrapped in inline functions. This also
504 * makes sure the incoming and outgoing types are always correct.
505 */
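Two representative wrappers, matching the ones defined in paravirt.h (a void call with one argument and a value-returning call with none):

/* The macro places 'addr' in the first argument register and emits
 * a patchable indirect call through pv_mmu_ops.flush_tlb_single. */
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

/* sizeof(unsigned long) selects the single-register return path. */
static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}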
506#ifdef CONFIG_X86_32
507#define PVOP_VCALL_ARGS \
508 unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
509#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
510
511#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
512#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
513#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
514
515#define PVOP_VCALL_CLOBBERS "=a" (__eax), "=d" (__edx), \
516 "=c" (__ecx)
517#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS
518
519#define PVOP_VCALLEE_CLOBBERS "=a" (__eax), "=d" (__edx)
520#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
521
522#define EXTRA_CLOBBERS
523#define VEXTRA_CLOBBERS
524#else /* CONFIG_X86_64 */
525#define PVOP_VCALL_ARGS \
526 unsigned long __edi = __edi, __esi = __esi, \
527 __edx = __edx, __ecx = __ecx
528#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
529
530#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
531#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
532#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
533#define PVOP_CALL_ARG4(x) "c" ((unsigned long)(x))
534
535#define PVOP_VCALL_CLOBBERS "=D" (__edi), \
536 "=S" (__esi), "=d" (__edx), \
537 "=c" (__ecx)
538#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)
539
540#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
541#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS
542
543#define EXTRA_CLOBBERS , "r8", "r9", "r10", "r11"
544#define VEXTRA_CLOBBERS , "rax", "r8", "r9", "r10", "r11"
545#endif /* CONFIG_X86_32 */
546
547#ifdef CONFIG_PARAVIRT_DEBUG
548#define PVOP_TEST_NULL(op) BUG_ON(op == NULL)
549#else
550#define PVOP_TEST_NULL(op) ((void)op)
551#endif
552
553#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
554 pre, post, ...) \
555 ({ \
556 rettype __ret; \
557 PVOP_CALL_ARGS; \
558 PVOP_TEST_NULL(op); \
559 /* This is 32-bit specific, but is okay in 64-bit */ \
560 /* since this condition will never hold */ \
561 if (sizeof(rettype) > sizeof(unsigned long)) { \
562 asm volatile(pre \
563 paravirt_alt(PARAVIRT_CALL) \
564 post \
565 : call_clbr \
566 : paravirt_type(op), \
567 paravirt_clobber(clbr), \
568 ##__VA_ARGS__ \
569 : "memory", "cc" extra_clbr); \
570 __ret = (rettype)((((u64)__edx) << 32) | __eax); \
571 } else { \
572 asm volatile(pre \
573 paravirt_alt(PARAVIRT_CALL) \
574 post \
575 : call_clbr \
576 : paravirt_type(op), \
577 paravirt_clobber(clbr), \
578 ##__VA_ARGS__ \
579 : "memory", "cc" extra_clbr); \
580 __ret = (rettype)__eax; \
581 } \
582 __ret; \
583 })
584
585#define __PVOP_CALL(rettype, op, pre, post, ...) \
586 ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
587 EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
588
589#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
590 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
591 PVOP_CALLEE_CLOBBERS, , \
592 pre, post, ##__VA_ARGS__)
593
594
595#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
596 ({ \
597 PVOP_VCALL_ARGS; \
598 PVOP_TEST_NULL(op); \
599 asm volatile(pre \
600 paravirt_alt(PARAVIRT_CALL) \
601 post \
602 : call_clbr \
603 : paravirt_type(op), \
604 paravirt_clobber(clbr), \
605 ##__VA_ARGS__ \
606 : "memory", "cc" extra_clbr); \
607 })
608
609#define __PVOP_VCALL(op, pre, post, ...) \
610 ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
611 VEXTRA_CLOBBERS, \
612 pre, post, ##__VA_ARGS__)
613
614#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
615 ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
616 PVOP_VCALLEE_CLOBBERS, , \
617 pre, post, ##__VA_ARGS__)
618
619
620
621#define PVOP_CALL0(rettype, op) \
622 __PVOP_CALL(rettype, op, "", "")
623#define PVOP_VCALL0(op) \
624 __PVOP_VCALL(op, "", "")
625
626#define PVOP_CALLEE0(rettype, op) \
627 __PVOP_CALLEESAVE(rettype, op, "", "")
628#define PVOP_VCALLEE0(op) \
629 __PVOP_VCALLEESAVE(op, "", "")
630
631
632#define PVOP_CALL1(rettype, op, arg1) \
633 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
634#define PVOP_VCALL1(op, arg1) \
635 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
636
637#define PVOP_CALLEE1(rettype, op, arg1) \
638 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
639#define PVOP_VCALLEE1(op, arg1) \
640 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
641
642
643#define PVOP_CALL2(rettype, op, arg1, arg2) \
644 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
645 PVOP_CALL_ARG2(arg2))
646#define PVOP_VCALL2(op, arg1, arg2) \
647 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
648 PVOP_CALL_ARG2(arg2))
649
650#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
651 __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
652 PVOP_CALL_ARG2(arg2))
653#define PVOP_VCALLEE2(op, arg1, arg2) \
654 __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
655 PVOP_CALL_ARG2(arg2))
656
657
658#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
659 __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
660 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
661#define PVOP_VCALL3(op, arg1, arg2, arg3) \
662 __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
663 PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
664
 665/* The 4-argument case is the only place i386 and x86_64 differ; x86_64 is much simpler */
666#ifdef CONFIG_X86_32
667#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
668 __PVOP_CALL(rettype, op, \
669 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
670 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
671 PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
672#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
673 __PVOP_VCALL(op, \
674 "push %[_arg4];", "lea 4(%%esp),%%esp;", \
675 "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
676 "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
677#else
678#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
679 __PVOP_CALL(rettype, op, "", "", \
680 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
681 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
682#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
683 __PVOP_VCALL(op, "", "", \
684 PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
685 PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
686#endif
687
688/* Lazy mode for batching updates / context switch */
689enum paravirt_lazy_mode {
690 PARAVIRT_LAZY_NONE,
691 PARAVIRT_LAZY_MMU,
692 PARAVIRT_LAZY_CPU,
693};
694
695enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
696void paravirt_start_context_switch(struct task_struct *prev);
697void paravirt_end_context_switch(struct task_struct *next);
698
699void paravirt_enter_lazy_mmu(void);
700void paravirt_leave_lazy_mmu(void);
701
702void _paravirt_nop(void);
703u32 _paravirt_ident_32(u32);
704u64 _paravirt_ident_64(u64);
705
706#define paravirt_nop ((void *)_paravirt_nop)
707
708/* These all sit in the .parainstructions section to tell us what to patch. */
709struct paravirt_patch_site {
710 u8 *instr; /* original instructions */
711 u8 instrtype; /* type of this instruction */
712 u8 len; /* length of original instruction */
713 u16 clobbers; /* what registers you may clobber */
714};
715
716extern struct paravirt_patch_site __parainstructions[],
717 __parainstructions_end[];
718
719#endif /* __ASSEMBLY__ */
720
721#endif /* _ASM_X86_PARAVIRT_TYPES_H */
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 7af14e512f97..e2c1668dde7a 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -19,4 +19,9 @@ extern int free_memtype(u64 start, u64 end);
19extern int kernel_map_sync_memtype(u64 base, unsigned long size, 19extern int kernel_map_sync_memtype(u64 base, unsigned long size,
20 unsigned long flag); 20 unsigned long flag);
21 21
22int io_reserve_memtype(resource_size_t start, resource_size_t end,
23 unsigned long *type);
24
25void io_free_memtype(resource_size_t start, resource_size_t end);
26
22#endif /* _ASM_X86_PAT_H */ 27#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 1ff685ca221c..f76a162c082c 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -48,7 +48,6 @@ extern unsigned int pcibios_assign_all_busses(void);
48#else 48#else
49#define pcibios_assign_all_busses() 0 49#define pcibios_assign_all_busses() 0
50#endif 50#endif
51#define pcibios_scan_all_fns(a, b) 0
52 51
53extern unsigned long pci_mem_start; 52extern unsigned long pci_mem_start;
54#define PCIBIOS_MIN_IO 0x1000 53#define PCIBIOS_MIN_IO 0x1000
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 103f1ddb0d85..b65a36defeb7 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -49,7 +49,7 @@
49#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x 49#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
50#define __my_cpu_offset percpu_read(this_cpu_off) 50#define __my_cpu_offset percpu_read(this_cpu_off)
51#else 51#else
52#define __percpu_arg(x) "%" #x 52#define __percpu_arg(x) "%P" #x
53#endif 53#endif
54 54
55/* 55/*
@@ -104,36 +104,48 @@ do { \
104 } \ 104 } \
105} while (0) 105} while (0)
106 106
107#define percpu_from_op(op, var) \ 107#define percpu_from_op(op, var, constraint) \
108({ \ 108({ \
109 typeof(var) ret__; \ 109 typeof(var) ret__; \
110 switch (sizeof(var)) { \ 110 switch (sizeof(var)) { \
111 case 1: \ 111 case 1: \
112 asm(op "b "__percpu_arg(1)",%0" \ 112 asm(op "b "__percpu_arg(1)",%0" \
113 : "=q" (ret__) \ 113 : "=q" (ret__) \
114 : "m" (var)); \ 114 : constraint); \
115 break; \ 115 break; \
116 case 2: \ 116 case 2: \
117 asm(op "w "__percpu_arg(1)",%0" \ 117 asm(op "w "__percpu_arg(1)",%0" \
118 : "=r" (ret__) \ 118 : "=r" (ret__) \
119 : "m" (var)); \ 119 : constraint); \
120 break; \ 120 break; \
121 case 4: \ 121 case 4: \
122 asm(op "l "__percpu_arg(1)",%0" \ 122 asm(op "l "__percpu_arg(1)",%0" \
123 : "=r" (ret__) \ 123 : "=r" (ret__) \
124 : "m" (var)); \ 124 : constraint); \
125 break; \ 125 break; \
126 case 8: \ 126 case 8: \
127 asm(op "q "__percpu_arg(1)",%0" \ 127 asm(op "q "__percpu_arg(1)",%0" \
128 : "=r" (ret__) \ 128 : "=r" (ret__) \
129 : "m" (var)); \ 129 : constraint); \
130 break; \ 130 break; \
131 default: __bad_percpu_size(); \ 131 default: __bad_percpu_size(); \
132 } \ 132 } \
133 ret__; \ 133 ret__; \
134}) 134})
135 135
136#define percpu_read(var) percpu_from_op("mov", per_cpu__##var) 136/*
137 * percpu_read() makes gcc load the percpu variable every time it is
138 * accessed while percpu_read_stable() allows the value to be cached.
139 * percpu_read_stable() is more efficient and can be used if its value
140 * is guaranteed to be valid across cpus. The current users include
141 * get_current() and get_thread_info() both of which are actually
142 * per-thread variables implemented as per-cpu variables and thus
143 * stable for the duration of the respective task.
144 */
145#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \
146 "m" (per_cpu__##var))
147#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \
148 "p" (&per_cpu__##var))
137#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val) 149#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
138#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val) 150#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
139#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val) 151#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
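A sketch of the difference in practice (editor's illustration; the thread_info.h hunk further down is the real conversion): with the "p" constraint gcc may read the variable once and reuse the value, which is safe here because kernel_stack is constant for the lifetime of a task.

/* Hypothetical helper: repeated calls may be folded into one load,
 * unlike percpu_read(), which re-reads %gs-relative memory each time. */
static inline unsigned long demo_stack_base(void)
{
	return percpu_read_stable(kernel_stack) - THREAD_SIZE;
}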
@@ -156,15 +168,6 @@ do { \
156/* We can use this directly for local CPU (faster). */ 168/* We can use this directly for local CPU (faster). */
157DECLARE_PER_CPU(unsigned long, this_cpu_off); 169DECLARE_PER_CPU(unsigned long, this_cpu_off);
158 170
159#ifdef CONFIG_NEED_MULTIPLE_NODES
160void *pcpu_lpage_remapped(void *kaddr);
161#else
162static inline void *pcpu_lpage_remapped(void *kaddr)
163{
164 return NULL;
165}
166#endif
167
168#endif /* !__ASSEMBLY__ */ 171#endif /* !__ASSEMBLY__ */
169 172
170#ifdef CONFIG_SMP 173#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 16748077559a..4c5b51fdc788 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -135,6 +135,11 @@ static inline unsigned long pte_pfn(pte_t pte)
135 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT; 135 return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
136} 136}
137 137
138static inline unsigned long pmd_pfn(pmd_t pmd)
139{
140 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
141}
142
138#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 143#define pte_page(pte) pfn_to_page(pte_pfn(pte))
139 144
140static inline int pmd_large(pmd_t pte) 145static inline int pmd_large(pmd_t pte)
@@ -359,7 +364,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
359 * this macro returns the index of the entry in the pmd page which would 364 * this macro returns the index of the entry in the pmd page which would
360 * control the given virtual address 365 * control the given virtual address
361 */ 366 */
362static inline unsigned pmd_index(unsigned long address) 367static inline unsigned long pmd_index(unsigned long address)
363{ 368{
364 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); 369 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
365} 370}
@@ -379,7 +384,7 @@ static inline unsigned pmd_index(unsigned long address)
379 * this function returns the index of the entry in the pte page which would 384 * this function returns the index of the entry in the pte page which would
380 * control the given virtual address 385 * control the given virtual address
381 */ 386 */
382static inline unsigned pte_index(unsigned long address) 387static inline unsigned long pte_index(unsigned long address)
383{ 388{
384 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); 389 return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
385} 390}
@@ -430,11 +435,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
430 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); 435 return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
431} 436}
432 437
433static inline unsigned long pmd_pfn(pmd_t pmd)
434{
435 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
436}
437
438static inline int pud_large(pud_t pud) 438static inline int pud_large(pud_t pud)
439{ 439{
440 return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == 440 return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -470,7 +470,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
470#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT) 470#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
471 471
472/* to find an entry in a page-table-directory. */ 472/* to find an entry in a page-table-directory. */
473static inline unsigned pud_index(unsigned long address) 473static inline unsigned long pud_index(unsigned long address)
474{ 474{
475 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); 475 return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
476} 476}
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c7768269b1cf..c3429e8b2424 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -27,6 +27,7 @@ struct mm_struct;
27#include <linux/cpumask.h> 27#include <linux/cpumask.h>
28#include <linux/cache.h> 28#include <linux/cache.h>
29#include <linux/threads.h> 29#include <linux/threads.h>
30#include <linux/math64.h>
30#include <linux/init.h> 31#include <linux/init.h>
31 32
32/* 33/*
@@ -403,7 +404,17 @@ extern unsigned long kernel_eflags;
403extern asmlinkage void ignore_sysret(void); 404extern asmlinkage void ignore_sysret(void);
404#else /* X86_64 */ 405#else /* X86_64 */
405#ifdef CONFIG_CC_STACKPROTECTOR 406#ifdef CONFIG_CC_STACKPROTECTOR
406DECLARE_PER_CPU(unsigned long, stack_canary); 407/*
408 * Make sure stack canary segment base is cached-aligned:
409 * "For Intel Atom processors, avoid non zero segment base address
410 * that is not aligned to cache line boundary at all cost."
411 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
412 */
413struct stack_canary {
414 char __pad[20]; /* canary at %gs:20 */
415 unsigned long canary;
416};
417DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
407#endif 418#endif
408#endif /* X86_64 */ 419#endif /* X86_64 */
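The 20-byte pad exists because gcc's i386 stack-protector code addresses the canary as %gs:20; a compile-time check of that layout (editor's sketch, not in the patch):

/* The canary field must sit exactly at offset 20 so that the
 * compiler-generated %gs:20 accesses hit it. */
static inline void check_stack_canary_layout(void)
{
	BUILD_BUG_ON(offsetof(struct stack_canary, canary) != 20);
}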
409 420
@@ -703,13 +714,23 @@ static inline void cpu_relax(void)
703 rep_nop(); 714 rep_nop();
704} 715}
705 716
706/* Stop speculative execution: */ 717/* Stop speculative execution and prefetching of modified code. */
707static inline void sync_core(void) 718static inline void sync_core(void)
708{ 719{
709 int tmp; 720 int tmp;
710 721
711 asm volatile("cpuid" : "=a" (tmp) : "0" (1) 722#if defined(CONFIG_M386) || defined(CONFIG_M486)
712 : "ebx", "ecx", "edx", "memory"); 723 if (boot_cpu_data.x86 < 5)
724 /* There is no speculative execution.
725 * jmp is a barrier to prefetching. */
726 asm volatile("jmp 1f\n1:\n" ::: "memory");
727 else
728#endif
729 /* cpuid is a barrier to speculative execution.
730 * Prefetched instructions are automatically
731 * invalidated when modified. */
732 asm volatile("cpuid" : "=a" (tmp) : "0" (1)
733 : "ebx", "ecx", "edx", "memory");
713} 734}
714 735
715static inline void __monitor(const void *eax, unsigned long ecx, 736static inline void __monitor(const void *eax, unsigned long ecx,
@@ -1000,4 +1021,35 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
1000extern int get_tsc_mode(unsigned long adr); 1021extern int get_tsc_mode(unsigned long adr);
1001extern int set_tsc_mode(unsigned int val); 1022extern int set_tsc_mode(unsigned int val);
1002 1023
1024extern int amd_get_nb_id(int cpu);
1025
1026struct aperfmperf {
1027 u64 aperf, mperf;
1028};
1029
1030static inline void get_aperfmperf(struct aperfmperf *am)
1031{
1032 WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
1033
1034 rdmsrl(MSR_IA32_APERF, am->aperf);
1035 rdmsrl(MSR_IA32_MPERF, am->mperf);
1036}
1037
1038#define APERFMPERF_SHIFT 10
1039
1040static inline
1041unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
1042 struct aperfmperf *new)
1043{
1044 u64 aperf = new->aperf - old->aperf;
1045 u64 mperf = new->mperf - old->mperf;
1046 unsigned long ratio = aperf;
1047
1048 mperf >>= APERFMPERF_SHIFT;
1049 if (mperf)
1050 ratio = div64_u64(aperf, mperf);
1051
1052 return ratio;
1053}
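A typical consumer samples the counters twice and feeds the deltas in; a hedged sketch (the helper name and sleep interval here are illustrative):

/* Measure how fast the CPU actually ran over a short window; the
 * result is scaled by 2^APERFMPERF_SHIFT via calc_aperfmperf_ratio(). */
static unsigned long sample_aperfmperf_ratio(void)
{
	struct aperfmperf before, after;

	get_aperfmperf(&before);
	msleep(10);		/* let APERF/MPERF advance */
	get_aperfmperf(&after);

	return calc_aperfmperf_ratio(&before, &after);
}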
1054
1003#endif /* _ASM_X86_PROCESSOR_H */ 1055#endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/include/asm/scatterlist.h b/arch/x86/include/asm/scatterlist.h
index 263d397d2eef..75af592677ec 100644
--- a/arch/x86/include/asm/scatterlist.h
+++ b/arch/x86/include/asm/scatterlist.h
@@ -1,33 +1,8 @@
1#ifndef _ASM_X86_SCATTERLIST_H 1#ifndef _ASM_X86_SCATTERLIST_H
2#define _ASM_X86_SCATTERLIST_H 2#define _ASM_X86_SCATTERLIST_H
3 3
4#include <asm/types.h>
5
6struct scatterlist {
7#ifdef CONFIG_DEBUG_SG
8 unsigned long sg_magic;
9#endif
10 unsigned long page_link;
11 unsigned int offset;
12 unsigned int length;
13 dma_addr_t dma_address;
14 unsigned int dma_length;
15};
16
17#define ARCH_HAS_SG_CHAIN
18#define ISA_DMA_THRESHOLD (0x00ffffff) 4#define ISA_DMA_THRESHOLD (0x00ffffff)
19 5
20/* 6#include <asm-generic/scatterlist.h>
21 * These macros should be used after a pci_map_sg call has been done
22 * to get bus addresses of each of the SG entries and their lengths.
23 * You should only work with the number of sg entries pci_map_sg
24 * returns.
25 */
26#define sg_dma_address(sg) ((sg)->dma_address)
27#ifdef CONFIG_X86_32
28# define sg_dma_len(sg) ((sg)->length)
29#else
30# define sg_dma_len(sg) ((sg)->dma_length)
31#endif
32 7
33#endif /* _ASM_X86_SCATTERLIST_H */ 8#endif /* _ASM_X86_SCATTERLIST_H */
diff --git a/arch/x86/include/asm/shmbuf.h b/arch/x86/include/asm/shmbuf.h
index b51413b74971..83c05fc2de38 100644
--- a/arch/x86/include/asm/shmbuf.h
+++ b/arch/x86/include/asm/shmbuf.h
@@ -1,51 +1 @@
1#ifndef _ASM_X86_SHMBUF_H #include <asm-generic/shmbuf.h>
2#define _ASM_X86_SHMBUF_H
3
4/*
5 * The shmid64_ds structure for x86 architecture.
6 * Note extra padding because this structure is passed back and forth
7 * between kernel and user space.
8 *
9 * Pad space on 32 bit is left for:
10 * - 64-bit time_t to solve y2038 problem
11 * - 2 miscellaneous 32-bit values
12 *
13 * Pad space on 64 bit is left for:
14 * - 2 miscellaneous 64-bit values
15 */
16
17struct shmid64_ds {
18 struct ipc64_perm shm_perm; /* operation perms */
19 size_t shm_segsz; /* size of segment (bytes) */
20 __kernel_time_t shm_atime; /* last attach time */
21#ifdef __i386__
22 unsigned long __unused1;
23#endif
24 __kernel_time_t shm_dtime; /* last detach time */
25#ifdef __i386__
26 unsigned long __unused2;
27#endif
28 __kernel_time_t shm_ctime; /* last change time */
29#ifdef __i386__
30 unsigned long __unused3;
31#endif
32 __kernel_pid_t shm_cpid; /* pid of creator */
33 __kernel_pid_t shm_lpid; /* pid of last operator */
34 unsigned long shm_nattch; /* no. of current attaches */
35 unsigned long __unused4;
36 unsigned long __unused5;
37};
38
39struct shminfo64 {
40 unsigned long shmmax;
41 unsigned long shmmin;
42 unsigned long shmmni;
43 unsigned long shmseg;
44 unsigned long shmall;
45 unsigned long __unused1;
46 unsigned long __unused2;
47 unsigned long __unused3;
48 unsigned long __unused4;
49};
50
51#endif /* _ASM_X86_SHMBUF_H */
diff --git a/arch/x86/include/asm/socket.h b/arch/x86/include/asm/socket.h
index ca8bf2cd0ba9..6b71384b9d8b 100644
--- a/arch/x86/include/asm/socket.h
+++ b/arch/x86/include/asm/socket.h
@@ -1,60 +1 @@
1#ifndef _ASM_X86_SOCKET_H #include <asm-generic/socket.h>
2#define _ASM_X86_SOCKET_H
3
4#include <asm/sockios.h>
5
6/* For setsockopt(2) */
7#define SOL_SOCKET 1
8
9#define SO_DEBUG 1
10#define SO_REUSEADDR 2
11#define SO_TYPE 3
12#define SO_ERROR 4
13#define SO_DONTROUTE 5
14#define SO_BROADCAST 6
15#define SO_SNDBUF 7
16#define SO_RCVBUF 8
17#define SO_SNDBUFFORCE 32
18#define SO_RCVBUFFORCE 33
19#define SO_KEEPALIVE 9
20#define SO_OOBINLINE 10
21#define SO_NO_CHECK 11
22#define SO_PRIORITY 12
23#define SO_LINGER 13
24#define SO_BSDCOMPAT 14
25/* To add :#define SO_REUSEPORT 15 */
26#define SO_PASSCRED 16
27#define SO_PEERCRED 17
28#define SO_RCVLOWAT 18
29#define SO_SNDLOWAT 19
30#define SO_RCVTIMEO 20
31#define SO_SNDTIMEO 21
32
33/* Security levels - as per NRL IPv6 - don't actually do anything */
34#define SO_SECURITY_AUTHENTICATION 22
35#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
36#define SO_SECURITY_ENCRYPTION_NETWORK 24
37
38#define SO_BINDTODEVICE 25
39
40/* Socket filtering */
41#define SO_ATTACH_FILTER 26
42#define SO_DETACH_FILTER 27
43
44#define SO_PEERNAME 28
45#define SO_TIMESTAMP 29
46#define SCM_TIMESTAMP SO_TIMESTAMP
47
48#define SO_ACCEPTCONN 30
49
50#define SO_PEERSEC 31
51#define SO_PASSSEC 34
52#define SO_TIMESTAMPNS 35
53#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
54
55#define SO_MARK 36
56
57#define SO_TIMESTAMPING 37
58#define SCM_TIMESTAMPING SO_TIMESTAMPING
59
60#endif /* _ASM_X86_SOCKET_H */
diff --git a/arch/x86/include/asm/sockios.h b/arch/x86/include/asm/sockios.h
index 49cc72b5d3c9..def6d4746ee7 100644
--- a/arch/x86/include/asm/sockios.h
+++ b/arch/x86/include/asm/sockios.h
@@ -1,13 +1 @@
1#ifndef _ASM_X86_SOCKIOS_H #include <asm-generic/sockios.h>
2#define _ASM_X86_SOCKIOS_H
3
4/* Socket-level I/O control calls. */
5#define FIOSETOWN 0x8901
6#define SIOCSPGRP 0x8902
7#define FIOGETOWN 0x8903
8#define SIOCGPGRP 0x8904
9#define SIOCATMARK 0x8905
10#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
11#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
12
13#endif /* _ASM_X86_SOCKIOS_H */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index c2d742c6e15f..157517763565 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -48,7 +48,7 @@
48 * head_32 for boot CPU and setup_per_cpu_areas() for others. 48 * head_32 for boot CPU and setup_per_cpu_areas() for others.
49 */ 49 */
50#define GDT_STACK_CANARY_INIT \ 50#define GDT_STACK_CANARY_INIT \
51 [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } }, 51 [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
52 52
53/* 53/*
54 * Initialize the stackprotector canary value. 54 * Initialize the stackprotector canary value.
@@ -78,21 +78,19 @@ static __always_inline void boot_init_stack_canary(void)
78#ifdef CONFIG_X86_64 78#ifdef CONFIG_X86_64
79 percpu_write(irq_stack_union.stack_canary, canary); 79 percpu_write(irq_stack_union.stack_canary, canary);
80#else 80#else
81 percpu_write(stack_canary, canary); 81 percpu_write(stack_canary.canary, canary);
82#endif 82#endif
83} 83}
84 84
85static inline void setup_stack_canary_segment(int cpu) 85static inline void setup_stack_canary_segment(int cpu)
86{ 86{
87#ifdef CONFIG_X86_32 87#ifdef CONFIG_X86_32
88 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20; 88 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
89 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu); 89 struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
90 struct desc_struct desc; 90 struct desc_struct desc;
91 91
92 desc = gdt_table[GDT_ENTRY_STACK_CANARY]; 92 desc = gdt_table[GDT_ENTRY_STACK_CANARY];
93 desc.base0 = canary & 0xffff; 93 set_desc_base(&desc, canary);
94 desc.base1 = (canary >> 16) & 0xff;
95 desc.base2 = (canary >> 24) & 0xff;
96 write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S); 94 write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
97#endif 95#endif
98} 96}
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 643c59b4bc6e..f08f97374892 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -31,7 +31,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
31 "movl %P[task_canary](%[next]), %%ebx\n\t" \ 31 "movl %P[task_canary](%[next]), %%ebx\n\t" \
32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t" 32 "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
33#define __switch_canary_oparam \ 33#define __switch_canary_oparam \
34 , [stack_canary] "=m" (per_cpu_var(stack_canary)) 34 , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
35#define __switch_canary_iparam \ 35#define __switch_canary_iparam \
36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary)) 36 , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
37#else /* CC_STACKPROTECTOR */ 37#else /* CC_STACKPROTECTOR */
@@ -150,33 +150,6 @@ do { \
150#endif 150#endif
151 151
152#ifdef __KERNEL__ 152#ifdef __KERNEL__
153#define _set_base(addr, base) do { unsigned long __pr; \
154__asm__ __volatile__ ("movw %%dx,%1\n\t" \
155 "rorl $16,%%edx\n\t" \
156 "movb %%dl,%2\n\t" \
157 "movb %%dh,%3" \
158 :"=&d" (__pr) \
159 :"m" (*((addr)+2)), \
160 "m" (*((addr)+4)), \
161 "m" (*((addr)+7)), \
162 "0" (base) \
163 ); } while (0)
164
165#define _set_limit(addr, limit) do { unsigned long __lr; \
166__asm__ __volatile__ ("movw %%dx,%1\n\t" \
167 "rorl $16,%%edx\n\t" \
168 "movb %2,%%dh\n\t" \
169 "andb $0xf0,%%dh\n\t" \
170 "orb %%dh,%%dl\n\t" \
171 "movb %%dl,%2" \
172 :"=&d" (__lr) \
173 :"m" (*(addr)), \
174 "m" (*((addr)+6)), \
175 "0" (limit) \
176 ); } while (0)
177
178#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
179#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
180 153
181extern void native_load_gs_index(unsigned); 154extern void native_load_gs_index(unsigned);
182 155
diff --git a/arch/x86/include/asm/termbits.h b/arch/x86/include/asm/termbits.h
index af1b70ea440f..3935b106de79 100644
--- a/arch/x86/include/asm/termbits.h
+++ b/arch/x86/include/asm/termbits.h
@@ -1,198 +1 @@
1#ifndef _ASM_X86_TERMBITS_H #include <asm-generic/termbits.h>
2#define _ASM_X86_TERMBITS_H
3
4#include <linux/posix_types.h>
5
6typedef unsigned char cc_t;
7typedef unsigned int speed_t;
8typedef unsigned int tcflag_t;
9
10#define NCCS 19
11struct termios {
12 tcflag_t c_iflag; /* input mode flags */
13 tcflag_t c_oflag; /* output mode flags */
14 tcflag_t c_cflag; /* control mode flags */
15 tcflag_t c_lflag; /* local mode flags */
16 cc_t c_line; /* line discipline */
17 cc_t c_cc[NCCS]; /* control characters */
18};
19
20struct termios2 {
21 tcflag_t c_iflag; /* input mode flags */
22 tcflag_t c_oflag; /* output mode flags */
23 tcflag_t c_cflag; /* control mode flags */
24 tcflag_t c_lflag; /* local mode flags */
25 cc_t c_line; /* line discipline */
26 cc_t c_cc[NCCS]; /* control characters */
27 speed_t c_ispeed; /* input speed */
28 speed_t c_ospeed; /* output speed */
29};
30
31struct ktermios {
32 tcflag_t c_iflag; /* input mode flags */
33 tcflag_t c_oflag; /* output mode flags */
34 tcflag_t c_cflag; /* control mode flags */
35 tcflag_t c_lflag; /* local mode flags */
36 cc_t c_line; /* line discipline */
37 cc_t c_cc[NCCS]; /* control characters */
38 speed_t c_ispeed; /* input speed */
39 speed_t c_ospeed; /* output speed */
40};
41
42/* c_cc characters */
43#define VINTR 0
44#define VQUIT 1
45#define VERASE 2
46#define VKILL 3
47#define VEOF 4
48#define VTIME 5
49#define VMIN 6
50#define VSWTC 7
51#define VSTART 8
52#define VSTOP 9
53#define VSUSP 10
54#define VEOL 11
55#define VREPRINT 12
56#define VDISCARD 13
57#define VWERASE 14
58#define VLNEXT 15
59#define VEOL2 16
60
61/* c_iflag bits */
62#define IGNBRK 0000001
63#define BRKINT 0000002
64#define IGNPAR 0000004
65#define PARMRK 0000010
66#define INPCK 0000020
67#define ISTRIP 0000040
68#define INLCR 0000100
69#define IGNCR 0000200
70#define ICRNL 0000400
71#define IUCLC 0001000
72#define IXON 0002000
73#define IXANY 0004000
74#define IXOFF 0010000
75#define IMAXBEL 0020000
76#define IUTF8 0040000
77
78/* c_oflag bits */
79#define OPOST 0000001
80#define OLCUC 0000002
81#define ONLCR 0000004
82#define OCRNL 0000010
83#define ONOCR 0000020
84#define ONLRET 0000040
85#define OFILL 0000100
86#define OFDEL 0000200
87#define NLDLY 0000400
88#define NL0 0000000
89#define NL1 0000400
90#define CRDLY 0003000
91#define CR0 0000000
92#define CR1 0001000
93#define CR2 0002000
94#define CR3 0003000
95#define TABDLY 0014000
96#define TAB0 0000000
97#define TAB1 0004000
98#define TAB2 0010000
99#define TAB3 0014000
100#define XTABS 0014000
101#define BSDLY 0020000
102#define BS0 0000000
103#define BS1 0020000
104#define VTDLY 0040000
105#define VT0 0000000
106#define VT1 0040000
107#define FFDLY 0100000
108#define FF0 0000000
109#define FF1 0100000
110
111/* c_cflag bit meaning */
112#define CBAUD 0010017
113#define B0 0000000 /* hang up */
114#define B50 0000001
115#define B75 0000002
116#define B110 0000003
117#define B134 0000004
118#define B150 0000005
119#define B200 0000006
120#define B300 0000007
121#define B600 0000010
122#define B1200 0000011
123#define B1800 0000012
124#define B2400 0000013
125#define B4800 0000014
126#define B9600 0000015
127#define B19200 0000016
128#define B38400 0000017
129#define EXTA B19200
130#define EXTB B38400
131#define CSIZE 0000060
132#define CS5 0000000
133#define CS6 0000020
134#define CS7 0000040
135#define CS8 0000060
136#define CSTOPB 0000100
137#define CREAD 0000200
138#define PARENB 0000400
139#define PARODD 0001000
140#define HUPCL 0002000
141#define CLOCAL 0004000
142#define CBAUDEX 0010000
143#define BOTHER 0010000 /* non standard rate */
144#define B57600 0010001
145#define B115200 0010002
146#define B230400 0010003
147#define B460800 0010004
148#define B500000 0010005
149#define B576000 0010006
150#define B921600 0010007
151#define B1000000 0010010
152#define B1152000 0010011
153#define B1500000 0010012
154#define B2000000 0010013
155#define B2500000 0010014
156#define B3000000 0010015
157#define B3500000 0010016
158#define B4000000 0010017
159#define CIBAUD 002003600000 /* input baud rate */
160#define CMSPAR 010000000000 /* mark or space (stick) parity */
161#define CRTSCTS 020000000000 /* flow control */
162
163#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
164
165/* c_lflag bits */
166#define ISIG 0000001
167#define ICANON 0000002
168#define XCASE 0000004
169#define ECHO 0000010
170#define ECHOE 0000020
171#define ECHOK 0000040
172#define ECHONL 0000100
173#define NOFLSH 0000200
174#define TOSTOP 0000400
175#define ECHOCTL 0001000
176#define ECHOPRT 0002000
177#define ECHOKE 0004000
178#define FLUSHO 0010000
179#define PENDIN 0040000
180#define IEXTEN 0100000
181
182/* tcflow() and TCXONC use these */
183#define TCOOFF 0
184#define TCOON 1
185#define TCIOFF 2
186#define TCION 3
187
188/* tcflush() and TCFLSH use these */
189#define TCIFLUSH 0
190#define TCOFLUSH 1
191#define TCIOFLUSH 2
192
193/* tcsetattr uses these */
194#define TCSANOW 0
195#define TCSADRAIN 1
196#define TCSAFLUSH 2
197
198#endif /* _ASM_X86_TERMBITS_H */
diff --git a/arch/x86/include/asm/termios.h b/arch/x86/include/asm/termios.h
index c4ee8056baca..280d78a9d966 100644
--- a/arch/x86/include/asm/termios.h
+++ b/arch/x86/include/asm/termios.h
@@ -1,114 +1 @@
1#ifndef _ASM_X86_TERMIOS_H #include <asm-generic/termios.h>
2#define _ASM_X86_TERMIOS_H
3
4#include <asm/termbits.h>
5#include <asm/ioctls.h>
6
7struct winsize {
8 unsigned short ws_row;
9 unsigned short ws_col;
10 unsigned short ws_xpixel;
11 unsigned short ws_ypixel;
12};
13
14#define NCC 8
15struct termio {
16 unsigned short c_iflag; /* input mode flags */
17 unsigned short c_oflag; /* output mode flags */
18 unsigned short c_cflag; /* control mode flags */
19 unsigned short c_lflag; /* local mode flags */
20 unsigned char c_line; /* line discipline */
21 unsigned char c_cc[NCC]; /* control characters */
22};
23
24/* modem lines */
25#define TIOCM_LE 0x001
26#define TIOCM_DTR 0x002
27#define TIOCM_RTS 0x004
28#define TIOCM_ST 0x008
29#define TIOCM_SR 0x010
30#define TIOCM_CTS 0x020
31#define TIOCM_CAR 0x040
32#define TIOCM_RNG 0x080
33#define TIOCM_DSR 0x100
34#define TIOCM_CD TIOCM_CAR
35#define TIOCM_RI TIOCM_RNG
36#define TIOCM_OUT1 0x2000
37#define TIOCM_OUT2 0x4000
38#define TIOCM_LOOP 0x8000
39
40/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
41
42#ifdef __KERNEL__
43
44#include <asm/uaccess.h>
45
46/* intr=^C quit=^\ erase=del kill=^U
47 eof=^D vtime=\0 vmin=\1 sxtc=\0
48 start=^Q stop=^S susp=^Z eol=\0
49 reprint=^R discard=^U werase=^W lnext=^V
50 eol2=\0
51*/
52#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
53
54/*
55 * Translate a "termio" structure into a "termios". Ugh.
56 */
57#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
58 unsigned short __tmp; \
59 get_user(__tmp,&(termio)->x); \
60 *(unsigned short *) &(termios)->x = __tmp; \
61}
62
63static inline int user_termio_to_kernel_termios(struct ktermios *termios,
64 struct termio __user *termio)
65{
66 SET_LOW_TERMIOS_BITS(termios, termio, c_iflag);
67 SET_LOW_TERMIOS_BITS(termios, termio, c_oflag);
68 SET_LOW_TERMIOS_BITS(termios, termio, c_cflag);
69 SET_LOW_TERMIOS_BITS(termios, termio, c_lflag);
70 get_user(termios->c_line, &termio->c_line);
71 return copy_from_user(termios->c_cc, termio->c_cc, NCC);
72}
73
74/*
75 * Translate a "termios" structure into a "termio". Ugh.
76 */
77static inline int kernel_termios_to_user_termio(struct termio __user *termio,
78 struct ktermios *termios)
79{
80 put_user((termios)->c_iflag, &(termio)->c_iflag);
81 put_user((termios)->c_oflag, &(termio)->c_oflag);
82 put_user((termios)->c_cflag, &(termio)->c_cflag);
83 put_user((termios)->c_lflag, &(termio)->c_lflag);
84 put_user((termios)->c_line, &(termio)->c_line);
85 return copy_to_user((termio)->c_cc, (termios)->c_cc, NCC);
86}
87
88static inline int user_termios_to_kernel_termios(struct ktermios *k,
89 struct termios2 __user *u)
90{
91 return copy_from_user(k, u, sizeof(struct termios2));
92}
93
94static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
95 struct ktermios *k)
96{
97 return copy_to_user(u, k, sizeof(struct termios2));
98}
99
100static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
101 struct termios __user *u)
102{
103 return copy_from_user(k, u, sizeof(struct termios));
104}
105
106static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
107 struct ktermios *k)
108{
109 return copy_to_user(u, k, sizeof(struct termios));
110}
111
112#endif /* __KERNEL__ */
113
114#endif /* _ASM_X86_TERMIOS_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 6f7786aea4fc..d27d0a2fec4c 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -214,7 +214,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
214static inline struct thread_info *current_thread_info(void) 214static inline struct thread_info *current_thread_info(void)
215{ 215{
216 struct thread_info *ti; 216 struct thread_info *ti;
217 ti = (void *)(percpu_read(kernel_stack) + 217 ti = (void *)(percpu_read_stable(kernel_stack) +
218 KERNEL_STACK_OFFSET - THREAD_SIZE); 218 KERNEL_STACK_OFFSET - THREAD_SIZE);
219 return ti; 219 return ti;
220} 220}
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 26d06e052a18..6f0695d744bf 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -116,15 +116,11 @@ extern unsigned long node_remap_size[];
116 116
117# define SD_CACHE_NICE_TRIES 1 117# define SD_CACHE_NICE_TRIES 1
118# define SD_IDLE_IDX 1 118# define SD_IDLE_IDX 1
119# define SD_NEWIDLE_IDX 2
120# define SD_FORKEXEC_IDX 0
121 119
122#else 120#else
123 121
124# define SD_CACHE_NICE_TRIES 2 122# define SD_CACHE_NICE_TRIES 2
125# define SD_IDLE_IDX 2 123# define SD_IDLE_IDX 2
126# define SD_NEWIDLE_IDX 2
127# define SD_FORKEXEC_IDX 1
128 124
129#endif 125#endif
130 126
@@ -137,22 +133,20 @@ extern unsigned long node_remap_size[];
137 .cache_nice_tries = SD_CACHE_NICE_TRIES, \ 133 .cache_nice_tries = SD_CACHE_NICE_TRIES, \
138 .busy_idx = 3, \ 134 .busy_idx = 3, \
139 .idle_idx = SD_IDLE_IDX, \ 135 .idle_idx = SD_IDLE_IDX, \
140 .newidle_idx = SD_NEWIDLE_IDX, \ 136 .newidle_idx = 0, \
141 .wake_idx = 1, \ 137 .wake_idx = 0, \
142 .forkexec_idx = SD_FORKEXEC_IDX, \ 138 .forkexec_idx = 0, \
143 \ 139 \
144 .flags = 1*SD_LOAD_BALANCE \ 140 .flags = 1*SD_LOAD_BALANCE \
145 | 1*SD_BALANCE_NEWIDLE \ 141 | 1*SD_BALANCE_NEWIDLE \
146 | 1*SD_BALANCE_EXEC \ 142 | 1*SD_BALANCE_EXEC \
147 | 1*SD_BALANCE_FORK \ 143 | 1*SD_BALANCE_FORK \
148 | 0*SD_WAKE_IDLE \ 144 | 0*SD_BALANCE_WAKE \
149 | 1*SD_WAKE_AFFINE \ 145 | 1*SD_WAKE_AFFINE \
150 | 1*SD_WAKE_BALANCE \
151 | 0*SD_SHARE_CPUPOWER \ 146 | 0*SD_SHARE_CPUPOWER \
152 | 0*SD_POWERSAVINGS_BALANCE \ 147 | 0*SD_POWERSAVINGS_BALANCE \
153 | 0*SD_SHARE_PKG_RESOURCES \ 148 | 0*SD_SHARE_PKG_RESOURCES \
154 | 1*SD_SERIALIZE \ 149 | 1*SD_SERIALIZE \
155 | 1*SD_WAKE_IDLE_FAR \
156 | 0*SD_PREFER_SIBLING \ 150 | 0*SD_PREFER_SIBLING \
157 , \ 151 , \
158 .last_balance = jiffies, \ 152 .last_balance = jiffies, \
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index bfd74c032fca..4da91ad69e0d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -81,9 +81,7 @@ extern int panic_on_unrecovered_nmi;
81 81
82void math_error(void __user *); 82void math_error(void __user *);
83void math_emulate(struct math_emu_info *); 83void math_emulate(struct math_emu_info *);
84#ifdef CONFIG_X86_32 84#ifndef CONFIG_X86_32
85unsigned long patch_espfix_desc(unsigned long, unsigned long);
86#else
87asmlinkage void smp_thermal_interrupt(void); 85asmlinkage void smp_thermal_interrupt(void);
88asmlinkage void mce_threshold_interrupt(void); 86asmlinkage void mce_threshold_interrupt(void);
89#endif 87#endif
diff --git a/arch/x86/include/asm/types.h b/arch/x86/include/asm/types.h
index 09b97745772f..df1da20f4534 100644
--- a/arch/x86/include/asm/types.h
+++ b/arch/x86/include/asm/types.h
@@ -1,19 +1,11 @@
1#ifndef _ASM_X86_TYPES_H 1#ifndef _ASM_X86_TYPES_H
2#define _ASM_X86_TYPES_H 2#define _ASM_X86_TYPES_H
3 3
4#include <asm-generic/int-ll64.h> 4#define dma_addr_t dma_addr_t
5 5
6#ifndef __ASSEMBLY__ 6#include <asm-generic/types.h>
7
8typedef unsigned short umode_t;
9 7
10#endif /* __ASSEMBLY__ */
11
12/*
13 * These aren't exported outside the kernel to avoid name space clashes
14 */
15#ifdef __KERNEL__ 8#ifdef __KERNEL__
16
17#ifndef __ASSEMBLY__ 9#ifndef __ASSEMBLY__
18 10
19typedef u64 dma64_addr_t; 11typedef u64 dma64_addr_t;
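Note: #define dma_addr_t dma_addr_t is the usual handshake with asm-generic headers: the macro tells the generic header that the architecture supplies its own typedef, so the generic fallback is skipped. A sketch of the pattern (the generic header's exact guard is an assumption here):

    /* arch header: announce the type, typedef it further down */
    #define dma_addr_t dma_addr_t
    typedef unsigned long long dma_addr_t;  /* illustrative width */

    /* generic header: provide a default only if the arch said nothing */
    #ifndef dma_addr_t
    typedef unsigned int dma_addr_t;
    #define dma_addr_t dma_addr_t
    #endif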
diff --git a/arch/x86/include/asm/ucontext.h b/arch/x86/include/asm/ucontext.h
index 87324cf439d9..b7c29c8017f2 100644
--- a/arch/x86/include/asm/ucontext.h
+++ b/arch/x86/include/asm/ucontext.h
@@ -7,12 +7,6 @@
7 * sigcontext struct (uc_mcontext). 7 * sigcontext struct (uc_mcontext).
8 */ 8 */
9 9
10struct ucontext { 10#include <asm-generic/ucontext.h>
11 unsigned long uc_flags;
12 struct ucontext *uc_link;
13 stack_t uc_stack;
14 struct sigcontext uc_mcontext;
15 sigset_t uc_sigmask; /* mask last for extensibility */
16};
17 11
18#endif /* _ASM_X86_UCONTEXT_H */ 12#endif /* _ASM_X86_UCONTEXT_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 11be5ad2e0e9..272514c2d456 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -55,6 +55,7 @@
55#define SECONDARY_EXEC_ENABLE_EPT 0x00000002 55#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
56#define SECONDARY_EXEC_ENABLE_VPID 0x00000020 56#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
57#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040 57#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
58#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
58 59
59 60
60#define PIN_BASED_EXT_INTR_MASK 0x00000001 61#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -351,9 +352,16 @@ enum vmcs_field {
351#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0 352#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
352#define VMX_EPT_EXTENT_CONTEXT 1 353#define VMX_EPT_EXTENT_CONTEXT 1
353#define VMX_EPT_EXTENT_GLOBAL 2 354#define VMX_EPT_EXTENT_GLOBAL 2
355
356#define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
357#define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
358#define VMX_EPTP_UC_BIT (1ull << 8)
359#define VMX_EPTP_WB_BIT (1ull << 14)
360#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
354#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24) 361#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
355#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25) 362#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
356#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26) 363#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
364
357#define VMX_EPT_DEFAULT_GAW 3 365#define VMX_EPT_DEFAULT_GAW 3
358#define VMX_EPT_MAX_GAW 0x4 366#define VMX_EPT_MAX_GAW 0x4
359#define VMX_EPT_MT_EPTE_SHIFT 3 367#define VMX_EPT_MT_EPTE_SHIFT 3
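Note: the new VMX_EPT_*/VMX_EPTP_* masks are capability bits, not control bits; they are reported by the EPT/VPID capability MSR and only tested by the host. A hedged probe sketch (assuming the standard rdmsrl() API and MSR_IA32_VMX_EPT_VPID_CAP, neither of which appears in this hunk):

    u64 ept_vpid_cap;

    rdmsrl(MSR_IA32_VMX_EPT_VPID_CAP, ept_vpid_cap);

    if (ept_vpid_cap & VMX_EPT_2MB_PAGE_BIT)
            pr_info("EPT: 2MB pages supported\n");
    if (ept_vpid_cap & VMX_EPTP_WB_BIT)
            pr_info("EPTP: write-back memory type supported\n");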
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 430d5b24af7b..832cb838cb48 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -52,6 +52,7 @@ obj-$(CONFIG_X86_DS_SELFTEST) += ds_selftest.o
52obj-$(CONFIG_X86_32) += tls.o 52obj-$(CONFIG_X86_32) += tls.o
53obj-$(CONFIG_IA32_EMULATION) += tls.o 53obj-$(CONFIG_IA32_EMULATION) += tls.o
54obj-y += step.o 54obj-y += step.o
55obj-$(CONFIG_INTEL_TXT) += tboot.o
55obj-$(CONFIG_STACKTRACE) += stacktrace.o 56obj-$(CONFIG_STACKTRACE) += stacktrace.o
56obj-y += cpu/ 57obj-y += cpu/
57obj-y += acpi/ 58obj-y += acpi/
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6b8ca3a0285d..67e929b89875 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -833,106 +833,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
833extern int es7000_plat; 833extern int es7000_plat;
834#endif 834#endif
835 835
836static struct {
837 int gsi_base;
838 int gsi_end;
839} mp_ioapic_routing[MAX_IO_APICS];
840
841int mp_find_ioapic(int gsi)
842{
843 int i = 0;
844
845 /* Find the IOAPIC that manages this GSI. */
846 for (i = 0; i < nr_ioapics; i++) {
847 if ((gsi >= mp_ioapic_routing[i].gsi_base)
848 && (gsi <= mp_ioapic_routing[i].gsi_end))
849 return i;
850 }
851
852 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
853 return -1;
854}
855
856int mp_find_ioapic_pin(int ioapic, int gsi)
857{
858 if (WARN_ON(ioapic == -1))
859 return -1;
860 if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
861 return -1;
862
863 return gsi - mp_ioapic_routing[ioapic].gsi_base;
864}
865
866static u8 __init uniq_ioapic_id(u8 id)
867{
868#ifdef CONFIG_X86_32
869 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
870 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
871 return io_apic_get_unique_id(nr_ioapics, id);
872 else
873 return id;
874#else
875 int i;
876 DECLARE_BITMAP(used, 256);
877 bitmap_zero(used, 256);
878 for (i = 0; i < nr_ioapics; i++) {
879 struct mpc_ioapic *ia = &mp_ioapics[i];
880 __set_bit(ia->apicid, used);
881 }
882 if (!test_bit(id, used))
883 return id;
884 return find_first_zero_bit(used, 256);
885#endif
886}
887
888static int bad_ioapic(unsigned long address)
889{
890 if (nr_ioapics >= MAX_IO_APICS) {
891 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
892 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
893 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
894 }
895 if (!address) {
896 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
897 " found in table, skipping!\n");
898 return 1;
899 }
900 return 0;
901}
902
903void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
904{
905 int idx = 0;
906
907 if (bad_ioapic(address))
908 return;
909
910 idx = nr_ioapics;
911
912 mp_ioapics[idx].type = MP_IOAPIC;
913 mp_ioapics[idx].flags = MPC_APIC_USABLE;
914 mp_ioapics[idx].apicaddr = address;
915
916 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
917 mp_ioapics[idx].apicid = uniq_ioapic_id(id);
918 mp_ioapics[idx].apicver = io_apic_get_version(idx);
919
920 /*
921 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
922 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
923 */
924 mp_ioapic_routing[idx].gsi_base = gsi_base;
925 mp_ioapic_routing[idx].gsi_end = gsi_base +
926 io_apic_get_redir_entries(idx);
927
928 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
929 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
930 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
931 mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
932
933 nr_ioapics++;
934}
935
936int __init acpi_probe_gsi(void) 836int __init acpi_probe_gsi(void)
937{ 837{
938 int idx; 838 int idx;
@@ -947,7 +847,7 @@ int __init acpi_probe_gsi(void)
947 847
948 max_gsi = 0; 848 max_gsi = 0;
949 for (idx = 0; idx < nr_ioapics; idx++) { 849 for (idx = 0; idx < nr_ioapics; idx++) {
950 gsi = mp_ioapic_routing[idx].gsi_end; 850 gsi = mp_gsi_routing[idx].gsi_end;
951 851
952 if (gsi > max_gsi) 852 if (gsi > max_gsi)
953 max_gsi = gsi; 853 max_gsi = gsi;
@@ -1179,9 +1079,8 @@ static int __init acpi_parse_madt_ioapic_entries(void)
1179 * If MPS is present, it will handle them, 1079 * If MPS is present, it will handle them,
1180 * otherwise the system will stay in PIC mode 1080 * otherwise the system will stay in PIC mode
1181 */ 1081 */
1182 if (acpi_disabled || acpi_noirq) { 1082 if (acpi_disabled || acpi_noirq)
1183 return -ENODEV; 1083 return -ENODEV;
1184 }
1185 1084
1186 if (!cpu_has_apic) 1085 if (!cpu_has_apic)
1187 return -ENODEV; 1086 return -ENODEV;
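Note: with the IOAPIC registration code moved out of acpi/boot.c (and the routing table renamed mp_gsi_routing), callers resolve a GSI in two steps using the helpers that now live in io_apic.c. A usage sketch built only from the functions shown in this diff:

    int ioapic, pin;

    ioapic = mp_find_ioapic(gsi);           /* which IOAPIC owns this GSI? */
    if (ioapic < 0)
            return -ENODEV;

    pin = mp_find_ioapic_pin(ioapic, gsi);  /* gsi - gsi_base */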
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f57658702571..de7353c0ce9c 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -2,6 +2,7 @@
2#include <linux/sched.h> 2#include <linux/sched.h>
3#include <linux/mutex.h> 3#include <linux/mutex.h>
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/stringify.h>
5#include <linux/kprobes.h> 6#include <linux/kprobes.h>
6#include <linux/mm.h> 7#include <linux/mm.h>
7#include <linux/vmalloc.h> 8#include <linux/vmalloc.h>
@@ -32,7 +33,7 @@ __setup("smp-alt-boot", bootonly);
32#define smp_alt_once 1 33#define smp_alt_once 1
33#endif 34#endif
34 35
35static int debug_alternative; 36static int __initdata_or_module debug_alternative;
36 37
37static int __init debug_alt(char *str) 38static int __init debug_alt(char *str)
38{ 39{
@@ -51,7 +52,7 @@ static int __init setup_noreplace_smp(char *str)
51__setup("noreplace-smp", setup_noreplace_smp); 52__setup("noreplace-smp", setup_noreplace_smp);
52 53
53#ifdef CONFIG_PARAVIRT 54#ifdef CONFIG_PARAVIRT
54static int noreplace_paravirt = 0; 55static int __initdata_or_module noreplace_paravirt = 0;
55 56
56static int __init setup_noreplace_paravirt(char *str) 57static int __init setup_noreplace_paravirt(char *str)
57{ 58{
@@ -64,16 +65,17 @@ __setup("noreplace-paravirt", setup_noreplace_paravirt);
64#define DPRINTK(fmt, args...) if (debug_alternative) \ 65#define DPRINTK(fmt, args...) if (debug_alternative) \
65 printk(KERN_DEBUG fmt, args) 66 printk(KERN_DEBUG fmt, args)
66 67
67#ifdef GENERIC_NOP1 68#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
68/* Use inline assembly to define this because the nops are defined 69/* Use inline assembly to define this because the nops are defined
69 as inline assembly strings in the include files and we cannot 70 as inline assembly strings in the include files and we cannot
70 get them easily into strings. */ 71 get them easily into strings. */
71asm("\t.section .rodata, \"a\"\nintelnops: " 72asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
72 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6 73 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
73 GENERIC_NOP7 GENERIC_NOP8 74 GENERIC_NOP7 GENERIC_NOP8
74 "\t.previous"); 75 "\t.previous");
75extern const unsigned char intelnops[]; 76extern const unsigned char intelnops[];
76static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = { 77static const unsigned char *const __initconst_or_module
78intel_nops[ASM_NOP_MAX+1] = {
77 NULL, 79 NULL,
78 intelnops, 80 intelnops,
79 intelnops + 1, 81 intelnops + 1,
@@ -87,12 +89,13 @@ static const unsigned char *const intel_nops[ASM_NOP_MAX+1] = {
87#endif 89#endif
88 90
89#ifdef K8_NOP1 91#ifdef K8_NOP1
90asm("\t.section .rodata, \"a\"\nk8nops: " 92asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
91 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6 93 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
92 K8_NOP7 K8_NOP8 94 K8_NOP7 K8_NOP8
93 "\t.previous"); 95 "\t.previous");
94extern const unsigned char k8nops[]; 96extern const unsigned char k8nops[];
95static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = { 97static const unsigned char *const __initconst_or_module
98k8_nops[ASM_NOP_MAX+1] = {
96 NULL, 99 NULL,
97 k8nops, 100 k8nops,
98 k8nops + 1, 101 k8nops + 1,
@@ -105,13 +108,14 @@ static const unsigned char *const k8_nops[ASM_NOP_MAX+1] = {
105}; 108};
106#endif 109#endif
107 110
108#ifdef K7_NOP1 111#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
109asm("\t.section .rodata, \"a\"\nk7nops: " 112asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
110 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6 113 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
111 K7_NOP7 K7_NOP8 114 K7_NOP7 K7_NOP8
112 "\t.previous"); 115 "\t.previous");
113extern const unsigned char k7nops[]; 116extern const unsigned char k7nops[];
114static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = { 117static const unsigned char *const __initconst_or_module
118k7_nops[ASM_NOP_MAX+1] = {
115 NULL, 119 NULL,
116 k7nops, 120 k7nops,
117 k7nops + 1, 121 k7nops + 1,
@@ -125,12 +129,13 @@ static const unsigned char *const k7_nops[ASM_NOP_MAX+1] = {
125#endif 129#endif
126 130
127#ifdef P6_NOP1 131#ifdef P6_NOP1
128asm("\t.section .rodata, \"a\"\np6nops: " 132asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
129 P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6 133 P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
130 P6_NOP7 P6_NOP8 134 P6_NOP7 P6_NOP8
131 "\t.previous"); 135 "\t.previous");
132extern const unsigned char p6nops[]; 136extern const unsigned char p6nops[];
133static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = { 137static const unsigned char *const __initconst_or_module
138p6_nops[ASM_NOP_MAX+1] = {
134 NULL, 139 NULL,
135 p6nops, 140 p6nops,
136 p6nops + 1, 141 p6nops + 1,
@@ -146,7 +151,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
146#ifdef CONFIG_X86_64 151#ifdef CONFIG_X86_64
147 152
148extern char __vsyscall_0; 153extern char __vsyscall_0;
149const unsigned char *const *find_nop_table(void) 154static const unsigned char *const *__init_or_module find_nop_table(void)
150{ 155{
151 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 156 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
152 boot_cpu_has(X86_FEATURE_NOPL)) 157 boot_cpu_has(X86_FEATURE_NOPL))
@@ -157,7 +162,7 @@ const unsigned char *const *find_nop_table(void)
157 162
158#else /* CONFIG_X86_64 */ 163#else /* CONFIG_X86_64 */
159 164
160const unsigned char *const *find_nop_table(void) 165static const unsigned char *const *__init_or_module find_nop_table(void)
161{ 166{
162 if (boot_cpu_has(X86_FEATURE_K8)) 167 if (boot_cpu_has(X86_FEATURE_K8))
163 return k8_nops; 168 return k8_nops;
@@ -172,7 +177,7 @@ const unsigned char *const *find_nop_table(void)
172#endif /* CONFIG_X86_64 */ 177#endif /* CONFIG_X86_64 */
173 178
174/* Use this to add nops to a buffer, then text_poke the whole buffer. */ 179/* Use this to add nops to a buffer, then text_poke the whole buffer. */
175void add_nops(void *insns, unsigned int len) 180static void __init_or_module add_nops(void *insns, unsigned int len)
176{ 181{
177 const unsigned char *const *noptable = find_nop_table(); 182 const unsigned char *const *noptable = find_nop_table();
178 183
@@ -185,10 +190,10 @@ void add_nops(void *insns, unsigned int len)
185 len -= noplen; 190 len -= noplen;
186 } 191 }
187} 192}
188EXPORT_SYMBOL_GPL(add_nops);
189 193
190extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 194extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
191extern u8 *__smp_locks[], *__smp_locks_end[]; 195extern u8 *__smp_locks[], *__smp_locks_end[];
196static void *text_poke_early(void *addr, const void *opcode, size_t len);
192 197
193/* Replace instructions with better alternatives for this CPU type. 198/* Replace instructions with better alternatives for this CPU type.
194 This runs before SMP is initialized to avoid SMP problems with 199 This runs before SMP is initialized to avoid SMP problems with
@@ -196,7 +201,8 @@ extern u8 *__smp_locks[], *__smp_locks_end[];
196 APs have less capabilities than the boot processor are not handled. 201 APs have less capabilities than the boot processor are not handled.
197 Tough. Make sure you disable such features by hand. */ 202 Tough. Make sure you disable such features by hand. */
198 203
199void apply_alternatives(struct alt_instr *start, struct alt_instr *end) 204void __init_or_module apply_alternatives(struct alt_instr *start,
205 struct alt_instr *end)
200{ 206{
201 struct alt_instr *a; 207 struct alt_instr *a;
202 char insnbuf[MAX_PATCH_LEN]; 208 char insnbuf[MAX_PATCH_LEN];
@@ -279,9 +285,10 @@ static LIST_HEAD(smp_alt_modules);
279static DEFINE_MUTEX(smp_alt); 285static DEFINE_MUTEX(smp_alt);
280static int smp_mode = 1; /* protected by smp_alt */ 286static int smp_mode = 1; /* protected by smp_alt */
281 287
282void alternatives_smp_module_add(struct module *mod, char *name, 288void __init_or_module alternatives_smp_module_add(struct module *mod,
283 void *locks, void *locks_end, 289 char *name,
284 void *text, void *text_end) 290 void *locks, void *locks_end,
291 void *text, void *text_end)
285{ 292{
286 struct smp_alt_module *smp; 293 struct smp_alt_module *smp;
287 294
@@ -317,7 +324,7 @@ void alternatives_smp_module_add(struct module *mod, char *name,
317 mutex_unlock(&smp_alt); 324 mutex_unlock(&smp_alt);
318} 325}
319 326
320void alternatives_smp_module_del(struct module *mod) 327void __init_or_module alternatives_smp_module_del(struct module *mod)
321{ 328{
322 struct smp_alt_module *item; 329 struct smp_alt_module *item;
323 330
@@ -386,8 +393,8 @@ void alternatives_smp_switch(int smp)
386#endif 393#endif
387 394
388#ifdef CONFIG_PARAVIRT 395#ifdef CONFIG_PARAVIRT
389void apply_paravirt(struct paravirt_patch_site *start, 396void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
390 struct paravirt_patch_site *end) 397 struct paravirt_patch_site *end)
391{ 398{
392 struct paravirt_patch_site *p; 399 struct paravirt_patch_site *p;
393 char insnbuf[MAX_PATCH_LEN]; 400 char insnbuf[MAX_PATCH_LEN];
@@ -485,13 +492,14 @@ void __init alternative_instructions(void)
485 * instructions. And on the local CPU you need to be protected against NMI or MCE 492 * instructions. And on the local CPU you need to be protected against NMI or MCE
486 * handlers seeing an inconsistent instruction while you patch. 493 * handlers seeing an inconsistent instruction while you patch.
487 */ 494 */
488void *text_poke_early(void *addr, const void *opcode, size_t len) 495static void *__init_or_module text_poke_early(void *addr, const void *opcode,
496 size_t len)
489{ 497{
490 unsigned long flags; 498 unsigned long flags;
491 local_irq_save(flags); 499 local_irq_save(flags);
492 memcpy(addr, opcode, len); 500 memcpy(addr, opcode, len);
493 local_irq_restore(flags);
494 sync_core(); 501 sync_core();
502 local_irq_restore(flags);
495 /* Could also do a CLFLUSH here to speed up CPU recovery; but 503 /* Could also do a CLFLUSH here to speed up CPU recovery; but
496 that causes hangs on some VIA CPUs. */ 504 that causes hangs on some VIA CPUs. */
497 return addr; 505 return addr;
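Note: the annotations added above let kernels built without CONFIG_MODULES discard the patching code and NOP tables after boot, since module loading is the only late caller of apply_alternatives(). The hunk also moves sync_core() before local_irq_restore(), so the serializing instruction runs before any interrupt can execute the freshly patched bytes. A sketch of how such annotations are typically defined (the exact guard in linux/init.h may differ):

    #ifdef CONFIG_MODULES
    # define __init_or_module               /* must outlive boot */
    # define __initdata_or_module
    #else
    # define __init_or_module      __init      /* freed with init sections */
    # define __initdata_or_module  __initdata
    #endif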
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 0a1c2830ec66..159740decc41 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -49,6 +49,7 @@
49#include <asm/mtrr.h> 49#include <asm/mtrr.h>
50#include <asm/smp.h> 50#include <asm/smp.h>
51#include <asm/mce.h> 51#include <asm/mce.h>
52#include <asm/kvm_para.h>
52 53
53unsigned int num_processors; 54unsigned int num_processors;
54 55
@@ -1361,52 +1362,80 @@ void enable_x2apic(void)
1361} 1362}
1362#endif /* CONFIG_X86_X2APIC */ 1363#endif /* CONFIG_X86_X2APIC */
1363 1364
1364void __init enable_IR_x2apic(void) 1365int __init enable_IR(void)
1365{ 1366{
1366#ifdef CONFIG_INTR_REMAP 1367#ifdef CONFIG_INTR_REMAP
1367 int ret;
1368 unsigned long flags;
1369 struct IO_APIC_route_entry **ioapic_entries = NULL;
1370
1371 ret = dmar_table_init();
1372 if (ret) {
1373 pr_debug("dmar_table_init() failed with %d:\n", ret);
1374 goto ir_failed;
1375 }
1376
1377 if (!intr_remapping_supported()) { 1368 if (!intr_remapping_supported()) {
1378 pr_debug("intr-remapping not supported\n"); 1369 pr_debug("intr-remapping not supported\n");
1379 goto ir_failed; 1370 return 0;
1380 } 1371 }
1381 1372
1382
1383 if (!x2apic_preenabled && skip_ioapic_setup) { 1373 if (!x2apic_preenabled && skip_ioapic_setup) {
1384 pr_info("Skipped enabling intr-remap because of skipping " 1374 pr_info("Skipped enabling intr-remap because of skipping "
1385 "io-apic setup\n"); 1375 "io-apic setup\n");
1386 return; 1376 return 0;
1387 } 1377 }
1388 1378
1379 if (enable_intr_remapping(x2apic_supported()))
1380 return 0;
1381
1382 pr_info("Enabled Interrupt-remapping\n");
1383
1384 return 1;
1385
1386#endif
1387 return 0;
1388}
1389
1390void __init enable_IR_x2apic(void)
1391{
1392 unsigned long flags;
1393 struct IO_APIC_route_entry **ioapic_entries = NULL;
1394 int ret, x2apic_enabled = 0;
1395 int dmar_table_init_ret = 0;
1396
1397#ifdef CONFIG_INTR_REMAP
1398 dmar_table_init_ret = dmar_table_init();
1399 if (dmar_table_init_ret)
1400 pr_debug("dmar_table_init() failed with %d:\n",
1401 dmar_table_init_ret);
1402#endif
1403
1389 ioapic_entries = alloc_ioapic_entries(); 1404 ioapic_entries = alloc_ioapic_entries();
1390 if (!ioapic_entries) { 1405 if (!ioapic_entries) {
1391 pr_info("Allocate ioapic_entries failed: %d\n", ret); 1406 pr_err("Allocate ioapic_entries failed\n");
1392 goto end; 1407 goto out;
1393 } 1408 }
1394 1409
1395 ret = save_IO_APIC_setup(ioapic_entries); 1410 ret = save_IO_APIC_setup(ioapic_entries);
1396 if (ret) { 1411 if (ret) {
1397 pr_info("Saving IO-APIC state failed: %d\n", ret); 1412 pr_info("Saving IO-APIC state failed: %d\n", ret);
1398 goto end; 1413 goto out;
1399 } 1414 }
1400 1415
1401 local_irq_save(flags); 1416 local_irq_save(flags);
1402 mask_IO_APIC_setup(ioapic_entries);
1403 mask_8259A(); 1417 mask_8259A();
1418 mask_IO_APIC_setup(ioapic_entries);
1404 1419
1405 ret = enable_intr_remapping(x2apic_supported()); 1420 if (dmar_table_init_ret)
1406 if (ret) 1421 ret = 0;
1407 goto end_restore; 1422 else
1423 ret = enable_IR();
1408 1424
1409 pr_info("Enabled Interrupt-remapping\n"); 1425 if (!ret) {
1426 /* IR is required if there is APIC ID > 255 even when running
1427 * under KVM
1428 */
1429 if (max_physical_apicid > 255 || !kvm_para_available())
1430 goto nox2apic;
1431 /*
1432 * without IR all CPUs can be addressed by IOAPIC/MSI
1433 * only in physical mode
1434 */
1435 x2apic_force_phys();
1436 }
1437
1438 x2apic_enabled = 1;
1410 1439
1411 if (x2apic_supported() && !x2apic_mode) { 1440 if (x2apic_supported() && !x2apic_mode) {
1412 x2apic_mode = 1; 1441 x2apic_mode = 1;
@@ -1414,41 +1443,25 @@ void __init enable_IR_x2apic(void)
1414 pr_info("Enabled x2apic\n"); 1443 pr_info("Enabled x2apic\n");
1415 } 1444 }
1416 1445
1417end_restore: 1446nox2apic:
1418 if (ret) 1447 if (!ret) /* IR enabling failed */
1419 /*
1420 * IR enabling failed
1421 */
1422 restore_IO_APIC_setup(ioapic_entries); 1448 restore_IO_APIC_setup(ioapic_entries);
1423
1424 unmask_8259A(); 1449 unmask_8259A();
1425 local_irq_restore(flags); 1450 local_irq_restore(flags);
1426 1451
1427end: 1452out:
1428 if (ioapic_entries) 1453 if (ioapic_entries)
1429 free_ioapic_entries(ioapic_entries); 1454 free_ioapic_entries(ioapic_entries);
1430 1455
1431 if (!ret) 1456 if (x2apic_enabled)
1432 return; 1457 return;
1433 1458
1434ir_failed:
1435 if (x2apic_preenabled) 1459 if (x2apic_preenabled)
1436 panic("x2apic enabled by bios. But IR enabling failed"); 1460 panic("x2apic: enabled by BIOS but kernel init failed.");
1437 else if (cpu_has_x2apic) 1461 else if (cpu_has_x2apic)
1438 pr_info("Not enabling x2apic,Intr-remapping\n"); 1462 pr_info("Not enabling x2apic, Intr-remapping init failed.\n");
1439#else
1440 if (!cpu_has_x2apic)
1441 return;
1442
1443 if (x2apic_preenabled)
1444 panic("x2apic enabled prior OS handover,"
1445 " enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
1446#endif
1447
1448 return;
1449} 1463}
1450 1464
1451
1452#ifdef CONFIG_X86_64 1465#ifdef CONFIG_X86_64
1453/* 1466/*
1454 * Detect and enable local APICs on non-SMP boards. 1467 * Detect and enable local APICs on non-SMP boards.
@@ -1549,8 +1562,6 @@ no_apic:
1549#ifdef CONFIG_X86_64 1562#ifdef CONFIG_X86_64
1550void __init early_init_lapic_mapping(void) 1563void __init early_init_lapic_mapping(void)
1551{ 1564{
1552 unsigned long phys_addr;
1553
1554 /* 1565 /*
1555 * If no local APIC can be found then go out 1566 * If no local APIC can be found then go out
1556 * : it means there is no mpatable and MADT 1567 * : it means there is no mpatable and MADT
@@ -1558,11 +1569,9 @@ void __init early_init_lapic_mapping(void)
1558 if (!smp_found_config) 1569 if (!smp_found_config)
1559 return; 1570 return;
1560 1571
1561 phys_addr = mp_lapic_addr; 1572 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
1562
1563 set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
1564 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n", 1573 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
1565 APIC_BASE, phys_addr); 1574 APIC_BASE, mp_lapic_addr);
1566 1575
1567 /* 1576 /*
1568 * Fetch the APIC ID of the BSP in case we have a 1577 * Fetch the APIC ID of the BSP in case we have a
@@ -1651,7 +1660,6 @@ int __init APIC_init_uniprocessor(void)
1651 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) { 1660 APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
1652 pr_err("BIOS bug, local APIC 0x%x not detected!...\n", 1661 pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
1653 boot_cpu_physical_apicid); 1662 boot_cpu_physical_apicid);
1654 clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1655 return -1; 1663 return -1;
1656 } 1664 }
1657#endif 1665#endif
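Note: the rework above separates "can interrupt remapping be enabled" (enable_IR()) from the x2APIC decision, and encodes a deliberate fallback: without IR, x2APIC is only usable when every APIC ID fits in 8 bits and a paravirtual host (KVM) is present, and even then only in physical destination mode. Condensed decision logic, paraphrased from the hunk:

    /* after trying enable_IR() */
    if (!ir_enabled) {
            if (max_physical_apicid > 255 || !kvm_para_available())
                    goto nox2apic;          /* stay in xAPIC mode */
            x2apic_force_phys();            /* no IR: IOAPIC/MSI can only
                                             * address CPUs physically */
    }
    /* safe to switch to x2APIC mode here */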
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 8952a5890281..89174f847b49 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -167,7 +167,7 @@ static int es7000_apic_is_cluster(void)
167{ 167{
168 /* MPENTIUMIII */ 168 /* MPENTIUMIII */
169 if (boot_cpu_data.x86 == 6 && 169 if (boot_cpu_data.x86 == 6 &&
170 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) 170 (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
171 return 1; 171 return 1;
172 172
173 return 0; 173 return 0;
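Note: the one-character fix above repairs an always-true predicate: every integer satisfies m >= 7 || m <= 11, so the cluster check matched all family-6 models. A standalone demonstration:

    #include <stdio.h>

    static int buggy(int m) { return m >= 7 || m <= 11; }
    static int fixed(int m) { return m >= 7 && m <= 11; }

    int main(void)
    {
            int m;

            for (m = 0; m <= 15; m++)
                    printf("model %2d: buggy=%d fixed=%d\n",
                           m, buggy(m), fixed(m));
            return 0;   /* buggy() is 1 for every model */
    }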
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index d2ed6c5ddc80..3c8f9e75d038 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -66,6 +66,8 @@
66#include <asm/apic.h> 66#include <asm/apic.h>
67 67
68#define __apicdebuginit(type) static type __init 68#define __apicdebuginit(type) static type __init
69#define for_each_irq_pin(entry, head) \
70 for (entry = head; entry; entry = entry->next)
69 71
70/* 72/*
71 * Is the SiS APIC rmw bug present ? 73 * Is the SiS APIC rmw bug present ?
@@ -85,6 +87,9 @@ int nr_ioapic_registers[MAX_IO_APICS];
85struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; 87struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
86int nr_ioapics; 88int nr_ioapics;
87 89
90/* IO APIC gsi routing info */
91struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];
92
88/* MP IRQ source entries */ 93/* MP IRQ source entries */
89struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; 94struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
90 95
@@ -116,15 +121,6 @@ static int __init parse_noapic(char *str)
116} 121}
117early_param("noapic", parse_noapic); 122early_param("noapic", parse_noapic);
118 123
119struct irq_pin_list;
120
121/*
122 * This is performance-critical, we want to do it O(1)
123 *
124 * the indexing order of this array favors 1:1 mappings
125 * between pins and IRQs.
126 */
127
128struct irq_pin_list { 124struct irq_pin_list {
129 int apic, pin; 125 int apic, pin;
130 struct irq_pin_list *next; 126 struct irq_pin_list *next;
@@ -139,6 +135,11 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
139 return pin; 135 return pin;
140} 136}
141 137
138/*
139 * This is performance-critical, we want to do it O(1)
140 *
141 * Most irqs are mapped 1:1 with pins.
142 */
142struct irq_cfg { 143struct irq_cfg {
143 struct irq_pin_list *irq_2_pin; 144 struct irq_pin_list *irq_2_pin;
144 cpumask_var_t domain; 145 cpumask_var_t domain;
@@ -414,13 +415,10 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
414 unsigned long flags; 415 unsigned long flags;
415 416
416 spin_lock_irqsave(&ioapic_lock, flags); 417 spin_lock_irqsave(&ioapic_lock, flags);
417 entry = cfg->irq_2_pin; 418 for_each_irq_pin(entry, cfg->irq_2_pin) {
418 for (;;) {
419 unsigned int reg; 419 unsigned int reg;
420 int pin; 420 int pin;
421 421
422 if (!entry)
423 break;
424 pin = entry->pin; 422 pin = entry->pin;
425 reg = io_apic_read(entry->apic, 0x10 + pin*2); 423 reg = io_apic_read(entry->apic, 0x10 + pin*2);
426 /* Is the remote IRR bit set? */ 424 /* Is the remote IRR bit set? */
@@ -428,9 +426,6 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
428 spin_unlock_irqrestore(&ioapic_lock, flags); 426 spin_unlock_irqrestore(&ioapic_lock, flags);
429 return true; 427 return true;
430 } 428 }
431 if (!entry->next)
432 break;
433 entry = entry->next;
434 } 429 }
435 spin_unlock_irqrestore(&ioapic_lock, flags); 430 spin_unlock_irqrestore(&ioapic_lock, flags);
436 431
@@ -498,72 +493,68 @@ static void ioapic_mask_entry(int apic, int pin)
498 * shared ISA-space IRQs, so we have to support them. We are super 493 * shared ISA-space IRQs, so we have to support them. We are super
499 * fast in the common case, and fast for shared ISA-space IRQs. 494 * fast in the common case, and fast for shared ISA-space IRQs.
500 */ 495 */
501static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 496static int
497add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
502{ 498{
503 struct irq_pin_list *entry; 499 struct irq_pin_list **last, *entry;
504 500
505 entry = cfg->irq_2_pin; 501 /* don't allow duplicates */
506 if (!entry) { 502 last = &cfg->irq_2_pin;
507 entry = get_one_free_irq_2_pin(node); 503 for_each_irq_pin(entry, cfg->irq_2_pin) {
508 if (!entry) {
509 printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
510 apic, pin);
511 return;
512 }
513 cfg->irq_2_pin = entry;
514 entry->apic = apic;
515 entry->pin = pin;
516 return;
517 }
518
519 while (entry->next) {
520 /* not again, please */
521 if (entry->apic == apic && entry->pin == pin) 504 if (entry->apic == apic && entry->pin == pin)
522 return; 505 return 0;
523 506 last = &entry->next;
524 entry = entry->next;
525 } 507 }
526 508
527 entry->next = get_one_free_irq_2_pin(node); 509 entry = get_one_free_irq_2_pin(node);
528 entry = entry->next; 510 if (!entry) {
511 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
512 node, apic, pin);
513 return -ENOMEM;
514 }
529 entry->apic = apic; 515 entry->apic = apic;
530 entry->pin = pin; 516 entry->pin = pin;
517
518 *last = entry;
519 return 0;
520}
521
522static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
523{
524 if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin))
525 panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
531} 526}
532 527
533/* 528/*
534 * Reroute an IRQ to a different pin. 529 * Reroute an IRQ to a different pin.
535 */ 530 */
536static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, 531static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
537 int oldapic, int oldpin, 532 int oldapic, int oldpin,
538 int newapic, int newpin) 533 int newapic, int newpin)
539{ 534{
540 struct irq_pin_list *entry = cfg->irq_2_pin; 535 struct irq_pin_list *entry;
541 int replaced = 0;
542 536
543 while (entry) { 537 for_each_irq_pin(entry, cfg->irq_2_pin) {
544 if (entry->apic == oldapic && entry->pin == oldpin) { 538 if (entry->apic == oldapic && entry->pin == oldpin) {
545 entry->apic = newapic; 539 entry->apic = newapic;
546 entry->pin = newpin; 540 entry->pin = newpin;
547 replaced = 1;
548 /* every one is different, right? */ 541 /* every one is different, right? */
549 break; 542 return;
550 } 543 }
551 entry = entry->next;
552 } 544 }
553 545
554 /* why? call replace before add? */ 546 /* old apic/pin didn't exist, so just add new ones */
555 if (!replaced) 547 add_pin_to_irq_node(cfg, node, newapic, newpin);
556 add_pin_to_irq_node(cfg, node, newapic, newpin);
557} 548}
558 549
559static inline void io_apic_modify_irq(struct irq_cfg *cfg, 550static void io_apic_modify_irq(struct irq_cfg *cfg,
560 int mask_and, int mask_or, 551 int mask_and, int mask_or,
561 void (*final)(struct irq_pin_list *entry)) 552 void (*final)(struct irq_pin_list *entry))
562{ 553{
563 int pin; 554 int pin;
564 struct irq_pin_list *entry; 555 struct irq_pin_list *entry;
565 556
566 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) { 557 for_each_irq_pin(entry, cfg->irq_2_pin) {
567 unsigned int reg; 558 unsigned int reg;
568 pin = entry->pin; 559 pin = entry->pin;
569 reg = io_apic_read(entry->apic, 0x10 + pin * 2); 560 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
@@ -580,7 +571,6 @@ static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
580 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 571 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
581} 572}
582 573
583#ifdef CONFIG_X86_64
584static void io_apic_sync(struct irq_pin_list *entry) 574static void io_apic_sync(struct irq_pin_list *entry)
585{ 575{
586 /* 576 /*
@@ -596,11 +586,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
596{ 586{
597 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 587 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
598} 588}
599#else /* CONFIG_X86_32 */
600static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
601{
602 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
603}
604 589
605static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg) 590static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
606{ 591{
@@ -613,7 +598,6 @@ static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
613 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 598 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
614 IO_APIC_REDIR_LEVEL_TRIGGER, NULL); 599 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
615} 600}
616#endif /* CONFIG_X86_32 */
617 601
618static void mask_IO_APIC_irq_desc(struct irq_desc *desc) 602static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
619{ 603{
@@ -1702,12 +1686,8 @@ __apicdebuginit(void) print_IO_APIC(void)
1702 if (!entry) 1686 if (!entry)
1703 continue; 1687 continue;
1704 printk(KERN_DEBUG "IRQ%d ", irq); 1688 printk(KERN_DEBUG "IRQ%d ", irq);
1705 for (;;) { 1689 for_each_irq_pin(entry, cfg->irq_2_pin)
1706 printk("-> %d:%d", entry->apic, entry->pin); 1690 printk("-> %d:%d", entry->apic, entry->pin);
1707 if (!entry->next)
1708 break;
1709 entry = entry->next;
1710 }
1711 printk("\n"); 1691 printk("\n");
1712 } 1692 }
1713 1693
@@ -2211,7 +2191,6 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2211 return was_pending; 2191 return was_pending;
2212} 2192}
2213 2193
2214#ifdef CONFIG_X86_64
2215static int ioapic_retrigger_irq(unsigned int irq) 2194static int ioapic_retrigger_irq(unsigned int irq)
2216{ 2195{
2217 2196
@@ -2224,14 +2203,6 @@ static int ioapic_retrigger_irq(unsigned int irq)
2224 2203
2225 return 1; 2204 return 1;
2226} 2205}
2227#else
2228static int ioapic_retrigger_irq(unsigned int irq)
2229{
2230 apic->send_IPI_self(irq_cfg(irq)->vector);
2231
2232 return 1;
2233}
2234#endif
2235 2206
2236/* 2207/*
2237 * Level and edge triggered IO-APIC interrupts need different handling, 2208 * Level and edge triggered IO-APIC interrupts need different handling,
@@ -2269,13 +2240,9 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2269 struct irq_pin_list *entry; 2240 struct irq_pin_list *entry;
2270 u8 vector = cfg->vector; 2241 u8 vector = cfg->vector;
2271 2242
2272 entry = cfg->irq_2_pin; 2243 for_each_irq_pin(entry, cfg->irq_2_pin) {
2273 for (;;) {
2274 unsigned int reg; 2244 unsigned int reg;
2275 2245
2276 if (!entry)
2277 break;
2278
2279 apic = entry->apic; 2246 apic = entry->apic;
2280 pin = entry->pin; 2247 pin = entry->pin;
2281 /* 2248 /*
@@ -2288,9 +2255,6 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2288 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2255 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2289 reg |= vector; 2256 reg |= vector;
2290 io_apic_modify(apic, 0x10 + pin*2, reg); 2257 io_apic_modify(apic, 0x10 + pin*2, reg);
2291 if (!entry->next)
2292 break;
2293 entry = entry->next;
2294 } 2258 }
2295} 2259}
2296 2260
@@ -2515,11 +2479,8 @@ atomic_t irq_mis_count;
2515static void ack_apic_level(unsigned int irq) 2479static void ack_apic_level(unsigned int irq)
2516{ 2480{
2517 struct irq_desc *desc = irq_to_desc(irq); 2481 struct irq_desc *desc = irq_to_desc(irq);
2518
2519#ifdef CONFIG_X86_32
2520 unsigned long v; 2482 unsigned long v;
2521 int i; 2483 int i;
2522#endif
2523 struct irq_cfg *cfg; 2484 struct irq_cfg *cfg;
2524 int do_unmask_irq = 0; 2485 int do_unmask_irq = 0;
2525 2486
@@ -2532,31 +2493,28 @@ static void ack_apic_level(unsigned int irq)
2532 } 2493 }
2533#endif 2494#endif
2534 2495
2535#ifdef CONFIG_X86_32
2536 /* 2496 /*
2537 * It appears there is an erratum which affects at least version 0x11 2497 * It appears there is an erratum which affects at least version 0x11
2538 * of I/O APIC (that's the 82093AA and cores integrated into various 2498 * of I/O APIC (that's the 82093AA and cores integrated into various
2539 * chipsets). Under certain conditions a level-triggered interrupt is 2499 * chipsets). Under certain conditions a level-triggered interrupt is
2540 * erroneously delivered as edge-triggered one but the respective IRR 2500 * erroneously delivered as edge-triggered one but the respective IRR
2541 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2501 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2542 * message but it will never arrive and further interrupts are blocked 2502 * message but it will never arrive and further interrupts are blocked
2543 * from the source. The exact reason is so far unknown, but the 2503 * from the source. The exact reason is so far unknown, but the
2544 * phenomenon was observed when two consecutive interrupt requests 2504 * phenomenon was observed when two consecutive interrupt requests
2545 * from a given source get delivered to the same CPU and the source is 2505 * from a given source get delivered to the same CPU and the source is
2546 * temporarily disabled in between. 2506 * temporarily disabled in between.
2547 * 2507 *
2548 * A workaround is to simulate an EOI message manually. We achieve it 2508 * A workaround is to simulate an EOI message manually. We achieve it
2549 * by setting the trigger mode to edge and then to level when the edge 2509 * by setting the trigger mode to edge and then to level when the edge
2550 * trigger mode gets detected in the TMR of a local APIC for a 2510 * trigger mode gets detected in the TMR of a local APIC for a
2551 * level-triggered interrupt. We mask the source for the time of the 2511 * level-triggered interrupt. We mask the source for the time of the
2552 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2512 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2553 * The idea is from Manfred Spraul. --macro 2513 * The idea is from Manfred Spraul. --macro
2554 */ 2514 */
2555 cfg = desc->chip_data; 2515 cfg = desc->chip_data;
2556 i = cfg->vector; 2516 i = cfg->vector;
2557
2558 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2517 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2559#endif
2560 2518
2561 /* 2519 /*
2562 * We must acknowledge the irq before we move it or the acknowledge will 2520 * We must acknowledge the irq before we move it or the acknowledge will
@@ -2598,7 +2556,7 @@ static void ack_apic_level(unsigned int irq)
2598 unmask_IO_APIC_irq_desc(desc); 2556 unmask_IO_APIC_irq_desc(desc);
2599 } 2557 }
2600 2558
2601#ifdef CONFIG_X86_32 2559 /* Tail end of version 0x11 I/O APIC bug workaround */
2602 if (!(v & (1 << (i & 0x1f)))) { 2560 if (!(v & (1 << (i & 0x1f)))) {
2603 atomic_inc(&irq_mis_count); 2561 atomic_inc(&irq_mis_count);
2604 spin_lock(&ioapic_lock); 2562 spin_lock(&ioapic_lock);
@@ -2606,26 +2564,15 @@ static void ack_apic_level(unsigned int irq)
2606 __unmask_and_level_IO_APIC_irq(cfg); 2564 __unmask_and_level_IO_APIC_irq(cfg);
2607 spin_unlock(&ioapic_lock); 2565 spin_unlock(&ioapic_lock);
2608 } 2566 }
2609#endif
2610} 2567}
2611 2568
2612#ifdef CONFIG_INTR_REMAP 2569#ifdef CONFIG_INTR_REMAP
2613static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2570static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2614{ 2571{
2615 int apic, pin;
2616 struct irq_pin_list *entry; 2572 struct irq_pin_list *entry;
2617 2573
2618 entry = cfg->irq_2_pin; 2574 for_each_irq_pin(entry, cfg->irq_2_pin)
2619 for (;;) { 2575 io_apic_eoi(entry->apic, entry->pin);
2620
2621 if (!entry)
2622 break;
2623
2624 apic = entry->apic;
2625 pin = entry->pin;
2626 io_apic_eoi(apic, pin);
2627 entry = entry->next;
2628 }
2629} 2576}
2630 2577
2631static void 2578static void
@@ -3241,8 +3188,7 @@ void destroy_irq(unsigned int irq)
3241 cfg = desc->chip_data; 3188 cfg = desc->chip_data;
3242 dynamic_irq_cleanup(irq); 3189 dynamic_irq_cleanup(irq);
3243 /* connect back irq_cfg */ 3190 /* connect back irq_cfg */
3244 if (desc) 3191 desc->chip_data = cfg;
3245 desc->chip_data = cfg;
3246 3192
3247 free_irte(irq); 3193 free_irte(irq);
3248 spin_lock_irqsave(&vector_lock, flags); 3194 spin_lock_irqsave(&vector_lock, flags);
@@ -3912,7 +3858,11 @@ static int __io_apic_set_pci_routing(struct device *dev, int irq,
3912 */ 3858 */
3913 if (irq >= NR_IRQS_LEGACY) { 3859 if (irq >= NR_IRQS_LEGACY) {
3914 cfg = desc->chip_data; 3860 cfg = desc->chip_data;
3915 add_pin_to_irq_node(cfg, node, ioapic, pin); 3861 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
3862 printk(KERN_INFO "can not add pin %d for irq %d\n",
3863 pin, irq);
3864 return 0;
3865 }
3916 } 3866 }
3917 3867
3918 setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity); 3868 setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity);
@@ -3941,11 +3891,28 @@ int io_apic_set_pci_routing(struct device *dev, int irq,
3941 return __io_apic_set_pci_routing(dev, irq, irq_attr); 3891 return __io_apic_set_pci_routing(dev, irq, irq_attr);
3942} 3892}
3943 3893
3944/* -------------------------------------------------------------------------- 3894u8 __init io_apic_unique_id(u8 id)
3945 ACPI-based IOAPIC Configuration 3895{
3946 -------------------------------------------------------------------------- */ 3896#ifdef CONFIG_X86_32
3897 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3898 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3899 return io_apic_get_unique_id(nr_ioapics, id);
3900 else
3901 return id;
3902#else
3903 int i;
3904 DECLARE_BITMAP(used, 256);
3947 3905
3948#ifdef CONFIG_ACPI 3906 bitmap_zero(used, 256);
3907 for (i = 0; i < nr_ioapics; i++) {
3908 struct mpc_ioapic *ia = &mp_ioapics[i];
3909 __set_bit(ia->apicid, used);
3910 }
3911 if (!test_bit(id, used))
3912 return id;
3913 return find_first_zero_bit(used, 256);
3914#endif
3915}
3949 3916
3950#ifdef CONFIG_X86_32 3917#ifdef CONFIG_X86_32
3951int __init io_apic_get_unique_id(int ioapic, int apic_id) 3918int __init io_apic_get_unique_id(int ioapic, int apic_id)
@@ -4054,8 +4021,6 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
4054 return 0; 4021 return 0;
4055} 4022}
4056 4023
4057#endif /* CONFIG_ACPI */
4058
4059/* 4024/*
4060 * This function currently is only a helper for the i386 smp boot process where 4025 * This function currently is only a helper for the i386 smp boot process where
4061 * we need to reprogram the ioredtbls to cater for the cpus which have come online 4026 * we need to reprogram the ioredtbls to cater for the cpus which have come online
@@ -4109,7 +4074,7 @@ void __init setup_ioapic_dest(void)
4109 4074
4110static struct resource *ioapic_resources; 4075static struct resource *ioapic_resources;
4111 4076
4112static struct resource * __init ioapic_setup_resources(void) 4077static struct resource * __init ioapic_setup_resources(int nr_ioapics)
4113{ 4078{
4114 unsigned long n; 4079 unsigned long n;
4115 struct resource *res; 4080 struct resource *res;
@@ -4125,15 +4090,13 @@ static struct resource * __init ioapic_setup_resources(void)
4125 mem = alloc_bootmem(n); 4090 mem = alloc_bootmem(n);
4126 res = (void *)mem; 4091 res = (void *)mem;
4127 4092
4128 if (mem != NULL) { 4093 mem += sizeof(struct resource) * nr_ioapics;
4129 mem += sizeof(struct resource) * nr_ioapics;
4130 4094
4131 for (i = 0; i < nr_ioapics; i++) { 4095 for (i = 0; i < nr_ioapics; i++) {
4132 res[i].name = mem; 4096 res[i].name = mem;
4133 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 4097 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
4134 sprintf(mem, "IOAPIC %u", i); 4098 sprintf(mem, "IOAPIC %u", i);
4135 mem += IOAPIC_RESOURCE_NAME_SIZE; 4099 mem += IOAPIC_RESOURCE_NAME_SIZE;
4136 }
4137 } 4100 }
4138 4101
4139 ioapic_resources = res; 4102 ioapic_resources = res;
@@ -4147,7 +4110,7 @@ void __init ioapic_init_mappings(void)
4147 struct resource *ioapic_res; 4110 struct resource *ioapic_res;
4148 int i; 4111 int i;
4149 4112
4150 ioapic_res = ioapic_setup_resources(); 4113 ioapic_res = ioapic_setup_resources(nr_ioapics);
4151 for (i = 0; i < nr_ioapics; i++) { 4114 for (i = 0; i < nr_ioapics; i++) {
4152 if (smp_found_config) { 4115 if (smp_found_config) {
4153 ioapic_phys = mp_ioapics[i].apicaddr; 4116 ioapic_phys = mp_ioapics[i].apicaddr;
@@ -4176,11 +4139,9 @@ fake_ioapic_page:
4176 __fix_to_virt(idx), ioapic_phys); 4139 __fix_to_virt(idx), ioapic_phys);
4177 idx++; 4140 idx++;
4178 4141
4179 if (ioapic_res != NULL) { 4142 ioapic_res->start = ioapic_phys;
4180 ioapic_res->start = ioapic_phys; 4143 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
4181 ioapic_res->end = ioapic_phys + (4 * 1024) - 1; 4144 ioapic_res++;
4182 ioapic_res++;
4183 }
4184 } 4145 }
4185} 4146}
4186 4147
@@ -4201,3 +4162,76 @@ void __init ioapic_insert_resources(void)
4201 r++; 4162 r++;
4202 } 4163 }
4203} 4164}
4165
4166int mp_find_ioapic(int gsi)
4167{
4168 int i = 0;
4169
4170 /* Find the IOAPIC that manages this GSI. */
4171 for (i = 0; i < nr_ioapics; i++) {
4172 if ((gsi >= mp_gsi_routing[i].gsi_base)
4173 && (gsi <= mp_gsi_routing[i].gsi_end))
4174 return i;
4175 }
4176
4177 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
4178 return -1;
4179}
4180
4181int mp_find_ioapic_pin(int ioapic, int gsi)
4182{
4183 if (WARN_ON(ioapic == -1))
4184 return -1;
4185 if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
4186 return -1;
4187
4188 return gsi - mp_gsi_routing[ioapic].gsi_base;
4189}
4190
4191static int bad_ioapic(unsigned long address)
4192{
4193 if (nr_ioapics >= MAX_IO_APICS) {
4194 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
4195 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
4196 return 1;
4197 }
4198 if (!address) {
4199 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
4200 " found in table, skipping!\n");
4201 return 1;
4202 }
4203 return 0;
4204}
4205
4206void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
4207{
4208 int idx = 0;
4209
4210 if (bad_ioapic(address))
4211 return;
4212
4213 idx = nr_ioapics;
4214
4215 mp_ioapics[idx].type = MP_IOAPIC;
4216 mp_ioapics[idx].flags = MPC_APIC_USABLE;
4217 mp_ioapics[idx].apicaddr = address;
4218
4219 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
4220 mp_ioapics[idx].apicid = io_apic_unique_id(id);
4221 mp_ioapics[idx].apicver = io_apic_get_version(idx);
4222
4223 /*
4224 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
4225 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
4226 */
4227 mp_gsi_routing[idx].gsi_base = gsi_base;
4228 mp_gsi_routing[idx].gsi_end = gsi_base +
4229 io_apic_get_redir_entries(idx);
4230
4231 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
4232 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
4233 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
4234 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);
4235
4236 nr_ioapics++;
4237}
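Note: two patterns in the io_apic.c diff are worth isolating: the for_each_irq_pin() iterator macro, which replaces several hand-rolled loops, and the pointer-to-pointer walk in add_pin_to_irq_node_nopanic(), which appends without a special case for the empty list. A standalone sketch of both (names hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int val; struct node *next; };

    #define for_each_node(n, head) \
            for (n = head; n; n = n->next)

    /* Walk a pointer to the link that must change, then store
     * through it: no head-vs-tail special case. */
    static int append_unique(struct node **head, int val)
    {
            struct node **last = head, *n;

            for_each_node(n, *head) {
                    if (n->val == val)
                            return 0;       /* no duplicates */
                    last = &n->next;
            }
            n = calloc(1, sizeof(*n));
            if (!n)
                    return -1;
            n->val = val;
            *last = n;                      /* works on an empty list too */
            return 0;
    }

    int main(void)
    {
            struct node *head = NULL, *n;

            append_unique(&head, 1);
            append_unique(&head, 2);
            append_unique(&head, 1);        /* ignored */
            for_each_node(n, head)
                    printf("%d\n", n->val); /* 1 then 2 */
            return 0;
    }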
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 6ef00ba4c886..08385e090a6f 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -153,7 +153,7 @@ int safe_smp_processor_id(void)
153{ 153{
154 int apicid, cpuid; 154 int apicid, cpuid;
155 155
156 if (!boot_cpu_has(X86_FEATURE_APIC)) 156 if (!cpu_has_apic)
157 return 0; 157 return 0;
158 158
159 apicid = hard_smp_processor_id(); 159 apicid = hard_smp_processor_id();
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index db7220220d09..cb66a22d98ad 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -66,7 +66,7 @@ static inline unsigned int get_nmi_count(int cpu)
66 66
67static inline int mce_in_progress(void) 67static inline int mce_in_progress(void)
68{ 68{
69#if defined(CONFIG_X86_NEW_MCE) 69#if defined(CONFIG_X86_MCE)
70 return atomic_read(&mce_entry) > 0; 70 return atomic_read(&mce_entry) > 0;
71#endif 71#endif
72 return 0; 72 return 0;
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index fcec2f1d34a1..65edc180fc82 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -55,11 +55,11 @@ static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
55void __init default_setup_apic_routing(void) 55void __init default_setup_apic_routing(void)
56{ 56{
57#ifdef CONFIG_X86_X2APIC 57#ifdef CONFIG_X86_X2APIC
58 if (x2apic_mode && (apic != &apic_x2apic_phys && 58 if (x2apic_mode
59#ifdef CONFIG_X86_UV 59#ifdef CONFIG_X86_UV
60 apic != &apic_x2apic_uv_x && 60 && apic != &apic_x2apic_uv_x
61#endif 61#endif
62 apic != &apic_x2apic_cluster)) { 62 ) {
63 if (x2apic_phys) 63 if (x2apic_phys)
64 apic = &apic_x2apic_phys; 64 apic = &apic_x2apic_phys;
65 else 65 else
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 442b5508893f..151ace69a5aa 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -403,7 +403,15 @@ static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
403static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue); 403static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);
404static struct apm_user *user_list; 404static struct apm_user *user_list;
405static DEFINE_SPINLOCK(user_list_lock); 405static DEFINE_SPINLOCK(user_list_lock);
406static const struct desc_struct bad_bios_desc = { { { 0, 0x00409200 } } }; 406
407/*
408 * Set up a segment that references the real mode segment 0x40
409 * that extends up to the end of page zero (that we have reserved).
410 * This is for buggy BIOS's that refer to (real mode) segment 0x40
411 * even though they are called in protected mode.
412 */
413static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
414 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
407 415
408static const char driver_version[] = "1.16ac"; /* no spaces */ 416static const char driver_version[] = "1.16ac"; /* no spaces */
409 417
@@ -2332,15 +2340,6 @@ static int __init apm_init(void)
2332 pm_flags |= PM_APM; 2340 pm_flags |= PM_APM;
2333 2341
2334 /* 2342 /*
2335 * Set up a segment that references the real mode segment 0x40
2336 * that extends up to the end of page zero (that we have reserved).
2337 * This is for buggy BIOS's that refer to (real mode) segment 0x40
2338 * even though they are called in protected mode.
2339 */
2340 set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
2341 _set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
2342
2343 /*
2344 * Set up the long jump entry point to the APM BIOS, which is called 2343 * Set up the long jump entry point to the APM BIOS, which is called
2345 * from inline assembly. 2344 * from inline assembly.
2346 */ 2345 */
@@ -2358,12 +2357,12 @@ static int __init apm_init(void)
2358 * code to that CPU. 2357 * code to that CPU.
2359 */ 2358 */
2360 gdt = get_cpu_gdt_table(0); 2359 gdt = get_cpu_gdt_table(0);
2361 set_base(gdt[APM_CS >> 3], 2360 set_desc_base(&gdt[APM_CS >> 3],
2362 __va((unsigned long)apm_info.bios.cseg << 4)); 2361 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
2363 set_base(gdt[APM_CS_16 >> 3], 2362 set_desc_base(&gdt[APM_CS_16 >> 3],
2364 __va((unsigned long)apm_info.bios.cseg_16 << 4)); 2363 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
2365 set_base(gdt[APM_DS >> 3], 2364 set_desc_base(&gdt[APM_DS >> 3],
2366 __va((unsigned long)apm_info.bios.dseg << 4)); 2365 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
2367 2366
2368 proc_create("apm", 0, NULL, &apm_file_ops); 2367 proc_create("apm", 0, NULL, &apm_file_ops);
2369 2368
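Note: GDT_ENTRY_INIT() lets the bad-BIOS descriptor be built at compile time from (flags, base, limit) instead of being patched at runtime with set_base()/_set_limit(). A hedged sketch of the packing, following the x86 segment-descriptor layout (the macro's exact spelling in desc_defs.h may differ):

    /* 'a' = limit 15:0 | base 15:0 shifted up;
     * 'b' = base 23:16 | type/flags | limit 19:16 | base 31:24 */
    #define GDT_ENTRY_INIT(flags, base, limit) { { {                   \
            .a = ((limit) & 0xffff) | (((base) & 0xffff) << 16),       \
            .b = (((base) >> 16) & 0xff) | (((flags) & 0xf0ff) << 8) | \
                 ((limit) & 0xf0000) | ((base) & 0xff000000),          \
    } } }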
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index c1f253dac155..8dd30638fe44 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -13,7 +13,7 @@ CFLAGS_common.o := $(nostackp)
13 13
14obj-y := intel_cacheinfo.o addon_cpuid_features.o 14obj-y := intel_cacheinfo.o addon_cpuid_features.o
15obj-y += proc.o capflags.o powerflags.o common.o 15obj-y += proc.o capflags.o powerflags.o common.o
16obj-y += vmware.o hypervisor.o 16obj-y += vmware.o hypervisor.o sched.o
17 17
18obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o 18obj-$(CONFIG_X86_32) += bugs.o cmpxchg.o
19obj-$(CONFIG_X86_64) += bugs_64.o 19obj-$(CONFIG_X86_64) += bugs_64.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 63fddcd082cd..f32fa71ccf97 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -2,7 +2,7 @@
2#include <linux/bitops.h> 2#include <linux/bitops.h>
3#include <linux/mm.h> 3#include <linux/mm.h>
4 4
5#include <asm/io.h> 5#include <linux/io.h>
6#include <asm/processor.h> 6#include <asm/processor.h>
7#include <asm/apic.h> 7#include <asm/apic.h>
8#include <asm/cpu.h> 8#include <asm/cpu.h>
@@ -45,8 +45,8 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
45#define CBAR_ENB (0x80000000) 45#define CBAR_ENB (0x80000000)
46#define CBAR_KEY (0X000000CB) 46#define CBAR_KEY (0X000000CB)
47 if (c->x86_model == 9 || c->x86_model == 10) { 47 if (c->x86_model == 9 || c->x86_model == 10) {
48 if (inl (CBAR) & CBAR_ENB) 48 if (inl(CBAR) & CBAR_ENB)
49 outl (0 | CBAR_KEY, CBAR); 49 outl(0 | CBAR_KEY, CBAR);
50 } 50 }
51} 51}
52 52
@@ -87,9 +87,10 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
87 d = d2-d; 87 d = d2-d;
88 88
89 if (d > 20*K6_BUG_LOOP) 89 if (d > 20*K6_BUG_LOOP)
90 printk("system stability may be impaired when more than 32 MB are used.\n"); 90 printk(KERN_CONT
91 "system stability may be impaired when more than 32 MB are used.\n");
91 else 92 else
92 printk("probably OK (after B9730xxxx).\n"); 93 printk(KERN_CONT "probably OK (after B9730xxxx).\n");
93 printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); 94 printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
94 } 95 }
95 96
@@ -219,8 +220,9 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
219 if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { 220 if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
220 rdmsr(MSR_K7_CLK_CTL, l, h); 221 rdmsr(MSR_K7_CLK_CTL, l, h);
221 if ((l & 0xfff00000) != 0x20000000) { 222 if ((l & 0xfff00000) != 0x20000000) {
222 printk ("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l, 223 printk(KERN_INFO
223 ((l & 0x000fffff)|0x20000000)); 224 "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
225 l, ((l & 0x000fffff)|0x20000000));
224 wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); 226 wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
225 } 227 }
226 } 228 }
@@ -251,6 +253,64 @@ static int __cpuinit nearby_node(int apicid)
251#endif 253#endif
252 254
253/* 255/*
256 * Fixup core topology information for AMD multi-node processors.
257 * Assumption 1: Number of cores in each internal node is the same.
258 * Assumption 2: Mixed systems with both single-node and dual-node
259 * processors are not supported.
260 */
261#ifdef CONFIG_X86_HT
262static void __cpuinit amd_fixup_dcm(struct cpuinfo_x86 *c)
263{
264#ifdef CONFIG_PCI
265 u32 t, cpn;
266 u8 n, n_id;
267 int cpu = smp_processor_id();
268
269 /* fixup topology information only once for a core */
270 if (cpu_has(c, X86_FEATURE_AMD_DCM))
271 return;
272
273 /* check for multi-node processor on boot cpu */
274 t = read_pci_config(0, 24, 3, 0xe8);
275 if (!(t & (1 << 29)))
276 return;
277
278 set_cpu_cap(c, X86_FEATURE_AMD_DCM);
279
280 /* cores per node: each internal node has half the number of cores */
281 cpn = c->x86_max_cores >> 1;
282
283 /* even-numbered NB_id of this dual-node processor */
284 n = c->phys_proc_id << 1;
285
286 /*
287 * determine internal node id and assign cores fifty-fifty to
288 * each node of the dual-node processor
289 */
290 t = read_pci_config(0, 24 + n, 3, 0xe8);
291 n = (t>>30) & 0x3;
292 if (n == 0) {
293 if (c->cpu_core_id < cpn)
294 n_id = 0;
295 else
296 n_id = 1;
297 } else {
298 if (c->cpu_core_id < cpn)
299 n_id = 1;
300 else
301 n_id = 0;
302 }
303
304 /* compute entire NodeID, use llc_shared_map to store sibling info */
305 per_cpu(cpu_llc_id, cpu) = (c->phys_proc_id << 1) + n_id;
306
307 /* fixup core id to be in range from 0 to cpn */
308 c->cpu_core_id = c->cpu_core_id % cpn;
309#endif
310}
311#endif
312
313/*
254 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores. 314 * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
255 * Assumes number of cores is a power of two. 315 * Assumes number of cores is a power of two.
256 */ 316 */
@@ -267,17 +327,31 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
267 c->phys_proc_id = c->initial_apicid >> bits; 327 c->phys_proc_id = c->initial_apicid >> bits;
268 /* use socket ID also for last level cache */ 328 /* use socket ID also for last level cache */
269 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; 329 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
330 /* fixup topology information on multi-node processors */
331 if ((c->x86 == 0x10) && (c->x86_model == 9))
332 amd_fixup_dcm(c);
270#endif 333#endif
271} 334}
272 335
336int amd_get_nb_id(int cpu)
337{
338 int id = 0;
339#ifdef CONFIG_SMP
340 id = per_cpu(cpu_llc_id, cpu);
341#endif
342 return id;
343}
344EXPORT_SYMBOL_GPL(amd_get_nb_id);
345
273static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) 346static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
274{ 347{
275#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64) 348#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
276 int cpu = smp_processor_id(); 349 int cpu = smp_processor_id();
277 int node; 350 int node;
278 unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid; 351 unsigned apicid = c->apicid;
352
353 node = per_cpu(cpu_llc_id, cpu);
279 354
280 node = c->phys_proc_id;
281 if (apicid_to_node[apicid] != NUMA_NO_NODE) 355 if (apicid_to_node[apicid] != NUMA_NO_NODE)
282 node = apicid_to_node[apicid]; 356 node = apicid_to_node[apicid];
283 if (!node_online(node)) { 357 if (!node_online(node)) {
@@ -398,18 +472,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
398 u32 level; 472 u32 level;
399 473
400 level = cpuid_eax(1); 474 level = cpuid_eax(1);
401 if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) 475 if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
402 set_cpu_cap(c, X86_FEATURE_REP_GOOD); 476 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
403 477
404 /* 478 /*
405 * Some BIOSes incorrectly force this feature, but only K8 479 * Some BIOSes incorrectly force this feature, but only K8
406 * revision D (model = 0x14) and later actually support it. 480 * revision D (model = 0x14) and later actually support it.
481 * (AMD Erratum #110, docId: 25759).
407 */ 482 */
408 if (c->x86_model < 0x14) 483 if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
484 u64 val;
485
409 clear_cpu_cap(c, X86_FEATURE_LAHF_LM); 486 clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
487 if (!rdmsrl_amd_safe(0xc001100d, &val)) {
488 val &= ~(1ULL << 32);
489 wrmsrl_amd_safe(0xc001100d, val);
490 }
491 }
492
410 } 493 }
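
The new branch above clears the mis-reported LAHF_LM bit twice: once in the kernel's feature bitmap, and once at its source in bit 32 of MSR 0xc001100d so that later CPUID reads agree. As a sketch, the read-modify-write pattern looks like this, assuming the *_amd_safe() helpers return 0 on success like their generic rdmsrl_safe()/wrmsrl_safe() counterparts:

/* sketch: clear one bit in an MSR, tolerating a #GP on unsupported CPUs */
static void clear_msr_bit_safe(unsigned int msr, int bit)
{
	u64 val;

	if (!rdmsrl_amd_safe(msr, &val)) {
		val &= ~(1ULL << bit);
		wrmsrl_amd_safe(msr, val);
	}
}
/* clear_msr_bit_safe(0xc001100d, 32) reproduces the erratum fixup above */
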
411 if (c->x86 == 0x10 || c->x86 == 0x11) 494 if (c->x86 == 0x10 || c->x86 == 0x11)
412 set_cpu_cap(c, X86_FEATURE_REP_GOOD); 495 set_cpu_cap(c, X86_FEATURE_REP_GOOD);
496
497 /* get apicid instead of initial apic id from cpuid */
498 c->apicid = hard_smp_processor_id();
413#else 499#else
414 500
415 /* 501 /*
@@ -494,27 +580,30 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
494 * benefit in doing so. 580 * benefit in doing so.
495 */ 581 */
496 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { 582 if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
497 printk(KERN_DEBUG "tseg: %010llx\n", tseg); 583 printk(KERN_DEBUG "tseg: %010llx\n", tseg);
498 if ((tseg>>PMD_SHIFT) < 584 if ((tseg>>PMD_SHIFT) <
499 (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) || 585 (max_low_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) ||
500 ((tseg>>PMD_SHIFT) < 586 ((tseg>>PMD_SHIFT) <
501 (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) && 587 (max_pfn_mapped>>(PMD_SHIFT-PAGE_SHIFT)) &&
502 (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT)))) 588 (tseg>>PMD_SHIFT) >= (1ULL<<(32 - PMD_SHIFT))))
503 set_memory_4k((unsigned long)__va(tseg), 1); 589 set_memory_4k((unsigned long)__va(tseg), 1);
504 } 590 }
505 } 591 }
506#endif 592#endif
507} 593}
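
The multi-line condition guarding set_memory_4k() above asks one question: does the TSEG base fall inside a region the kernel mapped with PMD-sized large pages, either in the below-4G direct mapping or in the above-4G one? Restated as a hedged helper (the name is invented; the globals are the ones used in the hunk):

/* sketch: true when tseg's PMD frame lies in a large-page-mapped region */
static bool tseg_wants_4k(u64 tseg)
{
	u64 pmd = tseg >> PMD_SHIFT;

	if (pmd < (max_low_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)))
		return true;	/* low direct mapping */

	return pmd < (max_pfn_mapped >> (PMD_SHIFT - PAGE_SHIFT)) &&
	       pmd >= (1ULL << (32 - PMD_SHIFT));	/* above 4G */
}
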
508 594
509#ifdef CONFIG_X86_32 595#ifdef CONFIG_X86_32
510static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) 596static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
597 unsigned int size)
511{ 598{
512 /* AMD errata T13 (order #21922) */ 599 /* AMD errata T13 (order #21922) */
513 if ((c->x86 == 6)) { 600 if ((c->x86 == 6)) {
514 if (c->x86_model == 3 && c->x86_mask == 0) /* Duron Rev A0 */ 601 /* Duron Rev A0 */
602 if (c->x86_model == 3 && c->x86_mask == 0)
515 size = 64; 603 size = 64;
604 /* Tbird rev A1/A2 */
516 if (c->x86_model == 4 && 605 if (c->x86_model == 4 &&
517 (c->x86_mask == 0 || c->x86_mask == 1)) /* Tbird rev A1/A2 */ 606 (c->x86_mask == 0 || c->x86_mask == 1))
518 size = 256; 607 size = 256;
519 } 608 }
520 return size; 609 return size;
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c8e315f1aa83..01a265212395 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -81,7 +81,7 @@ static void __init check_fpu(void)
81 81
82 boot_cpu_data.fdiv_bug = fdiv_bug; 82 boot_cpu_data.fdiv_bug = fdiv_bug;
83 if (boot_cpu_data.fdiv_bug) 83 if (boot_cpu_data.fdiv_bug)
84 printk("Hmm, FPU with FDIV bug.\n"); 84 printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
85} 85}
86 86
87static void __init check_hlt(void) 87static void __init check_hlt(void)
@@ -98,7 +98,7 @@ static void __init check_hlt(void)
98 halt(); 98 halt();
99 halt(); 99 halt();
100 halt(); 100 halt();
101 printk("OK.\n"); 101 printk(KERN_CONT "OK.\n");
102} 102}
103 103
104/* 104/*
@@ -122,9 +122,9 @@ static void __init check_popad(void)
122 * CPU hard. Too bad. 122 * CPU hard. Too bad.
123 */ 123 */
124 if (res != 12345678) 124 if (res != 12345678)
125 printk("Buggy.\n"); 125 printk(KERN_CONT "Buggy.\n");
126 else 126 else
127 printk("OK.\n"); 127 printk(KERN_CONT "OK.\n");
128#endif 128#endif
129} 129}
130 130
@@ -156,7 +156,7 @@ void __init check_bugs(void)
156{ 156{
157 identify_boot_cpu(); 157 identify_boot_cpu();
158#ifndef CONFIG_SMP 158#ifndef CONFIG_SMP
159 printk("CPU: "); 159 printk(KERN_INFO "CPU: ");
160 print_cpu_info(&boot_cpu_data); 160 print_cpu_info(&boot_cpu_data);
161#endif 161#endif
162 check_config(); 162 check_config();
diff --git a/arch/x86/kernel/cpu/bugs_64.c b/arch/x86/kernel/cpu/bugs_64.c
index 9a3ed0649d4e..04f0fe5af83e 100644
--- a/arch/x86/kernel/cpu/bugs_64.c
+++ b/arch/x86/kernel/cpu/bugs_64.c
@@ -15,7 +15,7 @@ void __init check_bugs(void)
15{ 15{
16 identify_boot_cpu(); 16 identify_boot_cpu();
17#if !defined(CONFIG_SMP) 17#if !defined(CONFIG_SMP)
18 printk("CPU: "); 18 printk(KERN_INFO "CPU: ");
19 print_cpu_info(&boot_cpu_data); 19 print_cpu_info(&boot_cpu_data);
20#endif 20#endif
21 alternative_instructions(); 21 alternative_instructions();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 5ce60a88027b..2055fc2b2e6b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -18,8 +18,8 @@
18#include <asm/hypervisor.h> 18#include <asm/hypervisor.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
20#include <asm/sections.h> 20#include <asm/sections.h>
21#include <asm/topology.h> 21#include <linux/topology.h>
22#include <asm/cpumask.h> 22#include <linux/cpumask.h>
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/atomic.h> 24#include <asm/atomic.h>
25#include <asm/proto.h> 25#include <asm/proto.h>
@@ -28,13 +28,13 @@
28#include <asm/desc.h> 28#include <asm/desc.h>
29#include <asm/i387.h> 29#include <asm/i387.h>
30#include <asm/mtrr.h> 30#include <asm/mtrr.h>
31#include <asm/numa.h> 31#include <linux/numa.h>
32#include <asm/asm.h> 32#include <asm/asm.h>
33#include <asm/cpu.h> 33#include <asm/cpu.h>
34#include <asm/mce.h> 34#include <asm/mce.h>
35#include <asm/msr.h> 35#include <asm/msr.h>
36#include <asm/pat.h> 36#include <asm/pat.h>
37#include <asm/smp.h> 37#include <linux/smp.h>
38 38
39#ifdef CONFIG_X86_LOCAL_APIC 39#ifdef CONFIG_X86_LOCAL_APIC
40#include <asm/uv/uv.h> 40#include <asm/uv/uv.h>
@@ -94,45 +94,45 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
94 * TLS descriptors are currently at a different place compared to i386. 94 * TLS descriptors are currently at a different place compared to i386.
95 * Hopefully nobody expects them at a fixed place (Wine?) 95 * Hopefully nobody expects them at a fixed place (Wine?)
96 */ 96 */
97 [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } }, 97 [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
98 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } }, 98 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
99 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } }, 99 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
100 [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } }, 100 [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
101 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } }, 101 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
102 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } }, 102 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
103#else 103#else
104 [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } }, 104 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
105 [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } }, 105 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
106 [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } }, 106 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
107 [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } }, 107 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
108 /* 108 /*
109 * Segments used for calling PnP BIOS have byte granularity. 109 * Segments used for calling PnP BIOS have byte granularity.
 110 * Their code segments and data segments have fixed 64k limits, 110 * Their code segments and data segments have fixed 64k limits,
111 * the transfer segment sizes are set at run time. 111 * the transfer segment sizes are set at run time.
112 */ 112 */
113 /* 32-bit code */ 113 /* 32-bit code */
114 [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } }, 114 [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
115 /* 16-bit code */ 115 /* 16-bit code */
116 [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } }, 116 [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
117 /* 16-bit data */ 117 /* 16-bit data */
118 [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } }, 118 [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
119 /* 16-bit data */ 119 /* 16-bit data */
120 [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } }, 120 [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
121 /* 16-bit data */ 121 /* 16-bit data */
122 [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } }, 122 [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
123 /* 123 /*
124 * The APM segments have byte granularity and their bases 124 * The APM segments have byte granularity and their bases
125 * are set at run time. All have 64k limits. 125 * are set at run time. All have 64k limits.
126 */ 126 */
127 /* 32-bit code */ 127 /* 32-bit code */
128 [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } }, 128 [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
129 /* 16-bit code */ 129 /* 16-bit code */
130 [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } }, 130 [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
131 /* data */ 131 /* data */
132 [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } }, 132 [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
133 133
134 [GDT_ENTRY_ESPFIX_SS] = { { { 0x0000ffff, 0x00cf9200 } } }, 134 [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
135 [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } }, 135 [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
136 GDT_STACK_CANARY_INIT 136 GDT_STACK_CANARY_INIT
137#endif 137#endif
138} }; 138} };
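
The hunk above replaces hand-encoded descriptor dwords with GDT_ENTRY_INIT(flags, base, limit), which makes the segment attributes legible. A sketch of the packing such a macro has to perform (the kernel's actual macro in desc_defs.h may differ in detail); 'flags' carries type/S/DPL/P in its low byte and AVL/L/D/G in bits 12-15:

#define SKETCH_GDT_ENTRY_INIT(flags, base, limit) { { {		\
	.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16),	\
	.b = (((base) & 0xff0000) >> 16) |			\
	     (((flags) & 0xf0ff) << 8) |			\
	     ((limit) & 0xf0000) | ((base) & 0xff000000),	\
} } }

For example, SKETCH_GDT_ENTRY_INIT(0xc09b, 0, 0xfffff) yields { .a = 0x0000ffff, .b = 0x00cf9b00 }, matching the old GDT_ENTRY_KERNEL32_CS initializer it replaces.
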
@@ -982,18 +982,26 @@ static __init int setup_disablecpuid(char *arg)
982__setup("clearcpuid=", setup_disablecpuid); 982__setup("clearcpuid=", setup_disablecpuid);
983 983
984#ifdef CONFIG_X86_64 984#ifdef CONFIG_X86_64
985struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table }; 985struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
986 986
987DEFINE_PER_CPU_FIRST(union irq_stack_union, 987DEFINE_PER_CPU_FIRST(union irq_stack_union,
988 irq_stack_union) __aligned(PAGE_SIZE); 988 irq_stack_union) __aligned(PAGE_SIZE);
989 989
990DEFINE_PER_CPU(char *, irq_stack_ptr) = 990/*
991 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; 991 * The following four percpu variables are hot. Align current_task to
992 * cacheline size such that all four fall in the same cacheline.
993 */
994DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
995 &init_task;
996EXPORT_PER_CPU_SYMBOL(current_task);
992 997
993DEFINE_PER_CPU(unsigned long, kernel_stack) = 998DEFINE_PER_CPU(unsigned long, kernel_stack) =
994 (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE; 999 (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
995EXPORT_PER_CPU_SYMBOL(kernel_stack); 1000EXPORT_PER_CPU_SYMBOL(kernel_stack);
996 1001
1002DEFINE_PER_CPU(char *, irq_stack_ptr) =
1003 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
1004
997DEFINE_PER_CPU(unsigned int, irq_count) = -1; 1005DEFINE_PER_CPU(unsigned int, irq_count) = -1;
998 1006
999/* 1007/*
@@ -1008,8 +1016,7 @@ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
1008}; 1016};
1009 1017
1010static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks 1018static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
1011 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]) 1019 [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
1012 __aligned(PAGE_SIZE);
1013 1020
1014/* May not be marked __init: used by software suspend */ 1021/* May not be marked __init: used by software suspend */
1015void syscall_init(void) 1022void syscall_init(void)
@@ -1042,8 +1049,11 @@ DEFINE_PER_CPU(struct orig_ist, orig_ist);
1042 1049
1043#else /* CONFIG_X86_64 */ 1050#else /* CONFIG_X86_64 */
1044 1051
1052DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1053EXPORT_PER_CPU_SYMBOL(current_task);
1054
1045#ifdef CONFIG_CC_STACKPROTECTOR 1055#ifdef CONFIG_CC_STACKPROTECTOR
1046DEFINE_PER_CPU(unsigned long, stack_canary); 1056DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1047#endif 1057#endif
1048 1058
1049/* Make sure %fs and %gs are initialized properly in idle threads */ 1059/* Make sure %fs and %gs are initialized properly in idle threads */
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 6b2a52dd0403..dca325c03999 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,8 +30,8 @@
30#include <asm/apic.h> 30#include <asm/apic.h>
31#include <asm/desc.h> 31#include <asm/desc.h>
32 32
33static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); 33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
34static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); 34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
35static DEFINE_PER_CPU(int, cpu_priv_count); 35static DEFINE_PER_CPU(int, cpu_priv_count);
36 36
37static DEFINE_MUTEX(cpu_debug_lock); 37static DEFINE_MUTEX(cpu_debug_lock);
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ae9b503220ca..4109679863c1 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -60,7 +60,6 @@ enum {
60}; 60};
61 61
62#define INTEL_MSR_RANGE (0xffff) 62#define INTEL_MSR_RANGE (0xffff)
63#define CPUID_6_ECX_APERFMPERF_CAPABILITY (0x1)
64 63
65struct acpi_cpufreq_data { 64struct acpi_cpufreq_data {
66 struct acpi_processor_performance *acpi_data; 65 struct acpi_processor_performance *acpi_data;
@@ -71,11 +70,7 @@ struct acpi_cpufreq_data {
71 70
72static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data); 71static DEFINE_PER_CPU(struct acpi_cpufreq_data *, drv_data);
73 72
74struct acpi_msr_data { 73static DEFINE_PER_CPU(struct aperfmperf, old_perf);
75 u64 saved_aperf, saved_mperf;
76};
77
78static DEFINE_PER_CPU(struct acpi_msr_data, msr_data);
79 74
80DEFINE_TRACE(power_mark); 75DEFINE_TRACE(power_mark);
81 76
@@ -244,23 +239,12 @@ static u32 get_cur_val(const struct cpumask *mask)
244 return cmd.val; 239 return cmd.val;
245} 240}
246 241
247struct perf_pair {
248 union {
249 struct {
250 u32 lo;
251 u32 hi;
252 } split;
253 u64 whole;
254 } aperf, mperf;
255};
256
257/* Called via smp_call_function_single(), on the target CPU */ 242/* Called via smp_call_function_single(), on the target CPU */
258static void read_measured_perf_ctrs(void *_cur) 243static void read_measured_perf_ctrs(void *_cur)
259{ 244{
260 struct perf_pair *cur = _cur; 245 struct aperfmperf *am = _cur;
261 246
262 rdmsr(MSR_IA32_APERF, cur->aperf.split.lo, cur->aperf.split.hi); 247 get_aperfmperf(am);
263 rdmsr(MSR_IA32_MPERF, cur->mperf.split.lo, cur->mperf.split.hi);
264} 248}
265 249
266/* 250/*
@@ -279,63 +263,17 @@ static void read_measured_perf_ctrs(void *_cur)
279static unsigned int get_measured_perf(struct cpufreq_policy *policy, 263static unsigned int get_measured_perf(struct cpufreq_policy *policy,
280 unsigned int cpu) 264 unsigned int cpu)
281{ 265{
282 struct perf_pair readin, cur; 266 struct aperfmperf perf;
283 unsigned int perf_percent; 267 unsigned long ratio;
284 unsigned int retval; 268 unsigned int retval;
285 269
286 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &readin, 1)) 270 if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
287 return 0; 271 return 0;
288 272
289 cur.aperf.whole = readin.aperf.whole - 273 ratio = calc_aperfmperf_ratio(&per_cpu(old_perf, cpu), &perf);
290 per_cpu(msr_data, cpu).saved_aperf; 274 per_cpu(old_perf, cpu) = perf;
291 cur.mperf.whole = readin.mperf.whole -
292 per_cpu(msr_data, cpu).saved_mperf;
293 per_cpu(msr_data, cpu).saved_aperf = readin.aperf.whole;
294 per_cpu(msr_data, cpu).saved_mperf = readin.mperf.whole;
295
296#ifdef __i386__
297 /*
298 * We dont want to do 64 bit divide with 32 bit kernel
299 * Get an approximate value. Return failure in case we cannot get
300 * an approximate value.
301 */
302 if (unlikely(cur.aperf.split.hi || cur.mperf.split.hi)) {
303 int shift_count;
304 u32 h;
305
306 h = max_t(u32, cur.aperf.split.hi, cur.mperf.split.hi);
307 shift_count = fls(h);
308
309 cur.aperf.whole >>= shift_count;
310 cur.mperf.whole >>= shift_count;
311 }
312
313 if (((unsigned long)(-1) / 100) < cur.aperf.split.lo) {
314 int shift_count = 7;
315 cur.aperf.split.lo >>= shift_count;
316 cur.mperf.split.lo >>= shift_count;
317 }
318
319 if (cur.aperf.split.lo && cur.mperf.split.lo)
320 perf_percent = (cur.aperf.split.lo * 100) / cur.mperf.split.lo;
321 else
322 perf_percent = 0;
323 275
324#else 276 retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
325 if (unlikely(((unsigned long)(-1) / 100) < cur.aperf.whole)) {
326 int shift_count = 7;
327 cur.aperf.whole >>= shift_count;
328 cur.mperf.whole >>= shift_count;
329 }
330
331 if (cur.aperf.whole && cur.mperf.whole)
332 perf_percent = (cur.aperf.whole * 100) / cur.mperf.whole;
333 else
334 perf_percent = 0;
335
336#endif
337
338 retval = (policy->cpuinfo.max_freq * perf_percent) / 100;
339 277
340 return retval; 278 return retval;
341} 279}
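
get_measured_perf() now defers all of the fixed-point arithmetic to calc_aperfmperf_ratio(), which this merge adds elsewhere. A plausible shape for that helper, under the assumption that APERFMPERF_SHIFT is the fixed-point scale (so the returned ratio is roughly (aperf/mperf) << SHIFT):

/* sketch of the helper used above; the real one may differ */
static unsigned long sketch_aperfmperf_ratio(struct aperfmperf *old,
					     struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;	/* pre-scale the divisor ... */
	if (mperf)
		ratio = div64_u64(aperf, mperf); /* ... so the quotient is
						    already left-shifted */
	return ratio;	/* caller computes (max_freq * ratio) >> SHIFT */
}
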
@@ -731,12 +669,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
731 acpi_processor_notify_smm(THIS_MODULE); 669 acpi_processor_notify_smm(THIS_MODULE);
732 670
733 /* Check for APERF/MPERF support in hardware */ 671 /* Check for APERF/MPERF support in hardware */
734 if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) { 672 if (cpu_has(c, X86_FEATURE_APERFMPERF))
735 unsigned int ecx; 673 acpi_cpufreq_driver.getavg = get_measured_perf;
736 ecx = cpuid_ecx(6);
737 if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
738 acpi_cpufreq_driver.getavg = get_measured_perf;
739 }
740 674
741 dprintk("CPU%u - ACPI performance management activated.\n", cpu); 675 dprintk("CPU%u - ACPI performance management activated.\n", cpu);
742 for (i = 0; i < perf->state_count; i++) 676 for (i = 0; i < perf->state_count; i++)
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 593171e967ef..19807b89f058 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -3,10 +3,10 @@
3#include <linux/delay.h> 3#include <linux/delay.h>
4#include <linux/pci.h> 4#include <linux/pci.h>
5#include <asm/dma.h> 5#include <asm/dma.h>
6#include <asm/io.h> 6#include <linux/io.h>
7#include <asm/processor-cyrix.h> 7#include <asm/processor-cyrix.h>
8#include <asm/processor-flags.h> 8#include <asm/processor-flags.h>
9#include <asm/timer.h> 9#include <linux/timer.h>
10#include <asm/pci-direct.h> 10#include <asm/pci-direct.h>
11#include <asm/tsc.h> 11#include <asm/tsc.h>
12 12
@@ -282,7 +282,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
282 * The 5510/5520 companion chips have a funky PIT. 282 * The 5510/5520 companion chips have a funky PIT.
283 */ 283 */
284 if (vendor == PCI_VENDOR_ID_CYRIX && 284 if (vendor == PCI_VENDOR_ID_CYRIX &&
285 (device == PCI_DEVICE_ID_CYRIX_5510 || device == PCI_DEVICE_ID_CYRIX_5520)) 285 (device == PCI_DEVICE_ID_CYRIX_5510 ||
286 device == PCI_DEVICE_ID_CYRIX_5520))
286 mark_tsc_unstable("cyrix 5510/5520 detected"); 287 mark_tsc_unstable("cyrix 5510/5520 detected");
287 } 288 }
288#endif 289#endif
@@ -299,7 +300,8 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
299 * ? : 0x7x 300 * ? : 0x7x
300 * GX1 : 0x8x GX1 datasheet 56 301 * GX1 : 0x8x GX1 datasheet 56
301 */ 302 */
302 if ((0x30 <= dir1 && dir1 <= 0x6f) || (0x80 <= dir1 && dir1 <= 0x8f)) 303 if ((0x30 <= dir1 && dir1 <= 0x6f) ||
304 (0x80 <= dir1 && dir1 <= 0x8f))
303 geode_configure(); 305 geode_configure();
304 return; 306 return;
305 } else { /* MediaGX */ 307 } else { /* MediaGX */
@@ -427,9 +429,12 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
427 printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n"); 429 printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
428 local_irq_save(flags); 430 local_irq_save(flags);
429 ccr3 = getCx86(CX86_CCR3); 431 ccr3 = getCx86(CX86_CCR3);
430 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ 432 /* enable MAPEN */
431 setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80); /* enable cpuid */ 433 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
432 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ 434 /* enable cpuid */
435 setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
436 /* disable MAPEN */
437 setCx86(CX86_CCR3, ccr3);
433 local_irq_restore(flags); 438 local_irq_restore(flags);
434 } 439 }
435 } 440 }
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index fb5b86af0b01..93ba8eeb100a 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -28,11 +28,10 @@
28static inline void __cpuinit 28static inline void __cpuinit
29detect_hypervisor_vendor(struct cpuinfo_x86 *c) 29detect_hypervisor_vendor(struct cpuinfo_x86 *c)
30{ 30{
31 if (vmware_platform()) { 31 if (vmware_platform())
32 c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE; 32 c->x86_hyper_vendor = X86_HYPER_VENDOR_VMWARE;
33 } else { 33 else
34 c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE; 34 c->x86_hyper_vendor = X86_HYPER_VENDOR_NONE;
35 }
36} 35}
37 36
38unsigned long get_hypervisor_tsc_freq(void) 37unsigned long get_hypervisor_tsc_freq(void)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3260ab044996..40e1835b35e8 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -7,17 +7,17 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/thread_info.h> 8#include <linux/thread_info.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/uaccess.h>
10 11
11#include <asm/processor.h> 12#include <asm/processor.h>
12#include <asm/pgtable.h> 13#include <asm/pgtable.h>
13#include <asm/msr.h> 14#include <asm/msr.h>
14#include <asm/uaccess.h>
15#include <asm/ds.h> 15#include <asm/ds.h>
16#include <asm/bugs.h> 16#include <asm/bugs.h>
17#include <asm/cpu.h> 17#include <asm/cpu.h>
18 18
19#ifdef CONFIG_X86_64 19#ifdef CONFIG_X86_64
20#include <asm/topology.h> 20#include <linux/topology.h>
21#include <asm/numa_64.h> 21#include <asm/numa_64.h>
22#endif 22#endif
23 23
@@ -174,7 +174,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
174#ifdef CONFIG_X86_F00F_BUG 174#ifdef CONFIG_X86_F00F_BUG
175 /* 175 /*
176 * All current models of Pentium and Pentium with MMX technology CPUs 176 * All current models of Pentium and Pentium with MMX technology CPUs
177 * have the F0 0F bug, which lets nonprivileged users lock up the system. 177 * have the F0 0F bug, which lets nonprivileged users lock up the
178 * system.
 178 * Note that the workaround should only be initialized once... 179 * Note that the workaround should only be initialized once...
179 */ 180 */
180 c->f00f_bug = 0; 181 c->f00f_bug = 0;
@@ -207,7 +208,7 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
207 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n"); 208 printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
208 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n"); 209 printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
209 lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE; 210 lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
210 wrmsr (MSR_IA32_MISC_ENABLE, lo, hi); 211 wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
211 } 212 }
212 } 213 }
213 214
@@ -283,7 +284,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
283 /* Intel has a non-standard dependency on %ecx for this CPUID level. */ 284 /* Intel has a non-standard dependency on %ecx for this CPUID level. */
284 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); 285 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
285 if (eax & 0x1f) 286 if (eax & 0x1f)
286 return ((eax >> 26) + 1); 287 return (eax >> 26) + 1;
287 else 288 else
288 return 1; 289 return 1;
289} 290}
@@ -349,6 +350,12 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
349 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); 350 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
350 } 351 }
351 352
353 if (c->cpuid_level > 6) {
354 unsigned ecx = cpuid_ecx(6);
355 if (ecx & 0x01)
356 set_cpu_cap(c, X86_FEATURE_APERFMPERF);
357 }
358
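
The block just added flags X86_FEATURE_APERFMPERF when CPUID leaf 6 reports bit 0 of ECX. The same probe is easy to reproduce from user space for verification; a minimal sketch using gcc's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if leaf 6 is not supported */
	if (__get_cpuid(6, &eax, &ebx, &ecx, &edx) && (ecx & 0x01))
		puts("APERF/MPERF available");
	return 0;
}
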
352 if (cpu_has_xmm2) 359 if (cpu_has_xmm2)
353 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 360 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
354 if (cpu_has_ds) { 361 if (cpu_has_ds) {
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 789efe217e1a..804c40e2bc3e 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Changes: 4 * Changes:
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4) 5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure. 6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD. 7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
8 */ 8 */
9 9
@@ -16,7 +16,7 @@
16#include <linux/pci.h> 16#include <linux/pci.h>
17 17
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/smp.h> 19#include <linux/smp.h>
20#include <asm/k8.h> 20#include <asm/k8.h>
21 21
22#define LVL_1_INST 1 22#define LVL_1_INST 1
@@ -25,14 +25,15 @@
25#define LVL_3 4 25#define LVL_3 4
26#define LVL_TRACE 5 26#define LVL_TRACE 5
27 27
28struct _cache_table 28struct _cache_table {
29{
30 unsigned char descriptor; 29 unsigned char descriptor;
31 char cache_type; 30 char cache_type;
32 short size; 31 short size;
33}; 32};
34 33
35/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 34/* All the cache descriptor types we care about (no TLB or
35 trace cache entries) */
36
36static const struct _cache_table __cpuinitconst cache_table[] = 37static const struct _cache_table __cpuinitconst cache_table[] =
37{ 38{
38 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 39 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
@@ -105,8 +106,7 @@ static const struct _cache_table __cpuinitconst cache_table[] =
105}; 106};
106 107
107 108
108enum _cache_type 109enum _cache_type {
109{
110 CACHE_TYPE_NULL = 0, 110 CACHE_TYPE_NULL = 0,
111 CACHE_TYPE_DATA = 1, 111 CACHE_TYPE_DATA = 1,
112 CACHE_TYPE_INST = 2, 112 CACHE_TYPE_INST = 2,
@@ -170,31 +170,31 @@ unsigned short num_cache_leaves;
170 Maybe later */ 170 Maybe later */
171union l1_cache { 171union l1_cache {
172 struct { 172 struct {
173 unsigned line_size : 8; 173 unsigned line_size:8;
174 unsigned lines_per_tag : 8; 174 unsigned lines_per_tag:8;
175 unsigned assoc : 8; 175 unsigned assoc:8;
176 unsigned size_in_kb : 8; 176 unsigned size_in_kb:8;
177 }; 177 };
178 unsigned val; 178 unsigned val;
179}; 179};
180 180
181union l2_cache { 181union l2_cache {
182 struct { 182 struct {
183 unsigned line_size : 8; 183 unsigned line_size:8;
184 unsigned lines_per_tag : 4; 184 unsigned lines_per_tag:4;
185 unsigned assoc : 4; 185 unsigned assoc:4;
186 unsigned size_in_kb : 16; 186 unsigned size_in_kb:16;
187 }; 187 };
188 unsigned val; 188 unsigned val;
189}; 189};
190 190
191union l3_cache { 191union l3_cache {
192 struct { 192 struct {
193 unsigned line_size : 8; 193 unsigned line_size:8;
194 unsigned lines_per_tag : 4; 194 unsigned lines_per_tag:4;
195 unsigned assoc : 4; 195 unsigned assoc:4;
196 unsigned res : 2; 196 unsigned res:2;
197 unsigned size_encoded : 14; 197 unsigned size_encoded:14;
198 }; 198 };
199 unsigned val; 199 unsigned val;
200}; 200};
@@ -241,7 +241,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
241 case 0: 241 case 0:
242 if (!l1->val) 242 if (!l1->val)
243 return; 243 return;
244 assoc = l1->assoc; 244 assoc = assocs[l1->assoc];
245 line_size = l1->line_size; 245 line_size = l1->line_size;
246 lines_per_tag = l1->lines_per_tag; 246 lines_per_tag = l1->lines_per_tag;
247 size_in_kb = l1->size_in_kb; 247 size_in_kb = l1->size_in_kb;
@@ -249,7 +249,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
249 case 2: 249 case 2:
250 if (!l2.val) 250 if (!l2.val)
251 return; 251 return;
252 assoc = l2.assoc; 252 assoc = assocs[l2.assoc];
253 line_size = l2.line_size; 253 line_size = l2.line_size;
254 lines_per_tag = l2.lines_per_tag; 254 lines_per_tag = l2.lines_per_tag;
255 /* cpu_data has errata corrections for K7 applied */ 255 /* cpu_data has errata corrections for K7 applied */
@@ -258,10 +258,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
258 case 3: 258 case 3:
259 if (!l3.val) 259 if (!l3.val)
260 return; 260 return;
261 assoc = l3.assoc; 261 assoc = assocs[l3.assoc];
262 line_size = l3.line_size; 262 line_size = l3.line_size;
263 lines_per_tag = l3.lines_per_tag; 263 lines_per_tag = l3.lines_per_tag;
264 size_in_kb = l3.size_encoded * 512; 264 size_in_kb = l3.size_encoded * 512;
265 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
266 size_in_kb = size_in_kb >> 1;
267 assoc = assoc >> 1;
268 }
265 break; 269 break;
266 default: 270 default:
267 return; 271 return;
@@ -270,18 +274,14 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
270 eax->split.is_self_initializing = 1; 274 eax->split.is_self_initializing = 1;
271 eax->split.type = types[leaf]; 275 eax->split.type = types[leaf];
272 eax->split.level = levels[leaf]; 276 eax->split.level = levels[leaf];
273 if (leaf == 3) 277 eax->split.num_threads_sharing = 0;
274 eax->split.num_threads_sharing =
275 current_cpu_data.x86_max_cores - 1;
276 else
277 eax->split.num_threads_sharing = 0;
278 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1; 278 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
279 279
280 280
281 if (assoc == 0xf) 281 if (assoc == 0xffff)
282 eax->split.is_fully_associative = 1; 282 eax->split.is_fully_associative = 1;
283 ebx->split.coherency_line_size = line_size - 1; 283 ebx->split.coherency_line_size = line_size - 1;
284 ebx->split.ways_of_associativity = assocs[assoc] - 1; 284 ebx->split.ways_of_associativity = assoc - 1;
285 ebx->split.physical_line_partition = lines_per_tag - 1; 285 ebx->split.physical_line_partition = lines_per_tag - 1;
286 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size / 286 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
287 (ebx->split.ways_of_associativity + 1) - 1; 287 (ebx->split.ways_of_associativity + 1) - 1;
@@ -350,7 +350,8 @@ static int __cpuinit find_num_cache_leaves(void)
350 350
351unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) 351unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
352{ 352{
353 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */ 353 /* Cache sizes */
354 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
354 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */ 355 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
355 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 356 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
356 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; 357 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
@@ -377,8 +378,8 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
377 378
378 retval = cpuid4_cache_lookup_regs(i, &this_leaf); 379 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
379 if (retval >= 0) { 380 if (retval >= 0) {
380 switch(this_leaf.eax.split.level) { 381 switch (this_leaf.eax.split.level) {
381 case 1: 382 case 1:
382 if (this_leaf.eax.split.type == 383 if (this_leaf.eax.split.type ==
383 CACHE_TYPE_DATA) 384 CACHE_TYPE_DATA)
384 new_l1d = this_leaf.size/1024; 385 new_l1d = this_leaf.size/1024;
@@ -386,19 +387,20 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
386 CACHE_TYPE_INST) 387 CACHE_TYPE_INST)
387 new_l1i = this_leaf.size/1024; 388 new_l1i = this_leaf.size/1024;
388 break; 389 break;
389 case 2: 390 case 2:
390 new_l2 = this_leaf.size/1024; 391 new_l2 = this_leaf.size/1024;
391 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; 392 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
392 index_msb = get_count_order(num_threads_sharing); 393 index_msb = get_count_order(num_threads_sharing);
393 l2_id = c->apicid >> index_msb; 394 l2_id = c->apicid >> index_msb;
394 break; 395 break;
395 case 3: 396 case 3:
396 new_l3 = this_leaf.size/1024; 397 new_l3 = this_leaf.size/1024;
397 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing; 398 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
398 index_msb = get_count_order(num_threads_sharing); 399 index_msb = get_count_order(
400 num_threads_sharing);
399 l3_id = c->apicid >> index_msb; 401 l3_id = c->apicid >> index_msb;
400 break; 402 break;
401 default: 403 default:
402 break; 404 break;
403 } 405 }
404 } 406 }
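
A worked instance of the id derivation in this hunk: num_threads_sharing is CPUID.4 EAX[25:14] plus one, get_count_order() rounds it up to a bit count, and shifting the APIC id right by that many bits yields an id common to all siblings that share the cache. For example, with two threads sharing an L2, get_count_order(2) == 1, so APIC ids 4 and 5 both map to l2_id 2; with eight threads sharing an L3, APIC ids 0-7 all map to l3_id 0.

/* sketch: the shared-cache id computed for l2_id/l3_id above */
static unsigned int cache_instance_id(unsigned int apicid,
				      unsigned int num_threads_sharing)
{
	unsigned int index_msb = get_count_order(num_threads_sharing);

	return apicid >> index_msb;
}
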
@@ -421,22 +423,21 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
421 /* Number of times to iterate */ 423 /* Number of times to iterate */
422 n = cpuid_eax(2) & 0xFF; 424 n = cpuid_eax(2) & 0xFF;
423 425
424 for ( i = 0 ; i < n ; i++ ) { 426 for (i = 0 ; i < n ; i++) {
425 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); 427 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
426 428
427 /* If bit 31 is set, this is an unknown format */ 429 /* If bit 31 is set, this is an unknown format */
428 for ( j = 0 ; j < 3 ; j++ ) { 430 for (j = 0 ; j < 3 ; j++)
429 if (regs[j] & (1 << 31)) regs[j] = 0; 431 if (regs[j] & (1 << 31))
430 } 432 regs[j] = 0;
431 433
432 /* Byte 0 is level count, not a descriptor */ 434 /* Byte 0 is level count, not a descriptor */
433 for ( j = 1 ; j < 16 ; j++ ) { 435 for (j = 1 ; j < 16 ; j++) {
434 unsigned char des = dp[j]; 436 unsigned char des = dp[j];
435 unsigned char k = 0; 437 unsigned char k = 0;
436 438
437 /* look up this descriptor in the table */ 439 /* look up this descriptor in the table */
438 while (cache_table[k].descriptor != 0) 440 while (cache_table[k].descriptor != 0) {
439 {
440 if (cache_table[k].descriptor == des) { 441 if (cache_table[k].descriptor == des) {
441 if (only_trace && cache_table[k].cache_type != LVL_TRACE) 442 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
442 break; 443 break;
@@ -488,14 +489,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
488 } 489 }
489 490
490 if (trace) 491 if (trace)
491 printk (KERN_INFO "CPU: Trace cache: %dK uops", trace); 492 printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
492 else if ( l1i ) 493 else if (l1i)
493 printk (KERN_INFO "CPU: L1 I cache: %dK", l1i); 494 printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);
494 495
495 if (l1d) 496 if (l1d)
496 printk(", L1 D cache: %dK\n", l1d); 497 printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
497 else 498 else
498 printk("\n"); 499 printk(KERN_CONT "\n");
499 500
500 if (l2) 501 if (l2)
501 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2); 502 printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
@@ -522,6 +523,18 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
522 int index_msb, i; 523 int index_msb, i;
523 struct cpuinfo_x86 *c = &cpu_data(cpu); 524 struct cpuinfo_x86 *c = &cpu_data(cpu);
524 525
526 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
527 struct cpuinfo_x86 *d;
528 for_each_online_cpu(i) {
529 if (!per_cpu(cpuid4_info, i))
530 continue;
531 d = &cpu_data(i);
532 this_leaf = CPUID4_INFO_IDX(i, index);
533 cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
534 d->llc_shared_map);
535 }
536 return;
537 }
525 this_leaf = CPUID4_INFO_IDX(cpu, index); 538 this_leaf = CPUID4_INFO_IDX(cpu, index);
526 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; 539 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
527 540
@@ -558,8 +571,13 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
558 } 571 }
559} 572}
560#else 573#else
561static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {} 574static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
562static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {} 575{
576}
577
578static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
579{
580}
563#endif 581#endif
564 582
565static void __cpuinit free_cache_attributes(unsigned int cpu) 583static void __cpuinit free_cache_attributes(unsigned int cpu)
@@ -645,7 +663,7 @@ static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
645static ssize_t show_##file_name \ 663static ssize_t show_##file_name \
646 (struct _cpuid4_info *this_leaf, char *buf) \ 664 (struct _cpuid4_info *this_leaf, char *buf) \
647{ \ 665{ \
648 return sprintf (buf, "%lu\n", (unsigned long)this_leaf->object + val); \ 666 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
649} 667}
650 668
651show_one_plus(level, eax.split.level, 0); 669show_one_plus(level, eax.split.level, 0);
@@ -656,7 +674,7 @@ show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
656 674
657static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf) 675static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
658{ 676{
659 return sprintf (buf, "%luK\n", this_leaf->size / 1024); 677 return sprintf(buf, "%luK\n", this_leaf->size / 1024);
660} 678}
661 679
662static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf, 680static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
@@ -669,7 +687,7 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
669 const struct cpumask *mask; 687 const struct cpumask *mask;
670 688
671 mask = to_cpumask(this_leaf->shared_cpu_map); 689 mask = to_cpumask(this_leaf->shared_cpu_map);
672 n = type? 690 n = type ?
673 cpulist_scnprintf(buf, len-2, mask) : 691 cpulist_scnprintf(buf, len-2, mask) :
674 cpumask_scnprintf(buf, len-2, mask); 692 cpumask_scnprintf(buf, len-2, mask);
675 buf[n++] = '\n'; 693 buf[n++] = '\n';
@@ -800,7 +818,7 @@ static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
800static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644, 818static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
801 show_cache_disable_1, store_cache_disable_1); 819 show_cache_disable_1, store_cache_disable_1);
802 820
803static struct attribute * default_attrs[] = { 821static struct attribute *default_attrs[] = {
804 &type.attr, 822 &type.attr,
805 &level.attr, 823 &level.attr,
806 &coherency_line_size.attr, 824 &coherency_line_size.attr,
@@ -815,7 +833,7 @@ static struct attribute * default_attrs[] = {
815 NULL 833 NULL
816}; 834};
817 835
818static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf) 836static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
819{ 837{
820 struct _cache_attr *fattr = to_attr(attr); 838 struct _cache_attr *fattr = to_attr(attr);
821 struct _index_kobject *this_leaf = to_object(kobj); 839 struct _index_kobject *this_leaf = to_object(kobj);
@@ -828,8 +846,8 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr, char * buf)
828 return ret; 846 return ret;
829} 847}
830 848
831static ssize_t store(struct kobject * kobj, struct attribute * attr, 849static ssize_t store(struct kobject *kobj, struct attribute *attr,
832 const char * buf, size_t count) 850 const char *buf, size_t count)
833{ 851{
834 struct _cache_attr *fattr = to_attr(attr); 852 struct _cache_attr *fattr = to_attr(attr);
835 struct _index_kobject *this_leaf = to_object(kobj); 853 struct _index_kobject *this_leaf = to_object(kobj);
@@ -883,7 +901,7 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
883 goto err_out; 901 goto err_out;
884 902
885 per_cpu(index_kobject, cpu) = kzalloc( 903 per_cpu(index_kobject, cpu) = kzalloc(
886 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL); 904 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
887 if (unlikely(per_cpu(index_kobject, cpu) == NULL)) 905 if (unlikely(per_cpu(index_kobject, cpu) == NULL))
888 goto err_out; 906 goto err_out;
889 907
@@ -917,7 +935,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
917 } 935 }
918 936
919 for (i = 0; i < num_cache_leaves; i++) { 937 for (i = 0; i < num_cache_leaves; i++) {
920 this_object = INDEX_KOBJECT_PTR(cpu,i); 938 this_object = INDEX_KOBJECT_PTR(cpu, i);
921 this_object->cpu = cpu; 939 this_object->cpu = cpu;
922 this_object->index = i; 940 this_object->index = i;
923 retval = kobject_init_and_add(&(this_object->kobj), 941 retval = kobject_init_and_add(&(this_object->kobj),
@@ -925,9 +943,8 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
925 per_cpu(cache_kobject, cpu), 943 per_cpu(cache_kobject, cpu),
926 "index%1lu", i); 944 "index%1lu", i);
927 if (unlikely(retval)) { 945 if (unlikely(retval)) {
928 for (j = 0; j < i; j++) { 946 for (j = 0; j < i; j++)
929 kobject_put(&(INDEX_KOBJECT_PTR(cpu,j)->kobj)); 947 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
930 }
931 kobject_put(per_cpu(cache_kobject, cpu)); 948 kobject_put(per_cpu(cache_kobject, cpu));
932 cpuid4_cache_sysfs_exit(cpu); 949 cpuid4_cache_sysfs_exit(cpu);
933 return retval; 950 return retval;
@@ -952,7 +969,7 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
952 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map)); 969 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
953 970
954 for (i = 0; i < num_cache_leaves; i++) 971 for (i = 0; i < num_cache_leaves; i++)
955 kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 972 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
956 kobject_put(per_cpu(cache_kobject, cpu)); 973 kobject_put(per_cpu(cache_kobject, cpu));
957 cpuid4_cache_sysfs_exit(cpu); 974 cpuid4_cache_sysfs_exit(cpu);
958} 975}
@@ -977,8 +994,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
977 return NOTIFY_OK; 994 return NOTIFY_OK;
978} 995}
979 996
980static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = 997static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
981{
982 .notifier_call = cacheinfo_cpu_callback, 998 .notifier_call = cacheinfo_cpu_callback,
983}; 999};
984 1000
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 188a1ca5ad2b..4ac6d48fe11b 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,11 +1,8 @@
1obj-y = mce.o 1obj-y = mce.o mce-severity.o
2 2
3obj-$(CONFIG_X86_NEW_MCE) += mce-severity.o
4obj-$(CONFIG_X86_OLD_MCE) += k7.o p4.o p6.o
5obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o 3obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o
6obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o 4obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
7obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o 5obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
8obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
9obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o 6obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
10obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o 7obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
11 8
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
deleted file mode 100644
index b945d5dbc609..000000000000
--- a/arch/x86/kernel/cpu/mcheck/k7.c
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
2 * Athlon specific Machine Check Exception Reporting
3 * (C) Copyright 2002 Dave Jones <davej@redhat.com>
4 */
5#include <linux/interrupt.h>
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/smp.h>
10
11#include <asm/processor.h>
12#include <asm/system.h>
13#include <asm/mce.h>
14#include <asm/msr.h>
15
16/* Machine Check Handler For AMD Athlon/Duron: */
17static void k7_machine_check(struct pt_regs *regs, long error_code)
18{
19 u32 alow, ahigh, high, low;
20 u32 mcgstl, mcgsth;
21 int recover = 1;
22 int i;
23
24 rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
25 if (mcgstl & (1<<0)) /* Recoverable ? */
26 recover = 0;
27
28 printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
29 smp_processor_id(), mcgsth, mcgstl);
30
31 for (i = 1; i < nr_mce_banks; i++) {
32 rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
33 if (high & (1<<31)) {
34 char misc[20];
35 char addr[24];
36
37 misc[0] = '\0';
38 addr[0] = '\0';
39
40 if (high & (1<<29))
41 recover |= 1;
42 if (high & (1<<25))
43 recover |= 2;
44 high &= ~(1<<31);
45
46 if (high & (1<<27)) {
47 rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
48 snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
49 }
50 if (high & (1<<26)) {
51 rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
52 snprintf(addr, 24, " at %08x%08x", ahigh, alow);
53 }
54
55 printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
56 smp_processor_id(), i, high, low, misc, addr);
57
58 /* Clear it: */
59 wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
60 /* Serialize: */
61 wmb();
62 add_taint(TAINT_MACHINE_CHECK);
63 }
64 }
65
66 if (recover & 2)
67 panic("CPU context corrupt");
68 if (recover & 1)
69 panic("Unable to continue");
70
71 printk(KERN_EMERG "Attempting to continue.\n");
72
73 mcgstl &= ~(1<<2);
74 wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
75}
76
77
78/* AMD K7 machine check is Intel like: */
79void amd_mcheck_init(struct cpuinfo_x86 *c)
80{
81 u32 l, h;
82 int i;
83
84 if (!cpu_has(c, X86_FEATURE_MCE))
85 return;
86
87 machine_check_vector = k7_machine_check;
88 /* Make sure the vector pointer is visible before we enable MCEs: */
89 wmb();
90
91 printk(KERN_INFO "Intel machine check architecture supported.\n");
92
93 rdmsr(MSR_IA32_MCG_CAP, l, h);
94 if (l & (1<<8)) /* Control register present ? */
95 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
96 nr_mce_banks = l & 0xff;
97
98 /*
 99 * Clear status for MC index 0 separately; we don't touch CTL,
 100 * as some K7 Athlons cause spurious MCEs when it is enabled:
101 */
102 if (boot_cpu_data.x86 == 6) {
103 wrmsr(MSR_IA32_MC0_STATUS, 0x0, 0x0);
104 i = 1;
105 } else
106 i = 0;
107
108 for (; i < nr_mce_banks; i++) {
109 wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
110 wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
111 }
112
113 set_in_cr4(X86_CR4_MCE);
114 printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
115 smp_processor_id());
116}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index a3a235a53f09..7029f0e2acad 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -18,7 +18,12 @@
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/fs.h> 19#include <linux/fs.h>
20#include <linux/smp.h> 20#include <linux/smp.h>
21#include <linux/notifier.h>
22#include <linux/kdebug.h>
23#include <linux/cpu.h>
24#include <linux/sched.h>
21#include <asm/mce.h> 25#include <asm/mce.h>
26#include <asm/apic.h>
22 27
23/* Update fake mce registers on current CPU. */ 28/* Update fake mce registers on current CPU. */
24static void inject_mce(struct mce *m) 29static void inject_mce(struct mce *m)
@@ -39,44 +44,141 @@ static void inject_mce(struct mce *m)
39 i->finished = 1; 44 i->finished = 1;
40} 45}
41 46
42struct delayed_mce { 47static void raise_poll(struct mce *m)
43 struct timer_list timer; 48{
44 struct mce m; 49 unsigned long flags;
45}; 50 mce_banks_t b;
46 51
47/* Inject mce on current CPU */ 52 memset(&b, 0xff, sizeof(mce_banks_t));
48static void raise_mce(unsigned long data) 53 local_irq_save(flags);
54 machine_check_poll(0, &b);
55 local_irq_restore(flags);
56 m->finished = 0;
57}
58
59static void raise_exception(struct mce *m, struct pt_regs *pregs)
49{ 60{
50 struct delayed_mce *dm = (struct delayed_mce *)data; 61 struct pt_regs regs;
51 struct mce *m = &dm->m; 62 unsigned long flags;
52 int cpu = m->extcpu;
53 63
54 inject_mce(m); 64 if (!pregs) {
55 if (m->status & MCI_STATUS_UC) {
56 struct pt_regs regs;
57 memset(&regs, 0, sizeof(struct pt_regs)); 65 memset(&regs, 0, sizeof(struct pt_regs));
58 regs.ip = m->ip; 66 regs.ip = m->ip;
59 regs.cs = m->cs; 67 regs.cs = m->cs;
68 pregs = &regs;
69 }
 70 /* in the mcheck exception handler, irqs will be disabled */
71 local_irq_save(flags);
72 do_machine_check(pregs, 0);
73 local_irq_restore(flags);
74 m->finished = 0;
75}
76
77static cpumask_t mce_inject_cpumask;
78
79static int mce_raise_notify(struct notifier_block *self,
80 unsigned long val, void *data)
81{
82 struct die_args *args = (struct die_args *)data;
83 int cpu = smp_processor_id();
84 struct mce *m = &__get_cpu_var(injectm);
85 if (val != DIE_NMI_IPI || !cpu_isset(cpu, mce_inject_cpumask))
86 return NOTIFY_DONE;
87 cpu_clear(cpu, mce_inject_cpumask);
88 if (m->inject_flags & MCJ_EXCEPTION)
89 raise_exception(m, args->regs);
90 else if (m->status)
91 raise_poll(m);
92 return NOTIFY_STOP;
93}
94
95static struct notifier_block mce_raise_nb = {
96 .notifier_call = mce_raise_notify,
97 .priority = 1000,
98};
99
100/* Inject mce on current CPU */
101static int raise_local(struct mce *m)
102{
103 int context = MCJ_CTX(m->inject_flags);
104 int ret = 0;
105 int cpu = m->extcpu;
106
107 if (m->inject_flags & MCJ_EXCEPTION) {
60 printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); 108 printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
61 do_machine_check(&regs, 0); 109 switch (context) {
110 case MCJ_CTX_IRQ:
111 /*
112 * Could do more to fake interrupts like
113 * calling irq_enter, but the necessary
114 * machinery isn't exported currently.
115 */
116 /*FALL THROUGH*/
117 case MCJ_CTX_PROCESS:
118 raise_exception(m, NULL);
119 break;
120 default:
121 printk(KERN_INFO "Invalid MCE context\n");
122 ret = -EINVAL;
123 }
62 printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); 124 printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
63 } else { 125 } else if (m->status) {
64 mce_banks_t b;
65 memset(&b, 0xff, sizeof(mce_banks_t));
66 printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); 126 printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
67 machine_check_poll(0, &b); 127 raise_poll(m);
68 mce_notify_irq(); 128 mce_notify_irq();
69 printk(KERN_INFO "Finished machine check poll on CPU %d\n", 129 printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
70 cpu); 130 } else
71 } 131 m->finished = 0;
72 kfree(dm); 132
133 return ret;
134}
135
136static void raise_mce(struct mce *m)
137{
138 int context = MCJ_CTX(m->inject_flags);
139
140 inject_mce(m);
141
142 if (context == MCJ_CTX_RANDOM)
143 return;
144
145#ifdef CONFIG_X86_LOCAL_APIC
146 if (m->inject_flags & MCJ_NMI_BROADCAST) {
147 unsigned long start;
148 int cpu;
149 get_online_cpus();
150 mce_inject_cpumask = cpu_online_map;
151 cpu_clear(get_cpu(), mce_inject_cpumask);
152 for_each_online_cpu(cpu) {
153 struct mce *mcpu = &per_cpu(injectm, cpu);
154 if (!mcpu->finished ||
155 MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
156 cpu_clear(cpu, mce_inject_cpumask);
157 }
158 if (!cpus_empty(mce_inject_cpumask))
159 apic->send_IPI_mask(&mce_inject_cpumask, NMI_VECTOR);
160 start = jiffies;
161 while (!cpus_empty(mce_inject_cpumask)) {
162 if (!time_before(jiffies, start + 2*HZ)) {
163 printk(KERN_ERR
164 "Timeout waiting for mce inject NMI %lx\n",
165 *cpus_addr(mce_inject_cpumask));
166 break;
167 }
168 cpu_relax();
169 }
170 raise_local(m);
171 put_cpu();
172 put_online_cpus();
173 } else
174#endif
175 raise_local(m);
73} 176}
74 177
75/* Error injection interface */ 178/* Error injection interface */
76static ssize_t mce_write(struct file *filp, const char __user *ubuf, 179static ssize_t mce_write(struct file *filp, const char __user *ubuf,
77 size_t usize, loff_t *off) 180 size_t usize, loff_t *off)
78{ 181{
79 struct delayed_mce *dm;
80 struct mce m; 182 struct mce m;
81 183
82 if (!capable(CAP_SYS_ADMIN)) 184 if (!capable(CAP_SYS_ADMIN))
@@ -96,19 +198,12 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
96 if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu)) 198 if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
97 return -EINVAL; 199 return -EINVAL;
98 200
99 dm = kmalloc(sizeof(struct delayed_mce), GFP_KERNEL);
100 if (!dm)
101 return -ENOMEM;
102
103 /* 201 /*
104 * Need to give user space some time to set everything up, 202 * Need to give user space some time to set everything up,
105 * so do it a jiffie or two later everywhere. 203 * so do it a jiffie or two later everywhere.
106 * Should we use a hrtimer here for better synchronization?
107 */ 204 */
108 memcpy(&dm->m, &m, sizeof(struct mce)); 205 schedule_timeout(2);
109 setup_timer(&dm->timer, raise_mce, (unsigned long)dm); 206 raise_mce(&m);
110 dm->timer.expires = jiffies + 2;
111 add_timer_on(&dm->timer, m.extcpu);
112 return usize; 207 return usize;
113} 208}
114 209
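
With the timer path gone, mce_write() now raises the injected event synchronously (after a short schedule_timeout() so user space can finish setting up). Driving the injector from user space amounts to writing a binary struct mce to /dev/mcelog while the mce-inject module is loaded; the sketch below assumes the struct layout of the running kernel's <asm/mce.h> and requires CAP_SYS_ADMIN:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <asm/mce.h>	/* struct mce; layout must match the kernel */

/* sketch: inject a correctable (polled) event on one CPU */
static int inject_poll_event(int cpu, unsigned long long status)
{
	struct mce m;
	int fd = open("/dev/mcelog", O_WRONLY);

	if (fd < 0)
		return -1;
	memset(&m, 0, sizeof(m));
	m.extcpu = cpu;		/* target CPU, validated by mce_write() */
	m.status = status;	/* nonzero status + no MCJ_EXCEPTION flag
				   -> the raise_poll() path above */
	if (write(fd, &m, sizeof(m)) != sizeof(m)) {
		close(fd);
		return -1;
	}
	return close(fd);
}
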
@@ -116,6 +211,7 @@ static int inject_init(void)
116{ 211{
117 printk(KERN_INFO "Machine check injector initialized\n"); 212 printk(KERN_INFO "Machine check injector initialized\n");
118 mce_chrdev_ops.write = mce_write; 213 mce_chrdev_ops.write = mce_write;
214 register_die_notifier(&mce_raise_nb);
119 return 0; 215 return 0;
120} 216}
121 217
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 54dcb8ff12e5..32996f9fab67 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -1,3 +1,4 @@
1#include <linux/sysdev.h>
1#include <asm/mce.h> 2#include <asm/mce.h>
2 3
3enum severity_level { 4enum severity_level {
@@ -10,6 +11,20 @@ enum severity_level {
10 MCE_PANIC_SEVERITY, 11 MCE_PANIC_SEVERITY,
11}; 12};
12 13
14#define ATTR_LEN 16
15
16/* One object for each MCE bank, shared by all CPUs */
17struct mce_bank {
18 u64 ctl; /* subevents to enable */
19 unsigned char init; /* initialise bank? */
20 struct sysdev_attribute attr; /* sysdev attribute */
21 char attrname[ATTR_LEN]; /* attribute name */
22};
23
13int mce_severity(struct mce *a, int tolerant, char **msg); 24int mce_severity(struct mce *a, int tolerant, char **msg);
25struct dentry *mce_get_debugfs_dir(void);
14 26
15extern int mce_ser; 27extern int mce_ser;
28
29extern struct mce_bank *mce_banks;
30
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index ff0807f97056..8a85dd1b1aa1 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -139,6 +139,7 @@ int mce_severity(struct mce *a, int tolerant, char **msg)
139 } 139 }
140} 140}
141 141
142#ifdef CONFIG_DEBUG_FS
142static void *s_start(struct seq_file *f, loff_t *pos) 143static void *s_start(struct seq_file *f, loff_t *pos)
143{ 144{
144 if (*pos >= ARRAY_SIZE(severities)) 145 if (*pos >= ARRAY_SIZE(severities))
@@ -197,7 +198,7 @@ static int __init severities_debugfs_init(void)
197{ 198{
198 struct dentry *dmce = NULL, *fseverities_coverage = NULL; 199 struct dentry *dmce = NULL, *fseverities_coverage = NULL;
199 200
200 dmce = debugfs_create_dir("mce", NULL); 201 dmce = mce_get_debugfs_dir();
201 if (dmce == NULL) 202 if (dmce == NULL)
202 goto err_out; 203 goto err_out;
203 fseverities_coverage = debugfs_create_file("severities-coverage", 204 fseverities_coverage = debugfs_create_file("severities-coverage",
@@ -209,10 +210,7 @@ static int __init severities_debugfs_init(void)
209 return 0; 210 return 0;
210 211
211err_out: 212err_out:
212 if (fseverities_coverage)
213 debugfs_remove(fseverities_coverage);
214 if (dmce)
215 debugfs_remove(dmce);
216 return -ENOMEM; 213 return -ENOMEM;
217} 214}
218late_initcall(severities_debugfs_init); 215late_initcall(severities_debugfs_init);
216#endif
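
The "mce" debugfs directory is now created once and shared (see mce_get_debugfs_dir() in the mce.c hunk below), so the severities code must clean up only the file it created itself and leave the directory alone on error. The resulting caller pattern is roughly as follows (the 0444 mode and the severities_coverage_fops name are assumptions; the hunk does not show them):

	dmce = mce_get_debugfs_dir();	/* shared dir, created on first use */
	if (!dmce)
		return -ENOMEM;
	fseverities_coverage = debugfs_create_file("severities-coverage",
						   0444, dmce, NULL,
						   &severities_coverage_fops);
	if (!fseverities_coverage)
		return -ENOMEM;		/* do not remove the shared dir */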
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 01213048f62f..2f5aab26320e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -34,6 +34,7 @@
34#include <linux/smp.h> 34#include <linux/smp.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/mm.h> 36#include <linux/mm.h>
37#include <linux/debugfs.h>
37 38
38#include <asm/processor.h> 39#include <asm/processor.h>
39#include <asm/hw_irq.h> 40#include <asm/hw_irq.h>
@@ -45,21 +46,8 @@
45 46
46#include "mce-internal.h" 47#include "mce-internal.h"
47 48
48/* Handle unconfigured int18 (should never happen) */
49static void unexpected_machine_check(struct pt_regs *regs, long error_code)
50{
51 printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
52 smp_processor_id());
53}
54
55/* Call the installed machine check handler for this CPU setup. */
56void (*machine_check_vector)(struct pt_regs *, long error_code) =
57 unexpected_machine_check;
58
59int mce_disabled __read_mostly; 49int mce_disabled __read_mostly;
60 50
61#ifdef CONFIG_X86_NEW_MCE
62
63#define MISC_MCELOG_MINOR 227 51#define MISC_MCELOG_MINOR 227
64 52
65#define SPINUNIT 100 /* 100ns */ 53#define SPINUNIT 100 /* 100ns */
@@ -77,7 +65,6 @@ DEFINE_PER_CPU(unsigned, mce_exception_count);
77 */ 65 */
78static int tolerant __read_mostly = 1; 66static int tolerant __read_mostly = 1;
79static int banks __read_mostly; 67static int banks __read_mostly;
80static u64 *bank __read_mostly;
81static int rip_msr __read_mostly; 68static int rip_msr __read_mostly;
82static int mce_bootlog __read_mostly = -1; 69static int mce_bootlog __read_mostly = -1;
83static int monarch_timeout __read_mostly = -1; 70static int monarch_timeout __read_mostly = -1;
@@ -87,13 +74,13 @@ int mce_cmci_disabled __read_mostly;
87int mce_ignore_ce __read_mostly; 74int mce_ignore_ce __read_mostly;
88int mce_ser __read_mostly; 75int mce_ser __read_mostly;
89 76
77struct mce_bank *mce_banks __read_mostly;
78
90/* User mode helper program triggered by machine check event */ 79/* User mode helper program triggered by machine check event */
91static unsigned long mce_need_notify; 80static unsigned long mce_need_notify;
92static char mce_helper[128]; 81static char mce_helper[128];
93static char *mce_helper_argv[2] = { mce_helper, NULL }; 82static char *mce_helper_argv[2] = { mce_helper, NULL };
94 83
95static unsigned long dont_init_banks;
96
97static DECLARE_WAIT_QUEUE_HEAD(mce_wait); 84static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
98static DEFINE_PER_CPU(struct mce, mces_seen); 85static DEFINE_PER_CPU(struct mce, mces_seen);
99static int cpu_missing; 86static int cpu_missing;
@@ -104,11 +91,6 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
104 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL 91 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
105}; 92};
106 93
107static inline int skip_bank_init(int i)
108{
109 return i < BITS_PER_LONG && test_bit(i, &dont_init_banks);
110}
111
112static DEFINE_PER_CPU(struct work_struct, mce_work); 94static DEFINE_PER_CPU(struct work_struct, mce_work);
113 95
114/* Do initial initialization of a struct mce */ 96/* Do initial initialization of a struct mce */
@@ -183,6 +165,11 @@ void mce_log(struct mce *mce)
183 set_bit(0, &mce_need_notify); 165 set_bit(0, &mce_need_notify);
184} 166}
185 167
168void __weak decode_mce(struct mce *m)
169{
170 return;
171}
172
186static void print_mce(struct mce *m) 173static void print_mce(struct mce *m)
187{ 174{
188 printk(KERN_EMERG 175 printk(KERN_EMERG
@@ -205,6 +192,8 @@ static void print_mce(struct mce *m)
205 printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", 192 printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
206 m->cpuvendor, m->cpuid, m->time, m->socketid, 193 m->cpuvendor, m->cpuid, m->time, m->socketid,
207 m->apicid); 194 m->apicid);
195
196 decode_mce(m);
208} 197}
209 198
210static void print_mce_head(void) 199static void print_mce_head(void)
@@ -215,13 +204,19 @@ static void print_mce_head(void)
215static void print_mce_tail(void) 204static void print_mce_tail(void)
216{ 205{
217 printk(KERN_EMERG "This is not a software problem!\n" 206 printk(KERN_EMERG "This is not a software problem!\n"
218 "Run through mcelog --ascii to decode and contact your hardware vendor\n"); 207#if (!defined(CONFIG_EDAC) || !defined(CONFIG_CPU_SUP_AMD))
208 "Run through mcelog --ascii to decode and contact your hardware vendor\n"
209#endif
210 );
219} 211}
220 212
221#define PANIC_TIMEOUT 5 /* 5 seconds */ 213#define PANIC_TIMEOUT 5 /* 5 seconds */
222 214
223static atomic_t mce_paniced; 215static atomic_t mce_paniced;
224 216
217static int fake_panic;
218static atomic_t mce_fake_paniced;
219
225/* Panic in progress. Enable interrupts and wait for final IPI */ 220/* Panic in progress. Enable interrupts and wait for final IPI */
226static void wait_for_panic(void) 221static void wait_for_panic(void)
227{ 222{
@@ -239,15 +234,21 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
239{ 234{
240 int i; 235 int i;
241 236
242 /* 237 if (!fake_panic) {
243 * Make sure only one CPU runs in machine check panic 238 /*
244 */ 239 * Make sure only one CPU runs in machine check panic
245 if (atomic_add_return(1, &mce_paniced) > 1) 240 */
246 wait_for_panic(); 241 if (atomic_inc_return(&mce_paniced) > 1)
247 barrier(); 242 wait_for_panic();
243 barrier();
248 244
249 bust_spinlocks(1); 245 bust_spinlocks(1);
250 console_verbose(); 246 console_verbose();
247 } else {
248 /* Don't log too much for fake panic */
249 if (atomic_inc_return(&mce_fake_paniced) > 1)
250 return;
251 }
251 print_mce_head(); 252 print_mce_head();
252 /* First print corrected ones that are still unlogged */ 253 /* First print corrected ones that are still unlogged */
253 for (i = 0; i < MCE_LOG_LEN; i++) { 254 for (i = 0; i < MCE_LOG_LEN; i++) {
@@ -274,9 +275,12 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
274 print_mce_tail(); 275 print_mce_tail();
275 if (exp) 276 if (exp)
276 printk(KERN_EMERG "Machine check: %s\n", exp); 277 printk(KERN_EMERG "Machine check: %s\n", exp);
277 if (panic_timeout == 0) 278 if (!fake_panic) {
278 panic_timeout = mce_panic_timeout; 279 if (panic_timeout == 0)
279 panic(msg); 280 panic_timeout = mce_panic_timeout;
281 panic(msg);
282 } else
283 printk(KERN_EMERG "Fake kernel panic: %s\n", msg);
280} 284}
281 285
282/* Support code for software error injection */ 286/* Support code for software error injection */
@@ -286,11 +290,11 @@ static int msr_to_offset(u32 msr)
286 unsigned bank = __get_cpu_var(injectm.bank); 290 unsigned bank = __get_cpu_var(injectm.bank);
287 if (msr == rip_msr) 291 if (msr == rip_msr)
288 return offsetof(struct mce, ip); 292 return offsetof(struct mce, ip);
289 if (msr == MSR_IA32_MC0_STATUS + bank*4) 293 if (msr == MSR_IA32_MCx_STATUS(bank))
290 return offsetof(struct mce, status); 294 return offsetof(struct mce, status);
291 if (msr == MSR_IA32_MC0_ADDR + bank*4) 295 if (msr == MSR_IA32_MCx_ADDR(bank))
292 return offsetof(struct mce, addr); 296 return offsetof(struct mce, addr);
293 if (msr == MSR_IA32_MC0_MISC + bank*4) 297 if (msr == MSR_IA32_MCx_MISC(bank))
294 return offsetof(struct mce, misc); 298 return offsetof(struct mce, misc);
295 if (msr == MSR_IA32_MCG_STATUS) 299 if (msr == MSR_IA32_MCG_STATUS)
296 return offsetof(struct mce, mcgstatus); 300 return offsetof(struct mce, mcgstatus);
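
The MSR_IA32_MCx_*() helpers replace the open-coded `MC0 + bank*4` arithmetic seen on the left-hand side of these hunks; to stay equivalent they must expand to the same addresses, i.e. roughly (defined in asm/msr-index.h, outside these hunks):

	#define MSR_IA32_MCx_CTL(x)	(MSR_IA32_MC0_CTL + 4*(x))
	#define MSR_IA32_MCx_STATUS(x)	(MSR_IA32_MC0_STATUS + 4*(x))
	#define MSR_IA32_MCx_ADDR(x)	(MSR_IA32_MC0_ADDR + 4*(x))
	#define MSR_IA32_MCx_MISC(x)	(MSR_IA32_MC0_MISC + 4*(x))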
@@ -495,7 +499,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
495 499
496 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); 500 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
497 for (i = 0; i < banks; i++) { 501 for (i = 0; i < banks; i++) {
498 if (!bank[i] || !test_bit(i, *b)) 502 if (!mce_banks[i].ctl || !test_bit(i, *b))
499 continue; 503 continue;
500 504
501 m.misc = 0; 505 m.misc = 0;
@@ -504,7 +508,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
504 m.tsc = 0; 508 m.tsc = 0;
505 509
506 barrier(); 510 barrier();
507 m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); 511 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
508 if (!(m.status & MCI_STATUS_VAL)) 512 if (!(m.status & MCI_STATUS_VAL))
509 continue; 513 continue;
510 514
@@ -519,9 +523,9 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
519 continue; 523 continue;
520 524
521 if (m.status & MCI_STATUS_MISCV) 525 if (m.status & MCI_STATUS_MISCV)
522 m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); 526 m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
523 if (m.status & MCI_STATUS_ADDRV) 527 if (m.status & MCI_STATUS_ADDRV)
524 m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); 528 m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
525 529
526 if (!(flags & MCP_TIMESTAMP)) 530 if (!(flags & MCP_TIMESTAMP))
527 m.tsc = 0; 531 m.tsc = 0;
@@ -537,7 +541,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
537 /* 541 /*
538 * Clear state for this bank. 542 * Clear state for this bank.
539 */ 543 */
540 mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 544 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
541 } 545 }
542 546
543 /* 547 /*
@@ -558,7 +562,7 @@ static int mce_no_way_out(struct mce *m, char **msg)
558 int i; 562 int i;
559 563
560 for (i = 0; i < banks; i++) { 564 for (i = 0; i < banks; i++) {
561 m->status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); 565 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
562 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) 566 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
563 return 1; 567 return 1;
564 } 568 }
@@ -618,7 +622,7 @@ out:
618	 * This way we prevent any potential data corruption in an unrecoverable case	 622	 * This way we prevent any potential data corruption in an unrecoverable case
619	 * and also makes sure that all CPUs' errors are always examined.	 623	 * and also makes sure that all CPUs' errors are always examined.
620 * 624 *
621 * Also this detects the case of an machine check event coming from outer 625 * Also this detects the case of a machine check event coming from outer
622 * space (not detected by any CPUs) In this case some external agent wants 626 * space (not detected by any CPUs) In this case some external agent wants
623 * us to shut down, so panic too. 627 * us to shut down, so panic too.
624 * 628 *
@@ -671,7 +675,7 @@ static void mce_reign(void)
671 * No machine check event found. Must be some external 675 * No machine check event found. Must be some external
672 * source or one CPU is hung. Panic. 676 * source or one CPU is hung. Panic.
673 */ 677 */
674 if (!m && tolerant < 3) 678 if (global_worst <= MCE_KEEP_SEVERITY && tolerant < 3)
675 mce_panic("Machine check from unknown source", NULL, NULL); 679 mce_panic("Machine check from unknown source", NULL, NULL);
676 680
677 /* 681 /*
@@ -705,7 +709,7 @@ static int mce_start(int *no_way_out)
705 * global_nwo should be updated before mce_callin 709 * global_nwo should be updated before mce_callin
706 */ 710 */
707 smp_wmb(); 711 smp_wmb();
708 order = atomic_add_return(1, &mce_callin); 712 order = atomic_inc_return(&mce_callin);
709 713
710 /* 714 /*
711 * Wait for everyone. 715 * Wait for everyone.
@@ -842,7 +846,7 @@ static void mce_clear_state(unsigned long *toclear)
842 846
843 for (i = 0; i < banks; i++) { 847 for (i = 0; i < banks; i++) {
844 if (test_bit(i, toclear)) 848 if (test_bit(i, toclear))
845 mce_wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 849 mce_wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
846 } 850 }
847} 851}
848 852
@@ -895,11 +899,11 @@ void do_machine_check(struct pt_regs *regs, long error_code)
895 mce_setup(&m); 899 mce_setup(&m);
896 900
897 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS); 901 m.mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
898 no_way_out = mce_no_way_out(&m, &msg);
899
900 final = &__get_cpu_var(mces_seen); 902 final = &__get_cpu_var(mces_seen);
901 *final = m; 903 *final = m;
902 904
905 no_way_out = mce_no_way_out(&m, &msg);
906
903 barrier(); 907 barrier();
904 908
905 /* 909 /*
@@ -916,14 +920,14 @@ void do_machine_check(struct pt_regs *regs, long error_code)
916 order = mce_start(&no_way_out); 920 order = mce_start(&no_way_out);
917 for (i = 0; i < banks; i++) { 921 for (i = 0; i < banks; i++) {
918 __clear_bit(i, toclear); 922 __clear_bit(i, toclear);
919 if (!bank[i]) 923 if (!mce_banks[i].ctl)
920 continue; 924 continue;
921 925
922 m.misc = 0; 926 m.misc = 0;
923 m.addr = 0; 927 m.addr = 0;
924 m.bank = i; 928 m.bank = i;
925 929
926 m.status = mce_rdmsrl(MSR_IA32_MC0_STATUS + i*4); 930 m.status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
927 if ((m.status & MCI_STATUS_VAL) == 0) 931 if ((m.status & MCI_STATUS_VAL) == 0)
928 continue; 932 continue;
929 933
@@ -964,9 +968,9 @@ void do_machine_check(struct pt_regs *regs, long error_code)
964 kill_it = 1; 968 kill_it = 1;
965 969
966 if (m.status & MCI_STATUS_MISCV) 970 if (m.status & MCI_STATUS_MISCV)
967 m.misc = mce_rdmsrl(MSR_IA32_MC0_MISC + i*4); 971 m.misc = mce_rdmsrl(MSR_IA32_MCx_MISC(i));
968 if (m.status & MCI_STATUS_ADDRV) 972 if (m.status & MCI_STATUS_ADDRV)
969 m.addr = mce_rdmsrl(MSR_IA32_MC0_ADDR + i*4); 973 m.addr = mce_rdmsrl(MSR_IA32_MCx_ADDR(i));
970 974
971 /* 975 /*
972 * Action optional error. Queue address for later processing. 976 * Action optional error. Queue address for later processing.
@@ -1091,7 +1095,7 @@ void mce_log_therm_throt_event(__u64 status)
1091 */ 1095 */
1092static int check_interval = 5 * 60; /* 5 minutes */ 1096static int check_interval = 5 * 60; /* 5 minutes */
1093 1097
1094static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ 1098static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
1095static DEFINE_PER_CPU(struct timer_list, mce_timer); 1099static DEFINE_PER_CPU(struct timer_list, mce_timer);
1096 1100
1097static void mcheck_timer(unsigned long data) 1101static void mcheck_timer(unsigned long data)
@@ -1110,7 +1114,7 @@ static void mcheck_timer(unsigned long data)
1110 * Alert userspace if needed. If we logged an MCE, reduce the 1114 * Alert userspace if needed. If we logged an MCE, reduce the
1111 * polling interval, otherwise increase the polling interval. 1115 * polling interval, otherwise increase the polling interval.
1112 */ 1116 */
1113 n = &__get_cpu_var(next_interval); 1117 n = &__get_cpu_var(mce_next_interval);
1114 if (mce_notify_irq()) 1118 if (mce_notify_irq())
1115 *n = max(*n/2, HZ/100); 1119 *n = max(*n/2, HZ/100);
1116 else 1120 else
@@ -1159,10 +1163,25 @@ int mce_notify_irq(void)
1159} 1163}
1160EXPORT_SYMBOL_GPL(mce_notify_irq); 1164EXPORT_SYMBOL_GPL(mce_notify_irq);
1161 1165
1166static int mce_banks_init(void)
1167{
1168 int i;
1169
1170 mce_banks = kzalloc(banks * sizeof(struct mce_bank), GFP_KERNEL);
1171 if (!mce_banks)
1172 return -ENOMEM;
1173 for (i = 0; i < banks; i++) {
1174 struct mce_bank *b = &mce_banks[i];
1175 b->ctl = -1ULL;
1176 b->init = 1;
1177 }
1178 return 0;
1179}
1180
1162/* 1181/*
1163 * Initialize Machine Checks for a CPU. 1182 * Initialize Machine Checks for a CPU.
1164 */ 1183 */
1165static int mce_cap_init(void) 1184static int __cpuinit mce_cap_init(void)
1166{ 1185{
1167 unsigned b; 1186 unsigned b;
1168 u64 cap; 1187 u64 cap;
@@ -1182,11 +1201,10 @@ static int mce_cap_init(void)
1182 /* Don't support asymmetric configurations today */ 1201 /* Don't support asymmetric configurations today */
1183 WARN_ON(banks != 0 && b != banks); 1202 WARN_ON(banks != 0 && b != banks);
1184 banks = b; 1203 banks = b;
1185 if (!bank) { 1204 if (!mce_banks) {
1186 bank = kmalloc(banks * sizeof(u64), GFP_KERNEL); 1205 int err = mce_banks_init();
1187 if (!bank) 1206 if (err)
1188 return -ENOMEM; 1207 return err;
1189 memset(bank, 0xff, banks * sizeof(u64));
1190 } 1208 }
1191 1209
1192 /* Use accurate RIP reporting if available. */ 1210 /* Use accurate RIP reporting if available. */
@@ -1218,15 +1236,16 @@ static void mce_init(void)
1218 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); 1236 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
1219 1237
1220 for (i = 0; i < banks; i++) { 1238 for (i = 0; i < banks; i++) {
1221 if (skip_bank_init(i)) 1239 struct mce_bank *b = &mce_banks[i];
1240 if (!b->init)
1222 continue; 1241 continue;
1223 wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); 1242 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1224 wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); 1243 wrmsrl(MSR_IA32_MCx_STATUS(i), 0);
1225 } 1244 }
1226} 1245}
1227 1246
1228/* Add per CPU specific workarounds here */ 1247/* Add per CPU specific workarounds here */
1229static int mce_cpu_quirks(struct cpuinfo_x86 *c) 1248static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
1230{ 1249{
1231 if (c->x86_vendor == X86_VENDOR_UNKNOWN) { 1250 if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
1232 pr_info("MCE: unknown CPU type - not enabling MCE support.\n"); 1251 pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
@@ -1241,7 +1260,7 @@ static int mce_cpu_quirks(struct cpuinfo_x86 *c)
1241 * trips off incorrectly with the IOMMU & 3ware 1260 * trips off incorrectly with the IOMMU & 3ware
1242 * & Cerberus: 1261 * & Cerberus:
1243 */ 1262 */
1244 clear_bit(10, (unsigned long *)&bank[4]); 1263 clear_bit(10, (unsigned long *)&mce_banks[4].ctl);
1245 } 1264 }
1246 if (c->x86 <= 17 && mce_bootlog < 0) { 1265 if (c->x86 <= 17 && mce_bootlog < 0) {
1247 /* 1266 /*
@@ -1255,7 +1274,7 @@ static int mce_cpu_quirks(struct cpuinfo_x86 *c)
1255 * by default. 1274 * by default.
1256 */ 1275 */
1257 if (c->x86 == 6 && banks > 0) 1276 if (c->x86 == 6 && banks > 0)
1258 bank[0] = 0; 1277 mce_banks[0].ctl = 0;
1259 } 1278 }
1260 1279
1261 if (c->x86_vendor == X86_VENDOR_INTEL) { 1280 if (c->x86_vendor == X86_VENDOR_INTEL) {
@@ -1268,8 +1287,8 @@ static int mce_cpu_quirks(struct cpuinfo_x86 *c)
1268 * valid event later, merely don't write CTL0. 1287 * valid event later, merely don't write CTL0.
1269 */ 1288 */
1270 1289
1271 if (c->x86 == 6 && c->x86_model < 0x1A) 1290 if (c->x86 == 6 && c->x86_model < 0x1A && banks > 0)
1272 __set_bit(0, &dont_init_banks); 1291 mce_banks[0].init = 0;
1273 1292
1274 /* 1293 /*
1275 * All newer Intel systems support MCE broadcasting. Enable 1294 * All newer Intel systems support MCE broadcasting. Enable
@@ -1325,7 +1344,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
1325static void mce_init_timer(void) 1344static void mce_init_timer(void)
1326{ 1345{
1327 struct timer_list *t = &__get_cpu_var(mce_timer); 1346 struct timer_list *t = &__get_cpu_var(mce_timer);
1328 int *n = &__get_cpu_var(next_interval); 1347 int *n = &__get_cpu_var(mce_next_interval);
1329 1348
1330 if (mce_ignore_ce) 1349 if (mce_ignore_ce)
1331 return; 1350 return;
@@ -1338,6 +1357,17 @@ static void mce_init_timer(void)
1338 add_timer_on(t, smp_processor_id()); 1357 add_timer_on(t, smp_processor_id());
1339} 1358}
1340 1359
1360/* Handle unconfigured int18 (should never happen) */
1361static void unexpected_machine_check(struct pt_regs *regs, long error_code)
1362{
1363 printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
1364 smp_processor_id());
1365}
1366
1367/* Call the installed machine check handler for this CPU setup. */
1368void (*machine_check_vector)(struct pt_regs *, long error_code) =
1369 unexpected_machine_check;
1370
1341/* 1371/*
1342 * Called for each booted CPU to set up machine checks. 1372 * Called for each booted CPU to set up machine checks.
1343 * Must be called with preempt off: 1373 * Must be called with preempt off:
@@ -1551,8 +1581,10 @@ static struct miscdevice mce_log_device = {
1551 */ 1581 */
1552static int __init mcheck_enable(char *str) 1582static int __init mcheck_enable(char *str)
1553{ 1583{
1554 if (*str == 0) 1584 if (*str == 0) {
1555 enable_p5_mce(); 1585 enable_p5_mce();
1586 return 1;
1587 }
1556 if (*str == '=') 1588 if (*str == '=')
1557 str++; 1589 str++;
1558 if (!strcmp(str, "off")) 1590 if (!strcmp(str, "off"))
@@ -1593,8 +1625,9 @@ static int mce_disable(void)
1593 int i; 1625 int i;
1594 1626
1595 for (i = 0; i < banks; i++) { 1627 for (i = 0; i < banks; i++) {
1596 if (!skip_bank_init(i)) 1628 struct mce_bank *b = &mce_banks[i];
1597 wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); 1629 if (b->init)
1630 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
1598 } 1631 }
1599 return 0; 1632 return 0;
1600} 1633}
@@ -1669,14 +1702,15 @@ DEFINE_PER_CPU(struct sys_device, mce_dev);
1669__cpuinitdata 1702__cpuinitdata
1670void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); 1703void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
1671 1704
1672static struct sysdev_attribute *bank_attrs; 1705static inline struct mce_bank *attr_to_bank(struct sysdev_attribute *attr)
1706{
1707 return container_of(attr, struct mce_bank, attr);
1708}
1673 1709
1674static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr, 1710static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
1675 char *buf) 1711 char *buf)
1676{ 1712{
1677 u64 b = bank[attr - bank_attrs]; 1713 return sprintf(buf, "%llx\n", attr_to_bank(attr)->ctl);
1678
1679 return sprintf(buf, "%llx\n", b);
1680} 1714}
1681 1715
1682static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr, 1716static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
@@ -1687,7 +1721,7 @@ static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
1687 if (strict_strtoull(buf, 0, &new) < 0) 1721 if (strict_strtoull(buf, 0, &new) < 0)
1688 return -EINVAL; 1722 return -EINVAL;
1689 1723
1690 bank[attr - bank_attrs] = new; 1724 attr_to_bank(attr)->ctl = new;
1691 mce_restart(); 1725 mce_restart();
1692 1726
1693 return size; 1727 return size;
@@ -1829,7 +1863,7 @@ static __cpuinit int mce_create_device(unsigned int cpu)
1829 } 1863 }
1830 for (j = 0; j < banks; j++) { 1864 for (j = 0; j < banks; j++) {
1831 err = sysdev_create_file(&per_cpu(mce_dev, cpu), 1865 err = sysdev_create_file(&per_cpu(mce_dev, cpu),
1832 &bank_attrs[j]); 1866 &mce_banks[j].attr);
1833 if (err) 1867 if (err)
1834 goto error2; 1868 goto error2;
1835 } 1869 }
@@ -1838,10 +1872,10 @@ static __cpuinit int mce_create_device(unsigned int cpu)
1838 return 0; 1872 return 0;
1839error2: 1873error2:
1840 while (--j >= 0) 1874 while (--j >= 0)
1841 sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[j]); 1875 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[j].attr);
1842error: 1876error:
1843 while (--i >= 0) 1877 while (--i >= 0)
1844 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); 1878 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr);
1845 1879
1846 sysdev_unregister(&per_cpu(mce_dev, cpu)); 1880 sysdev_unregister(&per_cpu(mce_dev, cpu));
1847 1881
@@ -1859,7 +1893,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
1859 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]); 1893 sysdev_remove_file(&per_cpu(mce_dev, cpu), mce_attrs[i]);
1860 1894
1861 for (i = 0; i < banks; i++) 1895 for (i = 0; i < banks; i++)
1862 sysdev_remove_file(&per_cpu(mce_dev, cpu), &bank_attrs[i]); 1896 sysdev_remove_file(&per_cpu(mce_dev, cpu), &mce_banks[i].attr);
1863 1897
1864 sysdev_unregister(&per_cpu(mce_dev, cpu)); 1898 sysdev_unregister(&per_cpu(mce_dev, cpu));
1865 cpumask_clear_cpu(cpu, mce_dev_initialized); 1899 cpumask_clear_cpu(cpu, mce_dev_initialized);
@@ -1876,8 +1910,9 @@ static void mce_disable_cpu(void *h)
1876 if (!(action & CPU_TASKS_FROZEN)) 1910 if (!(action & CPU_TASKS_FROZEN))
1877 cmci_clear(); 1911 cmci_clear();
1878 for (i = 0; i < banks; i++) { 1912 for (i = 0; i < banks; i++) {
1879 if (!skip_bank_init(i)) 1913 struct mce_bank *b = &mce_banks[i];
1880 wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); 1914 if (b->init)
1915 wrmsrl(MSR_IA32_MCx_CTL(i), 0);
1881 } 1916 }
1882} 1917}
1883 1918
@@ -1892,8 +1927,9 @@ static void mce_reenable_cpu(void *h)
1892 if (!(action & CPU_TASKS_FROZEN)) 1927 if (!(action & CPU_TASKS_FROZEN))
1893 cmci_reenable(); 1928 cmci_reenable();
1894 for (i = 0; i < banks; i++) { 1929 for (i = 0; i < banks; i++) {
1895 if (!skip_bank_init(i)) 1930 struct mce_bank *b = &mce_banks[i];
1896 wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); 1931 if (b->init)
1932 wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
1897 } 1933 }
1898} 1934}
1899 1935
@@ -1925,7 +1961,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1925 case CPU_DOWN_FAILED: 1961 case CPU_DOWN_FAILED:
1926 case CPU_DOWN_FAILED_FROZEN: 1962 case CPU_DOWN_FAILED_FROZEN:
1927 t->expires = round_jiffies(jiffies + 1963 t->expires = round_jiffies(jiffies +
1928 __get_cpu_var(next_interval)); 1964 __get_cpu_var(mce_next_interval));
1929 add_timer_on(t, cpu); 1965 add_timer_on(t, cpu);
1930 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); 1966 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
1931 break; 1967 break;
@@ -1941,35 +1977,21 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = {
1941 .notifier_call = mce_cpu_callback, 1977 .notifier_call = mce_cpu_callback,
1942}; 1978};
1943 1979
1944static __init int mce_init_banks(void) 1980static __init void mce_init_banks(void)
1945{ 1981{
1946 int i; 1982 int i;
1947 1983
1948 bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
1949 GFP_KERNEL);
1950 if (!bank_attrs)
1951 return -ENOMEM;
1952
1953 for (i = 0; i < banks; i++) { 1984 for (i = 0; i < banks; i++) {
1954 struct sysdev_attribute *a = &bank_attrs[i]; 1985 struct mce_bank *b = &mce_banks[i];
1986 struct sysdev_attribute *a = &b->attr;
1955 1987
1956 a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); 1988 a->attr.name = b->attrname;
1957 if (!a->attr.name) 1989 snprintf(b->attrname, ATTR_LEN, "bank%d", i);
1958 goto nomem;
1959 1990
1960 a->attr.mode = 0644; 1991 a->attr.mode = 0644;
1961 a->show = show_bank; 1992 a->show = show_bank;
1962 a->store = set_bank; 1993 a->store = set_bank;
1963 } 1994 }
1964 return 0;
1965
1966nomem:
1967 while (--i >= 0)
1968 kfree(bank_attrs[i].attr.name);
1969 kfree(bank_attrs);
1970 bank_attrs = NULL;
1971
1972 return -ENOMEM;
1973} 1995}
1974 1996
1975static __init int mce_init_device(void) 1997static __init int mce_init_device(void)
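
A note on the design choice here: with the name stored in the fixed attrname[ATTR_LEN] buffer inside struct mce_bank, there is no per-name allocation left that can fail, which is why mce_init_banks() loses its kasprintf() error unwinding and can return void. snprintf() truncates safely, and ATTR_LEN (16) comfortably holds "bank" plus any realistic bank index.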
@@ -1982,9 +2004,7 @@ static __init int mce_init_device(void)
1982 2004
1983 zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL); 2005 zalloc_cpumask_var(&mce_dev_initialized, GFP_KERNEL);
1984 2006
1985 err = mce_init_banks(); 2007 mce_init_banks();
1986 if (err)
1987 return err;
1988 2008
1989 err = sysdev_class_register(&mce_sysclass); 2009 err = sysdev_class_register(&mce_sysclass);
1990 if (err) 2010 if (err)
@@ -2004,57 +2024,65 @@ static __init int mce_init_device(void)
2004 2024
2005device_initcall(mce_init_device); 2025device_initcall(mce_init_device);
2006 2026
2007#else /* CONFIG_X86_OLD_MCE: */ 2027/*
2008 2028 * Old style boot options parsing. Only for compatibility.
2009int nr_mce_banks; 2029 */
2010EXPORT_SYMBOL_GPL(nr_mce_banks); /* non-fatal.o */ 2030static int __init mcheck_disable(char *str)
2031{
2032 mce_disabled = 1;
2033 return 1;
2034}
2035__setup("nomce", mcheck_disable);
2011 2036
2012/* This has to be run for each processor */ 2037#ifdef CONFIG_DEBUG_FS
2013void mcheck_init(struct cpuinfo_x86 *c) 2038struct dentry *mce_get_debugfs_dir(void)
2014{ 2039{
2015 if (mce_disabled) 2040 static struct dentry *dmce;
2016 return;
2017 2041
2018 switch (c->x86_vendor) { 2042 if (!dmce)
2019 case X86_VENDOR_AMD: 2043 dmce = debugfs_create_dir("mce", NULL);
2020 amd_mcheck_init(c);
2021 break;
2022 2044
2023 case X86_VENDOR_INTEL: 2045 return dmce;
2024 if (c->x86 == 5) 2046}
2025 intel_p5_mcheck_init(c);
2026 if (c->x86 == 6)
2027 intel_p6_mcheck_init(c);
2028 if (c->x86 == 15)
2029 intel_p4_mcheck_init(c);
2030 break;
2031 2047
2032 case X86_VENDOR_CENTAUR: 2048static void mce_reset(void)
2033 if (c->x86 == 5) 2049{
2034 winchip_mcheck_init(c); 2050 cpu_missing = 0;
2035 break; 2051 atomic_set(&mce_fake_paniced, 0);
2052 atomic_set(&mce_executing, 0);
2053 atomic_set(&mce_callin, 0);
2054 atomic_set(&global_nwo, 0);
2055}
2036 2056
2037 default: 2057static int fake_panic_get(void *data, u64 *val)
2038 break; 2058{
2039 } 2059 *val = fake_panic;
2040 printk(KERN_INFO "mce: CPU supports %d MCE banks\n", nr_mce_banks); 2060 return 0;
2041} 2061}
2042 2062
2043static int __init mcheck_enable(char *str) 2063static int fake_panic_set(void *data, u64 val)
2044{ 2064{
2045 mce_p5_enabled = 1; 2065 mce_reset();
2046 return 1; 2066 fake_panic = val;
2067 return 0;
2047} 2068}
2048__setup("mce", mcheck_enable);
2049 2069
2050#endif /* CONFIG_X86_OLD_MCE */ 2070DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
2071 fake_panic_set, "%llu\n");
2051 2072
2052/* 2073static int __init mce_debugfs_init(void)
2053 * Old style boot options parsing. Only for compatibility.
2054 */
2055static int __init mcheck_disable(char *str)
2056{ 2074{
2057 mce_disabled = 1; 2075 struct dentry *dmce, *ffake_panic;
2058 return 1; 2076
2077 dmce = mce_get_debugfs_dir();
2078 if (!dmce)
2079 return -ENOMEM;
2080 ffake_panic = debugfs_create_file("fake_panic", 0444, dmce, NULL,
2081 &fake_panic_fops);
2082 if (!ffake_panic)
2083 return -ENOMEM;
2084
2085 return 0;
2059} 2086}
2060__setup("nomce", mcheck_disable); 2087late_initcall(mce_debugfs_init);
2088#endif
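
Taken together, this gives a way to exercise the MCE monarch/subject rendezvous without taking the machine down: write 1 to /sys/kernel/debug/mce/fake_panic (path assuming debugfs is mounted at /sys/kernel/debug), inject an error through the mce-inject interface above, and mce_panic() prints "Fake kernel panic: ..." instead of calling panic(). fake_panic_set() also calls mce_reset(), so the callin/executing counters are rearmed for repeated runs.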
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ddae21620bda..8cd5224943b5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -69,7 +69,7 @@ struct threshold_bank {
69 struct threshold_block *blocks; 69 struct threshold_block *blocks;
70 cpumask_var_t cpus; 70 cpumask_var_t cpus;
71}; 71};
72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); 72static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
73 73
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75static unsigned char shared_bank[NR_BANKS] = { 75static unsigned char shared_bank[NR_BANKS] = {
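
The threshold_banks change is purely declarative: DEFINE_PER_CPU() wants the complete type of the per-CPU object, array dimension included, as its first argument, rather than having the dimension folded into the name. A generic sketch of the pattern (my_counters is a made-up name):

	#include <linux/percpu.h>

	/* four ints per CPU; note the dimension rides with the type */
	static DEFINE_PER_CPU(int [4], my_counters);

	/* used as: per_cpu(my_counters, cpu)[i] */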
@@ -489,12 +489,14 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
489 int i, err = 0; 489 int i, err = 0;
490 struct threshold_bank *b = NULL; 490 struct threshold_bank *b = NULL;
491 char name[32]; 491 char name[32];
492 struct cpuinfo_x86 *c = &cpu_data(cpu);
493
492 494
493 sprintf(name, "threshold_bank%i", bank); 495 sprintf(name, "threshold_bank%i", bank);
494 496
495#ifdef CONFIG_SMP 497#ifdef CONFIG_SMP
496 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ 498 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
497 i = cpumask_first(cpu_core_mask(cpu)); 499 i = cpumask_first(c->llc_shared_map);
498 500
499 /* first core not up yet */ 501 /* first core not up yet */
500 if (cpu_data(i).cpu_core_id) 502 if (cpu_data(i).cpu_core_id)
@@ -514,7 +516,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
514 if (err) 516 if (err)
515 goto out; 517 goto out;
516 518
517 cpumask_copy(b->cpus, cpu_core_mask(cpu)); 519 cpumask_copy(b->cpus, c->llc_shared_map);
518 per_cpu(threshold_banks, cpu)[bank] = b; 520 per_cpu(threshold_banks, cpu)[bank] = b;
519 521
520 goto out; 522 goto out;
@@ -539,7 +541,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
539#ifndef CONFIG_SMP 541#ifndef CONFIG_SMP
540 cpumask_setall(b->cpus); 542 cpumask_setall(b->cpus);
541#else 543#else
542 cpumask_copy(b->cpus, cpu_core_mask(cpu)); 544 cpumask_copy(b->cpus, c->llc_shared_map);
543#endif 545#endif
544 546
545 per_cpu(threshold_banks, cpu)[bank] = b; 547 per_cpu(threshold_banks, cpu)[bank] = b;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index e1acec0f7a32..889f665fe93d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -90,7 +90,7 @@ static void cmci_discover(int banks, int boot)
90 if (test_bit(i, owned)) 90 if (test_bit(i, owned))
91 continue; 91 continue;
92 92
93 rdmsrl(MSR_IA32_MC0_CTL2 + i, val); 93 rdmsrl(MSR_IA32_MCx_CTL2(i), val);
94 94
95 /* Already owned by someone else? */ 95 /* Already owned by someone else? */
96 if (val & CMCI_EN) { 96 if (val & CMCI_EN) {
@@ -101,8 +101,8 @@ static void cmci_discover(int banks, int boot)
101 } 101 }
102 102
103 val |= CMCI_EN | CMCI_THRESHOLD; 103 val |= CMCI_EN | CMCI_THRESHOLD;
104 wrmsrl(MSR_IA32_MC0_CTL2 + i, val); 104 wrmsrl(MSR_IA32_MCx_CTL2(i), val);
105 rdmsrl(MSR_IA32_MC0_CTL2 + i, val); 105 rdmsrl(MSR_IA32_MCx_CTL2(i), val);
106 106
107 /* Did the enable bit stick? -- the bank supports CMCI */ 107 /* Did the enable bit stick? -- the bank supports CMCI */
108 if (val & CMCI_EN) { 108 if (val & CMCI_EN) {
@@ -152,9 +152,9 @@ void cmci_clear(void)
152 if (!test_bit(i, __get_cpu_var(mce_banks_owned))) 152 if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
153 continue; 153 continue;
154 /* Disable CMCI */ 154 /* Disable CMCI */
155 rdmsrl(MSR_IA32_MC0_CTL2 + i, val); 155 rdmsrl(MSR_IA32_MCx_CTL2(i), val);
156 val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK); 156 val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
157 wrmsrl(MSR_IA32_MC0_CTL2 + i, val); 157 wrmsrl(MSR_IA32_MCx_CTL2(i), val);
158 __clear_bit(i, __get_cpu_var(mce_banks_owned)); 158 __clear_bit(i, __get_cpu_var(mce_banks_owned));
159 } 159 }
160 spin_unlock_irqrestore(&cmci_discover_lock, flags); 160 spin_unlock_irqrestore(&cmci_discover_lock, flags);
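
One detail worth noting: unlike the CTL/STATUS/ADDR/MISC registers, which sit four MSRs apart per bank, the old code here added the bank index directly (MSR_IA32_MC0_CTL2 + i), so the CMCI control MSRs are contiguous and the new helper presumably reduces to:

	#define MSR_IA32_MCx_CTL2(x)	(MSR_IA32_MC0_CTL2 + (x))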
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c
deleted file mode 100644
index f5f2d6f71fb6..000000000000
--- a/arch/x86/kernel/cpu/mcheck/non-fatal.c
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * Non Fatal Machine Check Exception Reporting
3 *
4 * (C) Copyright 2002 Dave Jones. <davej@redhat.com>
5 *
6 * This file contains routines to check for non-fatal MCEs every 15s
7 *
8 */
9#include <linux/interrupt.h>
10#include <linux/workqueue.h>
11#include <linux/jiffies.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/init.h>
16#include <linux/smp.h>
17
18#include <asm/processor.h>
19#include <asm/system.h>
20#include <asm/mce.h>
21#include <asm/msr.h>
22
23static int firstbank;
24
25#define MCE_RATE (15*HZ) /* timer rate is 15s */
26
27static void mce_checkregs(void *info)
28{
29 u32 low, high;
30 int i;
31
32 for (i = firstbank; i < nr_mce_banks; i++) {
33 rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
34
35 if (!(high & (1<<31)))
36 continue;
37
38 printk(KERN_INFO "MCE: The hardware reports a non fatal, "
39 "correctable incident occurred on CPU %d.\n",
40 smp_processor_id());
41
42 printk(KERN_INFO "Bank %d: %08x%08x\n", i, high, low);
43
44 /*
45 * Scrub the error so we don't pick it up in MCE_RATE
46 * seconds time:
47 */
48 wrmsr(MSR_IA32_MC0_STATUS+i*4, 0UL, 0UL);
49
50 /* Serialize: */
51 wmb();
52 add_taint(TAINT_MACHINE_CHECK);
53 }
54}
55
56static void mce_work_fn(struct work_struct *work);
57static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
58
59static void mce_work_fn(struct work_struct *work)
60{
61 on_each_cpu(mce_checkregs, NULL, 1);
62 schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
63}
64
65static int __init init_nonfatal_mce_checker(void)
66{
67 struct cpuinfo_x86 *c = &boot_cpu_data;
68
69 /* Check for MCE support */
70 if (!cpu_has(c, X86_FEATURE_MCE))
71 return -ENODEV;
72
73 /* Check for PPro style MCA */
74 if (!cpu_has(c, X86_FEATURE_MCA))
75 return -ENODEV;
76
77 /* Some Athlons misbehave when we frob bank 0 */
78 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
79 boot_cpu_data.x86 == 6)
80 firstbank = 1;
81 else
82 firstbank = 0;
83
84 /*
85 * Check for non-fatal errors every MCE_RATE s
86 */
87 schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
88 printk(KERN_INFO "Machine check exception polling timer started.\n");
89
90 return 0;
91}
92module_init(init_nonfatal_mce_checker);
93
94MODULE_LICENSE("GPL");
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
deleted file mode 100644
index 4482aea9aa2e..000000000000
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ /dev/null
@@ -1,163 +0,0 @@
1/*
2 * P4 specific Machine Check Exception Reporting
3 */
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/init.h>
7#include <linux/smp.h>
8
9#include <asm/processor.h>
10#include <asm/mce.h>
11#include <asm/msr.h>
12
13/* as supported by the P4/Xeon family */
14struct intel_mce_extended_msrs {
15 u32 eax;
16 u32 ebx;
17 u32 ecx;
18 u32 edx;
19 u32 esi;
20 u32 edi;
21 u32 ebp;
22 u32 esp;
23 u32 eflags;
24 u32 eip;
25 /* u32 *reserved[]; */
26};
27
28static int mce_num_extended_msrs;
29
30/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
31static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
32{
33 u32 h;
34
35 rdmsr(MSR_IA32_MCG_EAX, r->eax, h);
36 rdmsr(MSR_IA32_MCG_EBX, r->ebx, h);
37 rdmsr(MSR_IA32_MCG_ECX, r->ecx, h);
38 rdmsr(MSR_IA32_MCG_EDX, r->edx, h);
39 rdmsr(MSR_IA32_MCG_ESI, r->esi, h);
40 rdmsr(MSR_IA32_MCG_EDI, r->edi, h);
41 rdmsr(MSR_IA32_MCG_EBP, r->ebp, h);
42 rdmsr(MSR_IA32_MCG_ESP, r->esp, h);
43 rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h);
44 rdmsr(MSR_IA32_MCG_EIP, r->eip, h);
45}
46
47static void intel_machine_check(struct pt_regs *regs, long error_code)
48{
49 u32 alow, ahigh, high, low;
50 u32 mcgstl, mcgsth;
51 int recover = 1;
52 int i;
53
54 rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
55 if (mcgstl & (1<<0)) /* Recoverable ? */
56 recover = 0;
57
58 printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
59 smp_processor_id(), mcgsth, mcgstl);
60
61 if (mce_num_extended_msrs > 0) {
62 struct intel_mce_extended_msrs dbg;
63
64 intel_get_extended_msrs(&dbg);
65
66 printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
67 "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
68 "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
69 smp_processor_id(), dbg.eip, dbg.eflags,
70 dbg.eax, dbg.ebx, dbg.ecx, dbg.edx,
71 dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
72 }
73
74 for (i = 0; i < nr_mce_banks; i++) {
75 rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
76 if (high & (1<<31)) {
77 char misc[20];
78 char addr[24];
79
80 misc[0] = addr[0] = '\0';
81 if (high & (1<<29))
82 recover |= 1;
83 if (high & (1<<25))
84 recover |= 2;
85 high &= ~(1<<31);
86 if (high & (1<<27)) {
87 rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
88 snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
89 }
90 if (high & (1<<26)) {
91 rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
92 snprintf(addr, 24, " at %08x%08x", ahigh, alow);
93 }
94 printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
95 smp_processor_id(), i, high, low, misc, addr);
96 }
97 }
98
99 if (recover & 2)
100 panic("CPU context corrupt");
101 if (recover & 1)
102 panic("Unable to continue");
103
104 printk(KERN_EMERG "Attempting to continue.\n");
105
106 /*
107 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
108 * recoverable/continuable.This will allow BIOS to look at the MSRs
109 * for errors if the OS could not log the error.
110 */
111 for (i = 0; i < nr_mce_banks; i++) {
112 u32 msr;
113 msr = MSR_IA32_MC0_STATUS+i*4;
114 rdmsr(msr, low, high);
115 if (high&(1<<31)) {
116 /* Clear it */
117 wrmsr(msr, 0UL, 0UL);
118 /* Serialize */
119 wmb();
120 add_taint(TAINT_MACHINE_CHECK);
121 }
122 }
123 mcgstl &= ~(1<<2);
124 wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
125}
126
127void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
128{
129 u32 l, h;
130 int i;
131
132 machine_check_vector = intel_machine_check;
133 wmb();
134
135 printk(KERN_INFO "Intel machine check architecture supported.\n");
136 rdmsr(MSR_IA32_MCG_CAP, l, h);
137 if (l & (1<<8)) /* Control register present ? */
138 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
139 nr_mce_banks = l & 0xff;
140
141 for (i = 0; i < nr_mce_banks; i++) {
142 wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
143 wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
144 }
145
146 set_in_cr4(X86_CR4_MCE);
147 printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
148 smp_processor_id());
149
150 /* Check for P4/Xeon extended MCE MSRs */
151 rdmsr(MSR_IA32_MCG_CAP, l, h);
152 if (l & (1<<9)) {/* MCG_EXT_P */
153 mce_num_extended_msrs = (l >> 16) & 0xff;
154 printk(KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
155 " available\n",
156 smp_processor_id(), mce_num_extended_msrs);
157
158#ifdef CONFIG_X86_MCE_P4THERMAL
159 /* Check for P4/Xeon Thermal monitor */
160 intel_init_thermal(c);
161#endif
162 }
163}
diff --git a/arch/x86/kernel/cpu/mcheck/p6.c b/arch/x86/kernel/cpu/mcheck/p6.c
deleted file mode 100644
index 01e4f8178183..000000000000
--- a/arch/x86/kernel/cpu/mcheck/p6.c
+++ /dev/null
@@ -1,127 +0,0 @@
1/*
2 * P6 specific Machine Check Exception Reporting
3 * (C) Copyright 2002 Alan Cox <alan@lxorguk.ukuu.org.uk>
4 */
5#include <linux/interrupt.h>
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/init.h>
9#include <linux/smp.h>
10
11#include <asm/processor.h>
12#include <asm/system.h>
13#include <asm/mce.h>
14#include <asm/msr.h>
15
16/* Machine Check Handler For PII/PIII */
17static void intel_machine_check(struct pt_regs *regs, long error_code)
18{
19 u32 alow, ahigh, high, low;
20 u32 mcgstl, mcgsth;
21 int recover = 1;
22 int i;
23
24 rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
25 if (mcgstl & (1<<0)) /* Recoverable ? */
26 recover = 0;
27
28 printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
29 smp_processor_id(), mcgsth, mcgstl);
30
31 for (i = 0; i < nr_mce_banks; i++) {
32 rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
33 if (high & (1<<31)) {
34 char misc[20];
35 char addr[24];
36
37 misc[0] = '\0';
38 addr[0] = '\0';
39
40 if (high & (1<<29))
41 recover |= 1;
42 if (high & (1<<25))
43 recover |= 2;
44 high &= ~(1<<31);
45
46 if (high & (1<<27)) {
47 rdmsr(MSR_IA32_MC0_MISC+i*4, alow, ahigh);
48 snprintf(misc, 20, "[%08x%08x]", ahigh, alow);
49 }
50 if (high & (1<<26)) {
51 rdmsr(MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
52 snprintf(addr, 24, " at %08x%08x", ahigh, alow);
53 }
54
55 printk(KERN_EMERG "CPU %d: Bank %d: %08x%08x%s%s\n",
56 smp_processor_id(), i, high, low, misc, addr);
57 }
58 }
59
60 if (recover & 2)
61 panic("CPU context corrupt");
62 if (recover & 1)
63 panic("Unable to continue");
64
65 printk(KERN_EMERG "Attempting to continue.\n");
66 /*
67 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
68 * recoverable/continuable.This will allow BIOS to look at the MSRs
69 * for errors if the OS could not log the error:
70 */
71 for (i = 0; i < nr_mce_banks; i++) {
72 unsigned int msr;
73
74 msr = MSR_IA32_MC0_STATUS+i*4;
75 rdmsr(msr, low, high);
76 if (high & (1<<31)) {
77 /* Clear it: */
78 wrmsr(msr, 0UL, 0UL);
79 /* Serialize: */
80 wmb();
81 add_taint(TAINT_MACHINE_CHECK);
82 }
83 }
84 mcgstl &= ~(1<<2);
85 wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
86}
87
88/* Set up machine check reporting for processors with Intel style MCE: */
89void intel_p6_mcheck_init(struct cpuinfo_x86 *c)
90{
91 u32 l, h;
92 int i;
93
94 /* Check for MCE support */
95 if (!cpu_has(c, X86_FEATURE_MCE))
96 return;
97
98 /* Check for PPro style MCA */
99 if (!cpu_has(c, X86_FEATURE_MCA))
100 return;
101
102 /* Ok machine check is available */
103 machine_check_vector = intel_machine_check;
104 /* Make sure the vector pointer is visible before we enable MCEs: */
105 wmb();
106
107 printk(KERN_INFO "Intel machine check architecture supported.\n");
108 rdmsr(MSR_IA32_MCG_CAP, l, h);
109 if (l & (1<<8)) /* Control register present ? */
110 wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
111 nr_mce_banks = l & 0xff;
112
113 /*
114 * Following the example in IA-32 SDM Vol 3:
115 * - MC0_CTL should not be written
116 * - Status registers on all banks should be cleared on reset
117 */
118 for (i = 1; i < nr_mce_banks; i++)
119 wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
120
121 for (i = 0; i < nr_mce_banks; i++)
122 wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
123
124 set_in_cr4(X86_CR4_MCE);
125 printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
126 smp_processor_id());
127}
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 5957a93e5173..63a56d147e4a 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -260,9 +260,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
260 return; 260 return;
261 } 261 }
262 262
263 if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
264 tm2 = 1;
265
266 /* Check whether a vector already exists */ 263 /* Check whether a vector already exists */
267 if (h & APIC_VECTOR_MASK) { 264 if (h & APIC_VECTOR_MASK) {
268 printk(KERN_DEBUG 265 printk(KERN_DEBUG
@@ -271,6 +268,16 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
271 return; 268 return;
272 } 269 }
273 270
 271	 /* early Pentium M models use a different method for enabling TM2 */
272 if (cpu_has(c, X86_FEATURE_TM2)) {
273 if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
274 rdmsr(MSR_THERM2_CTL, l, h);
275 if (l & MSR_THERM2_CTL_TM_SELECT)
276 tm2 = 1;
277 } else if (l & MSR_IA32_MISC_ENABLE_TM2)
278 tm2 = 1;
279 }
280
274 /* We'll mask the thermal vector in the lapic till we're ready: */ 281 /* We'll mask the thermal vector in the lapic till we're ready: */
275 h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED; 282 h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
276 apic_write(APIC_LVTTHMR, h); 283 apic_write(APIC_LVTTHMR, h);
diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
index ee2331b0e58f..33af14110dfd 100644
--- a/arch/x86/kernel/cpu/mtrr/amd.c
+++ b/arch/x86/kernel/cpu/mtrr/amd.c
@@ -7,15 +7,15 @@
7 7
8static void 8static void
9amd_get_mtrr(unsigned int reg, unsigned long *base, 9amd_get_mtrr(unsigned int reg, unsigned long *base,
10 unsigned long *size, mtrr_type * type) 10 unsigned long *size, mtrr_type *type)
11{ 11{
12 unsigned long low, high; 12 unsigned long low, high;
13 13
14 rdmsr(MSR_K6_UWCCR, low, high); 14 rdmsr(MSR_K6_UWCCR, low, high);
15 /* Upper dword is region 1, lower is region 0 */ 15 /* Upper dword is region 1, lower is region 0 */
16 if (reg == 1) 16 if (reg == 1)
17 low = high; 17 low = high;
18 /* The base masks off on the right alignment */ 18 /* The base masks off on the right alignment */
19 *base = (low & 0xFFFE0000) >> PAGE_SHIFT; 19 *base = (low & 0xFFFE0000) >> PAGE_SHIFT;
20 *type = 0; 20 *type = 0;
21 if (low & 1) 21 if (low & 1)
@@ -27,74 +27,81 @@ amd_get_mtrr(unsigned int reg, unsigned long *base,
27 return; 27 return;
28 } 28 }
29 /* 29 /*
30 * This needs a little explaining. The size is stored as an 30 * This needs a little explaining. The size is stored as an
31 * inverted mask of bits of 128K granularity 15 bits long offset 31 * inverted mask of bits of 128K granularity 15 bits long offset
32 * 2 bits 32 * 2 bits.
33 * 33 *
34	 * So to get a size we invert the mask and add 1 to the lowest	 34	 * So to get a size we invert the mask and add 1 to the lowest
35	 * mask bit (4, as it's 2 bits in). This gives us a size we then shift	 35	 * mask bit (4, as it's 2 bits in). This gives us a size we then shift
36 * to turn into 128K blocks 36 * to turn into 128K blocks.
37 * 37 *
38 * eg 111 1111 1111 1100 is 512K 38 * eg 111 1111 1111 1100 is 512K
39 * 39 *
40 * invert 000 0000 0000 0011 40 * invert 000 0000 0000 0011
41 * +1 000 0000 0000 0100 41 * +1 000 0000 0000 0100
42 * *128K ... 42 * *128K ...
43 */ 43 */
44 low = (~low) & 0x1FFFC; 44 low = (~low) & 0x1FFFC;
45 *size = (low + 4) << (15 - PAGE_SHIFT); 45 *size = (low + 4) << (15 - PAGE_SHIFT);
46 return;
47} 46}
48 47
49static void amd_set_mtrr(unsigned int reg, unsigned long base, 48/**
50 unsigned long size, mtrr_type type) 49 * amd_set_mtrr - Set variable MTRR register on the local CPU.
51/* [SUMMARY] Set variable MTRR register on the local CPU. 50 *
52 <reg> The register to set. 51 * @reg The register to set.
53 <base> The base address of the region. 52 * @base The base address of the region.
54 <size> The size of the region. If this is 0 the region is disabled. 53 * @size The size of the region. If this is 0 the region is disabled.
55 <type> The type of the region. 54 * @type The type of the region.
56 [RETURNS] Nothing. 55 *
57*/ 56 * Returns nothing.
57 */
58static void
59amd_set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
58{ 60{
59 u32 regs[2]; 61 u32 regs[2];
60 62
61 /* 63 /*
62 * Low is MTRR0 , High MTRR 1 64 * Low is MTRR0, High MTRR 1
63 */ 65 */
64 rdmsr(MSR_K6_UWCCR, regs[0], regs[1]); 66 rdmsr(MSR_K6_UWCCR, regs[0], regs[1]);
65 /* 67 /*
66 * Blank to disable 68 * Blank to disable
67 */ 69 */
68 if (size == 0) 70 if (size == 0) {
69 regs[reg] = 0; 71 regs[reg] = 0;
70 else 72 } else {
71 /* Set the register to the base, the type (off by one) and an 73 /*
72	 inverted bitmask of the size. The size is the only odd	 74	 * Set the register to the base, the type (off by one) and an
73	 bit. We are fed, say, 512K. We invert this and we get 111 1111	 75	 * inverted bitmask of the size. The size is the only odd
74	 1111 1011 but if you subtract one and invert you get the	 76	 * bit. We are fed, say, 512K. We invert this and we get 111 1111
75 desired 111 1111 1111 1100 mask 77 * 1111 1011 but if you subtract one and invert you get the
76 78 * desired 111 1111 1111 1100 mask
77 But ~(x - 1) == ~x + 1 == -x. Two's complement rocks! */ 79 *
80 * But ~(x - 1) == ~x + 1 == -x. Two's complement rocks!
81 */
78 regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC) 82 regs[reg] = (-size >> (15 - PAGE_SHIFT) & 0x0001FFFC)
79 | (base << PAGE_SHIFT) | (type + 1); 83 | (base << PAGE_SHIFT) | (type + 1);
84 }
80 85
81 /* 86 /*
82	 /*	 86	 /*
82	 * The writeback rule is quite specific. See the manual. It is:	 87	 * The writeback rule is quite specific. See the manual. It is:
83	 * disable local interrupts, write back the cache, set the mtrr	 88	 * disable local interrupts, write back the cache, set the mtrr
84 */ 89 */
85 wbinvd(); 90 wbinvd();
86 wrmsr(MSR_K6_UWCCR, regs[0], regs[1]); 91 wrmsr(MSR_K6_UWCCR, regs[0], regs[1]);
87} 92}
88 93
89static int amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type) 94static int
95amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
90{ 96{
91 /* Apply the K6 block alignment and size rules 97 /*
92 In order 98 * Apply the K6 block alignment and size rules
93 o Uncached or gathering only 99 * In order
94 o 128K or bigger block 100 * o Uncached or gathering only
95 o Power of 2 block 101 * o 128K or bigger block
96 o base suitably aligned to the power 102 * o Power of 2 block
97 */ 103 * o base suitably aligned to the power
104 */
98 if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT)) 105 if (type > MTRR_TYPE_WRCOMB || size < (1 << (17 - PAGE_SHIFT))
99 || (size & ~(size - 1)) - size || (base & (size - 1))) 106 || (size & ~(size - 1)) - size || (base & (size - 1)))
100 return -EINVAL; 107 return -EINVAL;
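
The encode comment in amd_set_mtrr() and the decode comment in amd_get_mtrr() are two halves of the same round trip. A worked example in plain C, outside the kernel, using the 512K case from the comments (a PAGE_SHIFT of 12, i.e. 4K pages, is an assumption):

	#include <stdio.h>

	#define PAGE_SHIFT 12			/* assumed 4K pages */

	int main(void)
	{
		unsigned long size = (512 * 1024) >> PAGE_SHIFT; /* in pages */

		/* encode, as amd_set_mtrr() does: -size is the inverted mask */
		unsigned long mask = -size >> (15 - PAGE_SHIFT) & 0x0001FFFC;

		/* decode, as amd_get_mtrr() does: invert, +4, back to pages */
		unsigned long low   = (~mask) & 0x1FFFC;
		unsigned long pages = (low + 4) << (15 - PAGE_SHIFT);

		/* prints mask=0x1fff0 size=512K */
		printf("mask=%#lx size=%luK\n", mask, pages << (PAGE_SHIFT - 10));
		return 0;
	}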
@@ -115,5 +122,3 @@ int __init amd_init_mtrr(void)
115 set_mtrr_ops(&amd_mtrr_ops); 122 set_mtrr_ops(&amd_mtrr_ops);
116 return 0; 123 return 0;
117} 124}
118
119//arch_initcall(amd_mtrr_init);
diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
index cb9aa3a7a7ab..de89f14eff3a 100644
--- a/arch/x86/kernel/cpu/mtrr/centaur.c
+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
@@ -1,7 +1,9 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/mm.h> 2#include <linux/mm.h>
3
3#include <asm/mtrr.h> 4#include <asm/mtrr.h>
4#include <asm/msr.h> 5#include <asm/msr.h>
6
5#include "mtrr.h" 7#include "mtrr.h"
6 8
7static struct { 9static struct {
@@ -12,25 +14,25 @@ static struct {
12static u8 centaur_mcr_reserved; 14static u8 centaur_mcr_reserved;
13static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */ 15static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
14 16
15/* 17/**
16 * Report boot time MCR setups 18 * centaur_get_free_region - Get a free MTRR.
19 *
20 * @base: The starting (base) address of the region.
21 * @size: The size (in bytes) of the region.
22 *
 23	 * Returns: the index of the region on success, else -ENOSPC on error.
17 */ 24 */
18
19static int 25static int
20centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg) 26centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
21/* [SUMMARY] Get a free MTRR.
22 <base> The starting (base) address of the region.
23 <size> The size (in bytes) of the region.
24 [RETURNS] The index of the region on success, else -1 on error.
25*/
26{ 27{
27 int i, max;
28 mtrr_type ltype;
29 unsigned long lbase, lsize; 28 unsigned long lbase, lsize;
29 mtrr_type ltype;
30 int i, max;
30 31
31 max = num_var_ranges; 32 max = num_var_ranges;
32 if (replace_reg >= 0 && replace_reg < max) 33 if (replace_reg >= 0 && replace_reg < max)
33 return replace_reg; 34 return replace_reg;
35
34 for (i = 0; i < max; ++i) { 36 for (i = 0; i < max; ++i) {
35 if (centaur_mcr_reserved & (1 << i)) 37 if (centaur_mcr_reserved & (1 << i))
36 continue; 38 continue;
@@ -38,11 +40,14 @@ centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
38 if (lsize == 0) 40 if (lsize == 0)
39 return i; 41 return i;
40 } 42 }
43
41 return -ENOSPC; 44 return -ENOSPC;
42} 45}
43 46
44void 47/*
45mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) 48 * Report boot time MCR setups
49 */
50void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
46{ 51{
47 centaur_mcr[mcr].low = lo; 52 centaur_mcr[mcr].low = lo;
48 centaur_mcr[mcr].high = hi; 53 centaur_mcr[mcr].high = hi;
@@ -54,33 +59,35 @@ centaur_get_mcr(unsigned int reg, unsigned long *base,
54{ 59{
55 *base = centaur_mcr[reg].high >> PAGE_SHIFT; 60 *base = centaur_mcr[reg].high >> PAGE_SHIFT;
56 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; 61 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
57 *type = MTRR_TYPE_WRCOMB; /* If it is there, it is write-combining */ 62 *type = MTRR_TYPE_WRCOMB; /* write-combining */
63
58 if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2)) 64 if (centaur_mcr_type == 1 && ((centaur_mcr[reg].low & 31) & 2))
59 *type = MTRR_TYPE_UNCACHABLE; 65 *type = MTRR_TYPE_UNCACHABLE;
60 if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25) 66 if (centaur_mcr_type == 1 && (centaur_mcr[reg].low & 31) == 25)
61 *type = MTRR_TYPE_WRBACK; 67 *type = MTRR_TYPE_WRBACK;
62 if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31) 68 if (centaur_mcr_type == 0 && (centaur_mcr[reg].low & 31) == 31)
63 *type = MTRR_TYPE_WRBACK; 69 *type = MTRR_TYPE_WRBACK;
64
65} 70}
66 71
67static void centaur_set_mcr(unsigned int reg, unsigned long base, 72static void
68 unsigned long size, mtrr_type type) 73centaur_set_mcr(unsigned int reg, unsigned long base,
74 unsigned long size, mtrr_type type)
69{ 75{
70 unsigned long low, high; 76 unsigned long low, high;
71 77
72 if (size == 0) { 78 if (size == 0) {
73 /* Disable */ 79 /* Disable */
74 high = low = 0; 80 high = low = 0;
75 } else { 81 } else {
76 high = base << PAGE_SHIFT; 82 high = base << PAGE_SHIFT;
77 if (centaur_mcr_type == 0) 83 if (centaur_mcr_type == 0) {
78 low = -size << PAGE_SHIFT | 0x1f; /* only support write-combining... */ 84 /* Only support write-combining... */
79 else { 85 low = -size << PAGE_SHIFT | 0x1f;
86 } else {
80 if (type == MTRR_TYPE_UNCACHABLE) 87 if (type == MTRR_TYPE_UNCACHABLE)
81 low = -size << PAGE_SHIFT | 0x02; /* NC */ 88 low = -size << PAGE_SHIFT | 0x02; /* NC */
82 else 89 else
83 low = -size << PAGE_SHIFT | 0x09; /* WWO,WC */ 90 low = -size << PAGE_SHIFT | 0x09; /* WWO, WC */
84 } 91 }
85 } 92 }
86 centaur_mcr[reg].high = high; 93 centaur_mcr[reg].high = high;
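
The `-size << PAGE_SHIFT` trick in centaur_set_mcr() relies on the same two's-complement identity quoted in the AMD code above: for a power-of-two region, negating the size (in pages) and shifting it up yields the all-ones address mask directly. A one-line illustration (4K pages assumed):

	/* 128K region = 0x20 pages: -0x20UL << 12 == 0xfffe0000 on 32-bit */
	unsigned long low = -(unsigned long)0x20 << PAGE_SHIFT;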
@@ -88,118 +95,16 @@ static void centaur_set_mcr(unsigned int reg, unsigned long base,
88 wrmsr(MSR_IDT_MCR0 + reg, low, high); 95 wrmsr(MSR_IDT_MCR0 + reg, low, high);
89} 96}
90 97
91#if 0 98static int
92/* 99centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
93 * Initialise the later (saner) Winchip MCR variant. In this version
94 * the BIOS can pass us the registers it has used (but not their values)
95 * and the control register is read/write
96 */
97
98static void __init
99centaur_mcr1_init(void)
100{
101 unsigned i;
102 u32 lo, hi;
103
104 /* Unfortunately, MCR's are read-only, so there is no way to
105 * find out what the bios might have done.
106 */
107
108 rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
109 if (((lo >> 17) & 7) == 1) { /* Type 1 Winchip2 MCR */
110 lo &= ~0x1C0; /* clear key */
111 lo |= 0x040; /* set key to 1 */
112 wrmsr(MSR_IDT_MCR_CTRL, lo, hi); /* unlock MCR */
113 }
114
115 centaur_mcr_type = 1;
116
117 /*
118 * Clear any unconfigured MCR's.
119 */
120
121 for (i = 0; i < 8; ++i) {
122 if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0) {
123 if (!(lo & (1 << (9 + i))))
124 wrmsr(MSR_IDT_MCR0 + i, 0, 0);
125 else
126 /*
127 * If the BIOS set up an MCR we cannot see it
128 * but we don't wish to obliterate it
129 */
130 centaur_mcr_reserved |= (1 << i);
131 }
132 }
133 /*
134 * Throw the main write-combining switch...
135 * However if OOSTORE is enabled then people have already done far
136 * cleverer things and we should behave.
137 */
138
139 lo |= 15; /* Write combine enables */
140 wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
141}
142
143/*
144 * Initialise the original winchip with read only MCR registers
145 * no used bitmask for the BIOS to pass on and write only control
146 */
147
148static void __init
149centaur_mcr0_init(void)
150{
151 unsigned i;
152
153 /* Unfortunately, MCR's are read-only, so there is no way to
154 * find out what the bios might have done.
155 */
156
157 /* Clear any unconfigured MCR's.
158 * This way we are sure that the centaur_mcr array contains the actual
159 * values. The disadvantage is that any BIOS tweaks are thus undone.
160 *
161 */
162 for (i = 0; i < 8; ++i) {
163 if (centaur_mcr[i].high == 0 && centaur_mcr[i].low == 0)
164 wrmsr(MSR_IDT_MCR0 + i, 0, 0);
165 }
166
167 wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); /* Write only */
168}
169
170/*
171 * Initialise Winchip series MCR registers
172 */
173
174static void __init
175centaur_mcr_init(void)
176{
177 struct set_mtrr_context ctxt;
178
179 set_mtrr_prepare_save(&ctxt);
180 set_mtrr_cache_disable(&ctxt);
181
182 if (boot_cpu_data.x86_model == 4)
183 centaur_mcr0_init();
184 else if (boot_cpu_data.x86_model == 8 || boot_cpu_data.x86_model == 9)
185 centaur_mcr1_init();
186
187 set_mtrr_done(&ctxt);
188}
189#endif
190
191static int centaur_validate_add_page(unsigned long base,
192 unsigned long size, unsigned int type)
193{ 100{
194 /* 101 /*
195 * FIXME: Winchip2 supports uncached 102 * FIXME: Winchip2 supports uncached
196 */ 103 */
197 if (type != MTRR_TYPE_WRCOMB && 104 if (type != MTRR_TYPE_WRCOMB &&
198 (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) { 105 (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
199 printk(KERN_WARNING 106 pr_warning("mtrr: only write-combining%s supported\n",
200 "mtrr: only write-combining%s supported\n", 107 centaur_mcr_type ? " and uncacheable are" : " is");
201 centaur_mcr_type ? " and uncacheable are"
202 : " is");
203 return -EINVAL; 108 return -EINVAL;
204 } 109 }
205 return 0; 110 return 0;
@@ -207,7 +112,6 @@ static int centaur_validate_add_page(unsigned long base,
207 112
208static struct mtrr_ops centaur_mtrr_ops = { 113static struct mtrr_ops centaur_mtrr_ops = {
209 .vendor = X86_VENDOR_CENTAUR, 114 .vendor = X86_VENDOR_CENTAUR,
210// .init = centaur_mcr_init,
211 .set = centaur_set_mcr, 115 .set = centaur_set_mcr,
212 .get = centaur_get_mcr, 116 .get = centaur_get_mcr,
213 .get_free_region = centaur_get_free_region, 117 .get_free_region = centaur_get_free_region,
@@ -220,5 +124,3 @@ int __init centaur_init_mtrr(void)
220 set_mtrr_ops(&centaur_mtrr_ops); 124 set_mtrr_ops(&centaur_mtrr_ops);
221 return 0; 125 return 0;
222} 126}
223
224//arch_initcall(centaur_init_mtrr);
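
The low/high MCR split used by centaur_set_mcr() above is compact enough to be easy to misread: the high word carries the byte address of the region base, while the low word carries the two's-complement of the size as an address mask, plus the key bits. A minimal userspace sketch of that encoding (assuming PAGE_SHIFT == 12 and the type-0 write-combining key 0x1f; illustrative only, not kernel code):

    /* Standalone demo of the Winchip MCR low-word trick: for a
     * power-of-two size, -size has ones in every bit above
     * log2(size), which is exactly the address mask the MCR wants.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long base = 0x100;     /* region base, in pages (1M) */
            unsigned long size = 0x100;     /* region size, in pages (1M) */

            unsigned long high = base << PAGE_SHIFT;
            unsigned long low  = (-size << PAGE_SHIFT) | 0x1f; /* WC key */

            printf("MCR high (base): 0x%08lx\n", high & 0xffffffff);
            printf("MCR low  (mask): 0x%08lx\n", low & 0xffffffff);
            return 0;
    }

Printed mask here is 0xfff00000, i.e. all address bits above the 1M size, which matches what wrmsr(MSR_IDT_MCR0 + reg, low, high) programs in the hunk above.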
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 1d584a18a50d..315738c74aad 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -1,51 +1,75 @@
1/* MTRR (Memory Type Range Register) cleanup 1/*
2 2 * MTRR (Memory Type Range Register) cleanup
3 Copyright (C) 2009 Yinghai Lu 3 *
4 4 * Copyright (C) 2009 Yinghai Lu
5 This library is free software; you can redistribute it and/or 5 *
6 modify it under the terms of the GNU Library General Public 6 * This library is free software; you can redistribute it and/or
7 License as published by the Free Software Foundation; either 7 * modify it under the terms of the GNU Library General Public
8 version 2 of the License, or (at your option) any later version. 8 * License as published by the Free Software Foundation; either
9 9 * version 2 of the License, or (at your option) any later version.
10 This library is distributed in the hope that it will be useful, 10 *
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * This library is distributed in the hope that it will be useful,
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 Library General Public License for more details. 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 14 * Library General Public License for more details.
15 You should have received a copy of the GNU Library General Public 15 *
16 License along with this library; if not, write to the Free 16 * You should have received a copy of the GNU Library General Public
17 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 * License along with this library; if not, write to the Free
18*/ 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 19 */
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/cpu.h> 24#include <linux/cpu.h>
25#include <linux/mutex.h>
26#include <linux/sort.h> 25#include <linux/sort.h>
26#include <linux/mutex.h>
27#include <linux/uaccess.h>
28#include <linux/kvm_para.h>
27 29
30#include <asm/processor.h>
28#include <asm/e820.h> 31#include <asm/e820.h>
29#include <asm/mtrr.h> 32#include <asm/mtrr.h>
30#include <asm/uaccess.h>
31#include <asm/processor.h>
32#include <asm/msr.h> 33#include <asm/msr.h>
33#include <asm/kvm_para.h>
34#include "mtrr.h"
35 34
36/* should be related to MTRR_VAR_RANGES nums */ 35#include "mtrr.h"
37#define RANGE_NUM 256
38 36
39struct res_range { 37struct res_range {
40 unsigned long start; 38 unsigned long start;
41 unsigned long end; 39 unsigned long end;
40};
41
42struct var_mtrr_range_state {
43 unsigned long base_pfn;
44 unsigned long size_pfn;
45 mtrr_type type;
46};
47
48struct var_mtrr_state {
49 unsigned long range_startk;
50 unsigned long range_sizek;
51 unsigned long chunk_sizek;
52 unsigned long gran_sizek;
53 unsigned int reg;
42}; 54};
43 55
56/* Should be related to MTRR_VAR_RANGES nums */
57#define RANGE_NUM 256
58
59static struct res_range __initdata range[RANGE_NUM];
60static int __initdata nr_range;
61
62static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
63
64static int __initdata debug_print;
65#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
66
67
44static int __init 68static int __init
45add_range(struct res_range *range, int nr_range, unsigned long start, 69add_range(struct res_range *range, int nr_range,
46 unsigned long end) 70 unsigned long start, unsigned long end)
47{ 71{
48 /* out of slots */ 72 /* Out of slots: */
49 if (nr_range >= RANGE_NUM) 73 if (nr_range >= RANGE_NUM)
50 return nr_range; 74 return nr_range;
51 75
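
The new Dprintk() wrapper above folds the repeated "if (debug_print) printk(KERN_DEBUG ...)" pattern used later in this file into one macro. The do { } while (0) body is the usual idiom for making a macro expand to a single statement; a standalone sketch (printf standing in for printk):

    /* Why do { } while (0): without it, the macro body would misbind
     * in an un-braced if/else, attaching the else to the wrong if.
     */
    #include <stdio.h>

    static int debug_print = 1;

    #define Dprintk(x...) do { if (debug_print) printf(x); } while (0)

    int main(void)
    {
            int found = 0;

            if (found)
                    Dprintk("found range\n");
            else    /* safe: Dprintk() is one statement, else binds here */
                    Dprintk("no range found\n");
            return 0;
    }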
@@ -58,12 +82,12 @@ add_range(struct res_range *range, int nr_range, unsigned long start,
58} 82}
59 83
60static int __init 84static int __init
61add_range_with_merge(struct res_range *range, int nr_range, unsigned long start, 85add_range_with_merge(struct res_range *range, int nr_range,
62 unsigned long end) 86 unsigned long start, unsigned long end)
63{ 87{
64 int i; 88 int i;
65 89
66 /* try to merge it with old one */ 90 /* Try to merge it with old one: */
67 for (i = 0; i < nr_range; i++) { 91 for (i = 0; i < nr_range; i++) {
68 unsigned long final_start, final_end; 92 unsigned long final_start, final_end;
69 unsigned long common_start, common_end; 93 unsigned long common_start, common_end;
@@ -84,7 +108,7 @@ add_range_with_merge(struct res_range *range, int nr_range, unsigned long start,
84 return nr_range; 108 return nr_range;
85 } 109 }
86 110
87 /* need to add that */ 111 /* Need to add it: */
88 return add_range(range, nr_range, start, end); 112 return add_range(range, nr_range, start, end);
89} 113}
90 114
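
add_range_with_merge() grows an existing entry to the union whenever the new interval overlaps or touches it, and only falls back to add_range() for a disjoint interval. A simplified userspace model (inclusive ends, as in struct res_range; the kernel version computes the overlap via common_start/common_end, which this sketch collapses into one disjointness check):

    #include <stdio.h>

    #define RANGE_NUM 8

    struct res_range { unsigned long start, end; };

    static int add_range(struct res_range *range, int nr_range,
                         unsigned long start, unsigned long end)
    {
            if (nr_range >= RANGE_NUM)
                    return nr_range;        /* out of slots */
            range[nr_range].start = start;
            range[nr_range].end = end;
            return nr_range + 1;
    }

    static int add_range_with_merge(struct res_range *range, int nr_range,
                                    unsigned long start, unsigned long end)
    {
            int i;

            for (i = 0; i < nr_range; i++) {
                    /* Disjoint and not even adjacent? Keep looking: */
                    if (range[i].end + 1 < start || range[i].start > end + 1)
                            continue;
                    /* Grow this entry to the union of the two: */
                    if (range[i].start < start)
                            start = range[i].start;
                    if (range[i].end > end)
                            end = range[i].end;
                    range[i].start = start;
                    range[i].end = end;
                    return nr_range;
            }
            return add_range(range, nr_range, start, end);
    }

    int main(void)
    {
            struct res_range r[RANGE_NUM] = {{0}};
            int n = 0;

            n = add_range_with_merge(r, n, 0, 0xff);
            n = add_range_with_merge(r, n, 0x80, 0x1ff);  /* overlaps */
            printf("nr_range=%d first=[%lx-%lx]\n", n, r[0].start, r[0].end);
            return 0;
    }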
@@ -117,7 +141,7 @@ subtract_range(struct res_range *range, unsigned long start, unsigned long end)
117 } 141 }
118 142
119 if (start > range[j].start && end < range[j].end) { 143 if (start > range[j].start && end < range[j].end) {
120 /* find the new spare */ 144 /* Find the new spare: */
121 for (i = 0; i < RANGE_NUM; i++) { 145 for (i = 0; i < RANGE_NUM; i++) {
122 if (range[i].end == 0) 146 if (range[i].end == 0)
123 break; 147 break;
@@ -146,14 +170,8 @@ static int __init cmp_range(const void *x1, const void *x2)
146 return start1 - start2; 170 return start1 - start2;
147} 171}
148 172
149struct var_mtrr_range_state { 173#define BIOS_BUG_MSG KERN_WARNING \
150 unsigned long base_pfn; 174 "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
151 unsigned long size_pfn;
152 mtrr_type type;
153};
154
155static struct var_mtrr_range_state __initdata range_state[RANGE_NUM];
156static int __initdata debug_print;
157 175
158static int __init 176static int __init
159x86_get_mtrr_mem_range(struct res_range *range, int nr_range, 177x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
@@ -180,7 +198,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
180 range[i].start, range[i].end + 1); 198 range[i].start, range[i].end + 1);
181 } 199 }
182 200
183 /* take out UC ranges */ 201 /* Take out UC ranges: */
184 for (i = 0; i < num_var_ranges; i++) { 202 for (i = 0; i < num_var_ranges; i++) {
185 type = range_state[i].type; 203 type = range_state[i].type;
186 if (type != MTRR_TYPE_UNCACHABLE && 204 if (type != MTRR_TYPE_UNCACHABLE &&
@@ -193,9 +211,7 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
193 if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed && 211 if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
194 (mtrr_state.enabled & 1)) { 212 (mtrr_state.enabled & 1)) {
195 /* Var MTRR contains UC entry below 1M? Skip it: */ 213 /* Var MTRR contains UC entry below 1M? Skip it: */
196 printk(KERN_WARNING "WARNING: BIOS bug: VAR MTRR %d " 214 printk(BIOS_BUG_MSG, i);
197 "contains strange UC entry under 1M, check "
198 "with your system vendor!\n", i);
199 if (base + size <= (1<<(20-PAGE_SHIFT))) 215 if (base + size <= (1<<(20-PAGE_SHIFT)))
200 continue; 216 continue;
201 size -= (1<<(20-PAGE_SHIFT)) - base; 217 size -= (1<<(20-PAGE_SHIFT)) - base;
@@ -237,17 +253,13 @@ x86_get_mtrr_mem_range(struct res_range *range, int nr_range,
237 return nr_range; 253 return nr_range;
238} 254}
239 255
240static struct res_range __initdata range[RANGE_NUM];
241static int __initdata nr_range;
242
243#ifdef CONFIG_MTRR_SANITIZER 256#ifdef CONFIG_MTRR_SANITIZER
244 257
245static unsigned long __init sum_ranges(struct res_range *range, int nr_range) 258static unsigned long __init sum_ranges(struct res_range *range, int nr_range)
246{ 259{
247 unsigned long sum; 260 unsigned long sum = 0;
248 int i; 261 int i;
249 262
250 sum = 0;
251 for (i = 0; i < nr_range; i++) 263 for (i = 0; i < nr_range; i++)
252 sum += range[i].end + 1 - range[i].start; 264 sum += range[i].end + 1 - range[i].start;
253 265
@@ -278,17 +290,9 @@ static int __init mtrr_cleanup_debug_setup(char *str)
278} 290}
279early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup); 291early_param("mtrr_cleanup_debug", mtrr_cleanup_debug_setup);
280 292
281struct var_mtrr_state {
282 unsigned long range_startk;
283 unsigned long range_sizek;
284 unsigned long chunk_sizek;
285 unsigned long gran_sizek;
286 unsigned int reg;
287};
288
289static void __init 293static void __init
290set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, 294set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
291 unsigned char type, unsigned int address_bits) 295 unsigned char type, unsigned int address_bits)
292{ 296{
293 u32 base_lo, base_hi, mask_lo, mask_hi; 297 u32 base_lo, base_hi, mask_lo, mask_hi;
294 u64 base, mask; 298 u64 base, mask;
@@ -301,7 +305,7 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
301 mask = (1ULL << address_bits) - 1; 305 mask = (1ULL << address_bits) - 1;
302 mask &= ~((((u64)sizek) << 10) - 1); 306 mask &= ~((((u64)sizek) << 10) - 1);
303 307
304 base = ((u64)basek) << 10; 308 base = ((u64)basek) << 10;
305 309
306 base |= type; 310 base |= type;
307 mask |= 0x800; 311 mask |= 0x800;
@@ -317,15 +321,14 @@ set_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
317 321
318static void __init 322static void __init
319save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek, 323save_var_mtrr(unsigned int reg, unsigned long basek, unsigned long sizek,
320 unsigned char type) 324 unsigned char type)
321{ 325{
322 range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10); 326 range_state[reg].base_pfn = basek >> (PAGE_SHIFT - 10);
323 range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10); 327 range_state[reg].size_pfn = sizek >> (PAGE_SHIFT - 10);
324 range_state[reg].type = type; 328 range_state[reg].type = type;
325} 329}
326 330
327static void __init 331static void __init set_var_mtrr_all(unsigned int address_bits)
328set_var_mtrr_all(unsigned int address_bits)
329{ 332{
330 unsigned long basek, sizek; 333 unsigned long basek, sizek;
331 unsigned char type; 334 unsigned char type;
@@ -342,11 +345,11 @@ set_var_mtrr_all(unsigned int address_bits)
342 345
343static unsigned long to_size_factor(unsigned long sizek, char *factorp) 346static unsigned long to_size_factor(unsigned long sizek, char *factorp)
344{ 347{
345 char factor;
346 unsigned long base = sizek; 348 unsigned long base = sizek;
349 char factor;
347 350
348 if (base & ((1<<10) - 1)) { 351 if (base & ((1<<10) - 1)) {
349 /* not MB alignment */ 352 /* Not MB-aligned: */
350 factor = 'K'; 353 factor = 'K';
351 } else if (base & ((1<<20) - 1)) { 354 } else if (base & ((1<<20) - 1)) {
352 factor = 'M'; 355 factor = 'M';
@@ -372,11 +375,12 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
372 unsigned long max_align, align; 375 unsigned long max_align, align;
373 unsigned long sizek; 376 unsigned long sizek;
374 377
375 /* Compute the maximum size I can make a range */ 378 /* Compute the maximum size with which we can make a range: */
376 if (range_startk) 379 if (range_startk)
377 max_align = ffs(range_startk) - 1; 380 max_align = ffs(range_startk) - 1;
378 else 381 else
379 max_align = 32; 382 max_align = 32;
383
380 align = fls(range_sizek) - 1; 384 align = fls(range_sizek) - 1;
381 if (align > max_align) 385 if (align > max_align)
382 align = max_align; 386 align = max_align;
@@ -386,11 +390,10 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
386 char start_factor = 'K', size_factor = 'K'; 390 char start_factor = 'K', size_factor = 'K';
387 unsigned long start_base, size_base; 391 unsigned long start_base, size_base;
388 392
389 start_base = to_size_factor(range_startk, 393 start_base = to_size_factor(range_startk, &start_factor);
390 &start_factor), 394 size_base = to_size_factor(sizek, &size_factor);
391 size_base = to_size_factor(sizek, &size_factor),
392 395
393 printk(KERN_DEBUG "Setting variable MTRR %d, " 396 Dprintk("Setting variable MTRR %d, "
394 "base: %ld%cB, range: %ld%cB, type %s\n", 397 "base: %ld%cB, range: %ld%cB, type %s\n",
395 reg, start_base, start_factor, 398 reg, start_base, start_factor,
396 size_base, size_factor, 399 size_base, size_factor,
@@ -425,10 +428,11 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
425 chunk_sizek = state->chunk_sizek; 428 chunk_sizek = state->chunk_sizek;
426 gran_sizek = state->gran_sizek; 429 gran_sizek = state->gran_sizek;
427 430
428 /* align with gran size, prevent small block used up MTRRs */ 431 /* Align with gran size, prevent small block used up MTRRs: */
429 range_basek = ALIGN(state->range_startk, gran_sizek); 432 range_basek = ALIGN(state->range_startk, gran_sizek);
430 if ((range_basek > basek) && basek) 433 if ((range_basek > basek) && basek)
431 return second_sizek; 434 return second_sizek;
435
432 state->range_sizek -= (range_basek - state->range_startk); 436 state->range_sizek -= (range_basek - state->range_startk);
433 range_sizek = ALIGN(state->range_sizek, gran_sizek); 437 range_sizek = ALIGN(state->range_sizek, gran_sizek);
434 438
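
The granularity step at the top of range_to_mtrr_with_hole() relies on ALIGN() rounding up to the next multiple of a power of two: the base is moved up to a gran_sizek boundary, the size is shrunk by what that move ate, and the remainder is re-rounded to gran multiples so sub-granularity scraps never cost an MTRR. A small sketch with made-up sizes (values in KB, matching the *_sizek variables; the kernel then checks the rounded size against the original before committing, which the truncated hunk above handles):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long range_startk = 63 * 1024;   /* 63M, oddly placed */
            unsigned long range_sizek  = 130 * 1024;  /* 130M              */
            unsigned long gran_sizek   = 64 * 1024;   /* 64M granularity   */

            unsigned long range_basek = ALIGN(range_startk, gran_sizek);

            /* Shrink by what the base move consumed, re-round the rest: */
            range_sizek -= range_basek - range_startk;
            range_sizek  = ALIGN(range_sizek, gran_sizek);

            printf("base %luK size %luK\n", range_basek, range_sizek);
            return 0;
    }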
@@ -439,22 +443,21 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
439 } 443 }
440 state->range_sizek = range_sizek; 444 state->range_sizek = range_sizek;
441 445
442 /* try to append some small hole */ 446 /* Try to append some small hole: */
443 range0_basek = state->range_startk; 447 range0_basek = state->range_startk;
444 range0_sizek = ALIGN(state->range_sizek, chunk_sizek); 448 range0_sizek = ALIGN(state->range_sizek, chunk_sizek);
445 449
446 /* no increase */ 450 /* No increase: */
447 if (range0_sizek == state->range_sizek) { 451 if (range0_sizek == state->range_sizek) {
448 if (debug_print) 452 Dprintk("rangeX: %016lx - %016lx\n",
449 printk(KERN_DEBUG "rangeX: %016lx - %016lx\n", 453 range0_basek<<10,
450 range0_basek<<10, 454 (range0_basek + state->range_sizek)<<10);
451 (range0_basek + state->range_sizek)<<10);
452 state->reg = range_to_mtrr(state->reg, range0_basek, 455 state->reg = range_to_mtrr(state->reg, range0_basek,
453 state->range_sizek, MTRR_TYPE_WRBACK); 456 state->range_sizek, MTRR_TYPE_WRBACK);
454 return 0; 457 return 0;
455 } 458 }
456 459
457 /* only cut back, when it is not the last */ 460 /* Only cut back when it is not the last: */
458 if (sizek) { 461 if (sizek) {
459 while (range0_basek + range0_sizek > (basek + sizek)) { 462 while (range0_basek + range0_sizek > (basek + sizek)) {
460 if (range0_sizek >= chunk_sizek) 463 if (range0_sizek >= chunk_sizek)
@@ -470,16 +473,16 @@ range_to_mtrr_with_hole(struct var_mtrr_state *state, unsigned long basek,
470second_try: 473second_try:
471 range_basek = range0_basek + range0_sizek; 474 range_basek = range0_basek + range0_sizek;
472 475
473 /* one hole in the middle */ 476 /* One hole in the middle: */
474 if (range_basek > basek && range_basek <= (basek + sizek)) 477 if (range_basek > basek && range_basek <= (basek + sizek))
475 second_sizek = range_basek - basek; 478 second_sizek = range_basek - basek;
476 479
477 if (range0_sizek > state->range_sizek) { 480 if (range0_sizek > state->range_sizek) {
478 481
479 /* one hole in middle or at end */ 482 /* One hole in middle or at the end: */
480 hole_sizek = range0_sizek - state->range_sizek - second_sizek; 483 hole_sizek = range0_sizek - state->range_sizek - second_sizek;
481 484
482 /* hole size should be less than half of range0 size */ 485 /* Hole size should be less than half of range0 size: */
483 if (hole_sizek >= (range0_sizek >> 1) && 486 if (hole_sizek >= (range0_sizek >> 1) &&
484 range0_sizek >= chunk_sizek) { 487 range0_sizek >= chunk_sizek) {
485 range0_sizek -= chunk_sizek; 488 range0_sizek -= chunk_sizek;
@@ -491,32 +494,30 @@ second_try:
491 } 494 }
492 495
493 if (range0_sizek) { 496 if (range0_sizek) {
494 if (debug_print) 497 Dprintk("range0: %016lx - %016lx\n",
495 printk(KERN_DEBUG "range0: %016lx - %016lx\n", 498 range0_basek<<10,
496 range0_basek<<10, 499 (range0_basek + range0_sizek)<<10);
497 (range0_basek + range0_sizek)<<10);
498 state->reg = range_to_mtrr(state->reg, range0_basek, 500 state->reg = range_to_mtrr(state->reg, range0_basek,
499 range0_sizek, MTRR_TYPE_WRBACK); 501 range0_sizek, MTRR_TYPE_WRBACK);
500 } 502 }
501 503
502 if (range0_sizek < state->range_sizek) { 504 if (range0_sizek < state->range_sizek) {
503 /* need to handle left over */ 505 /* Need to handle left over range: */
504 range_sizek = state->range_sizek - range0_sizek; 506 range_sizek = state->range_sizek - range0_sizek;
505 507
506 if (debug_print) 508 Dprintk("range: %016lx - %016lx\n",
507 printk(KERN_DEBUG "range: %016lx - %016lx\n", 509 range_basek<<10,
508 range_basek<<10, 510 (range_basek + range_sizek)<<10);
509 (range_basek + range_sizek)<<10); 511
510 state->reg = range_to_mtrr(state->reg, range_basek, 512 state->reg = range_to_mtrr(state->reg, range_basek,
511 range_sizek, MTRR_TYPE_WRBACK); 513 range_sizek, MTRR_TYPE_WRBACK);
512 } 514 }
513 515
514 if (hole_sizek) { 516 if (hole_sizek) {
515 hole_basek = range_basek - hole_sizek - second_sizek; 517 hole_basek = range_basek - hole_sizek - second_sizek;
516 if (debug_print) 518 Dprintk("hole: %016lx - %016lx\n",
517 printk(KERN_DEBUG "hole: %016lx - %016lx\n", 519 hole_basek<<10,
518 hole_basek<<10, 520 (hole_basek + hole_sizek)<<10);
519 (hole_basek + hole_sizek)<<10);
520 state->reg = range_to_mtrr(state->reg, hole_basek, 521 state->reg = range_to_mtrr(state->reg, hole_basek,
521 hole_sizek, MTRR_TYPE_UNCACHABLE); 522 hole_sizek, MTRR_TYPE_UNCACHABLE);
522 } 523 }
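
The range0/hole logic above prefers covering a range with one big chunk-aligned write-back mapping plus a smaller uncacheable "hole", rather than many small WB entries, as long as the hole stays under half of range0. A worked example with made-up sizes (all values in KB, as in the *_sizek variables):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned long range_sizek = 1536 * 1024;  /* 1.5G to cover */
            unsigned long chunk_sizek = 1024 * 1024;  /* 1G chunks     */

            unsigned long range0_sizek = ALIGN(range_sizek, chunk_sizek);
            unsigned long hole_sizek   = range0_sizek - range_sizek;

            /* The code above only keeps the hole if it is < half of range0: */
            if (hole_sizek < (range0_sizek >> 1))
                    printf("WB %luK + UC hole %luK (2 MTRRs)\n",
                           range0_sizek, hole_sizek);
            else
                    printf("hole too big, cut range0 back by a chunk\n");
            return 0;
    }

Here 1.5G becomes one 2G WB range plus a 512M UC hole at the end: two MTRRs instead of several.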
@@ -537,23 +538,23 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
537 basek = base_pfn << (PAGE_SHIFT - 10); 538 basek = base_pfn << (PAGE_SHIFT - 10);
538 sizek = size_pfn << (PAGE_SHIFT - 10); 539 sizek = size_pfn << (PAGE_SHIFT - 10);
539 540
540 /* See if I can merge with the last range */ 541 /* See if I can merge with the last range: */
541 if ((basek <= 1024) || 542 if ((basek <= 1024) ||
542 (state->range_startk + state->range_sizek == basek)) { 543 (state->range_startk + state->range_sizek == basek)) {
543 unsigned long endk = basek + sizek; 544 unsigned long endk = basek + sizek;
544 state->range_sizek = endk - state->range_startk; 545 state->range_sizek = endk - state->range_startk;
545 return; 546 return;
546 } 547 }
547 /* Write the range mtrrs */ 548 /* Write the range mtrrs: */
548 if (state->range_sizek != 0) 549 if (state->range_sizek != 0)
549 second_sizek = range_to_mtrr_with_hole(state, basek, sizek); 550 second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
550 551
551 /* Allocate an msr */ 552 /* Allocate an msr: */
552 state->range_startk = basek + second_sizek; 553 state->range_startk = basek + second_sizek;
553 state->range_sizek = sizek - second_sizek; 554 state->range_sizek = sizek - second_sizek;
554} 555}
555 556
556 /* mininum size of mtrr block that can take hole */ 557 /* Minimum size of mtrr block that can take hole: */
557static u64 mtrr_chunk_size __initdata = (256ULL<<20); 558static u64 mtrr_chunk_size __initdata = (256ULL<<20);
558 559
559static int __init parse_mtrr_chunk_size_opt(char *p) 560static int __init parse_mtrr_chunk_size_opt(char *p)
@@ -565,7 +566,7 @@ static int __init parse_mtrr_chunk_size_opt(char *p)
565} 566}
566early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt); 567early_param("mtrr_chunk_size", parse_mtrr_chunk_size_opt);
567 568
568/* granity of mtrr of block */ 569/* Granularity of mtrr of block: */
569static u64 mtrr_gran_size __initdata; 570static u64 mtrr_gran_size __initdata;
570 571
571static int __init parse_mtrr_gran_size_opt(char *p) 572static int __init parse_mtrr_gran_size_opt(char *p)
@@ -577,7 +578,7 @@ static int __init parse_mtrr_gran_size_opt(char *p)
577} 578}
578early_param("mtrr_gran_size", parse_mtrr_gran_size_opt); 579early_param("mtrr_gran_size", parse_mtrr_gran_size_opt);
579 580
580static int nr_mtrr_spare_reg __initdata = 581static unsigned long nr_mtrr_spare_reg __initdata =
581 CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT; 582 CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT;
582 583
583static int __init parse_mtrr_spare_reg(char *arg) 584static int __init parse_mtrr_spare_reg(char *arg)
@@ -586,7 +587,6 @@ static int __init parse_mtrr_spare_reg(char *arg)
586 nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0); 587 nr_mtrr_spare_reg = simple_strtoul(arg, NULL, 0);
587 return 0; 588 return 0;
588} 589}
589
590early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg); 590early_param("mtrr_spare_reg_nr", parse_mtrr_spare_reg);
591 591
592static int __init 592static int __init
@@ -594,8 +594,8 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
594 u64 chunk_size, u64 gran_size) 594 u64 chunk_size, u64 gran_size)
595{ 595{
596 struct var_mtrr_state var_state; 596 struct var_mtrr_state var_state;
597 int i;
598 int num_reg; 597 int num_reg;
598 int i;
599 599
600 var_state.range_startk = 0; 600 var_state.range_startk = 0;
601 var_state.range_sizek = 0; 601 var_state.range_sizek = 0;
@@ -605,17 +605,18 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
605 605
606 memset(range_state, 0, sizeof(range_state)); 606 memset(range_state, 0, sizeof(range_state));
607 607
608 /* Write the range etc */ 608 /* Write the range: */
609 for (i = 0; i < nr_range; i++) 609 for (i = 0; i < nr_range; i++) {
610 set_var_mtrr_range(&var_state, range[i].start, 610 set_var_mtrr_range(&var_state, range[i].start,
611 range[i].end - range[i].start + 1); 611 range[i].end - range[i].start + 1);
612 }
612 613
613 /* Write the last range */ 614 /* Write the last range: */
614 if (var_state.range_sizek != 0) 615 if (var_state.range_sizek != 0)
615 range_to_mtrr_with_hole(&var_state, 0, 0); 616 range_to_mtrr_with_hole(&var_state, 0, 0);
616 617
617 num_reg = var_state.reg; 618 num_reg = var_state.reg;
618 /* Clear out the extra MTRR's */ 619 /* Clear out the extra MTRR's: */
619 while (var_state.reg < num_var_ranges) { 620 while (var_state.reg < num_var_ranges) {
620 save_var_mtrr(var_state.reg, 0, 0, 0); 621 save_var_mtrr(var_state.reg, 0, 0, 0);
621 var_state.reg++; 622 var_state.reg++;
@@ -625,11 +626,11 @@ x86_setup_var_mtrrs(struct res_range *range, int nr_range,
625} 626}
626 627
627struct mtrr_cleanup_result { 628struct mtrr_cleanup_result {
628 unsigned long gran_sizek; 629 unsigned long gran_sizek;
629 unsigned long chunk_sizek; 630 unsigned long chunk_sizek;
630 unsigned long lose_cover_sizek; 631 unsigned long lose_cover_sizek;
631 unsigned int num_reg; 632 unsigned int num_reg;
632 int bad; 633 int bad;
633}; 634};
634 635
635/* 636/*
@@ -645,10 +646,10 @@ static unsigned long __initdata min_loss_pfn[RANGE_NUM];
645 646
646static void __init print_out_mtrr_range_state(void) 647static void __init print_out_mtrr_range_state(void)
647{ 648{
648 int i;
649 char start_factor = 'K', size_factor = 'K'; 649 char start_factor = 'K', size_factor = 'K';
650 unsigned long start_base, size_base; 650 unsigned long start_base, size_base;
651 mtrr_type type; 651 mtrr_type type;
652 int i;
652 653
653 for (i = 0; i < num_var_ranges; i++) { 654 for (i = 0; i < num_var_ranges; i++) {
654 655
@@ -676,10 +677,10 @@ static int __init mtrr_need_cleanup(void)
676 int i; 677 int i;
677 mtrr_type type; 678 mtrr_type type;
678 unsigned long size; 679 unsigned long size;
679 /* extra one for all 0 */ 680 /* Extra one for all 0: */
680 int num[MTRR_NUM_TYPES + 1]; 681 int num[MTRR_NUM_TYPES + 1];
681 682
682 /* check entries number */ 683 /* Check entries number: */
683 memset(num, 0, sizeof(num)); 684 memset(num, 0, sizeof(num));
684 for (i = 0; i < num_var_ranges; i++) { 685 for (i = 0; i < num_var_ranges; i++) {
685 type = range_state[i].type; 686 type = range_state[i].type;
@@ -693,88 +694,86 @@ static int __init mtrr_need_cleanup(void)
693 num[type]++; 694 num[type]++;
694 } 695 }
695 696
696 /* check if we got UC entries */ 697 /* Check if we got UC entries: */
697 if (!num[MTRR_TYPE_UNCACHABLE]) 698 if (!num[MTRR_TYPE_UNCACHABLE])
698 return 0; 699 return 0;
699 700
700 /* check if we only had WB and UC */ 701 /* Check if we only had WB and UC */
701 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != 702 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
702 num_var_ranges - num[MTRR_NUM_TYPES]) 703 num_var_ranges - num[MTRR_NUM_TYPES])
703 return 0; 704 return 0;
704 705
705 return 1; 706 return 1;
706} 707}
707 708
708static unsigned long __initdata range_sums; 709static unsigned long __initdata range_sums;
709static void __init mtrr_calc_range_state(u64 chunk_size, u64 gran_size, 710
710 unsigned long extra_remove_base, 711static void __init
711 unsigned long extra_remove_size, 712mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
712 int i) 713 unsigned long x_remove_base,
714 unsigned long x_remove_size, int i)
713{ 715{
714 int num_reg;
715 static struct res_range range_new[RANGE_NUM]; 716 static struct res_range range_new[RANGE_NUM];
716 static int nr_range_new;
717 unsigned long range_sums_new; 717 unsigned long range_sums_new;
718 static int nr_range_new;
719 int num_reg;
718 720
719 /* convert ranges to var ranges state */ 721 /* Convert ranges to var ranges state: */
720 num_reg = x86_setup_var_mtrrs(range, nr_range, 722 num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
721 chunk_size, gran_size);
722 723
723 /* we got new setting in range_state, check it */ 724 /* We got new setting in range_state, check it: */
724 memset(range_new, 0, sizeof(range_new)); 725 memset(range_new, 0, sizeof(range_new));
725 nr_range_new = x86_get_mtrr_mem_range(range_new, 0, 726 nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
726 extra_remove_base, extra_remove_size); 727 x_remove_base, x_remove_size);
727 range_sums_new = sum_ranges(range_new, nr_range_new); 728 range_sums_new = sum_ranges(range_new, nr_range_new);
728 729
729 result[i].chunk_sizek = chunk_size >> 10; 730 result[i].chunk_sizek = chunk_size >> 10;
730 result[i].gran_sizek = gran_size >> 10; 731 result[i].gran_sizek = gran_size >> 10;
731 result[i].num_reg = num_reg; 732 result[i].num_reg = num_reg;
733
732 if (range_sums < range_sums_new) { 734 if (range_sums < range_sums_new) {
733 result[i].lose_cover_sizek = 735 result[i].lose_cover_sizek = (range_sums_new - range_sums) << PSHIFT;
734 (range_sums_new - range_sums) << PSHIFT;
735 result[i].bad = 1; 736 result[i].bad = 1;
736 } else 737 } else {
737 result[i].lose_cover_sizek = 738 result[i].lose_cover_sizek = (range_sums - range_sums_new) << PSHIFT;
738 (range_sums - range_sums_new) << PSHIFT; 739 }
739 740
740 /* double check it */ 741 /* Double check it: */
741 if (!result[i].bad && !result[i].lose_cover_sizek) { 742 if (!result[i].bad && !result[i].lose_cover_sizek) {
742 if (nr_range_new != nr_range || 743 if (nr_range_new != nr_range || memcmp(range, range_new, sizeof(range)))
743 memcmp(range, range_new, sizeof(range))) 744 result[i].bad = 1;
744 result[i].bad = 1;
745 } 745 }
746 746
747 if (!result[i].bad && (range_sums - range_sums_new < 747 if (!result[i].bad && (range_sums - range_sums_new < min_loss_pfn[num_reg]))
748 min_loss_pfn[num_reg])) { 748 min_loss_pfn[num_reg] = range_sums - range_sums_new;
749 min_loss_pfn[num_reg] =
750 range_sums - range_sums_new;
751 }
752} 749}
753 750
754static void __init mtrr_print_out_one_result(int i) 751static void __init mtrr_print_out_one_result(int i)
755{ 752{
756 char gran_factor, chunk_factor, lose_factor;
757 unsigned long gran_base, chunk_base, lose_base; 753 unsigned long gran_base, chunk_base, lose_base;
754 char gran_factor, chunk_factor, lose_factor;
758 755
759 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor), 756 gran_base = to_size_factor(result[i].gran_sizek, &gran_factor),
760 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor), 757 chunk_base = to_size_factor(result[i].chunk_sizek, &chunk_factor),
761 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor), 758 lose_base = to_size_factor(result[i].lose_cover_sizek, &lose_factor),
762 printk(KERN_INFO "%sgran_size: %ld%c \tchunk_size: %ld%c \t", 759
763 result[i].bad ? "*BAD*" : " ", 760 pr_info("%sgran_size: %ld%c \tchunk_size: %ld%c \t",
764 gran_base, gran_factor, chunk_base, chunk_factor); 761 result[i].bad ? "*BAD*" : " ",
765 printk(KERN_CONT "num_reg: %d \tlose cover RAM: %s%ld%c\n", 762 gran_base, gran_factor, chunk_base, chunk_factor);
766 result[i].num_reg, result[i].bad ? "-" : "", 763 pr_cont("num_reg: %d \tlose cover RAM: %s%ld%c\n",
767 lose_base, lose_factor); 764 result[i].num_reg, result[i].bad ? "-" : "",
765 lose_base, lose_factor);
768} 766}
769 767
770static int __init mtrr_search_optimal_index(void) 768static int __init mtrr_search_optimal_index(void)
771{ 769{
772 int i;
773 int num_reg_good; 770 int num_reg_good;
774 int index_good; 771 int index_good;
772 int i;
775 773
776 if (nr_mtrr_spare_reg >= num_var_ranges) 774 if (nr_mtrr_spare_reg >= num_var_ranges)
777 nr_mtrr_spare_reg = num_var_ranges - 1; 775 nr_mtrr_spare_reg = num_var_ranges - 1;
776
778 num_reg_good = -1; 777 num_reg_good = -1;
779 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) { 778 for (i = num_var_ranges - nr_mtrr_spare_reg; i > 0; i--) {
780 if (!min_loss_pfn[i]) 779 if (!min_loss_pfn[i])
@@ -796,24 +795,24 @@ static int __init mtrr_search_optimal_index(void)
796 return index_good; 795 return index_good;
797} 796}
798 797
799
800int __init mtrr_cleanup(unsigned address_bits) 798int __init mtrr_cleanup(unsigned address_bits)
801{ 799{
802 unsigned long extra_remove_base, extra_remove_size; 800 unsigned long x_remove_base, x_remove_size;
803 unsigned long base, size, def, dummy; 801 unsigned long base, size, def, dummy;
804 mtrr_type type;
805 u64 chunk_size, gran_size; 802 u64 chunk_size, gran_size;
803 mtrr_type type;
806 int index_good; 804 int index_good;
807 int i; 805 int i;
808 806
809 if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1) 807 if (!is_cpu(INTEL) || enable_mtrr_cleanup < 1)
810 return 0; 808 return 0;
809
811 rdmsr(MSR_MTRRdefType, def, dummy); 810 rdmsr(MSR_MTRRdefType, def, dummy);
812 def &= 0xff; 811 def &= 0xff;
813 if (def != MTRR_TYPE_UNCACHABLE) 812 if (def != MTRR_TYPE_UNCACHABLE)
814 return 0; 813 return 0;
815 814
816 /* get it and store it aside */ 815 /* Get it and store it aside: */
817 memset(range_state, 0, sizeof(range_state)); 816 memset(range_state, 0, sizeof(range_state));
818 for (i = 0; i < num_var_ranges; i++) { 817 for (i = 0; i < num_var_ranges; i++) {
819 mtrr_if->get(i, &base, &size, &type); 818 mtrr_if->get(i, &base, &size, &type);
@@ -822,29 +821,28 @@ int __init mtrr_cleanup(unsigned address_bits)
822 range_state[i].type = type; 821 range_state[i].type = type;
823 } 822 }
824 823
825 /* check if we need handle it and can handle it */ 824 /* Check if we need handle it and can handle it: */
826 if (!mtrr_need_cleanup()) 825 if (!mtrr_need_cleanup())
827 return 0; 826 return 0;
828 827
829 /* print original var MTRRs at first, for debugging: */ 828 /* Print original var MTRRs at first, for debugging: */
830 printk(KERN_DEBUG "original variable MTRRs\n"); 829 printk(KERN_DEBUG "original variable MTRRs\n");
831 print_out_mtrr_range_state(); 830 print_out_mtrr_range_state();
832 831
833 memset(range, 0, sizeof(range)); 832 memset(range, 0, sizeof(range));
834 extra_remove_size = 0; 833 x_remove_size = 0;
835 extra_remove_base = 1 << (32 - PAGE_SHIFT); 834 x_remove_base = 1 << (32 - PAGE_SHIFT);
836 if (mtrr_tom2) 835 if (mtrr_tom2)
837 extra_remove_size = 836 x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
838 (mtrr_tom2 >> PAGE_SHIFT) - extra_remove_base; 837
839 nr_range = x86_get_mtrr_mem_range(range, 0, extra_remove_base, 838 nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
840 extra_remove_size);
841 /* 839 /*
842 * [0, 1M) should always be coverred by var mtrr with WB 840 * [0, 1M) should always be covered by var mtrr with WB
843 * and fixed mtrrs should take effective before var mtrr for it 841 * and fixed mtrrs should take effect before var mtrr for it:
844 */ 842 */
845 nr_range = add_range_with_merge(range, nr_range, 0, 843 nr_range = add_range_with_merge(range, nr_range, 0,
846 (1ULL<<(20 - PAGE_SHIFT)) - 1); 844 (1ULL<<(20 - PAGE_SHIFT)) - 1);
847 /* sort the ranges */ 845 /* Sort the ranges: */
848 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL); 846 sort(range, nr_range, sizeof(struct res_range), cmp_range, NULL);
849 847
850 range_sums = sum_ranges(range, nr_range); 848 range_sums = sum_ranges(range, nr_range);
@@ -854,7 +852,7 @@ int __init mtrr_cleanup(unsigned address_bits)
854 if (mtrr_chunk_size && mtrr_gran_size) { 852 if (mtrr_chunk_size && mtrr_gran_size) {
855 i = 0; 853 i = 0;
856 mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size, 854 mtrr_calc_range_state(mtrr_chunk_size, mtrr_gran_size,
857 extra_remove_base, extra_remove_size, i); 855 x_remove_base, x_remove_size, i);
858 856
859 mtrr_print_out_one_result(i); 857 mtrr_print_out_one_result(i);
860 858
@@ -880,7 +878,7 @@ int __init mtrr_cleanup(unsigned address_bits)
880 continue; 878 continue;
881 879
882 mtrr_calc_range_state(chunk_size, gran_size, 880 mtrr_calc_range_state(chunk_size, gran_size,
883 extra_remove_base, extra_remove_size, i); 881 x_remove_base, x_remove_size, i);
884 if (debug_print) { 882 if (debug_print) {
885 mtrr_print_out_one_result(i); 883 mtrr_print_out_one_result(i);
886 printk(KERN_INFO "\n"); 884 printk(KERN_INFO "\n");
@@ -890,7 +888,7 @@ int __init mtrr_cleanup(unsigned address_bits)
890 } 888 }
891 } 889 }
892 890
893 /* try to find the optimal index */ 891 /* Try to find the optimal index: */
894 index_good = mtrr_search_optimal_index(); 892 index_good = mtrr_search_optimal_index();
895 893
896 if (index_good != -1) { 894 if (index_good != -1) {
@@ -898,7 +896,7 @@ int __init mtrr_cleanup(unsigned address_bits)
898 i = index_good; 896 i = index_good;
899 mtrr_print_out_one_result(i); 897 mtrr_print_out_one_result(i);
900 898
901 /* convert ranges to var ranges state */ 899 /* Convert ranges to var ranges state: */
902 chunk_size = result[i].chunk_sizek; 900 chunk_size = result[i].chunk_sizek;
903 chunk_size <<= 10; 901 chunk_size <<= 10;
904 gran_size = result[i].gran_sizek; 902 gran_size = result[i].gran_sizek;
@@ -941,8 +939,8 @@ early_param("disable_mtrr_trim", disable_mtrr_trim_setup);
941 * Note this won't check if the MTRRs < 4GB where the magic bit doesn't 939 * Note this won't check if the MTRRs < 4GB where the magic bit doesn't
942 * apply to are wrong, but so far we don't know of any such case in the wild. 940 * apply to are wrong, but so far we don't know of any such case in the wild.
943 */ 941 */
944#define Tom2Enabled (1U << 21) 942#define Tom2Enabled (1U << 21)
945#define Tom2ForceMemTypeWB (1U << 22) 943#define Tom2ForceMemTypeWB (1U << 22)
946 944
947int __init amd_special_default_mtrr(void) 945int __init amd_special_default_mtrr(void)
948{ 946{
@@ -952,7 +950,7 @@ int __init amd_special_default_mtrr(void)
952 return 0; 950 return 0;
953 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) 951 if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11)
954 return 0; 952 return 0;
955 /* In case some hypervisor doesn't pass SYSCFG through */ 953 /* In case some hypervisor doesn't pass SYSCFG through: */
956 if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) 954 if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
957 return 0; 955 return 0;
958 /* 956 /*
@@ -965,19 +963,21 @@ int __init amd_special_default_mtrr(void)
965 return 0; 963 return 0;
966} 964}
967 965
968static u64 __init real_trim_memory(unsigned long start_pfn, 966static u64 __init
969 unsigned long limit_pfn) 967real_trim_memory(unsigned long start_pfn, unsigned long limit_pfn)
970{ 968{
971 u64 trim_start, trim_size; 969 u64 trim_start, trim_size;
970
972 trim_start = start_pfn; 971 trim_start = start_pfn;
973 trim_start <<= PAGE_SHIFT; 972 trim_start <<= PAGE_SHIFT;
973
974 trim_size = limit_pfn; 974 trim_size = limit_pfn;
975 trim_size <<= PAGE_SHIFT; 975 trim_size <<= PAGE_SHIFT;
976 trim_size -= trim_start; 976 trim_size -= trim_start;
977 977
978 return e820_update_range(trim_start, trim_size, E820_RAM, 978 return e820_update_range(trim_start, trim_size, E820_RAM, E820_RESERVED);
979 E820_RESERVED);
980} 979}
980
981/** 981/**
982 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs 982 * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs
983 * @end_pfn: ending page frame number 983 * @end_pfn: ending page frame number
@@ -985,7 +985,7 @@ static u64 __init real_trim_memory(unsigned long start_pfn,
985 * Some buggy BIOSes don't setup the MTRRs properly for systems with certain 985 * Some buggy BIOSes don't setup the MTRRs properly for systems with certain
986 * memory configurations. This routine checks that the highest MTRR matches 986 * memory configurations. This routine checks that the highest MTRR matches
987 * the end of memory, to make sure the MTRRs having a write back type cover 987 * the end of memory, to make sure the MTRRs having a write back type cover
988 * all of the memory the kernel is intending to use. If not, it'll trim any 988 * all of the memory the kernel is intending to use. If not, it'll trim any
989 * memory off the end by adjusting end_pfn, removing it from the kernel's 989 * memory off the end by adjusting end_pfn, removing it from the kernel's
990 * allocation pools, warning the user with an obnoxious message. 990 * allocation pools, warning the user with an obnoxious message.
991 */ 991 */
@@ -994,21 +994,22 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
994 unsigned long i, base, size, highest_pfn = 0, def, dummy; 994 unsigned long i, base, size, highest_pfn = 0, def, dummy;
995 mtrr_type type; 995 mtrr_type type;
996 u64 total_trim_size; 996 u64 total_trim_size;
997
998 /* extra one for all 0 */ 997 /* extra one for all 0 */
999 int num[MTRR_NUM_TYPES + 1]; 998 int num[MTRR_NUM_TYPES + 1];
999
1000 /* 1000 /*
1001 * Make sure we only trim uncachable memory on machines that 1001 * Make sure we only trim uncachable memory on machines that
1002 * support the Intel MTRR architecture: 1002 * support the Intel MTRR architecture:
1003 */ 1003 */
1004 if (!is_cpu(INTEL) || disable_mtrr_trim) 1004 if (!is_cpu(INTEL) || disable_mtrr_trim)
1005 return 0; 1005 return 0;
1006
1006 rdmsr(MSR_MTRRdefType, def, dummy); 1007 rdmsr(MSR_MTRRdefType, def, dummy);
1007 def &= 0xff; 1008 def &= 0xff;
1008 if (def != MTRR_TYPE_UNCACHABLE) 1009 if (def != MTRR_TYPE_UNCACHABLE)
1009 return 0; 1010 return 0;
1010 1011
1011 /* get it and store it aside */ 1012 /* Get it and store it aside: */
1012 memset(range_state, 0, sizeof(range_state)); 1013 memset(range_state, 0, sizeof(range_state));
1013 for (i = 0; i < num_var_ranges; i++) { 1014 for (i = 0; i < num_var_ranges; i++) {
1014 mtrr_if->get(i, &base, &size, &type); 1015 mtrr_if->get(i, &base, &size, &type);
@@ -1017,7 +1018,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1017 range_state[i].type = type; 1018 range_state[i].type = type;
1018 } 1019 }
1019 1020
1020 /* Find highest cached pfn */ 1021 /* Find highest cached pfn: */
1021 for (i = 0; i < num_var_ranges; i++) { 1022 for (i = 0; i < num_var_ranges; i++) {
1022 type = range_state[i].type; 1023 type = range_state[i].type;
1023 if (type != MTRR_TYPE_WRBACK) 1024 if (type != MTRR_TYPE_WRBACK)
@@ -1028,13 +1029,13 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1028 highest_pfn = base + size; 1029 highest_pfn = base + size;
1029 } 1030 }
1030 1031
1031 /* kvm/qemu doesn't have mtrr set right, don't trim them all */ 1032 /* kvm/qemu doesn't have mtrr set right, don't trim them all: */
1032 if (!highest_pfn) { 1033 if (!highest_pfn) {
1033 printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n"); 1034 printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
1034 return 0; 1035 return 0;
1035 } 1036 }
1036 1037
1037 /* check entries number */ 1038 /* Check entries number: */
1038 memset(num, 0, sizeof(num)); 1039 memset(num, 0, sizeof(num));
1039 for (i = 0; i < num_var_ranges; i++) { 1040 for (i = 0; i < num_var_ranges; i++) {
1040 type = range_state[i].type; 1041 type = range_state[i].type;
@@ -1046,11 +1047,11 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1046 num[type]++; 1047 num[type]++;
1047 } 1048 }
1048 1049
1049 /* no entry for WB? */ 1050 /* No entry for WB? */
1050 if (!num[MTRR_TYPE_WRBACK]) 1051 if (!num[MTRR_TYPE_WRBACK])
1051 return 0; 1052 return 0;
1052 1053
1053 /* check if we only had WB and UC */ 1054 /* Check if we only had WB and UC: */
1054 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] != 1055 if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
1055 num_var_ranges - num[MTRR_NUM_TYPES]) 1056 num_var_ranges - num[MTRR_NUM_TYPES])
1056 return 0; 1057 return 0;
@@ -1066,31 +1067,31 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1066 } 1067 }
1067 nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0); 1068 nr_range = x86_get_mtrr_mem_range(range, nr_range, 0, 0);
1068 1069
1070 /* Check the head: */
1069 total_trim_size = 0; 1071 total_trim_size = 0;
1070 /* check the head */
1071 if (range[0].start) 1072 if (range[0].start)
1072 total_trim_size += real_trim_memory(0, range[0].start); 1073 total_trim_size += real_trim_memory(0, range[0].start);
1073 /* check the holes */ 1074
1075 /* Check the holes: */
1074 for (i = 0; i < nr_range - 1; i++) { 1076 for (i = 0; i < nr_range - 1; i++) {
1075 if (range[i].end + 1 < range[i+1].start) 1077 if (range[i].end + 1 < range[i+1].start)
1076 total_trim_size += real_trim_memory(range[i].end + 1, 1078 total_trim_size += real_trim_memory(range[i].end + 1,
1077 range[i+1].start); 1079 range[i+1].start);
1078 } 1080 }
1079 /* check the top */ 1081
1082 /* Check the top: */
1080 i = nr_range - 1; 1083 i = nr_range - 1;
1081 if (range[i].end + 1 < end_pfn) 1084 if (range[i].end + 1 < end_pfn)
1082 total_trim_size += real_trim_memory(range[i].end + 1, 1085 total_trim_size += real_trim_memory(range[i].end + 1,
1083 end_pfn); 1086 end_pfn);
1084 1087
1085 if (total_trim_size) { 1088 if (total_trim_size) {
1086 printk(KERN_WARNING "WARNING: BIOS bug: CPU MTRRs don't cover" 1089 pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
1087 " all of memory, losing %lluMB of RAM.\n",
1088 total_trim_size >> 20);
1089 1090
1090 if (!changed_by_mtrr_cleanup) 1091 if (!changed_by_mtrr_cleanup)
1091 WARN_ON(1); 1092 WARN_ON(1);
1092 1093
1093 printk(KERN_INFO "update e820 for mtrr\n"); 1094 pr_info("update e820 for mtrr\n");
1094 update_e820(); 1095 update_e820();
1095 1096
1096 return 1; 1097 return 1;
@@ -1098,4 +1099,3 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
1098 1099
1099 return 0; 1100 return 0;
1100} 1101}
1101
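
For reference, the to_size_factor() helper that the cleanup code above uses for its human-readable gran_size/chunk_size reports picks the largest unit (K/M/G) that divides the size evenly. A standalone version (the 'G' branch is inferred from the truncated hunk earlier in this file; the K and M branches match the visible code):

    #include <stdio.h>

    static unsigned long to_size_factor(unsigned long sizek, char *factorp)
    {
            unsigned long base = sizek;
            char factor;

            if (base & ((1 << 10) - 1)) {           /* not MB-aligned */
                    factor = 'K';
            } else if (base & ((1 << 20) - 1)) {    /* not GB-aligned */
                    factor = 'M';
                    base >>= 10;
            } else {
                    factor = 'G';
                    base >>= 20;
            }
            *factorp = factor;
            return base;
    }

    int main(void)
    {
            char f;
            unsigned long b = to_size_factor(64 * 1024, &f);

            printf("%lu%c\n", b, f);    /* prints "64M" */
            return 0;
    }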
diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
index ff14c320040c..228d982ce09c 100644
--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
@@ -1,38 +1,40 @@
1#include <linux/init.h> 1#include <linux/init.h>
2#include <linux/io.h>
2#include <linux/mm.h> 3#include <linux/mm.h>
3#include <asm/mtrr.h> 4
4#include <asm/msr.h>
5#include <asm/io.h>
6#include <asm/processor-cyrix.h> 5#include <asm/processor-cyrix.h>
7#include <asm/processor-flags.h> 6#include <asm/processor-flags.h>
7#include <asm/mtrr.h>
8#include <asm/msr.h>
9
8#include "mtrr.h" 10#include "mtrr.h"
9 11
10static void 12static void
11cyrix_get_arr(unsigned int reg, unsigned long *base, 13cyrix_get_arr(unsigned int reg, unsigned long *base,
12 unsigned long *size, mtrr_type * type) 14 unsigned long *size, mtrr_type * type)
13{ 15{
14 unsigned long flags;
15 unsigned char arr, ccr3, rcr, shift; 16 unsigned char arr, ccr3, rcr, shift;
17 unsigned long flags;
16 18
17 arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */ 19 arr = CX86_ARR_BASE + (reg << 1) + reg; /* avoid multiplication by 3 */
18 20
19 /* Save flags and disable interrupts */
20 local_irq_save(flags); 21 local_irq_save(flags);
21 22
22 ccr3 = getCx86(CX86_CCR3); 23 ccr3 = getCx86(CX86_CCR3);
23 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ 24 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
24 ((unsigned char *) base)[3] = getCx86(arr); 25 ((unsigned char *)base)[3] = getCx86(arr);
25 ((unsigned char *) base)[2] = getCx86(arr + 1); 26 ((unsigned char *)base)[2] = getCx86(arr + 1);
26 ((unsigned char *) base)[1] = getCx86(arr + 2); 27 ((unsigned char *)base)[1] = getCx86(arr + 2);
27 rcr = getCx86(CX86_RCR_BASE + reg); 28 rcr = getCx86(CX86_RCR_BASE + reg);
28 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ 29 setCx86(CX86_CCR3, ccr3); /* disable MAPEN */
29 30
30 /* Enable interrupts if it was enabled previously */
31 local_irq_restore(flags); 31 local_irq_restore(flags);
32
32 shift = ((unsigned char *) base)[1] & 0x0f; 33 shift = ((unsigned char *) base)[1] & 0x0f;
33 *base >>= PAGE_SHIFT; 34 *base >>= PAGE_SHIFT;
34 35
35 /* Power of two, at least 4K on ARR0-ARR6, 256K on ARR7 36 /*
37 * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7
36 * Note: shift==0xf means 4G, this is unsupported. 38 * Note: shift==0xf means 4G, this is unsupported.
37 */ 39 */
38 if (shift) 40 if (shift)
@@ -76,17 +78,20 @@ cyrix_get_arr(unsigned int reg, unsigned long *base,
76 } 78 }
77} 79}
78 80
81/*
82 * cyrix_get_free_region - get a free ARR.
83 *
84 * @base: the starting (base) address of the region.
85 * @size: the size (in bytes) of the region.
86 *
87 * Returns: the index of the region on success, else -1 on error.
88*/
79static int 89static int
80cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg) 90cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
81/* [SUMMARY] Get a free ARR.
82 <base> The starting (base) address of the region.
83 <size> The size (in bytes) of the region.
84 [RETURNS] The index of the region on success, else -1 on error.
85*/
86{ 91{
87 int i;
88 mtrr_type ltype;
89 unsigned long lbase, lsize; 92 unsigned long lbase, lsize;
93 mtrr_type ltype;
94 int i;
90 95
91 switch (replace_reg) { 96 switch (replace_reg) {
92 case 7: 97 case 7:
@@ -107,14 +112,17 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
107 cyrix_get_arr(7, &lbase, &lsize, &ltype); 112 cyrix_get_arr(7, &lbase, &lsize, &ltype);
108 if (lsize == 0) 113 if (lsize == 0)
109 return 7; 114 return 7;
110 /* Else try ARR0-ARR6 first */ 115 /* Else try ARR0-ARR6 first */
111 } else { 116 } else {
112 for (i = 0; i < 7; i++) { 117 for (i = 0; i < 7; i++) {
113 cyrix_get_arr(i, &lbase, &lsize, &ltype); 118 cyrix_get_arr(i, &lbase, &lsize, &ltype);
114 if (lsize == 0) 119 if (lsize == 0)
115 return i; 120 return i;
116 } 121 }
117 /* ARR0-ARR6 isn't free, try ARR7 but its size must be at least 256K */ 122 /*
123 * ARR0-ARR6 isn't free
124 * try ARR7 but its size must be at least 256K
125 */
118 cyrix_get_arr(i, &lbase, &lsize, &ltype); 126 cyrix_get_arr(i, &lbase, &lsize, &ltype);
119 if ((lsize == 0) && (size >= 0x40)) 127 if ((lsize == 0) && (size >= 0x40))
120 return i; 128 return i;
@@ -122,21 +130,22 @@ cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
122 return -ENOSPC; 130 return -ENOSPC;
123} 131}
124 132
125static u32 cr4 = 0; 133static u32 cr4, ccr3;
126static u32 ccr3;
127 134
128static void prepare_set(void) 135static void prepare_set(void)
129{ 136{
130 u32 cr0; 137 u32 cr0;
131 138
132 /* Save value of CR4 and clear Page Global Enable (bit 7) */ 139 /* Save value of CR4 and clear Page Global Enable (bit 7) */
133 if ( cpu_has_pge ) { 140 if (cpu_has_pge) {
134 cr4 = read_cr4(); 141 cr4 = read_cr4();
135 write_cr4(cr4 & ~X86_CR4_PGE); 142 write_cr4(cr4 & ~X86_CR4_PGE);
136 } 143 }
137 144
138 /* Disable and flush caches. Note that wbinvd flushes the TLBs as 145 /*
139 a side-effect */ 146 * Disable and flush caches.
147 * Note that wbinvd flushes the TLBs as a side-effect
148 */
140 cr0 = read_cr0() | X86_CR0_CD; 149 cr0 = read_cr0() | X86_CR0_CD;
141 wbinvd(); 150 wbinvd();
142 write_cr0(cr0); 151 write_cr0(cr0);
@@ -147,22 +156,21 @@ static void prepare_set(void)
147 156
148 /* Cyrix ARRs - everything else was excluded at the top */ 157 /* Cyrix ARRs - everything else was excluded at the top */
149 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); 158 setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
150
151} 159}
152 160
153static void post_set(void) 161static void post_set(void)
154{ 162{
155 /* Flush caches and TLBs */ 163 /* Flush caches and TLBs */
156 wbinvd(); 164 wbinvd();
157 165
158 /* Cyrix ARRs - everything else was excluded at the top */ 166 /* Cyrix ARRs - everything else was excluded at the top */
159 setCx86(CX86_CCR3, ccr3); 167 setCx86(CX86_CCR3, ccr3);
160 168
161 /* Enable caches */ 169 /* Enable caches */
162 write_cr0(read_cr0() & 0xbfffffff); 170 write_cr0(read_cr0() & 0xbfffffff);
163 171
164 /* Restore value of CR4 */ 172 /* Restore value of CR4 */
165 if ( cpu_has_pge ) 173 if (cpu_has_pge)
166 write_cr4(cr4); 174 write_cr4(cr4);
167} 175}
168 176
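
The prepare_set()/post_set() pair above brackets every ARR update with the required sequence: drop global pages, disable and flush caches, unlock the registers, then undo it all in reverse. A userspace model with stub register accessors over fake variables — the ordering is the point, the hardware accesses are obviously not real here:

    #include <stdio.h>

    #define X86_CR0_CD  0x40000000UL
    #define X86_CR4_PGE 0x00000080UL

    static unsigned long fake_cr0, fake_cr4 = X86_CR4_PGE, saved_cr4;

    static unsigned long read_cr0(void)        { return fake_cr0; }
    static void write_cr0(unsigned long v)     { fake_cr0 = v; }
    static unsigned long read_cr4(void)        { return fake_cr4; }
    static void write_cr4(unsigned long v)     { fake_cr4 = v; }
    static void wbinvd(void) { printf("  wbinvd (flush caches + TLBs)\n"); }

    static void prepare_set(void)
    {
            unsigned long cr0;

            saved_cr4 = read_cr4();
            write_cr4(saved_cr4 & ~X86_CR4_PGE);  /* clear PGE first     */
            cr0 = read_cr0() | X86_CR0_CD;
            wbinvd();                             /* flush, then CD on   */
            write_cr0(cr0);
            printf("  (set MAPEN in CCR3 to expose the ARRs)\n");
    }

    static void post_set(void)
    {
            wbinvd();
            printf("  (restore CCR3, relocking the ARRs)\n");
            write_cr0(read_cr0() & ~X86_CR0_CD);  /* = & 0xbfffffff      */
            write_cr4(saved_cr4);
    }

    int main(void)
    {
            printf("prepare_set:\n"); prepare_set();
            printf("  ... program ARRs here ...\n");
            printf("post_set:\n");    post_set();
            return 0;
    }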
@@ -178,7 +186,8 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
178 size >>= 6; 186 size >>= 6;
179 187
180 size &= 0x7fff; /* make sure arr_size <= 14 */ 188 size &= 0x7fff; /* make sure arr_size <= 14 */
181 for (arr_size = 0; size; arr_size++, size >>= 1) ; 189 for (arr_size = 0; size; arr_size++, size >>= 1)
190 ;
182 191
183 if (reg < 7) { 192 if (reg < 7) {
184 switch (type) { 193 switch (type) {
@@ -215,18 +224,18 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
215 prepare_set(); 224 prepare_set();
216 225
217 base <<= PAGE_SHIFT; 226 base <<= PAGE_SHIFT;
218 setCx86(arr, ((unsigned char *) &base)[3]); 227 setCx86(arr + 0, ((unsigned char *)&base)[3]);
219 setCx86(arr + 1, ((unsigned char *) &base)[2]); 228 setCx86(arr + 1, ((unsigned char *)&base)[2]);
220 setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size); 229 setCx86(arr + 2, (((unsigned char *)&base)[1]) | arr_size);
221 setCx86(CX86_RCR_BASE + reg, arr_type); 230 setCx86(CX86_RCR_BASE + reg, arr_type);
222 231
223 post_set(); 232 post_set();
224} 233}
225 234
226typedef struct { 235typedef struct {
227 unsigned long base; 236 unsigned long base;
228 unsigned long size; 237 unsigned long size;
229 mtrr_type type; 238 mtrr_type type;
230} arr_state_t; 239} arr_state_t;
231 240
232static arr_state_t arr_state[8] = { 241static arr_state_t arr_state[8] = {
@@ -247,16 +256,17 @@ static void cyrix_set_all(void)
247 setCx86(CX86_CCR0 + i, ccr_state[i]); 256 setCx86(CX86_CCR0 + i, ccr_state[i]);
248 for (; i < 7; i++) 257 for (; i < 7; i++)
249 setCx86(CX86_CCR4 + i, ccr_state[i]); 258 setCx86(CX86_CCR4 + i, ccr_state[i]);
250 for (i = 0; i < 8; i++) 259
251 cyrix_set_arr(i, arr_state[i].base, 260 for (i = 0; i < 8; i++) {
261 cyrix_set_arr(i, arr_state[i].base,
252 arr_state[i].size, arr_state[i].type); 262 arr_state[i].size, arr_state[i].type);
263 }
253 264
254 post_set(); 265 post_set();
255} 266}
256 267
257static struct mtrr_ops cyrix_mtrr_ops = { 268static struct mtrr_ops cyrix_mtrr_ops = {
258 .vendor = X86_VENDOR_CYRIX, 269 .vendor = X86_VENDOR_CYRIX,
259// .init = cyrix_arr_init,
260 .set_all = cyrix_set_all, 270 .set_all = cyrix_set_all,
261 .set = cyrix_set_arr, 271 .set = cyrix_set_arr,
262 .get = cyrix_get_arr, 272 .get = cyrix_get_arr,
@@ -270,5 +280,3 @@ int __init cyrix_init_mtrr(void)
270 set_mtrr_ops(&cyrix_mtrr_ops); 280 set_mtrr_ops(&cyrix_mtrr_ops);
271 return 0; 281 return 0;
272} 282}
273
274//arch_initcall(cyrix_init_mtrr);
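
Both this file and the Centaur one end the same way: a struct mtrr_ops table of function pointers, registered once via set_mtrr_ops(), so the core MTRR code dispatches vendor-specifically without knowing about ARRs or MCRs. A minimal model of the pattern (names mirror the kernel's, but the types are simplified for this sketch):

    #include <stdio.h>

    struct mtrr_ops {
            int vendor;
            void (*set)(unsigned int reg, unsigned long base,
                        unsigned long size, int type);
            void (*get)(unsigned int reg, unsigned long *base,
                        unsigned long *size, int *type);
    };

    static const struct mtrr_ops *mtrr_if;

    static void set_mtrr_ops(const struct mtrr_ops *ops) { mtrr_if = ops; }

    static void cyrix_set_arr(unsigned int reg, unsigned long base,
                              unsigned long size, int type)
    {
            printf("cyrix: ARR%u <- base=%lx size=%lx type=%d\n",
                   reg, base, size, type);
    }

    static void cyrix_get_arr(unsigned int reg, unsigned long *base,
                              unsigned long *size, int *type)
    {
            *base = *size = 0;
            *type = 0;
    }

    static const struct mtrr_ops cyrix_mtrr_ops = {
            .vendor = 2,            /* stand-in for X86_VENDOR_CYRIX */
            .set    = cyrix_set_arr,
            .get    = cyrix_get_arr,
    };

    int main(void)
    {
            set_mtrr_ops(&cyrix_mtrr_ops);
            mtrr_if->set(0, 0x100, 0x100, 1);   /* dispatch via the table */
            return 0;
    }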
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 0543f69f0b27..55da0c5f68dd 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -1,28 +1,34 @@
1/* This only handles 32bit MTRR on 32bit hosts. This is strictly wrong 1/*
2 because MTRRs can span upto 40 bits (36bits on most modern x86) */ 2 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
3 * because MTRRs can span up to 40 bits (36 bits on most modern x86)
4 */
5#define DEBUG
6
7#include <linux/module.h>
3#include <linux/init.h> 8#include <linux/init.h>
4#include <linux/slab.h> 9#include <linux/slab.h>
10#include <linux/io.h>
5#include <linux/mm.h> 11#include <linux/mm.h>
6#include <linux/module.h> 12
7#include <asm/io.h>
8#include <asm/mtrr.h>
9#include <asm/msr.h>
10#include <asm/system.h>
11#include <asm/cpufeature.h>
12#include <asm/processor-flags.h> 13#include <asm/processor-flags.h>
14#include <asm/cpufeature.h>
13#include <asm/tlbflush.h> 15#include <asm/tlbflush.h>
16#include <asm/system.h>
17#include <asm/mtrr.h>
18#include <asm/msr.h>
14#include <asm/pat.h> 19#include <asm/pat.h>
20
15#include "mtrr.h" 21#include "mtrr.h"
16 22
17struct fixed_range_block { 23struct fixed_range_block {
18 int base_msr; /* start address of an MTRR block */ 24 int base_msr; /* start address of an MTRR block */
19 int ranges; /* number of MTRRs in this block */ 25 int ranges; /* number of MTRRs in this block */
20}; 26};
21 27
22static struct fixed_range_block fixed_range_blocks[] = { 28static struct fixed_range_block fixed_range_blocks[] = {
23 { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ 29 { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
24 { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ 30 { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
25 { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ 31 { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
26 {} 32 {}
27}; 33};
28 34
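
The fixed_range_blocks[] table above describes the fixed-range MTRR layout: each listed MSR holds eight one-byte memory types, and the per-block sub-range widths (64K/16K/4K) are a property of the MTRR architecture rather than of the table. A sketch of walking the zero-terminated table (the MSR numbers 0x250/0x258/0x268 are the architectural values behind the MSR_MTRRfix* names; the step sizes are paired positionally for this demo):

    #include <stdio.h>

    struct fixed_range_block {
            int base_msr;   /* first MSR of the block       */
            int ranges;     /* number of MSRs in the block  */
    };

    static const struct fixed_range_block fixed_range_blocks[] = {
            { 0x250, 1 },   /* MSR_MTRRfix64K_00000: one 64K-step MSR   */
            { 0x258, 2 },   /* MSR_MTRRfix16K_80000: two 16K-step MSRs  */
            { 0x268, 8 },   /* MSR_MTRRfix4K_C0000: eight 4K-step MSRs  */
            {}              /* zero-terminated, as in the kernel table  */
    };

    int main(void)
    {
            static const unsigned long stepk[] = { 64, 16, 4 };
            unsigned long coveredk = 0;
            int i, msrs = 0;

            for (i = 0; fixed_range_blocks[i].ranges; i++) {
                    msrs += fixed_range_blocks[i].ranges;
                    /* 8 one-byte type fields per MSR: */
                    coveredk += fixed_range_blocks[i].ranges * 8 * stepk[i];
            }
            printf("%d MSRs covering %luK (0-1M)\n", msrs, coveredk);
            return 0;
    }

This prints "11 MSRs covering 1024K (0-1M)", i.e. the fixed ranges exactly tile the first megabyte.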
@@ -30,10 +36,10 @@ static unsigned long smp_changes_mask;
30static int mtrr_state_set; 36static int mtrr_state_set;
31u64 mtrr_tom2; 37u64 mtrr_tom2;
32 38
33struct mtrr_state_type mtrr_state = {}; 39struct mtrr_state_type mtrr_state;
34EXPORT_SYMBOL_GPL(mtrr_state); 40EXPORT_SYMBOL_GPL(mtrr_state);
35 41
36/** 42/*
37 * BIOS is expected to clear MtrrFixDramModEn bit, see for example 43 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
38 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD 44 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
39 * Opteron Processors" (26094 Rev. 3.30 February 2006), section 45 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
@@ -104,9 +110,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
104 * Look for multiple ranges matching this address and pick type 110 * Look for multiple ranges matching this address and pick type
105 * as per MTRR precedence 111 * as per MTRR precedence
106 */ 112 */
107 if (!(mtrr_state.enabled & 2)) { 113 if (!(mtrr_state.enabled & 2))
108 return mtrr_state.def_type; 114 return mtrr_state.def_type;
109 }
110 115
111 prev_match = 0xFF; 116 prev_match = 0xFF;
112 for (i = 0; i < num_var_ranges; ++i) { 117 for (i = 0; i < num_var_ranges; ++i) {
@@ -125,9 +130,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
125 if (start_state != end_state) 130 if (start_state != end_state)
126 return 0xFE; 131 return 0xFE;
127 132
128 if ((start & mask) != (base & mask)) { 133 if ((start & mask) != (base & mask))
129 continue; 134 continue;
130 }
131 135
132 curr_match = mtrr_state.var_ranges[i].base_lo & 0xff; 136 curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
133 if (prev_match == 0xFF) { 137 if (prev_match == 0xFF) {
@@ -148,9 +152,8 @@ u8 mtrr_type_lookup(u64 start, u64 end)
148 curr_match = MTRR_TYPE_WRTHROUGH; 152 curr_match = MTRR_TYPE_WRTHROUGH;
149 } 153 }
150 154
151 if (prev_match != curr_match) { 155 if (prev_match != curr_match)
152 return MTRR_TYPE_UNCACHABLE; 156 return MTRR_TYPE_UNCACHABLE;
153 }
154 } 157 }
155 158
156 if (mtrr_tom2) { 159 if (mtrr_tom2) {
@@ -164,7 +167,7 @@ u8 mtrr_type_lookup(u64 start, u64 end)
164 return mtrr_state.def_type; 167 return mtrr_state.def_type;
165} 168}
166 169
167/* Get the MSR pair relating to a var range */ 170/* Get the MSR pair relating to a var range */
168static void 171static void
169get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) 172get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
170{ 173{
@@ -172,7 +175,7 @@ get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
172 rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi); 175 rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
173} 176}
174 177
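
get_mtrr_var_range() above reads one MTRRphysBase/MTRRphysMask pair as lo/hi 32-bit halves. The memory type sits in the low byte of the base and the valid bit is bit 11 of the mask, matching the base_lo & 0xff and mask_lo & 0x800 tests used elsewhere in this file. A userspace model of decoding such a pair, with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* One variable-range pair as the kernel sees it: each MSR is read
 * as two 32-bit halves, and the physical values span both. */
struct var_range { uint32_t base_lo, base_hi, mask_lo, mask_hi; };

static uint64_t join(uint32_t lo, uint32_t hi)
{
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        struct var_range vr = {
                .base_lo = 0xf8000006,  /* base bits | type (6 = WB) */
                .base_hi = 0x0,
                .mask_lo = 0xffc00800,  /* mask bits | valid (bit 11) */
                .mask_hi = 0xf,
        };

        printf("base=%#llx type=%u mask=%#llx valid=%d\n",
               (unsigned long long)(join(vr.base_lo, vr.base_hi) & ~0xfffULL),
               vr.base_lo & 0xff,
               (unsigned long long)(join(vr.mask_lo, vr.mask_hi) & ~0xfffULL),
               !!(vr.mask_lo & 0x800));
        return 0;
}
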
175/* fill the MSR pair relating to a var range */ 178/* Fill the MSR pair relating to a var range */
176void fill_mtrr_var_range(unsigned int index, 179void fill_mtrr_var_range(unsigned int index,
177 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi) 180 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
178{ 181{
@@ -186,10 +189,9 @@ void fill_mtrr_var_range(unsigned int index,
186 vr[index].mask_hi = mask_hi; 189 vr[index].mask_hi = mask_hi;
187} 190}
188 191
189static void 192static void get_fixed_ranges(mtrr_type *frs)
190get_fixed_ranges(mtrr_type * frs)
191{ 193{
192 unsigned int *p = (unsigned int *) frs; 194 unsigned int *p = (unsigned int *)frs;
193 int i; 195 int i;
194 196
195 k8_check_syscfg_dram_mod_en(); 197 k8_check_syscfg_dram_mod_en();
@@ -217,22 +219,22 @@ static void __init print_fixed_last(void)
217 if (!last_fixed_end) 219 if (!last_fixed_end)
218 return; 220 return;
219 221
220 printk(KERN_DEBUG " %05X-%05X %s\n", last_fixed_start, 222 pr_debug(" %05X-%05X %s\n", last_fixed_start,
221 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type)); 223 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));
222 224
223 last_fixed_end = 0; 225 last_fixed_end = 0;
224} 226}
225 227
226static void __init update_fixed_last(unsigned base, unsigned end, 228static void __init update_fixed_last(unsigned base, unsigned end,
227 mtrr_type type) 229 mtrr_type type)
228{ 230{
229 last_fixed_start = base; 231 last_fixed_start = base;
230 last_fixed_end = end; 232 last_fixed_end = end;
231 last_fixed_type = type; 233 last_fixed_type = type;
232} 234}
233 235
234static void __init print_fixed(unsigned base, unsigned step, 236static void __init
235 const mtrr_type *types) 237print_fixed(unsigned base, unsigned step, const mtrr_type *types)
236{ 238{
237 unsigned i; 239 unsigned i;
238 240
@@ -259,54 +261,55 @@ static void __init print_mtrr_state(void)
259 unsigned int i; 261 unsigned int i;
260 int high_width; 262 int high_width;
261 263
262 printk(KERN_DEBUG "MTRR default type: %s\n", 264 pr_debug("MTRR default type: %s\n",
263 mtrr_attrib_to_str(mtrr_state.def_type)); 265 mtrr_attrib_to_str(mtrr_state.def_type));
264 if (mtrr_state.have_fixed) { 266 if (mtrr_state.have_fixed) {
265 printk(KERN_DEBUG "MTRR fixed ranges %sabled:\n", 267 pr_debug("MTRR fixed ranges %sabled:\n",
266 mtrr_state.enabled & 1 ? "en" : "dis"); 268 mtrr_state.enabled & 1 ? "en" : "dis");
267 print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0); 269 print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
268 for (i = 0; i < 2; ++i) 270 for (i = 0; i < 2; ++i)
269 print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8); 271 print_fixed(0x80000 + i * 0x20000, 0x04000,
272 mtrr_state.fixed_ranges + (i + 1) * 8);
270 for (i = 0; i < 8; ++i) 273 for (i = 0; i < 8; ++i)
271 print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8); 274 print_fixed(0xC0000 + i * 0x08000, 0x01000,
275 mtrr_state.fixed_ranges + (i + 3) * 8);
272 276
273 /* tail */ 277 /* tail */
274 print_fixed_last(); 278 print_fixed_last();
275 } 279 }
276 printk(KERN_DEBUG "MTRR variable ranges %sabled:\n", 280 pr_debug("MTRR variable ranges %sabled:\n",
277 mtrr_state.enabled & 2 ? "en" : "dis"); 281 mtrr_state.enabled & 2 ? "en" : "dis");
278 if (size_or_mask & 0xffffffffUL) 282 if (size_or_mask & 0xffffffffUL)
279 high_width = ffs(size_or_mask & 0xffffffffUL) - 1; 283 high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
280 else 284 else
281 high_width = ffs(size_or_mask>>32) + 32 - 1; 285 high_width = ffs(size_or_mask>>32) + 32 - 1;
282 high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4; 286 high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
287
283 for (i = 0; i < num_var_ranges; ++i) { 288 for (i = 0; i < num_var_ranges; ++i) {
284 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11)) 289 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
285 printk(KERN_DEBUG " %u base %0*X%05X000 mask %0*X%05X000 %s\n", 290 pr_debug(" %u base %0*X%05X000 mask %0*X%05X000 %s\n",
286 i, 291 i,
287 high_width, 292 high_width,
288 mtrr_state.var_ranges[i].base_hi, 293 mtrr_state.var_ranges[i].base_hi,
289 mtrr_state.var_ranges[i].base_lo >> 12, 294 mtrr_state.var_ranges[i].base_lo >> 12,
290 high_width, 295 high_width,
291 mtrr_state.var_ranges[i].mask_hi, 296 mtrr_state.var_ranges[i].mask_hi,
292 mtrr_state.var_ranges[i].mask_lo >> 12, 297 mtrr_state.var_ranges[i].mask_lo >> 12,
293 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff)); 298 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
294 else 299 else
295 printk(KERN_DEBUG " %u disabled\n", i); 300 pr_debug(" %u disabled\n", i);
296 }
297 if (mtrr_tom2) {
298 printk(KERN_DEBUG "TOM2: %016llx aka %lldM\n",
299 mtrr_tom2, mtrr_tom2>>20);
300 } 301 }
302 if (mtrr_tom2)
303 pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
301} 304}
302 305
303/* Grab all of the MTRR state for this CPU into *state */ 306/* Grab all of the MTRR state for this CPU into *state */
304void __init get_mtrr_state(void) 307void __init get_mtrr_state(void)
305{ 308{
306 unsigned int i;
307 struct mtrr_var_range *vrs; 309 struct mtrr_var_range *vrs;
308 unsigned lo, dummy;
309 unsigned long flags; 310 unsigned long flags;
311 unsigned lo, dummy;
312 unsigned int i;
310 313
311 vrs = mtrr_state.var_ranges; 314 vrs = mtrr_state.var_ranges;
312 315
@@ -324,6 +327,7 @@ void __init get_mtrr_state(void)
324 327
325 if (amd_special_default_mtrr()) { 328 if (amd_special_default_mtrr()) {
326 unsigned low, high; 329 unsigned low, high;
330
327 /* TOP_MEM2 */ 331 /* TOP_MEM2 */
328 rdmsr(MSR_K8_TOP_MEM2, low, high); 332 rdmsr(MSR_K8_TOP_MEM2, low, high);
329 mtrr_tom2 = high; 333 mtrr_tom2 = high;
@@ -344,10 +348,9 @@ void __init get_mtrr_state(void)
344 348
345 post_set(); 349 post_set();
346 local_irq_restore(flags); 350 local_irq_restore(flags);
347
348} 351}
349 352
350/* Some BIOS's are fucked and don't set all MTRRs the same! */ 353/* Some BIOS's are messed up and don't set all MTRRs the same! */
351void __init mtrr_state_warn(void) 354void __init mtrr_state_warn(void)
352{ 355{
353 unsigned long mask = smp_changes_mask; 356 unsigned long mask = smp_changes_mask;
@@ -355,28 +358,33 @@ void __init mtrr_state_warn(void)
355 if (!mask) 358 if (!mask)
356 return; 359 return;
357 if (mask & MTRR_CHANGE_MASK_FIXED) 360 if (mask & MTRR_CHANGE_MASK_FIXED)
358 printk(KERN_WARNING "mtrr: your CPUs had inconsistent fixed MTRR settings\n"); 361 pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
359 if (mask & MTRR_CHANGE_MASK_VARIABLE) 362 if (mask & MTRR_CHANGE_MASK_VARIABLE)
360 printk(KERN_WARNING "mtrr: your CPUs had inconsistent variable MTRR settings\n"); 363 pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
361 if (mask & MTRR_CHANGE_MASK_DEFTYPE) 364 if (mask & MTRR_CHANGE_MASK_DEFTYPE)
362 printk(KERN_WARNING "mtrr: your CPUs had inconsistent MTRRdefType settings\n"); 365 pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
366
363 printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n"); 367 printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
364 printk(KERN_INFO "mtrr: corrected configuration.\n"); 368 printk(KERN_INFO "mtrr: corrected configuration.\n");
365} 369}
366 370
367/* Doesn't attempt to pass an error out to MTRR users 371/*
368 because it's quite complicated in some cases and probably not 372 * Doesn't attempt to pass an error out to MTRR users
369 worth it because the best error handling is to ignore it. */ 373 * because it's quite complicated in some cases and probably not
374 * worth it because the best error handling is to ignore it.
375 */
370void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b) 376void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
371{ 377{
372 if (wrmsr_safe(msr, a, b) < 0) 378 if (wrmsr_safe(msr, a, b) < 0) {
373 printk(KERN_ERR 379 printk(KERN_ERR
374 "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n", 380 "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
375 smp_processor_id(), msr, a, b); 381 smp_processor_id(), msr, a, b);
382 }
376} 383}
377 384
378/** 385/**
379 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have 386 * set_fixed_range - checks & updates a fixed-range MTRR if it
387 * differs from the value it should have
380 * @msr: MSR address of the MTRR which should be checked and updated 388 * @msr: MSR address of the MTRR which should be checked and updated
381 * @changed: pointer which indicates whether the MTRR needed to be changed 389 * @changed: pointer which indicates whether the MTRR needed to be changed
382 * @msrwords: pointer to the MSR values which the MSR should have 390 * @msrwords: pointer to the MSR values which the MSR should have
@@ -401,20 +409,23 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
401 * 409 *
402 * Returns: The index of the region on success, else negative on error. 410 * Returns: The index of the region on success, else negative on error.
403 */ 411 */
404int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg) 412int
413generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
405{ 414{
406 int i, max;
407 mtrr_type ltype;
408 unsigned long lbase, lsize; 415 unsigned long lbase, lsize;
416 mtrr_type ltype;
417 int i, max;
409 418
410 max = num_var_ranges; 419 max = num_var_ranges;
411 if (replace_reg >= 0 && replace_reg < max) 420 if (replace_reg >= 0 && replace_reg < max)
412 return replace_reg; 421 return replace_reg;
422
413 for (i = 0; i < max; ++i) { 423 for (i = 0; i < max; ++i) {
414 mtrr_if->get(i, &lbase, &lsize, &ltype); 424 mtrr_if->get(i, &lbase, &lsize, &ltype);
415 if (lsize == 0) 425 if (lsize == 0)
416 return i; 426 return i;
417 } 427 }
428
418 return -ENOSPC; 429 return -ENOSPC;
419} 430}
420 431
@@ -434,7 +445,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
434 rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); 445 rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);
435 446
436 if ((mask_lo & 0x800) == 0) { 447 if ((mask_lo & 0x800) == 0) {
437 /* Invalid (i.e. free) range */ 448 /* Invalid (i.e. free) range */
438 *base = 0; 449 *base = 0;
439 *size = 0; 450 *size = 0;
440 *type = 0; 451 *type = 0;
@@ -471,27 +482,31 @@ out_put_cpu:
471} 482}
472 483
473/** 484/**
474 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they differ from the saved set 485 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
486 * differ from the saved set
475 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges() 487 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
476 */ 488 */
477static int set_fixed_ranges(mtrr_type * frs) 489static int set_fixed_ranges(mtrr_type *frs)
478{ 490{
479 unsigned long long *saved = (unsigned long long *) frs; 491 unsigned long long *saved = (unsigned long long *)frs;
480 bool changed = false; 492 bool changed = false;
481 int block=-1, range; 493 int block = -1, range;
482 494
483 k8_check_syscfg_dram_mod_en(); 495 k8_check_syscfg_dram_mod_en();
484 496
485 while (fixed_range_blocks[++block].ranges) 497 while (fixed_range_blocks[++block].ranges) {
486 for (range=0; range < fixed_range_blocks[block].ranges; range++) 498 for (range = 0; range < fixed_range_blocks[block].ranges; range++)
487 set_fixed_range(fixed_range_blocks[block].base_msr + range, 499 set_fixed_range(fixed_range_blocks[block].base_msr + range,
488 &changed, (unsigned int *) saved++); 500 &changed, (unsigned int *)saved++);
501 }
489 502
490 return changed; 503 return changed;
491} 504}
492 505
493/* Set the MSR pair relating to a var range. Returns TRUE if 506/*
494 changes are made */ 507 * Set the MSR pair relating to a var range.
508 * Returns true if changes are made.
509 */
495static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr) 510static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
496{ 511{
497 unsigned int lo, hi; 512 unsigned int lo, hi;
@@ -501,6 +516,7 @@ static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
501 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL) 516 if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
502 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) != 517 || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
503 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) { 518 (hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
519
504 mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi); 520 mtrr_wrmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
505 changed = true; 521 changed = true;
506 } 522 }
@@ -526,21 +542,26 @@ static u32 deftype_lo, deftype_hi;
526 */ 542 */
527static unsigned long set_mtrr_state(void) 543static unsigned long set_mtrr_state(void)
528{ 544{
529 unsigned int i;
530 unsigned long change_mask = 0; 545 unsigned long change_mask = 0;
546 unsigned int i;
531 547
532 for (i = 0; i < num_var_ranges; i++) 548 for (i = 0; i < num_var_ranges; i++) {
533 if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) 549 if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
534 change_mask |= MTRR_CHANGE_MASK_VARIABLE; 550 change_mask |= MTRR_CHANGE_MASK_VARIABLE;
551 }
535 552
536 if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges)) 553 if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
537 change_mask |= MTRR_CHANGE_MASK_FIXED; 554 change_mask |= MTRR_CHANGE_MASK_FIXED;
538 555
539 /* Set_mtrr_restore restores the old value of MTRRdefType, 556 /*
540 so to set it we fiddle with the saved value */ 557 * Set_mtrr_restore restores the old value of MTRRdefType,
558 * so to set it we fiddle with the saved value:
559 */
541 if ((deftype_lo & 0xff) != mtrr_state.def_type 560 if ((deftype_lo & 0xff) != mtrr_state.def_type
542 || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { 561 || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
543 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10); 562
563 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
564 (mtrr_state.enabled << 10);
544 change_mask |= MTRR_CHANGE_MASK_DEFTYPE; 565 change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
545 } 566 }
546 567
@@ -548,33 +569,36 @@ static unsigned long set_mtrr_state(void)
548} 569}
549 570
550 571
551static unsigned long cr4 = 0; 572static unsigned long cr4;
552static DEFINE_SPINLOCK(set_atomicity_lock); 573static DEFINE_SPINLOCK(set_atomicity_lock);
553 574
554/* 575/*
555 * Since we are disabling the cache don't allow any interrupts - they 576 * Since we are disabling the cache don't allow any interrupts,
556 * would run extremely slow and would only increase the pain. The caller must 577 * they would run extremely slow and would only increase the pain.
557 * ensure that local interrupts are disabled and are reenabled after post_set() 578 *
558 * has been called. 579 * The caller must ensure that local interrupts are disabled and
580 * are reenabled after post_set() has been called.
559 */ 581 */
560
561static void prepare_set(void) __acquires(set_atomicity_lock) 582static void prepare_set(void) __acquires(set_atomicity_lock)
562{ 583{
563 unsigned long cr0; 584 unsigned long cr0;
564 585
565 /* Note that this is not ideal, since the cache is only flushed/disabled 586 /*
566 for this CPU while the MTRRs are changed, but changing this requires 587 * Note that this is not ideal
567 more invasive changes to the way the kernel boots */ 588 * since the cache is only flushed/disabled for this CPU while the
589 * MTRRs are changed, but changing this requires more invasive
590 * changes to the way the kernel boots
591 */
568 592
569 spin_lock(&set_atomicity_lock); 593 spin_lock(&set_atomicity_lock);
570 594
571 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */ 595 /* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
572 cr0 = read_cr0() | X86_CR0_CD; 596 cr0 = read_cr0() | X86_CR0_CD;
573 write_cr0(cr0); 597 write_cr0(cr0);
574 wbinvd(); 598 wbinvd();
575 599
576 /* Save value of CR4 and clear Page Global Enable (bit 7) */ 600 /* Save value of CR4 and clear Page Global Enable (bit 7) */
577 if ( cpu_has_pge ) { 601 if (cpu_has_pge) {
578 cr4 = read_cr4(); 602 cr4 = read_cr4();
579 write_cr4(cr4 & ~X86_CR4_PGE); 603 write_cr4(cr4 & ~X86_CR4_PGE);
580 } 604 }
@@ -582,26 +606,26 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
582 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */ 606 /* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
583 __flush_tlb(); 607 __flush_tlb();
584 608
585 /* Save MTRR state */ 609 /* Save MTRR state */
586 rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 610 rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
587 611
588 /* Disable MTRRs, and set the default type to uncached */ 612 /* Disable MTRRs, and set the default type to uncached */
589 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi); 613 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
590} 614}
591 615
592static void post_set(void) __releases(set_atomicity_lock) 616static void post_set(void) __releases(set_atomicity_lock)
593{ 617{
594 /* Flush TLBs (no need to flush caches - they are disabled) */ 618 /* Flush TLBs (no need to flush caches - they are disabled) */
595 __flush_tlb(); 619 __flush_tlb();
596 620
597 /* Intel (P6) standard MTRRs */ 621 /* Intel (P6) standard MTRRs */
598 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi); 622 mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
599 623
600 /* Enable caches */ 624 /* Enable caches */
601 write_cr0(read_cr0() & 0xbfffffff); 625 write_cr0(read_cr0() & 0xbfffffff);
602 626
603 /* Restore value of CR4 */ 627 /* Restore value of CR4 */
604 if ( cpu_has_pge ) 628 if (cpu_has_pge)
605 write_cr4(cr4); 629 write_cr4(cr4);
606 spin_unlock(&set_atomicity_lock); 630 spin_unlock(&set_atomicity_lock);
607} 631}
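
prepare_set() and post_set() above bracket every register write in this file, and the ordering is the whole point: caches must be in no-fill mode and the TLB flushed before the MTRRs change, then everything is undone in reverse. A userspace stand-in that only narrates the pairing (each line mirrors one step of the real functions):

#include <stdio.h>

/* Narrated stand-ins for prepare_set()/post_set(); each line
 * mirrors one step of the real functions above. */
static void prepare(void)
{
        puts("1. CR0.CD=1, wbinvd()  - enter no-fill mode, flush caches");
        puts("2. clear CR4.PGE       - so the TLB flush below is global");
        puts("3. flush TLB");
        puts("4. save MTRRdefType, then disable MTRRs");
}

static void post(void)
{
        puts("5. flush TLB again");
        puts("6. restore MTRRdefType - MTRRs back on");
        puts("7. enable caches, restore CR4.PGE");
}

int main(void)
{
        prepare();
        puts("   ... write the MTRR MSRs here ...");
        post();
        return 0;
}
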
@@ -623,24 +647,27 @@ static void generic_set_all(void)
623 post_set(); 647 post_set();
624 local_irq_restore(flags); 648 local_irq_restore(flags);
625 649
626 /* Use the atomic bitops to update the global mask */ 650 /* Use the atomic bitops to update the global mask */
627 for (count = 0; count < sizeof mask * 8; ++count) { 651 for (count = 0; count < sizeof mask * 8; ++count) {
628 if (mask & 0x01) 652 if (mask & 0x01)
629 set_bit(count, &smp_changes_mask); 653 set_bit(count, &smp_changes_mask);
630 mask >>= 1; 654 mask >>= 1;
631 } 655 }
632 656
633} 657}
634 658
659/**
660 * generic_set_mtrr - set variable MTRR register on the local CPU.
661 *
662 * @reg: The register to set.
663 * @base: The base address of the region.
664 * @size: The size of the region. If this is 0 the region is disabled.
665 * @type: The type of the region.
666 *
667 * Returns nothing.
668 */
635static void generic_set_mtrr(unsigned int reg, unsigned long base, 669static void generic_set_mtrr(unsigned int reg, unsigned long base,
636 unsigned long size, mtrr_type type) 670 unsigned long size, mtrr_type type)
637/* [SUMMARY] Set variable MTRR register on the local CPU.
638 <reg> The register to set.
639 <base> The base address of the region.
640 <size> The size of the region. If this is 0 the region is disabled.
641 <type> The type of the region.
642 [RETURNS] Nothing.
643*/
644{ 671{
645 unsigned long flags; 672 unsigned long flags;
646 struct mtrr_var_range *vr; 673 struct mtrr_var_range *vr;
@@ -651,8 +678,10 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
651 prepare_set(); 678 prepare_set();
652 679
653 if (size == 0) { 680 if (size == 0) {
654 /* The invalid bit is kept in the mask, so we simply clear the 681 /*
655 relevant mask register to disable a range. */ 682 * The invalid bit is kept in the mask, so we simply
683 * clear the relevant mask register to disable a range.
684 */
656 mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0); 685 mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
657 memset(vr, 0, sizeof(struct mtrr_var_range)); 686 memset(vr, 0, sizeof(struct mtrr_var_range));
658 } else { 687 } else {
@@ -669,46 +698,50 @@ static void generic_set_mtrr(unsigned int reg, unsigned long base,
669 local_irq_restore(flags); 698 local_irq_restore(flags);
670} 699}
671 700
672int generic_validate_add_page(unsigned long base, unsigned long size, unsigned int type) 701int generic_validate_add_page(unsigned long base, unsigned long size,
702 unsigned int type)
673{ 703{
674 unsigned long lbase, last; 704 unsigned long lbase, last;
675 705
676 /* For Intel PPro stepping <= 7, must be 4 MiB aligned 706 /*
677 and not touch 0x70000000->0x7003FFFF */ 707 * For Intel PPro stepping <= 7
708 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
709 */
678 if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 && 710 if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
679 boot_cpu_data.x86_model == 1 && 711 boot_cpu_data.x86_model == 1 &&
680 boot_cpu_data.x86_mask <= 7) { 712 boot_cpu_data.x86_mask <= 7) {
681 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) { 713 if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
682 printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); 714 pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
683 return -EINVAL; 715 return -EINVAL;
684 } 716 }
685 if (!(base + size < 0x70000 || base > 0x7003F) && 717 if (!(base + size < 0x70000 || base > 0x7003F) &&
686 (type == MTRR_TYPE_WRCOMB 718 (type == MTRR_TYPE_WRCOMB
687 || type == MTRR_TYPE_WRBACK)) { 719 || type == MTRR_TYPE_WRBACK)) {
688 printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); 720 pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
689 return -EINVAL; 721 return -EINVAL;
690 } 722 }
691 } 723 }
692 724
693 /* Check upper bits of base and last are equal and lower bits are 0 725 /*
694 for base and 1 for last */ 726 * Check upper bits of base and last are equal and lower bits are 0
727 * for base and 1 for last
728 */
695 last = base + size - 1; 729 last = base + size - 1;
696 for (lbase = base; !(lbase & 1) && (last & 1); 730 for (lbase = base; !(lbase & 1) && (last & 1);
697 lbase = lbase >> 1, last = last >> 1) ; 731 lbase = lbase >> 1, last = last >> 1)
732 ;
698 if (lbase != last) { 733 if (lbase != last) {
699 printk(KERN_WARNING "mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", 734 pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
700 base, size);
701 return -EINVAL; 735 return -EINVAL;
702 } 736 }
703 return 0; 737 return 0;
704} 738}
705 739
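
The lbase/last loop in generic_validate_add_page() above is a compact alignment test: strip trailing zero bits from base and trailing one bits from last in lockstep, and the two values meet only if size is a power of two and base is aligned to it. The same trick as a standalone function:

#include <stdio.h>

/* Returns 1 iff [base, base+size) is a size-aligned power-of-two
 * region, using the same shift-until-they-meet trick as above. */
static int aligned_pow2(unsigned long base, unsigned long size)
{
        unsigned long last = base + size - 1;

        while (!(base & 1) && (last & 1)) {
                base >>= 1;
                last >>= 1;
        }
        return base == last;
}

int main(void)
{
        printf("%d\n", aligned_pow2(0x100000, 0x100000)); /* 1: 1M at 1M   */
        printf("%d\n", aligned_pow2(0x100000, 0x180000)); /* 0: not pow2   */
        printf("%d\n", aligned_pow2(0x080000, 0x100000)); /* 0: misaligned */
        return 0;
}
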
706
707static int generic_have_wrcomb(void) 740static int generic_have_wrcomb(void)
708{ 741{
709 unsigned long config, dummy; 742 unsigned long config, dummy;
710 rdmsr(MSR_MTRRcap, config, dummy); 743 rdmsr(MSR_MTRRcap, config, dummy);
711 return (config & (1 << 10)); 744 return config & (1 << 10);
712} 745}
713 746
714int positive_have_wrcomb(void) 747int positive_have_wrcomb(void)
@@ -716,14 +749,15 @@ int positive_have_wrcomb(void)
716 return 1; 749 return 1;
717} 750}
718 751
719/* generic structure... 752/*
753 * Generic structure...
720 */ 754 */
721struct mtrr_ops generic_mtrr_ops = { 755struct mtrr_ops generic_mtrr_ops = {
722 .use_intel_if = 1, 756 .use_intel_if = 1,
723 .set_all = generic_set_all, 757 .set_all = generic_set_all,
724 .get = generic_get_mtrr, 758 .get = generic_get_mtrr,
725 .get_free_region = generic_get_free_region, 759 .get_free_region = generic_get_free_region,
726 .set = generic_set_mtrr, 760 .set = generic_set_mtrr,
727 .validate_add_page = generic_validate_add_page, 761 .validate_add_page = generic_validate_add_page,
728 .have_wrcomb = generic_have_wrcomb, 762 .have_wrcomb = generic_have_wrcomb,
729}; 763};
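
generic_have_wrcomb() above tests bit 10 of MSR_MTRRcap; the low byte of the same register is the variable-range count that set_num_var_ranges() reads in main.c further down. A decoding sketch (bit positions per the Intel SDM; the sample value is made up):

#include <stdint.h>
#include <stdio.h>

/* MTRRcap: bits 0-7 = variable-range count (VCNT), bit 8 = fixed
 * ranges supported, bit 10 = write-combining supported. */
int main(void)
{
        uint64_t mtrrcap = 0x508;    /* made-up: 8 ranges, FIX, WC */

        printf("variable ranges: %u\n", (unsigned)(mtrrcap & 0xff));
        printf("fixed ranges:    %s\n", (mtrrcap & (1 << 8)) ? "yes" : "no");
        printf("write-combining: %s\n", (mtrrcap & (1 << 10)) ? "yes" : "no");
        return 0;
}
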
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index fb73a52913a4..08b6ea4c62b4 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -1,27 +1,28 @@
1#include <linux/init.h>
2#include <linux/proc_fs.h>
3#include <linux/capability.h> 1#include <linux/capability.h>
4#include <linux/ctype.h>
5#include <linux/module.h>
6#include <linux/seq_file.h> 2#include <linux/seq_file.h>
7#include <asm/uaccess.h> 3#include <linux/uaccess.h>
4#include <linux/proc_fs.h>
5#include <linux/module.h>
6#include <linux/ctype.h>
7#include <linux/init.h>
8 8
9#define LINE_SIZE 80 9#define LINE_SIZE 80
10 10
11#include <asm/mtrr.h> 11#include <asm/mtrr.h>
12
12#include "mtrr.h" 13#include "mtrr.h"
13 14
14#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) 15#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
15 16
16static const char *const mtrr_strings[MTRR_NUM_TYPES] = 17static const char *const mtrr_strings[MTRR_NUM_TYPES] =
17{ 18{
18 "uncachable", /* 0 */ 19 "uncachable", /* 0 */
19 "write-combining", /* 1 */ 20 "write-combining", /* 1 */
20 "?", /* 2 */ 21 "?", /* 2 */
21 "?", /* 3 */ 22 "?", /* 3 */
22 "write-through", /* 4 */ 23 "write-through", /* 4 */
23 "write-protect", /* 5 */ 24 "write-protect", /* 5 */
24 "write-back", /* 6 */ 25 "write-back", /* 6 */
25}; 26};
26 27
27const char *mtrr_attrib_to_str(int x) 28const char *mtrr_attrib_to_str(int x)
@@ -35,8 +36,8 @@ static int
35mtrr_file_add(unsigned long base, unsigned long size, 36mtrr_file_add(unsigned long base, unsigned long size,
36 unsigned int type, bool increment, struct file *file, int page) 37 unsigned int type, bool increment, struct file *file, int page)
37{ 38{
39 unsigned int *fcount = FILE_FCOUNT(file);
38 int reg, max; 40 int reg, max;
39 unsigned int *fcount = FILE_FCOUNT(file);
40 41
41 max = num_var_ranges; 42 max = num_var_ranges;
42 if (fcount == NULL) { 43 if (fcount == NULL) {
@@ -61,8 +62,8 @@ static int
61mtrr_file_del(unsigned long base, unsigned long size, 62mtrr_file_del(unsigned long base, unsigned long size,
62 struct file *file, int page) 63 struct file *file, int page)
63{ 64{
64 int reg;
65 unsigned int *fcount = FILE_FCOUNT(file); 65 unsigned int *fcount = FILE_FCOUNT(file);
66 int reg;
66 67
67 if (!page) { 68 if (!page) {
68 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) 69 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1)))
@@ -81,13 +82,14 @@ mtrr_file_del(unsigned long base, unsigned long size,
81 return reg; 82 return reg;
82} 83}
83 84
84/* RED-PEN: seq_file can seek now. this is ignored. */ 85/*
86 * seq_file can seek but we ignore it.
87 *
88 * Format of control line:
89 * "base=%Lx size=%Lx type=%s" or "disable=%d"
90 */
85static ssize_t 91static ssize_t
86mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos) 92mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
87/* Format of control line:
88 "base=%Lx size=%Lx type=%s" OR:
89 "disable=%d"
90*/
91{ 93{
92 int i, err; 94 int i, err;
93 unsigned long reg; 95 unsigned long reg;
@@ -100,15 +102,18 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
100 return -EPERM; 102 return -EPERM;
101 if (!len) 103 if (!len)
102 return -EINVAL; 104 return -EINVAL;
105
103 memset(line, 0, LINE_SIZE); 106 memset(line, 0, LINE_SIZE);
104 if (len > LINE_SIZE) 107 if (len > LINE_SIZE)
105 len = LINE_SIZE; 108 len = LINE_SIZE;
106 if (copy_from_user(line, buf, len - 1)) 109 if (copy_from_user(line, buf, len - 1))
107 return -EFAULT; 110 return -EFAULT;
111
108 linelen = strlen(line); 112 linelen = strlen(line);
109 ptr = line + linelen - 1; 113 ptr = line + linelen - 1;
110 if (linelen && *ptr == '\n') 114 if (linelen && *ptr == '\n')
111 *ptr = '\0'; 115 *ptr = '\0';
116
112 if (!strncmp(line, "disable=", 8)) { 117 if (!strncmp(line, "disable=", 8)) {
113 reg = simple_strtoul(line + 8, &ptr, 0); 118 reg = simple_strtoul(line + 8, &ptr, 0);
114 err = mtrr_del_page(reg, 0, 0); 119 err = mtrr_del_page(reg, 0, 0);
@@ -116,28 +121,35 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
116 return err; 121 return err;
117 return len; 122 return len;
118 } 123 }
124
119 if (strncmp(line, "base=", 5)) 125 if (strncmp(line, "base=", 5))
120 return -EINVAL; 126 return -EINVAL;
127
121 base = simple_strtoull(line + 5, &ptr, 0); 128 base = simple_strtoull(line + 5, &ptr, 0);
122 for (; isspace(*ptr); ++ptr) ; 129 for (; isspace(*ptr); ++ptr)
130 ;
131
123 if (strncmp(ptr, "size=", 5)) 132 if (strncmp(ptr, "size=", 5))
124 return -EINVAL; 133 return -EINVAL;
134
125 size = simple_strtoull(ptr + 5, &ptr, 0); 135 size = simple_strtoull(ptr + 5, &ptr, 0);
126 if ((base & 0xfff) || (size & 0xfff)) 136 if ((base & 0xfff) || (size & 0xfff))
127 return -EINVAL; 137 return -EINVAL;
128 for (; isspace(*ptr); ++ptr) ; 138 for (; isspace(*ptr); ++ptr)
139 ;
140
129 if (strncmp(ptr, "type=", 5)) 141 if (strncmp(ptr, "type=", 5))
130 return -EINVAL; 142 return -EINVAL;
131 ptr += 5; 143 ptr += 5;
132 for (; isspace(*ptr); ++ptr) ; 144 for (; isspace(*ptr); ++ptr)
145 ;
146
133 for (i = 0; i < MTRR_NUM_TYPES; ++i) { 147 for (i = 0; i < MTRR_NUM_TYPES; ++i) {
134 if (strcmp(ptr, mtrr_strings[i])) 148 if (strcmp(ptr, mtrr_strings[i]))
135 continue; 149 continue;
136 base >>= PAGE_SHIFT; 150 base >>= PAGE_SHIFT;
137 size >>= PAGE_SHIFT; 151 size >>= PAGE_SHIFT;
138 err = 152 err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
139 mtrr_add_page((unsigned long) base, (unsigned long) size, i,
140 true);
141 if (err < 0) 153 if (err < 0)
142 return err; 154 return err;
143 return len; 155 return len;
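
The control-line grammar parsed above ("base=%Lx size=%Lx type=%s" or "disable=%d") is the /proc/mtrr write interface. A userspace example that requests a write-combining region (the aperture address is hypothetical; needs root and a kernel with CONFIG_MTRR and procfs):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Hypothetical framebuffer aperture; run as root. */
        const char *line =
                "base=0xf8000000 size=0x400000 type=write-combining\n";
        FILE *f = fopen("/proc/mtrr", "w");

        if (!f) {
                perror("/proc/mtrr");
                return 1;
        }
        if (fwrite(line, 1, strlen(line), f) != strlen(line))
                perror("write");
        fclose(f);
        return 0;
}

The type string must match one of the mtrr_strings entries above exactly; mtrr_write() strips the trailing newline before comparing.
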
@@ -181,7 +193,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
181 case MTRRIOC32_SET_PAGE_ENTRY: 193 case MTRRIOC32_SET_PAGE_ENTRY:
182 case MTRRIOC32_DEL_PAGE_ENTRY: 194 case MTRRIOC32_DEL_PAGE_ENTRY:
183 case MTRRIOC32_KILL_PAGE_ENTRY: { 195 case MTRRIOC32_KILL_PAGE_ENTRY: {
184 struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg; 196 struct mtrr_sentry32 __user *s32;
197
198 s32 = (struct mtrr_sentry32 __user *)__arg;
185 err = get_user(sentry.base, &s32->base); 199 err = get_user(sentry.base, &s32->base);
186 err |= get_user(sentry.size, &s32->size); 200 err |= get_user(sentry.size, &s32->size);
187 err |= get_user(sentry.type, &s32->type); 201 err |= get_user(sentry.type, &s32->type);
@@ -191,7 +205,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
191 } 205 }
192 case MTRRIOC32_GET_ENTRY: 206 case MTRRIOC32_GET_ENTRY:
193 case MTRRIOC32_GET_PAGE_ENTRY: { 207 case MTRRIOC32_GET_PAGE_ENTRY: {
194 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; 208 struct mtrr_gentry32 __user *g32;
209
210 g32 = (struct mtrr_gentry32 __user *)__arg;
195 err = get_user(gentry.regnum, &g32->regnum); 211 err = get_user(gentry.regnum, &g32->regnum);
196 err |= get_user(gentry.base, &g32->base); 212 err |= get_user(gentry.base, &g32->base);
197 err |= get_user(gentry.size, &g32->size); 213 err |= get_user(gentry.size, &g32->size);
@@ -314,7 +330,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
314 if (err) 330 if (err)
315 return err; 331 return err;
316 332
317 switch(cmd) { 333 switch (cmd) {
318 case MTRRIOC_GET_ENTRY: 334 case MTRRIOC_GET_ENTRY:
319 case MTRRIOC_GET_PAGE_ENTRY: 335 case MTRRIOC_GET_PAGE_ENTRY:
320 if (copy_to_user(arg, &gentry, sizeof gentry)) 336 if (copy_to_user(arg, &gentry, sizeof gentry))
@@ -323,7 +339,9 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
323#ifdef CONFIG_COMPAT 339#ifdef CONFIG_COMPAT
324 case MTRRIOC32_GET_ENTRY: 340 case MTRRIOC32_GET_ENTRY:
325 case MTRRIOC32_GET_PAGE_ENTRY: { 341 case MTRRIOC32_GET_PAGE_ENTRY: {
326 struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg; 342 struct mtrr_gentry32 __user *g32;
343
344 g32 = (struct mtrr_gentry32 __user *)__arg;
327 err = put_user(gentry.base, &g32->base); 345 err = put_user(gentry.base, &g32->base);
328 err |= put_user(gentry.size, &g32->size); 346 err |= put_user(gentry.size, &g32->size);
329 err |= put_user(gentry.regnum, &g32->regnum); 347 err |= put_user(gentry.regnum, &g32->regnum);
@@ -335,11 +353,10 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
335 return err; 353 return err;
336} 354}
337 355
338static int 356static int mtrr_close(struct inode *ino, struct file *file)
339mtrr_close(struct inode *ino, struct file *file)
340{ 357{
341 int i, max;
342 unsigned int *fcount = FILE_FCOUNT(file); 358 unsigned int *fcount = FILE_FCOUNT(file);
359 int i, max;
343 360
344 if (fcount != NULL) { 361 if (fcount != NULL) {
345 max = num_var_ranges; 362 max = num_var_ranges;
@@ -359,22 +376,22 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset);
359 376
360static int mtrr_open(struct inode *inode, struct file *file) 377static int mtrr_open(struct inode *inode, struct file *file)
361{ 378{
362 if (!mtrr_if) 379 if (!mtrr_if)
363 return -EIO; 380 return -EIO;
364 if (!mtrr_if->get) 381 if (!mtrr_if->get)
365 return -ENXIO; 382 return -ENXIO;
366 return single_open(file, mtrr_seq_show, NULL); 383 return single_open(file, mtrr_seq_show, NULL);
367} 384}
368 385
369static const struct file_operations mtrr_fops = { 386static const struct file_operations mtrr_fops = {
370 .owner = THIS_MODULE, 387 .owner = THIS_MODULE,
371 .open = mtrr_open, 388 .open = mtrr_open,
372 .read = seq_read, 389 .read = seq_read,
373 .llseek = seq_lseek, 390 .llseek = seq_lseek,
374 .write = mtrr_write, 391 .write = mtrr_write,
375 .unlocked_ioctl = mtrr_ioctl, 392 .unlocked_ioctl = mtrr_ioctl,
376 .compat_ioctl = mtrr_ioctl, 393 .compat_ioctl = mtrr_ioctl,
377 .release = mtrr_close, 394 .release = mtrr_close,
378}; 395};
379 396
380static int mtrr_seq_show(struct seq_file *seq, void *offset) 397static int mtrr_seq_show(struct seq_file *seq, void *offset)
@@ -388,23 +405,24 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
388 max = num_var_ranges; 405 max = num_var_ranges;
389 for (i = 0; i < max; i++) { 406 for (i = 0; i < max; i++) {
390 mtrr_if->get(i, &base, &size, &type); 407 mtrr_if->get(i, &base, &size, &type);
391 if (size == 0) 408 if (size == 0) {
392 mtrr_usage_table[i] = 0; 409 mtrr_usage_table[i] = 0;
393 else { 410 continue;
394 if (size < (0x100000 >> PAGE_SHIFT)) {
395 /* less than 1MB */
396 factor = 'K';
397 size <<= PAGE_SHIFT - 10;
398 } else {
399 factor = 'M';
400 size >>= 20 - PAGE_SHIFT;
401 }
402 /* RED-PEN: base can be > 32bit */
403 len += seq_printf(seq,
404 "reg%02i: base=0x%06lx000 (%5luMB), size=%5lu%cB, count=%d: %s\n",
405 i, base, base >> (20 - PAGE_SHIFT), size, factor,
406 mtrr_usage_table[i], mtrr_attrib_to_str(type));
407 } 411 }
412 if (size < (0x100000 >> PAGE_SHIFT)) {
413 /* less than 1MB */
414 factor = 'K';
415 size <<= PAGE_SHIFT - 10;
416 } else {
417 factor = 'M';
418 size >>= 20 - PAGE_SHIFT;
419 }
420 /* Base can be > 32bit */
421 len += seq_printf(seq, "reg%02i: base=0x%06lx000 "
422 "(%5luMB), size=%5lu%cB, count=%d: %s\n",
423 i, base, base >> (20 - PAGE_SHIFT), size,
424 factor, mtrr_usage_table[i],
425 mtrr_attrib_to_str(type));
408 } 426 }
409 return 0; 427 return 0;
410} 428}
@@ -422,6 +440,5 @@ static int __init mtrr_if_init(void)
422 proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); 440 proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
423 return 0; 441 return 0;
424} 442}
425
426arch_initcall(mtrr_if_init); 443arch_initcall(mtrr_if_init);
427#endif /* CONFIG_PROC_FS */ 444#endif /* CONFIG_PROC_FS */
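
On the read side, mtrr_seq_show() above prints one line per live register using the reg%02i format in the consolidated seq_printf() call. A userspace reader that parses that exact format (a sketch; the line format has varied across kernel versions):

#include <stdio.h>

int main(void)
{
        char line[256], type[32], factor;
        unsigned reg, count;
        unsigned long base, basemb, size;
        FILE *f = fopen("/proc/mtrr", "r");

        if (!f) {
                perror("/proc/mtrr");
                return 1;
        }
        /* Matches the seq_printf() format above, e.g.
         * "reg00: base=0x0f8000000 ( 3968MB), size=    4MB, count=1: write-combining" */
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "reg%u: base=0x%lx (%luMB), size=%lu%cB, count=%u: %31[^\n]",
                           &reg, &base, &basemb, &size, &factor, &count, type) == 7)
                        printf("reg %u: base=%#lx (%luMB), %lu%cB, count=%u, %s\n",
                               reg, base, basemb, size, factor, count, type);
        }
        fclose(f);
        return 0;
}
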
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 8fc248b5aeaf..84e83de54575 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -25,43 +25,49 @@
25 Operating System Writer's Guide" (Intel document number 242692), 25 Operating System Writer's Guide" (Intel document number 242692),
26 section 11.11.7 26 section 11.11.7
27 27
28 This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> 28 This was cleaned and made readable by Patrick Mochel <mochel@osdl.org>
29 on 6-7 March 2002. 29 on 6-7 March 2002.
30 Source: Intel Architecture Software Developers Manual, Volume 3: 30 Source: Intel Architecture Software Developers Manual, Volume 3:
31 System Programming Guide; Section 9.11. (1997 edition - PPro). 31 System Programming Guide; Section 9.11. (1997 edition - PPro).
32*/ 32*/
33 33
34#define DEBUG
35
36#include <linux/types.h> /* FIXME: kvm_para.h needs this */
37
38#include <linux/kvm_para.h>
39#include <linux/uaccess.h>
34#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/mutex.h>
35#include <linux/init.h> 42#include <linux/init.h>
43#include <linux/sort.h>
44#include <linux/cpu.h>
36#include <linux/pci.h> 45#include <linux/pci.h>
37#include <linux/smp.h> 46#include <linux/smp.h>
38#include <linux/cpu.h>
39#include <linux/mutex.h>
40#include <linux/sort.h>
41 47
48#include <asm/processor.h>
42#include <asm/e820.h> 49#include <asm/e820.h>
43#include <asm/mtrr.h> 50#include <asm/mtrr.h>
44#include <asm/uaccess.h>
45#include <asm/processor.h>
46#include <asm/msr.h> 51#include <asm/msr.h>
47#include <asm/kvm_para.h> 52
48#include "mtrr.h" 53#include "mtrr.h"
49 54
50u32 num_var_ranges = 0; 55u32 num_var_ranges;
51 56
52unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; 57unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
53static DEFINE_MUTEX(mtrr_mutex); 58static DEFINE_MUTEX(mtrr_mutex);
54 59
55u64 size_or_mask, size_and_mask; 60u64 size_or_mask, size_and_mask;
61static bool mtrr_aps_delayed_init;
56 62
57static struct mtrr_ops * mtrr_ops[X86_VENDOR_NUM] = {}; 63static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
58 64
59struct mtrr_ops * mtrr_if = NULL; 65struct mtrr_ops *mtrr_if;
60 66
61static void set_mtrr(unsigned int reg, unsigned long base, 67static void set_mtrr(unsigned int reg, unsigned long base,
62 unsigned long size, mtrr_type type); 68 unsigned long size, mtrr_type type);
63 69
64void set_mtrr_ops(struct mtrr_ops * ops) 70void set_mtrr_ops(struct mtrr_ops *ops)
65{ 71{
66 if (ops->vendor && ops->vendor < X86_VENDOR_NUM) 72 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
67 mtrr_ops[ops->vendor] = ops; 73 mtrr_ops[ops->vendor] = ops;
@@ -72,30 +78,36 @@ static int have_wrcomb(void)
72{ 78{
73 struct pci_dev *dev; 79 struct pci_dev *dev;
74 u8 rev; 80 u8 rev;
75 81
76 if ((dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL)) != NULL) { 82 dev = pci_get_class(PCI_CLASS_BRIDGE_HOST << 8, NULL);
77 /* ServerWorks LE chipsets < rev 6 have problems with write-combining 83 if (dev != NULL) {
78 Don't allow it and leave room for other chipsets to be tagged */ 84 /*
85 * ServerWorks LE chipsets < rev 6 have problems with
86 * write-combining. Don't allow it and leave room for other
87 * chipsets to be tagged
88 */
79 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS && 89 if (dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
80 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) { 90 dev->device == PCI_DEVICE_ID_SERVERWORKS_LE) {
81 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); 91 pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
82 if (rev <= 5) { 92 if (rev <= 5) {
83 printk(KERN_INFO "mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n"); 93 pr_info("mtrr: Serverworks LE rev < 6 detected. Write-combining disabled.\n");
84 pci_dev_put(dev); 94 pci_dev_put(dev);
85 return 0; 95 return 0;
86 } 96 }
87 } 97 }
88 /* Intel 450NX errata # 23. Non ascending cacheline evictions to 98 /*
89 write combining memory may result in data corruption */ 99 * Intel 450NX errata # 23. Non ascending cacheline evictions to
100 * write combining memory may result in data corruption
101 */
90 if (dev->vendor == PCI_VENDOR_ID_INTEL && 102 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
91 dev->device == PCI_DEVICE_ID_INTEL_82451NX) { 103 dev->device == PCI_DEVICE_ID_INTEL_82451NX) {
92 printk(KERN_INFO "mtrr: Intel 450NX MMC detected. Write-combining disabled.\n"); 104 pr_info("mtrr: Intel 450NX MMC detected. Write-combining disabled.\n");
93 pci_dev_put(dev); 105 pci_dev_put(dev);
94 return 0; 106 return 0;
95 } 107 }
96 pci_dev_put(dev); 108 pci_dev_put(dev);
97 } 109 }
98 return (mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0); 110 return mtrr_if->have_wrcomb ? mtrr_if->have_wrcomb() : 0;
99} 111}
100 112
101/* This function returns the number of variable MTRRs */ 113/* This function returns the number of variable MTRRs */
@@ -103,12 +115,13 @@ static void __init set_num_var_ranges(void)
103{ 115{
104 unsigned long config = 0, dummy; 116 unsigned long config = 0, dummy;
105 117
106 if (use_intel()) { 118 if (use_intel())
107 rdmsr(MSR_MTRRcap, config, dummy); 119 rdmsr(MSR_MTRRcap, config, dummy);
108 } else if (is_cpu(AMD)) 120 else if (is_cpu(AMD))
109 config = 2; 121 config = 2;
110 else if (is_cpu(CYRIX) || is_cpu(CENTAUR)) 122 else if (is_cpu(CYRIX) || is_cpu(CENTAUR))
111 config = 8; 123 config = 8;
124
112 num_var_ranges = config & 0xff; 125 num_var_ranges = config & 0xff;
113} 126}
114 127
@@ -130,10 +143,12 @@ struct set_mtrr_data {
130 mtrr_type smp_type; 143 mtrr_type smp_type;
131}; 144};
132 145
146/**
147 * ipi_handler - Synchronisation handler. Executed by "other" CPUs.
148 *
149 * Returns nothing.
150 */
133static void ipi_handler(void *info) 151static void ipi_handler(void *info)
134/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
135 [RETURNS] Nothing.
136*/
137{ 152{
138#ifdef CONFIG_SMP 153#ifdef CONFIG_SMP
139 struct set_mtrr_data *data = info; 154 struct set_mtrr_data *data = info;
@@ -142,18 +157,22 @@ static void ipi_handler(void *info)
142 local_irq_save(flags); 157 local_irq_save(flags);
143 158
144 atomic_dec(&data->count); 159 atomic_dec(&data->count);
145 while(!atomic_read(&data->gate)) 160 while (!atomic_read(&data->gate))
146 cpu_relax(); 161 cpu_relax();
147 162
148 /* The master has cleared me to execute */ 163 /* The master has cleared me to execute */
149 if (data->smp_reg != ~0U) 164 if (data->smp_reg != ~0U) {
150 mtrr_if->set(data->smp_reg, data->smp_base, 165 mtrr_if->set(data->smp_reg, data->smp_base,
151 data->smp_size, data->smp_type); 166 data->smp_size, data->smp_type);
152 else 167 } else if (mtrr_aps_delayed_init) {
168 /*
169 * Initialize the MTRRs in addition to the synchronisation.
170 */
153 mtrr_if->set_all(); 171 mtrr_if->set_all();
172 }
154 173
155 atomic_dec(&data->count); 174 atomic_dec(&data->count);
156 while(atomic_read(&data->gate)) 175 while (atomic_read(&data->gate))
157 cpu_relax(); 176 cpu_relax();
158 177
159 atomic_dec(&data->count); 178 atomic_dec(&data->count);
@@ -161,7 +180,8 @@ static void ipi_handler(void *info)
161#endif 180#endif
162} 181}
163 182
164static inline int types_compatible(mtrr_type type1, mtrr_type type2) { 183static inline int types_compatible(mtrr_type type1, mtrr_type type2)
184{
165 return type1 == MTRR_TYPE_UNCACHABLE || 185 return type1 == MTRR_TYPE_UNCACHABLE ||
166 type2 == MTRR_TYPE_UNCACHABLE || 186 type2 == MTRR_TYPE_UNCACHABLE ||
167 (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) || 187 (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
@@ -176,10 +196,10 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
176 * @type: mtrr type 196 * @type: mtrr type
177 * 197 *
178 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: 198 * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly:
179 * 199 *
180 * 1. Send IPI to do the following: 200 * 1. Send IPI to do the following:
181 * 2. Disable Interrupts 201 * 2. Disable Interrupts
182 * 3. Wait for all procs to do so 202 * 3. Wait for all procs to do so
183 * 4. Enter no-fill cache mode 203 * 4. Enter no-fill cache mode
184 * 5. Flush caches 204 * 5. Flush caches
185 * 6. Clear PGE bit 205 * 6. Clear PGE bit
@@ -189,26 +209,27 @@ static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
189 * 10. Enable all range registers 209 * 10. Enable all range registers
190 * 11. Flush all TLBs and caches again 210 * 11. Flush all TLBs and caches again
191 * 12. Enter normal cache mode and reenable caching 211 * 12. Enter normal cache mode and reenable caching
192 * 13. Set PGE 212 * 13. Set PGE
193 * 14. Wait for buddies to catch up 213 * 14. Wait for buddies to catch up
194 * 15. Enable interrupts. 214 * 15. Enable interrupts.
195 * 215 *
196 * What does that mean for us? Well, first we set data.count to the number 216 * What does that mean for us? Well, first we set data.count to the number
197 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait 217 * of CPUs. As each CPU disables interrupts, it'll decrement it once. We wait
198 * until it hits 0 and proceed. We set the data.gate flag and reset data.count. 218 * until it hits 0 and proceed. We set the data.gate flag and reset data.count.
199 * Meanwhile, they are waiting for that flag to be set. Once it's set, each 219 * Meanwhile, they are waiting for that flag to be set. Once it's set, each
200 * CPU goes through the transition of updating MTRRs. The CPU vendors may each do it 220 * CPU goes through the transition of updating MTRRs.
201 * differently, so we call mtrr_if->set() callback and let them take care of it. 221 * The CPU vendors may each do it differently,
202 * When they're done, they again decrement data->count and wait for data.gate to 222 * so we call mtrr_if->set() callback and let them take care of it.
203 * be reset. 223 * When they're done, they again decrement data->count and wait for data.gate
204 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag. 224 * to be reset.
225 * When we finish, we wait for data.count to hit 0 and toggle the data.gate flag.
205 * Everyone then enables interrupts and we all continue on. 226 * Everyone then enables interrupts and we all continue on.
206 * 227 *
207 * Note that the mechanism is the same for UP systems, too; all the SMP stuff 228 * Note that the mechanism is the same for UP systems, too; all the SMP stuff
208 * becomes nops. 229 * becomes nops.
209 */ 230 */
210static void set_mtrr(unsigned int reg, unsigned long base, 231static void
211 unsigned long size, mtrr_type type) 232set_mtrr(unsigned int reg, unsigned long base, unsigned long size, mtrr_type type)
212{ 233{
213 struct set_mtrr_data data; 234 struct set_mtrr_data data;
214 unsigned long flags; 235 unsigned long flags;
@@ -218,121 +239,124 @@ static void set_mtrr(unsigned int reg, unsigned long base,
218 data.smp_size = size; 239 data.smp_size = size;
219 data.smp_type = type; 240 data.smp_type = type;
220 atomic_set(&data.count, num_booting_cpus() - 1); 241 atomic_set(&data.count, num_booting_cpus() - 1);
221 /* make sure data.count is visible before unleashing other CPUs */ 242
243 /* Make sure data.count is visible before unleashing other CPUs */
222 smp_wmb(); 244 smp_wmb();
223 atomic_set(&data.gate,0); 245 atomic_set(&data.gate, 0);
224 246
225 /* Start the ball rolling on other CPUs */ 247 /* Start the ball rolling on other CPUs */
226 if (smp_call_function(ipi_handler, &data, 0) != 0) 248 if (smp_call_function(ipi_handler, &data, 0) != 0)
227 panic("mtrr: timed out waiting for other CPUs\n"); 249 panic("mtrr: timed out waiting for other CPUs\n");
228 250
229 local_irq_save(flags); 251 local_irq_save(flags);
230 252
231 while(atomic_read(&data.count)) 253 while (atomic_read(&data.count))
232 cpu_relax(); 254 cpu_relax();
233 255
234 /* ok, reset count and toggle gate */ 256 /* Ok, reset count and toggle gate */
235 atomic_set(&data.count, num_booting_cpus() - 1); 257 atomic_set(&data.count, num_booting_cpus() - 1);
236 smp_wmb(); 258 smp_wmb();
237 atomic_set(&data.gate,1); 259 atomic_set(&data.gate, 1);
238 260
239 /* do our MTRR business */ 261 /* Do our MTRR business */
240 262
241 /* HACK! 263 /*
264 * HACK!
242 * We use this same function to initialize the mtrrs on boot. 265 * We use this same function to initialize the mtrrs on boot.
243 * The state of the boot cpu's mtrrs has been saved, and we want 266 * The state of the boot cpu's mtrrs has been saved, and we want
244 * to replicate across all the APs. 267 * to replicate across all the APs.
245 * If we're doing that @reg is set to something special... 268 * If we're doing that @reg is set to something special...
246 */ 269 */
247 if (reg != ~0U) 270 if (reg != ~0U)
248 mtrr_if->set(reg,base,size,type); 271 mtrr_if->set(reg, base, size, type);
272 else if (!mtrr_aps_delayed_init)
273 mtrr_if->set_all();
249 274
250 /* wait for the others */ 275 /* Wait for the others */
251 while(atomic_read(&data.count)) 276 while (atomic_read(&data.count))
252 cpu_relax(); 277 cpu_relax();
253 278
254 atomic_set(&data.count, num_booting_cpus() - 1); 279 atomic_set(&data.count, num_booting_cpus() - 1);
255 smp_wmb(); 280 smp_wmb();
256 atomic_set(&data.gate,0); 281 atomic_set(&data.gate, 0);
257 282
258 /* 283 /*
259 * Wait here for everyone to have seen the gate change 284 * Wait here for everyone to have seen the gate change
260 * So we're the last ones to touch 'data' 285 * So we're the last ones to touch 'data'
261 */ 286 */
262 while(atomic_read(&data.count)) 287 while (atomic_read(&data.count))
263 cpu_relax(); 288 cpu_relax();
264 289
265 local_irq_restore(flags); 290 local_irq_restore(flags);
266} 291}
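
The count/gate choreography described in the comment above is a reusable spin-style barrier: workers check in by decrementing count, the initiator waits for it to drain, reloads it, flips gate, and both sides then proceed in lockstep. A compressed pthreads model of the first two phases (names are hypothetical; the real code runs a third phase with the gate cleared so the initiator is the last to touch the data):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define WORKERS 3

static atomic_int count;
static atomic_int gate;

/* Worker side of the rendezvous: check in, spin on the gate,
 * do the work, check out. */
static void *worker(void *arg)
{
        (void)arg;
        atomic_fetch_sub(&count, 1);        /* "my IRQs are off" */
        while (!atomic_load(&gate))
                ;                           /* wait for the master */
        puts("worker: updating MTRRs");
        atomic_fetch_sub(&count, 1);        /* "I am done" */
        return NULL;
}

int main(void)
{
        pthread_t t[WORKERS];
        int i;

        atomic_store(&count, WORKERS);
        atomic_store(&gate, 0);
        for (i = 0; i < WORKERS; i++)
                pthread_create(&t[i], NULL, worker, NULL);

        while (atomic_load(&count))         /* wait for check-in */
                ;
        atomic_store(&count, WORKERS);      /* reset before opening gate */
        atomic_store(&gate, 1);
        puts("master: updating MTRRs");
        while (atomic_load(&count))         /* wait for completion */
                ;
        for (i = 0; i < WORKERS; i++)
                pthread_join(t[i], NULL);
        return 0;
}
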
267 292
268/** 293/**
269 * mtrr_add_page - Add a memory type region 294 * mtrr_add_page - Add a memory type region
270 * @base: Physical base address of region in pages (in units of 4 kB!) 295 * @base: Physical base address of region in pages (in units of 4 kB!)
271 * @size: Physical size of region in pages (4 kB) 296 * @size: Physical size of region in pages (4 kB)
272 * @type: Type of MTRR desired 297 * @type: Type of MTRR desired
273 * @increment: If this is true do usage counting on the region 298 * @increment: If this is true do usage counting on the region
274 * 299 *
275 * Memory type region registers control the caching on newer Intel and 300 * Memory type region registers control the caching on newer Intel and
276 * non Intel processors. This function allows drivers to request that an 301 * non Intel processors. This function allows drivers to request that an
277 * MTRR be added. The details and hardware specifics of each processor's 302 * MTRR be added. The details and hardware specifics of each processor's
278 * implementation are hidden from the caller, but nevertheless the 303 * implementation are hidden from the caller, but nevertheless the
279 * caller should expect to need to provide a power of two size on an 304 * caller should expect to need to provide a power of two size on an
280 * equivalent power of two boundary. 305 * equivalent power of two boundary.
281 * 306 *
282 * If the region cannot be added either because all regions are in use 307 * If the region cannot be added either because all regions are in use
283 * or the CPU cannot support it a negative value is returned. On success 308 * or the CPU cannot support it a negative value is returned. On success
284 * the register number for this entry is returned, but should be treated 309 * the register number for this entry is returned, but should be treated
285 * as a cookie only. 310 * as a cookie only.
286 * 311 *
287 * On a multiprocessor machine the changes are made to all processors. 312 * On a multiprocessor machine the changes are made to all processors.
288 * This is required on x86 by the Intel processors. 313 * This is required on x86 by the Intel processors.
289 * 314 *
290 * The available types are 315 * The available types are
291 * 316 *
292 * %MTRR_TYPE_UNCACHABLE - No caching 317 * %MTRR_TYPE_UNCACHABLE - No caching
293 * 318 *
294 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever 319 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
295 * 320 *
296 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts 321 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
297 * 322 *
298 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes 323 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
299 * 324 *
300 * BUGS: Needs a quiet flag for the cases where drivers do not mind 325 * BUGS: Needs a quiet flag for the cases where drivers do not mind
301 * failures and do not wish system log messages to be sent. 326 * failures and do not wish system log messages to be sent.
302 */ 327 */
303 328int mtrr_add_page(unsigned long base, unsigned long size,
304int mtrr_add_page(unsigned long base, unsigned long size,
305 unsigned int type, bool increment) 329 unsigned int type, bool increment)
306{ 330{
331 unsigned long lbase, lsize;
307 int i, replace, error; 332 int i, replace, error;
308 mtrr_type ltype; 333 mtrr_type ltype;
309 unsigned long lbase, lsize;
310 334
311 if (!mtrr_if) 335 if (!mtrr_if)
312 return -ENXIO; 336 return -ENXIO;
313 337
314 if ((error = mtrr_if->validate_add_page(base,size,type))) 338 error = mtrr_if->validate_add_page(base, size, type);
339 if (error)
315 return error; 340 return error;
316 341
317 if (type >= MTRR_NUM_TYPES) { 342 if (type >= MTRR_NUM_TYPES) {
318 printk(KERN_WARNING "mtrr: type: %u invalid\n", type); 343 pr_warning("mtrr: type: %u invalid\n", type);
319 return -EINVAL; 344 return -EINVAL;
320 } 345 }
321 346
322 /* If the type is WC, check that this processor supports it */ 347 /* If the type is WC, check that this processor supports it */
323 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) { 348 if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
324 printk(KERN_WARNING 349 pr_warning("mtrr: your processor doesn't support write-combining\n");
325 "mtrr: your processor doesn't support write-combining\n");
326 return -ENOSYS; 350 return -ENOSYS;
327 } 351 }
328 352
329 if (!size) { 353 if (!size) {
330 printk(KERN_WARNING "mtrr: zero sized request\n"); 354 pr_warning("mtrr: zero sized request\n");
331 return -EINVAL; 355 return -EINVAL;
332 } 356 }
333 357
334 if (base & size_or_mask || size & size_or_mask) { 358 if (base & size_or_mask || size & size_or_mask) {
335 printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); 359 pr_warning("mtrr: base or size exceeds the MTRR width\n");
336 return -EINVAL; 360 return -EINVAL;
337 } 361 }
338 362
@@ -341,36 +365,40 @@ int mtrr_add_page(unsigned long base, unsigned long size,
341 365
342 /* No CPU hotplug when we change MTRR entries */ 366 /* No CPU hotplug when we change MTRR entries */
343 get_online_cpus(); 367 get_online_cpus();
344 /* Search for existing MTRR */ 368
369 /* Search for existing MTRR */
345 mutex_lock(&mtrr_mutex); 370 mutex_lock(&mtrr_mutex);
346 for (i = 0; i < num_var_ranges; ++i) { 371 for (i = 0; i < num_var_ranges; ++i) {
347 mtrr_if->get(i, &lbase, &lsize, &ltype); 372 mtrr_if->get(i, &lbase, &lsize, &ltype);
348 if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase) 373 if (!lsize || base > lbase + lsize - 1 ||
374 base + size - 1 < lbase)
349 continue; 375 continue;
350 /* At this point we know there is some kind of overlap/enclosure */ 376 /*
377 * At this point we know there is some kind of
378 * overlap/enclosure
379 */
351 if (base < lbase || base + size - 1 > lbase + lsize - 1) { 380 if (base < lbase || base + size - 1 > lbase + lsize - 1) {
352 if (base <= lbase && base + size - 1 >= lbase + lsize - 1) { 381 if (base <= lbase &&
382 base + size - 1 >= lbase + lsize - 1) {
353 /* New region encloses an existing region */ 383 /* New region encloses an existing region */
354 if (type == ltype) { 384 if (type == ltype) {
355 replace = replace == -1 ? i : -2; 385 replace = replace == -1 ? i : -2;
356 continue; 386 continue;
357 } 387 } else if (types_compatible(type, ltype))
358 else if (types_compatible(type, ltype))
359 continue; 388 continue;
360 } 389 }
361 printk(KERN_WARNING 390 pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
362 "mtrr: 0x%lx000,0x%lx000 overlaps existing" 391 " 0x%lx000,0x%lx000\n", base, size, lbase,
363 " 0x%lx000,0x%lx000\n", base, size, lbase, 392 lsize);
364 lsize);
365 goto out; 393 goto out;
366 } 394 }
367 /* New region is enclosed by an existing region */ 395 /* New region is enclosed by an existing region */
368 if (ltype != type) { 396 if (ltype != type) {
369 if (types_compatible(type, ltype)) 397 if (types_compatible(type, ltype))
370 continue; 398 continue;
371 printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", 399 pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
372 base, size, mtrr_attrib_to_str(ltype), 400 base, size, mtrr_attrib_to_str(ltype),
373 mtrr_attrib_to_str(type)); 401 mtrr_attrib_to_str(type));
374 goto out; 402 goto out;
375 } 403 }
376 if (increment) 404 if (increment)
@@ -378,7 +406,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
378 error = i; 406 error = i;
379 goto out; 407 goto out;
380 } 408 }
381 /* Search for an empty MTRR */ 409 /* Search for an empty MTRR */
382 i = mtrr_if->get_free_region(base, size, replace); 410 i = mtrr_if->get_free_region(base, size, replace);
383 if (i >= 0) { 411 if (i >= 0) {
384 set_mtrr(i, base, size, type); 412 set_mtrr(i, base, size, type);
@@ -393,8 +421,9 @@ int mtrr_add_page(unsigned long base, unsigned long size,
393 mtrr_usage_table[replace] = 0; 421 mtrr_usage_table[replace] = 0;
394 } 422 }
395 } 423 }
396 } else 424 } else {
397 printk(KERN_INFO "mtrr: no more MTRRs available\n"); 425 pr_info("mtrr: no more MTRRs available\n");
426 }
398 error = i; 427 error = i;
399 out: 428 out:
400 mutex_unlock(&mtrr_mutex); 429 mutex_unlock(&mtrr_mutex);
@@ -405,10 +434,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
405static int mtrr_check(unsigned long base, unsigned long size) 434static int mtrr_check(unsigned long base, unsigned long size)
406{ 435{
407 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) { 436 if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
408 printk(KERN_WARNING 437 pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
409 "mtrr: size and base must be multiples of 4 kiB\n"); 438 pr_debug("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
410 printk(KERN_DEBUG
411 "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
412 dump_stack(); 439 dump_stack();
413 return -1; 440 return -1;
414 } 441 }
@@ -416,66 +443,64 @@ static int mtrr_check(unsigned long base, unsigned long size)
416} 443}
417 444
418/** 445/**
419 * mtrr_add - Add a memory type region 446 * mtrr_add - Add a memory type region
420 * @base: Physical base address of region 447 * @base: Physical base address of region
421 * @size: Physical size of region 448 * @size: Physical size of region
422 * @type: Type of MTRR desired 449 * @type: Type of MTRR desired
423 * @increment: If this is true do usage counting on the region 450 * @increment: If this is true do usage counting on the region
424 * 451 *
425 * Memory type region registers control the caching on newer Intel and 452 * Memory type region registers control the caching on newer Intel and
426 * non-Intel processors. This function allows drivers to request that 453 * non-Intel processors. This function allows drivers to request that
427 * an MTRR be added. The details and hardware specifics of each processor's 454 * an MTRR be added. The details and hardware specifics of each processor's
428 * implementation are hidden from the caller, but nevertheless the 455 * implementation are hidden from the caller, but nevertheless the
429 * caller should expect to need to provide a power of two size on an 456 * caller should expect to need to provide a power of two size on an
430 * equivalent power of two boundary. 457 * equivalent power of two boundary.
431 * 458 *
432 * If the region cannot be added either because all regions are in use 459 * If the region cannot be added either because all regions are in use
433 * or the CPU cannot support it, a negative value is returned. On success 460 * or the CPU cannot support it, a negative value is returned. On success
434 * the register number for this entry is returned, but should be treated 461 * the register number for this entry is returned, but should be treated
435 * as a cookie only. 462 * as a cookie only.
436 * 463 *
437 * On a multiprocessor machine the changes are made to all processors. 464 * On a multiprocessor machine the changes are made to all processors.
438 * This is required on x86 by Intel processors. 465 * This is required on x86 by Intel processors.
439 * 466 *
440 * The available types are 467 * The available types are
441 * 468 *
442 * %MTRR_TYPE_UNCACHABLE - No caching 469 * %MTRR_TYPE_UNCACHABLE - No caching
443 * 470 *
444 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever 471 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
445 * 472 *
446 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts 473 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
447 * 474 *
448 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes 475 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
449 * 476 *
450 * BUGS: Needs a quiet flag for the cases where drivers do not mind 477 * BUGS: Needs a quiet flag for the cases where drivers do not mind
451 * failures and do not wish system log messages to be sent. 478 * failures and do not wish system log messages to be sent.
452 */ 479 */
453 480int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
454int 481 bool increment)
455mtrr_add(unsigned long base, unsigned long size, unsigned int type,
456 bool increment)
457{ 482{
458 if (mtrr_check(base, size)) 483 if (mtrr_check(base, size))
459 return -EINVAL; 484 return -EINVAL;
460 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type, 485 return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
461 increment); 486 increment);
462} 487}
488EXPORT_SYMBOL(mtrr_add);
463 489
464/** 490/**
465 * mtrr_del_page - delete a memory type region 491 * mtrr_del_page - delete a memory type region
466 * @reg: Register returned by mtrr_add 492 * @reg: Register returned by mtrr_add
467 * @base: Physical base address 493 * @base: Physical base address
468 * @size: Size of region 494 * @size: Size of region
469 * 495 *
470 * If register is supplied then base and size are ignored. This is 496 * If register is supplied then base and size are ignored. This is
471 * how drivers should call it. 497 * how drivers should call it.
472 * 498 *
473 * Releases an MTRR region. If the usage count drops to zero the 499 * Releases an MTRR region. If the usage count drops to zero the
474 * register is freed and the region returns to default state. 500 * register is freed and the region returns to default state.
475 * On success the register is returned; on failure, a negative error 501 * On success the register is returned; on failure, a negative error
476 * code. 502 * code.
477 */ 503 */
478
479int mtrr_del_page(int reg, unsigned long base, unsigned long size) 504int mtrr_del_page(int reg, unsigned long base, unsigned long size)
480{ 505{
481 int i, max; 506 int i, max;
@@ -500,22 +525,22 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
500 } 525 }
501 } 526 }
502 if (reg < 0) { 527 if (reg < 0) {
503 printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base, 528 pr_debug("mtrr: no MTRR for %lx000,%lx000 found\n",
504 size); 529 base, size);
505 goto out; 530 goto out;
506 } 531 }
507 } 532 }
508 if (reg >= max) { 533 if (reg >= max) {
509 printk(KERN_WARNING "mtrr: register: %d too big\n", reg); 534 pr_warning("mtrr: register: %d too big\n", reg);
510 goto out; 535 goto out;
511 } 536 }
512 mtrr_if->get(reg, &lbase, &lsize, &ltype); 537 mtrr_if->get(reg, &lbase, &lsize, &ltype);
513 if (lsize < 1) { 538 if (lsize < 1) {
514 printk(KERN_WARNING "mtrr: MTRR %d not used\n", reg); 539 pr_warning("mtrr: MTRR %d not used\n", reg);
515 goto out; 540 goto out;
516 } 541 }
517 if (mtrr_usage_table[reg] < 1) { 542 if (mtrr_usage_table[reg] < 1) {
518 printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg); 543 pr_warning("mtrr: reg: %d has count=0\n", reg);
519 goto out; 544 goto out;
520 } 545 }
521 if (--mtrr_usage_table[reg] < 1) 546 if (--mtrr_usage_table[reg] < 1)
@@ -526,33 +551,31 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
526 put_online_cpus(); 551 put_online_cpus();
527 return error; 552 return error;
528} 553}
554
529/** 555/**
530 * mtrr_del - delete a memory type region 556 * mtrr_del - delete a memory type region
531 * @reg: Register returned by mtrr_add 557 * @reg: Register returned by mtrr_add
532 * @base: Physical base address 558 * @base: Physical base address
533 * @size: Size of region 559 * @size: Size of region
534 * 560 *
535 * If register is supplied then base and size are ignored. This is 561 * If register is supplied then base and size are ignored. This is
536 * how drivers should call it. 562 * how drivers should call it.
537 * 563 *
538 * Releases an MTRR region. If the usage count drops to zero the 564 * Releases an MTRR region. If the usage count drops to zero the
539 * register is freed and the region returns to default state. 565 * register is freed and the region returns to default state.
540 * On success the register is returned; on failure, a negative error 566 * On success the register is returned; on failure, a negative error
541 * code. 567 * code.
542 */ 568 */
543 569int mtrr_del(int reg, unsigned long base, unsigned long size)
544int
545mtrr_del(int reg, unsigned long base, unsigned long size)
546{ 570{
547 if (mtrr_check(base, size)) 571 if (mtrr_check(base, size))
548 return -EINVAL; 572 return -EINVAL;
549 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT); 573 return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
550} 574}
551
552EXPORT_SYMBOL(mtrr_add);
553EXPORT_SYMBOL(mtrr_del); 575EXPORT_SYMBOL(mtrr_del);
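For reference, a driver pairs these two entry points roughly as follows. This is a minimal sketch, not part of this patch: the base address, size, and framebuffer scenario are hypothetical, and both values must be PAGE_SIZE-aligned or mtrr_check() above rejects the request.

#include <linux/kernel.h>
#include <asm/mtrr.h>

/* Hypothetical driver example: map a 16 MiB framebuffer write-combined. */
static int fb_mtrr = -1;

static void example_enable_wc(void)
{
	fb_mtrr = mtrr_add(0xd0000000UL, 0x1000000UL,
			   MTRR_TYPE_WRCOMB, true);
	if (fb_mtrr < 0)
		pr_info("example: write-combining not available\n");
}

static void example_disable_wc(void)
{
	if (fb_mtrr >= 0)
		mtrr_del(fb_mtrr, 0xd0000000UL, 0x1000000UL);
	fb_mtrr = -1;
}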
554 576
555/* HACK ALERT! 577/*
578 * HACK ALERT!
556 * These should be called implicitly, but we can't yet until all the initcall 579 * These should be called implicitly, but we can't yet until all the initcall
557 * stuff is done... 580 * stuff is done...
558 */ 581 */
@@ -576,29 +599,28 @@ struct mtrr_value {
576 599
577static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES]; 600static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
578 601
579static int mtrr_save(struct sys_device * sysdev, pm_message_t state) 602static int mtrr_save(struct sys_device *sysdev, pm_message_t state)
580{ 603{
581 int i; 604 int i;
582 605
583 for (i = 0; i < num_var_ranges; i++) { 606 for (i = 0; i < num_var_ranges; i++) {
584 mtrr_if->get(i, 607 mtrr_if->get(i, &mtrr_value[i].lbase,
585 &mtrr_value[i].lbase, 608 &mtrr_value[i].lsize,
586 &mtrr_value[i].lsize, 609 &mtrr_value[i].ltype);
587 &mtrr_value[i].ltype);
588 } 610 }
589 return 0; 611 return 0;
590} 612}
591 613
592static int mtrr_restore(struct sys_device * sysdev) 614static int mtrr_restore(struct sys_device *sysdev)
593{ 615{
594 int i; 616 int i;
595 617
596 for (i = 0; i < num_var_ranges; i++) { 618 for (i = 0; i < num_var_ranges; i++) {
597 if (mtrr_value[i].lsize) 619 if (mtrr_value[i].lsize) {
598 set_mtrr(i, 620 set_mtrr(i, mtrr_value[i].lbase,
599 mtrr_value[i].lbase, 621 mtrr_value[i].lsize,
600 mtrr_value[i].lsize, 622 mtrr_value[i].ltype);
601 mtrr_value[i].ltype); 623 }
602 } 624 }
603 return 0; 625 return 0;
604} 626}
@@ -615,26 +637,29 @@ int __initdata changed_by_mtrr_cleanup;
615/** 637/**
616 * mtrr_bp_init - initialize mtrrs on the boot CPU 638 * mtrr_bp_init - initialize mtrrs on the boot CPU
617 * 639 *
618 * This needs to be called early, before any of the other CPUs are 640 * This needs to be called early, before any of the other CPUs are
619 * initialized (i.e. before smp_init()). 641 * initialized (i.e. before smp_init()).
620 * 642 *
621 */ 643 */
622void __init mtrr_bp_init(void) 644void __init mtrr_bp_init(void)
623{ 645{
624 u32 phys_addr; 646 u32 phys_addr;
647
625 init_ifs(); 648 init_ifs();
626 649
627 phys_addr = 32; 650 phys_addr = 32;
628 651
629 if (cpu_has_mtrr) { 652 if (cpu_has_mtrr) {
630 mtrr_if = &generic_mtrr_ops; 653 mtrr_if = &generic_mtrr_ops;
631 size_or_mask = 0xff000000; /* 36 bits */ 654 size_or_mask = 0xff000000; /* 36 bits */
632 size_and_mask = 0x00f00000; 655 size_and_mask = 0x00f00000;
633 phys_addr = 36; 656 phys_addr = 36;
634 657
635 /* This is an AMD specific MSR, but we assume(hope?) that 658 /*
636 Intel will implement it to when they extend the address 659 * This is an AMD specific MSR, but we assume(hope?) that
637 bus of the Xeon. */ 660 * Intel will implement it too when they extend the address
661 * bus of the Xeon.
662 */
638 if (cpuid_eax(0x80000000) >= 0x80000008) { 663 if (cpuid_eax(0x80000000) >= 0x80000008) {
639 phys_addr = cpuid_eax(0x80000008) & 0xff; 664 phys_addr = cpuid_eax(0x80000008) & 0xff;
640 /* CPUID workaround for Intel 0F33/0F34 CPU */ 665 /* CPUID workaround for Intel 0F33/0F34 CPU */
@@ -649,9 +674,11 @@ void __init mtrr_bp_init(void)
649 size_and_mask = ~size_or_mask & 0xfffff00000ULL; 674 size_and_mask = ~size_or_mask & 0xfffff00000ULL;
650 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && 675 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
651 boot_cpu_data.x86 == 6) { 676 boot_cpu_data.x86 == 6) {
652 /* VIA C* family have Intel style MTRRs, but 677 /*
653 don't support PAE */ 678 * VIA C* family have Intel style MTRRs,
654 size_or_mask = 0xfff00000; /* 32 bits */ 679 * but don't support PAE
680 */
681 size_or_mask = 0xfff00000; /* 32 bits */
655 size_and_mask = 0; 682 size_and_mask = 0;
656 phys_addr = 32; 683 phys_addr = 32;
657 } 684 }
@@ -694,30 +721,28 @@ void __init mtrr_bp_init(void)
694 changed_by_mtrr_cleanup = 1; 721 changed_by_mtrr_cleanup = 1;
695 mtrr_if->set_all(); 722 mtrr_if->set_all();
696 } 723 }
697
698 } 724 }
699 } 725 }
700} 726}
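To make the mask values above concrete: base and size are page-frame numbers, so a CPU with a 36-bit physical address space can express PFNs 0 through 2^(36-12) - 1 = 0x00ffffff, and size_or_mask = 0xff000000 has ones in exactly the PFN bits such a CPU cannot encode. A sketch of the check this feeds; the sample address is made up:

/* A region starting at 64 GiB has PFN 2^36 >> 12 = 0x01000000: */
unsigned long base_pfn = 0x01000000;

if (base_pfn & 0xff000000)	/* true: bit 24 set, beyond 36 bits */
	;	/* mtrr_add_page() rejects it with -EINVAL:
		 * "base or size exceeds the MTRR width" */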
701 727
702void mtrr_ap_init(void) 728void mtrr_ap_init(void)
703{ 729{
704 unsigned long flags; 730 if (!use_intel() || mtrr_aps_delayed_init)
705
706 if (!mtrr_if || !use_intel())
707 return; 731 return;
708 /* 732 /*
709 * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, 733 * Ideally we should hold mtrr_mutex here to avoid mtrr entries
710 * but this routine will be called in cpu boot time, holding the lock 734 * changed, but this routine will be called in cpu boot time,
711 * breaks it. This routine is called in two cases: 1.very earily time 735 * holding the lock breaks it.
712 * of software resume, when there absolutely isn't mtrr entry changes; 736 *
713 * 2.cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug lock to 737 * This routine is called in two cases:
714 * prevent mtrr entry changes 738 *
739 * 1. very early in software resume, when there are absolutely
740 * no MTRR entry changes;
741 *
742 * 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
743 * lock to prevent mtrr entry changes
715 */ 744 */
716 local_irq_save(flags); 745 set_mtrr(~0U, 0, 0, 0);
717
718 mtrr_if->set_all();
719
720 local_irq_restore(flags);
721} 746}
722 747
723/** 748/**
@@ -728,23 +753,55 @@ void mtrr_save_state(void)
728 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); 753 smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1);
729} 754}
730 755
756void set_mtrr_aps_delayed_init(void)
757{
758 if (!use_intel())
759 return;
760
761 mtrr_aps_delayed_init = true;
762}
763
764/*
765 * MTRR initialization for all AP's
766 */
767void mtrr_aps_init(void)
768{
769 if (!use_intel())
770 return;
771
772 set_mtrr(~0U, 0, 0, 0);
773 mtrr_aps_delayed_init = false;
774}
775
776void mtrr_bp_restore(void)
777{
778 if (!use_intel())
779 return;
780
781 mtrr_if->set_all();
782}
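Taken together, the helpers above imply a calling sequence for CPU bring-up and resume. The sketch below is an assumption drawn from these helpers; the actual call sites are not part of this hunk. Note that set_mtrr(~0U, 0, 0, 0) uses the ~0U register cookie purely as a rendezvous that makes every online CPU rewrite its MTRR state.

/* Assumed sequencing (call sites not shown in this patch): */
static void example_bringup_aps(void)
{
	set_mtrr_aps_delayed_init();	/* APs skip per-CPU sync in mtrr_ap_init() */
	/* ... online the APs here; each runs mtrr_ap_init() ... */
	mtrr_aps_init();		/* one rendezvous syncs all CPUs at once */
}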
783
731static int __init mtrr_init_finialize(void) 784static int __init mtrr_init_finialize(void)
732{ 785{
733 if (!mtrr_if) 786 if (!mtrr_if)
734 return 0; 787 return 0;
788
735 if (use_intel()) { 789 if (use_intel()) {
736 if (!changed_by_mtrr_cleanup) 790 if (!changed_by_mtrr_cleanup)
737 mtrr_state_warn(); 791 mtrr_state_warn();
738 } else { 792 return 0;
739 /* The CPUs haven't MTRR and seem to not support SMP. They have
740 * specific drivers, we use a tricky method to support
741 * suspend/resume for them.
742 * TBD: is there any system with such CPU which supports
743 * suspend/resume? if no, we should remove the code.
744 */
745 sysdev_driver_register(&cpu_sysdev_class,
746 &mtrr_sysdev_driver);
747 } 793 }
794
795 /*
796 * These CPUs have no MTRR and seem not to support SMP. They have
797 * specific drivers; we use a tricky method to support
798 * suspend/resume for them.
799 *
800 * TBD: is there any system with such CPU which supports
801 * suspend/resume? If no, we should remove the code.
802 */
803 sysdev_driver_register(&cpu_sysdev_class, &mtrr_sysdev_driver);
804
748 return 0; 805 return 0;
749} 806}
750subsys_initcall(mtrr_init_finialize); 807subsys_initcall(mtrr_init_finialize);
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 7538b767f206..a501dee9a87a 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * local mtrr defines. 2 * local MTRR defines.
3 */ 3 */
4 4
5#include <linux/types.h> 5#include <linux/types.h>
@@ -14,13 +14,12 @@ extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
14struct mtrr_ops { 14struct mtrr_ops {
15 u32 vendor; 15 u32 vendor;
16 u32 use_intel_if; 16 u32 use_intel_if;
17// void (*init)(void);
18 void (*set)(unsigned int reg, unsigned long base, 17 void (*set)(unsigned int reg, unsigned long base,
19 unsigned long size, mtrr_type type); 18 unsigned long size, mtrr_type type);
20 void (*set_all)(void); 19 void (*set_all)(void);
21 20
22 void (*get)(unsigned int reg, unsigned long *base, 21 void (*get)(unsigned int reg, unsigned long *base,
23 unsigned long *size, mtrr_type * type); 22 unsigned long *size, mtrr_type *type);
24 int (*get_free_region)(unsigned long base, unsigned long size, 23 int (*get_free_region)(unsigned long base, unsigned long size,
25 int replace_reg); 24 int replace_reg);
26 int (*validate_add_page)(unsigned long base, unsigned long size, 25 int (*validate_add_page)(unsigned long base, unsigned long size,
@@ -39,11 +38,11 @@ extern int positive_have_wrcomb(void);
39 38
40/* library functions for processor-specific routines */ 39/* library functions for processor-specific routines */
41struct set_mtrr_context { 40struct set_mtrr_context {
42 unsigned long flags; 41 unsigned long flags;
43 unsigned long cr4val; 42 unsigned long cr4val;
44 u32 deftype_lo; 43 u32 deftype_lo;
45 u32 deftype_hi; 44 u32 deftype_hi;
46 u32 ccr3; 45 u32 ccr3;
47}; 46};
48 47
49void set_mtrr_done(struct set_mtrr_context *ctxt); 48void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -54,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
54 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi); 53 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
55void get_mtrr_state(void); 54void get_mtrr_state(void);
56 55
57extern void set_mtrr_ops(struct mtrr_ops * ops); 56extern void set_mtrr_ops(struct mtrr_ops *ops);
58 57
59extern u64 size_or_mask, size_and_mask; 58extern u64 size_or_mask, size_and_mask;
60extern struct mtrr_ops * mtrr_if; 59extern struct mtrr_ops *mtrr_if;
61 60
62#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd) 61#define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
63#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1) 62#define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
diff --git a/arch/x86/kernel/cpu/mtrr/state.c b/arch/x86/kernel/cpu/mtrr/state.c
index 1f5fb1588d1f..dfc80b4e6b0d 100644
--- a/arch/x86/kernel/cpu/mtrr/state.c
+++ b/arch/x86/kernel/cpu/mtrr/state.c
@@ -1,24 +1,25 @@
1#include <linux/mm.h>
2#include <linux/init.h> 1#include <linux/init.h>
3#include <asm/io.h> 2#include <linux/io.h>
4#include <asm/mtrr.h> 3#include <linux/mm.h>
5#include <asm/msr.h> 4
6#include <asm/processor-cyrix.h> 5#include <asm/processor-cyrix.h>
7#include <asm/processor-flags.h> 6#include <asm/processor-flags.h>
8#include "mtrr.h" 7#include <asm/mtrr.h>
8#include <asm/msr.h>
9 9
10#include "mtrr.h"
10 11
11/* Put the processor into a state where MTRRs can be safely set */ 12/* Put the processor into a state where MTRRs can be safely set */
12void set_mtrr_prepare_save(struct set_mtrr_context *ctxt) 13void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
13{ 14{
14 unsigned int cr0; 15 unsigned int cr0;
15 16
16 /* Disable interrupts locally */ 17 /* Disable interrupts locally */
17 local_irq_save(ctxt->flags); 18 local_irq_save(ctxt->flags);
18 19
19 if (use_intel() || is_cpu(CYRIX)) { 20 if (use_intel() || is_cpu(CYRIX)) {
20 21
21 /* Save value of CR4 and clear Page Global Enable (bit 7) */ 22 /* Save value of CR4 and clear Page Global Enable (bit 7) */
22 if (cpu_has_pge) { 23 if (cpu_has_pge) {
23 ctxt->cr4val = read_cr4(); 24 ctxt->cr4val = read_cr4();
24 write_cr4(ctxt->cr4val & ~X86_CR4_PGE); 25 write_cr4(ctxt->cr4val & ~X86_CR4_PGE);
@@ -33,50 +34,61 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt)
33 write_cr0(cr0); 34 write_cr0(cr0);
34 wbinvd(); 35 wbinvd();
35 36
36 if (use_intel()) 37 if (use_intel()) {
37 /* Save MTRR state */ 38 /* Save MTRR state */
38 rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); 39 rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
39 else 40 } else {
40 /* Cyrix ARRs - everything else were excluded at the top */ 41 /*
42 * Cyrix ARRs -
43 * everything else was excluded at the top
44 */
41 ctxt->ccr3 = getCx86(CX86_CCR3); 45 ctxt->ccr3 = getCx86(CX86_CCR3);
46 }
42 } 47 }
43} 48}
44 49
45void set_mtrr_cache_disable(struct set_mtrr_context *ctxt) 50void set_mtrr_cache_disable(struct set_mtrr_context *ctxt)
46{ 51{
47 if (use_intel()) 52 if (use_intel()) {
48 /* Disable MTRRs, and set the default type to uncached */ 53 /* Disable MTRRs, and set the default type to uncached */
49 mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, 54 mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL,
50 ctxt->deftype_hi); 55 ctxt->deftype_hi);
51 else if (is_cpu(CYRIX)) 56 } else {
52 /* Cyrix ARRs - everything else were excluded at the top */ 57 if (is_cpu(CYRIX)) {
53 setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10); 58 /* Cyrix ARRs - everything else was excluded at the top */
59 setCx86(CX86_CCR3, (ctxt->ccr3 & 0x0f) | 0x10);
60 }
61 }
54} 62}
55 63
56/* Restore the processor after a set_mtrr_prepare */ 64/* Restore the processor after a set_mtrr_prepare */
57void set_mtrr_done(struct set_mtrr_context *ctxt) 65void set_mtrr_done(struct set_mtrr_context *ctxt)
58{ 66{
59 if (use_intel() || is_cpu(CYRIX)) { 67 if (use_intel() || is_cpu(CYRIX)) {
60 68
61 /* Flush caches and TLBs */ 69 /* Flush caches and TLBs */
62 wbinvd(); 70 wbinvd();
63 71
64 /* Restore MTRRdefType */ 72 /* Restore MTRRdefType */
65 if (use_intel()) 73 if (use_intel()) {
66 /* Intel (P6) standard MTRRs */ 74 /* Intel (P6) standard MTRRs */
67 mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi); 75 mtrr_wrmsr(MSR_MTRRdefType, ctxt->deftype_lo,
68 else 76 ctxt->deftype_hi);
69 /* Cyrix ARRs - everything else was excluded at the top */ 77 } else {
78 /*
79 * Cyrix ARRs -
80 * everything else was excluded at the top
81 */
70 setCx86(CX86_CCR3, ctxt->ccr3); 82 setCx86(CX86_CCR3, ctxt->ccr3);
83 }
71 84
72 /* Enable caches */ 85 /* Enable caches */
73 write_cr0(read_cr0() & 0xbfffffff); 86 write_cr0(read_cr0() & 0xbfffffff);
74 87
75 /* Restore value of CR4 */ 88 /* Restore value of CR4 */
76 if (cpu_has_pge) 89 if (cpu_has_pge)
77 write_cr4(ctxt->cr4val); 90 write_cr4(ctxt->cr4val);
78 } 91 }
79 /* Re-enable interrupts locally (if enabled previously) */ 92 /* Re-enable interrupts locally (if enabled previously) */
80 local_irq_restore(ctxt->flags); 93 local_irq_restore(ctxt->flags);
81} 94}
82
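Condensed, the three functions in this file implement a single enter/update/exit protocol for touching MTRRs safely. A summary sketch, not a drop-in replacement for the real callers:

static void example_mtrr_update(void)
{
	struct set_mtrr_context ctxt;

	set_mtrr_prepare_save(&ctxt);	/* irqs off, PGE cleared, caches
					 * disabled via CR0.CD, wbinvd,
					 * MTRRdefType/CCR3 saved */
	set_mtrr_cache_disable(&ctxt);	/* MTRRs (or Cyrix ARRs) disabled,
					 * default type forced uncached */
	/* ... program the fixed/variable ranges here ... */
	set_mtrr_done(&ctxt);		/* wbinvd, state restored, caches
					 * and interrupts back on */
}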
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 6a0e71b38126..dbdf712fae9e 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -1211,7 +1211,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
1211 x86_pmu_disable_counter(hwc, idx); 1211 x86_pmu_disable_counter(hwc, idx);
1212} 1212}
1213 1213
1214static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); 1214static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1215 1215
1216/* 1216/*
1217 * Set the next IRQ period, based on the hwc->period_left value. 1217 * Set the next IRQ period, based on the hwc->period_left value.
@@ -1253,7 +1253,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1253 if (left > x86_pmu.max_period) 1253 if (left > x86_pmu.max_period)
1254 left = x86_pmu.max_period; 1254 left = x86_pmu.max_period;
1255 1255
1256 per_cpu(prev_left[idx], smp_processor_id()) = left; 1256 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1257 1257
1258 /* 1258 /*
1259 * The hw counter starts counting from this counter offset, 1259 * The hw counter starts counting from this counter offset,
@@ -1470,7 +1470,7 @@ void perf_counter_print_debug(void)
1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1470 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1471 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1472 1472
1473 prev_left = per_cpu(prev_left[idx], cpu); 1473 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1474 1474
1475 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", 1475 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1476 cpu, idx, pmc_ctrl); 1476 cpu, idx, pmc_ctrl);
@@ -2124,8 +2124,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2124 entry->ip[entry->nr++] = ip; 2124 entry->ip[entry->nr++] = ip;
2125} 2125}
2126 2126
2127static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); 2127static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2128static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); 2128static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2129static DEFINE_PER_CPU(int, in_nmi_frame); 2129static DEFINE_PER_CPU(int, in_nmi_frame);
2130 2130
2131 2131
@@ -2278,9 +2278,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2278 struct perf_callchain_entry *entry; 2278 struct perf_callchain_entry *entry;
2279 2279
2280 if (in_nmi()) 2280 if (in_nmi())
2281 entry = &__get_cpu_var(nmi_entry); 2281 entry = &__get_cpu_var(pmc_nmi_entry);
2282 else 2282 else
2283 entry = &__get_cpu_var(irq_entry); 2283 entry = &__get_cpu_var(pmc_irq_entry);
2284 2284
2285 entry->nr = 0; 2285 entry->nr = 0;
2286 2286
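The renames above are not cosmetic: per-CPU symbols share a single global namespace, so generic names like prev_left or irq_entry can collide with identically named per-CPU variables elsewhere in the kernel. The pattern in isolation, including the array-type spelling that DEFINE_PER_CPU accepts:

/* Declaration and access pattern used above: */
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

static void example(int idx, int cpu)
{
	per_cpu(pmc_prev_left[idx], cpu) = 0;		/* any CPU's copy  */
	__get_cpu_var(pmc_prev_left)[idx] = 0;		/* this CPU's copy */
}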
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index e60ed740d2b3..392bea43b890 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -68,16 +68,16 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
68 /* returns the bit offset of the performance counter register */ 68 /* returns the bit offset of the performance counter register */
69 switch (boot_cpu_data.x86_vendor) { 69 switch (boot_cpu_data.x86_vendor) {
70 case X86_VENDOR_AMD: 70 case X86_VENDOR_AMD:
71 return (msr - MSR_K7_PERFCTR0); 71 return msr - MSR_K7_PERFCTR0;
72 case X86_VENDOR_INTEL: 72 case X86_VENDOR_INTEL:
73 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) 73 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
74 return (msr - MSR_ARCH_PERFMON_PERFCTR0); 74 return msr - MSR_ARCH_PERFMON_PERFCTR0;
75 75
76 switch (boot_cpu_data.x86) { 76 switch (boot_cpu_data.x86) {
77 case 6: 77 case 6:
78 return (msr - MSR_P6_PERFCTR0); 78 return msr - MSR_P6_PERFCTR0;
79 case 15: 79 case 15:
80 return (msr - MSR_P4_BPU_PERFCTR0); 80 return msr - MSR_P4_BPU_PERFCTR0;
81 } 81 }
82 } 82 }
83 return 0; 83 return 0;
@@ -92,16 +92,16 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
92 /* returns the bit offset of the event selection register */ 92 /* returns the bit offset of the event selection register */
93 switch (boot_cpu_data.x86_vendor) { 93 switch (boot_cpu_data.x86_vendor) {
94 case X86_VENDOR_AMD: 94 case X86_VENDOR_AMD:
95 return (msr - MSR_K7_EVNTSEL0); 95 return msr - MSR_K7_EVNTSEL0;
96 case X86_VENDOR_INTEL: 96 case X86_VENDOR_INTEL:
97 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) 97 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
98 return (msr - MSR_ARCH_PERFMON_EVENTSEL0); 98 return msr - MSR_ARCH_PERFMON_EVENTSEL0;
99 99
100 switch (boot_cpu_data.x86) { 100 switch (boot_cpu_data.x86) {
101 case 6: 101 case 6:
102 return (msr - MSR_P6_EVNTSEL0); 102 return msr - MSR_P6_EVNTSEL0;
103 case 15: 103 case 15:
104 return (msr - MSR_P4_BSU_ESCR0); 104 return msr - MSR_P4_BSU_ESCR0;
105 } 105 }
106 } 106 }
107 return 0; 107 return 0;
@@ -113,7 +113,7 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
113{ 113{
114 BUG_ON(counter > NMI_MAX_COUNTER_BITS); 114 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
115 115
116 return (!test_bit(counter, perfctr_nmi_owner)); 116 return !test_bit(counter, perfctr_nmi_owner);
117} 117}
118 118
119/* checks an MSR for availability */ 119/* checks an MSR for availability */
@@ -124,7 +124,7 @@ int avail_to_resrv_perfctr_nmi(unsigned int msr)
124 counter = nmi_perfctr_msr_to_bit(msr); 124 counter = nmi_perfctr_msr_to_bit(msr);
125 BUG_ON(counter > NMI_MAX_COUNTER_BITS); 125 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
126 126
127 return (!test_bit(counter, perfctr_nmi_owner)); 127 return !test_bit(counter, perfctr_nmi_owner);
128} 128}
129EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit); 129EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
130 130
@@ -237,7 +237,7 @@ static unsigned int adjust_for_32bit_ctr(unsigned int hz)
237 */ 237 */
238 counter_val = (u64)cpu_khz * 1000; 238 counter_val = (u64)cpu_khz * 1000;
239 do_div(counter_val, retval); 239 do_div(counter_val, retval);
240 if (counter_val > 0x7fffffffULL) { 240 if (counter_val > 0x7fffffffULL) {
241 u64 count = (u64)cpu_khz * 1000; 241 u64 count = (u64)cpu_khz * 1000;
242 do_div(count, 0x7fffffffUL); 242 do_div(count, 0x7fffffffUL);
243 retval = count + 1; 243 retval = count + 1;
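Worked numbers for this clamp: on a 3 GHz CPU asked for nmi_hz = 1, the per-period count would be 3e9 cycles, which does not fit in the 31 bits these counters can hold, so the rate is raised until each period fits.

/* Example: cpu_khz = 3000000 (3 GHz), requested nmi_hz = 1	*/
/*   counter_val = 3000000ULL * 1000      = 3000000000		*/
/*   3000000000 > 0x7fffffff (2147483647) -> clamp		*/
/*   count  = 3000000000 / 0x7fffffff     = 1			*/
/*   retval = 1 + 1                       = 2 Hz		*/
/* The watchdog now fires at 2 Hz; each period (1.5e9) fits.	*/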
@@ -251,7 +251,7 @@ static void write_watchdog_counter(unsigned int perfctr_msr,
251 u64 count = (u64)cpu_khz * 1000; 251 u64 count = (u64)cpu_khz * 1000;
252 252
253 do_div(count, nmi_hz); 253 do_div(count, nmi_hz);
254 if(descr) 254 if (descr)
255 pr_debug("setting %s to -0x%08Lx\n", descr, count); 255 pr_debug("setting %s to -0x%08Lx\n", descr, count);
256 wrmsrl(perfctr_msr, 0 - count); 256 wrmsrl(perfctr_msr, 0 - count);
257} 257}
@@ -262,7 +262,7 @@ static void write_watchdog_counter32(unsigned int perfctr_msr,
262 u64 count = (u64)cpu_khz * 1000; 262 u64 count = (u64)cpu_khz * 1000;
263 263
264 do_div(count, nmi_hz); 264 do_div(count, nmi_hz);
265 if(descr) 265 if (descr)
266 pr_debug("setting %s to -0x%08Lx\n", descr, count); 266 pr_debug("setting %s to -0x%08Lx\n", descr, count);
267 wrmsr(perfctr_msr, (u32)(-count), 0); 267 wrmsr(perfctr_msr, (u32)(-count), 0);
268} 268}
@@ -296,7 +296,7 @@ static int setup_k7_watchdog(unsigned nmi_hz)
296 296
297 /* setup the timer */ 297 /* setup the timer */
298 wrmsr(evntsel_msr, evntsel, 0); 298 wrmsr(evntsel_msr, evntsel, 0);
299 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0",nmi_hz); 299 write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz);
300 300
301 /* initialize the wd struct before enabling */ 301 /* initialize the wd struct before enabling */
302 wd->perfctr_msr = perfctr_msr; 302 wd->perfctr_msr = perfctr_msr;
@@ -387,7 +387,7 @@ static int setup_p6_watchdog(unsigned nmi_hz)
387 /* setup the timer */ 387 /* setup the timer */
388 wrmsr(evntsel_msr, evntsel, 0); 388 wrmsr(evntsel_msr, evntsel, 0);
389 nmi_hz = adjust_for_32bit_ctr(nmi_hz); 389 nmi_hz = adjust_for_32bit_ctr(nmi_hz);
390 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0",nmi_hz); 390 write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz);
391 391
392 /* initialize the wd struct before enabling */ 392 /* initialize the wd struct before enabling */
393 wd->perfctr_msr = perfctr_msr; 393 wd->perfctr_msr = perfctr_msr;
@@ -415,7 +415,7 @@ static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
415 apic_write(APIC_LVTPC, APIC_DM_NMI); 415 apic_write(APIC_LVTPC, APIC_DM_NMI);
416 416
417 /* P6/ARCH_PERFMON has 32 bit counter write */ 417 /* P6/ARCH_PERFMON has 32 bit counter write */
418 write_watchdog_counter32(wd->perfctr_msr, NULL,nmi_hz); 418 write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz);
419} 419}
420 420
421static const struct wd_ops p6_wd_ops = { 421static const struct wd_ops p6_wd_ops = {
@@ -490,9 +490,9 @@ static int setup_p4_watchdog(unsigned nmi_hz)
490 if (smp_num_siblings == 2) { 490 if (smp_num_siblings == 2) {
491 unsigned int ebx, apicid; 491 unsigned int ebx, apicid;
492 492
493 ebx = cpuid_ebx(1); 493 ebx = cpuid_ebx(1);
494 apicid = (ebx >> 24) & 0xff; 494 apicid = (ebx >> 24) & 0xff;
495 ht_num = apicid & 1; 495 ht_num = apicid & 1;
496 } else 496 } else
497#endif 497#endif
498 ht_num = 0; 498 ht_num = 0;
@@ -544,7 +544,7 @@ static int setup_p4_watchdog(unsigned nmi_hz)
544 } 544 }
545 545
546 evntsel = P4_ESCR_EVENT_SELECT(0x3F) 546 evntsel = P4_ESCR_EVENT_SELECT(0x3F)
547 | P4_ESCR_OS 547 | P4_ESCR_OS
548 | P4_ESCR_USR; 548 | P4_ESCR_USR;
549 549
550 cccr_val |= P4_CCCR_THRESHOLD(15) 550 cccr_val |= P4_CCCR_THRESHOLD(15)
@@ -612,7 +612,7 @@ static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz)
612{ 612{
613 unsigned dummy; 613 unsigned dummy;
614 /* 614 /*
615 * P4 quirks: 615 * P4 quirks:
616 * - An overflown perfctr will assert its interrupt 616 * - An overflown perfctr will assert its interrupt
617 * until the OVF flag in its CCCR is cleared. 617 * until the OVF flag in its CCCR is cleared.
618 * - LVTPC is masked on interrupt and must be 618 * - LVTPC is masked on interrupt and must be
@@ -662,7 +662,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
662 * NOTE: Corresponding bit = 0 in ebx indicates event present. 662 * NOTE: Corresponding bit = 0 in ebx indicates event present.
663 */ 663 */
664 cpuid(10, &(eax.full), &ebx, &unused, &unused); 664 cpuid(10, &(eax.full), &ebx, &unused, &unused);
665 if ((eax.split.mask_length < (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || 665 if ((eax.split.mask_length <
666 (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) ||
666 (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) 667 (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
667 return 0; 668 return 0;
668 669
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index d5e30397246b..62ac8cb6ba27 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -116,11 +116,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
116 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize); 116 seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
117#endif 117#endif
118 seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size); 118 seq_printf(m, "clflush size\t: %u\n", c->x86_clflush_size);
119#ifdef CONFIG_X86_64
120 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment); 119 seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
121 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n", 120 seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
122 c->x86_phys_bits, c->x86_virt_bits); 121 c->x86_phys_bits, c->x86_virt_bits);
123#endif
124 122
125 seq_printf(m, "power management:"); 123 seq_printf(m, "power management:");
126 for (i = 0; i < 32; i++) { 124 for (i = 0; i < 32; i++) {
@@ -128,7 +126,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
128 if (i < ARRAY_SIZE(x86_power_flags) && 126 if (i < ARRAY_SIZE(x86_power_flags) &&
129 x86_power_flags[i]) 127 x86_power_flags[i])
130 seq_printf(m, "%s%s", 128 seq_printf(m, "%s%s",
131 x86_power_flags[i][0]?" ":"", 129 x86_power_flags[i][0] ? " " : "",
132 x86_power_flags[i]); 130 x86_power_flags[i]);
133 else 131 else
134 seq_printf(m, " [%d]", i); 132 seq_printf(m, " [%d]", i);
diff --git a/arch/x86/kernel/cpu/sched.c b/arch/x86/kernel/cpu/sched.c
new file mode 100644
index 000000000000..a640ae5ad201
--- /dev/null
+++ b/arch/x86/kernel/cpu/sched.c
@@ -0,0 +1,55 @@
1#include <linux/sched.h>
2#include <linux/math64.h>
3#include <linux/percpu.h>
4#include <linux/irqflags.h>
5
6#include <asm/cpufeature.h>
7#include <asm/processor.h>
8
9#ifdef CONFIG_SMP
10
11static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);
12
13static unsigned long scale_aperfmperf(void)
14{
15 struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
16 unsigned long ratio, flags;
17
18 local_irq_save(flags);
19 get_aperfmperf(&val);
20 local_irq_restore(flags);
21
22 ratio = calc_aperfmperf_ratio(old, &val);
23 *old = val;
24
25 return ratio;
26}
27
28unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
29{
30 /*
31 * do aperf/mperf on the cpu level because it includes things
32 * like turbo mode, which are relevant to full cores.
33 */
34 if (boot_cpu_has(X86_FEATURE_APERFMPERF))
35 return scale_aperfmperf();
36
37 /*
38 * maybe have something cpufreq here
39 */
40
41 return default_scale_freq_power(sd, cpu);
42}
43
44unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
45{
46 /*
47 * aperf/mperf already includes the smt gain
48 */
49 if (boot_cpu_has(X86_FEATURE_APERFMPERF))
50 return SCHED_LOAD_SCALE;
51
52 return default_scale_smt_power(sd, cpu);
53}
54
55#endif
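For context on the new file: APERF advances at the core's actual clock while MPERF advances at a fixed reference clock, so the ratio of their deltas over an interval estimates current performance relative to nominal (above 1 in turbo, below 1 when throttled). Illustrative numbers follow; the exact fixed-point scaling inside calc_aperfmperf_ratio() is not shown in this patch.

/* Assumed example: over one interval APERF moved 1,200,000 and
 * MPERF 1,000,000, i.e. the core ran 20% above its reference clock. */
unsigned long aperf_delta = 1200000, mperf_delta = 1000000;
unsigned long power = aperf_delta * SCHED_LOAD_SCALE / mperf_delta;
					/* ~1.2 * SCHED_LOAD_SCALE */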
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 284c399e3234..bc24f514ec93 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -49,17 +49,17 @@ static inline int __vmware_platform(void)
49 49
50static unsigned long __vmware_get_tsc_khz(void) 50static unsigned long __vmware_get_tsc_khz(void)
51{ 51{
52 uint64_t tsc_hz; 52 uint64_t tsc_hz;
53 uint32_t eax, ebx, ecx, edx; 53 uint32_t eax, ebx, ecx, edx;
54 54
55 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx); 55 VMWARE_PORT(GETHZ, eax, ebx, ecx, edx);
56 56
57 if (ebx == UINT_MAX) 57 if (ebx == UINT_MAX)
58 return 0; 58 return 0;
59 tsc_hz = eax | (((uint64_t)ebx) << 32); 59 tsc_hz = eax | (((uint64_t)ebx) << 32);
60 do_div(tsc_hz, 1000); 60 do_div(tsc_hz, 1000);
61 BUG_ON(tsc_hz >> 32); 61 BUG_ON(tsc_hz >> 32);
62 return tsc_hz; 62 return tsc_hz;
63} 63}
64 64
65/* 65/*
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index b4f14c6c09d9..37250fe490b1 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -27,9 +27,7 @@ static void doublefault_fn(void)
27 27
28 if (ptr_ok(gdt)) { 28 if (ptr_ok(gdt)) {
29 gdt += GDT_ENTRY_TSS << 3; 29 gdt += GDT_ENTRY_TSS << 3;
30 tss = *(u16 *)(gdt+2); 30 tss = get_desc_base((struct desc_struct *)gdt);
31 tss += *(u8 *)(gdt+4) << 16;
32 tss += *(u8 *)(gdt+7) << 24;
33 printk(KERN_EMERG "double fault, tss at %08lx\n", tss); 31 printk(KERN_EMERG "double fault, tss at %08lx\n", tss);
34 32
35 if (ptr_ok(tss)) { 33 if (ptr_ok(tss)) {
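The three deleted lines were an open-coded copy of the i386 descriptor layout that get_desc_base() now hides: base[15:0] lives at bytes 2-3, base[23:16] at byte 4, and base[31:24] at byte 7. Spelled out, the equivalent extraction is:

/* What get_desc_base() computes, per the i386 descriptor layout: */
static unsigned long desc_base_sketch(const unsigned char *d)
{
	return *(const unsigned short *)(d + 2)	/* base 15..0  */
	     | ((unsigned long)d[4] << 16)	/* base 23..16 */
	     | ((unsigned long)d[7] << 24);	/* base 31..24 */
}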
diff --git a/arch/x86/kernel/ds.c b/arch/x86/kernel/ds.c
index 48bfe1386038..ef42a038f1a6 100644
--- a/arch/x86/kernel/ds.c
+++ b/arch/x86/kernel/ds.c
@@ -509,15 +509,15 @@ enum bts_field {
509 bts_escape = ((unsigned long)-1 & ~bts_qual_mask) 509 bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
510}; 510};
511 511
512static inline unsigned long bts_get(const char *base, enum bts_field field) 512static inline unsigned long bts_get(const char *base, unsigned long field)
513{ 513{
514 base += (ds_cfg.sizeof_ptr_field * field); 514 base += (ds_cfg.sizeof_ptr_field * field);
515 return *(unsigned long *)base; 515 return *(unsigned long *)base;
516} 516}
517 517
518static inline void bts_set(char *base, enum bts_field field, unsigned long val) 518static inline void bts_set(char *base, unsigned long field, unsigned long val)
519{ 519{
520 base += (ds_cfg.sizeof_ptr_field * field);; 520 base += (ds_cfg.sizeof_ptr_field * field);
521 (*(unsigned long *)base) = val; 521 (*(unsigned long *)base) = val;
522} 522}
523 523
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index c8405718a4c3..2d8a371d4339 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -15,7 +15,6 @@
15#include <linux/bug.h> 15#include <linux/bug.h>
16#include <linux/nmi.h> 16#include <linux/nmi.h>
17#include <linux/sysfs.h> 17#include <linux/sysfs.h>
18#include <linux/ftrace.h>
19 18
20#include <asm/stacktrace.h> 19#include <asm/stacktrace.h>
21 20
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 5cb5725b2bae..147005a1cc3c 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -115,7 +115,7 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
115{ 115{
116 int x = e820x->nr_map; 116 int x = e820x->nr_map;
117 117
118 if (x == ARRAY_SIZE(e820x->map)) { 118 if (x >= ARRAY_SIZE(e820x->map)) {
119 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); 119 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
120 return; 120 return;
121 } 121 }
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index c251be745107..d59fe323807e 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -146,7 +146,7 @@ ENTRY(ftrace_graph_caller)
146END(ftrace_graph_caller) 146END(ftrace_graph_caller)
147 147
148GLOBAL(return_to_handler) 148GLOBAL(return_to_handler)
149 subq $80, %rsp 149 subq $24, %rsp
150 150
151 /* Save the return values */ 151 /* Save the return values */
152 movq %rax, (%rsp) 152 movq %rax, (%rsp)
@@ -155,10 +155,10 @@ GLOBAL(return_to_handler)
155 155
156 call ftrace_return_to_handler 156 call ftrace_return_to_handler
157 157
158 movq %rax, 72(%rsp) 158 movq %rax, 16(%rsp)
159 movq 8(%rsp), %rdx 159 movq 8(%rsp), %rdx
160 movq (%rsp), %rax 160 movq (%rsp), %rax
161 addq $72, %rsp 161 addq $16, %rsp
162 retq 162 retq
163#endif 163#endif
164 164
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index cc827ac9e8d3..7ffec6b3b331 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -439,7 +439,6 @@ is386: movl $2,%ecx # set MP
439 jne 1f 439 jne 1f
440 movl $per_cpu__gdt_page,%eax 440 movl $per_cpu__gdt_page,%eax
441 movl $per_cpu__stack_canary,%ecx 441 movl $per_cpu__stack_canary,%ecx
442 subl $20, %ecx
443 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) 442 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
444 shrl $16, %ecx 443 shrl $16, %ecx
445 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) 444 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index b0cdde6932f5..74656d1d4e30 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -104,7 +104,7 @@ static int show_other_interrupts(struct seq_file *p, int prec)
104 seq_printf(p, " Threshold APIC interrupts\n"); 104 seq_printf(p, " Threshold APIC interrupts\n");
105# endif 105# endif
106#endif 106#endif
107#ifdef CONFIG_X86_NEW_MCE 107#ifdef CONFIG_X86_MCE
108 seq_printf(p, "%*s: ", prec, "MCE"); 108 seq_printf(p, "%*s: ", prec, "MCE");
109 for_each_online_cpu(j) 109 for_each_online_cpu(j)
110 seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); 110 seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
@@ -200,7 +200,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
200 sum += irq_stats(cpu)->irq_threshold_count; 200 sum += irq_stats(cpu)->irq_threshold_count;
201# endif 201# endif
202#endif 202#endif
203#ifdef CONFIG_X86_NEW_MCE 203#ifdef CONFIG_X86_MCE
204 sum += per_cpu(mce_exception_count, cpu); 204 sum += per_cpu(mce_exception_count, cpu);
205 sum += per_cpu(mce_poll_count, cpu); 205 sum += per_cpu(mce_poll_count, cpu);
206#endif 206#endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 3b09634a5153..7d35d0fe2329 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -218,7 +218,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
218void fixup_irqs(void) 218void fixup_irqs(void)
219{ 219{
220 unsigned int irq; 220 unsigned int irq;
221 static int warned;
222 struct irq_desc *desc; 221 struct irq_desc *desc;
223 222
224 for_each_irq_desc(irq, desc) { 223 for_each_irq_desc(irq, desc) {
@@ -236,8 +235,8 @@ void fixup_irqs(void)
236 } 235 }
237 if (desc->chip->set_affinity) 236 if (desc->chip->set_affinity)
238 desc->chip->set_affinity(irq, affinity); 237 desc->chip->set_affinity(irq, affinity);
239 else if (desc->action && !(warned++)) 238 else if (desc->action)
240 printk("Cannot set affinity for irq %i\n", irq); 239 printk_once("Cannot set affinity for irq %i\n", irq);
241 } 240 }
242 241
243#if 0 242#if 0
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 92b7703d3d58..ccf8ab54f31a 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -190,7 +190,7 @@ static void __init apic_intr_init(void)
190#ifdef CONFIG_X86_MCE_THRESHOLD 190#ifdef CONFIG_X86_MCE_THRESHOLD
191 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); 191 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
192#endif 192#endif
193#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC) 193#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC)
194 alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt); 194 alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
195#endif 195#endif
196 196
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index c664d515f613..63b0ec8d3d4a 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -34,7 +34,6 @@
34struct kvm_para_state { 34struct kvm_para_state {
35 u8 mmu_queue[MMU_QUEUE_SIZE]; 35 u8 mmu_queue[MMU_QUEUE_SIZE];
36 int mmu_queue_len; 36 int mmu_queue_len;
37 enum paravirt_lazy_mode mode;
38}; 37};
39 38
40static DEFINE_PER_CPU(struct kvm_para_state, para_state); 39static DEFINE_PER_CPU(struct kvm_para_state, para_state);
@@ -77,7 +76,7 @@ static void kvm_deferred_mmu_op(void *buffer, int len)
77{ 76{
78 struct kvm_para_state *state = kvm_para_state(); 77 struct kvm_para_state *state = kvm_para_state();
79 78
80 if (state->mode != PARAVIRT_LAZY_MMU) { 79 if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
81 kvm_mmu_op(buffer, len); 80 kvm_mmu_op(buffer, len);
82 return; 81 return;
83 } 82 }
@@ -185,10 +184,7 @@ static void kvm_release_pt(unsigned long pfn)
185 184
186static void kvm_enter_lazy_mmu(void) 185static void kvm_enter_lazy_mmu(void)
187{ 186{
188 struct kvm_para_state *state = kvm_para_state();
189
190 paravirt_enter_lazy_mmu(); 187 paravirt_enter_lazy_mmu();
191 state->mode = paravirt_get_lazy_mode();
192} 188}
193 189
194static void kvm_leave_lazy_mmu(void) 190static void kvm_leave_lazy_mmu(void)
@@ -197,7 +193,6 @@ static void kvm_leave_lazy_mmu(void)
197 193
198 mmu_queue_flush(state); 194 mmu_queue_flush(state);
199 paravirt_leave_lazy_mmu(); 195 paravirt_leave_lazy_mmu();
200 state->mode = paravirt_get_lazy_mode();
201} 196}
202 197
203static void __init paravirt_ops_setup(void) 198static void __init paravirt_ops_setup(void)
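With the cached per-CPU mode field gone, the deferral decision now reads the paravirt lazy state directly, which cannot go stale. The resulting pattern, reduced to its shape:

/* Issue immediately unless we are batching under lazy MMU mode: */
static void example_mmu_op(void *buffer, int len)
{
	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
		kvm_mmu_op(buffer, len);	/* hypercall right away */
		return;
	}
	/* batching: queue into mmu_queue; kvm_leave_lazy_mmu()
	 * flushes the queue when lazy mode ends */
}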
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 223af43f1526..e5efcdcca31b 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -50,8 +50,8 @@ static unsigned long kvm_get_wallclock(void)
50 struct timespec ts; 50 struct timespec ts;
51 int low, high; 51 int low, high;
52 52
53 low = (int)__pa(&wall_clock); 53 low = (int)__pa_symbol(&wall_clock);
54 high = ((u64)__pa(&wall_clock) >> 32); 54 high = ((u64)__pa_symbol(&wall_clock) >> 32);
55 native_write_msr(MSR_KVM_WALL_CLOCK, low, high); 55 native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
56 56
57 vcpu_time = &get_cpu_var(hv_clock); 57 vcpu_time = &get_cpu_var(hv_clock);
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index 651c93b28862..fcd513bf2846 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -482,11 +482,11 @@ static void __init construct_ioapic_table(int mpc_default_type)
482 MP_bus_info(&bus); 482 MP_bus_info(&bus);
483 } 483 }
484 484
485 ioapic.type = MP_IOAPIC; 485 ioapic.type = MP_IOAPIC;
486 ioapic.apicid = 2; 486 ioapic.apicid = 2;
487 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01; 487 ioapic.apicver = mpc_default_type > 4 ? 0x10 : 0x01;
488 ioapic.flags = MPC_APIC_USABLE; 488 ioapic.flags = MPC_APIC_USABLE;
489 ioapic.apicaddr = 0xFEC00000; 489 ioapic.apicaddr = IO_APIC_DEFAULT_PHYS_BASE;
490 MP_ioapic_info(&ioapic); 490 MP_ioapic_info(&ioapic);
491 491
492 /* 492 /*
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 98fd6cd4e3a4..7dd950094178 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -1,6 +1,7 @@
1/* ----------------------------------------------------------------------- * 1/* ----------------------------------------------------------------------- *
2 * 2 *
3 * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved 3 * Copyright 2000-2008 H. Peter Anvin - All Rights Reserved
4 * Copyright 2009 Intel Corporation; author: H. Peter Anvin
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -80,11 +81,8 @@ static ssize_t msr_read(struct file *file, char __user *buf,
80 81
81 for (; count; count -= 8) { 82 for (; count; count -= 8) {
82 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]); 83 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
83 if (err) { 84 if (err)
84 if (err == -EFAULT) /* Fix idiotic error code */
85 err = -EIO;
86 break; 85 break;
87 }
88 if (copy_to_user(tmp, &data, 8)) { 86 if (copy_to_user(tmp, &data, 8)) {
89 err = -EFAULT; 87 err = -EFAULT;
90 break; 88 break;
@@ -115,11 +113,8 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
115 break; 113 break;
116 } 114 }
117 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]); 115 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
118 if (err) { 116 if (err)
119 if (err == -EFAULT) /* Fix idiotic error code */
120 err = -EIO;
121 break; 117 break;
122 }
123 tmp += 2; 118 tmp += 2;
124 bytes += 8; 119 bytes += 8;
125 } 120 }
@@ -127,6 +122,54 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
127 return bytes ? bytes : err; 122 return bytes ? bytes : err;
128} 123}
129 124
125static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
126{
127 u32 __user *uregs = (u32 __user *)arg;
128 u32 regs[8];
129 int cpu = iminor(file->f_path.dentry->d_inode);
130 int err;
131
132 switch (ioc) {
133 case X86_IOC_RDMSR_REGS:
134 if (!(file->f_mode & FMODE_READ)) {
135 err = -EBADF;
136 break;
137 }
138 if (copy_from_user(&regs, uregs, sizeof regs)) {
139 err = -EFAULT;
140 break;
141 }
142 err = rdmsr_safe_regs_on_cpu(cpu, regs);
143 if (err)
144 break;
145 if (copy_to_user(uregs, &regs, sizeof regs))
146 err = -EFAULT;
147 break;
148
149 case X86_IOC_WRMSR_REGS:
150 if (!(file->f_mode & FMODE_WRITE)) {
151 err = -EBADF;
152 break;
153 }
154 if (copy_from_user(&regs, uregs, sizeof regs)) {
155 err = -EFAULT;
156 break;
157 }
158 err = wrmsr_safe_regs_on_cpu(cpu, regs);
159 if (err)
160 break;
161 if (copy_to_user(uregs, &regs, sizeof regs))
162 err = -EFAULT;
163 break;
164
165 default:
166 err = -ENOTTY;
167 break;
168 }
169
170 return err;
171}
172
130static int msr_open(struct inode *inode, struct file *file) 173static int msr_open(struct inode *inode, struct file *file)
131{ 174{
132 unsigned int cpu = iminor(file->f_path.dentry->d_inode); 175 unsigned int cpu = iminor(file->f_path.dentry->d_inode);
@@ -157,6 +200,8 @@ static const struct file_operations msr_fops = {
157 .read = msr_read, 200 .read = msr_read,
158 .write = msr_write, 201 .write = msr_write,
159 .open = msr_open, 202 .open = msr_open,
203 .unlocked_ioctl = msr_ioctl,
204 .compat_ioctl = msr_ioctl,
160}; 205};
161 206
162static int __cpuinit msr_device_create(int cpu) 207static int __cpuinit msr_device_create(int cpu)
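A userspace sketch of the new ioctl against /dev/cpu/N/msr. The register-array convention assumed below (regs[1] = %ecx carries the MSR index on input; regs[0]/regs[2] = %eax/%edx carry the value on output) follows the usual rdmsr calling convention and is not spelled out in this hunk; the MSR number is likewise just an example.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <asm/msr.h>	/* assumed to export X86_IOC_RDMSR_REGS */

int main(void)
{
	uint32_t regs[8] = { 0 };
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0)
		return 1;
	regs[1] = 0x10;	/* %ecx: MSR 0x10 (TSC), as an example */
	if (ioctl(fd, X86_IOC_RDMSR_REGS, regs) == 0)
		printf("msr 0x10: %08x%08x\n", regs[2], regs[0]);
	return 0;
}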
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 70ec9b951d76..f5b0b4a01fb2 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -362,8 +362,9 @@ struct pv_cpu_ops pv_cpu_ops = {
362#endif 362#endif
363 .wbinvd = native_wbinvd, 363 .wbinvd = native_wbinvd,
364 .read_msr = native_read_msr_safe, 364 .read_msr = native_read_msr_safe,
365 .read_msr_amd = native_read_msr_amd_safe, 365 .rdmsr_regs = native_rdmsr_safe_regs,
366 .write_msr = native_write_msr_safe, 366 .write_msr = native_write_msr_safe,
367 .wrmsr_regs = native_wrmsr_safe_regs,
367 .read_tsc = native_read_tsc, 368 .read_tsc = native_read_tsc,
368 .read_pmc = native_read_pmc, 369 .read_pmc = native_read_pmc,
369 .read_tscp = native_read_tscp, 370 .read_tscp = native_read_tscp,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index d71c8655905b..64b838eac18c 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -225,10 +225,8 @@ static __init int iommu_setup(char *p)
225 if (!strncmp(p, "soft", 4)) 225 if (!strncmp(p, "soft", 4))
226 swiotlb = 1; 226 swiotlb = 1;
227#endif 227#endif
228 if (!strncmp(p, "pt", 2)) { 228 if (!strncmp(p, "pt", 2))
229 iommu_pass_through = 1; 229 iommu_pass_through = 1;
230 return 1;
231 }
232 230
233 gart_parse_options(p); 231 gart_parse_options(p);
234 232
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 59f4524984af..4cf79567cdab 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -61,9 +61,6 @@
61 61
62asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); 62asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
63 63
64DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
65EXPORT_PER_CPU_SYMBOL(current_task);
66
67/* 64/*
68 * Return saved PC of a blocked thread. 65 * Return saved PC of a blocked thread.
69 */ 66 */
@@ -350,14 +347,21 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
350 *next = &next_p->thread; 347 *next = &next_p->thread;
351 int cpu = smp_processor_id(); 348 int cpu = smp_processor_id();
352 struct tss_struct *tss = &per_cpu(init_tss, cpu); 349 struct tss_struct *tss = &per_cpu(init_tss, cpu);
350 bool preload_fpu;
353 351
354 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ 352 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
355 353
356 __unlazy_fpu(prev_p); 354 /*
355 * If the task has used fpu the last 5 timeslices, just do a full
356 * restore of the math state immediately to avoid the trap; the
357 * chances of needing FPU soon are obviously high now
358 */
359 preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
357 360
361 __unlazy_fpu(prev_p);
358 362
359 /* we're going to use this soon, after a few expensive things */ 363 /* we're going to use this soon, after a few expensive things */
360 if (next_p->fpu_counter > 5) 364 if (preload_fpu)
361 prefetch(next->xstate); 365 prefetch(next->xstate);
362 366
363 /* 367 /*
@@ -398,6 +402,11 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
398 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) 402 task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
399 __switch_to_xtra(prev_p, next_p, tss); 403 __switch_to_xtra(prev_p, next_p, tss);
400 404
405 /* If we're going to preload the fpu context, make sure clts
406 is run while we're batching the cpu state updates. */
407 if (preload_fpu)
408 clts();
409
401 /* 410 /*
402 * Leave lazy mode, flushing any hypercalls made here. 411 * Leave lazy mode, flushing any hypercalls made here.
403 * This must be done before restoring TLS segments so 412 * This must be done before restoring TLS segments so
@@ -407,15 +416,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
407 */ 416 */
408 arch_end_context_switch(next_p); 417 arch_end_context_switch(next_p);
409 418
410 /* If the task has used fpu the last 5 timeslices, just do a full 419 if (preload_fpu)
411 * restore of the math state immediately to avoid the trap; the 420 __math_state_restore();
412 * chances of needing FPU soon are obviously high now
413 *
414 * tsk_used_math() checks prevent calling math_state_restore(),
415 * which can sleep in the case of !tsk_used_math()
416 */
417 if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
418 math_state_restore();
419 421
420 /* 422 /*
421 * Restore %gs if needed (which is common) 423 * Restore %gs if needed (which is common)
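Read linearly, the reordered 32-bit switch path condenses to the sketch below (names as in the hunks above). Deciding preload_fpu up front lets clts() be batched with the other CPU-state updates, and lets __math_state_restore() assume CR0.TS is already clear:

    preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

    __unlazy_fpu(prev_p);               /* save prev's FPU state if dirty */
    if (preload_fpu)
        prefetch(next->xstate);         /* warm the cache early */
    /* ... TLS, stack, I/O bitmap, debug registers ... */
    if (preload_fpu)
        clts();                         /* clear TS inside the batched updates */
    arch_end_context_switch(next_p);    /* flush lazy-mode hypercalls */
    if (preload_fpu)
        __math_state_restore();         /* restore without taking the DNA trap */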
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ebefb5407b9d..ad535b683170 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -55,9 +55,6 @@
55 55
56asmlinkage extern void ret_from_fork(void); 56asmlinkage extern void ret_from_fork(void);
57 57
58DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
59EXPORT_PER_CPU_SYMBOL(current_task);
60
61DEFINE_PER_CPU(unsigned long, old_rsp); 58DEFINE_PER_CPU(unsigned long, old_rsp);
62static DEFINE_PER_CPU(unsigned char, is_idle); 59static DEFINE_PER_CPU(unsigned char, is_idle);
63 60
@@ -386,9 +383,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
386 int cpu = smp_processor_id(); 383 int cpu = smp_processor_id();
387 struct tss_struct *tss = &per_cpu(init_tss, cpu); 384 struct tss_struct *tss = &per_cpu(init_tss, cpu);
388 unsigned fsindex, gsindex; 385 unsigned fsindex, gsindex;
386 bool preload_fpu;
387
388 /*
 389 * If the task has used the FPU in the last 5 timeslices, just do a full
390 * restore of the math state immediately to avoid the trap; the
391 * chances of needing FPU soon are obviously high now
392 */
393 preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;
389 394
390 /* we're going to use this soon, after a few expensive things */ 395 /* we're going to use this soon, after a few expensive things */
391 if (next_p->fpu_counter > 5) 396 if (preload_fpu)
392 prefetch(next->xstate); 397 prefetch(next->xstate);
393 398
394 /* 399 /*
@@ -419,6 +424,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
419 424
420 load_TLS(next, cpu); 425 load_TLS(next, cpu);
421 426
427 /* Must be after DS reload */
428 unlazy_fpu(prev_p);
429
430 /* Make sure cpu is ready for new context */
431 if (preload_fpu)
432 clts();
433
422 /* 434 /*
423 * Leave lazy mode, flushing any hypercalls made here. 435 * Leave lazy mode, flushing any hypercalls made here.
424 * This must be done before restoring TLS segments so 436 * This must be done before restoring TLS segments so
@@ -459,9 +471,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
459 wrmsrl(MSR_KERNEL_GS_BASE, next->gs); 471 wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
460 prev->gsindex = gsindex; 472 prev->gsindex = gsindex;
461 473
462 /* Must be after DS reload */
463 unlazy_fpu(prev_p);
464
465 /* 474 /*
466 * Switch the PDA and FPU contexts. 475 * Switch the PDA and FPU contexts.
467 */ 476 */
@@ -480,15 +489,12 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
480 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) 489 task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
481 __switch_to_xtra(prev_p, next_p, tss); 490 __switch_to_xtra(prev_p, next_p, tss);
482 491
483 /* If the task has used fpu the last 5 timeslices, just do a full 492 /*
484 * restore of the math state immediately to avoid the trap; the 493 * Preload the FPU context, now that we've determined that the
485 * chances of needing FPU soon are obviously high now 494 * task is likely to be using it.
486 *
487 * tsk_used_math() checks prevent calling math_state_restore(),
488 * which can sleep in the case of !tsk_used_math()
489 */ 495 */
490 if (tsk_used_math(next_p) && next_p->fpu_counter > 5) 496 if (preload_fpu)
491 math_state_restore(); 497 __math_state_restore();
492 return prev_p; 498 return prev_p;
493} 499}
494 500
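The 64-bit path gets the same treatment, with one extra ordering constraint made explicit: unlazy_fpu(prev_p) moves up to just after the TLS/DS reload, so the FPU save is done before the lazy paravirt state is flushed. Condensed sketch:

    load_TLS(next, cpu);
    unlazy_fpu(prev_p);         /* must be after DS reload */
    if (preload_fpu)
        clts();                 /* cpu ready for the incoming context */
    arch_end_context_switch(next_p);
    /* ... gs/fs, PDA and stack switching ... */
    if (preload_fpu)
        __math_state_restore(); /* TS already clear, no trap taken */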
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index af71d06624bf..6c3b2c6fd772 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -508,7 +508,7 @@ static void __init quirk_amd_nb_node(struct pci_dev *dev)
508 508
509 pci_read_config_dword(nb_ht, 0x60, &val); 509 pci_read_config_dword(nb_ht, 0x60, &val);
510 set_dev_node(&dev->dev, val & 7); 510 set_dev_node(&dev->dev, val & 7);
511 pci_dev_put(dev); 511 pci_dev_put(nb_ht);
512} 512}
513 513
514DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB, 514DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
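The quirks.c change is a reference-count pairing fix: the function dropped a reference on dev, which it never took, instead of on the northbridge device it looked up itself. A minimal sketch of the corrected pairing; the pci_get_device() lookup is the part of quirk_amd_nb_node() outside this hunk, and the device-ID argument here is a placeholder:

    struct pci_dev *nb_ht;
    u32 val;

    nb_ht = pci_get_device(PCI_VENDOR_ID_AMD, nb_ht_id, NULL); /* takes a ref */
    if (!nb_ht)
        return;
    pci_read_config_dword(nb_ht, 0x60, &val);
    set_dev_node(&dev->dev, val & 7);
    pci_dev_put(nb_ht);     /* drop the ref we took; dev belongs to the caller */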
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index a06e8d101844..27349f92a6d7 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -4,6 +4,7 @@
4#include <linux/pm.h> 4#include <linux/pm.h>
5#include <linux/efi.h> 5#include <linux/efi.h>
6#include <linux/dmi.h> 6#include <linux/dmi.h>
7#include <linux/tboot.h>
7#include <acpi/reboot.h> 8#include <acpi/reboot.h>
8#include <asm/io.h> 9#include <asm/io.h>
9#include <asm/apic.h> 10#include <asm/apic.h>
@@ -508,6 +509,8 @@ static void native_machine_emergency_restart(void)
508 if (reboot_emergency) 509 if (reboot_emergency)
509 emergency_vmx_disable_all(); 510 emergency_vmx_disable_all();
510 511
512 tboot_shutdown(TB_SHUTDOWN_REBOOT);
513
511 /* Tell the BIOS if we want cold or warm reboot */ 514 /* Tell the BIOS if we want cold or warm reboot */
512 *((unsigned short *)__va(0x472)) = reboot_mode; 515 *((unsigned short *)__va(0x472)) = reboot_mode;
513 516
@@ -634,6 +637,8 @@ static void native_machine_halt(void)
634 /* stop other cpus and apics */ 637 /* stop other cpus and apics */
635 machine_shutdown(); 638 machine_shutdown();
636 639
640 tboot_shutdown(TB_SHUTDOWN_HALT);
641
637 /* stop this cpu */ 642 /* stop this cpu */
638 stop_this_cpu(NULL); 643 stop_this_cpu(NULL);
639} 644}
@@ -645,6 +650,8 @@ static void native_machine_power_off(void)
645 machine_shutdown(); 650 machine_shutdown();
646 pm_power_off(); 651 pm_power_off();
647 } 652 }
653 /* a fallback in case there is no PM info available */
654 tboot_shutdown(TB_SHUTDOWN_HALT);
648} 655}
649 656
650struct machine_ops machine_ops = { 657struct machine_ops machine_ops = {
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 63f32d220ef2..19f15c4076fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -66,6 +66,7 @@
66 66
67#include <linux/percpu.h> 67#include <linux/percpu.h>
68#include <linux/crash_dump.h> 68#include <linux/crash_dump.h>
69#include <linux/tboot.h>
69 70
70#include <video/edid.h> 71#include <video/edid.h>
71 72
@@ -711,6 +712,21 @@ void __init setup_arch(char **cmdline_p)
711 printk(KERN_INFO "Command line: %s\n", boot_command_line); 712 printk(KERN_INFO "Command line: %s\n", boot_command_line);
712#endif 713#endif
713 714
715 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
716 *cmdline_p = command_line;
717
718#ifdef CONFIG_X86_64
719 /*
720 * Must call this twice: Once just to detect whether hardware doesn't
721 * support NX (so that the early EHCI debug console setup can safely
722 * call set_fixmap(), and then again after parsing early parameters to
723 * honor the respective command line option.
724 */
725 check_efer();
726#endif
727
728 parse_early_param();
729
714 /* VMI may relocate the fixmap; do this before touching ioremap area */ 730 /* VMI may relocate the fixmap; do this before touching ioremap area */
715 vmi_init(); 731 vmi_init();
716 732
@@ -793,11 +809,6 @@ void __init setup_arch(char **cmdline_p)
793#endif 809#endif
794#endif 810#endif
795 811
796 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
797 *cmdline_p = command_line;
798
799 parse_early_param();
800
801#ifdef CONFIG_X86_64 812#ifdef CONFIG_X86_64
802 check_efer(); 813 check_efer();
803#endif 814#endif
@@ -977,6 +988,8 @@ void __init setup_arch(char **cmdline_p)
977 paravirt_pagetable_setup_done(swapper_pg_dir); 988 paravirt_pagetable_setup_done(swapper_pg_dir);
978 paravirt_post_allocator_init(); 989 paravirt_post_allocator_init();
979 990
991 tboot_probe();
992
980#ifdef CONFIG_X86_64 993#ifdef CONFIG_X86_64
981 map_vsyscall(); 994 map_vsyscall();
982#endif 995#endif
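Net effect of the two setup.c hunks: command-line copying and parse_early_param() move ahead of vmi_init(), and check_efer() now runs twice. Roughly, the resulting order is (condensed sketch; the command line option referred to is noexec=):

    strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
    *cmdline_p = command_line;
    #ifdef CONFIG_X86_64
    check_efer();           /* 1st: detect missing NX so the early EHCI debug
                               console may call set_fixmap() safely */
    #endif
    parse_early_param();
    vmi_init();             /* may relocate the fixmap */
    /* ... e820 and memory setup ... */
    #ifdef CONFIG_X86_64
    check_efer();           /* 2nd: honor noexec= now that early params exist */
    #endif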
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 07d81916f212..d559af913e1f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -55,6 +55,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
55#define PERCPU_FIRST_CHUNK_RESERVE 0 55#define PERCPU_FIRST_CHUNK_RESERVE 0
56#endif 56#endif
57 57
58#ifdef CONFIG_X86_32
58/** 59/**
59 * pcpu_need_numa - determine percpu allocation needs to consider NUMA 60 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
60 * 61 *
@@ -83,6 +84,7 @@ static bool __init pcpu_need_numa(void)
83#endif 84#endif
84 return false; 85 return false;
85} 86}
87#endif
86 88
87/** 89/**
88 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu 90 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
@@ -124,308 +126,35 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
124} 126}
125 127
126/* 128/*
127 * Large page remap allocator 129 * Helpers for first chunk memory allocation
128 *
129 * This allocator uses PMD page as unit. A PMD page is allocated for
130 * each cpu and each is remapped into vmalloc area using PMD mapping.
131 * As PMD page is quite large, only part of it is used for the first
132 * chunk. Unused part is returned to the bootmem allocator.
133 *
134 * So, the PMD pages are mapped twice - once to the physical mapping
135 * and to the vmalloc area for the first percpu chunk. The double
136 * mapping does add one more PMD TLB entry pressure but still is much
137 * better than only using 4k mappings while still being NUMA friendly.
138 */ 130 */
139#ifdef CONFIG_NEED_MULTIPLE_NODES 131static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
140struct pcpul_ent {
141 unsigned int cpu;
142 void *ptr;
143};
144
145static size_t pcpul_size;
146static struct pcpul_ent *pcpul_map;
147static struct vm_struct pcpul_vm;
148
149static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
150{ 132{
151 size_t off = (size_t)pageno << PAGE_SHIFT; 133 return pcpu_alloc_bootmem(cpu, size, align);
152
153 if (off >= pcpul_size)
154 return NULL;
155
156 return virt_to_page(pcpul_map[cpu].ptr + off);
157} 134}
158 135
159static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) 136static void __init pcpu_fc_free(void *ptr, size_t size)
160{ 137{
161 size_t map_size, dyn_size; 138 free_bootmem(__pa(ptr), size);
162 unsigned int cpu;
163 int i, j;
164 ssize_t ret;
165
166 if (!chosen) {
167 size_t vm_size = VMALLOC_END - VMALLOC_START;
168 size_t tot_size = nr_cpu_ids * PMD_SIZE;
169
170 /* on non-NUMA, embedding is better */
171 if (!pcpu_need_numa())
172 return -EINVAL;
173
174 /* don't consume more than 20% of vmalloc area */
175 if (tot_size > vm_size / 5) {
176 pr_info("PERCPU: too large chunk size %zuMB for "
177 "large page remap\n", tot_size >> 20);
178 return -EINVAL;
179 }
180 }
181
182 /* need PSE */
183 if (!cpu_has_pse) {
184 pr_warning("PERCPU: lpage allocator requires PSE\n");
185 return -EINVAL;
186 }
187
188 /*
189 * Currently supports only single page. Supporting multiple
190 * pages won't be too difficult if it ever becomes necessary.
191 */
192 pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
193 PERCPU_DYNAMIC_RESERVE);
194 if (pcpul_size > PMD_SIZE) {
195 pr_warning("PERCPU: static data is larger than large page, "
196 "can't use large page\n");
197 return -EINVAL;
198 }
199 dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
200
201 /* allocate pointer array and alloc large pages */
202 map_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpul_map[0]));
203 pcpul_map = alloc_bootmem(map_size);
204
205 for_each_possible_cpu(cpu) {
206 pcpul_map[cpu].cpu = cpu;
207 pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
208 PMD_SIZE);
209 if (!pcpul_map[cpu].ptr) {
210 pr_warning("PERCPU: failed to allocate large page "
211 "for cpu%u\n", cpu);
212 goto enomem;
213 }
214
215 /*
216 * Only use pcpul_size bytes and give back the rest.
217 *
218 * Ingo: The 2MB up-rounding bootmem is needed to make
219 * sure the partial 2MB page is still fully RAM - it's
220 * not well-specified to have a PAT-incompatible area
221 * (unmapped RAM, device memory, etc.) in that hole.
222 */
223 free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
224 PMD_SIZE - pcpul_size);
225
226 memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
227 }
228
229 /* allocate address and map */
230 pcpul_vm.flags = VM_ALLOC;
231 pcpul_vm.size = nr_cpu_ids * PMD_SIZE;
232 vm_area_register_early(&pcpul_vm, PMD_SIZE);
233
234 for_each_possible_cpu(cpu) {
235 pmd_t *pmd, pmd_v;
236
237 pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
238 cpu * PMD_SIZE);
239 pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
240 PAGE_KERNEL_LARGE);
241 set_pmd(pmd, pmd_v);
242 }
243
244 /* we're ready, commit */
245 pr_info("PERCPU: Remapped at %p with large pages, static data "
246 "%zu bytes\n", pcpul_vm.addr, static_size);
247
248 ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
249 PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
250 PMD_SIZE, pcpul_vm.addr, NULL);
251
252 /* sort pcpul_map array for pcpu_lpage_remapped() */
253 for (i = 0; i < nr_cpu_ids - 1; i++)
254 for (j = i + 1; j < nr_cpu_ids; j++)
255 if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
256 struct pcpul_ent tmp = pcpul_map[i];
257 pcpul_map[i] = pcpul_map[j];
258 pcpul_map[j] = tmp;
259 }
260
261 return ret;
262
263enomem:
264 for_each_possible_cpu(cpu)
265 if (pcpul_map[cpu].ptr)
266 free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
267 free_bootmem(__pa(pcpul_map), map_size);
268 return -ENOMEM;
269} 139}
270 140
271/** 141static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
272 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
273 * @kaddr: the kernel address in question
274 *
275 * Determine whether @kaddr falls in the pcpul recycled area. This is
276 * used by pageattr to detect VM aliases and break up the pcpu PMD
277 * mapping such that the same physical page is not mapped under
278 * different attributes.
279 *
280 * The recycled area is always at the tail of a partially used PMD
281 * page.
282 *
283 * RETURNS:
284 * Address of corresponding remapped pcpu address if match is found;
285 * otherwise, NULL.
286 */
287void *pcpu_lpage_remapped(void *kaddr)
288{ 142{
289 void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK); 143#ifdef CONFIG_NEED_MULTIPLE_NODES
290 unsigned long offset = (unsigned long)kaddr & ~PMD_MASK; 144 if (early_cpu_to_node(from) == early_cpu_to_node(to))
291 int left = 0, right = nr_cpu_ids - 1; 145 return LOCAL_DISTANCE;
292 int pos; 146 else
293 147 return REMOTE_DISTANCE;
294 /* pcpul in use at all? */
295 if (!pcpul_map)
296 return NULL;
297
298 /* okay, perform binary search */
299 while (left <= right) {
300 pos = (left + right) / 2;
301
302 if (pcpul_map[pos].ptr < pmd_addr)
303 left = pos + 1;
304 else if (pcpul_map[pos].ptr > pmd_addr)
305 right = pos - 1;
306 else {
307 /* it shouldn't be in the area for the first chunk */
308 WARN_ON(offset < pcpul_size);
309
310 return pcpul_vm.addr +
311 pcpul_map[pos].cpu * PMD_SIZE + offset;
312 }
313 }
314
315 return NULL;
316}
317#else 148#else
318static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen) 149 return LOCAL_DISTANCE;
319{
320 return -EINVAL;
321}
322#endif 150#endif
323
324/*
325 * Embedding allocator
326 *
327 * The first chunk is sized to just contain the static area plus
328 * module and dynamic reserves and embedded into linear physical
329 * mapping so that it can use PMD mapping without additional TLB
330 * pressure.
331 */
332static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
333{
334 size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
335
336 /*
337 * If large page isn't supported, there's no benefit in doing
338 * this. Also, embedding allocation doesn't play well with
339 * NUMA.
340 */
341 if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
342 return -EINVAL;
343
344 return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
345 reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
346} 151}
347 152
348/* 153static void __init pcpup_populate_pte(unsigned long addr)
349 * 4k page allocator
350 *
351 * This is the basic allocator. Static percpu area is allocated
352 * page-by-page and most of initialization is done by the generic
353 * setup function.
354 */
355static struct page **pcpu4k_pages __initdata;
356static int pcpu4k_nr_static_pages __initdata;
357
358static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
359{
360 if (pageno < pcpu4k_nr_static_pages)
361 return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
362 return NULL;
363}
364
365static void __init pcpu4k_populate_pte(unsigned long addr)
366{ 154{
367 populate_extra_pte(addr); 155 populate_extra_pte(addr);
368} 156}
369 157
370static ssize_t __init setup_pcpu_4k(size_t static_size)
371{
372 size_t pages_size;
373 unsigned int cpu;
374 int i, j;
375 ssize_t ret;
376
377 pcpu4k_nr_static_pages = PFN_UP(static_size);
378
379 /* unaligned allocations can't be freed, round up to page size */
380 pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * nr_cpu_ids
381 * sizeof(pcpu4k_pages[0]));
382 pcpu4k_pages = alloc_bootmem(pages_size);
383
384 /* allocate and copy */
385 j = 0;
386 for_each_possible_cpu(cpu)
387 for (i = 0; i < pcpu4k_nr_static_pages; i++) {
388 void *ptr;
389
390 ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
391 if (!ptr) {
392 pr_warning("PERCPU: failed to allocate "
393 "4k page for cpu%u\n", cpu);
394 goto enomem;
395 }
396
397 memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
398 pcpu4k_pages[j++] = virt_to_page(ptr);
399 }
400
401 /* we're ready, commit */
402 pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
403 pcpu4k_nr_static_pages, static_size);
404
405 ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
406 PERCPU_FIRST_CHUNK_RESERVE, -1,
407 -1, NULL, pcpu4k_populate_pte);
408 goto out_free_ar;
409
410enomem:
411 while (--j >= 0)
412 free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
413 ret = -ENOMEM;
414out_free_ar:
415 free_bootmem(__pa(pcpu4k_pages), pages_size);
416 return ret;
417}
418
419/* for explicit first chunk allocator selection */
420static char pcpu_chosen_alloc[16] __initdata;
421
422static int __init percpu_alloc_setup(char *str)
423{
424 strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
425 return 0;
426}
427early_param("percpu_alloc", percpu_alloc_setup);
428
429static inline void setup_percpu_segment(int cpu) 158static inline void setup_percpu_segment(int cpu)
430{ 159{
431#ifdef CONFIG_X86_32 160#ifdef CONFIG_X86_32
@@ -441,52 +170,49 @@ static inline void setup_percpu_segment(int cpu)
441 170
442void __init setup_per_cpu_areas(void) 171void __init setup_per_cpu_areas(void)
443{ 172{
444 size_t static_size = __per_cpu_end - __per_cpu_start;
445 unsigned int cpu; 173 unsigned int cpu;
446 unsigned long delta; 174 unsigned long delta;
447 size_t pcpu_unit_size; 175 int rc;
448 ssize_t ret;
449 176
450 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", 177 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
451 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); 178 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
452 179
453 /* 180 /*
454 * Allocate percpu area. If PSE is supported, try to make use 181 * Allocate percpu area. Embedding allocator is our favorite;
455 * of large page mappings. Please read comments on top of 182 * however, on NUMA configurations, it can result in very
456 * each allocator for details. 183 * sparse unit mapping and vmalloc area isn't spacious enough
184 * on 32bit. Use page in that case.
457 */ 185 */
458 ret = -EINVAL; 186#ifdef CONFIG_X86_32
459 if (strlen(pcpu_chosen_alloc)) { 187 if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
460 if (strcmp(pcpu_chosen_alloc, "4k")) { 188 pcpu_chosen_fc = PCPU_FC_PAGE;
461 if (!strcmp(pcpu_chosen_alloc, "lpage")) 189#endif
462 ret = setup_pcpu_lpage(static_size, true); 190 rc = -EINVAL;
463 else if (!strcmp(pcpu_chosen_alloc, "embed")) 191 if (pcpu_chosen_fc != PCPU_FC_PAGE) {
464 ret = setup_pcpu_embed(static_size, true); 192 const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
465 else 193 const size_t dyn_size = PERCPU_MODULE_RESERVE +
466 pr_warning("PERCPU: unknown allocator %s " 194 PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
467 "specified\n", pcpu_chosen_alloc); 195
468 if (ret < 0) 196 rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
469 pr_warning("PERCPU: %s allocator failed (%zd), " 197 dyn_size, atom_size,
470 "falling back to 4k\n", 198 pcpu_cpu_distance,
471 pcpu_chosen_alloc, ret); 199 pcpu_fc_alloc, pcpu_fc_free);
472 } 200 if (rc < 0)
473 } else { 201 pr_warning("PERCPU: %s allocator failed (%d), "
474 ret = setup_pcpu_lpage(static_size, false); 202 "falling back to page size\n",
475 if (ret < 0) 203 pcpu_fc_names[pcpu_chosen_fc], rc);
476 ret = setup_pcpu_embed(static_size, false);
477 } 204 }
478 if (ret < 0) 205 if (rc < 0)
479 ret = setup_pcpu_4k(static_size); 206 rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
480 if (ret < 0) 207 pcpu_fc_alloc, pcpu_fc_free,
481 panic("cannot allocate static percpu area (%zu bytes, err=%zd)", 208 pcpup_populate_pte);
482 static_size, ret); 209 if (rc < 0)
483 210 panic("cannot initialize percpu area (err=%d)", rc);
484 pcpu_unit_size = ret;
485 211
486 /* alrighty, percpu areas up and running */ 212 /* alrighty, percpu areas up and running */
487 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 213 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
488 for_each_possible_cpu(cpu) { 214 for_each_possible_cpu(cpu) {
489 per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; 215 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
490 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); 216 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
491 per_cpu(cpu_number, cpu) = cpu; 217 per_cpu(cpu_number, cpu) = cpu;
492 setup_percpu_segment(cpu); 218 setup_percpu_segment(cpu);
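Two points about the rewritten setup_per_cpu_areas(). First, allocator selection moves to the generic percpu code: the x86-only percpu_alloc=4k/lpage/embed strings give way to pcpu_chosen_fc (parsed, in this series, by mm/percpu.c), with embed tried first and the page allocator as fallback. Second, per-CPU offsets stop being a uniform stride, since embedded units may be laid out sparsely on NUMA:

    /* before: every unit at a fixed stride from the base */
    per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;

    /* after: per-unit offsets published by the generic allocator */
    per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];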
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 81e58238c4ce..6a44a76055ad 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -856,7 +856,7 @@ static void do_signal(struct pt_regs *regs)
856void 856void
857do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) 857do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
858{ 858{
859#ifdef CONFIG_X86_NEW_MCE 859#ifdef CONFIG_X86_MCE
860 /* notify userspace of pending MCEs */ 860 /* notify userspace of pending MCEs */
861 if (thread_info_flags & _TIF_MCE_NOTIFY) 861 if (thread_info_flags & _TIF_MCE_NOTIFY)
862 mce_notify_process(); 862 mce_notify_process();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2fecda69ee64..a25eeec00080 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -47,6 +47,7 @@
47#include <linux/bootmem.h> 47#include <linux/bootmem.h>
48#include <linux/err.h> 48#include <linux/err.h>
49#include <linux/nmi.h> 49#include <linux/nmi.h>
50#include <linux/tboot.h>
50 51
51#include <asm/acpi.h> 52#include <asm/acpi.h>
52#include <asm/desc.h> 53#include <asm/desc.h>
@@ -434,7 +435,8 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
434 * For perf, we return last level cache shared map. 435 * For perf, we return last level cache shared map.
435 * And for power savings, we return cpu_core_map 436 * And for power savings, we return cpu_core_map
436 */ 437 */
437 if (sched_mc_power_savings || sched_smt_power_savings) 438 if ((sched_mc_power_savings || sched_smt_power_savings) &&
439 !(cpu_has(c, X86_FEATURE_AMD_DCM)))
438 return cpu_core_mask(cpu); 440 return cpu_core_mask(cpu);
439 else 441 else
440 return c->llc_shared_map; 442 return c->llc_shared_map;
@@ -1116,9 +1118,22 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1116 1118
1117 if (is_uv_system()) 1119 if (is_uv_system())
1118 uv_system_init(); 1120 uv_system_init();
1121
1122 set_mtrr_aps_delayed_init();
1119out: 1123out:
1120 preempt_enable(); 1124 preempt_enable();
1121} 1125}
1126
1127void arch_enable_nonboot_cpus_begin(void)
1128{
1129 set_mtrr_aps_delayed_init();
1130}
1131
1132void arch_enable_nonboot_cpus_end(void)
1133{
1134 mtrr_aps_init();
1135}
1136
1122/* 1137/*
1123 * Early setup to make printk work. 1138 * Early setup to make printk work.
1124 */ 1139 */
@@ -1140,6 +1155,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
1140 setup_ioapic_dest(); 1155 setup_ioapic_dest();
1141#endif 1156#endif
1142 check_nmi_watchdog(); 1157 check_nmi_watchdog();
1158 mtrr_aps_init();
1143} 1159}
1144 1160
1145static int __initdata setup_possible_cpus = -1; 1161static int __initdata setup_possible_cpus = -1;
@@ -1317,6 +1333,7 @@ void play_dead_common(void)
1317void native_play_dead(void) 1333void native_play_dead(void)
1318{ 1334{
1319 play_dead_common(); 1335 play_dead_common();
1336 tboot_shutdown(TB_SHUTDOWN_WFS);
1320 wbinvd_halt(); 1337 wbinvd_halt();
1321} 1338}
1322 1339
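The MTRR hooks only make sense read in pairs: at boot, set_mtrr_aps_delayed_init() in native_smp_prepare_cpus() is matched by mtrr_aps_init() in native_smp_cpus_done(), so each AP skips its own MTRR/PAT programming and all of them are initialized in one pass; the new suspend/resume hooks make the same pairing explicit around re-onlining:

    void arch_enable_nonboot_cpus_begin(void)
    {
        set_mtrr_aps_delayed_init();    /* APs being re-onlined defer MTRR init */
    }

    void arch_enable_nonboot_cpus_end(void)
    {
        mtrr_aps_init();                /* then program them all in one pass */
    }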
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index e8b9863ef8c4..3149032ff107 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -4,6 +4,7 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <asm/desc.h>
7 8
8unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) 9unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
9{ 10{
@@ -23,7 +24,7 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
23 * and APM bios ones we just ignore here. 24 * and APM bios ones we just ignore here.
24 */ 25 */
25 if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) { 26 if ((seg & SEGMENT_TI_MASK) == SEGMENT_LDT) {
26 u32 *desc; 27 struct desc_struct *desc;
27 unsigned long base; 28 unsigned long base;
28 29
29 seg &= ~7UL; 30 seg &= ~7UL;
@@ -33,12 +34,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
33 addr = -1L; /* bogus selector, access would fault */ 34 addr = -1L; /* bogus selector, access would fault */
34 else { 35 else {
35 desc = child->mm->context.ldt + seg; 36 desc = child->mm->context.ldt + seg;
36 base = ((desc[0] >> 16) | 37 base = get_desc_base(desc);
37 ((desc[1] & 0xff) << 16) |
38 (desc[1] & 0xff000000));
39 38
40 /* 16-bit code segment? */ 39 /* 16-bit code segment? */
41 if (!((desc[1] >> 22) & 1)) 40 if (!desc->d)
42 addr &= 0xffff; 41 addr &= 0xffff;
43 addr += base; 42 addr += base;
44 } 43 }
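The step.c hunk replaces open-coded descriptor bit-fishing with the desc_struct accessors. For reference, get_desc_base() reassembles the base field that the 286/386 descriptor format scatters across three chunks; a hand-rolled equivalent, assuming the desc_defs.h field names of this era, is:

    static inline unsigned long desc_base_equiv(const struct desc_struct *d)
    {
        return d->base0 |                        /* base[15:0]  */
               ((unsigned long)d->base1 << 16) | /* base[23:16] */
               ((unsigned long)d->base2 << 24);  /* base[31:24] */
    }

and desc->d is the D/B bit, so !desc->d identifies a 16-bit code segment whose IP must be truncated to 16 bits before the base is added.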
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
new file mode 100644
index 000000000000..86c9f91b48ae
--- /dev/null
+++ b/arch/x86/kernel/tboot.c
@@ -0,0 +1,447 @@
1/*
2 * tboot.c: main implementation of helper functions used by kernel for
3 * runtime support of Intel(R) Trusted Execution Technology
4 *
5 * Copyright (c) 2006-2009, Intel Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 */
21
22#include <linux/dma_remapping.h>
23#include <linux/init_task.h>
24#include <linux/spinlock.h>
25#include <linux/delay.h>
26#include <linux/sched.h>
27#include <linux/init.h>
28#include <linux/dmar.h>
29#include <linux/cpu.h>
30#include <linux/pfn.h>
31#include <linux/mm.h>
32#include <linux/tboot.h>
33
34#include <asm/trampoline.h>
35#include <asm/processor.h>
36#include <asm/bootparam.h>
37#include <asm/pgtable.h>
38#include <asm/pgalloc.h>
39#include <asm/fixmap.h>
40#include <asm/proto.h>
41#include <asm/setup.h>
42#include <asm/e820.h>
43#include <asm/io.h>
44
45#include "acpi/realmode/wakeup.h"
46
47/* Global pointer to shared data; NULL means no measured launch. */
48struct tboot *tboot __read_mostly;
49
50/* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */
51#define AP_WAIT_TIMEOUT 1
52
53#undef pr_fmt
54#define pr_fmt(fmt) "tboot: " fmt
55
56static u8 tboot_uuid[16] __initdata = TBOOT_UUID;
57
58void __init tboot_probe(void)
59{
60 /* Look for valid page-aligned address for shared page. */
61 if (!boot_params.tboot_addr)
62 return;
63 /*
64 * also verify that it is mapped as we expect it before calling
65 * set_fixmap(), to reduce chance of garbage value causing crash
66 */
67 if (!e820_any_mapped(boot_params.tboot_addr,
68 boot_params.tboot_addr, E820_RESERVED)) {
69 pr_warning("non-0 tboot_addr but it is not of type E820_RESERVED\n");
70 return;
71 }
72
73 /* only a natively booted kernel should be using TXT */
74 if (paravirt_enabled()) {
75 pr_warning("non-0 tboot_addr but pv_ops is enabled\n");
76 return;
77 }
78
79 /* Map and check for tboot UUID. */
80 set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
81 tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
82 if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
83 pr_warning("tboot at 0x%llx is invalid\n",
84 boot_params.tboot_addr);
85 tboot = NULL;
86 return;
87 }
88 if (tboot->version < 5) {
89 pr_warning("tboot version is invalid: %u\n", tboot->version);
90 tboot = NULL;
91 return;
92 }
93
94 pr_info("found shared page at phys addr 0x%llx:\n",
95 boot_params.tboot_addr);
96 pr_debug("version: %d\n", tboot->version);
97 pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
98 pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
99 pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
100 pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
101}
102
103static pgd_t *tboot_pg_dir;
104static struct mm_struct tboot_mm = {
105 .mm_rb = RB_ROOT,
106 .pgd = swapper_pg_dir,
107 .mm_users = ATOMIC_INIT(2),
108 .mm_count = ATOMIC_INIT(1),
109 .mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
110 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
111 .mmlist = LIST_HEAD_INIT(init_mm.mmlist),
112 .cpu_vm_mask = CPU_MASK_ALL,
113};
114
115static inline void switch_to_tboot_pt(void)
116{
117 write_cr3(virt_to_phys(tboot_pg_dir));
118}
119
120static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
121 pgprot_t prot)
122{
123 pgd_t *pgd;
124 pud_t *pud;
125 pmd_t *pmd;
126 pte_t *pte;
127
128 pgd = pgd_offset(&tboot_mm, vaddr);
129 pud = pud_alloc(&tboot_mm, pgd, vaddr);
130 if (!pud)
131 return -1;
132 pmd = pmd_alloc(&tboot_mm, pud, vaddr);
133 if (!pmd)
134 return -1;
135 pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
136 if (!pte)
137 return -1;
138 set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
139 pte_unmap(pte);
140 return 0;
141}
142
143static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
144 unsigned long nr)
145{
146 /* Reuse the original kernel mapping */
147 tboot_pg_dir = pgd_alloc(&tboot_mm);
148 if (!tboot_pg_dir)
149 return -1;
150
151 for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
152 if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
153 return -1;
154 }
155
156 return 0;
157}
158
159static void tboot_create_trampoline(void)
160{
161 u32 map_base, map_size;
162
163 /* Create identity map for tboot shutdown code. */
164 map_base = PFN_DOWN(tboot->tboot_base);
165 map_size = PFN_UP(tboot->tboot_size);
166 if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size))
167 panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n",
168 map_base, map_size);
169}
170
171#ifdef CONFIG_ACPI_SLEEP
172
173static void add_mac_region(phys_addr_t start, unsigned long size)
174{
175 struct tboot_mac_region *mr;
176 phys_addr_t end = start + size;
177
178 if (start && size) {
179 mr = &tboot->mac_regions[tboot->num_mac_regions++];
180 mr->start = round_down(start, PAGE_SIZE);
181 mr->size = round_up(end, PAGE_SIZE) - mr->start;
182 }
183}
184
185static int tboot_setup_sleep(void)
186{
187 tboot->num_mac_regions = 0;
188
189 /* S3 resume code */
190 add_mac_region(acpi_wakeup_address, WAKEUP_SIZE);
191
192#ifdef CONFIG_X86_TRAMPOLINE
193 /* AP trampoline code */
194 add_mac_region(virt_to_phys(trampoline_base), TRAMPOLINE_SIZE);
195#endif
196
197 /* kernel code + data + bss */
198 add_mac_region(virt_to_phys(_text), _end - _text);
199
200 tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address;
201
202 return 0;
203}
204
205#else /* no CONFIG_ACPI_SLEEP */
206
207static int tboot_setup_sleep(void)
208{
209 /* S3 shutdown requested, but S3 not supported by the kernel... */
210 BUG();
211 return -1;
212}
213
214#endif
215
216void tboot_shutdown(u32 shutdown_type)
217{
218 void (*shutdown)(void);
219
220 if (!tboot_enabled())
221 return;
222
223 /*
224 * if we're being called before the 1:1 mapping is set up then just
225 * return and let the normal shutdown happen; this should only be
226 * due to very early panic()
227 */
228 if (!tboot_pg_dir)
229 return;
230
231 /* if this is S3 then set regions to MAC */
232 if (shutdown_type == TB_SHUTDOWN_S3)
233 if (tboot_setup_sleep())
234 return;
235
236 tboot->shutdown_type = shutdown_type;
237
238 switch_to_tboot_pt();
239
240 shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
241 shutdown();
242
243 /* should not reach here */
244 while (1)
245 halt();
246}
247
248static void tboot_copy_fadt(const struct acpi_table_fadt *fadt)
249{
250#define TB_COPY_GAS(tbg, g) \
251 tbg.space_id = g.space_id; \
252 tbg.bit_width = g.bit_width; \
253 tbg.bit_offset = g.bit_offset; \
254 tbg.access_width = g.access_width; \
255 tbg.address = g.address;
256
257 TB_COPY_GAS(tboot->acpi_sinfo.pm1a_cnt_blk, fadt->xpm1a_control_block);
258 TB_COPY_GAS(tboot->acpi_sinfo.pm1b_cnt_blk, fadt->xpm1b_control_block);
259 TB_COPY_GAS(tboot->acpi_sinfo.pm1a_evt_blk, fadt->xpm1a_event_block);
260 TB_COPY_GAS(tboot->acpi_sinfo.pm1b_evt_blk, fadt->xpm1b_event_block);
261
262 /*
263 * We need phys addr of waking vector, but can't use virt_to_phys() on
264 * &acpi_gbl_FACS because it is ioremap'ed, so calc from FACS phys
265 * addr.
266 */
267 tboot->acpi_sinfo.wakeup_vector = fadt->facs +
268 offsetof(struct acpi_table_facs, firmware_waking_vector);
269}
270
271void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
272{
273 static u32 acpi_shutdown_map[ACPI_S_STATE_COUNT] = {
274 /* S0,1,2: */ -1, -1, -1,
275 /* S3: */ TB_SHUTDOWN_S3,
276 /* S4: */ TB_SHUTDOWN_S4,
277 /* S5: */ TB_SHUTDOWN_S5 };
278
279 if (!tboot_enabled())
280 return;
281
282 tboot_copy_fadt(&acpi_gbl_FADT);
283 tboot->acpi_sinfo.pm1a_cnt_val = pm1a_control;
284 tboot->acpi_sinfo.pm1b_cnt_val = pm1b_control;
285 /* we always use the 32b wakeup vector */
286 tboot->acpi_sinfo.vector_width = 32;
287
288 if (sleep_state >= ACPI_S_STATE_COUNT ||
289 acpi_shutdown_map[sleep_state] == -1) {
290 pr_warning("unsupported sleep state 0x%x\n", sleep_state);
291 return;
292 }
293
294 tboot_shutdown(acpi_shutdown_map[sleep_state]);
295}
296
297static atomic_t ap_wfs_count;
298
299static int tboot_wait_for_aps(int num_aps)
300{
301 unsigned long timeout;
302
303 timeout = AP_WAIT_TIMEOUT*HZ;
304 while (atomic_read((atomic_t *)&tboot->num_in_wfs) != num_aps &&
305 timeout) {
306 mdelay(1);
307 timeout--;
308 }
309
 310 if (!timeout)
311 pr_warning("tboot wait for APs timeout\n");
312
313 return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
314}
315
316static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
317 unsigned long action, void *hcpu)
318{
319 switch (action) {
320 case CPU_DYING:
321 atomic_inc(&ap_wfs_count);
322 if (num_online_cpus() == 1)
323 if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
324 return NOTIFY_BAD;
325 break;
326 }
327 return NOTIFY_OK;
328}
329
330static struct notifier_block tboot_cpu_notifier __cpuinitdata =
331{
332 .notifier_call = tboot_cpu_callback,
333};
334
335static __init int tboot_late_init(void)
336{
337 if (!tboot_enabled())
338 return 0;
339
340 tboot_create_trampoline();
341
342 atomic_set(&ap_wfs_count, 0);
343 register_hotcpu_notifier(&tboot_cpu_notifier);
344 return 0;
345}
346
347late_initcall(tboot_late_init);
348
349/*
350 * TXT configuration registers (offsets from TXT_{PUB, PRIV}_CONFIG_REGS_BASE)
351 */
352
353#define TXT_PUB_CONFIG_REGS_BASE 0xfed30000
354#define TXT_PRIV_CONFIG_REGS_BASE 0xfed20000
355
356/* # pages for each config regs space - used by fixmap */
357#define NR_TXT_CONFIG_PAGES ((TXT_PUB_CONFIG_REGS_BASE - \
358 TXT_PRIV_CONFIG_REGS_BASE) >> PAGE_SHIFT)
359
360/* offsets from pub/priv config space */
361#define TXTCR_HEAP_BASE 0x0300
362#define TXTCR_HEAP_SIZE 0x0308
363
364#define SHA1_SIZE 20
365
366struct sha1_hash {
367 u8 hash[SHA1_SIZE];
368};
369
370struct sinit_mle_data {
371 u32 version; /* currently 6 */
372 struct sha1_hash bios_acm_id;
373 u32 edx_senter_flags;
374 u64 mseg_valid;
375 struct sha1_hash sinit_hash;
376 struct sha1_hash mle_hash;
377 struct sha1_hash stm_hash;
378 struct sha1_hash lcp_policy_hash;
379 u32 lcp_policy_control;
380 u32 rlp_wakeup_addr;
381 u32 reserved;
382 u32 num_mdrs;
383 u32 mdrs_off;
384 u32 num_vtd_dmars;
385 u32 vtd_dmars_off;
386} __packed;
387
388struct acpi_table_header *tboot_get_dmar_table(struct acpi_table_header *dmar_tbl)
389{
390 void *heap_base, *heap_ptr, *config;
391
392 if (!tboot_enabled())
393 return dmar_tbl;
394
395 /*
396 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
397 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
398 */
399
400 /* map config space in order to get heap addr */
401 config = ioremap(TXT_PUB_CONFIG_REGS_BASE, NR_TXT_CONFIG_PAGES *
402 PAGE_SIZE);
403 if (!config)
404 return NULL;
405
406 /* now map TXT heap */
407 heap_base = ioremap(*(u64 *)(config + TXTCR_HEAP_BASE),
408 *(u64 *)(config + TXTCR_HEAP_SIZE));
409 iounmap(config);
410 if (!heap_base)
411 return NULL;
412
413 /* walk heap to SinitMleData */
414 /* skip BiosData */
415 heap_ptr = heap_base + *(u64 *)heap_base;
416 /* skip OsMleData */
417 heap_ptr += *(u64 *)heap_ptr;
418 /* skip OsSinitData */
419 heap_ptr += *(u64 *)heap_ptr;
420 /* now points to SinitMleDataSize; set to SinitMleData */
421 heap_ptr += sizeof(u64);
422 /* get addr of DMAR table */
423 dmar_tbl = (struct acpi_table_header *)(heap_ptr +
424 ((struct sinit_mle_data *)heap_ptr)->vtd_dmars_off -
425 sizeof(u64));
426
427 /* don't unmap heap because dmar.c needs access to this */
428
429 return dmar_tbl;
430}
431
432int tboot_force_iommu(void)
433{
434 if (!tboot_enabled())
435 return 0;
436
437 if (no_iommu || swiotlb || dmar_disabled)
438 pr_warning("Forcing Intel-IOMMU to enabled\n");
439
440 dmar_disabled = 0;
441#ifdef CONFIG_SWIOTLB
442 swiotlb = 0;
443#endif
444 no_iommu = 0;
445
446 return 1;
447}
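One part of tboot_get_dmar_table() worth spelling out: the TXT heap is a sequence of variable-sized regions (BiosData, OsMleData, OsSinitData, SinitMleData), each prefixed by a u64 giving the region's size; the walk assumes that size counts the size field itself. The pointer arithmetic above is therefore:

    void *p = heap_base;
    p += *(u64 *)p;         /* skip BiosData */
    p += *(u64 *)p;         /* skip OsMleData */
    p += *(u64 *)p;         /* skip OsSinitData */
    p += sizeof(u64);       /* step over SinitMleData's own size field */
    /* p now points at struct sinit_mle_data; the DMA-protected DMAR copy
       sits at p + vtd_dmars_off - sizeof(u64) */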
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index 77b9689f8edb..503c1f2e8835 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -640,13 +640,13 @@ static int __init uv_ptc_init(void)
640 if (!is_uv_system()) 640 if (!is_uv_system())
641 return 0; 641 return 0;
642 642
643 proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL); 643 proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
644 &proc_uv_ptc_operations);
644 if (!proc_uv_ptc) { 645 if (!proc_uv_ptc) {
645 printk(KERN_ERR "unable to create %s proc entry\n", 646 printk(KERN_ERR "unable to create %s proc entry\n",
646 UV_PTC_BASENAME); 647 UV_PTC_BASENAME);
647 return -EINVAL; 648 return -EINVAL;
648 } 649 }
649 proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
650 return 0; 650 return 0;
651} 651}
652 652
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 5204332f475d..83264922a878 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -76,7 +76,7 @@ char ignore_fpu_irq;
76 * F0 0F bug workaround.. We have a special link segment 76 * F0 0F bug workaround.. We have a special link segment
77 * for this. 77 * for this.
78 */ 78 */
79gate_desc idt_table[256] 79gate_desc idt_table[NR_VECTORS]
80 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, }; 80 __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
81#endif 81#endif
82 82
@@ -786,33 +786,34 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
786#endif 786#endif
787} 787}
788 788
789#ifdef CONFIG_X86_32 789asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
790unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
791{ 790{
792 struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
793 unsigned long base = (kesp - uesp) & -THREAD_SIZE;
794 unsigned long new_kesp = kesp - base;
795 unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
796 __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
797
798 /* Set up base for espfix segment */
799 desc &= 0x00f0ff0000000000ULL;
800 desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
801 ((((__u64)base) << 32) & 0xff00000000000000ULL) |
802 ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
803 (lim_pages & 0xffff);
804 *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
805
806 return new_kesp;
807} 791}
808#endif
809 792
810asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void) 793asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
811{ 794{
812} 795}
813 796
814asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void) 797/*
798 * __math_state_restore assumes that cr0.TS is already clear and the
799 * fpu state is all ready for use. Used during context switch.
800 */
801void __math_state_restore(void)
815{ 802{
803 struct thread_info *thread = current_thread_info();
804 struct task_struct *tsk = thread->task;
805
806 /*
807 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
808 */
809 if (unlikely(restore_fpu_checking(tsk))) {
810 stts();
811 force_sig(SIGSEGV, tsk);
812 return;
813 }
814
815 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
816 tsk->fpu_counter++;
816} 817}
817 818
818/* 819/*
@@ -846,17 +847,8 @@ asmlinkage void math_state_restore(void)
846 } 847 }
847 848
848 clts(); /* Allow maths ops (or we recurse) */ 849 clts(); /* Allow maths ops (or we recurse) */
849 /*
850 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
851 */
852 if (unlikely(restore_fpu_checking(tsk))) {
853 stts();
854 force_sig(SIGSEGV, tsk);
855 return;
856 }
857 850
858 thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */ 851 __math_state_restore();
859 tsk->fpu_counter++;
860} 852}
861EXPORT_SYMBOL_GPL(math_state_restore); 853EXPORT_SYMBOL_GPL(math_state_restore);
862 854
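The traps.c split factors the restore-and-mark step out of math_state_restore() so the context-switch path can call it with CR0.TS already clear. The resulting division of labor, condensed:

    /* trap-time entry: may init math state (can sleep), clears TS itself */
    asmlinkage void math_state_restore(void)
    {
        /* ... init_fpu() on first use ... */
        clts();                     /* allow maths ops (or we recurse) */
        __math_state_restore();     /* paranoid restore + TS_USEDFPU */
    }

    /* switch-time entry (__switch_to): the caller has already run clts(),
       so __math_state_restore() is invoked directly */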
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9fc178255c04..0ccb57d5ee35 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -348,15 +348,12 @@ SECTIONS
348 _end = .; 348 _end = .;
349 } 349 }
350 350
351 /* Sections to be discarded */
352 /DISCARD/ : {
353 *(.exitcall.exit)
354 *(.eh_frame)
355 *(.discard)
356 }
357
358 STABS_DEBUG 351 STABS_DEBUG
359 DWARF_DEBUG 352 DWARF_DEBUG
353
354 /* Sections to be discarded */
355 DISCARDS
356 /DISCARD/ : { *(.eh_frame) }
360} 357}
361 358
362 359
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 8600a09e0c6c..b84e571f4175 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -1,12 +1,8 @@
1# 1#
2# KVM configuration 2# KVM configuration
3# 3#
4config HAVE_KVM
5 bool
6 4
7config HAVE_KVM_IRQCHIP 5source "virt/kvm/Kconfig"
8 bool
9 default y
10 6
11menuconfig VIRTUALIZATION 7menuconfig VIRTUALIZATION
12 bool "Virtualization" 8 bool "Virtualization"
@@ -29,6 +25,9 @@ config KVM
29 select PREEMPT_NOTIFIERS 25 select PREEMPT_NOTIFIERS
30 select MMU_NOTIFIER 26 select MMU_NOTIFIER
31 select ANON_INODES 27 select ANON_INODES
28 select HAVE_KVM_IRQCHIP
29 select HAVE_KVM_EVENTFD
30 select KVM_APIC_ARCHITECTURE
32 ---help--- 31 ---help---
33 Support hosting fully virtualized guest machines using hardware 32 Support hosting fully virtualized guest machines using hardware
34 virtualization extensions. You will need a fairly recent 33 virtualization extensions. You will need a fairly recent
@@ -63,18 +62,6 @@ config KVM_AMD
63 To compile this as a module, choose M here: the module 62 To compile this as a module, choose M here: the module
64 will be called kvm-amd. 63 will be called kvm-amd.
65 64
66config KVM_TRACE
67 bool "KVM trace support"
68 depends on KVM && SYSFS
69 select MARKERS
70 select RELAY
71 select DEBUG_FS
72 default n
73 ---help---
74 This option allows reading a trace of kvm-related events through
75 relayfs. Note the ABI is not considered stable and will be
76 modified in future updates.
77
78# OK, it's a little counter-intuitive to do this, but it puts it neatly under 65# OK, it's a little counter-intuitive to do this, but it puts it neatly under
79# the virtualization menu. 66# the virtualization menu.
80source drivers/lguest/Kconfig 67source drivers/lguest/Kconfig
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b43c4efafe80..0e7fe78d0f74 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,22 +1,19 @@
1#
2# Makefile for Kernel-based Virtual Machine module
3#
4
5common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
6 coalesced_mmio.o irq_comm.o)
7ifeq ($(CONFIG_KVM_TRACE),y)
8common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
9endif
10ifeq ($(CONFIG_IOMMU_API),y)
11common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
12endif
13 1
14EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm 2EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
15 3
16kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \ 4CFLAGS_x86.o := -I.
17 i8254.o timer.o 5CFLAGS_svm.o := -I.
18obj-$(CONFIG_KVM) += kvm.o 6CFLAGS_vmx.o := -I.
19kvm-intel-objs = vmx.o 7
20obj-$(CONFIG_KVM_INTEL) += kvm-intel.o 8kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
21kvm-amd-objs = svm.o 9 coalesced_mmio.o irq_comm.o eventfd.o)
22obj-$(CONFIG_KVM_AMD) += kvm-amd.o 10kvm-$(CONFIG_IOMMU_API) += $(addprefix ../../../virt/kvm/, iommu.o)
11
12kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
13 i8254.o timer.o
14kvm-intel-y += vmx.o
15kvm-amd-y += svm.o
16
17obj-$(CONFIG_KVM) += kvm.o
18obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
19obj-$(CONFIG_KVM_AMD) += kvm-amd.o
diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/emulate.c
index 616de4628d60..1be5cd640e93 100644
--- a/arch/x86/kvm/x86_emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1,5 +1,5 @@
1/****************************************************************************** 1/******************************************************************************
2 * x86_emulate.c 2 * emulate.c
3 * 3 *
4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator. 4 * Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
5 * 5 *
@@ -30,7 +30,9 @@
30#define DPRINTF(x...) do {} while (0) 30#define DPRINTF(x...) do {} while (0)
31#endif 31#endif
32#include <linux/module.h> 32#include <linux/module.h>
33#include <asm/kvm_x86_emulate.h> 33#include <asm/kvm_emulate.h>
34
35#include "mmu.h" /* for is_long_mode() */
34 36
35/* 37/*
36 * Opcode effective-address decode tables. 38 * Opcode effective-address decode tables.
@@ -60,6 +62,7 @@
60#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ 62#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
61#define SrcOne (7<<4) /* Implied '1' */ 63#define SrcOne (7<<4) /* Implied '1' */
62#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */ 64#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
65#define SrcImmU (9<<4) /* Immediate operand, unsigned */
63#define SrcMask (0xf<<4) 66#define SrcMask (0xf<<4)
64/* Generic ModRM decode. */ 67/* Generic ModRM decode. */
65#define ModRM (1<<8) 68#define ModRM (1<<8)
@@ -97,11 +100,11 @@ static u32 opcode_table[256] = {
97 /* 0x10 - 0x17 */ 100 /* 0x10 - 0x17 */
98 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 101 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
99 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 102 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
100 0, 0, 0, 0, 103 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
101 /* 0x18 - 0x1F */ 104 /* 0x18 - 0x1F */
102 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 105 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
103 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 106 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
104 0, 0, 0, 0, 107 ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, 0, 0,
105 /* 0x20 - 0x27 */ 108 /* 0x20 - 0x27 */
106 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, 109 ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
107 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, 110 ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
@@ -195,7 +198,7 @@ static u32 opcode_table[256] = {
195 ByteOp | SrcImmUByte, SrcImmUByte, 198 ByteOp | SrcImmUByte, SrcImmUByte,
196 /* 0xE8 - 0xEF */ 199 /* 0xE8 - 0xEF */
197 SrcImm | Stack, SrcImm | ImplicitOps, 200 SrcImm | Stack, SrcImm | ImplicitOps,
198 SrcImm | Src2Imm16, SrcImmByte | ImplicitOps, 201 SrcImmU | Src2Imm16, SrcImmByte | ImplicitOps,
199 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 202 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
200 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, 203 SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
201 /* 0xF0 - 0xF7 */ 204 /* 0xF0 - 0xF7 */
@@ -208,7 +211,7 @@ static u32 opcode_table[256] = {
208 211
209static u32 twobyte_table[256] = { 212static u32 twobyte_table[256] = {
210 /* 0x00 - 0x0F */ 213 /* 0x00 - 0x0F */
211 0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0, 214 0, Group | GroupDual | Group7, 0, 0, 0, ImplicitOps, ImplicitOps, 0,
212 ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 215 ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
213 /* 0x10 - 0x1F */ 216 /* 0x10 - 0x1F */
214 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, 217 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
@@ -216,7 +219,9 @@ static u32 twobyte_table[256] = {
216 ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0, 219 ModRM | ImplicitOps, ModRM, ModRM | ImplicitOps, ModRM, 0, 0, 0, 0,
217 0, 0, 0, 0, 0, 0, 0, 0, 220 0, 0, 0, 0, 0, 0, 0, 0,
218 /* 0x30 - 0x3F */ 221 /* 0x30 - 0x3F */
219 ImplicitOps, 0, ImplicitOps, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 222 ImplicitOps, 0, ImplicitOps, 0,
223 ImplicitOps, ImplicitOps, 0, 0,
224 0, 0, 0, 0, 0, 0, 0, 0,
220 /* 0x40 - 0x47 */ 225 /* 0x40 - 0x47 */
221 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, 226 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
222 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, 227 DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
@@ -319,8 +324,11 @@ static u32 group2_table[] = {
319}; 324};
320 325
321/* EFLAGS bit definitions. */ 326/* EFLAGS bit definitions. */
327#define EFLG_VM (1<<17)
328#define EFLG_RF (1<<16)
322#define EFLG_OF (1<<11) 329#define EFLG_OF (1<<11)
323#define EFLG_DF (1<<10) 330#define EFLG_DF (1<<10)
331#define EFLG_IF (1<<9)
324#define EFLG_SF (1<<7) 332#define EFLG_SF (1<<7)
325#define EFLG_ZF (1<<6) 333#define EFLG_ZF (1<<6)
326#define EFLG_AF (1<<4) 334#define EFLG_AF (1<<4)
@@ -1027,6 +1035,7 @@ done_prefixes:
1027 c->src.type = OP_MEM; 1035 c->src.type = OP_MEM;
1028 break; 1036 break;
1029 case SrcImm: 1037 case SrcImm:
1038 case SrcImmU:
1030 c->src.type = OP_IMM; 1039 c->src.type = OP_IMM;
1031 c->src.ptr = (unsigned long *)c->eip; 1040 c->src.ptr = (unsigned long *)c->eip;
1032 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 1041 c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
@@ -1044,6 +1053,19 @@ done_prefixes:
1044 c->src.val = insn_fetch(s32, 4, c->eip); 1053 c->src.val = insn_fetch(s32, 4, c->eip);
1045 break; 1054 break;
1046 } 1055 }
1056 if ((c->d & SrcMask) == SrcImmU) {
1057 switch (c->src.bytes) {
1058 case 1:
1059 c->src.val &= 0xff;
1060 break;
1061 case 2:
1062 c->src.val &= 0xffff;
1063 break;
1064 case 4:
1065 c->src.val &= 0xffffffff;
1066 break;
1067 }
1068 }
1047 break; 1069 break;
1048 case SrcImmByte: 1070 case SrcImmByte:
1049 case SrcImmUByte: 1071 case SrcImmUByte:
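SrcImmU exists because 0xEA (far jmp ptr16:32) carries an unsigned offset: the shared immediate fetch above sign-extends, so unsigned sources are masked back to their operand size afterwards. The three switch cases collapse to a single expression, valid for the 1/2/4-byte sizes handled here:

    c->src.val &= (1ULL << (c->src.bytes * 8)) - 1;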
@@ -1375,6 +1397,217 @@ static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
1375 ctxt->interruptibility = mask; 1397 ctxt->interruptibility = mask;
1376} 1398}
1377 1399
1400static inline void
1401setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
1402 struct kvm_segment *cs, struct kvm_segment *ss)
1403{
1404 memset(cs, 0, sizeof(struct kvm_segment));
1405 kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
1406 memset(ss, 0, sizeof(struct kvm_segment));
1407
1408 cs->l = 0; /* will be adjusted later */
1409 cs->base = 0; /* flat segment */
1410 cs->g = 1; /* 4kb granularity */
1411 cs->limit = 0xffffffff; /* 4GB limit */
1412 cs->type = 0x0b; /* Read, Execute, Accessed */
1413 cs->s = 1;
1414 cs->dpl = 0; /* will be adjusted later */
1415 cs->present = 1;
1416 cs->db = 1;
1417
1418 ss->unusable = 0;
1419 ss->base = 0; /* flat segment */
1420 ss->limit = 0xffffffff; /* 4GB limit */
1421 ss->g = 1; /* 4kb granularity */
1422 ss->s = 1;
1423 ss->type = 0x03; /* Read/Write, Accessed */
1424 ss->db = 1; /* 32bit stack segment */
1425 ss->dpl = 0;
1426 ss->present = 1;
1427}
1428
1429static int
1430emulate_syscall(struct x86_emulate_ctxt *ctxt)
1431{
1432 struct decode_cache *c = &ctxt->decode;
1433 struct kvm_segment cs, ss;
1434 u64 msr_data;
1435
1436 /* syscall is not available in real mode */
1437 if (c->lock_prefix || ctxt->mode == X86EMUL_MODE_REAL
1438 || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE))
1439 return -1;
1440
1441 setup_syscalls_segments(ctxt, &cs, &ss);
1442
1443 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1444 msr_data >>= 32;
1445 cs.selector = (u16)(msr_data & 0xfffc);
1446 ss.selector = (u16)(msr_data + 8);
1447
1448 if (is_long_mode(ctxt->vcpu)) {
1449 cs.db = 0;
1450 cs.l = 1;
1451 }
1452 kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
1453 kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
1454
1455 c->regs[VCPU_REGS_RCX] = c->eip;
1456 if (is_long_mode(ctxt->vcpu)) {
1457#ifdef CONFIG_X86_64
1458 c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
1459
1460 kvm_x86_ops->get_msr(ctxt->vcpu,
1461 ctxt->mode == X86EMUL_MODE_PROT64 ?
1462 MSR_LSTAR : MSR_CSTAR, &msr_data);
1463 c->eip = msr_data;
1464
1465 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
1466 ctxt->eflags &= ~(msr_data | EFLG_RF);
1467#endif
1468 } else {
1469 /* legacy mode */
1470 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
1471 c->eip = (u32)msr_data;
1472
1473 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1474 }
1475
1476 return 0;
1477}
1478
1479static int
1480emulate_sysenter(struct x86_emulate_ctxt *ctxt)
1481{
1482 struct decode_cache *c = &ctxt->decode;
1483 struct kvm_segment cs, ss;
1484 u64 msr_data;
1485
1486 /* inject #UD if LOCK prefix is used */
1487 if (c->lock_prefix)
1488 return -1;
1489
1490 /* inject #GP if in real mode or paging is disabled */
1491 if (ctxt->mode == X86EMUL_MODE_REAL ||
1492 !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1493 kvm_inject_gp(ctxt->vcpu, 0);
1494 return -1;
1495 }
1496
1497 /* XXX sysenter/sysexit have not been tested in 64bit mode.
1498 * Therefore, we inject an #UD.
1499 */
1500 if (ctxt->mode == X86EMUL_MODE_PROT64)
1501 return -1;
1502
1503 setup_syscalls_segments(ctxt, &cs, &ss);
1504
1505 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1506 switch (ctxt->mode) {
1507 case X86EMUL_MODE_PROT32:
1508 if ((msr_data & 0xfffc) == 0x0) {
1509 kvm_inject_gp(ctxt->vcpu, 0);
1510 return -1;
1511 }
1512 break;
1513 case X86EMUL_MODE_PROT64:
1514 if (msr_data == 0x0) {
1515 kvm_inject_gp(ctxt->vcpu, 0);
1516 return -1;
1517 }
1518 break;
1519 }
1520
1521 ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
1522 cs.selector = (u16)msr_data;
1523 cs.selector &= ~SELECTOR_RPL_MASK;
1524 ss.selector = cs.selector + 8;
1525 ss.selector &= ~SELECTOR_RPL_MASK;
1526 if (ctxt->mode == X86EMUL_MODE_PROT64
1527 || is_long_mode(ctxt->vcpu)) {
1528 cs.db = 0;
1529 cs.l = 1;
1530 }
1531
1532 kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
1533 kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
1534
1535 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
1536 c->eip = msr_data;
1537
1538 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
1539 c->regs[VCPU_REGS_RSP] = msr_data;
1540
1541 return 0;
1542}
1543
1544static int
1545emulate_sysexit(struct x86_emulate_ctxt *ctxt)
1546{
1547 struct decode_cache *c = &ctxt->decode;
1548 struct kvm_segment cs, ss;
1549 u64 msr_data;
1550 int usermode;
1551
1552 /* inject #UD if LOCK prefix is used */
1553 if (c->lock_prefix)
1554 return -1;
1555
1556 /* inject #GP if in real mode or paging is disabled */
1557 if (ctxt->mode == X86EMUL_MODE_REAL
1558 || !(ctxt->vcpu->arch.cr0 & X86_CR0_PE)) {
1559 kvm_inject_gp(ctxt->vcpu, 0);
1560 return -1;
1561 }
1562
1563 /* sysexit must be called from CPL 0 */
1564 if (kvm_x86_ops->get_cpl(ctxt->vcpu) != 0) {
1565 kvm_inject_gp(ctxt->vcpu, 0);
1566 return -1;
1567 }
1568
1569 setup_syscalls_segments(ctxt, &cs, &ss);
1570
1571 if ((c->rex_prefix & 0x8) != 0x0)
1572 usermode = X86EMUL_MODE_PROT64;
1573 else
1574 usermode = X86EMUL_MODE_PROT32;
1575
1576 cs.dpl = 3;
1577 ss.dpl = 3;
1578 kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
1579 switch (usermode) {
1580 case X86EMUL_MODE_PROT32:
1581 cs.selector = (u16)(msr_data + 16);
1582 if ((msr_data & 0xfffc) == 0x0) {
1583 kvm_inject_gp(ctxt->vcpu, 0);
1584 return -1;
1585 }
1586 ss.selector = (u16)(msr_data + 24);
1587 break;
1588 case X86EMUL_MODE_PROT64:
1589 cs.selector = (u16)(msr_data + 32);
1590 if (msr_data == 0x0) {
1591 kvm_inject_gp(ctxt->vcpu, 0);
1592 return -1;
1593 }
1594 ss.selector = cs.selector + 8;
1595 cs.db = 0;
1596 cs.l = 1;
1597 break;
1598 }
1599 cs.selector |= SELECTOR_RPL_MASK;
1600 ss.selector |= SELECTOR_RPL_MASK;
1601
1602 kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
1603 kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
1604
1605 c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
1606 c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
1607
1608 return 0;
1609}
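The selector arithmetic in emulate_sysexit() above encodes the architected SYSEXIT layout relative to MSR_IA32_SYSENTER_CS. A worked check, assuming a hypothetical SYSENTER_CS value of 0x10 and SELECTOR_RPL_MASK == 3:

#include <assert.h>

int main(void)
{
	unsigned sysenter_cs = 0x10;	/* hypothetical MSR_IA32_SYSENTER_CS */
	unsigned rpl = 3;		/* assumed SELECTOR_RPL_MASK */

	/* 32-bit return path: CS at +16, SS at +24 */
	assert(((sysenter_cs + 16) | rpl) == 0x23);
	assert(((sysenter_cs + 24) | rpl) == 0x2b);

	/* 64-bit return path: CS at +32, SS = CS + 8 */
	assert(((sysenter_cs + 32) | rpl) == 0x33);
	assert(((sysenter_cs + 32 + 8) | rpl) == 0x3b);
	return 0;
}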
1610
1378int 1611int
1379x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) 1612x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1380{ 1613{
@@ -1970,6 +2203,12 @@ twobyte_insn:
1970 goto cannot_emulate; 2203 goto cannot_emulate;
1971 } 2204 }
1972 break; 2205 break;
2206 case 0x05: /* syscall */
2207 if (emulate_syscall(ctxt) == -1)
2208 goto cannot_emulate;
2209 else
2210 goto writeback;
2211 break;
1973 case 0x06: 2212 case 0x06:
1974 emulate_clts(ctxt->vcpu); 2213 emulate_clts(ctxt->vcpu);
1975 c->dst.type = OP_NONE; 2214 c->dst.type = OP_NONE;
@@ -2036,6 +2275,18 @@ twobyte_insn:
2036 rc = X86EMUL_CONTINUE; 2275 rc = X86EMUL_CONTINUE;
2037 c->dst.type = OP_NONE; 2276 c->dst.type = OP_NONE;
2038 break; 2277 break;
2278 case 0x34: /* sysenter */
2279 if (emulate_sysenter(ctxt) == -1)
2280 goto cannot_emulate;
2281 else
2282 goto writeback;
2283 break;
2284 case 0x35: /* sysexit */
2285 if (emulate_sysexit(ctxt) == -1)
2286 goto cannot_emulate;
2287 else
2288 goto writeback;
2289 break;
2039 case 0x40 ... 0x4f: /* cmov */ 2290 case 0x40 ... 0x4f: /* cmov */
2040 c->dst.val = c->dst.orig_val = c->src.val; 2291 c->dst.val = c->dst.orig_val = c->src.val;
2041 if (!test_cc(c->b, ctxt->eflags)) 2292 if (!test_cc(c->b, ctxt->eflags))
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 21f68e00524f..82ad523b4901 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -231,7 +231,7 @@ int pit_has_pending_timer(struct kvm_vcpu *vcpu)
231{ 231{
232 struct kvm_pit *pit = vcpu->kvm->arch.vpit; 232 struct kvm_pit *pit = vcpu->kvm->arch.vpit;
233 233
234 if (pit && vcpu->vcpu_id == 0 && pit->pit_state.irq_ack) 234 if (pit && kvm_vcpu_is_bsp(vcpu) && pit->pit_state.irq_ack)
235 return atomic_read(&pit->pit_state.pit_timer.pending); 235 return atomic_read(&pit->pit_state.pit_timer.pending);
236 return 0; 236 return 0;
237} 237}
@@ -252,7 +252,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
252 struct kvm_pit *pit = vcpu->kvm->arch.vpit; 252 struct kvm_pit *pit = vcpu->kvm->arch.vpit;
253 struct hrtimer *timer; 253 struct hrtimer *timer;
254 254
255 if (vcpu->vcpu_id != 0 || !pit) 255 if (!kvm_vcpu_is_bsp(vcpu) || !pit)
256 return; 256 return;
257 257
258 timer = &pit->pit_state.pit_timer.timer; 258 timer = &pit->pit_state.pit_timer.timer;
@@ -294,7 +294,7 @@ static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
294 pt->timer.function = kvm_timer_fn; 294 pt->timer.function = kvm_timer_fn;
295 pt->t_ops = &kpit_ops; 295 pt->t_ops = &kpit_ops;
296 pt->kvm = ps->pit->kvm; 296 pt->kvm = ps->pit->kvm;
297 pt->vcpu_id = 0; 297 pt->vcpu = pt->kvm->bsp_vcpu;
298 298
299 atomic_set(&pt->pending, 0); 299 atomic_set(&pt->pending, 0);
300 ps->irq_ack = 1; 300 ps->irq_ack = 1;
@@ -332,33 +332,62 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
332 case 1: 332 case 1:
333 /* FIXME: enhance mode 4 precision */ 333 /* FIXME: enhance mode 4 precision */
334 case 4: 334 case 4:
335 create_pit_timer(ps, val, 0); 335 if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
336 create_pit_timer(ps, val, 0);
337 }
336 break; 338 break;
337 case 2: 339 case 2:
338 case 3: 340 case 3:
339 create_pit_timer(ps, val, 1); 341 if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
342 create_pit_timer(ps, val, 1);
343 }
340 break; 344 break;
341 default: 345 default:
342 destroy_pit_timer(&ps->pit_timer); 346 destroy_pit_timer(&ps->pit_timer);
343 } 347 }
344} 348}
345 349
346void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val) 350void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
351{
352 u8 saved_mode;
353 if (hpet_legacy_start) {
354 /* save existing mode for later reenablement */
355 saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
356 kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
357 pit_load_count(kvm, channel, val);
358 kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
359 } else {
360 pit_load_count(kvm, channel, val);
361 }
362}
363
364static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
365{
366 return container_of(dev, struct kvm_pit, dev);
367}
368
369static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
347{ 370{
348 mutex_lock(&kvm->arch.vpit->pit_state.lock); 371 return container_of(dev, struct kvm_pit, speaker_dev);
349 pit_load_count(kvm, channel, val);
350 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
351} 372}
352 373
353static void pit_ioport_write(struct kvm_io_device *this, 374static inline int pit_in_range(gpa_t addr)
354 gpa_t addr, int len, const void *data)
355{ 375{
356 struct kvm_pit *pit = (struct kvm_pit *)this->private; 376 return ((addr >= KVM_PIT_BASE_ADDRESS) &&
377 (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
378}
379
380static int pit_ioport_write(struct kvm_io_device *this,
381 gpa_t addr, int len, const void *data)
382{
383 struct kvm_pit *pit = dev_to_pit(this);
357 struct kvm_kpit_state *pit_state = &pit->pit_state; 384 struct kvm_kpit_state *pit_state = &pit->pit_state;
358 struct kvm *kvm = pit->kvm; 385 struct kvm *kvm = pit->kvm;
359 int channel, access; 386 int channel, access;
360 struct kvm_kpit_channel_state *s; 387 struct kvm_kpit_channel_state *s;
361 u32 val = *(u32 *) data; 388 u32 val = *(u32 *) data;
389 if (!pit_in_range(addr))
390 return -EOPNOTSUPP;
362 391
363 val &= 0xff; 392 val &= 0xff;
364 addr &= KVM_PIT_CHANNEL_MASK; 393 addr &= KVM_PIT_CHANNEL_MASK;
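dev_to_pit() and speaker_to_pit() above replace the old this->private back-pointer with the container_of idiom, which recovers the enclosing object from a pointer to one of its embedded members. A self-contained sketch of the pattern (the struct names here are illustrative):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_device { int dummy; };

struct pit {
	int state;
	struct io_device dev;		/* embedded, like kvm_pit::dev */
	struct io_device speaker_dev;	/* like kvm_pit::speaker_dev */
};

static struct pit *dev_to_pit(struct io_device *dev)
{
	/* no ->private field needed: pointer arithmetic recovers it */
	return container_of(dev, struct pit, dev);
}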
@@ -421,16 +450,19 @@ static void pit_ioport_write(struct kvm_io_device *this,
421 } 450 }
422 451
423 mutex_unlock(&pit_state->lock); 452 mutex_unlock(&pit_state->lock);
453 return 0;
424} 454}
425 455
426static void pit_ioport_read(struct kvm_io_device *this, 456static int pit_ioport_read(struct kvm_io_device *this,
427 gpa_t addr, int len, void *data) 457 gpa_t addr, int len, void *data)
428{ 458{
429 struct kvm_pit *pit = (struct kvm_pit *)this->private; 459 struct kvm_pit *pit = dev_to_pit(this);
430 struct kvm_kpit_state *pit_state = &pit->pit_state; 460 struct kvm_kpit_state *pit_state = &pit->pit_state;
431 struct kvm *kvm = pit->kvm; 461 struct kvm *kvm = pit->kvm;
432 int ret, count; 462 int ret, count;
433 struct kvm_kpit_channel_state *s; 463 struct kvm_kpit_channel_state *s;
464 if (!pit_in_range(addr))
465 return -EOPNOTSUPP;
434 466
435 addr &= KVM_PIT_CHANNEL_MASK; 467 addr &= KVM_PIT_CHANNEL_MASK;
436 s = &pit_state->channels[addr]; 468 s = &pit_state->channels[addr];
@@ -485,37 +517,36 @@ static void pit_ioport_read(struct kvm_io_device *this,
485 memcpy(data, (char *)&ret, len); 517 memcpy(data, (char *)&ret, len);
486 518
487 mutex_unlock(&pit_state->lock); 519 mutex_unlock(&pit_state->lock);
520 return 0;
488} 521}
489 522
490static int pit_in_range(struct kvm_io_device *this, gpa_t addr, 523static int speaker_ioport_write(struct kvm_io_device *this,
491 int len, int is_write) 524 gpa_t addr, int len, const void *data)
492{
493 return ((addr >= KVM_PIT_BASE_ADDRESS) &&
494 (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
495}
496
497static void speaker_ioport_write(struct kvm_io_device *this,
498 gpa_t addr, int len, const void *data)
499{ 525{
500 struct kvm_pit *pit = (struct kvm_pit *)this->private; 526 struct kvm_pit *pit = speaker_to_pit(this);
501 struct kvm_kpit_state *pit_state = &pit->pit_state; 527 struct kvm_kpit_state *pit_state = &pit->pit_state;
502 struct kvm *kvm = pit->kvm; 528 struct kvm *kvm = pit->kvm;
503 u32 val = *(u32 *) data; 529 u32 val = *(u32 *) data;
530 if (addr != KVM_SPEAKER_BASE_ADDRESS)
531 return -EOPNOTSUPP;
504 532
505 mutex_lock(&pit_state->lock); 533 mutex_lock(&pit_state->lock);
506 pit_state->speaker_data_on = (val >> 1) & 1; 534 pit_state->speaker_data_on = (val >> 1) & 1;
507 pit_set_gate(kvm, 2, val & 1); 535 pit_set_gate(kvm, 2, val & 1);
508 mutex_unlock(&pit_state->lock); 536 mutex_unlock(&pit_state->lock);
537 return 0;
509} 538}
510 539
511static void speaker_ioport_read(struct kvm_io_device *this, 540static int speaker_ioport_read(struct kvm_io_device *this,
512 gpa_t addr, int len, void *data) 541 gpa_t addr, int len, void *data)
513{ 542{
514 struct kvm_pit *pit = (struct kvm_pit *)this->private; 543 struct kvm_pit *pit = speaker_to_pit(this);
515 struct kvm_kpit_state *pit_state = &pit->pit_state; 544 struct kvm_kpit_state *pit_state = &pit->pit_state;
516 struct kvm *kvm = pit->kvm; 545 struct kvm *kvm = pit->kvm;
517 unsigned int refresh_clock; 546 unsigned int refresh_clock;
518 int ret; 547 int ret;
548 if (addr != KVM_SPEAKER_BASE_ADDRESS)
549 return -EOPNOTSUPP;
519 550
520 /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */ 551 /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
521 refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1; 552 refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
@@ -527,12 +558,7 @@ static void speaker_ioport_read(struct kvm_io_device *this,
527 len = sizeof(ret); 558 len = sizeof(ret);
528 memcpy(data, (char *)&ret, len); 559 memcpy(data, (char *)&ret, len);
529 mutex_unlock(&pit_state->lock); 560 mutex_unlock(&pit_state->lock);
530} 561 return 0;
531
532static int speaker_in_range(struct kvm_io_device *this, gpa_t addr,
533 int len, int is_write)
534{
535 return (addr == KVM_SPEAKER_BASE_ADDRESS);
536} 562}
537 563
538void kvm_pit_reset(struct kvm_pit *pit) 564void kvm_pit_reset(struct kvm_pit *pit)
@@ -541,6 +567,7 @@ void kvm_pit_reset(struct kvm_pit *pit)
541 struct kvm_kpit_channel_state *c; 567 struct kvm_kpit_channel_state *c;
542 568
543 mutex_lock(&pit->pit_state.lock); 569 mutex_lock(&pit->pit_state.lock);
570 pit->pit_state.flags = 0;
544 for (i = 0; i < 3; i++) { 571 for (i = 0; i < 3; i++) {
545 c = &pit->pit_state.channels[i]; 572 c = &pit->pit_state.channels[i];
546 c->mode = 0xff; 573 c->mode = 0xff;
@@ -563,10 +590,22 @@ static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
563 } 590 }
564} 591}
565 592
566struct kvm_pit *kvm_create_pit(struct kvm *kvm) 593static const struct kvm_io_device_ops pit_dev_ops = {
594 .read = pit_ioport_read,
595 .write = pit_ioport_write,
596};
597
598static const struct kvm_io_device_ops speaker_dev_ops = {
599 .read = speaker_ioport_read,
600 .write = speaker_ioport_write,
601};
602
603/* Caller must have writers lock on slots_lock */
604struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
567{ 605{
568 struct kvm_pit *pit; 606 struct kvm_pit *pit;
569 struct kvm_kpit_state *pit_state; 607 struct kvm_kpit_state *pit_state;
608 int ret;
570 609
571 pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL); 610 pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
572 if (!pit) 611 if (!pit)
@@ -582,19 +621,6 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
582 mutex_lock(&pit->pit_state.lock); 621 mutex_lock(&pit->pit_state.lock);
583 spin_lock_init(&pit->pit_state.inject_lock); 622 spin_lock_init(&pit->pit_state.inject_lock);
584 623
585 /* Initialize PIO device */
586 pit->dev.read = pit_ioport_read;
587 pit->dev.write = pit_ioport_write;
588 pit->dev.in_range = pit_in_range;
589 pit->dev.private = pit;
590 kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
591
592 pit->speaker_dev.read = speaker_ioport_read;
593 pit->speaker_dev.write = speaker_ioport_write;
594 pit->speaker_dev.in_range = speaker_in_range;
595 pit->speaker_dev.private = pit;
596 kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
597
598 kvm->arch.vpit = pit; 624 kvm->arch.vpit = pit;
599 pit->kvm = kvm; 625 pit->kvm = kvm;
600 626
@@ -613,7 +639,30 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
613 pit->mask_notifier.func = pit_mask_notifer; 639 pit->mask_notifier.func = pit_mask_notifer;
614 kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier); 640 kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
615 641
642 kvm_iodevice_init(&pit->dev, &pit_dev_ops);
643 ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
644 if (ret < 0)
645 goto fail;
646
647 if (flags & KVM_PIT_SPEAKER_DUMMY) {
648 kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
649 ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
650 &pit->speaker_dev);
651 if (ret < 0)
652 goto fail_unregister;
653 }
654
616 return pit; 655 return pit;
656
657fail_unregister:
658 __kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);
659
660fail:
661 if (pit->irq_source_id >= 0)
662 kvm_free_irq_source_id(kvm, pit->irq_source_id);
663
664 kfree(pit);
665 return NULL;
617} 666}
618 667
619void kvm_free_pit(struct kvm *kvm) 668void kvm_free_pit(struct kvm *kvm)
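kvm_create_pit() above now unwinds partial registration on failure with the usual goto-label pattern: each label releases only what was already set up, in reverse order of acquisition. A minimal sketch of that shape, with the registration hooks left as declarations:

struct obj;
int register_a(struct obj *o);
int register_b(struct obj *o);
void unregister_a(struct obj *o);

static int setup(struct obj *o)
{
	int ret;

	ret = register_a(o);
	if (ret < 0)
		goto fail;

	ret = register_b(o);
	if (ret < 0)
		goto fail_unregister_a;

	return 0;

fail_unregister_a:
	unregister_a(o);	/* undo only the completed step */
fail:
	return ret;
}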
@@ -623,6 +672,8 @@ void kvm_free_pit(struct kvm *kvm)
623 if (kvm->arch.vpit) { 672 if (kvm->arch.vpit) {
624 kvm_unregister_irq_mask_notifier(kvm, 0, 673 kvm_unregister_irq_mask_notifier(kvm, 0,
625 &kvm->arch.vpit->mask_notifier); 674 &kvm->arch.vpit->mask_notifier);
675 kvm_unregister_irq_ack_notifier(kvm,
676 &kvm->arch.vpit->pit_state.irq_ack_notifier);
626 mutex_lock(&kvm->arch.vpit->pit_state.lock); 677 mutex_lock(&kvm->arch.vpit->pit_state.lock);
627 timer = &kvm->arch.vpit->pit_state.pit_timer.timer; 678 timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
628 hrtimer_cancel(timer); 679 hrtimer_cancel(timer);
@@ -637,10 +688,10 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
637 struct kvm_vcpu *vcpu; 688 struct kvm_vcpu *vcpu;
638 int i; 689 int i;
639 690
640 mutex_lock(&kvm->lock); 691 mutex_lock(&kvm->irq_lock);
641 kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1); 692 kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1);
642 kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0); 693 kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0);
643 mutex_unlock(&kvm->lock); 694 mutex_unlock(&kvm->irq_lock);
644 695
645 /* 696 /*
646 * Provides NMI watchdog support via Virtual Wire mode. 697 * Provides NMI watchdog support via Virtual Wire mode.
@@ -652,11 +703,8 @@ static void __inject_pit_timer_intr(struct kvm *kvm)
652 * VCPU0, and only if its LVT0 is in EXTINT mode. 703 * VCPU0, and only if its LVT0 is in EXTINT mode.
653 */ 704 */
654 if (kvm->arch.vapics_in_nmi_mode > 0) 705 if (kvm->arch.vapics_in_nmi_mode > 0)
655 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 706 kvm_for_each_vcpu(i, vcpu, kvm)
656 vcpu = kvm->vcpus[i]; 707 kvm_apic_nmi_wd_deliver(vcpu);
657 if (vcpu)
658 kvm_apic_nmi_wd_deliver(vcpu);
659 }
660} 708}
661 709
662void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu) 710void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
@@ -665,7 +713,7 @@ void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
665 struct kvm *kvm = vcpu->kvm; 713 struct kvm *kvm = vcpu->kvm;
666 struct kvm_kpit_state *ps; 714 struct kvm_kpit_state *ps;
667 715
668 if (vcpu && pit) { 716 if (pit) {
669 int inject = 0; 717 int inject = 0;
670 ps = &pit->pit_state; 718 ps = &pit->pit_state;
671 719
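The int returns introduced throughout this file change the device contract: a handler that does not own the address returns -EOPNOTSUPP so the I/O bus can offer the access to the next device, replacing the old in_range() callback. A sketch of the dispatch loop this implies (assumed semantics, not the actual kvm_io_bus code):

#include <errno.h>

struct kvm_io_device;
typedef int (*io_write_t)(struct kvm_io_device *dev,
			  unsigned long addr, int len, const void *val);

struct kvm_io_device { io_write_t write; };

static int bus_write(struct kvm_io_device **devs, int ndev,
		     unsigned long addr, int len, const void *val)
{
	int i, ret;

	for (i = 0; i < ndev; i++) {
		ret = devs[i]->write(devs[i], addr, len, val);
		if (ret != -EOPNOTSUPP)
			return ret;	/* this device claimed the access */
	}
	return -EOPNOTSUPP;		/* unhandled address */
}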
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index bbd863ff60b7..d4c1c7ffdc09 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -21,6 +21,7 @@ struct kvm_kpit_channel_state {
21 21
22struct kvm_kpit_state { 22struct kvm_kpit_state {
23 struct kvm_kpit_channel_state channels[3]; 23 struct kvm_kpit_channel_state channels[3];
24 u32 flags;
24 struct kvm_timer pit_timer; 25 struct kvm_timer pit_timer;
25 bool is_periodic; 26 bool is_periodic;
26 u32 speaker_data_on; 27 u32 speaker_data_on;
@@ -49,8 +50,8 @@ struct kvm_pit {
49#define KVM_PIT_CHANNEL_MASK 0x3 50#define KVM_PIT_CHANNEL_MASK 0x3
50 51
51void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu); 52void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
52void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val); 53void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start);
53struct kvm_pit *kvm_create_pit(struct kvm *kvm); 54struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
54void kvm_free_pit(struct kvm *kvm); 55void kvm_free_pit(struct kvm *kvm);
55void kvm_pit_reset(struct kvm_pit *pit); 56void kvm_pit_reset(struct kvm_pit *pit);
56 57
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 1ccb50c74f18..01f151682802 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -30,50 +30,24 @@
30#include "irq.h" 30#include "irq.h"
31 31
32#include <linux/kvm_host.h> 32#include <linux/kvm_host.h>
33 33#include "trace.h"
34static void pic_lock(struct kvm_pic *s)
35 __acquires(&s->lock)
36{
37 spin_lock(&s->lock);
38}
39
40static void pic_unlock(struct kvm_pic *s)
41 __releases(&s->lock)
42{
43 struct kvm *kvm = s->kvm;
44 unsigned acks = s->pending_acks;
45 bool wakeup = s->wakeup_needed;
46 struct kvm_vcpu *vcpu;
47
48 s->pending_acks = 0;
49 s->wakeup_needed = false;
50
51 spin_unlock(&s->lock);
52
53 while (acks) {
54 kvm_notify_acked_irq(kvm, SELECT_PIC(__ffs(acks)),
55 __ffs(acks));
56 acks &= acks - 1;
57 }
58
59 if (wakeup) {
60 vcpu = s->kvm->vcpus[0];
61 if (vcpu)
62 kvm_vcpu_kick(vcpu);
63 }
64}
65 34
66static void pic_clear_isr(struct kvm_kpic_state *s, int irq) 35static void pic_clear_isr(struct kvm_kpic_state *s, int irq)
67{ 36{
68 s->isr &= ~(1 << irq); 37 s->isr &= ~(1 << irq);
69 s->isr_ack |= (1 << irq); 38 s->isr_ack |= (1 << irq);
39 if (s != &s->pics_state->pics[0])
40 irq += 8;
41 kvm_notify_acked_irq(s->pics_state->kvm, SELECT_PIC(irq), irq);
70} 42}
71 43
72void kvm_pic_clear_isr_ack(struct kvm *kvm) 44void kvm_pic_clear_isr_ack(struct kvm *kvm)
73{ 45{
74 struct kvm_pic *s = pic_irqchip(kvm); 46 struct kvm_pic *s = pic_irqchip(kvm);
47 spin_lock(&s->lock);
75 s->pics[0].isr_ack = 0xff; 48 s->pics[0].isr_ack = 0xff;
76 s->pics[1].isr_ack = 0xff; 49 s->pics[1].isr_ack = 0xff;
50 spin_unlock(&s->lock);
77} 51}
78 52
79/* 53/*
@@ -174,9 +148,9 @@ static void pic_update_irq(struct kvm_pic *s)
174 148
175void kvm_pic_update_irq(struct kvm_pic *s) 149void kvm_pic_update_irq(struct kvm_pic *s)
176{ 150{
177 pic_lock(s); 151 spin_lock(&s->lock);
178 pic_update_irq(s); 152 pic_update_irq(s);
179 pic_unlock(s); 153 spin_unlock(&s->lock);
180} 154}
181 155
182int kvm_pic_set_irq(void *opaque, int irq, int level) 156int kvm_pic_set_irq(void *opaque, int irq, int level)
@@ -184,12 +158,14 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
184 struct kvm_pic *s = opaque; 158 struct kvm_pic *s = opaque;
185 int ret = -1; 159 int ret = -1;
186 160
187 pic_lock(s); 161 spin_lock(&s->lock);
188 if (irq >= 0 && irq < PIC_NUM_PINS) { 162 if (irq >= 0 && irq < PIC_NUM_PINS) {
189 ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level); 163 ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
190 pic_update_irq(s); 164 pic_update_irq(s);
165 trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
166 s->pics[irq >> 3].imr, ret == 0);
191 } 167 }
192 pic_unlock(s); 168 spin_unlock(&s->lock);
193 169
194 return ret; 170 return ret;
195} 171}
@@ -217,7 +193,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
217 int irq, irq2, intno; 193 int irq, irq2, intno;
218 struct kvm_pic *s = pic_irqchip(kvm); 194 struct kvm_pic *s = pic_irqchip(kvm);
219 195
220 pic_lock(s); 196 spin_lock(&s->lock);
221 irq = pic_get_irq(&s->pics[0]); 197 irq = pic_get_irq(&s->pics[0]);
222 if (irq >= 0) { 198 if (irq >= 0) {
223 pic_intack(&s->pics[0], irq); 199 pic_intack(&s->pics[0], irq);
@@ -242,8 +218,7 @@ int kvm_pic_read_irq(struct kvm *kvm)
242 intno = s->pics[0].irq_base + irq; 218 intno = s->pics[0].irq_base + irq;
243 } 219 }
244 pic_update_irq(s); 220 pic_update_irq(s);
245 pic_unlock(s); 221 spin_unlock(&s->lock);
246 kvm_notify_acked_irq(kvm, SELECT_PIC(irq), irq);
247 222
248 return intno; 223 return intno;
249} 224}
@@ -252,7 +227,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
252{ 227{
253 int irq, irqbase, n; 228 int irq, irqbase, n;
254 struct kvm *kvm = s->pics_state->irq_request_opaque; 229 struct kvm *kvm = s->pics_state->irq_request_opaque;
255 struct kvm_vcpu *vcpu0 = kvm->vcpus[0]; 230 struct kvm_vcpu *vcpu0 = kvm->bsp_vcpu;
256 231
257 if (s == &s->pics_state->pics[0]) 232 if (s == &s->pics_state->pics[0])
258 irqbase = 0; 233 irqbase = 0;
@@ -263,7 +238,7 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
263 if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0)) 238 if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
264 if (s->irr & (1 << irq) || s->isr & (1 << irq)) { 239 if (s->irr & (1 << irq) || s->isr & (1 << irq)) {
265 n = irq + irqbase; 240 n = irq + irqbase;
266 s->pics_state->pending_acks |= 1 << n; 241 kvm_notify_acked_irq(kvm, SELECT_PIC(n), n);
267 } 242 }
268 } 243 }
269 s->last_irr = 0; 244 s->last_irr = 0;
@@ -428,8 +403,7 @@ static u32 elcr_ioport_read(void *opaque, u32 addr1)
428 return s->elcr; 403 return s->elcr;
429} 404}
430 405
431static int picdev_in_range(struct kvm_io_device *this, gpa_t addr, 406static int picdev_in_range(gpa_t addr)
432 int len, int is_write)
433{ 407{
434 switch (addr) { 408 switch (addr) {
435 case 0x20: 409 case 0x20:
@@ -444,18 +418,25 @@ static int picdev_in_range(struct kvm_io_device *this, gpa_t addr,
444 } 418 }
445} 419}
446 420
447static void picdev_write(struct kvm_io_device *this, 421static inline struct kvm_pic *to_pic(struct kvm_io_device *dev)
422{
423 return container_of(dev, struct kvm_pic, dev);
424}
425
426static int picdev_write(struct kvm_io_device *this,
448 gpa_t addr, int len, const void *val) 427 gpa_t addr, int len, const void *val)
449{ 428{
450 struct kvm_pic *s = this->private; 429 struct kvm_pic *s = to_pic(this);
451 unsigned char data = *(unsigned char *)val; 430 unsigned char data = *(unsigned char *)val;
431 if (!picdev_in_range(addr))
432 return -EOPNOTSUPP;
452 433
453 if (len != 1) { 434 if (len != 1) {
454 if (printk_ratelimit()) 435 if (printk_ratelimit())
455 printk(KERN_ERR "PIC: non byte write\n"); 436 printk(KERN_ERR "PIC: non byte write\n");
456 return; 437 return 0;
457 } 438 }
458 pic_lock(s); 439 spin_lock(&s->lock);
459 switch (addr) { 440 switch (addr) {
460 case 0x20: 441 case 0x20:
461 case 0x21: 442 case 0x21:
@@ -468,21 +449,24 @@ static void picdev_write(struct kvm_io_device *this,
468 elcr_ioport_write(&s->pics[addr & 1], addr, data); 449 elcr_ioport_write(&s->pics[addr & 1], addr, data);
469 break; 450 break;
470 } 451 }
471 pic_unlock(s); 452 spin_unlock(&s->lock);
453 return 0;
472} 454}
473 455
474static void picdev_read(struct kvm_io_device *this, 456static int picdev_read(struct kvm_io_device *this,
475 gpa_t addr, int len, void *val) 457 gpa_t addr, int len, void *val)
476{ 458{
477 struct kvm_pic *s = this->private; 459 struct kvm_pic *s = to_pic(this);
478 unsigned char data = 0; 460 unsigned char data = 0;
461 if (!picdev_in_range(addr))
462 return -EOPNOTSUPP;
479 463
480 if (len != 1) { 464 if (len != 1) {
481 if (printk_ratelimit()) 465 if (printk_ratelimit())
482 printk(KERN_ERR "PIC: non byte read\n"); 466 printk(KERN_ERR "PIC: non byte read\n");
483 return; 467 return 0;
484 } 468 }
485 pic_lock(s); 469 spin_lock(&s->lock);
486 switch (addr) { 470 switch (addr) {
487 case 0x20: 471 case 0x20:
488 case 0x21: 472 case 0x21:
@@ -496,7 +480,8 @@ static void picdev_read(struct kvm_io_device *this,
496 break; 480 break;
497 } 481 }
498 *(unsigned char *)val = data; 482 *(unsigned char *)val = data;
499 pic_unlock(s); 483 spin_unlock(&s->lock);
484 return 0;
500} 485}
501 486
502/* 487/*
@@ -505,20 +490,27 @@ static void picdev_read(struct kvm_io_device *this,
505static void pic_irq_request(void *opaque, int level) 490static void pic_irq_request(void *opaque, int level)
506{ 491{
507 struct kvm *kvm = opaque; 492 struct kvm *kvm = opaque;
508 struct kvm_vcpu *vcpu = kvm->vcpus[0]; 493 struct kvm_vcpu *vcpu = kvm->bsp_vcpu;
509 struct kvm_pic *s = pic_irqchip(kvm); 494 struct kvm_pic *s = pic_irqchip(kvm);
510 int irq = pic_get_irq(&s->pics[0]); 495 int irq = pic_get_irq(&s->pics[0]);
511 496
512 s->output = level; 497 s->output = level;
513 if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) { 498 if (vcpu && level && (s->pics[0].isr_ack & (1 << irq))) {
514 s->pics[0].isr_ack &= ~(1 << irq); 499 s->pics[0].isr_ack &= ~(1 << irq);
515 s->wakeup_needed = true; 500 kvm_vcpu_kick(vcpu);
516 } 501 }
517} 502}
518 503
504static const struct kvm_io_device_ops picdev_ops = {
505 .read = picdev_read,
506 .write = picdev_write,
507};
508
519struct kvm_pic *kvm_create_pic(struct kvm *kvm) 509struct kvm_pic *kvm_create_pic(struct kvm *kvm)
520{ 510{
521 struct kvm_pic *s; 511 struct kvm_pic *s;
512 int ret;
513
522 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL); 514 s = kzalloc(sizeof(struct kvm_pic), GFP_KERNEL);
523 if (!s) 515 if (!s)
524 return NULL; 516 return NULL;
@@ -534,10 +526,12 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
534 /* 526 /*
535 * Initialize PIO device 527 * Initialize PIO device
536 */ 528 */
537 s->dev.read = picdev_read; 529 kvm_iodevice_init(&s->dev, &picdev_ops);
538 s->dev.write = picdev_write; 530 ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev);
539 s->dev.in_range = picdev_in_range; 531 if (ret < 0) {
540 s->dev.private = s; 532 kfree(s);
541 kvm_io_bus_register_dev(&kvm->pio_bus, &s->dev); 533 return NULL;
534 }
535
542 return s; 536 return s;
543} 537}
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 9f593188129e..7d6058a2fd38 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -63,7 +63,6 @@ struct kvm_kpic_state {
63 63
64struct kvm_pic { 64struct kvm_pic {
65 spinlock_t lock; 65 spinlock_t lock;
66 bool wakeup_needed;
67 unsigned pending_acks; 66 unsigned pending_acks;
68 struct kvm *kvm; 67 struct kvm *kvm;
69 struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */ 68 struct kvm_kpic_state pics[2]; /* 0 is master pic, 1 is slave pic */
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 1ff819dce7d3..7bcc5b6a4403 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -29,4 +29,13 @@ static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
29 kvm_register_write(vcpu, VCPU_REGS_RIP, val); 29 kvm_register_write(vcpu, VCPU_REGS_RIP, val);
30} 30}
31 31
32static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
33{
34 if (!test_bit(VCPU_EXREG_PDPTR,
35 (unsigned long *)&vcpu->arch.regs_avail))
36 kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);
37
38 return vcpu->arch.pdptrs[index];
39}
40
32#endif 41#endif
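kvm_pdptr_read() above is an instance of the regs_avail lazy-caching pattern: one valid bit per cached register, with a refetch from the hardware backend when the bit is clear. A self-contained sketch (the VMCS read is stubbed out; in KVM the backend's cache_reg() sets the bit itself):

#include <stdint.h>

enum { EXREG_PDPTR = 0 };

struct vcpu {
	uint32_t regs_avail;	/* bit set => cached copy is valid */
	uint64_t pdptrs[4];
};

static void reload_pdptrs(struct vcpu *v)
{
	int i;
	for (i = 0; i < 4; i++)
		v->pdptrs[i] = 0;	/* would read the VMCS here */
	v->regs_avail |= 1u << EXREG_PDPTR;
}

static uint64_t pdptr_read(struct vcpu *v, int index)
{
	if (!(v->regs_avail & (1u << EXREG_PDPTR)))
		reload_pdptrs(v);
	return v->pdptrs[index];
}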
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
deleted file mode 100644
index ed66e4c078dc..000000000000
--- a/arch/x86/kvm/kvm_svm.h
+++ /dev/null
@@ -1,51 +0,0 @@
1#ifndef __KVM_SVM_H
2#define __KVM_SVM_H
3
4#include <linux/kernel.h>
5#include <linux/types.h>
6#include <linux/list.h>
7#include <linux/kvm_host.h>
8#include <asm/msr.h>
9
10#include <asm/svm.h>
11
12static const u32 host_save_user_msrs[] = {
13#ifdef CONFIG_X86_64
14 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
15 MSR_FS_BASE,
16#endif
17 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
18};
19
20#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
21
22struct kvm_vcpu;
23
24struct vcpu_svm {
25 struct kvm_vcpu vcpu;
26 struct vmcb *vmcb;
27 unsigned long vmcb_pa;
28 struct svm_cpu_data *svm_data;
29 uint64_t asid_generation;
30
31 u64 next_rip;
32
33 u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
34 u64 host_gs_base;
35 unsigned long host_cr2;
36
37 u32 *msrpm;
38 struct vmcb *hsave;
39 u64 hsave_msr;
40
41 u64 nested_vmcb;
42
43 /* These are the merged vectors */
44 u32 *nested_msrpm;
45
46 /* gpa pointers to the real vectors */
47 u64 nested_vmcb_msrpm;
48};
49
50#endif
51
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 26bd6ba74e1c..55c7524dda54 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -6,7 +6,7 @@ struct kvm_timer {
6 bool reinject; 6 bool reinject;
7 struct kvm_timer_ops *t_ops; 7 struct kvm_timer_ops *t_ops;
8 struct kvm *kvm; 8 struct kvm *kvm;
9 int vcpu_id; 9 struct kvm_vcpu *vcpu;
10}; 10};
11 11
12struct kvm_timer_ops { 12struct kvm_timer_ops {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index ae99d83f81a3..1ae5ceba7eb2 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -32,8 +32,11 @@
32#include <asm/current.h> 32#include <asm/current.h>
33#include <asm/apicdef.h> 33#include <asm/apicdef.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/apicdef.h>
35#include "kvm_cache_regs.h" 36#include "kvm_cache_regs.h"
36#include "irq.h" 37#include "irq.h"
38#include "trace.h"
39#include "x86.h"
37 40
38#ifndef CONFIG_X86_64 41#ifndef CONFIG_X86_64
39#define mod_64(x, y) ((x) - (y) * div64_u64(x, y)) 42#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -141,6 +144,26 @@ static inline int apic_lvt_nmi_mode(u32 lvt_val)
141 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI; 144 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
142} 145}
143 146
147void kvm_apic_set_version(struct kvm_vcpu *vcpu)
148{
149 struct kvm_lapic *apic = vcpu->arch.apic;
150 struct kvm_cpuid_entry2 *feat;
151 u32 v = APIC_VERSION;
152
153 if (!irqchip_in_kernel(vcpu->kvm))
154 return;
155
156 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
157 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
158 v |= APIC_LVR_DIRECTED_EOI;
159 apic_set_reg(apic, APIC_LVR, v);
160}
161
162static inline int apic_x2apic_mode(struct kvm_lapic *apic)
163{
164 return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
165}
166
144static unsigned int apic_lvt_mask[APIC_LVT_NUM] = { 167static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
145 LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */ 168 LVT_MASK | APIC_LVT_TIMER_PERIODIC, /* LVTT */
146 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */ 169 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
@@ -165,36 +188,52 @@ static int find_highest_vector(void *bitmap)
165 188
166static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic) 189static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
167{ 190{
191 apic->irr_pending = true;
168 return apic_test_and_set_vector(vec, apic->regs + APIC_IRR); 192 return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
169} 193}
170 194
171static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) 195static inline int apic_search_irr(struct kvm_lapic *apic)
172{ 196{
173 apic_clear_vector(vec, apic->regs + APIC_IRR); 197 return find_highest_vector(apic->regs + APIC_IRR);
174} 198}
175 199
176static inline int apic_find_highest_irr(struct kvm_lapic *apic) 200static inline int apic_find_highest_irr(struct kvm_lapic *apic)
177{ 201{
178 int result; 202 int result;
179 203
180 result = find_highest_vector(apic->regs + APIC_IRR); 204 if (!apic->irr_pending)
205 return -1;
206
207 result = apic_search_irr(apic);
181 ASSERT(result == -1 || result >= 16); 208 ASSERT(result == -1 || result >= 16);
182 209
183 return result; 210 return result;
184} 211}
185 212
213static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
214{
215 apic->irr_pending = false;
216 apic_clear_vector(vec, apic->regs + APIC_IRR);
217 if (apic_search_irr(apic) != -1)
218 apic->irr_pending = true;
219}
220
186int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu) 221int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
187{ 222{
188 struct kvm_lapic *apic = vcpu->arch.apic; 223 struct kvm_lapic *apic = vcpu->arch.apic;
189 int highest_irr; 224 int highest_irr;
190 225
226 /* This may race with setting of irr in __apic_accept_irq() and
227 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
228 * will cause vmexit immediately and the value will be recalculated
229 * on the next vmentry.
230 */
191 if (!apic) 231 if (!apic)
192 return 0; 232 return 0;
193 highest_irr = apic_find_highest_irr(apic); 233 highest_irr = apic_find_highest_irr(apic);
194 234
195 return highest_irr; 235 return highest_irr;
196} 236}
197EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
198 237
199static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, 238static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
200 int vector, int level, int trig_mode); 239 int vector, int level, int trig_mode);
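The irr_pending flag added above is a conservative summary bit over the IRR: it is set before the vector bit, may be spuriously true, and is recomputed only when a vector is cleared, so the common empty-IRR scan is skipped. A standalone model of the same ordering (one byte per vector for clarity):

#include <stdbool.h>

#define NVEC 256

struct irr {
	unsigned char bits[NVEC];
	bool pending;		/* true if any vector *may* be set */
};

static int find_highest(const struct irr *irr)
{
	int v;
	for (v = NVEC - 1; v >= 0; v--)
		if (irr->bits[v])
			return v;
	return -1;
}

static void irr_set(struct irr *irr, int vec)
{
	irr->pending = true;	/* flag first, as in the patch */
	irr->bits[vec] = 1;
}

static int irr_highest(struct irr *irr)
{
	if (!irr->pending)
		return -1;	/* fast path: skip the scan */
	return find_highest(irr);
}

static void irr_clear(struct irr *irr, int vec)
{
	irr->pending = false;
	irr->bits[vec] = 0;
	if (find_highest(irr) != -1)
		irr->pending = true;	/* others still queued */
}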
@@ -251,7 +290,12 @@ int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
251int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda) 290int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
252{ 291{
253 int result = 0; 292 int result = 0;
254 u8 logical_id; 293 u32 logical_id;
294
295 if (apic_x2apic_mode(apic)) {
296 logical_id = apic_get_reg(apic, APIC_LDR);
297 return logical_id & mda;
298 }
255 299
256 logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR)); 300 logical_id = GET_APIC_LOGICAL_ID(apic_get_reg(apic, APIC_LDR));
257 301
@@ -331,6 +375,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
331 break; 375 break;
332 376
333 result = !apic_test_and_set_irr(vector, apic); 377 result = !apic_test_and_set_irr(vector, apic);
378 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
379 trig_mode, vector, !result);
334 if (!result) { 380 if (!result) {
335 if (trig_mode) 381 if (trig_mode)
336 apic_debug("level trig mode repeatedly for " 382 apic_debug("level trig mode repeatedly for "
@@ -425,7 +471,11 @@ static void apic_set_eoi(struct kvm_lapic *apic)
425 trigger_mode = IOAPIC_LEVEL_TRIG; 471 trigger_mode = IOAPIC_LEVEL_TRIG;
426 else 472 else
427 trigger_mode = IOAPIC_EDGE_TRIG; 473 trigger_mode = IOAPIC_EDGE_TRIG;
428 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); 474 if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) {
475 mutex_lock(&apic->vcpu->kvm->irq_lock);
476 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
477 mutex_unlock(&apic->vcpu->kvm->irq_lock);
478 }
429} 479}
430 480
431static void apic_send_ipi(struct kvm_lapic *apic) 481static void apic_send_ipi(struct kvm_lapic *apic)
@@ -440,7 +490,12 @@ static void apic_send_ipi(struct kvm_lapic *apic)
440 irq.level = icr_low & APIC_INT_ASSERT; 490 irq.level = icr_low & APIC_INT_ASSERT;
441 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG; 491 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
442 irq.shorthand = icr_low & APIC_SHORT_MASK; 492 irq.shorthand = icr_low & APIC_SHORT_MASK;
443 irq.dest_id = GET_APIC_DEST_FIELD(icr_high); 493 if (apic_x2apic_mode(apic))
494 irq.dest_id = icr_high;
495 else
496 irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
497
498 trace_kvm_apic_ipi(icr_low, irq.dest_id);
444 499
445 apic_debug("icr_high 0x%x, icr_low 0x%x, " 500 apic_debug("icr_high 0x%x, icr_low 0x%x, "
446 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, " 501 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
@@ -449,7 +504,9 @@ static void apic_send_ipi(struct kvm_lapic *apic)
449 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode, 504 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
450 irq.vector); 505 irq.vector);
451 506
507 mutex_lock(&apic->vcpu->kvm->irq_lock);
452 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq); 508 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
509 mutex_unlock(&apic->vcpu->kvm->irq_lock);
453} 510}
454 511
455static u32 apic_get_tmcct(struct kvm_lapic *apic) 512static u32 apic_get_tmcct(struct kvm_lapic *apic)
@@ -495,12 +552,16 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
495{ 552{
496 u32 val = 0; 553 u32 val = 0;
497 554
498 KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
499
500 if (offset >= LAPIC_MMIO_LENGTH) 555 if (offset >= LAPIC_MMIO_LENGTH)
501 return 0; 556 return 0;
502 557
503 switch (offset) { 558 switch (offset) {
559 case APIC_ID:
560 if (apic_x2apic_mode(apic))
561 val = kvm_apic_id(apic);
562 else
563 val = kvm_apic_id(apic) << 24;
564 break;
504 case APIC_ARBPRI: 565 case APIC_ARBPRI:
505 printk(KERN_WARNING "Access APIC ARBPRI register " 566 printk(KERN_WARNING "Access APIC ARBPRI register "
506 "which is for P6\n"); 567 "which is for P6\n");
@@ -522,21 +583,35 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
522 return val; 583 return val;
523} 584}
524 585
525static void apic_mmio_read(struct kvm_io_device *this, 586static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
526 gpa_t address, int len, void *data) 587{
588 return container_of(dev, struct kvm_lapic, dev);
589}
590
591static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
592 void *data)
527{ 593{
528 struct kvm_lapic *apic = (struct kvm_lapic *)this->private;
529 unsigned int offset = address - apic->base_address;
530 unsigned char alignment = offset & 0xf; 594 unsigned char alignment = offset & 0xf;
531 u32 result; 595 u32 result;
596 /* this bitmask has a bit cleared for each reserved register */
597 static const u64 rmask = 0x43ff01ffffffe70cULL;
532 598
533 if ((alignment + len) > 4) { 599 if ((alignment + len) > 4) {
534 printk(KERN_ERR "KVM_APIC_READ: alignment error %lx %d", 600 apic_debug("KVM_APIC_READ: alignment error %x %d\n",
535 (unsigned long)address, len); 601 offset, len);
536 return; 602 return 1;
537 } 603 }
604
605 if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
606 apic_debug("KVM_APIC_READ: read reserved register %x\n",
607 offset);
608 return 1;
609 }
610
538 result = __apic_read(apic, offset & ~0xf); 611 result = __apic_read(apic, offset & ~0xf);
539 612
613 trace_kvm_apic_read(offset, result);
614
540 switch (len) { 615 switch (len) {
541 case 1: 616 case 1:
542 case 2: 617 case 2:
@@ -548,6 +623,28 @@ static void apic_mmio_read(struct kvm_io_device *this,
548 "should be 1,2, or 4 instead\n", len); 623 "should be 1,2, or 4 instead\n", len);
549 break; 624 break;
550 } 625 }
626 return 0;
627}
628
629static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
630{
631 return apic_hw_enabled(apic) &&
632 addr >= apic->base_address &&
633 addr < apic->base_address + LAPIC_MMIO_LENGTH;
634}
635
636static int apic_mmio_read(struct kvm_io_device *this,
637 gpa_t address, int len, void *data)
638{
639 struct kvm_lapic *apic = to_lapic(this);
640 u32 offset = address - apic->base_address;
641
642 if (!apic_mmio_in_range(apic, address))
643 return -EOPNOTSUPP;
644
645 apic_reg_read(apic, offset, len, data);
646
647 return 0;
551} 648}
552 649
553static void update_divide_count(struct kvm_lapic *apic) 650static void update_divide_count(struct kvm_lapic *apic)
@@ -573,6 +670,15 @@ static void start_apic_timer(struct kvm_lapic *apic)
573 670
574 if (!apic->lapic_timer.period) 671 if (!apic->lapic_timer.period)
575 return; 672 return;
673 /*
674 * Do not allow the guest to program periodic timers with small
675 * interval, since the hrtimers are not throttled by the host
676 * scheduler.
677 */
678 if (apic_lvtt_period(apic)) {
679 if (apic->lapic_timer.period < NSEC_PER_MSEC/2)
680 apic->lapic_timer.period = NSEC_PER_MSEC/2;
681 }
576 682
577 hrtimer_start(&apic->lapic_timer.timer, 683 hrtimer_start(&apic->lapic_timer.timer,
578 ktime_add_ns(now, apic->lapic_timer.period), 684 ktime_add_ns(now, apic->lapic_timer.period),
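The clamp above bounds how often a guest-programmed periodic timer can fire, since host hrtimers are not otherwise throttled. With NSEC_PER_MSEC == 1000000 the floor works out as follows:

#include <assert.h>

int main(void)
{
	const long nsec_per_msec = 1000000;		/* NSEC_PER_MSEC */
	const long floor_ns = nsec_per_msec / 2;	/* 500000 ns */

	/* at most 2000 timer interrupts per second per LAPIC timer */
	assert(1000000000L / floor_ns == 2000);
	return 0;
}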
@@ -603,40 +709,18 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
603 apic->vcpu->kvm->arch.vapics_in_nmi_mode--; 709 apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
604} 710}
605 711
606static void apic_mmio_write(struct kvm_io_device *this, 712static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
607 gpa_t address, int len, const void *data)
608{ 713{
609 struct kvm_lapic *apic = (struct kvm_lapic *)this->private; 714 int ret = 0;
610 unsigned int offset = address - apic->base_address;
611 unsigned char alignment = offset & 0xf;
612 u32 val;
613
614 /*
615 * APIC register must be aligned on 128-bits boundary.
616 * 32/64/128 bits registers must be accessed thru 32 bits.
617 * Refer SDM 8.4.1
618 */
619 if (len != 4 || alignment) {
620 /* Don't shout loud, $infamous_os would cause only noise. */
621 apic_debug("apic write: bad size=%d %lx\n",
622 len, (long)address);
623 return;
624 }
625
626 val = *(u32 *) data;
627
628 /* too common printing */
629 if (offset != APIC_EOI)
630 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
631 "0x%x\n", __func__, offset, len, val);
632
633 offset &= 0xff0;
634 715
635 KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler); 716 trace_kvm_apic_write(reg, val);
636 717
637 switch (offset) { 718 switch (reg) {
638 case APIC_ID: /* Local APIC ID */ 719 case APIC_ID: /* Local APIC ID */
639 apic_set_reg(apic, APIC_ID, val); 720 if (!apic_x2apic_mode(apic))
721 apic_set_reg(apic, APIC_ID, val);
722 else
723 ret = 1;
640 break; 724 break;
641 725
642 case APIC_TASKPRI: 726 case APIC_TASKPRI:
@@ -649,15 +733,24 @@ static void apic_mmio_write(struct kvm_io_device *this,
649 break; 733 break;
650 734
651 case APIC_LDR: 735 case APIC_LDR:
652 apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK); 736 if (!apic_x2apic_mode(apic))
737 apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
738 else
739 ret = 1;
653 break; 740 break;
654 741
655 case APIC_DFR: 742 case APIC_DFR:
656 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF); 743 if (!apic_x2apic_mode(apic))
744 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
745 else
746 ret = 1;
657 break; 747 break;
658 748
659 case APIC_SPIV: 749 case APIC_SPIV: {
660 apic_set_reg(apic, APIC_SPIV, val & 0x3ff); 750 u32 mask = 0x3ff;
751 if (apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
752 mask |= APIC_SPIV_DIRECTED_EOI;
753 apic_set_reg(apic, APIC_SPIV, val & mask);
661 if (!(val & APIC_SPIV_APIC_ENABLED)) { 754 if (!(val & APIC_SPIV_APIC_ENABLED)) {
662 int i; 755 int i;
663 u32 lvt_val; 756 u32 lvt_val;
@@ -672,7 +765,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
672 765
673 } 766 }
674 break; 767 break;
675 768 }
676 case APIC_ICR: 769 case APIC_ICR:
677 /* No delay here, so we always clear the pending bit */ 770 /* No delay here, so we always clear the pending bit */
678 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12)); 771 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
@@ -680,7 +773,9 @@ static void apic_mmio_write(struct kvm_io_device *this,
680 break; 773 break;
681 774
682 case APIC_ICR2: 775 case APIC_ICR2:
683 apic_set_reg(apic, APIC_ICR2, val & 0xff000000); 776 if (!apic_x2apic_mode(apic))
777 val &= 0xff000000;
778 apic_set_reg(apic, APIC_ICR2, val);
684 break; 779 break;
685 780
686 case APIC_LVT0: 781 case APIC_LVT0:
@@ -694,8 +789,8 @@ static void apic_mmio_write(struct kvm_io_device *this,
694 if (!apic_sw_enabled(apic)) 789 if (!apic_sw_enabled(apic))
695 val |= APIC_LVT_MASKED; 790 val |= APIC_LVT_MASKED;
696 791
697 val &= apic_lvt_mask[(offset - APIC_LVTT) >> 4]; 792 val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
698 apic_set_reg(apic, offset, val); 793 apic_set_reg(apic, reg, val);
699 794
700 break; 795 break;
701 796
@@ -703,7 +798,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
703 hrtimer_cancel(&apic->lapic_timer.timer); 798 hrtimer_cancel(&apic->lapic_timer.timer);
704 apic_set_reg(apic, APIC_TMICT, val); 799 apic_set_reg(apic, APIC_TMICT, val);
705 start_apic_timer(apic); 800 start_apic_timer(apic);
706 return; 801 break;
707 802
708 case APIC_TDCR: 803 case APIC_TDCR:
709 if (val & 4) 804 if (val & 4)
@@ -712,27 +807,59 @@ static void apic_mmio_write(struct kvm_io_device *this,
712 update_divide_count(apic); 807 update_divide_count(apic);
713 break; 808 break;
714 809
810 case APIC_ESR:
811 if (apic_x2apic_mode(apic) && val != 0) {
812 printk(KERN_ERR "KVM_WRITE:ESR not zero %x\n", val);
813 ret = 1;
814 }
815 break;
816
817 case APIC_SELF_IPI:
818 if (apic_x2apic_mode(apic)) {
819 apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
820 } else
821 ret = 1;
822 break;
715 default: 823 default:
716 apic_debug("Local APIC Write to read-only register %x\n", 824 ret = 1;
717 offset);
718 break; 825 break;
719 } 826 }
720 827 if (ret)
828 apic_debug("Local APIC Write to read-only register %x\n", reg);
829 return ret;
721} 830}
722 831
723static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr, 832static int apic_mmio_write(struct kvm_io_device *this,
724 int len, int size) 833 gpa_t address, int len, const void *data)
725{ 834{
726 struct kvm_lapic *apic = (struct kvm_lapic *)this->private; 835 struct kvm_lapic *apic = to_lapic(this);
727 int ret = 0; 836 unsigned int offset = address - apic->base_address;
837 u32 val;
728 838
839 if (!apic_mmio_in_range(apic, address))
840 return -EOPNOTSUPP;
729 841
730 if (apic_hw_enabled(apic) && 842 /*
731 (addr >= apic->base_address) && 843 * APIC register must be aligned on 128-bits boundary.
732 (addr < (apic->base_address + LAPIC_MMIO_LENGTH))) 844 * 32/64/128 bits registers must be accessed thru 32 bits.
733 ret = 1; 845 * Refer SDM 8.4.1
846 */
847 if (len != 4 || (offset & 0xf)) {
848 /* Don't shout loud, $infamous_os would cause only noise. */
849 apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
850 return 0;
851 }
734 852
735 return ret; 853 val = *(u32 *)data;
854
855 /* too common printing */
856 if (offset != APIC_EOI)
857 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
858 "0x%x\n", __func__, offset, len, val);
859
860 apic_reg_write(apic, offset & 0xff0, val);
861
862 return 0;
736} 863}
737 864
738void kvm_free_lapic(struct kvm_vcpu *vcpu) 865void kvm_free_lapic(struct kvm_vcpu *vcpu)
@@ -763,7 +890,6 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
763 apic_set_tpr(apic, ((cr8 & 0x0f) << 4) 890 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
764 | (apic_get_reg(apic, APIC_TASKPRI) & 4)); 891 | (apic_get_reg(apic, APIC_TASKPRI) & 4));
765} 892}
766EXPORT_SYMBOL_GPL(kvm_lapic_set_tpr);
767 893
768u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu) 894u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
769{ 895{
@@ -776,7 +902,6 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
776 902
777 return (tpr & 0xf0) >> 4; 903 return (tpr & 0xf0) >> 4;
778} 904}
779EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
780 905
781void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) 906void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
782{ 907{
@@ -787,10 +912,16 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
787 vcpu->arch.apic_base = value; 912 vcpu->arch.apic_base = value;
788 return; 913 return;
789 } 914 }
790 if (apic->vcpu->vcpu_id) 915
916 if (!kvm_vcpu_is_bsp(apic->vcpu))
791 value &= ~MSR_IA32_APICBASE_BSP; 917 value &= ~MSR_IA32_APICBASE_BSP;
792 918
793 vcpu->arch.apic_base = value; 919 vcpu->arch.apic_base = value;
920 if (apic_x2apic_mode(apic)) {
921 u32 id = kvm_apic_id(apic);
922 u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf));
923 apic_set_reg(apic, APIC_LDR, ldr);
924 }
794 apic->base_address = apic->vcpu->arch.apic_base & 925 apic->base_address = apic->vcpu->arch.apic_base &
795 MSR_IA32_APICBASE_BASE; 926 MSR_IA32_APICBASE_BASE;
796 927
@@ -800,12 +931,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
800 931
801} 932}
802 933
803u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
804{
805 return vcpu->arch.apic_base;
806}
807EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
808
809void kvm_lapic_reset(struct kvm_vcpu *vcpu) 934void kvm_lapic_reset(struct kvm_vcpu *vcpu)
810{ 935{
811 struct kvm_lapic *apic; 936 struct kvm_lapic *apic;
@@ -821,7 +946,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
821 hrtimer_cancel(&apic->lapic_timer.timer); 946 hrtimer_cancel(&apic->lapic_timer.timer);
822 947
823 apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24); 948 apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
824 apic_set_reg(apic, APIC_LVR, APIC_VERSION); 949 kvm_apic_set_version(apic->vcpu);
825 950
826 for (i = 0; i < APIC_LVT_NUM; i++) 951 for (i = 0; i < APIC_LVT_NUM; i++)
827 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 952 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
@@ -842,9 +967,10 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
842 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0); 967 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
843 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0); 968 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
844 } 969 }
970 apic->irr_pending = false;
845 update_divide_count(apic); 971 update_divide_count(apic);
846 atomic_set(&apic->lapic_timer.pending, 0); 972 atomic_set(&apic->lapic_timer.pending, 0);
847 if (vcpu->vcpu_id == 0) 973 if (kvm_vcpu_is_bsp(vcpu))
848 vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP; 974 vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
849 apic_update_ppr(apic); 975 apic_update_ppr(apic);
850 976
@@ -855,7 +981,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
855 vcpu, kvm_apic_id(apic), 981 vcpu, kvm_apic_id(apic),
856 vcpu->arch.apic_base, apic->base_address); 982 vcpu->arch.apic_base, apic->base_address);
857} 983}
858EXPORT_SYMBOL_GPL(kvm_lapic_reset);
859 984
860bool kvm_apic_present(struct kvm_vcpu *vcpu) 985bool kvm_apic_present(struct kvm_vcpu *vcpu)
861{ 986{
@@ -866,7 +991,6 @@ int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
866{ 991{
867 return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic); 992 return kvm_apic_present(vcpu) && apic_sw_enabled(vcpu->arch.apic);
868} 993}
869EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
870 994
871/* 995/*
872 *---------------------------------------------------------------------- 996 *----------------------------------------------------------------------
@@ -917,6 +1041,11 @@ static struct kvm_timer_ops lapic_timer_ops = {
917 .is_periodic = lapic_is_periodic, 1041 .is_periodic = lapic_is_periodic,
918}; 1042};
919 1043
1044static const struct kvm_io_device_ops apic_mmio_ops = {
1045 .read = apic_mmio_read,
1046 .write = apic_mmio_write,
1047};
1048
920int kvm_create_lapic(struct kvm_vcpu *vcpu) 1049int kvm_create_lapic(struct kvm_vcpu *vcpu)
921{ 1050{
922 struct kvm_lapic *apic; 1051 struct kvm_lapic *apic;
@@ -945,16 +1074,13 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
945 apic->lapic_timer.timer.function = kvm_timer_fn; 1074 apic->lapic_timer.timer.function = kvm_timer_fn;
946 apic->lapic_timer.t_ops = &lapic_timer_ops; 1075 apic->lapic_timer.t_ops = &lapic_timer_ops;
947 apic->lapic_timer.kvm = vcpu->kvm; 1076 apic->lapic_timer.kvm = vcpu->kvm;
948 apic->lapic_timer.vcpu_id = vcpu->vcpu_id; 1077 apic->lapic_timer.vcpu = vcpu;
949 1078
950 apic->base_address = APIC_DEFAULT_PHYS_BASE; 1079 apic->base_address = APIC_DEFAULT_PHYS_BASE;
951 vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE; 1080 vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
952 1081
953 kvm_lapic_reset(vcpu); 1082 kvm_lapic_reset(vcpu);
954 apic->dev.read = apic_mmio_read; 1083 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
955 apic->dev.write = apic_mmio_write;
956 apic->dev.in_range = apic_mmio_range;
957 apic->dev.private = apic;
958 1084
959 return 0; 1085 return 0;
960nomem_free_apic: 1086nomem_free_apic:
@@ -962,7 +1088,6 @@ nomem_free_apic:
962nomem: 1088nomem:
963 return -ENOMEM; 1089 return -ENOMEM;
964} 1090}
965EXPORT_SYMBOL_GPL(kvm_create_lapic);
966 1091
967int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu) 1092int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
968{ 1093{
@@ -985,7 +1110,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
985 u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0); 1110 u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
986 int r = 0; 1111 int r = 0;
987 1112
988 if (vcpu->vcpu_id == 0) { 1113 if (kvm_vcpu_is_bsp(vcpu)) {
989 if (!apic_hw_enabled(vcpu->arch.apic)) 1114 if (!apic_hw_enabled(vcpu->arch.apic))
990 r = 1; 1115 r = 1;
991 if ((lvt0 & APIC_LVT_MASKED) == 0 && 1116 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
@@ -1025,7 +1150,8 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
1025 1150
1026 apic->base_address = vcpu->arch.apic_base & 1151 apic->base_address = vcpu->arch.apic_base &
1027 MSR_IA32_APICBASE_BASE; 1152 MSR_IA32_APICBASE_BASE;
1028 apic_set_reg(apic, APIC_LVR, APIC_VERSION); 1153 kvm_apic_set_version(vcpu);
1154
1029 apic_update_ppr(apic); 1155 apic_update_ppr(apic);
1030 hrtimer_cancel(&apic->lapic_timer.timer); 1156 hrtimer_cancel(&apic->lapic_timer.timer);
1031 update_divide_count(apic); 1157 update_divide_count(apic);
@@ -1092,3 +1218,35 @@ void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
1092 1218
1093 vcpu->arch.apic->vapic_addr = vapic_addr; 1219 vcpu->arch.apic->vapic_addr = vapic_addr;
1094} 1220}
1221
1222int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1223{
1224 struct kvm_lapic *apic = vcpu->arch.apic;
1225 u32 reg = (msr - APIC_BASE_MSR) << 4;
1226
1227 if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
1228 return 1;
1229
1230 /* if this is ICR write vector before command */
1231 if (msr == 0x830)
1232 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
1233 return apic_reg_write(apic, reg, (u32)data);
1234}
1235
1236int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
1237{
1238 struct kvm_lapic *apic = vcpu->arch.apic;
1239 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
1240
1241 if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
1242 return 1;
1243
1244 if (apic_reg_read(apic, reg, 4, &low))
1245 return 1;
1246 if (msr == 0x830)
1247 apic_reg_read(apic, APIC_ICR2, 4, &high);
1248
1249 *data = (((u64)high) << 32) | low;
1250
1251 return 0;
1252}
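kvm_x2apic_msr_write()/read() above map an x2APIC MSR onto a legacy register offset with (msr - APIC_BASE_MSR) << 4, assuming the x2APIC MSR window starts at 0x800. A few worked mappings (the 64-bit ICR at MSR 0x830 is split, high half to ICR2 first, as the code does):

#include <assert.h>

int main(void)
{
	const unsigned base = 0x800;	/* assumed APIC_BASE_MSR */

	assert(((0x802 - base) << 4) == 0x020);	/* x2APIC ID -> APIC_ID */
	assert(((0x808 - base) << 4) == 0x080);	/* TPR -> APIC_TASKPRI */
	assert(((0x830 - base) << 4) == 0x300);	/* ICR -> APIC_ICR */
	return 0;
}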
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index a587f8349c46..40010b09c4aa 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -12,6 +12,7 @@ struct kvm_lapic {
12 struct kvm_timer lapic_timer; 12 struct kvm_timer lapic_timer;
13 u32 divide_count; 13 u32 divide_count;
14 struct kvm_vcpu *vcpu; 14 struct kvm_vcpu *vcpu;
15 bool irr_pending;
15 struct page *regs_page; 16 struct page *regs_page;
16 void *regs; 17 void *regs;
17 gpa_t vapic_addr; 18 gpa_t vapic_addr;
@@ -28,6 +29,7 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
28void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8); 29void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
29void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value); 30void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
30u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu); 31u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
32void kvm_apic_set_version(struct kvm_vcpu *vcpu);
31 33
32int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest); 34int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
33int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda); 35int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
@@ -44,4 +46,6 @@ void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
44void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); 46void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
45void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); 47void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
46 48
49int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
50int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
47#endif 51#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0ef5bb2b4043..eca41ae9f453 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,6 +18,7 @@
18 */ 18 */
19 19
20#include "mmu.h" 20#include "mmu.h"
21#include "kvm_cache_regs.h"
21 22
22#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
23#include <linux/types.h> 24#include <linux/types.h>
@@ -107,6 +108,9 @@ module_param(oos_shadow, bool, 0644);
107 108
108#define PT32_LEVEL_MASK(level) \ 109#define PT32_LEVEL_MASK(level) \
109 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level)) 110 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
111#define PT32_LVL_OFFSET_MASK(level) \
112 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
113 * PT32_LEVEL_BITS))) - 1))
110 114
111#define PT32_INDEX(address, level)\ 115#define PT32_INDEX(address, level)\
112 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1)) 116 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
@@ -115,10 +119,19 @@ module_param(oos_shadow, bool, 0644);
115#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) 119#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
116#define PT64_DIR_BASE_ADDR_MASK \ 120#define PT64_DIR_BASE_ADDR_MASK \
117 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1)) 121 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
122#define PT64_LVL_ADDR_MASK(level) \
123 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
124 * PT64_LEVEL_BITS))) - 1))
125#define PT64_LVL_OFFSET_MASK(level) \
126 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
127 * PT64_LEVEL_BITS))) - 1))
118 128
119#define PT32_BASE_ADDR_MASK PAGE_MASK 129#define PT32_BASE_ADDR_MASK PAGE_MASK
120#define PT32_DIR_BASE_ADDR_MASK \ 130#define PT32_DIR_BASE_ADDR_MASK \
121 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1)) 131 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132#define PT32_LVL_ADDR_MASK(level) \
133 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134 * PT32_LEVEL_BITS))) - 1))
122 135
123#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ 136#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
124 | PT64_NX_MASK) 137 | PT64_NX_MASK)
@@ -129,6 +142,7 @@ module_param(oos_shadow, bool, 0644);
129#define PFERR_RSVD_MASK (1U << 3) 142#define PFERR_RSVD_MASK (1U << 3)
130#define PFERR_FETCH_MASK (1U << 4) 143#define PFERR_FETCH_MASK (1U << 4)
131 144
145#define PT_PDPE_LEVEL 3
132#define PT_DIRECTORY_LEVEL 2 146#define PT_DIRECTORY_LEVEL 2
133#define PT_PAGE_TABLE_LEVEL 1 147#define PT_PAGE_TABLE_LEVEL 1
134 148
@@ -139,10 +153,13 @@ module_param(oos_shadow, bool, 0644);
139#define ACC_USER_MASK PT_USER_MASK 153#define ACC_USER_MASK PT_USER_MASK
140#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) 154#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
141 155
156#define CREATE_TRACE_POINTS
157#include "mmutrace.h"
158
142#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 159#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
143 160
144struct kvm_rmap_desc { 161struct kvm_rmap_desc {
145 u64 *shadow_ptes[RMAP_EXT]; 162 u64 *sptes[RMAP_EXT];
146 struct kvm_rmap_desc *more; 163 struct kvm_rmap_desc *more;
147}; 164};
148 165
@@ -239,16 +256,25 @@ static int is_writeble_pte(unsigned long pte)
239 return pte & PT_WRITABLE_MASK; 256 return pte & PT_WRITABLE_MASK;
240} 257}
241 258
242static int is_dirty_pte(unsigned long pte) 259static int is_dirty_gpte(unsigned long pte)
243{ 260{
244 return pte & shadow_dirty_mask; 261 return pte & PT_DIRTY_MASK;
245} 262}
246 263
247static int is_rmap_pte(u64 pte) 264static int is_rmap_spte(u64 pte)
248{ 265{
249 return is_shadow_present_pte(pte); 266 return is_shadow_present_pte(pte);
250} 267}
251 268
269static int is_last_spte(u64 pte, int level)
270{
271 if (level == PT_PAGE_TABLE_LEVEL)
272 return 1;
273 if (is_large_pte(pte))
274 return 1;
275 return 0;
276}
277
252static pfn_t spte_to_pfn(u64 pte) 278static pfn_t spte_to_pfn(u64 pte)
253{ 279{
254 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; 280 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -261,7 +287,7 @@ static gfn_t pse36_gfn_delta(u32 gpte)
261 return (gpte & PT32_DIR_PSE36_MASK) << shift; 287 return (gpte & PT32_DIR_PSE36_MASK) << shift;
262} 288}
263 289
264static void set_shadow_pte(u64 *sptep, u64 spte) 290static void __set_spte(u64 *sptep, u64 spte)
265{ 291{
266#ifdef CONFIG_X86_64 292#ifdef CONFIG_X86_64
267 set_64bit((unsigned long *)sptep, spte); 293 set_64bit((unsigned long *)sptep, spte);
@@ -380,37 +406,52 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
380 * Return the pointer to the largepage write count for a given 406 * Return the pointer to the largepage write count for a given
381 * gfn, handling slots that are not large page aligned. 407 * gfn, handling slots that are not large page aligned.
382 */ 408 */
383static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot) 409static int *slot_largepage_idx(gfn_t gfn,
410 struct kvm_memory_slot *slot,
411 int level)
384{ 412{
385 unsigned long idx; 413 unsigned long idx;
386 414
387 idx = (gfn / KVM_PAGES_PER_HPAGE) - 415 idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
388 (slot->base_gfn / KVM_PAGES_PER_HPAGE); 416 (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
389 return &slot->lpage_info[idx].write_count; 417 return &slot->lpage_info[level - 2][idx].write_count;
390} 418}
391 419
392static void account_shadowed(struct kvm *kvm, gfn_t gfn) 420static void account_shadowed(struct kvm *kvm, gfn_t gfn)
393{ 421{
422 struct kvm_memory_slot *slot;
394 int *write_count; 423 int *write_count;
424 int i;
395 425
396 gfn = unalias_gfn(kvm, gfn); 426 gfn = unalias_gfn(kvm, gfn);
397 write_count = slot_largepage_idx(gfn, 427
398 gfn_to_memslot_unaliased(kvm, gfn)); 428 slot = gfn_to_memslot_unaliased(kvm, gfn);
399 *write_count += 1; 429 for (i = PT_DIRECTORY_LEVEL;
430 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
431 write_count = slot_largepage_idx(gfn, slot, i);
432 *write_count += 1;
433 }
400} 434}
401 435
402static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn) 436static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
403{ 437{
438 struct kvm_memory_slot *slot;
404 int *write_count; 439 int *write_count;
440 int i;
405 441
406 gfn = unalias_gfn(kvm, gfn); 442 gfn = unalias_gfn(kvm, gfn);
407 write_count = slot_largepage_idx(gfn, 443 for (i = PT_DIRECTORY_LEVEL;
408 gfn_to_memslot_unaliased(kvm, gfn)); 444 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
409 *write_count -= 1; 445 slot = gfn_to_memslot_unaliased(kvm, gfn);
410 WARN_ON(*write_count < 0); 446 write_count = slot_largepage_idx(gfn, slot, i);
447 *write_count -= 1;
448 WARN_ON(*write_count < 0);
449 }
411} 450}
412 451
413static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn) 452static int has_wrprotected_page(struct kvm *kvm,
453 gfn_t gfn,
454 int level)
414{ 455{
415 struct kvm_memory_slot *slot; 456 struct kvm_memory_slot *slot;
416 int *largepage_idx; 457 int *largepage_idx;
@@ -418,47 +459,67 @@ static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
418 gfn = unalias_gfn(kvm, gfn); 459 gfn = unalias_gfn(kvm, gfn);
419 slot = gfn_to_memslot_unaliased(kvm, gfn); 460 slot = gfn_to_memslot_unaliased(kvm, gfn);
420 if (slot) { 461 if (slot) {
421 largepage_idx = slot_largepage_idx(gfn, slot); 462 largepage_idx = slot_largepage_idx(gfn, slot, level);
422 return *largepage_idx; 463 return *largepage_idx;
423 } 464 }
424 465
425 return 1; 466 return 1;
426} 467}
427 468
428static int host_largepage_backed(struct kvm *kvm, gfn_t gfn) 469static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
429{ 470{
471 unsigned long page_size = PAGE_SIZE;
430 struct vm_area_struct *vma; 472 struct vm_area_struct *vma;
431 unsigned long addr; 473 unsigned long addr;
432 int ret = 0; 474 int i, ret = 0;
433 475
434 addr = gfn_to_hva(kvm, gfn); 476 addr = gfn_to_hva(kvm, gfn);
435 if (kvm_is_error_hva(addr)) 477 if (kvm_is_error_hva(addr))
436 return ret; 478 return page_size;
437 479
438 down_read(&current->mm->mmap_sem); 480 down_read(&current->mm->mmap_sem);
439 vma = find_vma(current->mm, addr); 481 vma = find_vma(current->mm, addr);
440 if (vma && is_vm_hugetlb_page(vma)) 482 if (!vma)
441 ret = 1; 483 goto out;
484
485 page_size = vma_kernel_pagesize(vma);
486
487out:
442 up_read(&current->mm->mmap_sem); 488 up_read(&current->mm->mmap_sem);
443 489
490 for (i = PT_PAGE_TABLE_LEVEL;
491 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
492 if (page_size >= KVM_HPAGE_SIZE(i))
493 ret = i;
494 else
495 break;
496 }
497
444 return ret; 498 return ret;
445} 499}
446 500
447static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn) 501static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
448{ 502{
449 struct kvm_memory_slot *slot; 503 struct kvm_memory_slot *slot;
450 504 int host_level;
451 if (has_wrprotected_page(vcpu->kvm, large_gfn)) 505 int level = PT_PAGE_TABLE_LEVEL;
452 return 0;
453
454 if (!host_largepage_backed(vcpu->kvm, large_gfn))
455 return 0;
456 506
457 slot = gfn_to_memslot(vcpu->kvm, large_gfn); 507 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
458 if (slot && slot->dirty_bitmap) 508 if (slot && slot->dirty_bitmap)
459 return 0; 509 return PT_PAGE_TABLE_LEVEL;
460 510
461 return 1; 511 host_level = host_mapping_level(vcpu->kvm, large_gfn);
512
513 if (host_level == PT_PAGE_TABLE_LEVEL)
514 return host_level;
515
516 for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
517
518 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
519 break;
520 }
521
522 return level - 1;
462} 523}
463 524
464/* 525/*
@@ -466,19 +527,19 @@ static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
466 * Note: gfn must be unaliased before this function gets called 527 * Note: gfn must be unaliased before this function gets called
467 */ 528 */
468 529
469static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage) 530static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
470{ 531{
471 struct kvm_memory_slot *slot; 532 struct kvm_memory_slot *slot;
472 unsigned long idx; 533 unsigned long idx;
473 534
474 slot = gfn_to_memslot(kvm, gfn); 535 slot = gfn_to_memslot(kvm, gfn);
475 if (!lpage) 536 if (likely(level == PT_PAGE_TABLE_LEVEL))
476 return &slot->rmap[gfn - slot->base_gfn]; 537 return &slot->rmap[gfn - slot->base_gfn];
477 538
478 idx = (gfn / KVM_PAGES_PER_HPAGE) - 539 idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
479 (slot->base_gfn / KVM_PAGES_PER_HPAGE); 540 (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
480 541
481 return &slot->lpage_info[idx].rmap_pde; 542 return &slot->lpage_info[level - 2][idx].rmap_pde;
482} 543}
483 544
484/* 545/*
@@ -494,42 +555,42 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
494 * the spte was not added. 555 * the spte was not added.
495 * 556 *
496 */ 557 */
497static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage) 558static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
498{ 559{
499 struct kvm_mmu_page *sp; 560 struct kvm_mmu_page *sp;
500 struct kvm_rmap_desc *desc; 561 struct kvm_rmap_desc *desc;
501 unsigned long *rmapp; 562 unsigned long *rmapp;
502 int i, count = 0; 563 int i, count = 0;
503 564
504 if (!is_rmap_pte(*spte)) 565 if (!is_rmap_spte(*spte))
505 return count; 566 return count;
506 gfn = unalias_gfn(vcpu->kvm, gfn); 567 gfn = unalias_gfn(vcpu->kvm, gfn);
507 sp = page_header(__pa(spte)); 568 sp = page_header(__pa(spte));
508 sp->gfns[spte - sp->spt] = gfn; 569 sp->gfns[spte - sp->spt] = gfn;
509 rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage); 570 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
510 if (!*rmapp) { 571 if (!*rmapp) {
511 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); 572 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
512 *rmapp = (unsigned long)spte; 573 *rmapp = (unsigned long)spte;
513 } else if (!(*rmapp & 1)) { 574 } else if (!(*rmapp & 1)) {
514 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); 575 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
515 desc = mmu_alloc_rmap_desc(vcpu); 576 desc = mmu_alloc_rmap_desc(vcpu);
516 desc->shadow_ptes[0] = (u64 *)*rmapp; 577 desc->sptes[0] = (u64 *)*rmapp;
517 desc->shadow_ptes[1] = spte; 578 desc->sptes[1] = spte;
518 *rmapp = (unsigned long)desc | 1; 579 *rmapp = (unsigned long)desc | 1;
519 } else { 580 } else {
520 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); 581 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
521 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); 582 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
522 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) { 583 while (desc->sptes[RMAP_EXT-1] && desc->more) {
523 desc = desc->more; 584 desc = desc->more;
524 count += RMAP_EXT; 585 count += RMAP_EXT;
525 } 586 }
526 if (desc->shadow_ptes[RMAP_EXT-1]) { 587 if (desc->sptes[RMAP_EXT-1]) {
527 desc->more = mmu_alloc_rmap_desc(vcpu); 588 desc->more = mmu_alloc_rmap_desc(vcpu);
528 desc = desc->more; 589 desc = desc->more;
529 } 590 }
530 for (i = 0; desc->shadow_ptes[i]; ++i) 591 for (i = 0; desc->sptes[i]; ++i)
531 ; 592 ;
532 desc->shadow_ptes[i] = spte; 593 desc->sptes[i] = spte;
533 } 594 }
534 return count; 595 return count;
535} 596}
@@ -541,14 +602,14 @@ static void rmap_desc_remove_entry(unsigned long *rmapp,
541{ 602{
542 int j; 603 int j;
543 604
544 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j) 605 for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
545 ; 606 ;
546 desc->shadow_ptes[i] = desc->shadow_ptes[j]; 607 desc->sptes[i] = desc->sptes[j];
547 desc->shadow_ptes[j] = NULL; 608 desc->sptes[j] = NULL;
548 if (j != 0) 609 if (j != 0)
549 return; 610 return;
550 if (!prev_desc && !desc->more) 611 if (!prev_desc && !desc->more)
551 *rmapp = (unsigned long)desc->shadow_ptes[0]; 612 *rmapp = (unsigned long)desc->sptes[0];
552 else 613 else
553 if (prev_desc) 614 if (prev_desc)
554 prev_desc->more = desc->more; 615 prev_desc->more = desc->more;
@@ -566,7 +627,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
566 unsigned long *rmapp; 627 unsigned long *rmapp;
567 int i; 628 int i;
568 629
569 if (!is_rmap_pte(*spte)) 630 if (!is_rmap_spte(*spte))
570 return; 631 return;
571 sp = page_header(__pa(spte)); 632 sp = page_header(__pa(spte));
572 pfn = spte_to_pfn(*spte); 633 pfn = spte_to_pfn(*spte);
@@ -576,7 +637,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
576 kvm_release_pfn_dirty(pfn); 637 kvm_release_pfn_dirty(pfn);
577 else 638 else
578 kvm_release_pfn_clean(pfn); 639 kvm_release_pfn_clean(pfn);
579 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte)); 640 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
580 if (!*rmapp) { 641 if (!*rmapp) {
581 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 642 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
582 BUG(); 643 BUG();
@@ -593,8 +654,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
593 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul); 654 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
594 prev_desc = NULL; 655 prev_desc = NULL;
595 while (desc) { 656 while (desc) {
596 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) 657 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
597 if (desc->shadow_ptes[i] == spte) { 658 if (desc->sptes[i] == spte) {
598 rmap_desc_remove_entry(rmapp, 659 rmap_desc_remove_entry(rmapp,
599 desc, i, 660 desc, i,
600 prev_desc); 661 prev_desc);
@@ -625,10 +686,10 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
625 prev_desc = NULL; 686 prev_desc = NULL;
626 prev_spte = NULL; 687 prev_spte = NULL;
627 while (desc) { 688 while (desc) {
628 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) { 689 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
629 if (prev_spte == spte) 690 if (prev_spte == spte)
630 return desc->shadow_ptes[i]; 691 return desc->sptes[i];
631 prev_spte = desc->shadow_ptes[i]; 692 prev_spte = desc->sptes[i];
632 } 693 }
633 desc = desc->more; 694 desc = desc->more;
634 } 695 }
@@ -639,10 +700,10 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
639{ 700{
640 unsigned long *rmapp; 701 unsigned long *rmapp;
641 u64 *spte; 702 u64 *spte;
642 int write_protected = 0; 703 int i, write_protected = 0;
643 704
644 gfn = unalias_gfn(kvm, gfn); 705 gfn = unalias_gfn(kvm, gfn);
645 rmapp = gfn_to_rmap(kvm, gfn, 0); 706 rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
646 707
647 spte = rmap_next(kvm, rmapp, NULL); 708 spte = rmap_next(kvm, rmapp, NULL);
648 while (spte) { 709 while (spte) {
@@ -650,7 +711,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
650 BUG_ON(!(*spte & PT_PRESENT_MASK)); 711 BUG_ON(!(*spte & PT_PRESENT_MASK));
651 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); 712 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
652 if (is_writeble_pte(*spte)) { 713 if (is_writeble_pte(*spte)) {
653 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK); 714 __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
654 write_protected = 1; 715 write_protected = 1;
655 } 716 }
656 spte = rmap_next(kvm, rmapp, spte); 717 spte = rmap_next(kvm, rmapp, spte);
@@ -664,21 +725,24 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
664 } 725 }
665 726
666 /* check for huge page mappings */ 727 /* check for huge page mappings */
667 rmapp = gfn_to_rmap(kvm, gfn, 1); 728 for (i = PT_DIRECTORY_LEVEL;
668 spte = rmap_next(kvm, rmapp, NULL); 729 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
669 while (spte) { 730 rmapp = gfn_to_rmap(kvm, gfn, i);
670 BUG_ON(!spte); 731 spte = rmap_next(kvm, rmapp, NULL);
671 BUG_ON(!(*spte & PT_PRESENT_MASK)); 732 while (spte) {
672 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)); 733 BUG_ON(!spte);
673 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn); 734 BUG_ON(!(*spte & PT_PRESENT_MASK));
674 if (is_writeble_pte(*spte)) { 735 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
675 rmap_remove(kvm, spte); 736 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
676 --kvm->stat.lpages; 737 if (is_writeble_pte(*spte)) {
677 set_shadow_pte(spte, shadow_trap_nonpresent_pte); 738 rmap_remove(kvm, spte);
678 spte = NULL; 739 --kvm->stat.lpages;
679 write_protected = 1; 740 __set_spte(spte, shadow_trap_nonpresent_pte);
741 spte = NULL;
742 write_protected = 1;
743 }
744 spte = rmap_next(kvm, rmapp, spte);
680 } 745 }
681 spte = rmap_next(kvm, rmapp, spte);
682 } 746 }
683 747
684 return write_protected; 748 return write_protected;
@@ -693,7 +757,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
693 BUG_ON(!(*spte & PT_PRESENT_MASK)); 757 BUG_ON(!(*spte & PT_PRESENT_MASK));
694 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); 758 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
695 rmap_remove(kvm, spte); 759 rmap_remove(kvm, spte);
696 set_shadow_pte(spte, shadow_trap_nonpresent_pte); 760 __set_spte(spte, shadow_trap_nonpresent_pte);
697 need_tlb_flush = 1; 761 need_tlb_flush = 1;
698 } 762 }
699 return need_tlb_flush; 763 return need_tlb_flush;
@@ -702,7 +766,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
702static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, 766static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
703 int (*handler)(struct kvm *kvm, unsigned long *rmapp)) 767 int (*handler)(struct kvm *kvm, unsigned long *rmapp))
704{ 768{
705 int i; 769 int i, j;
706 int retval = 0; 770 int retval = 0;
707 771
708 /* 772 /*
@@ -721,11 +785,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
721 end = start + (memslot->npages << PAGE_SHIFT); 785 end = start + (memslot->npages << PAGE_SHIFT);
722 if (hva >= start && hva < end) { 786 if (hva >= start && hva < end) {
723 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; 787 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
788
724 retval |= handler(kvm, &memslot->rmap[gfn_offset]); 789 retval |= handler(kvm, &memslot->rmap[gfn_offset]);
725 retval |= handler(kvm, 790
726 &memslot->lpage_info[ 791 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
727 gfn_offset / 792 int idx = gfn_offset;
728 KVM_PAGES_PER_HPAGE].rmap_pde); 793 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
794 retval |= handler(kvm,
795 &memslot->lpage_info[j][idx].rmap_pde);
796 }
729 } 797 }
730 } 798 }
731 799
@@ -763,12 +831,15 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
763 831
764#define RMAP_RECYCLE_THRESHOLD 1000 832#define RMAP_RECYCLE_THRESHOLD 1000
765 833
766static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage) 834static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
767{ 835{
768 unsigned long *rmapp; 836 unsigned long *rmapp;
837 struct kvm_mmu_page *sp;
838
839 sp = page_header(__pa(spte));
769 840
770 gfn = unalias_gfn(vcpu->kvm, gfn); 841 gfn = unalias_gfn(vcpu->kvm, gfn);
771 rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage); 842 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
772 843
773 kvm_unmap_rmapp(vcpu->kvm, rmapp); 844 kvm_unmap_rmapp(vcpu->kvm, rmapp);
774 kvm_flush_remote_tlbs(vcpu->kvm); 845 kvm_flush_remote_tlbs(vcpu->kvm);
@@ -1109,6 +1180,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1109 return 1; 1180 return 1;
1110 } 1181 }
1111 1182
1183 trace_kvm_mmu_sync_page(sp);
1112 if (rmap_write_protect(vcpu->kvm, sp->gfn)) 1184 if (rmap_write_protect(vcpu->kvm, sp->gfn))
1113 kvm_flush_remote_tlbs(vcpu->kvm); 1185 kvm_flush_remote_tlbs(vcpu->kvm);
1114 kvm_unlink_unsync_page(vcpu->kvm, sp); 1186 kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1231,8 +1303,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1231 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1; 1303 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1232 role.quadrant = quadrant; 1304 role.quadrant = quadrant;
1233 } 1305 }
1234 pgprintk("%s: looking gfn %lx role %x\n", __func__,
1235 gfn, role.word);
1236 index = kvm_page_table_hashfn(gfn); 1306 index = kvm_page_table_hashfn(gfn);
1237 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 1307 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1238 hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link) 1308 hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
@@ -1249,14 +1319,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1249 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests); 1319 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1250 kvm_mmu_mark_parents_unsync(vcpu, sp); 1320 kvm_mmu_mark_parents_unsync(vcpu, sp);
1251 } 1321 }
1252 pgprintk("%s: found\n", __func__); 1322 trace_kvm_mmu_get_page(sp, false);
1253 return sp; 1323 return sp;
1254 } 1324 }
1255 ++vcpu->kvm->stat.mmu_cache_miss; 1325 ++vcpu->kvm->stat.mmu_cache_miss;
1256 sp = kvm_mmu_alloc_page(vcpu, parent_pte); 1326 sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1257 if (!sp) 1327 if (!sp)
1258 return sp; 1328 return sp;
1259 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
1260 sp->gfn = gfn; 1329 sp->gfn = gfn;
1261 sp->role = role; 1330 sp->role = role;
1262 hlist_add_head(&sp->hash_link, bucket); 1331 hlist_add_head(&sp->hash_link, bucket);
@@ -1269,6 +1338,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1269 vcpu->arch.mmu.prefetch_page(vcpu, sp); 1338 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1270 else 1339 else
1271 nonpaging_prefetch_page(vcpu, sp); 1340 nonpaging_prefetch_page(vcpu, sp);
1341 trace_kvm_mmu_get_page(sp, true);
1272 return sp; 1342 return sp;
1273} 1343}
1274 1344
@@ -1292,6 +1362,11 @@ static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1292{ 1362{
1293 if (iterator->level < PT_PAGE_TABLE_LEVEL) 1363 if (iterator->level < PT_PAGE_TABLE_LEVEL)
1294 return false; 1364 return false;
1365
1366 if (iterator->level == PT_PAGE_TABLE_LEVEL)
1367 if (is_large_pte(*iterator->sptep))
1368 return false;
1369
1295 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level); 1370 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1296 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index; 1371 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1297 return true; 1372 return true;
@@ -1312,25 +1387,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1312 1387
1313 pt = sp->spt; 1388 pt = sp->spt;
1314 1389
1315 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1316 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1317 if (is_shadow_present_pte(pt[i]))
1318 rmap_remove(kvm, &pt[i]);
1319 pt[i] = shadow_trap_nonpresent_pte;
1320 }
1321 return;
1322 }
1323
1324 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { 1390 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1325 ent = pt[i]; 1391 ent = pt[i];
1326 1392
1327 if (is_shadow_present_pte(ent)) { 1393 if (is_shadow_present_pte(ent)) {
1328 if (!is_large_pte(ent)) { 1394 if (!is_last_spte(ent, sp->role.level)) {
1329 ent &= PT64_BASE_ADDR_MASK; 1395 ent &= PT64_BASE_ADDR_MASK;
1330 mmu_page_remove_parent_pte(page_header(ent), 1396 mmu_page_remove_parent_pte(page_header(ent),
1331 &pt[i]); 1397 &pt[i]);
1332 } else { 1398 } else {
1333 --kvm->stat.lpages; 1399 if (is_large_pte(ent))
1400 --kvm->stat.lpages;
1334 rmap_remove(kvm, &pt[i]); 1401 rmap_remove(kvm, &pt[i]);
1335 } 1402 }
1336 } 1403 }
@@ -1346,10 +1413,10 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1346static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm) 1413static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1347{ 1414{
1348 int i; 1415 int i;
1416 struct kvm_vcpu *vcpu;
1349 1417
1350 for (i = 0; i < KVM_MAX_VCPUS; ++i) 1418 kvm_for_each_vcpu(i, vcpu, kvm)
1351 if (kvm->vcpus[i]) 1419 vcpu->arch.last_pte_updated = NULL;
1352 kvm->vcpus[i]->arch.last_pte_updated = NULL;
1353} 1420}
1354 1421
1355static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) 1422static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -1368,7 +1435,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1368 } 1435 }
1369 BUG_ON(!parent_pte); 1436 BUG_ON(!parent_pte);
1370 kvm_mmu_put_page(sp, parent_pte); 1437 kvm_mmu_put_page(sp, parent_pte);
1371 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte); 1438 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1372 } 1439 }
1373} 1440}
1374 1441
@@ -1400,6 +1467,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
1400static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) 1467static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1401{ 1468{
1402 int ret; 1469 int ret;
1470
1471 trace_kvm_mmu_zap_page(sp);
1403 ++kvm->stat.mmu_shadow_zapped; 1472 ++kvm->stat.mmu_shadow_zapped;
1404 ret = mmu_zap_unsync_children(kvm, sp); 1473 ret = mmu_zap_unsync_children(kvm, sp);
1405 kvm_mmu_page_unlink_children(kvm, sp); 1474 kvm_mmu_page_unlink_children(kvm, sp);
@@ -1516,7 +1585,7 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1516 1585
1517 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) { 1586 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1518 if (pt[i] == shadow_notrap_nonpresent_pte) 1587 if (pt[i] == shadow_notrap_nonpresent_pte)
1519 set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte); 1588 __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1520 } 1589 }
1521} 1590}
1522 1591
@@ -1646,6 +1715,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1646 struct kvm_mmu_page *s; 1715 struct kvm_mmu_page *s;
1647 struct hlist_node *node, *n; 1716 struct hlist_node *node, *n;
1648 1717
1718 trace_kvm_mmu_unsync_page(sp);
1649 index = kvm_page_table_hashfn(sp->gfn); 1719 index = kvm_page_table_hashfn(sp->gfn);
1650 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 1720 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1651 /* don't unsync if pagetable is shadowed with multiple roles */ 1721 /* don't unsync if pagetable is shadowed with multiple roles */
@@ -1682,9 +1752,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1682 return 0; 1752 return 0;
1683} 1753}
1684 1754
1685static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, 1755static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1686 unsigned pte_access, int user_fault, 1756 unsigned pte_access, int user_fault,
1687 int write_fault, int dirty, int largepage, 1757 int write_fault, int dirty, int level,
1688 gfn_t gfn, pfn_t pfn, bool speculative, 1758 gfn_t gfn, pfn_t pfn, bool speculative,
1689 bool can_unsync) 1759 bool can_unsync)
1690{ 1760{
@@ -1707,7 +1777,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1707 spte |= shadow_nx_mask; 1777 spte |= shadow_nx_mask;
1708 if (pte_access & ACC_USER_MASK) 1778 if (pte_access & ACC_USER_MASK)
1709 spte |= shadow_user_mask; 1779 spte |= shadow_user_mask;
1710 if (largepage) 1780 if (level > PT_PAGE_TABLE_LEVEL)
1711 spte |= PT_PAGE_SIZE_MASK; 1781 spte |= PT_PAGE_SIZE_MASK;
1712 if (tdp_enabled) 1782 if (tdp_enabled)
1713 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 1783 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
@@ -1718,7 +1788,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1718 if ((pte_access & ACC_WRITE_MASK) 1788 if ((pte_access & ACC_WRITE_MASK)
1719 || (write_fault && !is_write_protection(vcpu) && !user_fault)) { 1789 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1720 1790
1721 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) { 1791 if (level > PT_PAGE_TABLE_LEVEL &&
1792 has_wrprotected_page(vcpu->kvm, gfn, level)) {
1722 ret = 1; 1793 ret = 1;
1723 spte = shadow_trap_nonpresent_pte; 1794 spte = shadow_trap_nonpresent_pte;
1724 goto set_pte; 1795 goto set_pte;
@@ -1732,7 +1803,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1732 * is the responsibility of mmu_get_page / kvm_sync_page. 1803 * is the responsibility of mmu_get_page / kvm_sync_page.
1733 * Same reasoning can be applied to dirty page accounting. 1804 * Same reasoning can be applied to dirty page accounting.
1734 */ 1805 */
1735 if (!can_unsync && is_writeble_pte(*shadow_pte)) 1806 if (!can_unsync && is_writeble_pte(*sptep))
1736 goto set_pte; 1807 goto set_pte;
1737 1808
1738 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) { 1809 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1749,65 +1820,67 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1749 mark_page_dirty(vcpu->kvm, gfn); 1820 mark_page_dirty(vcpu->kvm, gfn);
1750 1821
1751set_pte: 1822set_pte:
1752 set_shadow_pte(shadow_pte, spte); 1823 __set_spte(sptep, spte);
1753 return ret; 1824 return ret;
1754} 1825}
1755 1826
1756static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte, 1827static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1757 unsigned pt_access, unsigned pte_access, 1828 unsigned pt_access, unsigned pte_access,
1758 int user_fault, int write_fault, int dirty, 1829 int user_fault, int write_fault, int dirty,
1759 int *ptwrite, int largepage, gfn_t gfn, 1830 int *ptwrite, int level, gfn_t gfn,
1760 pfn_t pfn, bool speculative) 1831 pfn_t pfn, bool speculative)
1761{ 1832{
1762 int was_rmapped = 0; 1833 int was_rmapped = 0;
1763 int was_writeble = is_writeble_pte(*shadow_pte); 1834 int was_writeble = is_writeble_pte(*sptep);
1764 int rmap_count; 1835 int rmap_count;
1765 1836
1766 pgprintk("%s: spte %llx access %x write_fault %d" 1837 pgprintk("%s: spte %llx access %x write_fault %d"
1767 " user_fault %d gfn %lx\n", 1838 " user_fault %d gfn %lx\n",
1768 __func__, *shadow_pte, pt_access, 1839 __func__, *sptep, pt_access,
1769 write_fault, user_fault, gfn); 1840 write_fault, user_fault, gfn);
1770 1841
1771 if (is_rmap_pte(*shadow_pte)) { 1842 if (is_rmap_spte(*sptep)) {
1772 /* 1843 /*
1773 * If we overwrite a PTE page pointer with a 2MB PMD, unlink 1844 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1774 * the parent of the now unreachable PTE. 1845 * the parent of the now unreachable PTE.
1775 */ 1846 */
1776 if (largepage && !is_large_pte(*shadow_pte)) { 1847 if (level > PT_PAGE_TABLE_LEVEL &&
1848 !is_large_pte(*sptep)) {
1777 struct kvm_mmu_page *child; 1849 struct kvm_mmu_page *child;
1778 u64 pte = *shadow_pte; 1850 u64 pte = *sptep;
1779 1851
1780 child = page_header(pte & PT64_BASE_ADDR_MASK); 1852 child = page_header(pte & PT64_BASE_ADDR_MASK);
1781 mmu_page_remove_parent_pte(child, shadow_pte); 1853 mmu_page_remove_parent_pte(child, sptep);
1782 } else if (pfn != spte_to_pfn(*shadow_pte)) { 1854 } else if (pfn != spte_to_pfn(*sptep)) {
1783 pgprintk("hfn old %lx new %lx\n", 1855 pgprintk("hfn old %lx new %lx\n",
1784 spte_to_pfn(*shadow_pte), pfn); 1856 spte_to_pfn(*sptep), pfn);
1785 rmap_remove(vcpu->kvm, shadow_pte); 1857 rmap_remove(vcpu->kvm, sptep);
1786 } else 1858 } else
1787 was_rmapped = 1; 1859 was_rmapped = 1;
1788 } 1860 }
1789 if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault, 1861
1790 dirty, largepage, gfn, pfn, speculative, true)) { 1862 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1863 dirty, level, gfn, pfn, speculative, true)) {
1791 if (write_fault) 1864 if (write_fault)
1792 *ptwrite = 1; 1865 *ptwrite = 1;
1793 kvm_x86_ops->tlb_flush(vcpu); 1866 kvm_x86_ops->tlb_flush(vcpu);
1794 } 1867 }
1795 1868
1796 pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte); 1869 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1797 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n", 1870 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1798 is_large_pte(*shadow_pte)? "2MB" : "4kB", 1871 is_large_pte(*sptep)? "2MB" : "4kB",
1799 is_present_pte(*shadow_pte)?"RW":"R", gfn, 1872 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
1800 *shadow_pte, shadow_pte); 1873 *sptep, sptep);
1801 if (!was_rmapped && is_large_pte(*shadow_pte)) 1874 if (!was_rmapped && is_large_pte(*sptep))
1802 ++vcpu->kvm->stat.lpages; 1875 ++vcpu->kvm->stat.lpages;
1803 1876
1804 page_header_update_slot(vcpu->kvm, shadow_pte, gfn); 1877 page_header_update_slot(vcpu->kvm, sptep, gfn);
1805 if (!was_rmapped) { 1878 if (!was_rmapped) {
1806 rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage); 1879 rmap_count = rmap_add(vcpu, sptep, gfn);
1807 if (!is_rmap_pte(*shadow_pte)) 1880 if (!is_rmap_spte(*sptep))
1808 kvm_release_pfn_clean(pfn); 1881 kvm_release_pfn_clean(pfn);
1809 if (rmap_count > RMAP_RECYCLE_THRESHOLD) 1882 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1810 rmap_recycle(vcpu, gfn, largepage); 1883 rmap_recycle(vcpu, sptep, gfn);
1811 } else { 1884 } else {
1812 if (was_writeble) 1885 if (was_writeble)
1813 kvm_release_pfn_dirty(pfn); 1886 kvm_release_pfn_dirty(pfn);
@@ -1815,7 +1888,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1815 kvm_release_pfn_clean(pfn); 1888 kvm_release_pfn_clean(pfn);
1816 } 1889 }
1817 if (speculative) { 1890 if (speculative) {
1818 vcpu->arch.last_pte_updated = shadow_pte; 1891 vcpu->arch.last_pte_updated = sptep;
1819 vcpu->arch.last_pte_gfn = gfn; 1892 vcpu->arch.last_pte_gfn = gfn;
1820 } 1893 }
1821} 1894}
@@ -1825,7 +1898,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1825} 1898}
1826 1899
1827static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, 1900static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1828 int largepage, gfn_t gfn, pfn_t pfn) 1901 int level, gfn_t gfn, pfn_t pfn)
1829{ 1902{
1830 struct kvm_shadow_walk_iterator iterator; 1903 struct kvm_shadow_walk_iterator iterator;
1831 struct kvm_mmu_page *sp; 1904 struct kvm_mmu_page *sp;
@@ -1833,11 +1906,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1833 gfn_t pseudo_gfn; 1906 gfn_t pseudo_gfn;
1834 1907
1835 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) { 1908 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1836 if (iterator.level == PT_PAGE_TABLE_LEVEL 1909 if (iterator.level == level) {
1837 || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1838 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, 1910 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1839 0, write, 1, &pt_write, 1911 0, write, 1, &pt_write,
1840 largepage, gfn, pfn, false); 1912 level, gfn, pfn, false);
1841 ++vcpu->stat.pf_fixed; 1913 ++vcpu->stat.pf_fixed;
1842 break; 1914 break;
1843 } 1915 }
@@ -1853,10 +1925,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1853 return -ENOMEM; 1925 return -ENOMEM;
1854 } 1926 }
1855 1927
1856 set_shadow_pte(iterator.sptep, 1928 __set_spte(iterator.sptep,
1857 __pa(sp->spt) 1929 __pa(sp->spt)
1858 | PT_PRESENT_MASK | PT_WRITABLE_MASK 1930 | PT_PRESENT_MASK | PT_WRITABLE_MASK
1859 | shadow_user_mask | shadow_x_mask); 1931 | shadow_user_mask | shadow_x_mask);
1860 } 1932 }
1861 } 1933 }
1862 return pt_write; 1934 return pt_write;
@@ -1865,14 +1937,20 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1865static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn) 1937static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1866{ 1938{
1867 int r; 1939 int r;
1868 int largepage = 0; 1940 int level;
1869 pfn_t pfn; 1941 pfn_t pfn;
1870 unsigned long mmu_seq; 1942 unsigned long mmu_seq;
1871 1943
1872 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { 1944 level = mapping_level(vcpu, gfn);
1873 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 1945
1874 largepage = 1; 1946 /*
1947 * This path builds a PAE pagetable - so we can map 2MB pages at
1948 * most. Therefore check whether the level is larger than that.
1949 */
1950 if (level > PT_DIRECTORY_LEVEL)
1951 level = PT_DIRECTORY_LEVEL;
1952
1953 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
1876 1954
1877 mmu_seq = vcpu->kvm->mmu_notifier_seq; 1955 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1878 smp_rmb(); 1956 smp_rmb();
@@ -1888,7 +1966,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1888 if (mmu_notifier_retry(vcpu, mmu_seq)) 1966 if (mmu_notifier_retry(vcpu, mmu_seq))
1889 goto out_unlock; 1967 goto out_unlock;
1890 kvm_mmu_free_some_pages(vcpu); 1968 kvm_mmu_free_some_pages(vcpu);
1891 r = __direct_map(vcpu, v, write, largepage, gfn, pfn); 1969 r = __direct_map(vcpu, v, write, level, gfn, pfn);
1892 spin_unlock(&vcpu->kvm->mmu_lock); 1970 spin_unlock(&vcpu->kvm->mmu_lock);
1893 1971
1894 1972
@@ -1954,6 +2032,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1954 gfn_t root_gfn; 2032 gfn_t root_gfn;
1955 struct kvm_mmu_page *sp; 2033 struct kvm_mmu_page *sp;
1956 int direct = 0; 2034 int direct = 0;
2035 u64 pdptr;
1957 2036
1958 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; 2037 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1959 2038
@@ -1981,11 +2060,12 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1981 2060
1982 ASSERT(!VALID_PAGE(root)); 2061 ASSERT(!VALID_PAGE(root));
1983 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { 2062 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1984 if (!is_present_pte(vcpu->arch.pdptrs[i])) { 2063 pdptr = kvm_pdptr_read(vcpu, i);
2064 if (!is_present_gpte(pdptr)) {
1985 vcpu->arch.mmu.pae_root[i] = 0; 2065 vcpu->arch.mmu.pae_root[i] = 0;
1986 continue; 2066 continue;
1987 } 2067 }
1988 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT; 2068 root_gfn = pdptr >> PAGE_SHIFT;
1989 } else if (vcpu->arch.mmu.root_level == 0) 2069 } else if (vcpu->arch.mmu.root_level == 0)
1990 root_gfn = 0; 2070 root_gfn = 0;
1991 if (mmu_check_root(vcpu, root_gfn)) 2071 if (mmu_check_root(vcpu, root_gfn))
@@ -2062,7 +2142,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2062{ 2142{
2063 pfn_t pfn; 2143 pfn_t pfn;
2064 int r; 2144 int r;
2065 int largepage = 0; 2145 int level;
2066 gfn_t gfn = gpa >> PAGE_SHIFT; 2146 gfn_t gfn = gpa >> PAGE_SHIFT;
2067 unsigned long mmu_seq; 2147 unsigned long mmu_seq;
2068 2148
@@ -2073,10 +2153,10 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2073 if (r) 2153 if (r)
2074 return r; 2154 return r;
2075 2155
2076 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) { 2156 level = mapping_level(vcpu, gfn);
2077 gfn &= ~(KVM_PAGES_PER_HPAGE-1); 2157
2078 largepage = 1; 2158 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2079 } 2159
2080 mmu_seq = vcpu->kvm->mmu_notifier_seq; 2160 mmu_seq = vcpu->kvm->mmu_notifier_seq;
2081 smp_rmb(); 2161 smp_rmb();
2082 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2162 pfn = gfn_to_pfn(vcpu->kvm, gfn);
@@ -2089,7 +2169,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2089 goto out_unlock; 2169 goto out_unlock;
2090 kvm_mmu_free_some_pages(vcpu); 2170 kvm_mmu_free_some_pages(vcpu);
2091 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK, 2171 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2092 largepage, gfn, pfn); 2172 level, gfn, pfn);
2093 spin_unlock(&vcpu->kvm->mmu_lock); 2173 spin_unlock(&vcpu->kvm->mmu_lock);
2094 2174
2095 return r; 2175 return r;
@@ -2206,7 +2286,9 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2206 context->rsvd_bits_mask[0][0] = exb_bit_rsvd | 2286 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2207 rsvd_bits(maxphyaddr, 51); 2287 rsvd_bits(maxphyaddr, 51);
2208 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3]; 2288 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2209 context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2]; 2289 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2290 rsvd_bits(maxphyaddr, 51) |
2291 rsvd_bits(13, 29);
2210 context->rsvd_bits_mask[1][1] = exb_bit_rsvd | 2292 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2211 rsvd_bits(maxphyaddr, 51) | 2293 rsvd_bits(maxphyaddr, 51) |
2212 rsvd_bits(13, 20); /* large page */ 2294 rsvd_bits(13, 20); /* large page */
@@ -2357,8 +2439,8 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
2357 spin_unlock(&vcpu->kvm->mmu_lock); 2439 spin_unlock(&vcpu->kvm->mmu_lock);
2358 if (r) 2440 if (r)
2359 goto out; 2441 goto out;
2442 /* set_cr3() should ensure TLB has been flushed */
2360 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa); 2443 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2361 kvm_mmu_flush_tlb(vcpu);
2362out: 2444out:
2363 return r; 2445 return r;
2364} 2446}
@@ -2378,15 +2460,14 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2378 2460
2379 pte = *spte; 2461 pte = *spte;
2380 if (is_shadow_present_pte(pte)) { 2462 if (is_shadow_present_pte(pte)) {
2381 if (sp->role.level == PT_PAGE_TABLE_LEVEL || 2463 if (is_last_spte(pte, sp->role.level))
2382 is_large_pte(pte))
2383 rmap_remove(vcpu->kvm, spte); 2464 rmap_remove(vcpu->kvm, spte);
2384 else { 2465 else {
2385 child = page_header(pte & PT64_BASE_ADDR_MASK); 2466 child = page_header(pte & PT64_BASE_ADDR_MASK);
2386 mmu_page_remove_parent_pte(child, spte); 2467 mmu_page_remove_parent_pte(child, spte);
2387 } 2468 }
2388 } 2469 }
2389 set_shadow_pte(spte, shadow_trap_nonpresent_pte); 2470 __set_spte(spte, shadow_trap_nonpresent_pte);
2390 if (is_large_pte(pte)) 2471 if (is_large_pte(pte))
2391 --vcpu->kvm->stat.lpages; 2472 --vcpu->kvm->stat.lpages;
2392} 2473}
@@ -2397,11 +2478,8 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2397 const void *new) 2478 const void *new)
2398{ 2479{
2399 if (sp->role.level != PT_PAGE_TABLE_LEVEL) { 2480 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2400 if (!vcpu->arch.update_pte.largepage || 2481 ++vcpu->kvm->stat.mmu_pde_zapped;
2401 sp->role.glevels == PT32_ROOT_LEVEL) { 2482 return;
2402 ++vcpu->kvm->stat.mmu_pde_zapped;
2403 return;
2404 }
2405 } 2483 }
2406 2484
2407 ++vcpu->kvm->stat.mmu_pte_updated; 2485 ++vcpu->kvm->stat.mmu_pte_updated;
@@ -2447,8 +2525,6 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2447 u64 gpte = 0; 2525 u64 gpte = 0;
2448 pfn_t pfn; 2526 pfn_t pfn;
2449 2527
2450 vcpu->arch.update_pte.largepage = 0;
2451
2452 if (bytes != 4 && bytes != 8) 2528 if (bytes != 4 && bytes != 8)
2453 return; 2529 return;
2454 2530
@@ -2472,14 +2548,10 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2472 if ((bytes == 4) && (gpa % 4 == 0)) 2548 if ((bytes == 4) && (gpa % 4 == 0))
2473 memcpy((void *)&gpte, new, 4); 2549 memcpy((void *)&gpte, new, 4);
2474 } 2550 }
2475 if (!is_present_pte(gpte)) 2551 if (!is_present_gpte(gpte))
2476 return; 2552 return;
2477 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; 2553 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2478 2554
2479 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2480 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
2481 vcpu->arch.update_pte.largepage = 1;
2482 }
2483 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; 2555 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2484 smp_rmb(); 2556 smp_rmb();
2485 pfn = gfn_to_pfn(vcpu->kvm, gfn); 2557 pfn = gfn_to_pfn(vcpu->kvm, gfn);
@@ -2622,6 +2694,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2622 gpa_t gpa; 2694 gpa_t gpa;
2623 int r; 2695 int r;
2624 2696
2697 if (tdp_enabled)
2698 return 0;
2699
2625 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva); 2700 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2626 2701
2627 spin_lock(&vcpu->kvm->mmu_lock); 2702 spin_lock(&vcpu->kvm->mmu_lock);
@@ -2633,7 +2708,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2633 2708
2634void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 2709void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2635{ 2710{
2636 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) { 2711 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
2712 !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2637 struct kvm_mmu_page *sp; 2713 struct kvm_mmu_page *sp;
2638 2714
2639 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev, 2715 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
@@ -2670,8 +2746,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2670 ++vcpu->stat.mmio_exits; 2746 ++vcpu->stat.mmio_exits;
2671 return 0; 2747 return 0;
2672 case EMULATE_FAIL: 2748 case EMULATE_FAIL:
2673 kvm_report_emulation_failure(vcpu, "pagetable"); 2749 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2674 return 1; 2750 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2751 return 0;
2675 default: 2752 default:
2676 BUG(); 2753 BUG();
2677 } 2754 }
@@ -2712,12 +2789,6 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2712 2789
2713 ASSERT(vcpu); 2790 ASSERT(vcpu);
2714 2791
2715 if (vcpu->kvm->arch.n_requested_mmu_pages)
2716 vcpu->kvm->arch.n_free_mmu_pages =
2717 vcpu->kvm->arch.n_requested_mmu_pages;
2718 else
2719 vcpu->kvm->arch.n_free_mmu_pages =
2720 vcpu->kvm->arch.n_alloc_mmu_pages;
2721 /* 2792 /*
2722 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64. 2793 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2723 * Therefore we need to allocate shadow page tables in the first 2794 * Therefore we need to allocate shadow page tables in the first
@@ -3029,6 +3100,24 @@ out:
3029 return r; 3100 return r;
3030} 3101}
3031 3102
3103int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3104{
3105 struct kvm_shadow_walk_iterator iterator;
3106 int nr_sptes = 0;
3107
3108 spin_lock(&vcpu->kvm->mmu_lock);
3109 for_each_shadow_entry(vcpu, addr, iterator) {
3110 sptes[iterator.level-1] = *iterator.sptep;
3111 nr_sptes++;
3112 if (!is_shadow_present_pte(*iterator.sptep))
3113 break;
3114 }
3115 spin_unlock(&vcpu->kvm->mmu_lock);
3116
3117 return nr_sptes;
3118}
3119EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3120
3032#ifdef AUDIT 3121#ifdef AUDIT
3033 3122
3034static const char *audit_msg; 3123static const char *audit_msg;
@@ -3041,6 +3130,54 @@ static gva_t canonicalize(gva_t gva)
3041 return gva; 3130 return gva;
3042} 3131}
3043 3132
3133
3134typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
3135 u64 *sptep);
3136
3137static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3138 inspect_spte_fn fn)
3139{
3140 int i;
3141
3142 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3143 u64 ent = sp->spt[i];
3144
3145 if (is_shadow_present_pte(ent)) {
3146 if (!is_last_spte(ent, sp->role.level)) {
3147 struct kvm_mmu_page *child;
3148 child = page_header(ent & PT64_BASE_ADDR_MASK);
3149 __mmu_spte_walk(kvm, child, fn);
3150 } else
3151 fn(kvm, sp, &sp->spt[i]);
3152 }
3153 }
3154}
3155
3156static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3157{
3158 int i;
3159 struct kvm_mmu_page *sp;
3160
3161 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3162 return;
3163 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3164 hpa_t root = vcpu->arch.mmu.root_hpa;
3165 sp = page_header(root);
3166 __mmu_spte_walk(vcpu->kvm, sp, fn);
3167 return;
3168 }
3169 for (i = 0; i < 4; ++i) {
3170 hpa_t root = vcpu->arch.mmu.pae_root[i];
3171
3172 if (root && VALID_PAGE(root)) {
3173 root &= PT64_BASE_ADDR_MASK;
3174 sp = page_header(root);
3175 __mmu_spte_walk(vcpu->kvm, sp, fn);
3176 }
3177 }
3178 return;
3179}
3180
3044static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte, 3181static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3045 gva_t va, int level) 3182 gva_t va, int level)
3046{ 3183{
@@ -3055,20 +3192,19 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3055 continue; 3192 continue;
3056 3193
3057 va = canonicalize(va); 3194 va = canonicalize(va);
3058 if (level > 1) { 3195 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3059 if (ent == shadow_notrap_nonpresent_pte) 3196 audit_mappings_page(vcpu, ent, va, level - 1);
3060 printk(KERN_ERR "audit: (%s) nontrapping pte" 3197 else {
3061 " in nonleaf level: levels %d gva %lx"
3062 " level %d pte %llx\n", audit_msg,
3063 vcpu->arch.mmu.root_level, va, level, ent);
3064 else
3065 audit_mappings_page(vcpu, ent, va, level - 1);
3066 } else {
3067 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va); 3198 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3068 gfn_t gfn = gpa >> PAGE_SHIFT; 3199 gfn_t gfn = gpa >> PAGE_SHIFT;
3069 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn); 3200 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3070 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT; 3201 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3071 3202
3203 if (is_error_pfn(pfn)) {
3204 kvm_release_pfn_clean(pfn);
3205 continue;
3206 }
3207
3072 if (is_shadow_present_pte(ent) 3208 if (is_shadow_present_pte(ent)
3073 && (ent & PT64_BASE_ADDR_MASK) != hpa) 3209 && (ent & PT64_BASE_ADDR_MASK) != hpa)
3074 printk(KERN_ERR "xx audit error: (%s) levels %d" 3210 printk(KERN_ERR "xx audit error: (%s) levels %d"
@@ -3122,7 +3258,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
3122 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul); 3258 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3123 while (d) { 3259 while (d) {
3124 for (k = 0; k < RMAP_EXT; ++k) 3260 for (k = 0; k < RMAP_EXT; ++k)
3125 if (d->shadow_ptes[k]) 3261 if (d->sptes[k])
3126 ++nmaps; 3262 ++nmaps;
3127 else 3263 else
3128 break; 3264 break;
@@ -3133,9 +3269,48 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
3133 return nmaps; 3269 return nmaps;
3134} 3270}
3135 3271
3136static int count_writable_mappings(struct kvm_vcpu *vcpu) 3272void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
3273{
3274 unsigned long *rmapp;
3275 struct kvm_mmu_page *rev_sp;
3276 gfn_t gfn;
3277
3278 if (*sptep & PT_WRITABLE_MASK) {
3279 rev_sp = page_header(__pa(sptep));
3280 gfn = rev_sp->gfns[sptep - rev_sp->spt];
3281
3282 if (!gfn_to_memslot(kvm, gfn)) {
3283 if (!printk_ratelimit())
3284 return;
3285 printk(KERN_ERR "%s: no memslot for gfn %ld\n",
3286 audit_msg, gfn);
3287 printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
3288 audit_msg, sptep - rev_sp->spt,
3289 rev_sp->gfn);
3290 dump_stack();
3291 return;
3292 }
3293
3294 rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
3295 is_large_pte(*sptep));
3296 if (!*rmapp) {
3297 if (!printk_ratelimit())
3298 return;
3299 printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3300 audit_msg, *sptep);
3301 dump_stack();
3302 }
3303 }
3304
3305}
3306
3307void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3308{
3309 mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3310}
3311
3312static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3137{ 3313{
3138 int nmaps = 0;
3139 struct kvm_mmu_page *sp; 3314 struct kvm_mmu_page *sp;
3140 int i; 3315 int i;
3141 3316
@@ -3152,20 +3327,16 @@ static int count_writable_mappings(struct kvm_vcpu *vcpu)
3152 continue; 3327 continue;
3153 if (!(ent & PT_WRITABLE_MASK)) 3328 if (!(ent & PT_WRITABLE_MASK))
3154 continue; 3329 continue;
3155 ++nmaps; 3330 inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
3156 } 3331 }
3157 } 3332 }
3158 return nmaps; 3333 return;
3159} 3334}
3160 3335
3161static void audit_rmap(struct kvm_vcpu *vcpu) 3336static void audit_rmap(struct kvm_vcpu *vcpu)
3162{ 3337{
3163 int n_rmap = count_rmaps(vcpu); 3338 check_writable_mappings_rmap(vcpu);
3164 int n_actual = count_writable_mappings(vcpu); 3339 count_rmaps(vcpu);
3165
3166 if (n_rmap != n_actual)
3167 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
3168 __func__, audit_msg, n_rmap, n_actual);
3169} 3340}
3170 3341
3171static void audit_write_protection(struct kvm_vcpu *vcpu) 3342static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -3173,20 +3344,28 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
3173 struct kvm_mmu_page *sp; 3344 struct kvm_mmu_page *sp;
3174 struct kvm_memory_slot *slot; 3345 struct kvm_memory_slot *slot;
3175 unsigned long *rmapp; 3346 unsigned long *rmapp;
3347 u64 *spte;
3176 gfn_t gfn; 3348 gfn_t gfn;
3177 3349
3178 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) { 3350 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3179 if (sp->role.direct) 3351 if (sp->role.direct)
3180 continue; 3352 continue;
3353 if (sp->unsync)
3354 continue;
3181 3355
3182 gfn = unalias_gfn(vcpu->kvm, sp->gfn); 3356 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3183 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn); 3357 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3184 rmapp = &slot->rmap[gfn - slot->base_gfn]; 3358 rmapp = &slot->rmap[gfn - slot->base_gfn];
3185 if (*rmapp) 3359
3186 printk(KERN_ERR "%s: (%s) shadow page has writable" 3360 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3187 " mappings: gfn %lx role %x\n", 3361 while (spte) {
3362 if (*spte & PT_WRITABLE_MASK)
3363 printk(KERN_ERR "%s: (%s) shadow page has "
3364 "writable mappings: gfn %lx role %x\n",
3188 __func__, audit_msg, sp->gfn, 3365 __func__, audit_msg, sp->gfn,
3189 sp->role.word); 3366 sp->role.word);
3367 spte = rmap_next(vcpu->kvm, rmapp, spte);
3368 }
3190 } 3369 }
3191} 3370}
3192 3371
@@ -3198,7 +3377,9 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3198 audit_msg = msg; 3377 audit_msg = msg;
3199 audit_rmap(vcpu); 3378 audit_rmap(vcpu);
3200 audit_write_protection(vcpu); 3379 audit_write_protection(vcpu);
3201 audit_mappings(vcpu); 3380 if (strcmp("pre pte write", audit_msg) != 0)
3381 audit_mappings(vcpu);
3382 audit_writable_sptes_have_rmaps(vcpu);
3202 dbg = olddbg; 3383 dbg = olddbg;
3203} 3384}
3204 3385
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3494a2fb136e..61a1b3884b49 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -37,6 +37,8 @@
37#define PT32_ROOT_LEVEL 2 37#define PT32_ROOT_LEVEL 2
38#define PT32E_ROOT_LEVEL 3 38#define PT32E_ROOT_LEVEL 3
39 39
40int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
41
40static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) 42static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
41{ 43{
42 if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES)) 44 if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
@@ -75,7 +77,7 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
75 return vcpu->arch.cr0 & X86_CR0_PG; 77 return vcpu->arch.cr0 & X86_CR0_PG;
76} 78}
77 79
78static inline int is_present_pte(unsigned long pte) 80static inline int is_present_gpte(unsigned long pte)
79{ 81{
80 return pte & PT_PRESENT_MASK; 82 return pte & PT_PRESENT_MASK;
81} 83}
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
new file mode 100644
index 000000000000..3e4a5c6ca2a9
--- /dev/null
+++ b/arch/x86/kvm/mmutrace.h
@@ -0,0 +1,220 @@
1#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVMMMU_H
3
4#include <linux/tracepoint.h>
5#include <linux/ftrace_event.h>
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM kvmmmu
9#define TRACE_INCLUDE_PATH .
10#define TRACE_INCLUDE_FILE mmutrace
11
12#define KVM_MMU_PAGE_FIELDS \
13 __field(__u64, gfn) \
14 __field(__u32, role) \
15 __field(__u32, root_count) \
16 __field(__u32, unsync)
17
18#define KVM_MMU_PAGE_ASSIGN(sp) \
19 __entry->gfn = sp->gfn; \
20 __entry->role = sp->role.word; \
21 __entry->root_count = sp->root_count; \
22 __entry->unsync = sp->unsync;
23
24#define KVM_MMU_PAGE_PRINTK() ({ \
25 const char *ret = p->buffer + p->len; \
26 static const char *access_str[] = { \
27 "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
28 }; \
29 union kvm_mmu_page_role role; \
30 \
31 role.word = __entry->role; \
32 \
33 trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \
34 " %snxe root %u %s%c", \
35 __entry->gfn, role.level, role.glevels, \
36 role.quadrant, \
37 role.direct ? " direct" : "", \
38 access_str[role.access], \
39 role.invalid ? " invalid" : "", \
40 role.cr4_pge ? "" : "!", \
41 role.nxe ? "" : "!", \
42 __entry->root_count, \
43 __entry->unsync ? "unsync" : "sync", 0); \
44 ret; \
45 })
46
47#define kvm_mmu_trace_pferr_flags \
48 { PFERR_PRESENT_MASK, "P" }, \
49 { PFERR_WRITE_MASK, "W" }, \
50 { PFERR_USER_MASK, "U" }, \
51 { PFERR_RSVD_MASK, "RSVD" }, \
52 { PFERR_FETCH_MASK, "F" }
53
54/*
55 * A pagetable walk has started
56 */
57TRACE_EVENT(
58 kvm_mmu_pagetable_walk,
59 TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
60 TP_ARGS(addr, write_fault, user_fault, fetch_fault),
61
62 TP_STRUCT__entry(
63 __field(__u64, addr)
64 __field(__u32, pferr)
65 ),
66
67 TP_fast_assign(
68 __entry->addr = addr;
69 __entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
70 | (!!fetch_fault << 4);
71 ),
72
73 TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
74 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
75);
76
77
78/* We just walked a paging element */
79TRACE_EVENT(
80 kvm_mmu_paging_element,
81 TP_PROTO(u64 pte, int level),
82 TP_ARGS(pte, level),
83
84 TP_STRUCT__entry(
85 __field(__u64, pte)
86 __field(__u32, level)
87 ),
88
89 TP_fast_assign(
90 __entry->pte = pte;
91 __entry->level = level;
92 ),
93
94 TP_printk("pte %llx level %u", __entry->pte, __entry->level)
95);
96
97/* We set a pte accessed bit */
98TRACE_EVENT(
99 kvm_mmu_set_accessed_bit,
100 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
101 TP_ARGS(table_gfn, index, size),
102
103 TP_STRUCT__entry(
104 __field(__u64, gpa)
105 ),
106
107 TP_fast_assign(
108 __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
109 + index * size;
110 ),
111
112 TP_printk("gpa %llx", __entry->gpa)
113);
114
115/* We set a pte dirty bit */
116TRACE_EVENT(
117 kvm_mmu_set_dirty_bit,
118 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size),
119 TP_ARGS(table_gfn, index, size),
120
121 TP_STRUCT__entry(
122 __field(__u64, gpa)
123 ),
124
125 TP_fast_assign(
126 __entry->gpa = ((u64)table_gfn << PAGE_SHIFT)
127 + index * size;
128 ),
129
130 TP_printk("gpa %llx", __entry->gpa)
131);
132
133TRACE_EVENT(
134 kvm_mmu_walker_error,
135 TP_PROTO(u32 pferr),
136 TP_ARGS(pferr),
137
138 TP_STRUCT__entry(
139 __field(__u32, pferr)
140 ),
141
142 TP_fast_assign(
143 __entry->pferr = pferr;
144 ),
145
146 TP_printk("pferr %x %s", __entry->pferr,
147 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags))
148);
149
150TRACE_EVENT(
151 kvm_mmu_get_page,
152 TP_PROTO(struct kvm_mmu_page *sp, bool created),
153 TP_ARGS(sp, created),
154
155 TP_STRUCT__entry(
156 KVM_MMU_PAGE_FIELDS
157 __field(bool, created)
158 ),
159
160 TP_fast_assign(
161 KVM_MMU_PAGE_ASSIGN(sp)
162 __entry->created = created;
163 ),
164
165 TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(),
166 __entry->created ? "new" : "existing")
167);
168
169TRACE_EVENT(
170 kvm_mmu_sync_page,
171 TP_PROTO(struct kvm_mmu_page *sp),
172 TP_ARGS(sp),
173
174 TP_STRUCT__entry(
175 KVM_MMU_PAGE_FIELDS
176 ),
177
178 TP_fast_assign(
179 KVM_MMU_PAGE_ASSIGN(sp)
180 ),
181
182 TP_printk("%s", KVM_MMU_PAGE_PRINTK())
183);
184
185TRACE_EVENT(
186 kvm_mmu_unsync_page,
187 TP_PROTO(struct kvm_mmu_page *sp),
188 TP_ARGS(sp),
189
190 TP_STRUCT__entry(
191 KVM_MMU_PAGE_FIELDS
192 ),
193
194 TP_fast_assign(
195 KVM_MMU_PAGE_ASSIGN(sp)
196 ),
197
198 TP_printk("%s", KVM_MMU_PAGE_PRINTK())
199);
200
201TRACE_EVENT(
202 kvm_mmu_zap_page,
203 TP_PROTO(struct kvm_mmu_page *sp),
204 TP_ARGS(sp),
205
206 TP_STRUCT__entry(
207 KVM_MMU_PAGE_FIELDS
208 ),
209
210 TP_fast_assign(
211 KVM_MMU_PAGE_ASSIGN(sp)
212 ),
213
214 TP_printk("%s", KVM_MMU_PAGE_PRINTK())
215);
216
217#endif /* _TRACE_KVMMMU_H */
218
219/* This part must be outside protection */
220#include <trace/define_trace.h>
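The kvm_mmu_pagetable_walk event above packs its three fault booleans so they land on the same bit positions as the PFERR_* masks listed in kvm_mmu_trace_pferr_flags, which is what lets __print_flags() decode the field. A standalone sketch of that packing; the mask values are an assumption mirroring mmu.c (bit 1 write, bit 2 user, bit 4 fetch):

#define PFERR_WRITE_MASK (1U << 1)	/* assumed, mirrors mmu.c */
#define PFERR_USER_MASK  (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

static inline u32 pack_pferr(int write_fault, int user_fault, int fetch_fault)
{
	return (!!write_fault << 1) | (!!user_fault << 2) | (!!fetch_fault << 4);
}

/* pack_pferr(1, 0, 1) == (PFERR_WRITE_MASK | PFERR_FETCH_MASK),
 * which __print_flags() renders as "W|F". */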
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 67785f635399..d2fec9c12d22 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -27,7 +27,8 @@
27 #define guest_walker guest_walker64 27 #define guest_walker guest_walker64
28 #define FNAME(name) paging##64_##name 28 #define FNAME(name) paging##64_##name
29 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK 29 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
30 #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK 30 #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
31 #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
31 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) 32 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
32 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level) 33 #define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
33 #define PT_LEVEL_BITS PT64_LEVEL_BITS 34 #define PT_LEVEL_BITS PT64_LEVEL_BITS
@@ -43,7 +44,8 @@
43 #define guest_walker guest_walker32 44 #define guest_walker guest_walker32
44 #define FNAME(name) paging##32_##name 45 #define FNAME(name) paging##32_##name
45 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK 46 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
46 #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK 47 #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
48 #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
47 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) 49 #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
48 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level) 50 #define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
49 #define PT_LEVEL_BITS PT32_LEVEL_BITS 51 #define PT_LEVEL_BITS PT32_LEVEL_BITS
@@ -53,8 +55,8 @@
53 #error Invalid PTTYPE value 55 #error Invalid PTTYPE value
54#endif 56#endif
55 57
56#define gpte_to_gfn FNAME(gpte_to_gfn) 58#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
57#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde) 59#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
58 60
59/* 61/*
60 * The guest_walker structure emulates the behavior of the hardware page 62 * The guest_walker structure emulates the behavior of the hardware page
@@ -71,14 +73,9 @@ struct guest_walker {
71 u32 error_code; 73 u32 error_code;
72}; 74};
73 75
74static gfn_t gpte_to_gfn(pt_element_t gpte) 76static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
75{ 77{
76 return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT; 78 return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
77}
78
79static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
80{
81 return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
82} 79}
83 80
84static bool FNAME(cmpxchg_gpte)(struct kvm *kvm, 81static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
@@ -125,14 +122,16 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
125 gpa_t pte_gpa; 122 gpa_t pte_gpa;
126 int rsvd_fault = 0; 123 int rsvd_fault = 0;
127 124
128 pgprintk("%s: addr %lx\n", __func__, addr); 125 trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
126 fetch_fault);
129walk: 127walk:
130 walker->level = vcpu->arch.mmu.root_level; 128 walker->level = vcpu->arch.mmu.root_level;
131 pte = vcpu->arch.cr3; 129 pte = vcpu->arch.cr3;
132#if PTTYPE == 64 130#if PTTYPE == 64
133 if (!is_long_mode(vcpu)) { 131 if (!is_long_mode(vcpu)) {
134 pte = vcpu->arch.pdptrs[(addr >> 30) & 3]; 132 pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
135 if (!is_present_pte(pte)) 133 trace_kvm_mmu_paging_element(pte, walker->level);
134 if (!is_present_gpte(pte))
136 goto not_present; 135 goto not_present;
137 --walker->level; 136 --walker->level;
138 } 137 }
@@ -150,12 +149,11 @@ walk:
150 pte_gpa += index * sizeof(pt_element_t); 149 pte_gpa += index * sizeof(pt_element_t);
151 walker->table_gfn[walker->level - 1] = table_gfn; 150 walker->table_gfn[walker->level - 1] = table_gfn;
152 walker->pte_gpa[walker->level - 1] = pte_gpa; 151 walker->pte_gpa[walker->level - 1] = pte_gpa;
153 pgprintk("%s: table_gfn[%d] %lx\n", __func__,
154 walker->level - 1, table_gfn);
155 152
156 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte)); 153 kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
154 trace_kvm_mmu_paging_element(pte, walker->level);
157 155
158 if (!is_present_pte(pte)) 156 if (!is_present_gpte(pte))
159 goto not_present; 157 goto not_present;
160 158
161 rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level); 159 rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
@@ -175,6 +173,8 @@ walk:
175#endif 173#endif
176 174
177 if (!(pte & PT_ACCESSED_MASK)) { 175 if (!(pte & PT_ACCESSED_MASK)) {
176 trace_kvm_mmu_set_accessed_bit(table_gfn, index,
177 sizeof(pte));
178 mark_page_dirty(vcpu->kvm, table_gfn); 178 mark_page_dirty(vcpu->kvm, table_gfn);
179 if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, 179 if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
180 index, pte, pte|PT_ACCESSED_MASK)) 180 index, pte, pte|PT_ACCESSED_MASK))
@@ -186,18 +186,24 @@ walk:
186 186
187 walker->ptes[walker->level - 1] = pte; 187 walker->ptes[walker->level - 1] = pte;
188 188
189 if (walker->level == PT_PAGE_TABLE_LEVEL) { 189 if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
190 walker->gfn = gpte_to_gfn(pte); 190 ((walker->level == PT_DIRECTORY_LEVEL) &&
191 break; 191 (pte & PT_PAGE_SIZE_MASK) &&
192 } 192 (PTTYPE == 64 || is_pse(vcpu))) ||
193 193 ((walker->level == PT_PDPE_LEVEL) &&
194 if (walker->level == PT_DIRECTORY_LEVEL 194 (pte & PT_PAGE_SIZE_MASK) &&
195 && (pte & PT_PAGE_SIZE_MASK) 195 is_long_mode(vcpu))) {
196 && (PTTYPE == 64 || is_pse(vcpu))) { 196 int lvl = walker->level;
197 walker->gfn = gpte_to_gfn_pde(pte); 197
198 walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL); 198 walker->gfn = gpte_to_gfn_lvl(pte, lvl);
199 if (PTTYPE == 32 && is_cpuid_PSE36()) 199 walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
200 >> PAGE_SHIFT;
201
202 if (PTTYPE == 32 &&
203 walker->level == PT_DIRECTORY_LEVEL &&
204 is_cpuid_PSE36())
200 walker->gfn += pse36_gfn_delta(pte); 205 walker->gfn += pse36_gfn_delta(pte);
206
201 break; 207 break;
202 } 208 }
203 209
@@ -205,9 +211,10 @@ walk:
205 --walker->level; 211 --walker->level;
206 } 212 }
207 213
208 if (write_fault && !is_dirty_pte(pte)) { 214 if (write_fault && !is_dirty_gpte(pte)) {
209 bool ret; 215 bool ret;
210 216
217 trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
211 mark_page_dirty(vcpu->kvm, table_gfn); 218 mark_page_dirty(vcpu->kvm, table_gfn);
212 ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte, 219 ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
213 pte|PT_DIRTY_MASK); 220 pte|PT_DIRTY_MASK);
@@ -239,6 +246,7 @@ err:
239 walker->error_code |= PFERR_FETCH_MASK; 246 walker->error_code |= PFERR_FETCH_MASK;
240 if (rsvd_fault) 247 if (rsvd_fault)
241 walker->error_code |= PFERR_RSVD_MASK; 248 walker->error_code |= PFERR_RSVD_MASK;
249 trace_kvm_mmu_walker_error(walker->error_code);
242 return 0; 250 return 0;
243} 251}
244 252
@@ -248,12 +256,11 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
248 pt_element_t gpte; 256 pt_element_t gpte;
249 unsigned pte_access; 257 unsigned pte_access;
250 pfn_t pfn; 258 pfn_t pfn;
251 int largepage = vcpu->arch.update_pte.largepage;
252 259
253 gpte = *(const pt_element_t *)pte; 260 gpte = *(const pt_element_t *)pte;
254 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) { 261 if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
255 if (!is_present_pte(gpte)) 262 if (!is_present_gpte(gpte))
256 set_shadow_pte(spte, shadow_notrap_nonpresent_pte); 263 __set_spte(spte, shadow_notrap_nonpresent_pte);
257 return; 264 return;
258 } 265 }
259 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); 266 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -267,7 +274,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
267 return; 274 return;
268 kvm_get_pfn(pfn); 275 kvm_get_pfn(pfn);
269 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, 276 mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
270 gpte & PT_DIRTY_MASK, NULL, largepage, 277 gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL,
271 gpte_to_gfn(gpte), pfn, true); 278 gpte_to_gfn(gpte), pfn, true);
272} 279}
273 280
@@ -276,7 +283,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
276 */ 283 */
277static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, 284static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
278 struct guest_walker *gw, 285 struct guest_walker *gw,
279 int user_fault, int write_fault, int largepage, 286 int user_fault, int write_fault, int hlevel,
280 int *ptwrite, pfn_t pfn) 287 int *ptwrite, pfn_t pfn)
281{ 288{
282 unsigned access = gw->pt_access; 289 unsigned access = gw->pt_access;
@@ -289,19 +296,18 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
289 pt_element_t curr_pte; 296 pt_element_t curr_pte;
290 struct kvm_shadow_walk_iterator iterator; 297 struct kvm_shadow_walk_iterator iterator;
291 298
292 if (!is_present_pte(gw->ptes[gw->level - 1])) 299 if (!is_present_gpte(gw->ptes[gw->level - 1]))
293 return NULL; 300 return NULL;
294 301
295 for_each_shadow_entry(vcpu, addr, iterator) { 302 for_each_shadow_entry(vcpu, addr, iterator) {
296 level = iterator.level; 303 level = iterator.level;
297 sptep = iterator.sptep; 304 sptep = iterator.sptep;
298 if (level == PT_PAGE_TABLE_LEVEL 305 if (iterator.level == hlevel) {
299 || (largepage && level == PT_DIRECTORY_LEVEL)) {
300 mmu_set_spte(vcpu, sptep, access, 306 mmu_set_spte(vcpu, sptep, access,
301 gw->pte_access & access, 307 gw->pte_access & access,
302 user_fault, write_fault, 308 user_fault, write_fault,
303 gw->ptes[gw->level-1] & PT_DIRTY_MASK, 309 gw->ptes[gw->level-1] & PT_DIRTY_MASK,
304 ptwrite, largepage, 310 ptwrite, level,
305 gw->gfn, pfn, false); 311 gw->gfn, pfn, false);
306 break; 312 break;
307 } 313 }
@@ -311,16 +317,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
311 317
312 if (is_large_pte(*sptep)) { 318 if (is_large_pte(*sptep)) {
313 rmap_remove(vcpu->kvm, sptep); 319 rmap_remove(vcpu->kvm, sptep);
314 set_shadow_pte(sptep, shadow_trap_nonpresent_pte); 320 __set_spte(sptep, shadow_trap_nonpresent_pte);
315 kvm_flush_remote_tlbs(vcpu->kvm); 321 kvm_flush_remote_tlbs(vcpu->kvm);
316 } 322 }
317 323
318 if (level == PT_DIRECTORY_LEVEL 324 if (level <= gw->level) {
319 && gw->level == PT_DIRECTORY_LEVEL) { 325 int delta = level - gw->level + 1;
320 direct = 1; 326 direct = 1;
321 if (!is_dirty_pte(gw->ptes[level - 1])) 327 if (!is_dirty_gpte(gw->ptes[level - delta]))
322 access &= ~ACC_WRITE_MASK; 328 access &= ~ACC_WRITE_MASK;
323 table_gfn = gpte_to_gfn(gw->ptes[level - 1]); 329 table_gfn = gpte_to_gfn(gw->ptes[level - delta]);
330 /* advance table_gfn when emulating 1gb pages with 4k */
331 if (delta == 0)
332 table_gfn += PT_INDEX(addr, level);
324 } else { 333 } else {
325 direct = 0; 334 direct = 0;
326 table_gfn = gw->table_gfn[level - 2]; 335 table_gfn = gw->table_gfn[level - 2];
@@ -369,11 +378,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
369 int user_fault = error_code & PFERR_USER_MASK; 378 int user_fault = error_code & PFERR_USER_MASK;
370 int fetch_fault = error_code & PFERR_FETCH_MASK; 379 int fetch_fault = error_code & PFERR_FETCH_MASK;
371 struct guest_walker walker; 380 struct guest_walker walker;
372 u64 *shadow_pte; 381 u64 *sptep;
373 int write_pt = 0; 382 int write_pt = 0;
374 int r; 383 int r;
375 pfn_t pfn; 384 pfn_t pfn;
376 int largepage = 0; 385 int level = PT_PAGE_TABLE_LEVEL;
377 unsigned long mmu_seq; 386 unsigned long mmu_seq;
378 387
379 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code); 388 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -399,14 +408,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
399 return 0; 408 return 0;
400 } 409 }
401 410
402 if (walker.level == PT_DIRECTORY_LEVEL) { 411 if (walker.level >= PT_DIRECTORY_LEVEL) {
403 gfn_t large_gfn; 412 level = min(walker.level, mapping_level(vcpu, walker.gfn));
404 large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1); 413 walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
405 if (is_largepage_backed(vcpu, large_gfn)) {
406 walker.gfn = large_gfn;
407 largepage = 1;
408 }
409 } 414 }
415
410 mmu_seq = vcpu->kvm->mmu_notifier_seq; 416 mmu_seq = vcpu->kvm->mmu_notifier_seq;
411 smp_rmb(); 417 smp_rmb();
412 pfn = gfn_to_pfn(vcpu->kvm, walker.gfn); 418 pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
@@ -422,11 +428,10 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
422 if (mmu_notifier_retry(vcpu, mmu_seq)) 428 if (mmu_notifier_retry(vcpu, mmu_seq))
423 goto out_unlock; 429 goto out_unlock;
424 kvm_mmu_free_some_pages(vcpu); 430 kvm_mmu_free_some_pages(vcpu);
425 shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault, 431 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
426 largepage, &write_pt, pfn); 432 level, &write_pt, pfn);
427
428 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__, 433 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
429 shadow_pte, *shadow_pte, write_pt); 434 sptep, *sptep, write_pt);
430 435
431 if (!write_pt) 436 if (!write_pt)
432 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */ 437 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
@@ -459,8 +464,9 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
459 sptep = iterator.sptep; 464 sptep = iterator.sptep;
460 465
461 /* FIXME: properly handle invlpg on large guest pages */ 466 /* FIXME: properly handle invlpg on large guest pages */
462 if (level == PT_PAGE_TABLE_LEVEL || 467 if (level == PT_PAGE_TABLE_LEVEL ||
463 ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) { 468 ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
469 ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
464 struct kvm_mmu_page *sp = page_header(__pa(sptep)); 470 struct kvm_mmu_page *sp = page_header(__pa(sptep));
465 471
466 pte_gpa = (sp->gfn << PAGE_SHIFT); 472 pte_gpa = (sp->gfn << PAGE_SHIFT);
@@ -472,7 +478,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
472 --vcpu->kvm->stat.lpages; 478 --vcpu->kvm->stat.lpages;
473 need_flush = 1; 479 need_flush = 1;
474 } 480 }
475 set_shadow_pte(sptep, shadow_trap_nonpresent_pte); 481 __set_spte(sptep, shadow_trap_nonpresent_pte);
476 break; 482 break;
477 } 483 }
478 484
@@ -489,7 +495,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
489 if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte, 495 if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
490 sizeof(pt_element_t))) 496 sizeof(pt_element_t)))
491 return; 497 return;
492 if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) { 498 if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
493 if (mmu_topup_memory_caches(vcpu)) 499 if (mmu_topup_memory_caches(vcpu))
494 return; 500 return;
495 kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte, 501 kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
@@ -536,7 +542,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
536 r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt); 542 r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
537 pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t); 543 pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
538 for (j = 0; j < ARRAY_SIZE(pt); ++j) 544 for (j = 0; j < ARRAY_SIZE(pt); ++j)
539 if (r || is_present_pte(pt[j])) 545 if (r || is_present_gpte(pt[j]))
540 sp->spt[i+j] = shadow_trap_nonpresent_pte; 546 sp->spt[i+j] = shadow_trap_nonpresent_pte;
541 else 547 else
542 sp->spt[i+j] = shadow_notrap_nonpresent_pte; 548 sp->spt[i+j] = shadow_notrap_nonpresent_pte;
@@ -574,23 +580,23 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
574 sizeof(pt_element_t))) 580 sizeof(pt_element_t)))
575 return -EINVAL; 581 return -EINVAL;
576 582
577 if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) || 583 if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
578 !(gpte & PT_ACCESSED_MASK)) { 584 !(gpte & PT_ACCESSED_MASK)) {
579 u64 nonpresent; 585 u64 nonpresent;
580 586
581 rmap_remove(vcpu->kvm, &sp->spt[i]); 587 rmap_remove(vcpu->kvm, &sp->spt[i]);
582 if (is_present_pte(gpte)) 588 if (is_present_gpte(gpte))
583 nonpresent = shadow_trap_nonpresent_pte; 589 nonpresent = shadow_trap_nonpresent_pte;
584 else 590 else
585 nonpresent = shadow_notrap_nonpresent_pte; 591 nonpresent = shadow_notrap_nonpresent_pte;
586 set_shadow_pte(&sp->spt[i], nonpresent); 592 __set_spte(&sp->spt[i], nonpresent);
587 continue; 593 continue;
588 } 594 }
589 595
590 nr_present++; 596 nr_present++;
591 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); 597 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
592 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, 598 set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
593 is_dirty_pte(gpte), 0, gfn, 599 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
594 spte_to_pfn(sp->spt[i]), true, false); 600 spte_to_pfn(sp->spt[i]), true, false);
595 } 601 }
596 602
@@ -603,9 +609,10 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
603#undef PT_BASE_ADDR_MASK 609#undef PT_BASE_ADDR_MASK
604#undef PT_INDEX 610#undef PT_INDEX
605#undef PT_LEVEL_MASK 611#undef PT_LEVEL_MASK
606#undef PT_DIR_BASE_ADDR_MASK 612#undef PT_LVL_ADDR_MASK
613#undef PT_LVL_OFFSET_MASK
607#undef PT_LEVEL_BITS 614#undef PT_LEVEL_BITS
608#undef PT_MAX_FULL_LEVELS 615#undef PT_MAX_FULL_LEVELS
609#undef gpte_to_gfn 616#undef gpte_to_gfn
610#undef gpte_to_gfn_pde 617#undef gpte_to_gfn_lvl
611#undef CMPXCHG 618#undef CMPXCHG
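With the PT_LVL_* macros above, walk_addr() computes the frame of a 2MB or 1GB guest mapping in one step: the gpte contributes the bits above the level boundary and the faulting address contributes the bits below it. A worked sketch of that arithmetic for the 64-bit layout; the shift/mask construction is an assumption modeled on PT64_LVL_ADDR_MASK and PT64_LVL_OFFSET_MASK, with PAGE_SHIFT == 12:

/* Level 1 = 4KB, 2 = 2MB, 3 = 1GB; each level covers 9 more address bits. */
static u64 large_gfn(u64 gpte, u64 addr, int lvl)
{
	unsigned shift = 12 + (lvl - 1) * 9;	/* 21 for 2MB, 30 for 1GB */
	u64 base_mask = ((1ULL << 52) - 1) & ~((1ULL << shift) - 1);
	u64 off_mask = (1ULL << shift) - (1ULL << 12);

	/* gpte_to_gfn_lvl() part plus the PT_LVL_OFFSET_MASK() part */
	return ((gpte & base_mask) >> 12) + ((addr & off_mask) >> 12);
}

/* For a 2MB pde mapping gpa 0x40000000 and a fault at offset 0x3000
 * within it, large_gfn(pde, addr, 2) == 0x40000 + 0x3. */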
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b1f658ad2f06..944cc9c04b3c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -15,7 +15,6 @@
15 */ 15 */
16#include <linux/kvm_host.h> 16#include <linux/kvm_host.h>
17 17
18#include "kvm_svm.h"
19#include "irq.h" 18#include "irq.h"
20#include "mmu.h" 19#include "mmu.h"
21#include "kvm_cache_regs.h" 20#include "kvm_cache_regs.h"
@@ -26,10 +25,12 @@
26#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
27#include <linux/highmem.h> 26#include <linux/highmem.h>
28#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/ftrace_event.h>
29 29
30#include <asm/desc.h> 30#include <asm/desc.h>
31 31
32#include <asm/virtext.h> 32#include <asm/virtext.h>
33#include "trace.h"
33 34
34#define __ex(x) __kvm_handle_fault_on_reboot(x) 35#define __ex(x) __kvm_handle_fault_on_reboot(x)
35 36
@@ -46,6 +47,10 @@ MODULE_LICENSE("GPL");
46#define SVM_FEATURE_LBRV (1 << 1) 47#define SVM_FEATURE_LBRV (1 << 1)
47#define SVM_FEATURE_SVML (1 << 2) 48#define SVM_FEATURE_SVML (1 << 2)
48 49
50#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
51#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
52#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
53
49#define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) 54#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
50 55
51/* Turn on to get debugging output*/ 56/* Turn on to get debugging output*/
@@ -57,6 +62,58 @@ MODULE_LICENSE("GPL");
57#define nsvm_printk(fmt, args...) do {} while(0) 62#define nsvm_printk(fmt, args...) do {} while(0)
58#endif 63#endif
59 64
65static const u32 host_save_user_msrs[] = {
66#ifdef CONFIG_X86_64
67 MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
68 MSR_FS_BASE,
69#endif
70 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
71};
72
73#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
74
75struct kvm_vcpu;
76
77struct nested_state {
78 struct vmcb *hsave;
79 u64 hsave_msr;
80 u64 vmcb;
81
82 /* These are the merged vectors */
83 u32 *msrpm;
84
85 /* gpa pointers to the real vectors */
86 u64 vmcb_msrpm;
87
88 /* cache for intercepts of the guest */
89 u16 intercept_cr_read;
90 u16 intercept_cr_write;
91 u16 intercept_dr_read;
92 u16 intercept_dr_write;
93 u32 intercept_exceptions;
94 u64 intercept;
95
96};
97
98struct vcpu_svm {
99 struct kvm_vcpu vcpu;
100 struct vmcb *vmcb;
101 unsigned long vmcb_pa;
102 struct svm_cpu_data *svm_data;
103 uint64_t asid_generation;
104 uint64_t sysenter_esp;
105 uint64_t sysenter_eip;
106
107 u64 next_rip;
108
109 u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
110 u64 host_gs_base;
111
112 u32 *msrpm;
113
114 struct nested_state nested;
115};
116
60/* enable NPT for AMD64 and X86 with PAE */ 117/* enable NPT for AMD64 and X86 with PAE */
61#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 118#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
62static bool npt_enabled = true; 119static bool npt_enabled = true;
@@ -67,15 +124,14 @@ static int npt = 1;
67 124
68module_param(npt, int, S_IRUGO); 125module_param(npt, int, S_IRUGO);
69 126
70static int nested = 0; 127static int nested = 1;
71module_param(nested, int, S_IRUGO); 128module_param(nested, int, S_IRUGO);
72 129
73static void svm_flush_tlb(struct kvm_vcpu *vcpu); 130static void svm_flush_tlb(struct kvm_vcpu *vcpu);
131static void svm_complete_interrupts(struct vcpu_svm *svm);
74 132
75static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override); 133static int nested_svm_exit_handled(struct vcpu_svm *svm);
76static int nested_svm_vmexit(struct vcpu_svm *svm); 134static int nested_svm_vmexit(struct vcpu_svm *svm);
77static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
78 void *arg2, void *opaque);
79static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, 135static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
80 bool has_error_code, u32 error_code); 136 bool has_error_code, u32 error_code);
81 137
@@ -86,7 +142,22 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
86 142
87static inline bool is_nested(struct vcpu_svm *svm) 143static inline bool is_nested(struct vcpu_svm *svm)
88{ 144{
89 return svm->nested_vmcb; 145 return svm->nested.vmcb;
146}
147
148static inline void enable_gif(struct vcpu_svm *svm)
149{
150 svm->vcpu.arch.hflags |= HF_GIF_MASK;
151}
152
153static inline void disable_gif(struct vcpu_svm *svm)
154{
155 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
156}
157
158static inline bool gif_set(struct vcpu_svm *svm)
159{
160 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
90} 161}
91 162
92static unsigned long iopm_base; 163static unsigned long iopm_base;
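The enable_gif()/disable_gif()/gif_set() wrappers above replace open-coded arithmetic on HF_GIF_MASK in vcpu->arch.hflags. A hedged sketch of the kind of predicate they make readable; the helper below is hypothetical and is not the full event-injection logic:

static bool svm_events_blocked(struct vcpu_svm *svm)
{
	/* no interrupts, NMIs or SMIs are delivered while the global
	 * interrupt flag is clear, e.g. between nested #VMEXIT
	 * emulation (disable_gif) and the guest's next STGI */
	return !gif_set(svm);
}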
@@ -147,19 +218,6 @@ static inline void invlpga(unsigned long addr, u32 asid)
147 asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid)); 218 asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
148} 219}
149 220
150static inline unsigned long kvm_read_cr2(void)
151{
152 unsigned long cr2;
153
154 asm volatile ("mov %%cr2, %0" : "=r" (cr2));
155 return cr2;
156}
157
158static inline void kvm_write_cr2(unsigned long val)
159{
160 asm volatile ("mov %0, %%cr2" :: "r" (val));
161}
162
163static inline void force_new_asid(struct kvm_vcpu *vcpu) 221static inline void force_new_asid(struct kvm_vcpu *vcpu)
164{ 222{
165 to_svm(vcpu)->asid_generation--; 223 to_svm(vcpu)->asid_generation--;
@@ -263,7 +321,7 @@ static void svm_hardware_enable(void *garbage)
263 321
264 struct svm_cpu_data *svm_data; 322 struct svm_cpu_data *svm_data;
265 uint64_t efer; 323 uint64_t efer;
266 struct desc_ptr gdt_descr; 324 struct descriptor_table gdt_descr;
267 struct desc_struct *gdt; 325 struct desc_struct *gdt;
268 int me = raw_smp_processor_id(); 326 int me = raw_smp_processor_id();
269 327
@@ -283,8 +341,8 @@ static void svm_hardware_enable(void *garbage)
283 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1; 341 svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
284 svm_data->next_asid = svm_data->max_asid + 1; 342 svm_data->next_asid = svm_data->max_asid + 1;
285 343
286 asm volatile ("sgdt %0" : "=m"(gdt_descr)); 344 kvm_get_gdt(&gdt_descr);
287 gdt = (struct desc_struct *)gdt_descr.address; 345 gdt = (struct desc_struct *)gdt_descr.base;
288 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS); 346 svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
289 347
290 rdmsrl(MSR_EFER, efer); 348 rdmsrl(MSR_EFER, efer);
@@ -367,8 +425,6 @@ static void svm_vcpu_init_msrpm(u32 *msrpm)
367#endif 425#endif
368 set_msr_interception(msrpm, MSR_K6_STAR, 1, 1); 426 set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
369 set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1); 427 set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
370 set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
371 set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
372} 428}
373 429
374static void svm_enable_lbrv(struct vcpu_svm *svm) 430static void svm_enable_lbrv(struct vcpu_svm *svm)
@@ -595,8 +651,10 @@ static void init_vmcb(struct vcpu_svm *svm)
595 } 651 }
596 force_new_asid(&svm->vcpu); 652 force_new_asid(&svm->vcpu);
597 653
598 svm->nested_vmcb = 0; 654 svm->nested.vmcb = 0;
599 svm->vcpu.arch.hflags = HF_GIF_MASK; 655 svm->vcpu.arch.hflags = 0;
656
657 enable_gif(svm);
600} 658}
601 659
602static int svm_vcpu_reset(struct kvm_vcpu *vcpu) 660static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -605,7 +663,7 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
605 663
606 init_vmcb(svm); 664 init_vmcb(svm);
607 665
608 if (vcpu->vcpu_id != 0) { 666 if (!kvm_vcpu_is_bsp(vcpu)) {
609 kvm_rip_write(vcpu, 0); 667 kvm_rip_write(vcpu, 0);
610 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12; 668 svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
611 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8; 669 svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
@@ -656,9 +714,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
656 hsave_page = alloc_page(GFP_KERNEL); 714 hsave_page = alloc_page(GFP_KERNEL);
657 if (!hsave_page) 715 if (!hsave_page)
658 goto uninit; 716 goto uninit;
659 svm->hsave = page_address(hsave_page); 717 svm->nested.hsave = page_address(hsave_page);
660 718
661 svm->nested_msrpm = page_address(nested_msrpm_pages); 719 svm->nested.msrpm = page_address(nested_msrpm_pages);
662 720
663 svm->vmcb = page_address(page); 721 svm->vmcb = page_address(page);
664 clear_page(svm->vmcb); 722 clear_page(svm->vmcb);
@@ -669,7 +727,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
669 fx_init(&svm->vcpu); 727 fx_init(&svm->vcpu);
670 svm->vcpu.fpu_active = 1; 728 svm->vcpu.fpu_active = 1;
671 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; 729 svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
672 if (svm->vcpu.vcpu_id == 0) 730 if (kvm_vcpu_is_bsp(&svm->vcpu))
673 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; 731 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
674 732
675 return &svm->vcpu; 733 return &svm->vcpu;
@@ -688,8 +746,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
688 746
689 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT)); 747 __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
690 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); 748 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
691 __free_page(virt_to_page(svm->hsave)); 749 __free_page(virt_to_page(svm->nested.hsave));
692 __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER); 750 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
693 kvm_vcpu_uninit(vcpu); 751 kvm_vcpu_uninit(vcpu);
694 kmem_cache_free(kvm_vcpu_cache, svm); 752 kmem_cache_free(kvm_vcpu_cache, svm);
695} 753}
@@ -740,6 +798,18 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
740 to_svm(vcpu)->vmcb->save.rflags = rflags; 798 to_svm(vcpu)->vmcb->save.rflags = rflags;
741} 799}
742 800
801static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
802{
803 switch (reg) {
804 case VCPU_EXREG_PDPTR:
805 BUG_ON(!npt_enabled);
806 load_pdptrs(vcpu, vcpu->arch.cr3);
807 break;
808 default:
809 BUG();
810 }
811}
812
743static void svm_set_vintr(struct vcpu_svm *svm) 813static void svm_set_vintr(struct vcpu_svm *svm)
744{ 814{
745 svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR; 815 svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
@@ -1061,7 +1131,6 @@ static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
1061 val = 0; 1131 val = 0;
1062 } 1132 }
1063 1133
1064 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
1065 return val; 1134 return val;
1066} 1135}
1067 1136
@@ -1070,8 +1139,6 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
1070{ 1139{
1071 struct vcpu_svm *svm = to_svm(vcpu); 1140 struct vcpu_svm *svm = to_svm(vcpu);
1072 1141
1073 KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
1074
1075 *exception = 0; 1142 *exception = 0;
1076 1143
1077 switch (dr) { 1144 switch (dr) {
@@ -1119,25 +1186,9 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1119 fault_address = svm->vmcb->control.exit_info_2; 1186 fault_address = svm->vmcb->control.exit_info_2;
1120 error_code = svm->vmcb->control.exit_info_1; 1187 error_code = svm->vmcb->control.exit_info_1;
1121 1188
1122 if (!npt_enabled) 1189 trace_kvm_page_fault(fault_address, error_code);
1123 KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code, 1190 if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
1124 (u32)fault_address, (u32)(fault_address >> 32), 1191 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1125 handler);
1126 else
1127 KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
1128 (u32)fault_address, (u32)(fault_address >> 32),
1129 handler);
1130 /*
1131 * FIXME: This shouldn't be necessary here, but there is a flush
1132 * missing in the MMU code. Until we find this bug, flush the
1133 * complete TLB here on an NPF
1134 */
1135 if (npt_enabled)
1136 svm_flush_tlb(&svm->vcpu);
1137 else {
1138 if (kvm_event_needs_reinjection(&svm->vcpu))
1139 kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
1140 }
1141 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code); 1192 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
1142} 1193}
1143 1194
@@ -1253,14 +1304,12 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1253 1304
1254static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1305static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1255{ 1306{
1256 KVMTRACE_0D(NMI, &svm->vcpu, handler);
1257 return 1; 1307 return 1;
1258} 1308}
1259 1309
1260static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1310static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1261{ 1311{
1262 ++svm->vcpu.stat.irq_exits; 1312 ++svm->vcpu.stat.irq_exits;
1263 KVMTRACE_0D(INTR, &svm->vcpu, handler);
1264 return 1; 1313 return 1;
1265} 1314}
1266 1315
@@ -1303,44 +1352,39 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm)
1303static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, 1352static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
1304 bool has_error_code, u32 error_code) 1353 bool has_error_code, u32 error_code)
1305{ 1354{
1306 if (is_nested(svm)) { 1355 if (!is_nested(svm))
1307 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; 1356 return 0;
1308 svm->vmcb->control.exit_code_hi = 0;
1309 svm->vmcb->control.exit_info_1 = error_code;
1310 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1311 if (nested_svm_exit_handled(svm, false)) {
1312 nsvm_printk("VMexit -> EXCP 0x%x\n", nr);
1313
1314 nested_svm_vmexit(svm);
1315 return 1;
1316 }
1317 }
1318 1357
1319 return 0; 1358 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1359 svm->vmcb->control.exit_code_hi = 0;
1360 svm->vmcb->control.exit_info_1 = error_code;
1361 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1362
1363 return nested_svm_exit_handled(svm);
1320} 1364}
1321 1365
1322static inline int nested_svm_intr(struct vcpu_svm *svm) 1366static inline int nested_svm_intr(struct vcpu_svm *svm)
1323{ 1367{
1324 if (is_nested(svm)) { 1368 if (!is_nested(svm))
1325 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) 1369 return 0;
1326 return 0;
1327 1370
1328 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) 1371 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1329 return 0; 1372 return 0;
1330 1373
1331 svm->vmcb->control.exit_code = SVM_EXIT_INTR; 1374 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
1375 return 0;
1332 1376
1333 if (nested_svm_exit_handled(svm, false)) { 1377 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1334 nsvm_printk("VMexit -> INTR\n"); 1378
1335 nested_svm_vmexit(svm); 1379 if (nested_svm_exit_handled(svm)) {
1336 return 1; 1380 nsvm_printk("VMexit -> INTR\n");
1337 } 1381 return 1;
1338 } 1382 }
1339 1383
1340 return 0; 1384 return 0;
1341} 1385}
1342 1386
1343static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa) 1387static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx)
1344{ 1388{
1345 struct page *page; 1389 struct page *page;
1346 1390
@@ -1348,236 +1392,246 @@ static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
1348 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT); 1392 page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
1349 up_read(&current->mm->mmap_sem); 1393 up_read(&current->mm->mmap_sem);
1350 1394
1351 if (is_error_page(page)) { 1395 if (is_error_page(page))
1352 printk(KERN_INFO "%s: could not find page at 0x%llx\n", 1396 goto error;
1353 __func__, gpa); 1397
1354 kvm_release_page_clean(page); 1398 return kmap_atomic(page, idx);
1355 kvm_inject_gp(&svm->vcpu, 0); 1399
1356 return NULL; 1400error:
1357 } 1401 kvm_release_page_clean(page);
1358 return page; 1402 kvm_inject_gp(&svm->vcpu, 0);
1403
1404 return NULL;
1359} 1405}
1360 1406
1361static int nested_svm_do(struct vcpu_svm *svm, 1407static void nested_svm_unmap(void *addr, enum km_type idx)
1362 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
1363 int (*handler)(struct vcpu_svm *svm,
1364 void *arg1,
1365 void *arg2,
1366 void *opaque))
1367{ 1408{
1368 struct page *arg1_page; 1409 struct page *page;
1369 struct page *arg2_page = NULL;
1370 void *arg1;
1371 void *arg2 = NULL;
1372 int retval;
1373 1410
1374 arg1_page = nested_svm_get_page(svm, arg1_gpa); 1411 if (!addr)
1375 if(arg1_page == NULL) 1412 return;
1376 return 1;
1377 1413
1378 if (arg2_gpa) { 1414 page = kmap_atomic_to_page(addr);
1379 arg2_page = nested_svm_get_page(svm, arg2_gpa); 1415
1380 if(arg2_page == NULL) { 1416 kunmap_atomic(addr, idx);
1381 kvm_release_page_clean(arg1_page); 1417 kvm_release_page_dirty(page);
1382 return 1; 1418}
1383 } 1419
1384 } 1420static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1421{
1422 u32 param = svm->vmcb->control.exit_info_1 & 1;
1423 u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1424 bool ret = false;
1425 u32 t0, t1;
1426 u8 *msrpm;
1385 1427
1386 arg1 = kmap_atomic(arg1_page, KM_USER0); 1428 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1387 if (arg2_gpa) 1429 return false;
1388 arg2 = kmap_atomic(arg2_page, KM_USER1);
1389 1430
1390 retval = handler(svm, arg1, arg2, opaque); 1431 msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1432
1433 if (!msrpm)
1434 goto out;
1435
1436 switch (msr) {
1437 case 0 ... 0x1fff:
1438 t0 = (msr * 2) % 8;
1439 t1 = msr / 8;
1440 break;
1441 case 0xc0000000 ... 0xc0001fff:
1442 t0 = (8192 + msr - 0xc0000000) * 2;
1443 t1 = (t0 / 8);
1444 t0 %= 8;
1445 break;
1446 case 0xc0010000 ... 0xc0011fff:
1447 t0 = (16384 + msr - 0xc0010000) * 2;
1448 t1 = (t0 / 8);
1449 t0 %= 8;
1450 break;
1451 default:
1452 ret = true;
1453 goto out;
1454 }
1391 1455
1392 kunmap_atomic(arg1, KM_USER0); 1456 ret = msrpm[t1] & ((1 << param) << t0);
1393 if (arg2_gpa)
1394 kunmap_atomic(arg2, KM_USER1);
1395 1457
1396 kvm_release_page_dirty(arg1_page); 1458out:
1397 if (arg2_gpa) 1459 nested_svm_unmap(msrpm, KM_USER0);
1398 kvm_release_page_dirty(arg2_page);
1399 1460
1400 return retval; 1461 return ret;
1401} 1462}
1402 1463
1403static int nested_svm_exit_handled_real(struct vcpu_svm *svm, 1464static int nested_svm_exit_special(struct vcpu_svm *svm)
1404 void *arg1,
1405 void *arg2,
1406 void *opaque)
1407{ 1465{
1408 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1409 bool kvm_overrides = *(bool *)opaque;
1410 u32 exit_code = svm->vmcb->control.exit_code; 1466 u32 exit_code = svm->vmcb->control.exit_code;
1411 1467
1412 if (kvm_overrides) { 1468 switch (exit_code) {
1413 switch (exit_code) { 1469 case SVM_EXIT_INTR:
1414 case SVM_EXIT_INTR: 1470 case SVM_EXIT_NMI:
1415 case SVM_EXIT_NMI: 1471 return NESTED_EXIT_HOST;
1416 return 0;
1417 /* For now we are always handling NPFs when using them */ 1472 /* For now we are always handling NPFs when using them */
1418 case SVM_EXIT_NPF: 1473 case SVM_EXIT_NPF:
1419 if (npt_enabled) 1474 if (npt_enabled)
1420 return 0; 1475 return NESTED_EXIT_HOST;
1421 break; 1476 break;
1422 /* When we're shadowing, trap PFs */ 1477 /* When we're shadowing, trap PFs */
1423 case SVM_EXIT_EXCP_BASE + PF_VECTOR: 1478 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
1424 if (!npt_enabled) 1479 if (!npt_enabled)
1425 return 0; 1480 return NESTED_EXIT_HOST;
1426 break; 1481 break;
1427 default: 1482 default:
1428 break; 1483 break;
1429 }
1430 } 1484 }
1431 1485
1486 return NESTED_EXIT_CONTINUE;
1487}
1488
1489/*
1490 * If this function returns true, this #vmexit was already handled
1491 */
1492static int nested_svm_exit_handled(struct vcpu_svm *svm)
1493{
1494 u32 exit_code = svm->vmcb->control.exit_code;
1495 int vmexit = NESTED_EXIT_HOST;
1496
1432 switch (exit_code) { 1497 switch (exit_code) {
1498 case SVM_EXIT_MSR:
1499 vmexit = nested_svm_exit_handled_msr(svm);
1500 break;
1433 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: { 1501 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
1434 u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0); 1502 u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
1435 if (nested_vmcb->control.intercept_cr_read & cr_bits) 1503 if (svm->nested.intercept_cr_read & cr_bits)
1436 return 1; 1504 vmexit = NESTED_EXIT_DONE;
1437 break; 1505 break;
1438 } 1506 }
1439 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: { 1507 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
1440 u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0); 1508 u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
1441 if (nested_vmcb->control.intercept_cr_write & cr_bits) 1509 if (svm->nested.intercept_cr_write & cr_bits)
1442 return 1; 1510 vmexit = NESTED_EXIT_DONE;
1443 break; 1511 break;
1444 } 1512 }
1445 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: { 1513 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
1446 u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0); 1514 u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
1447 if (nested_vmcb->control.intercept_dr_read & dr_bits) 1515 if (svm->nested.intercept_dr_read & dr_bits)
1448 return 1; 1516 vmexit = NESTED_EXIT_DONE;
1449 break; 1517 break;
1450 } 1518 }
1451 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: { 1519 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
1452 u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0); 1520 u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
1453 if (nested_vmcb->control.intercept_dr_write & dr_bits) 1521 if (svm->nested.intercept_dr_write & dr_bits)
1454 return 1; 1522 vmexit = NESTED_EXIT_DONE;
1455 break; 1523 break;
1456 } 1524 }
1457 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: { 1525 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1458 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE); 1526 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1459 if (nested_vmcb->control.intercept_exceptions & excp_bits) 1527 if (svm->nested.intercept_exceptions & excp_bits)
1460 return 1; 1528 vmexit = NESTED_EXIT_DONE;
1461 break; 1529 break;
1462 } 1530 }
1463 default: { 1531 default: {
1464 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR); 1532 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
1465 nsvm_printk("exit code: 0x%x\n", exit_code); 1533 nsvm_printk("exit code: 0x%x\n", exit_code);
1466 if (nested_vmcb->control.intercept & exit_bits) 1534 if (svm->nested.intercept & exit_bits)
1467 return 1; 1535 vmexit = NESTED_EXIT_DONE;
1468 } 1536 }
1469 } 1537 }
1470 1538
1471 return 0; 1539 if (vmexit == NESTED_EXIT_DONE) {
1472} 1540 nsvm_printk("#VMEXIT reason=%04x\n", exit_code);
1473 1541 nested_svm_vmexit(svm);
1474static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
1475 void *arg1, void *arg2,
1476 void *opaque)
1477{
1478 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1479 u8 *msrpm = (u8 *)arg2;
1480 u32 t0, t1;
1481 u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1482 u32 param = svm->vmcb->control.exit_info_1 & 1;
1483
1484 if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
1485 return 0;
1486
1487 switch(msr) {
1488 case 0 ... 0x1fff:
1489 t0 = (msr * 2) % 8;
1490 t1 = msr / 8;
1491 break;
1492 case 0xc0000000 ... 0xc0001fff:
1493 t0 = (8192 + msr - 0xc0000000) * 2;
1494 t1 = (t0 / 8);
1495 t0 %= 8;
1496 break;
1497 case 0xc0010000 ... 0xc0011fff:
1498 t0 = (16384 + msr - 0xc0010000) * 2;
1499 t1 = (t0 / 8);
1500 t0 %= 8;
1501 break;
1502 default:
1503 return 1;
1504 break;
1505 } 1542 }
1506 if (msrpm[t1] & ((1 << param) << t0))
1507 return 1;
1508 1543
1509 return 0; 1544 return vmexit;
1545}
1546
1547static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
1548{
1549 struct vmcb_control_area *dst = &dst_vmcb->control;
1550 struct vmcb_control_area *from = &from_vmcb->control;
1551
1552 dst->intercept_cr_read = from->intercept_cr_read;
1553 dst->intercept_cr_write = from->intercept_cr_write;
1554 dst->intercept_dr_read = from->intercept_dr_read;
1555 dst->intercept_dr_write = from->intercept_dr_write;
1556 dst->intercept_exceptions = from->intercept_exceptions;
1557 dst->intercept = from->intercept;
1558 dst->iopm_base_pa = from->iopm_base_pa;
1559 dst->msrpm_base_pa = from->msrpm_base_pa;
1560 dst->tsc_offset = from->tsc_offset;
1561 dst->asid = from->asid;
1562 dst->tlb_ctl = from->tlb_ctl;
1563 dst->int_ctl = from->int_ctl;
1564 dst->int_vector = from->int_vector;
1565 dst->int_state = from->int_state;
1566 dst->exit_code = from->exit_code;
1567 dst->exit_code_hi = from->exit_code_hi;
1568 dst->exit_info_1 = from->exit_info_1;
1569 dst->exit_info_2 = from->exit_info_2;
1570 dst->exit_int_info = from->exit_int_info;
1571 dst->exit_int_info_err = from->exit_int_info_err;
1572 dst->nested_ctl = from->nested_ctl;
1573 dst->event_inj = from->event_inj;
1574 dst->event_inj_err = from->event_inj_err;
1575 dst->nested_cr3 = from->nested_cr3;
1576 dst->lbr_ctl = from->lbr_ctl;
1510} 1577}
1511 1578
1512static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override) 1579static int nested_svm_vmexit(struct vcpu_svm *svm)
1513{ 1580{
1514 bool k = kvm_override; 1581 struct vmcb *nested_vmcb;
1515 1582 struct vmcb *hsave = svm->nested.hsave;
1516 switch (svm->vmcb->control.exit_code) { 1583 struct vmcb *vmcb = svm->vmcb;
1517 case SVM_EXIT_MSR:
1518 return nested_svm_do(svm, svm->nested_vmcb,
1519 svm->nested_vmcb_msrpm, NULL,
1520 nested_svm_exit_handled_msr);
1521 default: break;
1522 }
1523 1584
1524 return nested_svm_do(svm, svm->nested_vmcb, 0, &k, 1585 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0);
1525 nested_svm_exit_handled_real); 1586 if (!nested_vmcb)
1526} 1587 return 1;
1527
1528static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
1529 void *arg2, void *opaque)
1530{
1531 struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1532 struct vmcb *hsave = svm->hsave;
1533 u64 nested_save[] = { nested_vmcb->save.cr0,
1534 nested_vmcb->save.cr3,
1535 nested_vmcb->save.cr4,
1536 nested_vmcb->save.efer,
1537 nested_vmcb->control.intercept_cr_read,
1538 nested_vmcb->control.intercept_cr_write,
1539 nested_vmcb->control.intercept_dr_read,
1540 nested_vmcb->control.intercept_dr_write,
1541 nested_vmcb->control.intercept_exceptions,
1542 nested_vmcb->control.intercept,
1543 nested_vmcb->control.msrpm_base_pa,
1544 nested_vmcb->control.iopm_base_pa,
1545 nested_vmcb->control.tsc_offset };
1546 1588
1547 /* Give the current vmcb to the guest */ 1589 /* Give the current vmcb to the guest */
1548 memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb)); 1590 disable_gif(svm);
1549 nested_vmcb->save.cr0 = nested_save[0]; 1591
1550 if (!npt_enabled) 1592 nested_vmcb->save.es = vmcb->save.es;
1551 nested_vmcb->save.cr3 = nested_save[1]; 1593 nested_vmcb->save.cs = vmcb->save.cs;
1552 nested_vmcb->save.cr4 = nested_save[2]; 1594 nested_vmcb->save.ss = vmcb->save.ss;
1553 nested_vmcb->save.efer = nested_save[3]; 1595 nested_vmcb->save.ds = vmcb->save.ds;
1554 nested_vmcb->control.intercept_cr_read = nested_save[4]; 1596 nested_vmcb->save.gdtr = vmcb->save.gdtr;
1555 nested_vmcb->control.intercept_cr_write = nested_save[5]; 1597 nested_vmcb->save.idtr = vmcb->save.idtr;
1556 nested_vmcb->control.intercept_dr_read = nested_save[6]; 1598 if (npt_enabled)
1557 nested_vmcb->control.intercept_dr_write = nested_save[7]; 1599 nested_vmcb->save.cr3 = vmcb->save.cr3;
1558 nested_vmcb->control.intercept_exceptions = nested_save[8]; 1600 nested_vmcb->save.cr2 = vmcb->save.cr2;
1559 nested_vmcb->control.intercept = nested_save[9]; 1601 nested_vmcb->save.rflags = vmcb->save.rflags;
1560 nested_vmcb->control.msrpm_base_pa = nested_save[10]; 1602 nested_vmcb->save.rip = vmcb->save.rip;
1561 nested_vmcb->control.iopm_base_pa = nested_save[11]; 1603 nested_vmcb->save.rsp = vmcb->save.rsp;
1562 nested_vmcb->control.tsc_offset = nested_save[12]; 1604 nested_vmcb->save.rax = vmcb->save.rax;
1605 nested_vmcb->save.dr7 = vmcb->save.dr7;
1606 nested_vmcb->save.dr6 = vmcb->save.dr6;
1607 nested_vmcb->save.cpl = vmcb->save.cpl;
1608
1609 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
1610 nested_vmcb->control.int_vector = vmcb->control.int_vector;
1611 nested_vmcb->control.int_state = vmcb->control.int_state;
1612 nested_vmcb->control.exit_code = vmcb->control.exit_code;
1613 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
1614 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
1615 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
1616 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
1617 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
1618 nested_vmcb->control.tlb_ctl = 0;
1619 nested_vmcb->control.event_inj = 0;
1620 nested_vmcb->control.event_inj_err = 0;
1563 1621
1564 /* We always set V_INTR_MASKING and remember the old value in hflags */ 1622 /* We always set V_INTR_MASKING and remember the old value in hflags */
1565 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) 1623 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1566 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; 1624 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1567 1625
1568 if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
1569 (nested_vmcb->control.int_vector)) {
1570 nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
1571 nested_vmcb->control.int_vector);
1572 }
1573
1574 /* Restore the original control entries */ 1626 /* Restore the original control entries */
1575 svm->vmcb->control = hsave->control; 1627 copy_vmcb_control_area(vmcb, hsave);
1576 1628
1577 /* Kill any pending exceptions */ 1629 /* Kill any pending exceptions */
1578 if (svm->vcpu.arch.exception.pending == true) 1630 if (svm->vcpu.arch.exception.pending == true)
1579 nsvm_printk("WARNING: Pending Exception\n"); 1631 nsvm_printk("WARNING: Pending Exception\n");
1580 svm->vcpu.arch.exception.pending = false; 1632
1633 kvm_clear_exception_queue(&svm->vcpu);
1634 kvm_clear_interrupt_queue(&svm->vcpu);
1581 1635
1582 /* Restore selected save entries */ 1636 /* Restore selected save entries */
1583 svm->vmcb->save.es = hsave->save.es; 1637 svm->vmcb->save.es = hsave->save.es;
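nested_svm_exit_handled_msr() above consults the L1 MSR permission map straight from the cached nested state instead of re-mapping the guest vmcb. A self-contained sketch that mirrors its byte/bit arithmetic; the function name is hypothetical and "write" stands in for exit_info_1 bit 0 (0 = rdmsr, 1 = wrmsr):

/* Mirrors the switch in nested_svm_exit_handled_msr(): t1 is the byte
 * index into the permission map, t0 the bit within that byte; bit t0
 * gates reads and bit t0+1 gates writes of the MSR. */
static bool nested_msr_intercepted(u8 *msrpm, u32 msr, int write)
{
	u32 t0, t1;

	switch (msr) {
	case 0 ... 0x1fff:
		t0 = (msr * 2) % 8;
		t1 = msr / 8;
		break;
	case 0xc0000000 ... 0xc0001fff:
		t0 = (8192 + msr - 0xc0000000) * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	case 0xc0010000 ... 0xc0011fff:
		t0 = (16384 + msr - 0xc0010000) * 2;
		t1 = t0 / 8;
		t0 %= 8;
		break;
	default:
		return true;	/* MSRs outside the map always #VMEXIT */
	}
	return msrpm[t1] & ((1 << write) << t0);
}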
@@ -1603,19 +1657,10 @@ static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
1603 svm->vmcb->save.cpl = 0; 1657 svm->vmcb->save.cpl = 0;
1604 svm->vmcb->control.exit_int_info = 0; 1658 svm->vmcb->control.exit_int_info = 0;
1605 1659
1606 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
1607 /* Exit nested SVM mode */ 1660 /* Exit nested SVM mode */
1608 svm->nested_vmcb = 0; 1661 svm->nested.vmcb = 0;
1609 1662
1610 return 0; 1663 nested_svm_unmap(nested_vmcb, KM_USER0);
1611}
1612
1613static int nested_svm_vmexit(struct vcpu_svm *svm)
1614{
1615 nsvm_printk("VMexit\n");
1616 if (nested_svm_do(svm, svm->nested_vmcb, 0,
1617 NULL, nested_svm_vmexit_real))
1618 return 1;
1619 1664
1620 kvm_mmu_reset_context(&svm->vcpu); 1665 kvm_mmu_reset_context(&svm->vcpu);
1621 kvm_mmu_load(&svm->vcpu); 1666 kvm_mmu_load(&svm->vcpu);
@@ -1623,38 +1668,63 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
1623 return 0; 1668 return 0;
1624} 1669}
1625 1670
1626static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1, 1671static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
1627 void *arg2, void *opaque)
1628{ 1672{
1673 u32 *nested_msrpm;
1629 int i; 1674 int i;
1630 u32 *nested_msrpm = (u32*)arg1; 1675
1676 nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0);
1677 if (!nested_msrpm)
1678 return false;
1679
1631 for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++) 1680 for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
1632 svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i]; 1681 svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
1633 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
1634 1682
1635 return 0; 1683 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
1684
1685 nested_svm_unmap(nested_msrpm, KM_USER0);
1686
1687 return true;
1636} 1688}
1637 1689
1638static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1, 1690static bool nested_svm_vmrun(struct vcpu_svm *svm)
1639 void *arg2, void *opaque)
1640{ 1691{
1641 struct vmcb *nested_vmcb = (struct vmcb *)arg1; 1692 struct vmcb *nested_vmcb;
1642 struct vmcb *hsave = svm->hsave; 1693 struct vmcb *hsave = svm->nested.hsave;
1694 struct vmcb *vmcb = svm->vmcb;
1695
1696 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1697 if (!nested_vmcb)
1698 return false;
1643 1699
1644 /* nested_vmcb is our indicator if nested SVM is activated */ 1700 /* nested_vmcb is our indicator if nested SVM is activated */
1645 svm->nested_vmcb = svm->vmcb->save.rax; 1701 svm->nested.vmcb = svm->vmcb->save.rax;
1646 1702
1647 /* Clear internal status */ 1703 /* Clear internal status */
1648 svm->vcpu.arch.exception.pending = false; 1704 kvm_clear_exception_queue(&svm->vcpu);
1705 kvm_clear_interrupt_queue(&svm->vcpu);
1649 1706
1650 /* Save the old vmcb, so we don't need to pick what we save, but 1707 /* Save the old vmcb, so we don't need to pick what we save, but
1651 can restore everything when a VMEXIT occurs */ 1708 can restore everything when a VMEXIT occurs */
1652 memcpy(hsave, svm->vmcb, sizeof(struct vmcb)); 1709 hsave->save.es = vmcb->save.es;
1653 /* We need to remember the original CR3 in the SPT case */ 1710 hsave->save.cs = vmcb->save.cs;
1654 if (!npt_enabled) 1711 hsave->save.ss = vmcb->save.ss;
1655 hsave->save.cr3 = svm->vcpu.arch.cr3; 1712 hsave->save.ds = vmcb->save.ds;
1656 hsave->save.cr4 = svm->vcpu.arch.cr4; 1713 hsave->save.gdtr = vmcb->save.gdtr;
1657 hsave->save.rip = svm->next_rip; 1714 hsave->save.idtr = vmcb->save.idtr;
1715 hsave->save.efer = svm->vcpu.arch.shadow_efer;
1716 hsave->save.cr0 = svm->vcpu.arch.cr0;
1717 hsave->save.cr4 = svm->vcpu.arch.cr4;
1718 hsave->save.rflags = vmcb->save.rflags;
1719 hsave->save.rip = svm->next_rip;
1720 hsave->save.rsp = vmcb->save.rsp;
1721 hsave->save.rax = vmcb->save.rax;
1722 if (npt_enabled)
1723 hsave->save.cr3 = vmcb->save.cr3;
1724 else
1725 hsave->save.cr3 = svm->vcpu.arch.cr3;
1726
1727 copy_vmcb_control_area(hsave, vmcb);
1658 1728
1659 if (svm->vmcb->save.rflags & X86_EFLAGS_IF) 1729 if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
1660 svm->vcpu.arch.hflags |= HF_HIF_MASK; 1730 svm->vcpu.arch.hflags |= HF_HIF_MASK;
@@ -1679,7 +1749,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
1679 kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); 1749 kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
1680 kvm_mmu_reset_context(&svm->vcpu); 1750 kvm_mmu_reset_context(&svm->vcpu);
1681 } 1751 }
1682 svm->vmcb->save.cr2 = nested_vmcb->save.cr2; 1752 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
1683 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax); 1753 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
1684 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp); 1754 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
1685 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip); 1755 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
@@ -1706,7 +1776,15 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
1706 1776
1707 svm->vmcb->control.intercept |= nested_vmcb->control.intercept; 1777 svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1708 1778
1709 svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa; 1779 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1780
1781 /* cache intercepts */
1782 svm->nested.intercept_cr_read = nested_vmcb->control.intercept_cr_read;
1783 svm->nested.intercept_cr_write = nested_vmcb->control.intercept_cr_write;
1784 svm->nested.intercept_dr_read = nested_vmcb->control.intercept_dr_read;
1785 svm->nested.intercept_dr_write = nested_vmcb->control.intercept_dr_write;
1786 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
1787 svm->nested.intercept = nested_vmcb->control.intercept;
1710 1788
1711 force_new_asid(&svm->vcpu); 1789 force_new_asid(&svm->vcpu);
1712 svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info; 1790 svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
@@ -1734,12 +1812,14 @@ static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
1734 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; 1812 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1735 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; 1813 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1736 1814
1737 svm->vcpu.arch.hflags |= HF_GIF_MASK; 1815 nested_svm_unmap(nested_vmcb, KM_USER0);
1738 1816
1739 return 0; 1817 enable_gif(svm);
1818
1819 return true;
1740} 1820}
1741 1821
1742static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) 1822static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1743{ 1823{
1744 to_vmcb->save.fs = from_vmcb->save.fs; 1824 to_vmcb->save.fs = from_vmcb->save.fs;
1745 to_vmcb->save.gs = from_vmcb->save.gs; 1825 to_vmcb->save.gs = from_vmcb->save.gs;
@@ -1753,44 +1833,44 @@ static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1753 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs; 1833 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1754 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp; 1834 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1755 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip; 1835 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1756
1757 return 1;
1758}
1759
1760static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
1761 void *arg2, void *opaque)
1762{
1763 return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
1764}
1765
1766static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
1767 void *arg2, void *opaque)
1768{
1769 return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
1770} 1836}
1771 1837
1772static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1838static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1773{ 1839{
1840 struct vmcb *nested_vmcb;
1841
1774 if (nested_svm_check_permissions(svm)) 1842 if (nested_svm_check_permissions(svm))
1775 return 1; 1843 return 1;
1776 1844
1777 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1845 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1778 skip_emulated_instruction(&svm->vcpu); 1846 skip_emulated_instruction(&svm->vcpu);
1779 1847
1780 nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload); 1848 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1849 if (!nested_vmcb)
1850 return 1;
1851
1852 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
1853 nested_svm_unmap(nested_vmcb, KM_USER0);
1781 1854
1782 return 1; 1855 return 1;
1783} 1856}
1784 1857
1785static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1858static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1786{ 1859{
1860 struct vmcb *nested_vmcb;
1861
1787 if (nested_svm_check_permissions(svm)) 1862 if (nested_svm_check_permissions(svm))
1788 return 1; 1863 return 1;
1789 1864
1790 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1865 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1791 skip_emulated_instruction(&svm->vcpu); 1866 skip_emulated_instruction(&svm->vcpu);
1792 1867
1793 nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave); 1868 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0);
1869 if (!nested_vmcb)
1870 return 1;
1871
1872 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
1873 nested_svm_unmap(nested_vmcb, KM_USER0);
1794 1874
1795 return 1; 1875 return 1;
1796} 1876}
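Both handlers above now follow the same shape, map the guest's VMCB page, do the copy, unmap, instead of routing through the old nested_svm_do() callback. A sketch of that pattern under the assumption that mapping can fail; malloc/free stand in for nested_svm_map()/nested_svm_unmap(), which really kmap a guest page:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for nested_svm_map()/nested_svm_unmap(). */
static void *map_guest_page(unsigned long gpa)
{
        (void)gpa;              /* a real implementation would translate gpa */
        return malloc(4096);
}

static void unmap_guest_page(void *hva)
{
        free(hva);
}

static int vmload_like(unsigned long rax)
{
        void *nested_vmcb = map_guest_page(rax);

        if (!nested_vmcb)
                return 1;       /* bail out, as the interception handlers do */

        /* ... copy fs/gs/tr/ldtr and the sysenter state here ... */

        unmap_guest_page(nested_vmcb);
        return 1;
}

int main(void)
{
        return vmload_like(0x1000) == 1 ? 0 : 1;
}
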
@@ -1798,19 +1878,29 @@ static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1798static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run) 1878static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1799{ 1879{
1800 nsvm_printk("VMrun\n"); 1880 nsvm_printk("VMrun\n");
1881
1801 if (nested_svm_check_permissions(svm)) 1882 if (nested_svm_check_permissions(svm))
1802 return 1; 1883 return 1;
1803 1884
1804 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1885 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1805 skip_emulated_instruction(&svm->vcpu); 1886 skip_emulated_instruction(&svm->vcpu);
1806 1887
1807 if (nested_svm_do(svm, svm->vmcb->save.rax, 0, 1888 if (!nested_svm_vmrun(svm))
1808 NULL, nested_svm_vmrun))
1809 return 1; 1889 return 1;
1810 1890
1811 if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0, 1891 if (!nested_svm_vmrun_msrpm(svm))
1812 NULL, nested_svm_vmrun_msrpm)) 1892 goto failed;
1813 return 1; 1893
1894 return 1;
1895
1896failed:
1897
1898 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
1899 svm->vmcb->control.exit_code_hi = 0;
1900 svm->vmcb->control.exit_info_1 = 0;
1901 svm->vmcb->control.exit_info_2 = 0;
1902
1903 nested_svm_vmexit(svm);
1814 1904
1815 return 1; 1905 return 1;
1816} 1906}
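The rewritten handler makes the failure path explicit: if VMRUN emulation succeeds but merging the MSR bitmap fails afterwards, an SVM_EXIT_ERR exit is synthesized and nested_svm_vmexit() rolls the vCPU back to host state. A rough sketch of that control flow, with stubbed predicates in place of the real emulation steps:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for nested_svm_vmrun()/nested_svm_vmrun_msrpm(). */
static bool vmrun_ok(void)       { return true; }
static bool merge_msrpm_ok(void) { return false; } /* force the failure path */

static void emulated_vmexit(const char *reason)
{
        printf("#VMEXIT: %s\n", reason);
}

static int handle_vmrun(void)
{
        if (!vmrun_ok())
                return 1;       /* nothing was switched, nothing to unwind */

        if (!merge_msrpm_ok()) {
                /* Report the error to the nested hypervisor and unwind
                 * through the regular #VMEXIT path. */
                emulated_vmexit("SVM_EXIT_ERR");
        }
        return 1;
}

int main(void)
{
        return handle_vmrun() == 1 ? 0 : 1;
}
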
@@ -1823,7 +1913,7 @@ static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1823 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1913 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1824 skip_emulated_instruction(&svm->vcpu); 1914 skip_emulated_instruction(&svm->vcpu);
1825 1915
1826 svm->vcpu.arch.hflags |= HF_GIF_MASK; 1916 enable_gif(svm);
1827 1917
1828 return 1; 1918 return 1;
1829} 1919}
@@ -1836,7 +1926,7 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1836 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; 1926 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1837 skip_emulated_instruction(&svm->vcpu); 1927 skip_emulated_instruction(&svm->vcpu);
1838 1928
1839 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; 1929 disable_gif(svm);
1840 1930
1841 /* After a CLGI no interrupts should come */ 1931 /* After a CLGI no interrupts should come */
1842 svm_clear_vintr(svm); 1932 svm_clear_vintr(svm);
@@ -1845,6 +1935,19 @@ static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1845 return 1; 1935 return 1;
1846} 1936}
1847 1937
1938static int invlpga_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1939{
1940 struct kvm_vcpu *vcpu = &svm->vcpu;
1941 nsvm_printk("INVLPGA\n");
1942
1943 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
1944 kvm_mmu_invlpg(vcpu, vcpu->arch.regs[VCPU_REGS_RAX]);
1945
1946 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1947 skip_emulated_instruction(&svm->vcpu);
1948 return 1;
1949}
1950
1848static int invalid_op_interception(struct vcpu_svm *svm, 1951static int invalid_op_interception(struct vcpu_svm *svm,
1849 struct kvm_run *kvm_run) 1952 struct kvm_run *kvm_run)
1850{ 1953{
@@ -1953,7 +2056,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1953 struct vcpu_svm *svm = to_svm(vcpu); 2056 struct vcpu_svm *svm = to_svm(vcpu);
1954 2057
1955 switch (ecx) { 2058 switch (ecx) {
1956 case MSR_IA32_TIME_STAMP_COUNTER: { 2059 case MSR_IA32_TSC: {
1957 u64 tsc; 2060 u64 tsc;
1958 2061
1959 rdtscll(tsc); 2062 rdtscll(tsc);
@@ -1981,10 +2084,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1981 *data = svm->vmcb->save.sysenter_cs; 2084 *data = svm->vmcb->save.sysenter_cs;
1982 break; 2085 break;
1983 case MSR_IA32_SYSENTER_EIP: 2086 case MSR_IA32_SYSENTER_EIP:
1984 *data = svm->vmcb->save.sysenter_eip; 2087 *data = svm->sysenter_eip;
1985 break; 2088 break;
1986 case MSR_IA32_SYSENTER_ESP: 2089 case MSR_IA32_SYSENTER_ESP:
1987 *data = svm->vmcb->save.sysenter_esp; 2090 *data = svm->sysenter_esp;
1988 break; 2091 break;
1989 /* Nobody will change the following 5 values in the VMCB so 2092 /* Nobody will change the following 5 values in the VMCB so
1990 we can safely return them on rdmsr. They will always be 0 2093 we can safely return them on rdmsr. They will always be 0
@@ -2005,7 +2108,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
2005 *data = svm->vmcb->save.last_excp_to; 2108 *data = svm->vmcb->save.last_excp_to;
2006 break; 2109 break;
2007 case MSR_VM_HSAVE_PA: 2110 case MSR_VM_HSAVE_PA:
2008 *data = svm->hsave_msr; 2111 *data = svm->nested.hsave_msr;
2009 break; 2112 break;
2010 case MSR_VM_CR: 2113 case MSR_VM_CR:
2011 *data = 0; 2114 *data = 0;
@@ -2027,8 +2130,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
2027 if (svm_get_msr(&svm->vcpu, ecx, &data)) 2130 if (svm_get_msr(&svm->vcpu, ecx, &data))
2028 kvm_inject_gp(&svm->vcpu, 0); 2131 kvm_inject_gp(&svm->vcpu, 0);
2029 else { 2132 else {
2030 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data, 2133 trace_kvm_msr_read(ecx, data);
2031 (u32)(data >> 32), handler);
2032 2134
2033 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff; 2135 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
2034 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32; 2136 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
@@ -2043,7 +2145,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2043 struct vcpu_svm *svm = to_svm(vcpu); 2145 struct vcpu_svm *svm = to_svm(vcpu);
2044 2146
2045 switch (ecx) { 2147 switch (ecx) {
2046 case MSR_IA32_TIME_STAMP_COUNTER: { 2148 case MSR_IA32_TSC: {
2047 u64 tsc; 2149 u64 tsc;
2048 2150
2049 rdtscll(tsc); 2151 rdtscll(tsc);
@@ -2071,9 +2173,11 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2071 svm->vmcb->save.sysenter_cs = data; 2173 svm->vmcb->save.sysenter_cs = data;
2072 break; 2174 break;
2073 case MSR_IA32_SYSENTER_EIP: 2175 case MSR_IA32_SYSENTER_EIP:
2176 svm->sysenter_eip = data;
2074 svm->vmcb->save.sysenter_eip = data; 2177 svm->vmcb->save.sysenter_eip = data;
2075 break; 2178 break;
2076 case MSR_IA32_SYSENTER_ESP: 2179 case MSR_IA32_SYSENTER_ESP:
2180 svm->sysenter_esp = data;
2077 svm->vmcb->save.sysenter_esp = data; 2181 svm->vmcb->save.sysenter_esp = data;
2078 break; 2182 break;
2079 case MSR_IA32_DEBUGCTLMSR: 2183 case MSR_IA32_DEBUGCTLMSR:
@@ -2091,24 +2195,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
2091 else 2195 else
2092 svm_disable_lbrv(svm); 2196 svm_disable_lbrv(svm);
2093 break; 2197 break;
2094 case MSR_K7_EVNTSEL0:
2095 case MSR_K7_EVNTSEL1:
2096 case MSR_K7_EVNTSEL2:
2097 case MSR_K7_EVNTSEL3:
2098 case MSR_K7_PERFCTR0:
2099 case MSR_K7_PERFCTR1:
2100 case MSR_K7_PERFCTR2:
2101 case MSR_K7_PERFCTR3:
2102 /*
2103 * Just discard all writes to the performance counters; this
2104 * should keep both older linux and windows 64-bit guests
2105 * happy
2106 */
2107 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
2108
2109 break;
2110 case MSR_VM_HSAVE_PA: 2198 case MSR_VM_HSAVE_PA:
2111 svm->hsave_msr = data; 2199 svm->nested.hsave_msr = data;
2200 break;
2201 case MSR_VM_CR:
2202 case MSR_VM_IGNNE:
2203 pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
2112 break; 2204 break;
2113 default: 2205 default:
2114 return kvm_set_msr_common(vcpu, ecx, data); 2206 return kvm_set_msr_common(vcpu, ecx, data);
@@ -2122,8 +2214,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
2122 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u) 2214 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
2123 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32); 2215 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2124 2216
2125 KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32), 2217 trace_kvm_msr_write(ecx, data);
2126 handler);
2127 2218
2128 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; 2219 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2129 if (svm_set_msr(&svm->vcpu, ecx, data)) 2220 if (svm_set_msr(&svm->vcpu, ecx, data))
@@ -2144,8 +2235,6 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
2144static int interrupt_window_interception(struct vcpu_svm *svm, 2235static int interrupt_window_interception(struct vcpu_svm *svm,
2145 struct kvm_run *kvm_run) 2236 struct kvm_run *kvm_run)
2146{ 2237{
2147 KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
2148
2149 svm_clear_vintr(svm); 2238 svm_clear_vintr(svm);
2150 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; 2239 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2151 /* 2240 /*
@@ -2201,7 +2290,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
2201 [SVM_EXIT_INVD] = emulate_on_interception, 2290 [SVM_EXIT_INVD] = emulate_on_interception,
2202 [SVM_EXIT_HLT] = halt_interception, 2291 [SVM_EXIT_HLT] = halt_interception,
2203 [SVM_EXIT_INVLPG] = invlpg_interception, 2292 [SVM_EXIT_INVLPG] = invlpg_interception,
2204 [SVM_EXIT_INVLPGA] = invalid_op_interception, 2293 [SVM_EXIT_INVLPGA] = invlpga_interception,
2205 [SVM_EXIT_IOIO] = io_interception, 2294 [SVM_EXIT_IOIO] = io_interception,
2206 [SVM_EXIT_MSR] = msr_interception, 2295 [SVM_EXIT_MSR] = msr_interception,
2207 [SVM_EXIT_TASK_SWITCH] = task_switch_interception, 2296 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
@@ -2224,20 +2313,26 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2224 struct vcpu_svm *svm = to_svm(vcpu); 2313 struct vcpu_svm *svm = to_svm(vcpu);
2225 u32 exit_code = svm->vmcb->control.exit_code; 2314 u32 exit_code = svm->vmcb->control.exit_code;
2226 2315
2227 KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip, 2316 trace_kvm_exit(exit_code, svm->vmcb->save.rip);
2228 (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
2229 2317
2230 if (is_nested(svm)) { 2318 if (is_nested(svm)) {
2319 int vmexit;
2320
2231 nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n", 2321 nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
2232 exit_code, svm->vmcb->control.exit_info_1, 2322 exit_code, svm->vmcb->control.exit_info_1,
2233 svm->vmcb->control.exit_info_2, svm->vmcb->save.rip); 2323 svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
2234 if (nested_svm_exit_handled(svm, true)) { 2324
2235 nested_svm_vmexit(svm); 2325 vmexit = nested_svm_exit_special(svm);
2236 nsvm_printk("-> #VMEXIT\n"); 2326
2327 if (vmexit == NESTED_EXIT_CONTINUE)
2328 vmexit = nested_svm_exit_handled(svm);
2329
2330 if (vmexit == NESTED_EXIT_DONE)
2237 return 1; 2331 return 1;
2238 }
2239 } 2332 }
2240 2333
2334 svm_complete_interrupts(svm);
2335
2241 if (npt_enabled) { 2336 if (npt_enabled) {
2242 int mmu_reload = 0; 2337 int mmu_reload = 0;
2243 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) { 2338 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -2246,12 +2341,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2246 } 2341 }
2247 vcpu->arch.cr0 = svm->vmcb->save.cr0; 2342 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2248 vcpu->arch.cr3 = svm->vmcb->save.cr3; 2343 vcpu->arch.cr3 = svm->vmcb->save.cr3;
2249 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
2250 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
2251 kvm_inject_gp(vcpu, 0);
2252 return 1;
2253 }
2254 }
2255 if (mmu_reload) { 2344 if (mmu_reload) {
2256 kvm_mmu_reset_context(vcpu); 2345 kvm_mmu_reset_context(vcpu);
2257 kvm_mmu_load(vcpu); 2346 kvm_mmu_load(vcpu);
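The nested-exit dispatch above asks two questions in order: does the host have to handle this exit no matter what (nested_svm_exit_special()), and if not, do the intercept bits cached at VMRUN time claim it for the L1 hypervisor (nested_svm_exit_handled())? A sketch of that two-stage decision; only NESTED_EXIT_CONTINUE and NESTED_EXIT_DONE appear in the hunk, so the third enumerator and the stub results are assumptions:

#include <stdio.h>

enum nested_exit {
        NESTED_EXIT_HOST,       /* assumed third state: host handles the exit */
        NESTED_EXIT_DONE,       /* exit reflected to the L1 hypervisor */
        NESTED_EXIT_CONTINUE,   /* keep checking */
};

/* Stubs: "special" exits short-circuit; everything else consults the
 * intercept masks cached when VMRUN was emulated. */
static enum nested_exit exit_special(unsigned int code)
{
        (void)code;
        return NESTED_EXIT_CONTINUE;
}

static enum nested_exit exit_handled(unsigned int code)
{
        (void)code;
        return NESTED_EXIT_DONE;
}

static int handle_nested_exit(unsigned int exit_code)
{
        enum nested_exit vmexit = exit_special(exit_code);

        if (vmexit == NESTED_EXIT_CONTINUE)
                vmexit = exit_handled(exit_code);

        return vmexit == NESTED_EXIT_DONE;
}

int main(void)
{
        printf("reflected to L1: %d\n", handle_nested_exit(0x60));
        return 0;
}
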
@@ -2319,7 +2408,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
2319{ 2408{
2320 struct vmcb_control_area *control; 2409 struct vmcb_control_area *control;
2321 2410
2322 KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler); 2411 trace_kvm_inj_virq(irq);
2323 2412
2324 ++svm->vcpu.stat.irq_injections; 2413 ++svm->vcpu.stat.irq_injections;
2325 control = &svm->vmcb->control; 2414 control = &svm->vmcb->control;
@@ -2329,21 +2418,14 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
2329 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT); 2418 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
2330} 2419}
2331 2420
2332static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
2333{
2334 struct vcpu_svm *svm = to_svm(vcpu);
2335
2336 svm->vmcb->control.event_inj = nr |
2337 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2338}
2339
2340static void svm_set_irq(struct kvm_vcpu *vcpu) 2421static void svm_set_irq(struct kvm_vcpu *vcpu)
2341{ 2422{
2342 struct vcpu_svm *svm = to_svm(vcpu); 2423 struct vcpu_svm *svm = to_svm(vcpu);
2343 2424
2344 nested_svm_intr(svm); 2425 BUG_ON(!(gif_set(svm)));
2345 2426
2346 svm_queue_irq(vcpu, vcpu->arch.interrupt.nr); 2427 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
2428 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2347} 2429}
2348 2430
2349static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 2431static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
@@ -2371,13 +2453,25 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
2371 struct vmcb *vmcb = svm->vmcb; 2453 struct vmcb *vmcb = svm->vmcb;
2372 return (vmcb->save.rflags & X86_EFLAGS_IF) && 2454 return (vmcb->save.rflags & X86_EFLAGS_IF) &&
2373 !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) && 2455 !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
2374 (svm->vcpu.arch.hflags & HF_GIF_MASK); 2456 gif_set(svm) &&
2457 !(is_nested(svm) && (svm->vcpu.arch.hflags & HF_VINTR_MASK));
2375} 2458}
2376 2459
2377static void enable_irq_window(struct kvm_vcpu *vcpu) 2460static void enable_irq_window(struct kvm_vcpu *vcpu)
2378{ 2461{
2379 svm_set_vintr(to_svm(vcpu)); 2462 struct vcpu_svm *svm = to_svm(vcpu);
2380 svm_inject_irq(to_svm(vcpu), 0x0); 2463 nsvm_printk("Trying to open IRQ window\n");
2464
2465 nested_svm_intr(svm);
2466
2467 /* In case GIF=0 we can't rely on the CPU to tell us when
2468 * GIF becomes 1, because that's a separate STGI/VMRUN intercept.
2469 * The next time we get that intercept, this function will be
2470 * called again though and we'll get the vintr intercept. */
2471 if (gif_set(svm)) {
2472 svm_set_vintr(svm);
2473 svm_inject_irq(svm, 0x0);
2474 }
2381} 2475}
2382 2476
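Taken together, the two changes above gate interrupt injection on four conditions: IF set in guest RFLAGS, no interrupt shadow, GIF set, and not running a nested guest whose L1 owns virtual interrupts (HF_VINTR_MASK). The predicate in isolation, as a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

struct vcpu_state {
        bool rflags_if;         /* IF set in guest RFLAGS */
        bool intr_shadow;       /* STI/MOV SS interrupt shadow active */
        bool gif;               /* global interrupt flag */
        bool nested;            /* an L2 guest is running */
        bool vintr_masked;      /* HF_VINTR_MASK: L1 owns virtual interrupts */
};

static bool interrupt_allowed(const struct vcpu_state *s)
{
        return s->rflags_if &&
               !s->intr_shadow &&
               s->gif &&
               !(s->nested && s->vintr_masked);
}

int main(void)
{
        struct vcpu_state s = { .rflags_if = true, .gif = true };

        printf("allowed: %d\n", interrupt_allowed(&s));  /* 1 */
        s.gif = false;                                   /* CLGI executed */
        printf("allowed: %d\n", interrupt_allowed(&s));  /* 0 */
        return 0;
}
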
2383static void enable_nmi_window(struct kvm_vcpu *vcpu) 2477static void enable_nmi_window(struct kvm_vcpu *vcpu)
@@ -2456,6 +2550,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
2456 case SVM_EXITINTINFO_TYPE_EXEPT: 2550 case SVM_EXITINTINFO_TYPE_EXEPT:
2457 /* In case of software exception do not reinject an exception 2551 /* In case of software exception do not reinject an exception
2458 vector, but re-execute the instruction instead */ 2552 vector, but re-execute the instruction instead */
2553 if (is_nested(svm))
2554 break;
2459 if (kvm_exception_is_soft(vector)) 2555 if (kvm_exception_is_soft(vector))
2460 break; 2556 break;
2461 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) { 2557 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
@@ -2498,9 +2594,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2498 fs_selector = kvm_read_fs(); 2594 fs_selector = kvm_read_fs();
2499 gs_selector = kvm_read_gs(); 2595 gs_selector = kvm_read_gs();
2500 ldt_selector = kvm_read_ldt(); 2596 ldt_selector = kvm_read_ldt();
2501 svm->host_cr2 = kvm_read_cr2(); 2597 svm->vmcb->save.cr2 = vcpu->arch.cr2;
2502 if (!is_nested(svm))
2503 svm->vmcb->save.cr2 = vcpu->arch.cr2;
2504 /* required for live migration with NPT */ 2598 /* required for live migration with NPT */
2505 if (npt_enabled) 2599 if (npt_enabled)
2506 svm->vmcb->save.cr3 = vcpu->arch.cr3; 2600 svm->vmcb->save.cr3 = vcpu->arch.cr3;
@@ -2585,8 +2679,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2585 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 2679 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
2586 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 2680 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
2587 2681
2588 kvm_write_cr2(svm->host_cr2);
2589
2590 kvm_load_fs(fs_selector); 2682 kvm_load_fs(fs_selector);
2591 kvm_load_gs(gs_selector); 2683 kvm_load_gs(gs_selector);
2592 kvm_load_ldt(ldt_selector); 2684 kvm_load_ldt(ldt_selector);
@@ -2602,7 +2694,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2602 2694
2603 svm->next_rip = 0; 2695 svm->next_rip = 0;
2604 2696
2605 svm_complete_interrupts(svm); 2697 if (npt_enabled) {
2698 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
2699 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
2700 }
2606} 2701}
2607 2702
2608#undef R 2703#undef R
@@ -2673,6 +2768,64 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
2673 return 0; 2768 return 0;
2674} 2769}
2675 2770
2771static const struct trace_print_flags svm_exit_reasons_str[] = {
2772 { SVM_EXIT_READ_CR0, "read_cr0" },
2773 { SVM_EXIT_READ_CR3, "read_cr3" },
2774 { SVM_EXIT_READ_CR4, "read_cr4" },
2775 { SVM_EXIT_READ_CR8, "read_cr8" },
2776 { SVM_EXIT_WRITE_CR0, "write_cr0" },
2777 { SVM_EXIT_WRITE_CR3, "write_cr3" },
2778 { SVM_EXIT_WRITE_CR4, "write_cr4" },
2779 { SVM_EXIT_WRITE_CR8, "write_cr8" },
2780 { SVM_EXIT_READ_DR0, "read_dr0" },
2781 { SVM_EXIT_READ_DR1, "read_dr1" },
2782 { SVM_EXIT_READ_DR2, "read_dr2" },
2783 { SVM_EXIT_READ_DR3, "read_dr3" },
2784 { SVM_EXIT_WRITE_DR0, "write_dr0" },
2785 { SVM_EXIT_WRITE_DR1, "write_dr1" },
2786 { SVM_EXIT_WRITE_DR2, "write_dr2" },
2787 { SVM_EXIT_WRITE_DR3, "write_dr3" },
2788 { SVM_EXIT_WRITE_DR5, "write_dr5" },
2789 { SVM_EXIT_WRITE_DR7, "write_dr7" },
2790 { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
2791 { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
2792 { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
2793 { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
2794 { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
2795 { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
2796 { SVM_EXIT_INTR, "interrupt" },
2797 { SVM_EXIT_NMI, "nmi" },
2798 { SVM_EXIT_SMI, "smi" },
2799 { SVM_EXIT_INIT, "init" },
2800 { SVM_EXIT_VINTR, "vintr" },
2801 { SVM_EXIT_CPUID, "cpuid" },
2802 { SVM_EXIT_INVD, "invd" },
2803 { SVM_EXIT_HLT, "hlt" },
2804 { SVM_EXIT_INVLPG, "invlpg" },
2805 { SVM_EXIT_INVLPGA, "invlpga" },
2806 { SVM_EXIT_IOIO, "io" },
2807 { SVM_EXIT_MSR, "msr" },
2808 { SVM_EXIT_TASK_SWITCH, "task_switch" },
2809 { SVM_EXIT_SHUTDOWN, "shutdown" },
2810 { SVM_EXIT_VMRUN, "vmrun" },
2811 { SVM_EXIT_VMMCALL, "hypercall" },
2812 { SVM_EXIT_VMLOAD, "vmload" },
2813 { SVM_EXIT_VMSAVE, "vmsave" },
2814 { SVM_EXIT_STGI, "stgi" },
2815 { SVM_EXIT_CLGI, "clgi" },
2816 { SVM_EXIT_SKINIT, "skinit" },
2817 { SVM_EXIT_WBINVD, "wbinvd" },
2818 { SVM_EXIT_MONITOR, "monitor" },
2819 { SVM_EXIT_MWAIT, "mwait" },
2820 { SVM_EXIT_NPF, "npf" },
2821 { -1, NULL }
2822};
2823
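The table is a { code, name } array terminated by a { -1, NULL } sentinel, which ftrace_print_symbols_seq() walks when formatting the kvm_exit tracepoint. A lookup over the same shape, with illustrative exit codes:

#include <stdio.h>

struct flag_str {
        long        code;
        const char *name;
};

/* Two entries plus the { -1, NULL } sentinel, mirroring the table above. */
static const struct flag_str exit_reasons[] = {
        { 0x078, "hlt" },
        { 0x07b, "io"  },
        { -1,    NULL  },
};

static const char *exit_reason_name(long code)
{
        const struct flag_str *p;

        for (p = exit_reasons; p->name; p++)
                if (p->code == code)
                        return p->name;
        return "unknown";
}

int main(void)
{
        printf("%s\n", exit_reason_name(0x078)); /* prints "hlt" */
        return 0;
}
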
2824static bool svm_gb_page_enable(void)
2825{
2826 return true;
2827}
2828
2676static struct kvm_x86_ops svm_x86_ops = { 2829static struct kvm_x86_ops svm_x86_ops = {
2677 .cpu_has_kvm_support = has_svm, 2830 .cpu_has_kvm_support = has_svm,
2678 .disabled_by_bios = is_disabled, 2831 .disabled_by_bios = is_disabled,
@@ -2710,6 +2863,7 @@ static struct kvm_x86_ops svm_x86_ops = {
2710 .set_gdt = svm_set_gdt, 2863 .set_gdt = svm_set_gdt,
2711 .get_dr = svm_get_dr, 2864 .get_dr = svm_get_dr,
2712 .set_dr = svm_set_dr, 2865 .set_dr = svm_set_dr,
2866 .cache_reg = svm_cache_reg,
2713 .get_rflags = svm_get_rflags, 2867 .get_rflags = svm_get_rflags,
2714 .set_rflags = svm_set_rflags, 2868 .set_rflags = svm_set_rflags,
2715 2869
@@ -2733,6 +2887,9 @@ static struct kvm_x86_ops svm_x86_ops = {
2733 .set_tss_addr = svm_set_tss_addr, 2887 .set_tss_addr = svm_set_tss_addr,
2734 .get_tdp_level = get_npt_level, 2888 .get_tdp_level = get_npt_level,
2735 .get_mt_mask = svm_get_mt_mask, 2889 .get_mt_mask = svm_get_mt_mask,
2890
2891 .exit_reasons_str = svm_exit_reasons_str,
2892 .gb_page_enable = svm_gb_page_enable,
2736}; 2893};
2737 2894
2738static int __init svm_init(void) 2895static int __init svm_init(void)
diff --git a/arch/x86/kvm/timer.c b/arch/x86/kvm/timer.c
index 86dbac072d0c..eea40439066c 100644
--- a/arch/x86/kvm/timer.c
+++ b/arch/x86/kvm/timer.c
@@ -9,12 +9,16 @@ static int __kvm_timer_fn(struct kvm_vcpu *vcpu, struct kvm_timer *ktimer)
9 int restart_timer = 0; 9 int restart_timer = 0;
10 wait_queue_head_t *q = &vcpu->wq; 10 wait_queue_head_t *q = &vcpu->wq;
11 11
12 /* FIXME: this code should not know anything about vcpus */ 12 /*
13 if (!atomic_inc_and_test(&ktimer->pending)) 13 * There is a race window between reading and incrementing, but we do
14 * not care about potentially losing timer events in the !reinject
15 * case anyway.
16 */
17 if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
18 atomic_inc(&ktimer->pending);
19 /* FIXME: this code should not know anything about vcpus */
14 set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests); 20 set_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
15 21 }
16 if (!ktimer->reinject)
17 atomic_set(&ktimer->pending, 1);
18 22
19 if (waitqueue_active(q)) 23 if (waitqueue_active(q))
20 wake_up_interruptible(q); 24 wake_up_interruptible(q);
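The rewritten timer function only bumps the pending count when reinjection is requested or nothing is pending yet; a tick that fires while one is already pending is deliberately coalesced, and the comment accepts the read/increment race. The decision, sketched with C11 atomics; the struct here is illustrative, not the kernel's kvm_timer:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ktimer {
        atomic_int pending;
        bool reinject;
};

/* Mirrors the rewritten decision: with reinject every tick accumulates;
 * without it, a tick that fires while one is already pending is coalesced
 * (the race between read and increment is accepted, as the comment says). */
static void timer_fn(struct ktimer *t)
{
        if (t->reinject || atomic_load(&t->pending) == 0)
                atomic_fetch_add(&t->pending, 1);
}

int main(void)
{
        struct ktimer t = { .reinject = false };

        atomic_init(&t.pending, 0);
        timer_fn(&t);
        timer_fn(&t);           /* coalesced: pending stays at 1 */
        printf("pending = %d\n", atomic_load(&t.pending));
        return 0;
}
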
@@ -33,7 +37,7 @@ enum hrtimer_restart kvm_timer_fn(struct hrtimer *data)
33 struct kvm_vcpu *vcpu; 37 struct kvm_vcpu *vcpu;
34 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer); 38 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
35 39
36 vcpu = ktimer->kvm->vcpus[ktimer->vcpu_id]; 40 vcpu = ktimer->vcpu;
37 if (!vcpu) 41 if (!vcpu)
38 return HRTIMER_NORESTART; 42 return HRTIMER_NORESTART;
39 43
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
new file mode 100644
index 000000000000..0d480e77eacf
--- /dev/null
+++ b/arch/x86/kvm/trace.h
@@ -0,0 +1,355 @@
1#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
2#define _TRACE_KVM_H
3
4#include <linux/tracepoint.h>
5
6#undef TRACE_SYSTEM
7#define TRACE_SYSTEM kvm
8#define TRACE_INCLUDE_PATH arch/x86/kvm
9#define TRACE_INCLUDE_FILE trace
10
11/*
12 * Tracepoint for guest mode entry.
13 */
14TRACE_EVENT(kvm_entry,
15 TP_PROTO(unsigned int vcpu_id),
16 TP_ARGS(vcpu_id),
17
18 TP_STRUCT__entry(
19 __field( unsigned int, vcpu_id )
20 ),
21
22 TP_fast_assign(
23 __entry->vcpu_id = vcpu_id;
24 ),
25
26 TP_printk("vcpu %u", __entry->vcpu_id)
27);
28
29/*
30 * Tracepoint for hypercall.
31 */
32TRACE_EVENT(kvm_hypercall,
33 TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
34 unsigned long a2, unsigned long a3),
35 TP_ARGS(nr, a0, a1, a2, a3),
36
37 TP_STRUCT__entry(
38 __field( unsigned long, nr )
39 __field( unsigned long, a0 )
40 __field( unsigned long, a1 )
41 __field( unsigned long, a2 )
42 __field( unsigned long, a3 )
43 ),
44
45 TP_fast_assign(
46 __entry->nr = nr;
47 __entry->a0 = a0;
48 __entry->a1 = a1;
49 __entry->a2 = a2;
50 __entry->a3 = a3;
51 ),
52
53 TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
54 __entry->nr, __entry->a0, __entry->a1, __entry->a2,
55 __entry->a3)
56);
57
58/*
59 * Tracepoint for PIO.
60 */
61TRACE_EVENT(kvm_pio,
62 TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
63 unsigned int count),
64 TP_ARGS(rw, port, size, count),
65
66 TP_STRUCT__entry(
67 __field( unsigned int, rw )
68 __field( unsigned int, port )
69 __field( unsigned int, size )
70 __field( unsigned int, count )
71 ),
72
73 TP_fast_assign(
74 __entry->rw = rw;
75 __entry->port = port;
76 __entry->size = size;
77 __entry->count = count;
78 ),
79
80 TP_printk("pio_%s at 0x%x size %d count %d",
81 __entry->rw ? "write" : "read",
82 __entry->port, __entry->size, __entry->count)
83);
84
85/*
86 * Tracepoint for cpuid.
87 */
88TRACE_EVENT(kvm_cpuid,
89 TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
90 unsigned long rcx, unsigned long rdx),
91 TP_ARGS(function, rax, rbx, rcx, rdx),
92
93 TP_STRUCT__entry(
94 __field( unsigned int, function )
95 __field( unsigned long, rax )
96 __field( unsigned long, rbx )
97 __field( unsigned long, rcx )
98 __field( unsigned long, rdx )
99 ),
100
101 TP_fast_assign(
102 __entry->function = function;
103 __entry->rax = rax;
104 __entry->rbx = rbx;
105 __entry->rcx = rcx;
106 __entry->rdx = rdx;
107 ),
108
109 TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
110 __entry->function, __entry->rax,
111 __entry->rbx, __entry->rcx, __entry->rdx)
112);
113
114#define AREG(x) { APIC_##x, "APIC_" #x }
115
116#define kvm_trace_symbol_apic \
117 AREG(ID), AREG(LVR), AREG(TASKPRI), AREG(ARBPRI), AREG(PROCPRI), \
118 AREG(EOI), AREG(RRR), AREG(LDR), AREG(DFR), AREG(SPIV), AREG(ISR), \
119 AREG(TMR), AREG(IRR), AREG(ESR), AREG(ICR), AREG(ICR2), AREG(LVTT), \
120 AREG(LVTTHMR), AREG(LVTPC), AREG(LVT0), AREG(LVT1), AREG(LVTERR), \
121 AREG(TMICT), AREG(TMCCT), AREG(TDCR), AREG(SELF_IPI), AREG(EFEAT), \
122 AREG(ECTRL)
123/*
124 * Tracepoint for apic access.
125 */
126TRACE_EVENT(kvm_apic,
127 TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
128 TP_ARGS(rw, reg, val),
129
130 TP_STRUCT__entry(
131 __field( unsigned int, rw )
132 __field( unsigned int, reg )
133 __field( unsigned int, val )
134 ),
135
136 TP_fast_assign(
137 __entry->rw = rw;
138 __entry->reg = reg;
139 __entry->val = val;
140 ),
141
142 TP_printk("apic_%s %s = 0x%x",
143 __entry->rw ? "write" : "read",
144 __print_symbolic(__entry->reg, kvm_trace_symbol_apic),
145 __entry->val)
146);
147
148#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
149#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)
150
151/*
152 * Tracepoint for kvm guest exit:
153 */
154TRACE_EVENT(kvm_exit,
155 TP_PROTO(unsigned int exit_reason, unsigned long guest_rip),
156 TP_ARGS(exit_reason, guest_rip),
157
158 TP_STRUCT__entry(
159 __field( unsigned int, exit_reason )
160 __field( unsigned long, guest_rip )
161 ),
162
163 TP_fast_assign(
164 __entry->exit_reason = exit_reason;
165 __entry->guest_rip = guest_rip;
166 ),
167
168 TP_printk("reason %s rip 0x%lx",
169 ftrace_print_symbols_seq(p, __entry->exit_reason,
170 kvm_x86_ops->exit_reasons_str),
171 __entry->guest_rip)
172);
173
174/*
175 * Tracepoint for kvm interrupt injection:
176 */
177TRACE_EVENT(kvm_inj_virq,
178 TP_PROTO(unsigned int irq),
179 TP_ARGS(irq),
180
181 TP_STRUCT__entry(
182 __field( unsigned int, irq )
183 ),
184
185 TP_fast_assign(
186 __entry->irq = irq;
187 ),
188
189 TP_printk("irq %u", __entry->irq)
190);
191
192/*
193 * Tracepoint for page fault.
194 */
195TRACE_EVENT(kvm_page_fault,
196 TP_PROTO(unsigned long fault_address, unsigned int error_code),
197 TP_ARGS(fault_address, error_code),
198
199 TP_STRUCT__entry(
200 __field( unsigned long, fault_address )
201 __field( unsigned int, error_code )
202 ),
203
204 TP_fast_assign(
205 __entry->fault_address = fault_address;
206 __entry->error_code = error_code;
207 ),
208
209 TP_printk("address %lx error_code %x",
210 __entry->fault_address, __entry->error_code)
211);
212
213/*
214 * Tracepoint for guest MSR access.
215 */
216TRACE_EVENT(kvm_msr,
217 TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data),
218 TP_ARGS(rw, ecx, data),
219
220 TP_STRUCT__entry(
221 __field( unsigned int, rw )
222 __field( unsigned int, ecx )
223 __field( unsigned long, data )
224 ),
225
226 TP_fast_assign(
227 __entry->rw = rw;
228 __entry->ecx = ecx;
229 __entry->data = data;
230 ),
231
232 TP_printk("msr_%s %x = 0x%lx",
233 __entry->rw ? "write" : "read",
234 __entry->ecx, __entry->data)
235);
236
237#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data)
238#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data)
239
240/*
241 * Tracepoint for guest CR access.
242 */
243TRACE_EVENT(kvm_cr,
244 TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
245 TP_ARGS(rw, cr, val),
246
247 TP_STRUCT__entry(
248 __field( unsigned int, rw )
249 __field( unsigned int, cr )
250 __field( unsigned long, val )
251 ),
252
253 TP_fast_assign(
254 __entry->rw = rw;
255 __entry->cr = cr;
256 __entry->val = val;
257 ),
258
259 TP_printk("cr_%s %x = 0x%lx",
260 __entry->rw ? "write" : "read",
261 __entry->cr, __entry->val)
262);
263
264#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
265#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)
266
267TRACE_EVENT(kvm_pic_set_irq,
268 TP_PROTO(__u8 chip, __u8 pin, __u8 elcr, __u8 imr, bool coalesced),
269 TP_ARGS(chip, pin, elcr, imr, coalesced),
270
271 TP_STRUCT__entry(
272 __field( __u8, chip )
273 __field( __u8, pin )
274 __field( __u8, elcr )
275 __field( __u8, imr )
276 __field( bool, coalesced )
277 ),
278
279 TP_fast_assign(
280 __entry->chip = chip;
281 __entry->pin = pin;
282 __entry->elcr = elcr;
283 __entry->imr = imr;
284 __entry->coalesced = coalesced;
285 ),
286
287 TP_printk("chip %u pin %u (%s%s)%s",
288 __entry->chip, __entry->pin,
289 (__entry->elcr & (1 << __entry->pin)) ? "level":"edge",
290 (__entry->imr & (1 << __entry->pin)) ? "|masked":"",
291 __entry->coalesced ? " (coalesced)" : "")
292);
293
294#define kvm_apic_dst_shorthand \
295 {0x0, "dst"}, \
296 {0x1, "self"}, \
297 {0x2, "all"}, \
298 {0x3, "all-but-self"}
299
300TRACE_EVENT(kvm_apic_ipi,
301 TP_PROTO(__u32 icr_low, __u32 dest_id),
302 TP_ARGS(icr_low, dest_id),
303
304 TP_STRUCT__entry(
305 __field( __u32, icr_low )
306 __field( __u32, dest_id )
307 ),
308
309 TP_fast_assign(
310 __entry->icr_low = icr_low;
311 __entry->dest_id = dest_id;
312 ),
313
314 TP_printk("dst %x vec %u (%s|%s|%s|%s|%s)",
315 __entry->dest_id, (u8)__entry->icr_low,
316 __print_symbolic((__entry->icr_low >> 8 & 0x7),
317 kvm_deliver_mode),
318 (__entry->icr_low & (1<<11)) ? "logical" : "physical",
319 (__entry->icr_low & (1<<14)) ? "assert" : "de-assert",
320 (__entry->icr_low & (1<<15)) ? "level" : "edge",
321 __print_symbolic((__entry->icr_low >> 18 & 0x3),
322 kvm_apic_dst_shorthand))
323);
324
325TRACE_EVENT(kvm_apic_accept_irq,
326 TP_PROTO(__u32 apicid, __u16 dm, __u8 tm, __u8 vec, bool coalesced),
327 TP_ARGS(apicid, dm, tm, vec, coalesced),
328
329 TP_STRUCT__entry(
330 __field( __u32, apicid )
331 __field( __u16, dm )
332 __field( __u8, tm )
333 __field( __u8, vec )
334 __field( bool, coalesced )
335 ),
336
337 TP_fast_assign(
338 __entry->apicid = apicid;
339 __entry->dm = dm;
340 __entry->tm = tm;
341 __entry->vec = vec;
342 __entry->coalesced = coalesced;
343 ),
344
345 TP_printk("apicid %x vec %u (%s|%s)%s",
346 __entry->apicid, __entry->vec,
347 __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
348 __entry->tm ? "level" : "edge",
349 __entry->coalesced ? " (coalesced)" : "")
350);
351
352#endif /* _TRACE_KVM_H */
353
354/* This part must be outside protection */
355#include <trace/define_trace.h>
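A pattern worth noting in this header: one TRACE_EVENT parameterized by an rw flag, plus two thin macros (trace_kvm_msr_read/trace_kvm_msr_write, and likewise for the APIC and CR events) that pin the flag down at the call site. Outside the tracing framework the same shape reduces to this sketch, with printf standing in for the tracepoint:

#include <stdio.h>

/* One "event" parameterized by direction, two wrappers that pin it down,
 * in the shape of trace_kvm_msr() and its _read/_write macros. */
static void trace_msr(unsigned int rw, unsigned int ecx, unsigned long data)
{
        printf("msr_%s %x = 0x%lx\n", rw ? "write" : "read", ecx, data);
}

#define trace_msr_read(ecx, data)  trace_msr(0, (ecx), (data))
#define trace_msr_write(ecx, data) trace_msr(1, (ecx), (data))

int main(void)
{
        trace_msr_read(0x176, 0x1234ul);   /* msr_read 176 = 0x1234 */
        trace_msr_write(0x176, 0x5678ul);  /* msr_write 176 = 0x5678 */
        return 0;
}
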
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 29f912927a58..f3812014bd0b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -25,6 +25,7 @@
25#include <linux/highmem.h> 25#include <linux/highmem.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/ftrace_event.h>
28#include "kvm_cache_regs.h" 29#include "kvm_cache_regs.h"
29#include "x86.h" 30#include "x86.h"
30 31
@@ -34,6 +35,8 @@
34#include <asm/virtext.h> 35#include <asm/virtext.h>
35#include <asm/mce.h> 36#include <asm/mce.h>
36 37
38#include "trace.h"
39
37#define __ex(x) __kvm_handle_fault_on_reboot(x) 40#define __ex(x) __kvm_handle_fault_on_reboot(x)
38 41
39MODULE_AUTHOR("Qumranet"); 42MODULE_AUTHOR("Qumranet");
@@ -51,6 +54,10 @@ module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
51static int __read_mostly enable_ept = 1; 54static int __read_mostly enable_ept = 1;
52module_param_named(ept, enable_ept, bool, S_IRUGO); 55module_param_named(ept, enable_ept, bool, S_IRUGO);
53 56
57static int __read_mostly enable_unrestricted_guest = 1;
58module_param_named(unrestricted_guest,
59 enable_unrestricted_guest, bool, S_IRUGO);
60
54static int __read_mostly emulate_invalid_guest_state = 0; 61static int __read_mostly emulate_invalid_guest_state = 0;
55module_param(emulate_invalid_guest_state, bool, S_IRUGO); 62module_param(emulate_invalid_guest_state, bool, S_IRUGO);
56 63
@@ -84,6 +91,14 @@ struct vcpu_vmx {
84 int guest_efer_loaded; 91 int guest_efer_loaded;
85 } host_state; 92 } host_state;
86 struct { 93 struct {
94 int vm86_active;
95 u8 save_iopl;
96 struct kvm_save_segment {
97 u16 selector;
98 unsigned long base;
99 u32 limit;
100 u32 ar;
101 } tr, es, ds, fs, gs;
87 struct { 102 struct {
88 bool pending; 103 bool pending;
89 u8 vector; 104 u8 vector;
@@ -161,6 +176,8 @@ static struct kvm_vmx_segment_field {
161 VMX_SEGMENT_FIELD(LDTR), 176 VMX_SEGMENT_FIELD(LDTR),
162}; 177};
163 178
179static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
180
164/* 181/*
165 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it 182 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
166 * away by decrementing the array size. 183 * away by decrementing the array size.
@@ -256,6 +273,26 @@ static inline bool cpu_has_vmx_flexpriority(void)
256 cpu_has_vmx_virtualize_apic_accesses(); 273 cpu_has_vmx_virtualize_apic_accesses();
257} 274}
258 275
276static inline bool cpu_has_vmx_ept_execute_only(void)
277{
278 return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
279}
280
281static inline bool cpu_has_vmx_eptp_uncacheable(void)
282{
283 return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
284}
285
286static inline bool cpu_has_vmx_eptp_writeback(void)
287{
288 return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
289}
290
291static inline bool cpu_has_vmx_ept_2m_page(void)
292{
293 return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
294}
295
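Each new cpu_has_vmx_ept_*() helper is a mask test against the capability word filled in by rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, ...) further down, with !! collapsing the masked value to a clean 0 or 1. The idiom in isolation; the bit positions below are illustrative, not the architectural values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real layout comes from the VMX spec. */
#define EPT_EXECUTE_ONLY_BIT (1ull << 0)
#define EPT_2MB_PAGE_BIT     (1ull << 16)

/* Would be filled from the capability MSR on real hardware. */
static uint64_t ept_caps;

static bool has_ept_2m_page(void)
{
        return !!(ept_caps & EPT_2MB_PAGE_BIT); /* !! normalizes to 0/1 */
}

int main(void)
{
        ept_caps = EPT_2MB_PAGE_BIT;
        printf("2M pages: %s\n", has_ept_2m_page() ? "yes" : "no");
        return 0;
}
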
259static inline int cpu_has_vmx_invept_individual_addr(void) 296static inline int cpu_has_vmx_invept_individual_addr(void)
260{ 297{
261 return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT); 298 return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
@@ -277,6 +314,12 @@ static inline int cpu_has_vmx_ept(void)
277 SECONDARY_EXEC_ENABLE_EPT; 314 SECONDARY_EXEC_ENABLE_EPT;
278} 315}
279 316
317static inline int cpu_has_vmx_unrestricted_guest(void)
318{
319 return vmcs_config.cpu_based_2nd_exec_ctrl &
320 SECONDARY_EXEC_UNRESTRICTED_GUEST;
321}
322
280static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm) 323static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
281{ 324{
282 return flexpriority_enabled && 325 return flexpriority_enabled &&
@@ -497,14 +540,16 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
497 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR); 540 eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
498 if (!vcpu->fpu_active) 541 if (!vcpu->fpu_active)
499 eb |= 1u << NM_VECTOR; 542 eb |= 1u << NM_VECTOR;
543 /*
544 * Unconditionally intercept #DB so we can maintain dr6 without
545 * reading it every exit.
546 */
547 eb |= 1u << DB_VECTOR;
500 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) { 548 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
501 if (vcpu->guest_debug &
502 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
503 eb |= 1u << DB_VECTOR;
504 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) 549 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
505 eb |= 1u << BP_VECTOR; 550 eb |= 1u << BP_VECTOR;
506 } 551 }
507 if (vcpu->arch.rmode.vm86_active) 552 if (to_vmx(vcpu)->rmode.vm86_active)
508 eb = ~0; 553 eb = ~0;
509 if (enable_ept) 554 if (enable_ept)
510 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ 555 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
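The bitmap is built by OR-ing one bit per exception vector to intercept; the change above sets the #DB bit unconditionally so dr6 can be maintained without reading it on every exit, and EPT still clears the #PF bit. The computation on its own, as a reduced sketch (the real function also handles #NM and #MC):

#include <stdint.h>
#include <stdio.h>

/* x86 exception vector numbers */
#define DB_VECTOR  1
#define BP_VECTOR  3
#define UD_VECTOR  6
#define PF_VECTOR 14

static uint32_t build_exception_bitmap(int use_sw_bp, int ept)
{
        uint32_t eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);

        eb |= 1u << DB_VECTOR;            /* now intercepted unconditionally */
        if (use_sw_bp)
                eb |= 1u << BP_VECTOR;    /* guest debugger wants INT3 exits */
        if (ept)
                eb &= ~(1u << PF_VECTOR); /* EPT handles guest page faults */
        return eb;
}

int main(void)
{
        printf("eb = 0x%x\n", (unsigned)build_exception_bitmap(1, 1));
        return 0;
}
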
@@ -528,12 +573,15 @@ static void reload_tss(void)
528static void load_transition_efer(struct vcpu_vmx *vmx) 573static void load_transition_efer(struct vcpu_vmx *vmx)
529{ 574{
530 int efer_offset = vmx->msr_offset_efer; 575 int efer_offset = vmx->msr_offset_efer;
531 u64 host_efer = vmx->host_msrs[efer_offset].data; 576 u64 host_efer;
532 u64 guest_efer = vmx->guest_msrs[efer_offset].data; 577 u64 guest_efer;
533 u64 ignore_bits; 578 u64 ignore_bits;
534 579
535 if (efer_offset < 0) 580 if (efer_offset < 0)
536 return; 581 return;
582 host_efer = vmx->host_msrs[efer_offset].data;
583 guest_efer = vmx->guest_msrs[efer_offset].data;
584
537 /* 585 /*
538 * NX is emulated; LMA and LME handled by hardware; SCE meaningless 586 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
539 * outside long mode 587 * outside long mode
@@ -735,12 +783,17 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
735 783
736static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) 784static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
737{ 785{
738 return vmcs_readl(GUEST_RFLAGS); 786 unsigned long rflags;
787
788 rflags = vmcs_readl(GUEST_RFLAGS);
789 if (to_vmx(vcpu)->rmode.vm86_active)
790 rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
791 return rflags;
739} 792}
740 793
741static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) 794static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
742{ 795{
743 if (vcpu->arch.rmode.vm86_active) 796 if (to_vmx(vcpu)->rmode.vm86_active)
744 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 797 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
745 vmcs_writel(GUEST_RFLAGS, rflags); 798 vmcs_writel(GUEST_RFLAGS, rflags);
746} 799}
@@ -797,12 +850,13 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
797 intr_info |= INTR_INFO_DELIVER_CODE_MASK; 850 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
798 } 851 }
799 852
800 if (vcpu->arch.rmode.vm86_active) { 853 if (vmx->rmode.vm86_active) {
801 vmx->rmode.irq.pending = true; 854 vmx->rmode.irq.pending = true;
802 vmx->rmode.irq.vector = nr; 855 vmx->rmode.irq.vector = nr;
803 vmx->rmode.irq.rip = kvm_rip_read(vcpu); 856 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
804 if (nr == BP_VECTOR || nr == OF_VECTOR) 857 if (kvm_exception_is_soft(nr))
805 vmx->rmode.irq.rip++; 858 vmx->rmode.irq.rip +=
859 vmx->vcpu.arch.event_exit_inst_len;
806 intr_info |= INTR_TYPE_SOFT_INTR; 860 intr_info |= INTR_TYPE_SOFT_INTR;
807 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); 861 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
808 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); 862 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
@@ -940,7 +994,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
940 case MSR_EFER: 994 case MSR_EFER:
941 return kvm_get_msr_common(vcpu, msr_index, pdata); 995 return kvm_get_msr_common(vcpu, msr_index, pdata);
942#endif 996#endif
943 case MSR_IA32_TIME_STAMP_COUNTER: 997 case MSR_IA32_TSC:
944 data = guest_read_tsc(); 998 data = guest_read_tsc();
945 break; 999 break;
946 case MSR_IA32_SYSENTER_CS: 1000 case MSR_IA32_SYSENTER_CS:
@@ -953,9 +1007,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
953 data = vmcs_readl(GUEST_SYSENTER_ESP); 1007 data = vmcs_readl(GUEST_SYSENTER_ESP);
954 break; 1008 break;
955 default: 1009 default:
956 vmx_load_host_state(to_vmx(vcpu));
957 msr = find_msr_entry(to_vmx(vcpu), msr_index); 1010 msr = find_msr_entry(to_vmx(vcpu), msr_index);
958 if (msr) { 1011 if (msr) {
1012 vmx_load_host_state(to_vmx(vcpu));
959 data = msr->data; 1013 data = msr->data;
960 break; 1014 break;
961 } 1015 }
@@ -1000,22 +1054,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1000 case MSR_IA32_SYSENTER_ESP: 1054 case MSR_IA32_SYSENTER_ESP:
1001 vmcs_writel(GUEST_SYSENTER_ESP, data); 1055 vmcs_writel(GUEST_SYSENTER_ESP, data);
1002 break; 1056 break;
1003 case MSR_IA32_TIME_STAMP_COUNTER: 1057 case MSR_IA32_TSC:
1004 rdtscll(host_tsc); 1058 rdtscll(host_tsc);
1005 guest_write_tsc(data, host_tsc); 1059 guest_write_tsc(data, host_tsc);
1006 break; 1060 break;
1007 case MSR_P6_PERFCTR0:
1008 case MSR_P6_PERFCTR1:
1009 case MSR_P6_EVNTSEL0:
1010 case MSR_P6_EVNTSEL1:
1011 /*
1012 * Just discard all writes to the performance counters; this
1013 * should keep both older linux and windows 64-bit guests
1014 * happy
1015 */
1016 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
1017
1018 break;
1019 case MSR_IA32_CR_PAT: 1061 case MSR_IA32_CR_PAT:
1020 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { 1062 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
1021 vmcs_write64(GUEST_IA32_PAT, data); 1063 vmcs_write64(GUEST_IA32_PAT, data);
@@ -1024,9 +1066,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1024 } 1066 }
1025 /* Otherwise falls through to kvm_set_msr_common */ 1067 /* Otherwise falls through to kvm_set_msr_common */
1026 default: 1068 default:
1027 vmx_load_host_state(vmx);
1028 msr = find_msr_entry(vmx, msr_index); 1069 msr = find_msr_entry(vmx, msr_index);
1029 if (msr) { 1070 if (msr) {
1071 vmx_load_host_state(vmx);
1030 msr->data = data; 1072 msr->data = data;
1031 break; 1073 break;
1032 } 1074 }
@@ -1046,6 +1088,10 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1046 case VCPU_REGS_RIP: 1088 case VCPU_REGS_RIP:
1047 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); 1089 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
1048 break; 1090 break;
1091 case VCPU_EXREG_PDPTR:
1092 if (enable_ept)
1093 ept_save_pdptrs(vcpu);
1094 break;
1049 default: 1095 default:
1050 break; 1096 break;
1051 } 1097 }
@@ -1203,7 +1249,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1203 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 1249 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
1204 SECONDARY_EXEC_WBINVD_EXITING | 1250 SECONDARY_EXEC_WBINVD_EXITING |
1205 SECONDARY_EXEC_ENABLE_VPID | 1251 SECONDARY_EXEC_ENABLE_VPID |
1206 SECONDARY_EXEC_ENABLE_EPT; 1252 SECONDARY_EXEC_ENABLE_EPT |
1253 SECONDARY_EXEC_UNRESTRICTED_GUEST;
1207 if (adjust_vmx_controls(min2, opt2, 1254 if (adjust_vmx_controls(min2, opt2,
1208 MSR_IA32_VMX_PROCBASED_CTLS2, 1255 MSR_IA32_VMX_PROCBASED_CTLS2,
1209 &_cpu_based_2nd_exec_control) < 0) 1256 &_cpu_based_2nd_exec_control) < 0)
@@ -1217,12 +1264,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1217 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { 1264 if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
1218 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT 1265 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
1219 enabled */ 1266 enabled */
1220 min &= ~(CPU_BASED_CR3_LOAD_EXITING | 1267 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
1221 CPU_BASED_CR3_STORE_EXITING | 1268 CPU_BASED_CR3_STORE_EXITING |
1222 CPU_BASED_INVLPG_EXITING); 1269 CPU_BASED_INVLPG_EXITING);
1223 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
1224 &_cpu_based_exec_control) < 0)
1225 return -EIO;
1226 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, 1270 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
1227 vmx_capability.ept, vmx_capability.vpid); 1271 vmx_capability.ept, vmx_capability.vpid);
1228 } 1272 }
@@ -1333,8 +1377,13 @@ static __init int hardware_setup(void)
1333 if (!cpu_has_vmx_vpid()) 1377 if (!cpu_has_vmx_vpid())
1334 enable_vpid = 0; 1378 enable_vpid = 0;
1335 1379
1336 if (!cpu_has_vmx_ept()) 1380 if (!cpu_has_vmx_ept()) {
1337 enable_ept = 0; 1381 enable_ept = 0;
1382 enable_unrestricted_guest = 0;
1383 }
1384
1385 if (!cpu_has_vmx_unrestricted_guest())
1386 enable_unrestricted_guest = 0;
1338 1387
1339 if (!cpu_has_vmx_flexpriority()) 1388 if (!cpu_has_vmx_flexpriority())
1340 flexpriority_enabled = 0; 1389 flexpriority_enabled = 0;
@@ -1342,6 +1391,9 @@ static __init int hardware_setup(void)
1342 if (!cpu_has_vmx_tpr_shadow()) 1391 if (!cpu_has_vmx_tpr_shadow())
1343 kvm_x86_ops->update_cr8_intercept = NULL; 1392 kvm_x86_ops->update_cr8_intercept = NULL;
1344 1393
1394 if (enable_ept && !cpu_has_vmx_ept_2m_page())
1395 kvm_disable_largepages();
1396
1345 return alloc_kvm_area(); 1397 return alloc_kvm_area();
1346} 1398}
1347 1399
@@ -1372,15 +1424,15 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1372 struct vcpu_vmx *vmx = to_vmx(vcpu); 1424 struct vcpu_vmx *vmx = to_vmx(vcpu);
1373 1425
1374 vmx->emulation_required = 1; 1426 vmx->emulation_required = 1;
1375 vcpu->arch.rmode.vm86_active = 0; 1427 vmx->rmode.vm86_active = 0;
1376 1428
1377 vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base); 1429 vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
1378 vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit); 1430 vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
1379 vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar); 1431 vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1380 1432
1381 flags = vmcs_readl(GUEST_RFLAGS); 1433 flags = vmcs_readl(GUEST_RFLAGS);
1382 flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM); 1434 flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1383 flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT); 1435 flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
1384 vmcs_writel(GUEST_RFLAGS, flags); 1436 vmcs_writel(GUEST_RFLAGS, flags);
1385 1437
1386 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | 1438 vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1391,10 +1443,10 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
1391 if (emulate_invalid_guest_state) 1443 if (emulate_invalid_guest_state)
1392 return; 1444 return;
1393 1445
1394 fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es); 1446 fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
1395 fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); 1447 fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
1396 fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); 1448 fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
1397 fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); 1449 fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
1398 1450
1399 vmcs_write16(GUEST_SS_SELECTOR, 0); 1451 vmcs_write16(GUEST_SS_SELECTOR, 0);
1400 vmcs_write32(GUEST_SS_AR_BYTES, 0x93); 1452 vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
@@ -1433,20 +1485,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1433 unsigned long flags; 1485 unsigned long flags;
1434 struct vcpu_vmx *vmx = to_vmx(vcpu); 1486 struct vcpu_vmx *vmx = to_vmx(vcpu);
1435 1487
1488 if (enable_unrestricted_guest)
1489 return;
1490
1436 vmx->emulation_required = 1; 1491 vmx->emulation_required = 1;
1437 vcpu->arch.rmode.vm86_active = 1; 1492 vmx->rmode.vm86_active = 1;
1438 1493
1439 vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE); 1494 vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
1440 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); 1495 vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1441 1496
1442 vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); 1497 vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
1443 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); 1498 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1444 1499
1445 vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); 1500 vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
1446 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); 1501 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1447 1502
1448 flags = vmcs_readl(GUEST_RFLAGS); 1503 flags = vmcs_readl(GUEST_RFLAGS);
1449 vcpu->arch.rmode.save_iopl 1504 vmx->rmode.save_iopl
1450 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; 1505 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1451 1506
1452 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; 1507 flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
@@ -1468,10 +1523,10 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
1468 vmcs_writel(GUEST_CS_BASE, 0xf0000); 1523 vmcs_writel(GUEST_CS_BASE, 0xf0000);
1469 vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); 1524 vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1470 1525
1471 fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es); 1526 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
1472 fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds); 1527 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
1473 fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs); 1528 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
1474 fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs); 1529 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
1475 1530
1476continue_rmode: 1531continue_rmode:
1477 kvm_mmu_reset_context(vcpu); 1532 kvm_mmu_reset_context(vcpu);
@@ -1545,11 +1600,11 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1545 1600
1546static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 1601static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1547{ 1602{
1603 if (!test_bit(VCPU_EXREG_PDPTR,
1604 (unsigned long *)&vcpu->arch.regs_dirty))
1605 return;
1606
1548 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 1607 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1549 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
1550 printk(KERN_ERR "EPT: Fail to load pdptrs!\n");
1551 return;
1552 }
1553 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]); 1608 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
1554 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]); 1609 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
1555 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]); 1610 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
@@ -1557,6 +1612,21 @@ static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1557 } 1612 }
1558} 1613}
1559 1614
1615static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
1616{
1617 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1618 vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
1619 vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
1620 vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
1621 vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
1622 }
1623
1624 __set_bit(VCPU_EXREG_PDPTR,
1625 (unsigned long *)&vcpu->arch.regs_avail);
1626 __set_bit(VCPU_EXREG_PDPTR,
1627 (unsigned long *)&vcpu->arch.regs_dirty);
1628}
1629
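ept_save_pdptrs() and the VCPU_EXREG_PDPTR case in vmx_cache_reg() turn the PDPTRs into a lazily cached register: the avail bit says the cache is current, and the dirty bit forces a writeback (ept_load_pdptrs() now returns early unless it is set). The general avail/dirty pattern, reduced to a userspace sketch:

#include <stdint.h>
#include <stdio.h>

enum { REG_PDPTR = 0 };                 /* illustrative register index */

static uint32_t regs_avail, regs_dirty;
static uint64_t pdptr_cache[4];
static int vmcs_reads;                  /* counts simulated VMCS accesses */

static uint64_t vmcs_read(int i)
{
        vmcs_reads++;
        return 0x1000ull * (i + 1);     /* dummy PDPTR value */
}

static void cache_reg(int reg)
{
        int i;

        if (regs_avail & (1u << reg))   /* already cached: nothing to do */
                return;
        for (i = 0; i < 4; i++)
                pdptr_cache[i] = vmcs_read(i);
        regs_avail |= 1u << reg;
        regs_dirty |= 1u << reg;        /* write back before the next entry */
}

int main(void)
{
        cache_reg(REG_PDPTR);
        cache_reg(REG_PDPTR);           /* second call hits the cache */
        printf("vmcs reads: %d\n", vmcs_reads); /* prints 4, not 8 */
        return 0;
}
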
1560static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); 1630static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1561 1631
1562static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, 1632static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -1571,8 +1641,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1571 CPU_BASED_CR3_STORE_EXITING)); 1641 CPU_BASED_CR3_STORE_EXITING));
1572 vcpu->arch.cr0 = cr0; 1642 vcpu->arch.cr0 = cr0;
1573 vmx_set_cr4(vcpu, vcpu->arch.cr4); 1643 vmx_set_cr4(vcpu, vcpu->arch.cr4);
1574 *hw_cr0 |= X86_CR0_PE | X86_CR0_PG;
1575 *hw_cr0 &= ~X86_CR0_WP;
1576 } else if (!is_paging(vcpu)) { 1644 } else if (!is_paging(vcpu)) {
1577 /* From nonpaging to paging */ 1645 /* From nonpaging to paging */
1578 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 1646 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1581,9 +1649,10 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1581 CPU_BASED_CR3_STORE_EXITING)); 1649 CPU_BASED_CR3_STORE_EXITING));
1582 vcpu->arch.cr0 = cr0; 1650 vcpu->arch.cr0 = cr0;
1583 vmx_set_cr4(vcpu, vcpu->arch.cr4); 1651 vmx_set_cr4(vcpu, vcpu->arch.cr4);
1584 if (!(vcpu->arch.cr0 & X86_CR0_WP))
1585 *hw_cr0 &= ~X86_CR0_WP;
1586 } 1652 }
1653
1654 if (!(cr0 & X86_CR0_WP))
1655 *hw_cr0 &= ~X86_CR0_WP;
1587} 1656}
1588 1657
1589static void ept_update_paging_mode_cr4(unsigned long *hw_cr4, 1658static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
@@ -1598,15 +1667,21 @@ static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
1598 1667
1599static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) 1668static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1600{ 1669{
1601 unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | 1670 struct vcpu_vmx *vmx = to_vmx(vcpu);
1602 KVM_VM_CR0_ALWAYS_ON; 1671 unsigned long hw_cr0;
1672
1673 if (enable_unrestricted_guest)
1674 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
1675 | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
1676 else
1677 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
1603 1678
1604 vmx_fpu_deactivate(vcpu); 1679 vmx_fpu_deactivate(vcpu);
1605 1680
1606 if (vcpu->arch.rmode.vm86_active && (cr0 & X86_CR0_PE)) 1681 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
1607 enter_pmode(vcpu); 1682 enter_pmode(vcpu);
1608 1683
1609 if (!vcpu->arch.rmode.vm86_active && !(cr0 & X86_CR0_PE)) 1684 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
1610 enter_rmode(vcpu); 1685 enter_rmode(vcpu);
1611 1686
1612#ifdef CONFIG_X86_64 1687#ifdef CONFIG_X86_64
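Both branches of the new hw_cr0 computation in vmx_set_cr0 above have the same shape: strip the CR0 bits the hypervisor reserves for itself, then OR in the bits that must stay set in the hardware register no matter what the guest wrote; the unrestricted-guest variant simply uses smaller masks, since such a guest may genuinely run unpaged. A sketch with illustrative values (the real KVM_GUEST_CR0_MASK* and KVM_VM_CR0_ALWAYS_ON* constants are defined elsewhere in vmx.c and differ from these):

	/* Illustrative masks only; not the values vmx.c uses. */
	#define CR0_BITS_STRIPPED	0x00000008UL	/* e.g. CR0.TS       */
	#define CR0_BITS_FORCED_ON	0x80000021UL	/* e.g. PG | NE | PE */

	static unsigned long compute_hw_cr0(unsigned long guest_cr0)
	{
		return (guest_cr0 & ~CR0_BITS_STRIPPED) | CR0_BITS_FORCED_ON;
	}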
@@ -1650,10 +1725,8 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1650 if (enable_ept) { 1725 if (enable_ept) {
1651 eptp = construct_eptp(cr3); 1726 eptp = construct_eptp(cr3);
1652 vmcs_write64(EPT_POINTER, eptp); 1727 vmcs_write64(EPT_POINTER, eptp);
1653 ept_sync_context(eptp);
1654 ept_load_pdptrs(vcpu);
1655 guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : 1728 guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
1656 VMX_EPT_IDENTITY_PAGETABLE_ADDR; 1729 vcpu->kvm->arch.ept_identity_map_addr;
1657 } 1730 }
1658 1731
1659 vmx_flush_tlb(vcpu); 1732 vmx_flush_tlb(vcpu);
@@ -1664,7 +1737,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1664 1737
1665static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) 1738static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1666{ 1739{
1667 unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.vm86_active ? 1740 unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
1668 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); 1741 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
1669 1742
1670 vcpu->arch.cr4 = cr4; 1743 vcpu->arch.cr4 = cr4;
@@ -1707,16 +1780,13 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
1707 1780
1708static int vmx_get_cpl(struct kvm_vcpu *vcpu) 1781static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1709{ 1782{
1710 struct kvm_segment kvm_seg;
1711
1712 if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */ 1783 if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1713 return 0; 1784 return 0;
1714 1785
1715 if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ 1786 if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1716 return 3; 1787 return 3;
1717 1788
1718 vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS); 1789 return vmcs_read16(GUEST_CS_SELECTOR) & 3;
1719 return kvm_seg.selector & 3;
1720} 1790}
1721 1791
1722static u32 vmx_segment_access_rights(struct kvm_segment *var) 1792static u32 vmx_segment_access_rights(struct kvm_segment *var)
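The rewritten vmx_get_cpl above exploits the fact that, outside real mode and vm86 mode, the current privilege level is simply the RPL field in bits 1:0 of the CS selector, so reading GUEST_CS_SELECTOR directly avoids filling in a whole struct kvm_segment. A standalone illustration:

	/* CPL extraction from a CS selector value (illustrative). */
	#include <stdio.h>

	int main(void)
	{
		unsigned short cs = 0x33;	/* selector with RPL = 3 */
		printf("CPL = %d\n", cs & 3);	/* -> CPL = 3 */
		return 0;
	}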
@@ -1744,20 +1814,21 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
1744static void vmx_set_segment(struct kvm_vcpu *vcpu, 1814static void vmx_set_segment(struct kvm_vcpu *vcpu,
1745 struct kvm_segment *var, int seg) 1815 struct kvm_segment *var, int seg)
1746{ 1816{
1817 struct vcpu_vmx *vmx = to_vmx(vcpu);
1747 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 1818 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1748 u32 ar; 1819 u32 ar;
1749 1820
1750 if (vcpu->arch.rmode.vm86_active && seg == VCPU_SREG_TR) { 1821 if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
1751 vcpu->arch.rmode.tr.selector = var->selector; 1822 vmx->rmode.tr.selector = var->selector;
1752 vcpu->arch.rmode.tr.base = var->base; 1823 vmx->rmode.tr.base = var->base;
1753 vcpu->arch.rmode.tr.limit = var->limit; 1824 vmx->rmode.tr.limit = var->limit;
1754 vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var); 1825 vmx->rmode.tr.ar = vmx_segment_access_rights(var);
1755 return; 1826 return;
1756 } 1827 }
1757 vmcs_writel(sf->base, var->base); 1828 vmcs_writel(sf->base, var->base);
1758 vmcs_write32(sf->limit, var->limit); 1829 vmcs_write32(sf->limit, var->limit);
1759 vmcs_write16(sf->selector, var->selector); 1830 vmcs_write16(sf->selector, var->selector);
1760 if (vcpu->arch.rmode.vm86_active && var->s) { 1831 if (vmx->rmode.vm86_active && var->s) {
1761 /* 1832 /*
1762 * Hack real-mode segments into vm86 compatibility. 1833 * Hack real-mode segments into vm86 compatibility.
1763 */ 1834 */
@@ -1766,6 +1837,21 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
1766 ar = 0xf3; 1837 ar = 0xf3;
1767 } else 1838 } else
1768 ar = vmx_segment_access_rights(var); 1839 ar = vmx_segment_access_rights(var);
1840
1841 /*
1842 * Fix the "Accessed" bit in AR field of segment registers for older
1843 * qemu binaries.
 1844 * The IA32 architecture specifies that at the time of processor reset
 1845 * the "Accessed" bit in the AR field of segment registers is 1, but qemu
 1846 * sets it to 0 in its userland code. This causes an invalid-guest-state
 1847 * vmexit when "unrestricted guest" mode is turned on.
 1848 * A fix for this setup issue in cpu_reset is being pushed into the qemu
 1849 * tree. Newer qemu binaries with that fix will not need this
 1850 * kvm hack.
1851 */
1852 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
1853 ar |= 0x1; /* Accessed */
1854
1769 vmcs_write32(sf->ar_bytes, ar); 1855 vmcs_write32(sf->ar_bytes, ar);
1770} 1856}
1771 1857
@@ -2040,7 +2126,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
2040 if (likely(kvm->arch.ept_identity_pagetable_done)) 2126 if (likely(kvm->arch.ept_identity_pagetable_done))
2041 return 1; 2127 return 1;
2042 ret = 0; 2128 ret = 0;
2043 identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT; 2129 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
2044 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); 2130 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2045 if (r < 0) 2131 if (r < 0)
2046 goto out; 2132 goto out;
@@ -2062,11 +2148,19 @@ out:
2062static void seg_setup(int seg) 2148static void seg_setup(int seg)
2063{ 2149{
2064 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; 2150 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2151 unsigned int ar;
2065 2152
2066 vmcs_write16(sf->selector, 0); 2153 vmcs_write16(sf->selector, 0);
2067 vmcs_writel(sf->base, 0); 2154 vmcs_writel(sf->base, 0);
2068 vmcs_write32(sf->limit, 0xffff); 2155 vmcs_write32(sf->limit, 0xffff);
2069 vmcs_write32(sf->ar_bytes, 0xf3); 2156 if (enable_unrestricted_guest) {
2157 ar = 0x93;
2158 if (seg == VCPU_SREG_CS)
2159 ar |= 0x08; /* code segment */
2160 } else
2161 ar = 0xf3;
2162
2163 vmcs_write32(sf->ar_bytes, ar);
2070} 2164}
2071 2165
2072static int alloc_apic_access_page(struct kvm *kvm) 2166static int alloc_apic_access_page(struct kvm *kvm)
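The access-rights constants used by vmx_set_segment and seg_setup above follow the descriptor AR-byte layout: bit 7 = present, bits 6:5 = DPL, bit 4 = S, bit 3 = code/data, bit 0 = accessed. So 0xf3 is a present, DPL-3, accessed data segment; 0x93 is its DPL-0 variant; OR-ing 0x08 turns it into a code segment (0x9b); and the qemu workaround's `ar |= 0x1` merely forces the accessed bit. A small decoder:

	/* Decode the low byte of a VMCS guest AR value (illustrative). */
	#include <stdio.h>

	static void decode_ar(unsigned ar)
	{
		printf("0x%02x: present=%u dpl=%u s=%u %s accessed=%u\n",
		       ar, (ar >> 7) & 1, (ar >> 5) & 3, (ar >> 4) & 1,
		       (ar & 0x8) ? "code" : "data", ar & 1);
	}

	int main(void)
	{
		decode_ar(0xf3);	/* legacy seg_setup default        */
		decode_ar(0x93);	/* unrestricted guest, data        */
		decode_ar(0x93 | 0x08);	/* unrestricted guest, code (0x9b) */
		return 0;
	}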
@@ -2101,14 +2195,15 @@ static int alloc_identity_pagetable(struct kvm *kvm)
2101 goto out; 2195 goto out;
2102 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; 2196 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2103 kvm_userspace_mem.flags = 0; 2197 kvm_userspace_mem.flags = 0;
2104 kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; 2198 kvm_userspace_mem.guest_phys_addr =
2199 kvm->arch.ept_identity_map_addr;
2105 kvm_userspace_mem.memory_size = PAGE_SIZE; 2200 kvm_userspace_mem.memory_size = PAGE_SIZE;
2106 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); 2201 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2107 if (r) 2202 if (r)
2108 goto out; 2203 goto out;
2109 2204
2110 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, 2205 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
2111 VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT); 2206 kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
2112out: 2207out:
2113 up_write(&kvm->slots_lock); 2208 up_write(&kvm->slots_lock);
2114 return r; 2209 return r;
@@ -2209,6 +2304,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2209 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; 2304 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
2210 if (!enable_ept) 2305 if (!enable_ept)
2211 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; 2306 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
2307 if (!enable_unrestricted_guest)
2308 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2212 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); 2309 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2213 } 2310 }
2214 2311
@@ -2326,14 +2423,14 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2326 goto out; 2423 goto out;
2327 } 2424 }
2328 2425
2329 vmx->vcpu.arch.rmode.vm86_active = 0; 2426 vmx->rmode.vm86_active = 0;
2330 2427
2331 vmx->soft_vnmi_blocked = 0; 2428 vmx->soft_vnmi_blocked = 0;
2332 2429
2333 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); 2430 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2334 kvm_set_cr8(&vmx->vcpu, 0); 2431 kvm_set_cr8(&vmx->vcpu, 0);
2335 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; 2432 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
2336 if (vmx->vcpu.vcpu_id == 0) 2433 if (kvm_vcpu_is_bsp(&vmx->vcpu))
2337 msr |= MSR_IA32_APICBASE_BSP; 2434 msr |= MSR_IA32_APICBASE_BSP;
2338 kvm_set_apic_base(&vmx->vcpu, msr); 2435 kvm_set_apic_base(&vmx->vcpu, msr);
2339 2436
@@ -2344,7 +2441,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2344 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode 2441 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2345 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. 2442 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
2346 */ 2443 */
2347 if (vmx->vcpu.vcpu_id == 0) { 2444 if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
2348 vmcs_write16(GUEST_CS_SELECTOR, 0xf000); 2445 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2349 vmcs_writel(GUEST_CS_BASE, 0x000f0000); 2446 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2350 } else { 2447 } else {
@@ -2373,7 +2470,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2373 vmcs_writel(GUEST_SYSENTER_EIP, 0); 2470 vmcs_writel(GUEST_SYSENTER_EIP, 0);
2374 2471
2375 vmcs_writel(GUEST_RFLAGS, 0x02); 2472 vmcs_writel(GUEST_RFLAGS, 0x02);
2376 if (vmx->vcpu.vcpu_id == 0) 2473 if (kvm_vcpu_is_bsp(&vmx->vcpu))
2377 kvm_rip_write(vcpu, 0xfff0); 2474 kvm_rip_write(vcpu, 0xfff0);
2378 else 2475 else
2379 kvm_rip_write(vcpu, 0); 2476 kvm_rip_write(vcpu, 0);
@@ -2461,13 +2558,16 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
2461 uint32_t intr; 2558 uint32_t intr;
2462 int irq = vcpu->arch.interrupt.nr; 2559 int irq = vcpu->arch.interrupt.nr;
2463 2560
2464 KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler); 2561 trace_kvm_inj_virq(irq);
2465 2562
2466 ++vcpu->stat.irq_injections; 2563 ++vcpu->stat.irq_injections;
2467 if (vcpu->arch.rmode.vm86_active) { 2564 if (vmx->rmode.vm86_active) {
2468 vmx->rmode.irq.pending = true; 2565 vmx->rmode.irq.pending = true;
2469 vmx->rmode.irq.vector = irq; 2566 vmx->rmode.irq.vector = irq;
2470 vmx->rmode.irq.rip = kvm_rip_read(vcpu); 2567 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2568 if (vcpu->arch.interrupt.soft)
2569 vmx->rmode.irq.rip +=
2570 vmx->vcpu.arch.event_exit_inst_len;
2471 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 2571 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2472 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK); 2572 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2473 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1); 2573 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
@@ -2502,7 +2602,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2502 } 2602 }
2503 2603
2504 ++vcpu->stat.nmi_injections; 2604 ++vcpu->stat.nmi_injections;
2505 if (vcpu->arch.rmode.vm86_active) { 2605 if (vmx->rmode.vm86_active) {
2506 vmx->rmode.irq.pending = true; 2606 vmx->rmode.irq.pending = true;
2507 vmx->rmode.irq.vector = NMI_VECTOR; 2607 vmx->rmode.irq.vector = NMI_VECTOR;
2508 vmx->rmode.irq.rip = kvm_rip_read(vcpu); 2608 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
@@ -2659,14 +2759,14 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2659 if (enable_ept) 2759 if (enable_ept)
2660 BUG(); 2760 BUG();
2661 cr2 = vmcs_readl(EXIT_QUALIFICATION); 2761 cr2 = vmcs_readl(EXIT_QUALIFICATION);
2662 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, 2762 trace_kvm_page_fault(cr2, error_code);
2663 (u32)((u64)cr2 >> 32), handler); 2763
2664 if (kvm_event_needs_reinjection(vcpu)) 2764 if (kvm_event_needs_reinjection(vcpu))
2665 kvm_mmu_unprotect_page_virt(vcpu, cr2); 2765 kvm_mmu_unprotect_page_virt(vcpu, cr2);
2666 return kvm_mmu_page_fault(vcpu, cr2, error_code); 2766 return kvm_mmu_page_fault(vcpu, cr2, error_code);
2667 } 2767 }
2668 2768
2669 if (vcpu->arch.rmode.vm86_active && 2769 if (vmx->rmode.vm86_active &&
2670 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, 2770 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
2671 error_code)) { 2771 error_code)) {
2672 if (vcpu->arch.halt_request) { 2772 if (vcpu->arch.halt_request) {
@@ -2707,7 +2807,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
2707 struct kvm_run *kvm_run) 2807 struct kvm_run *kvm_run)
2708{ 2808{
2709 ++vcpu->stat.irq_exits; 2809 ++vcpu->stat.irq_exits;
2710 KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
2711 return 1; 2810 return 1;
2712} 2811}
2713 2812
@@ -2755,7 +2854,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2755 2854
2756static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 2855static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2757{ 2856{
2758 unsigned long exit_qualification; 2857 unsigned long exit_qualification, val;
2759 int cr; 2858 int cr;
2760 int reg; 2859 int reg;
2761 2860
@@ -2764,21 +2863,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2764 reg = (exit_qualification >> 8) & 15; 2863 reg = (exit_qualification >> 8) & 15;
2765 switch ((exit_qualification >> 4) & 3) { 2864 switch ((exit_qualification >> 4) & 3) {
2766 case 0: /* mov to cr */ 2865 case 0: /* mov to cr */
2767 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, 2866 val = kvm_register_read(vcpu, reg);
2768 (u32)kvm_register_read(vcpu, reg), 2867 trace_kvm_cr_write(cr, val);
2769 (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
2770 handler);
2771 switch (cr) { 2868 switch (cr) {
2772 case 0: 2869 case 0:
2773 kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg)); 2870 kvm_set_cr0(vcpu, val);
2774 skip_emulated_instruction(vcpu); 2871 skip_emulated_instruction(vcpu);
2775 return 1; 2872 return 1;
2776 case 3: 2873 case 3:
2777 kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg)); 2874 kvm_set_cr3(vcpu, val);
2778 skip_emulated_instruction(vcpu); 2875 skip_emulated_instruction(vcpu);
2779 return 1; 2876 return 1;
2780 case 4: 2877 case 4:
2781 kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg)); 2878 kvm_set_cr4(vcpu, val);
2782 skip_emulated_instruction(vcpu); 2879 skip_emulated_instruction(vcpu);
2783 return 1; 2880 return 1;
2784 case 8: { 2881 case 8: {
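handle_cr above decodes the VMX CR-access exit qualification as: bits 3:0 = control register number, bits 5:4 = access type (0 mov-to-cr, 1 mov-from-cr, 2 clts, 3 lmsw), and bits 11:8 = the general-purpose register involved. A worked example of that decoding:

	/* Decode a CR-access exit qualification (layout as used above). */
	#include <stdio.h>

	int main(void)
	{
		unsigned long qual = 0x304;	/* example value */
		printf("cr%lu type=%lu reg=%lu\n",
		       qual & 15,		/* -> cr4        */
		       (qual >> 4) & 3,		/* -> mov to cr  */
		       (qual >> 8) & 15);	/* -> RBX        */
		return 0;
	}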
@@ -2800,23 +2897,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2800 vcpu->arch.cr0 &= ~X86_CR0_TS; 2897 vcpu->arch.cr0 &= ~X86_CR0_TS;
2801 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); 2898 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
2802 vmx_fpu_activate(vcpu); 2899 vmx_fpu_activate(vcpu);
2803 KVMTRACE_0D(CLTS, vcpu, handler);
2804 skip_emulated_instruction(vcpu); 2900 skip_emulated_instruction(vcpu);
2805 return 1; 2901 return 1;
2806 case 1: /*mov from cr*/ 2902 case 1: /*mov from cr*/
2807 switch (cr) { 2903 switch (cr) {
2808 case 3: 2904 case 3:
2809 kvm_register_write(vcpu, reg, vcpu->arch.cr3); 2905 kvm_register_write(vcpu, reg, vcpu->arch.cr3);
2810 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, 2906 trace_kvm_cr_read(cr, vcpu->arch.cr3);
2811 (u32)kvm_register_read(vcpu, reg),
2812 (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
2813 handler);
2814 skip_emulated_instruction(vcpu); 2907 skip_emulated_instruction(vcpu);
2815 return 1; 2908 return 1;
2816 case 8: 2909 case 8:
2817 kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu)); 2910 val = kvm_get_cr8(vcpu);
2818 KVMTRACE_2D(CR_READ, vcpu, (u32)cr, 2911 kvm_register_write(vcpu, reg, val);
2819 (u32)kvm_register_read(vcpu, reg), handler); 2912 trace_kvm_cr_read(cr, val);
2820 skip_emulated_instruction(vcpu); 2913 skip_emulated_instruction(vcpu);
2821 return 1; 2914 return 1;
2822 } 2915 }
@@ -2841,6 +2934,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2841 unsigned long val; 2934 unsigned long val;
2842 int dr, reg; 2935 int dr, reg;
2843 2936
2937 if (!kvm_require_cpl(vcpu, 0))
2938 return 1;
2844 dr = vmcs_readl(GUEST_DR7); 2939 dr = vmcs_readl(GUEST_DR7);
2845 if (dr & DR7_GD) { 2940 if (dr & DR7_GD) {
2846 /* 2941 /*
@@ -2884,7 +2979,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2884 val = 0; 2979 val = 0;
2885 } 2980 }
2886 kvm_register_write(vcpu, reg, val); 2981 kvm_register_write(vcpu, reg, val);
2887 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
2888 } else { 2982 } else {
2889 val = vcpu->arch.regs[reg]; 2983 val = vcpu->arch.regs[reg];
2890 switch (dr) { 2984 switch (dr) {
@@ -2917,7 +3011,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2917 } 3011 }
2918 break; 3012 break;
2919 } 3013 }
2920 KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
2921 } 3014 }
2922 skip_emulated_instruction(vcpu); 3015 skip_emulated_instruction(vcpu);
2923 return 1; 3016 return 1;
@@ -2939,8 +3032,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2939 return 1; 3032 return 1;
2940 } 3033 }
2941 3034
2942 KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32), 3035 trace_kvm_msr_read(ecx, data);
2943 handler);
2944 3036
2945 /* FIXME: handling of bits 32:63 of rax, rdx */ 3037 /* FIXME: handling of bits 32:63 of rax, rdx */
2946 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; 3038 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
@@ -2955,8 +3047,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2955 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) 3047 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
2956 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); 3048 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2957 3049
2958 KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32), 3050 trace_kvm_msr_write(ecx, data);
2959 handler);
2960 3051
2961 if (vmx_set_msr(vcpu, ecx, data) != 0) { 3052 if (vmx_set_msr(vcpu, ecx, data) != 0) {
2962 kvm_inject_gp(vcpu, 0); 3053 kvm_inject_gp(vcpu, 0);
@@ -2983,7 +3074,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2983 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; 3074 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2984 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); 3075 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2985 3076
2986 KVMTRACE_0D(PEND_INTR, vcpu, handler);
2987 ++vcpu->stat.irq_window_exits; 3077 ++vcpu->stat.irq_window_exits;
2988 3078
2989 /* 3079 /*
@@ -3049,7 +3139,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3049 printk(KERN_ERR 3139 printk(KERN_ERR
3050 "Fail to handle apic access vmexit! Offset is 0x%lx\n", 3140 "Fail to handle apic access vmexit! Offset is 0x%lx\n",
3051 offset); 3141 offset);
3052 return -ENOTSUPP; 3142 return -ENOEXEC;
3053 } 3143 }
3054 return 1; 3144 return 1;
3055} 3145}
@@ -3118,7 +3208,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3118 3208
3119 if (exit_qualification & (1 << 6)) { 3209 if (exit_qualification & (1 << 6)) {
3120 printk(KERN_ERR "EPT: GPA exceeds GAW!\n"); 3210 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
3121 return -ENOTSUPP; 3211 return -EINVAL;
3122 } 3212 }
3123 3213
3124 gla_validity = (exit_qualification >> 7) & 0x3; 3214 gla_validity = (exit_qualification >> 7) & 0x3;
@@ -3130,14 +3220,98 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3130 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", 3220 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3131 (long unsigned int)exit_qualification); 3221 (long unsigned int)exit_qualification);
3132 kvm_run->exit_reason = KVM_EXIT_UNKNOWN; 3222 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3133 kvm_run->hw.hardware_exit_reason = 0; 3223 kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
3134 return -ENOTSUPP; 3224 return 0;
3135 } 3225 }
3136 3226
3137 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); 3227 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3228 trace_kvm_page_fault(gpa, exit_qualification);
3138 return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0); 3229 return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
3139} 3230}
3140 3231
3232static u64 ept_rsvd_mask(u64 spte, int level)
3233{
3234 int i;
3235 u64 mask = 0;
3236
3237 for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
3238 mask |= (1ULL << i);
3239
3240 if (level > 2)
3241 /* bits 7:3 reserved */
3242 mask |= 0xf8;
3243 else if (level == 2) {
3244 if (spte & (1ULL << 7))
 3245 /* 2MB page, bits 20:12 reserved */
3246 mask |= 0x1ff000;
3247 else
3248 /* bits 6:3 reserved */
3249 mask |= 0x78;
3250 }
3251
3252 return mask;
3253}
3254
3255static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
3256 int level)
3257{
3258 printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
3259
3260 /* 010b (write-only) */
3261 WARN_ON((spte & 0x7) == 0x2);
3262
3263 /* 110b (write/execute) */
3264 WARN_ON((spte & 0x7) == 0x6);
3265
3266 /* 100b (execute-only) and value not supported by logical processor */
3267 if (!cpu_has_vmx_ept_execute_only())
3268 WARN_ON((spte & 0x7) == 0x4);
3269
3270 /* not 000b */
3271 if ((spte & 0x7)) {
3272 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
3273
3274 if (rsvd_bits != 0) {
3275 printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
3276 __func__, rsvd_bits);
3277 WARN_ON(1);
3278 }
3279
3280 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
3281 u64 ept_mem_type = (spte & 0x38) >> 3;
3282
3283 if (ept_mem_type == 2 || ept_mem_type == 3 ||
3284 ept_mem_type == 7) {
3285 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
3286 __func__, ept_mem_type);
3287 WARN_ON(1);
3288 }
3289 }
3290 }
3291}
3292
3293static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3294{
3295 u64 sptes[4];
3296 int nr_sptes, i;
3297 gpa_t gpa;
3298
3299 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3300
3301 printk(KERN_ERR "EPT: Misconfiguration.\n");
3302 printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
3303
3304 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
3305
3306 for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
3307 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
3308
3309 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3310 kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
3311
3312 return 0;
3313}
3314
3141static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 3315static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3142{ 3316{
3143 u32 cpu_based_vm_exec_control; 3317 u32 cpu_based_vm_exec_control;
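As a worked example of ept_rsvd_mask above: with boot_cpu_data.x86_phys_bits at 40, the loop reserves bits 51:41, and a level-2 entry with bit 7 set (a 2MB mapping) additionally reserves bits 20:12, so the mask is 0xffe00001ff000. A standalone recomputation (phys_bits hard-coded instead of read from boot_cpu_data):

	#include <stdio.h>

	int main(void)
	{
		int phys_bits = 40, level = 2, i;
		unsigned long long mask = 0, spte = 1ULL << 7; /* 2MB bit */

		for (i = 51; i > phys_bits; i--)
			mask |= 1ULL << i;
		if (level == 2 && (spte & (1ULL << 7)))
			mask |= 0x1ff000;	/* bits 20:12 reserved */
		printf("mask = 0x%llx\n", mask);	/* 0xffe00001ff000 */
		return 0;
	}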
@@ -3217,8 +3391,9 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
3217 [EXIT_REASON_APIC_ACCESS] = handle_apic_access, 3391 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
3218 [EXIT_REASON_WBINVD] = handle_wbinvd, 3392 [EXIT_REASON_WBINVD] = handle_wbinvd,
3219 [EXIT_REASON_TASK_SWITCH] = handle_task_switch, 3393 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
3220 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
3221 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, 3394 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
3395 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
3396 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
3222}; 3397};
3223 3398
3224static const int kvm_vmx_max_exit_handlers = 3399static const int kvm_vmx_max_exit_handlers =
@@ -3234,8 +3409,7 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3234 u32 exit_reason = vmx->exit_reason; 3409 u32 exit_reason = vmx->exit_reason;
3235 u32 vectoring_info = vmx->idt_vectoring_info; 3410 u32 vectoring_info = vmx->idt_vectoring_info;
3236 3411
3237 KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu), 3412 trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
3238 (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
3239 3413
 3240 /* If we need to emulate an MMIO from handle_invalid_guest_state, 3414
 3241 * we just return 0. */ 3415
@@ -3247,10 +3421,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3247 3421
 3248 /* Accesses to CR3 don't cause a VMExit in paging mode, so we 3422
 3249 * need to sync with the guest's real CR3. */ 3423
3250 if (enable_ept && is_paging(vcpu)) { 3424 if (enable_ept && is_paging(vcpu))
3251 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); 3425 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3252 ept_load_pdptrs(vcpu);
3253 }
3254 3426
3255 if (unlikely(vmx->fail)) { 3427 if (unlikely(vmx->fail)) {
3256 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; 3428 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -3326,10 +3498,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3326 3498
3327 /* We need to handle NMIs before interrupts are enabled */ 3499 /* We need to handle NMIs before interrupts are enabled */
3328 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && 3500 if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
3329 (exit_intr_info & INTR_INFO_VALID_MASK)) { 3501 (exit_intr_info & INTR_INFO_VALID_MASK))
3330 KVMTRACE_0D(NMI, &vmx->vcpu, handler);
3331 asm("int $2"); 3502 asm("int $2");
3332 }
3333 3503
3334 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; 3504 idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3335 3505
@@ -3434,6 +3604,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3434{ 3604{
3435 struct vcpu_vmx *vmx = to_vmx(vcpu); 3605 struct vcpu_vmx *vmx = to_vmx(vcpu);
3436 3606
3607 if (enable_ept && is_paging(vcpu)) {
3608 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3609 ept_load_pdptrs(vcpu);
3610 }
3437 /* Record the guest's net vcpu time for enforced NMI injections. */ 3611 /* Record the guest's net vcpu time for enforced NMI injections. */
3438 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) 3612 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
3439 vmx->entry_time = ktime_get(); 3613 vmx->entry_time = ktime_get();
@@ -3449,12 +3623,21 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3449 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) 3623 if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
3450 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); 3624 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
3451 3625
 3626 /* When single-stepping over STI and MOV SS, we must clear the
 3627 * corresponding interruptibility bits in the guest state. Otherwise
 3628 * vmentry fails, as it then expects bit 14 (BS) of the pending debug
 3629 * exceptions field to be set, but that is not correct for the guest
 3630 * debugging case. */
3631 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3632 vmx_set_interrupt_shadow(vcpu, 0);
3633
3452 /* 3634 /*
3453 * Loading guest fpu may have cleared host cr0.ts 3635 * Loading guest fpu may have cleared host cr0.ts
3454 */ 3636 */
3455 vmcs_writel(HOST_CR0, read_cr0()); 3637 vmcs_writel(HOST_CR0, read_cr0());
3456 3638
3457 set_debugreg(vcpu->arch.dr6, 6); 3639 if (vcpu->arch.switch_db_regs)
3640 set_debugreg(vcpu->arch.dr6, 6);
3458 3641
3459 asm( 3642 asm(
3460 /* Store host registers */ 3643 /* Store host registers */
@@ -3465,11 +3648,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3465 "mov %%"R"sp, %c[host_rsp](%0) \n\t" 3648 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
3466 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" 3649 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
3467 "1: \n\t" 3650 "1: \n\t"
3651 /* Reload cr2 if changed */
3652 "mov %c[cr2](%0), %%"R"ax \n\t"
3653 "mov %%cr2, %%"R"dx \n\t"
3654 "cmp %%"R"ax, %%"R"dx \n\t"
3655 "je 2f \n\t"
3656 "mov %%"R"ax, %%cr2 \n\t"
3657 "2: \n\t"
 3468 /* Check if vmlaunch or vmresume is needed */ 3658
3469 "cmpl $0, %c[launched](%0) \n\t" 3659 "cmpl $0, %c[launched](%0) \n\t"
3470 /* Load guest registers. Don't clobber flags. */ 3660 /* Load guest registers. Don't clobber flags. */
3471 "mov %c[cr2](%0), %%"R"ax \n\t"
3472 "mov %%"R"ax, %%cr2 \n\t"
3473 "mov %c[rax](%0), %%"R"ax \n\t" 3661 "mov %c[rax](%0), %%"R"ax \n\t"
3474 "mov %c[rbx](%0), %%"R"bx \n\t" 3662 "mov %c[rbx](%0), %%"R"bx \n\t"
3475 "mov %c[rdx](%0), %%"R"dx \n\t" 3663 "mov %c[rdx](%0), %%"R"dx \n\t"
@@ -3547,10 +3735,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3547#endif 3735#endif
3548 ); 3736 );
3549 3737
3550 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); 3738 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
3739 | (1 << VCPU_EXREG_PDPTR));
3551 vcpu->arch.regs_dirty = 0; 3740 vcpu->arch.regs_dirty = 0;
3552 3741
3553 get_debugreg(vcpu->arch.dr6, 6); 3742 if (vcpu->arch.switch_db_regs)
3743 get_debugreg(vcpu->arch.dr6, 6);
3554 3744
3555 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); 3745 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
3556 if (vmx->rmode.irq.pending) 3746 if (vmx->rmode.irq.pending)
@@ -3633,9 +3823,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
3633 if (alloc_apic_access_page(kvm) != 0) 3823 if (alloc_apic_access_page(kvm) != 0)
3634 goto free_vmcs; 3824 goto free_vmcs;
3635 3825
3636 if (enable_ept) 3826 if (enable_ept) {
3827 if (!kvm->arch.ept_identity_map_addr)
3828 kvm->arch.ept_identity_map_addr =
3829 VMX_EPT_IDENTITY_PAGETABLE_ADDR;
3637 if (alloc_identity_pagetable(kvm) != 0) 3830 if (alloc_identity_pagetable(kvm) != 0)
3638 goto free_vmcs; 3831 goto free_vmcs;
3832 }
3639 3833
3640 return &vmx->vcpu; 3834 return &vmx->vcpu;
3641 3835
@@ -3699,6 +3893,34 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3699 return ret; 3893 return ret;
3700} 3894}
3701 3895
3896static const struct trace_print_flags vmx_exit_reasons_str[] = {
3897 { EXIT_REASON_EXCEPTION_NMI, "exception" },
3898 { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
3899 { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
3900 { EXIT_REASON_NMI_WINDOW, "nmi_window" },
3901 { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
3902 { EXIT_REASON_CR_ACCESS, "cr_access" },
3903 { EXIT_REASON_DR_ACCESS, "dr_access" },
3904 { EXIT_REASON_CPUID, "cpuid" },
3905 { EXIT_REASON_MSR_READ, "rdmsr" },
3906 { EXIT_REASON_MSR_WRITE, "wrmsr" },
3907 { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
3908 { EXIT_REASON_HLT, "halt" },
3909 { EXIT_REASON_INVLPG, "invlpg" },
3910 { EXIT_REASON_VMCALL, "hypercall" },
3911 { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
3912 { EXIT_REASON_APIC_ACCESS, "apic_access" },
3913 { EXIT_REASON_WBINVD, "wbinvd" },
3914 { EXIT_REASON_TASK_SWITCH, "task_switch" },
3915 { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
3916 { -1, NULL }
3917};
3918
3919static bool vmx_gb_page_enable(void)
3920{
3921 return false;
3922}
3923
3702static struct kvm_x86_ops vmx_x86_ops = { 3924static struct kvm_x86_ops vmx_x86_ops = {
3703 .cpu_has_kvm_support = cpu_has_kvm_support, 3925 .cpu_has_kvm_support = cpu_has_kvm_support,
3704 .disabled_by_bios = vmx_disabled_by_bios, 3926 .disabled_by_bios = vmx_disabled_by_bios,
@@ -3758,6 +3980,9 @@ static struct kvm_x86_ops vmx_x86_ops = {
3758 .set_tss_addr = vmx_set_tss_addr, 3980 .set_tss_addr = vmx_set_tss_addr,
3759 .get_tdp_level = get_ept_level, 3981 .get_tdp_level = get_ept_level,
3760 .get_mt_mask = vmx_get_mt_mask, 3982 .get_mt_mask = vmx_get_mt_mask,
3983
3984 .exit_reasons_str = vmx_exit_reasons_str,
3985 .gb_page_enable = vmx_gb_page_enable,
3761}; 3986};
3762 3987
3763static int __init vmx_init(void) 3988static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3d4529011828..be451ee44249 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,11 +37,16 @@
37#include <linux/iommu.h> 37#include <linux/iommu.h>
38#include <linux/intel-iommu.h> 38#include <linux/intel-iommu.h>
39#include <linux/cpufreq.h> 39#include <linux/cpufreq.h>
40#include <trace/events/kvm.h>
41#undef TRACE_INCLUDE_FILE
42#define CREATE_TRACE_POINTS
43#include "trace.h"
40 44
41#include <asm/uaccess.h> 45#include <asm/uaccess.h>
42#include <asm/msr.h> 46#include <asm/msr.h>
43#include <asm/desc.h> 47#include <asm/desc.h>
44#include <asm/mtrr.h> 48#include <asm/mtrr.h>
49#include <asm/mce.h>
45 50
46#define MAX_IO_MSRS 256 51#define MAX_IO_MSRS 256
47#define CR0_RESERVED_BITS \ 52#define CR0_RESERVED_BITS \
@@ -55,6 +60,10 @@
55 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE)) 60 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
56 61
57#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) 62#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
63
64#define KVM_MAX_MCE_BANKS 32
65#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P
66
58/* EFER defaults: 67/* EFER defaults:
 59 * - enable syscall by default because it is emulated by KVM 68
 60 * - enable LME and LMA by default on 64-bit KVM 69
@@ -68,14 +77,16 @@ static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
68#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM 77#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
69#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 78#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
70 79
80static void update_cr8_intercept(struct kvm_vcpu *vcpu);
71static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, 81static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
72 struct kvm_cpuid_entry2 __user *entries); 82 struct kvm_cpuid_entry2 __user *entries);
73struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
74 u32 function, u32 index);
75 83
76struct kvm_x86_ops *kvm_x86_ops; 84struct kvm_x86_ops *kvm_x86_ops;
77EXPORT_SYMBOL_GPL(kvm_x86_ops); 85EXPORT_SYMBOL_GPL(kvm_x86_ops);
78 86
87int ignore_msrs = 0;
88module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
89
79struct kvm_stats_debugfs_item debugfs_entries[] = { 90struct kvm_stats_debugfs_item debugfs_entries[] = {
80 { "pf_fixed", VCPU_STAT(pf_fixed) }, 91 { "pf_fixed", VCPU_STAT(pf_fixed) },
81 { "pf_guest", VCPU_STAT(pf_guest) }, 92 { "pf_guest", VCPU_STAT(pf_guest) },
@@ -122,18 +133,16 @@ unsigned long segment_base(u16 selector)
122 if (selector == 0) 133 if (selector == 0)
123 return 0; 134 return 0;
124 135
125 asm("sgdt %0" : "=m"(gdt)); 136 kvm_get_gdt(&gdt);
126 table_base = gdt.base; 137 table_base = gdt.base;
127 138
128 if (selector & 4) { /* from ldt */ 139 if (selector & 4) { /* from ldt */
129 u16 ldt_selector; 140 u16 ldt_selector = kvm_read_ldt();
130 141
131 asm("sldt %0" : "=g"(ldt_selector));
132 table_base = segment_base(ldt_selector); 142 table_base = segment_base(ldt_selector);
133 } 143 }
134 d = (struct desc_struct *)(table_base + (selector & ~7)); 144 d = (struct desc_struct *)(table_base + (selector & ~7));
135 v = d->base0 | ((unsigned long)d->base1 << 16) | 145 v = get_desc_base(d);
136 ((unsigned long)d->base2 << 24);
137#ifdef CONFIG_X86_64 146#ifdef CONFIG_X86_64
138 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11)) 147 if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
139 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32; 148 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
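get_desc_base above replaces the open-coded base computation visible in the removed lines; a descriptor's 32-bit base address is split across three fields. The equivalence, runnable in userspace:

	/* Reassemble a descriptor base the way the removed lines did. */
	#include <stdio.h>

	struct desc {
		unsigned short base0;	/* base bits 15:0  */
		unsigned char  base1;	/* base bits 23:16 */
		unsigned char  base2;	/* base bits 31:24 */
	};

	int main(void)
	{
		struct desc d = { 0x2000, 0x34, 0x12 };
		unsigned long v = d.base0 |
				  ((unsigned long)d.base1 << 16) |
				  ((unsigned long)d.base2 << 24);
		printf("base = 0x%lx\n", v);	/* -> 0x12342000 */
		return 0;
	}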
@@ -176,16 +185,22 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
176 ++vcpu->stat.pf_guest; 185 ++vcpu->stat.pf_guest;
177 186
178 if (vcpu->arch.exception.pending) { 187 if (vcpu->arch.exception.pending) {
179 if (vcpu->arch.exception.nr == PF_VECTOR) { 188 switch(vcpu->arch.exception.nr) {
180 printk(KERN_DEBUG "kvm: inject_page_fault:" 189 case DF_VECTOR:
181 " double fault 0x%lx\n", addr);
182 vcpu->arch.exception.nr = DF_VECTOR;
183 vcpu->arch.exception.error_code = 0;
184 } else if (vcpu->arch.exception.nr == DF_VECTOR) {
185 /* triple fault -> shutdown */ 190 /* triple fault -> shutdown */
186 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); 191 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
192 return;
193 case PF_VECTOR:
194 vcpu->arch.exception.nr = DF_VECTOR;
195 vcpu->arch.exception.error_code = 0;
196 return;
197 default:
 198 /* replace the previous exception with a new one in the hope
 199 that instruction re-execution will regenerate the lost
 200 exception */
201 vcpu->arch.exception.pending = false;
202 break;
187 } 203 }
188 return;
189 } 204 }
190 vcpu->arch.cr2 = addr; 205 vcpu->arch.cr2 = addr;
191 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); 206 kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
@@ -207,12 +222,18 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
207} 222}
208EXPORT_SYMBOL_GPL(kvm_queue_exception_e); 223EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
209 224
210static void __queue_exception(struct kvm_vcpu *vcpu) 225/*
 226 * Check whether cpl <= required_cpl; if so, return true. Otherwise queue
 227 * a #GP and return false.
228 */
229bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
211{ 230{
212 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, 231 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
213 vcpu->arch.exception.has_error_code, 232 return true;
214 vcpu->arch.exception.error_code); 233 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
234 return false;
215} 235}
236EXPORT_SYMBOL_GPL(kvm_require_cpl);
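kvm_require_cpl is meant as a one-line guard at the top of handlers for privileged instructions; the handle_dr hunk earlier in this diff is its first caller. The calling pattern looks like:

	/* Usage pattern, as in the handle_dr change above. */
	if (!kvm_require_cpl(vcpu, 0))	/* queued #GP, returned false */
		return 1;		/* exit handled; guest takes #GP */
	/* ... privileged work ... */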
216 237
217/* 238/*
 218 * Load the pae pdptrs. Return true if they are all valid. 239
@@ -232,7 +253,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
232 goto out; 253 goto out;
233 } 254 }
234 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { 255 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
235 if (is_present_pte(pdpte[i]) && 256 if (is_present_gpte(pdpte[i]) &&
236 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) { 257 (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
237 ret = 0; 258 ret = 0;
238 goto out; 259 goto out;
@@ -241,6 +262,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
241 ret = 1; 262 ret = 1;
242 263
243 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); 264 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
265 __set_bit(VCPU_EXREG_PDPTR,
266 (unsigned long *)&vcpu->arch.regs_avail);
267 __set_bit(VCPU_EXREG_PDPTR,
268 (unsigned long *)&vcpu->arch.regs_dirty);
244out: 269out:
245 270
246 return ret; 271 return ret;
@@ -256,6 +281,10 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
256 if (is_long_mode(vcpu) || !is_pae(vcpu)) 281 if (is_long_mode(vcpu) || !is_pae(vcpu))
257 return false; 282 return false;
258 283
284 if (!test_bit(VCPU_EXREG_PDPTR,
285 (unsigned long *)&vcpu->arch.regs_avail))
286 return true;
287
259 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte)); 288 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
260 if (r < 0) 289 if (r < 0)
261 goto out; 290 goto out;
@@ -328,9 +357,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
328void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) 357void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
329{ 358{
330 kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)); 359 kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
331 KVMTRACE_1D(LMSW, vcpu,
332 (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
333 handler);
334} 360}
335EXPORT_SYMBOL_GPL(kvm_lmsw); 361EXPORT_SYMBOL_GPL(kvm_lmsw);
336 362
@@ -466,7 +492,7 @@ static u32 msrs_to_save[] = {
466#ifdef CONFIG_X86_64 492#ifdef CONFIG_X86_64
467 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 493 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
468#endif 494#endif
469 MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, 495 MSR_IA32_TSC, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
470 MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA 496 MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
471}; 497};
472 498
@@ -644,8 +670,7 @@ static void kvm_write_guest_time(struct kvm_vcpu *v)
644 670
645 /* Keep irq disabled to prevent changes to the clock */ 671 /* Keep irq disabled to prevent changes to the clock */
646 local_irq_save(flags); 672 local_irq_save(flags);
647 kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER, 673 kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
648 &vcpu->hv_clock.tsc_timestamp);
649 ktime_get_ts(&ts); 674 ktime_get_ts(&ts);
650 local_irq_restore(flags); 675 local_irq_restore(flags);
651 676
@@ -778,23 +803,60 @@ static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
778 return 0; 803 return 0;
779} 804}
780 805
806static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
807{
808 u64 mcg_cap = vcpu->arch.mcg_cap;
809 unsigned bank_num = mcg_cap & 0xff;
810
811 switch (msr) {
812 case MSR_IA32_MCG_STATUS:
813 vcpu->arch.mcg_status = data;
814 break;
815 case MSR_IA32_MCG_CTL:
816 if (!(mcg_cap & MCG_CTL_P))
817 return 1;
818 if (data != 0 && data != ~(u64)0)
819 return -1;
820 vcpu->arch.mcg_ctl = data;
821 break;
822 default:
823 if (msr >= MSR_IA32_MC0_CTL &&
824 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
825 u32 offset = msr - MSR_IA32_MC0_CTL;
826 /* only 0 or all 1s can be written to IA32_MCi_CTL */
827 if ((offset & 0x3) == 0 &&
828 data != 0 && data != ~(u64)0)
829 return -1;
830 vcpu->arch.mce_banks[offset] = data;
831 break;
832 }
833 return 1;
834 }
835 return 0;
836}
837
781int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) 838int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
782{ 839{
783 switch (msr) { 840 switch (msr) {
784 case MSR_EFER: 841 case MSR_EFER:
785 set_efer(vcpu, data); 842 set_efer(vcpu, data);
786 break; 843 break;
787 case MSR_IA32_MC0_STATUS: 844 case MSR_K7_HWCR:
788 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n", 845 data &= ~(u64)0x40; /* ignore flush filter disable */
789 __func__, data); 846 if (data != 0) {
847 pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
848 data);
849 return 1;
850 }
790 break; 851 break;
791 case MSR_IA32_MCG_STATUS: 852 case MSR_FAM10H_MMIO_CONF_BASE:
792 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n", 853 if (data != 0) {
793 __func__, data); 854 pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
855 "0x%llx\n", data);
856 return 1;
857 }
794 break; 858 break;
795 case MSR_IA32_MCG_CTL: 859 case MSR_AMD64_NB_CFG:
796 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
797 __func__, data);
798 break; 860 break;
799 case MSR_IA32_DEBUGCTLMSR: 861 case MSR_IA32_DEBUGCTLMSR:
800 if (!data) { 862 if (!data) {
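In set_msr_mce and get_msr_mce above, vcpu->arch.mce_banks is a flat u64 array holding four words per bank in the architectural order CTL, STATUS, ADDR, MISC (kvm_vcpu_ioctl_x86_set_mce later in this diff indexes banks[0..3] the same way), so an MSR number maps to an array slot by simple arithmetic:

	/* Bank-MSR indexing as used above (illustrative). */
	#include <stdio.h>

	#define MSR_IA32_MC0_CTL 0x400	/* architectural base of bank MSRs */

	int main(void)
	{
		unsigned msr = 0x409;	/* IA32_MC2_STATUS */
		unsigned offset = msr - MSR_IA32_MC0_CTL;
		printf("bank=%u field=%u\n", offset / 4, offset & 3);
		/* -> bank=2 field=1: mce_banks[2*4 + 1] is MC2_STATUS */
		return 0;
	}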
@@ -811,12 +873,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
811 case MSR_IA32_UCODE_REV: 873 case MSR_IA32_UCODE_REV:
812 case MSR_IA32_UCODE_WRITE: 874 case MSR_IA32_UCODE_WRITE:
813 case MSR_VM_HSAVE_PA: 875 case MSR_VM_HSAVE_PA:
876 case MSR_AMD64_PATCH_LOADER:
814 break; 877 break;
815 case 0x200 ... 0x2ff: 878 case 0x200 ... 0x2ff:
816 return set_msr_mtrr(vcpu, msr, data); 879 return set_msr_mtrr(vcpu, msr, data);
817 case MSR_IA32_APICBASE: 880 case MSR_IA32_APICBASE:
818 kvm_set_apic_base(vcpu, data); 881 kvm_set_apic_base(vcpu, data);
819 break; 882 break;
883 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
884 return kvm_x2apic_msr_write(vcpu, msr, data);
820 case MSR_IA32_MISC_ENABLE: 885 case MSR_IA32_MISC_ENABLE:
821 vcpu->arch.ia32_misc_enable_msr = data; 886 vcpu->arch.ia32_misc_enable_msr = data;
822 break; 887 break;
@@ -850,9 +915,50 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
850 kvm_request_guest_time_update(vcpu); 915 kvm_request_guest_time_update(vcpu);
851 break; 916 break;
852 } 917 }
918 case MSR_IA32_MCG_CTL:
919 case MSR_IA32_MCG_STATUS:
920 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
921 return set_msr_mce(vcpu, msr, data);
922
 923 /* Performance counters are not protected by a CPUID bit,
 924 * so we should check all of them in the generic path for the sake of
 925 * cross-vendor migration.
 926 * Writing a zero into the event select MSRs disables them,
 927 * which we perfectly emulate ;-). Any other value should at least be
 928 * reported; some guests depend on them.
929 */
930 case MSR_P6_EVNTSEL0:
931 case MSR_P6_EVNTSEL1:
932 case MSR_K7_EVNTSEL0:
933 case MSR_K7_EVNTSEL1:
934 case MSR_K7_EVNTSEL2:
935 case MSR_K7_EVNTSEL3:
936 if (data != 0)
937 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
938 "0x%x data 0x%llx\n", msr, data);
939 break;
940 /* at least RHEL 4 unconditionally writes to the perfctr registers,
941 * so we ignore writes to make it happy.
942 */
943 case MSR_P6_PERFCTR0:
944 case MSR_P6_PERFCTR1:
945 case MSR_K7_PERFCTR0:
946 case MSR_K7_PERFCTR1:
947 case MSR_K7_PERFCTR2:
948 case MSR_K7_PERFCTR3:
949 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
950 "0x%x data 0x%llx\n", msr, data);
951 break;
853 default: 952 default:
854 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data); 953 if (!ignore_msrs) {
855 return 1; 954 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
955 msr, data);
956 return 1;
957 } else {
958 pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
959 msr, data);
960 break;
961 }
856 } 962 }
857 return 0; 963 return 0;
858} 964}
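The new ignore_msrs knob turns accesses to unknown MSRs from an injected #GP into a logged no-op (writes are dropped, reads return 0). Because it is declared with module_param_named under S_IRUGO | S_IWUSR, it should be settable at module load time (e.g. modprobe kvm ignore_msrs=1) and togglable at runtime via /sys/module/kvm/parameters/ignore_msrs; this is inferred from the declaration above rather than stated anywhere in the patch.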
@@ -905,26 +1011,47 @@ static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
905 return 0; 1011 return 0;
906} 1012}
907 1013
908int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) 1014static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
909{ 1015{
910 u64 data; 1016 u64 data;
1017 u64 mcg_cap = vcpu->arch.mcg_cap;
1018 unsigned bank_num = mcg_cap & 0xff;
911 1019
912 switch (msr) { 1020 switch (msr) {
913 case 0xc0010010: /* SYSCFG */
914 case 0xc0010015: /* HWCR */
915 case MSR_IA32_PLATFORM_ID:
916 case MSR_IA32_P5_MC_ADDR: 1021 case MSR_IA32_P5_MC_ADDR:
917 case MSR_IA32_P5_MC_TYPE: 1022 case MSR_IA32_P5_MC_TYPE:
918 case MSR_IA32_MC0_CTL: 1023 data = 0;
919 case MSR_IA32_MCG_STATUS: 1024 break;
920 case MSR_IA32_MCG_CAP: 1025 case MSR_IA32_MCG_CAP:
1026 data = vcpu->arch.mcg_cap;
1027 break;
921 case MSR_IA32_MCG_CTL: 1028 case MSR_IA32_MCG_CTL:
922 case MSR_IA32_MC0_MISC: 1029 if (!(mcg_cap & MCG_CTL_P))
923 case MSR_IA32_MC0_MISC+4: 1030 return 1;
924 case MSR_IA32_MC0_MISC+8: 1031 data = vcpu->arch.mcg_ctl;
925 case MSR_IA32_MC0_MISC+12: 1032 break;
926 case MSR_IA32_MC0_MISC+16: 1033 case MSR_IA32_MCG_STATUS:
927 case MSR_IA32_MC0_MISC+20: 1034 data = vcpu->arch.mcg_status;
1035 break;
1036 default:
1037 if (msr >= MSR_IA32_MC0_CTL &&
1038 msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1039 u32 offset = msr - MSR_IA32_MC0_CTL;
1040 data = vcpu->arch.mce_banks[offset];
1041 break;
1042 }
1043 return 1;
1044 }
1045 *pdata = data;
1046 return 0;
1047}
1048
1049int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1050{
1051 u64 data;
1052
1053 switch (msr) {
1054 case MSR_IA32_PLATFORM_ID:
928 case MSR_IA32_UCODE_REV: 1055 case MSR_IA32_UCODE_REV:
929 case MSR_IA32_EBL_CR_POWERON: 1056 case MSR_IA32_EBL_CR_POWERON:
930 case MSR_IA32_DEBUGCTLMSR: 1057 case MSR_IA32_DEBUGCTLMSR:
@@ -932,10 +1059,18 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
932 case MSR_IA32_LASTBRANCHTOIP: 1059 case MSR_IA32_LASTBRANCHTOIP:
933 case MSR_IA32_LASTINTFROMIP: 1060 case MSR_IA32_LASTINTFROMIP:
934 case MSR_IA32_LASTINTTOIP: 1061 case MSR_IA32_LASTINTTOIP:
1062 case MSR_K8_SYSCFG:
1063 case MSR_K7_HWCR:
935 case MSR_VM_HSAVE_PA: 1064 case MSR_VM_HSAVE_PA:
1065 case MSR_P6_PERFCTR0:
1066 case MSR_P6_PERFCTR1:
936 case MSR_P6_EVNTSEL0: 1067 case MSR_P6_EVNTSEL0:
937 case MSR_P6_EVNTSEL1: 1068 case MSR_P6_EVNTSEL1:
938 case MSR_K7_EVNTSEL0: 1069 case MSR_K7_EVNTSEL0:
1070 case MSR_K7_PERFCTR0:
1071 case MSR_K8_INT_PENDING_MSG:
1072 case MSR_AMD64_NB_CFG:
1073 case MSR_FAM10H_MMIO_CONF_BASE:
939 data = 0; 1074 data = 0;
940 break; 1075 break;
941 case MSR_MTRRcap: 1076 case MSR_MTRRcap:
@@ -949,6 +1084,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
949 case MSR_IA32_APICBASE: 1084 case MSR_IA32_APICBASE:
950 data = kvm_get_apic_base(vcpu); 1085 data = kvm_get_apic_base(vcpu);
951 break; 1086 break;
1087 case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1088 return kvm_x2apic_msr_read(vcpu, msr, pdata);
1089 break;
952 case MSR_IA32_MISC_ENABLE: 1090 case MSR_IA32_MISC_ENABLE:
953 data = vcpu->arch.ia32_misc_enable_msr; 1091 data = vcpu->arch.ia32_misc_enable_msr;
954 break; 1092 break;
@@ -967,9 +1105,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
967 case MSR_KVM_SYSTEM_TIME: 1105 case MSR_KVM_SYSTEM_TIME:
968 data = vcpu->arch.time; 1106 data = vcpu->arch.time;
969 break; 1107 break;
1108 case MSR_IA32_P5_MC_ADDR:
1109 case MSR_IA32_P5_MC_TYPE:
1110 case MSR_IA32_MCG_CAP:
1111 case MSR_IA32_MCG_CTL:
1112 case MSR_IA32_MCG_STATUS:
1113 case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1114 return get_msr_mce(vcpu, msr, pdata);
970 default: 1115 default:
971 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr); 1116 if (!ignore_msrs) {
972 return 1; 1117 pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
1118 return 1;
1119 } else {
1120 pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
1121 data = 0;
1122 }
1123 break;
973 } 1124 }
974 *pdata = data; 1125 *pdata = data;
975 return 0; 1126 return 0;
@@ -1068,6 +1219,11 @@ int kvm_dev_ioctl_check_extension(long ext)
1068 case KVM_CAP_REINJECT_CONTROL: 1219 case KVM_CAP_REINJECT_CONTROL:
1069 case KVM_CAP_IRQ_INJECT_STATUS: 1220 case KVM_CAP_IRQ_INJECT_STATUS:
1070 case KVM_CAP_ASSIGN_DEV_IRQ: 1221 case KVM_CAP_ASSIGN_DEV_IRQ:
1222 case KVM_CAP_IRQFD:
1223 case KVM_CAP_IOEVENTFD:
1224 case KVM_CAP_PIT2:
1225 case KVM_CAP_PIT_STATE2:
1226 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1071 r = 1; 1227 r = 1;
1072 break; 1228 break;
1073 case KVM_CAP_COALESCED_MMIO: 1229 case KVM_CAP_COALESCED_MMIO:
@@ -1088,6 +1244,9 @@ int kvm_dev_ioctl_check_extension(long ext)
1088 case KVM_CAP_IOMMU: 1244 case KVM_CAP_IOMMU:
1089 r = iommu_found(); 1245 r = iommu_found();
1090 break; 1246 break;
1247 case KVM_CAP_MCE:
1248 r = KVM_MAX_MCE_BANKS;
1249 break;
1091 default: 1250 default:
1092 r = 0; 1251 r = 0;
1093 break; 1252 break;
@@ -1147,6 +1306,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
1147 r = 0; 1306 r = 0;
1148 break; 1307 break;
1149 } 1308 }
1309 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1310 u64 mce_cap;
1311
1312 mce_cap = KVM_MCE_CAP_SUPPORTED;
1313 r = -EFAULT;
1314 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1315 goto out;
1316 r = 0;
1317 break;
1318 }
1150 default: 1319 default:
1151 r = -EINVAL; 1320 r = -EINVAL;
1152 } 1321 }
@@ -1227,6 +1396,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1227 vcpu->arch.cpuid_nent = cpuid->nent; 1396 vcpu->arch.cpuid_nent = cpuid->nent;
1228 cpuid_fix_nx_cap(vcpu); 1397 cpuid_fix_nx_cap(vcpu);
1229 r = 0; 1398 r = 0;
1399 kvm_apic_set_version(vcpu);
1230 1400
1231out_free: 1401out_free:
1232 vfree(cpuid_entries); 1402 vfree(cpuid_entries);
@@ -1248,6 +1418,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1248 cpuid->nent * sizeof(struct kvm_cpuid_entry2))) 1418 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
1249 goto out; 1419 goto out;
1250 vcpu->arch.cpuid_nent = cpuid->nent; 1420 vcpu->arch.cpuid_nent = cpuid->nent;
1421 kvm_apic_set_version(vcpu);
1251 return 0; 1422 return 0;
1252 1423
1253out: 1424out:
@@ -1290,6 +1461,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1290 u32 index, int *nent, int maxnent) 1461 u32 index, int *nent, int maxnent)
1291{ 1462{
1292 unsigned f_nx = is_efer_nx() ? F(NX) : 0; 1463 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1464 unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
1293#ifdef CONFIG_X86_64 1465#ifdef CONFIG_X86_64
1294 unsigned f_lm = F(LM); 1466 unsigned f_lm = F(LM);
1295#else 1467#else
@@ -1314,7 +1486,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1314 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) | 1486 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1315 F(PAT) | F(PSE36) | 0 /* Reserved */ | 1487 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1316 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) | 1488 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1317 F(FXSR) | F(FXSR_OPT) | 0 /* GBPAGES */ | 0 /* RDTSCP */ | 1489 F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
1318 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW); 1490 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1319 /* cpuid 1.ecx */ 1491 /* cpuid 1.ecx */
1320 const u32 kvm_supported_word4_x86_features = 1492 const u32 kvm_supported_word4_x86_features =
@@ -1323,7 +1495,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1323 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ | 1495 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1324 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ | 1496 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1325 0 /* Reserved, DCA */ | F(XMM4_1) | 1497 0 /* Reserved, DCA */ | F(XMM4_1) |
1326 F(XMM4_2) | 0 /* x2APIC */ | F(MOVBE) | F(POPCNT) | 1498 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
1327 0 /* Reserved, XSAVE, OSXSAVE */; 1499 0 /* Reserved, XSAVE, OSXSAVE */;
1328 /* cpuid 0x80000001.ecx */ 1500 /* cpuid 0x80000001.ecx */
1329 const u32 kvm_supported_word6_x86_features = 1501 const u32 kvm_supported_word6_x86_features =
@@ -1344,6 +1516,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1344 case 1: 1516 case 1:
1345 entry->edx &= kvm_supported_word0_x86_features; 1517 entry->edx &= kvm_supported_word0_x86_features;
1346 entry->ecx &= kvm_supported_word4_x86_features; 1518 entry->ecx &= kvm_supported_word4_x86_features;
 1519 /* we support x2apic emulation even if the host does not support
 1520 * it, since we emulate x2apic in software */
1521 entry->ecx |= F(X2APIC);
1347 break; 1522 break;
1348 /* function 2 entries are STATEFUL. That is, repeated cpuid commands 1523 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1349 * may return different values. This forces us to get_cpu() before 1524 * may return different values. This forces us to get_cpu() before
@@ -1435,6 +1610,10 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
1435 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func) 1610 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
1436 do_cpuid_ent(&cpuid_entries[nent], func, 0, 1611 do_cpuid_ent(&cpuid_entries[nent], func, 0,
1437 &nent, cpuid->nent); 1612 &nent, cpuid->nent);
1613 r = -E2BIG;
1614 if (nent >= cpuid->nent)
1615 goto out_free;
1616
1438 r = -EFAULT; 1617 r = -EFAULT;
1439 if (copy_to_user(entries, cpuid_entries, 1618 if (copy_to_user(entries, cpuid_entries,
1440 nent * sizeof(struct kvm_cpuid_entry2))) 1619 nent * sizeof(struct kvm_cpuid_entry2)))
@@ -1464,6 +1643,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
1464 vcpu_load(vcpu); 1643 vcpu_load(vcpu);
1465 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s); 1644 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
1466 kvm_apic_post_state_restore(vcpu); 1645 kvm_apic_post_state_restore(vcpu);
1646 update_cr8_intercept(vcpu);
1467 vcpu_put(vcpu); 1647 vcpu_put(vcpu);
1468 1648
1469 return 0; 1649 return 0;
@@ -1503,6 +1683,80 @@ static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
1503 return 0; 1683 return 0;
1504} 1684}
1505 1685
1686static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
1687 u64 mcg_cap)
1688{
1689 int r;
1690 unsigned bank_num = mcg_cap & 0xff, bank;
1691
1692 r = -EINVAL;
1693 if (!bank_num)
1694 goto out;
1695 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
1696 goto out;
1697 r = 0;
1698 vcpu->arch.mcg_cap = mcg_cap;
1699 /* Init IA32_MCG_CTL to all 1s */
1700 if (mcg_cap & MCG_CTL_P)
1701 vcpu->arch.mcg_ctl = ~(u64)0;
1702 /* Init IA32_MCi_CTL to all 1s */
1703 for (bank = 0; bank < bank_num; bank++)
1704 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
1705out:
1706 return r;
1707}
1708
1709static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
1710 struct kvm_x86_mce *mce)
1711{
1712 u64 mcg_cap = vcpu->arch.mcg_cap;
1713 unsigned bank_num = mcg_cap & 0xff;
1714 u64 *banks = vcpu->arch.mce_banks;
1715
1716 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
1717 return -EINVAL;
1718 /*
1719 * if IA32_MCG_CTL is not all 1s, the uncorrected error
1720 * reporting is disabled
1721 */
1722 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
1723 vcpu->arch.mcg_ctl != ~(u64)0)
1724 return 0;
1725 banks += 4 * mce->bank;
1726 /*
1727 * if IA32_MCi_CTL is not all 1s, the uncorrected error
1728 * reporting is disabled for the bank
1729 */
1730 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
1731 return 0;
1732 if (mce->status & MCI_STATUS_UC) {
1733 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
1734 !(vcpu->arch.cr4 & X86_CR4_MCE)) {
1735 printk(KERN_DEBUG "kvm: set_mce: "
1736 "injects mce exception while "
1737 "previous one is in progress!\n");
1738 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1739 return 0;
1740 }
1741 if (banks[1] & MCI_STATUS_VAL)
1742 mce->status |= MCI_STATUS_OVER;
1743 banks[2] = mce->addr;
1744 banks[3] = mce->misc;
1745 vcpu->arch.mcg_status = mce->mcg_status;
1746 banks[1] = mce->status;
1747 kvm_queue_exception(vcpu, MC_VECTOR);
1748 } else if (!(banks[1] & MCI_STATUS_VAL)
1749 || !(banks[1] & MCI_STATUS_UC)) {
1750 if (banks[1] & MCI_STATUS_VAL)
1751 mce->status |= MCI_STATUS_OVER;
1752 banks[2] = mce->addr;
1753 banks[3] = mce->misc;
1754 banks[1] = mce->status;
1755 } else
1756 banks[1] |= MCI_STATUS_OVER;
1757 return 0;
1758}
1759
1506long kvm_arch_vcpu_ioctl(struct file *filp, 1760long kvm_arch_vcpu_ioctl(struct file *filp,
1507 unsigned int ioctl, unsigned long arg) 1761 unsigned int ioctl, unsigned long arg)
1508{ 1762{
@@ -1636,6 +1890,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
1636 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); 1890 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
1637 break; 1891 break;
1638 } 1892 }
1893 case KVM_X86_SETUP_MCE: {
1894 u64 mcg_cap;
1895
1896 r = -EFAULT;
1897 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
1898 goto out;
1899 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
1900 break;
1901 }
1902 case KVM_X86_SET_MCE: {
1903 struct kvm_x86_mce mce;
1904
1905 r = -EFAULT;
1906 if (copy_from_user(&mce, argp, sizeof mce))
1907 goto out;
1908 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
1909 break;
1910 }
1639 default: 1911 default:
1640 r = -EINVAL; 1912 r = -EINVAL;
1641 } 1913 }
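The two new vcpu ioctls above are driven from userspace roughly as follows. This is a hedged sketch, not code from this patch: the fd plumbing, the locally defined MCI_STATUS_* constants (the architectural IA32_MCi_STATUS bits), and the inject_uc_error() helper are illustrative; only KVM_X86_SETUP_MCE, KVM_X86_SET_MCE and struct kvm_x86_mce come from the ABI added here.

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

#define MCI_STATUS_VAL  (1ULL << 63)    /* valid */
#define MCI_STATUS_UC   (1ULL << 61)    /* uncorrected */
#define MCI_STATUS_EN   (1ULL << 60)    /* error reporting enabled */

/* Sketch: enable MCE emulation on a vcpu, then inject an
 * uncorrected error into bank 0; mcg_cap carries the bank count
 * in bits 7:0, as kvm_vcpu_ioctl_x86_setup_mce() expects. */
static int inject_uc_error(int vcpu_fd, uint64_t mcg_cap)
{
        struct kvm_x86_mce mce = {
                .status     = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN,
                .mcg_status = 0,
                .addr       = 0x1000,   /* illustrative guest physical address */
                .misc       = 0,
                .bank       = 0,
        };

        if (ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap) < 0)
                return -1;
        return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}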
@@ -1654,6 +1926,13 @@ static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
1654 return ret; 1926 return ret;
1655} 1927}
1656 1928
1929static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
1930 u64 ident_addr)
1931{
1932 kvm->arch.ept_identity_map_addr = ident_addr;
1933 return 0;
1934}
1935
1657static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, 1936static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1658 u32 kvm_nr_mmu_pages) 1937 u32 kvm_nr_mmu_pages)
1659{ 1938{
@@ -1775,19 +2054,25 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
1775 r = 0; 2054 r = 0;
1776 switch (chip->chip_id) { 2055 switch (chip->chip_id) {
1777 case KVM_IRQCHIP_PIC_MASTER: 2056 case KVM_IRQCHIP_PIC_MASTER:
2057 spin_lock(&pic_irqchip(kvm)->lock);
1778 memcpy(&pic_irqchip(kvm)->pics[0], 2058 memcpy(&pic_irqchip(kvm)->pics[0],
1779 &chip->chip.pic, 2059 &chip->chip.pic,
1780 sizeof(struct kvm_pic_state)); 2060 sizeof(struct kvm_pic_state));
2061 spin_unlock(&pic_irqchip(kvm)->lock);
1781 break; 2062 break;
1782 case KVM_IRQCHIP_PIC_SLAVE: 2063 case KVM_IRQCHIP_PIC_SLAVE:
2064 spin_lock(&pic_irqchip(kvm)->lock);
1783 memcpy(&pic_irqchip(kvm)->pics[1], 2065 memcpy(&pic_irqchip(kvm)->pics[1],
1784 &chip->chip.pic, 2066 &chip->chip.pic,
1785 sizeof(struct kvm_pic_state)); 2067 sizeof(struct kvm_pic_state));
2068 spin_unlock(&pic_irqchip(kvm)->lock);
1786 break; 2069 break;
1787 case KVM_IRQCHIP_IOAPIC: 2070 case KVM_IRQCHIP_IOAPIC:
2071 mutex_lock(&kvm->irq_lock);
1788 memcpy(ioapic_irqchip(kvm), 2072 memcpy(ioapic_irqchip(kvm),
1789 &chip->chip.ioapic, 2073 &chip->chip.ioapic,
1790 sizeof(struct kvm_ioapic_state)); 2074 sizeof(struct kvm_ioapic_state));
2075 mutex_unlock(&kvm->irq_lock);
1791 break; 2076 break;
1792 default: 2077 default:
1793 r = -EINVAL; 2078 r = -EINVAL;
@@ -1801,7 +2086,9 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1801{ 2086{
1802 int r = 0; 2087 int r = 0;
1803 2088
2089 mutex_lock(&kvm->arch.vpit->pit_state.lock);
1804 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state)); 2090 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2091 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
1805 return r; 2092 return r;
1806} 2093}
1807 2094
@@ -1809,8 +2096,39 @@ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
1809{ 2096{
1810 int r = 0; 2097 int r = 0;
1811 2098
2099 mutex_lock(&kvm->arch.vpit->pit_state.lock);
1812 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state)); 2100 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
1813 kvm_pit_load_count(kvm, 0, ps->channels[0].count); 2101 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2102 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2103 return r;
2104}
2105
2106static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2107{
2108 int r = 0;
2109
2110 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2111 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2112 sizeof(ps->channels));
2113 ps->flags = kvm->arch.vpit->pit_state.flags;
2114 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2115 return r;
2116}
2117
2118static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2119{
2120 int r = 0, start = 0;
2121 u32 prev_legacy, cur_legacy;
2122 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2123 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2124 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2125 if (!prev_legacy && cur_legacy)
2126 start = 1;
2127 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2128 sizeof(kvm->arch.vpit->pit_state.channels));
2129 kvm->arch.vpit->pit_state.flags = ps->flags;
2130 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2131 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
1814 return r; 2132 return r;
1815} 2133}
1816 2134
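For the pair of helpers just added, the matching userspace calls look roughly like this (a sketch under the ABI of this patch; the vm_fd plumbing and helper name are assumed). Reading the state, setting KVM_PIT_FLAGS_HPET_LEGACY and writing it back produces the 0->1 transition that kvm_vm_ioctl_set_pit2() turns into a channel-0 reload:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: the 0->1 edge on the HPET-legacy flag restarts channel 0
 * (the "start" logic in kvm_vm_ioctl_set_pit2 above). */
static int enable_hpet_legacy_mode(int vm_fd)
{
        struct kvm_pit_state2 ps;

        if (ioctl(vm_fd, KVM_GET_PIT2, &ps) < 0)
                return -1;
        ps.flags |= KVM_PIT_FLAGS_HPET_LEGACY;
        return ioctl(vm_fd, KVM_SET_PIT2, &ps);
}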
@@ -1819,7 +2137,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
1819{ 2137{
1820 if (!kvm->arch.vpit) 2138 if (!kvm->arch.vpit)
1821 return -ENXIO; 2139 return -ENXIO;
2140 mutex_lock(&kvm->arch.vpit->pit_state.lock);
1822 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject; 2141 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2142 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
1823 return 0; 2143 return 0;
1824} 2144}
1825 2145
@@ -1845,7 +2165,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1845 spin_lock(&kvm->mmu_lock); 2165 spin_lock(&kvm->mmu_lock);
1846 kvm_mmu_slot_remove_write_access(kvm, log->slot); 2166 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1847 spin_unlock(&kvm->mmu_lock); 2167 spin_unlock(&kvm->mmu_lock);
1848 kvm_flush_remote_tlbs(kvm);
1849 memslot = &kvm->memslots[log->slot]; 2168 memslot = &kvm->memslots[log->slot];
1850 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 2169 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1851 memset(memslot->dirty_bitmap, 0, n); 2170 memset(memslot->dirty_bitmap, 0, n);
@@ -1869,7 +2188,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
1869 */ 2188 */
1870 union { 2189 union {
1871 struct kvm_pit_state ps; 2190 struct kvm_pit_state ps;
2191 struct kvm_pit_state2 ps2;
1872 struct kvm_memory_alias alias; 2192 struct kvm_memory_alias alias;
2193 struct kvm_pit_config pit_config;
1873 } u; 2194 } u;
1874 2195
1875 switch (ioctl) { 2196 switch (ioctl) {
@@ -1878,6 +2199,17 @@ long kvm_arch_vm_ioctl(struct file *filp,
1878 if (r < 0) 2199 if (r < 0)
1879 goto out; 2200 goto out;
1880 break; 2201 break;
2202 case KVM_SET_IDENTITY_MAP_ADDR: {
2203 u64 ident_addr;
2204
2205 r = -EFAULT;
2206 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2207 goto out;
2208 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2209 if (r < 0)
2210 goto out;
2211 break;
2212 }
1881 case KVM_SET_MEMORY_REGION: { 2213 case KVM_SET_MEMORY_REGION: {
1882 struct kvm_memory_region kvm_mem; 2214 struct kvm_memory_region kvm_mem;
1883 struct kvm_userspace_memory_region kvm_userspace_mem; 2215 struct kvm_userspace_memory_region kvm_userspace_mem;
@@ -1930,16 +2262,24 @@ long kvm_arch_vm_ioctl(struct file *filp,
1930 } 2262 }
1931 break; 2263 break;
1932 case KVM_CREATE_PIT: 2264 case KVM_CREATE_PIT:
1933 mutex_lock(&kvm->lock); 2265 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
2266 goto create_pit;
2267 case KVM_CREATE_PIT2:
2268 r = -EFAULT;
2269 if (copy_from_user(&u.pit_config, argp,
2270 sizeof(struct kvm_pit_config)))
2271 goto out;
2272 create_pit:
2273 down_write(&kvm->slots_lock);
1934 r = -EEXIST; 2274 r = -EEXIST;
1935 if (kvm->arch.vpit) 2275 if (kvm->arch.vpit)
1936 goto create_pit_unlock; 2276 goto create_pit_unlock;
1937 r = -ENOMEM; 2277 r = -ENOMEM;
1938 kvm->arch.vpit = kvm_create_pit(kvm); 2278 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
1939 if (kvm->arch.vpit) 2279 if (kvm->arch.vpit)
1940 r = 0; 2280 r = 0;
1941 create_pit_unlock: 2281 create_pit_unlock:
1942 mutex_unlock(&kvm->lock); 2282 up_write(&kvm->slots_lock);
1943 break; 2283 break;
1944 case KVM_IRQ_LINE_STATUS: 2284 case KVM_IRQ_LINE_STATUS:
1945 case KVM_IRQ_LINE: { 2285 case KVM_IRQ_LINE: {
@@ -1950,10 +2290,10 @@ long kvm_arch_vm_ioctl(struct file *filp,
1950 goto out; 2290 goto out;
1951 if (irqchip_in_kernel(kvm)) { 2291 if (irqchip_in_kernel(kvm)) {
1952 __s32 status; 2292 __s32 status;
1953 mutex_lock(&kvm->lock); 2293 mutex_lock(&kvm->irq_lock);
1954 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 2294 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1955 irq_event.irq, irq_event.level); 2295 irq_event.irq, irq_event.level);
1956 mutex_unlock(&kvm->lock); 2296 mutex_unlock(&kvm->irq_lock);
1957 if (ioctl == KVM_IRQ_LINE_STATUS) { 2297 if (ioctl == KVM_IRQ_LINE_STATUS) {
1958 irq_event.status = status; 2298 irq_event.status = status;
1959 if (copy_to_user(argp, &irq_event, 2299 if (copy_to_user(argp, &irq_event,
@@ -2042,6 +2382,32 @@ long kvm_arch_vm_ioctl(struct file *filp,
2042 r = 0; 2382 r = 0;
2043 break; 2383 break;
2044 } 2384 }
2385 case KVM_GET_PIT2: {
2386 r = -ENXIO;
2387 if (!kvm->arch.vpit)
2388 goto out;
2389 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
2390 if (r)
2391 goto out;
2392 r = -EFAULT;
2393 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
2394 goto out;
2395 r = 0;
2396 break;
2397 }
2398 case KVM_SET_PIT2: {
2399 r = -EFAULT;
2400 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
2401 goto out;
2402 r = -ENXIO;
2403 if (!kvm->arch.vpit)
2404 goto out;
2405 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
2406 if (r)
2407 goto out;
2408 r = 0;
2409 break;
2410 }
2045 case KVM_REINJECT_CONTROL: { 2411 case KVM_REINJECT_CONTROL: {
2046 struct kvm_reinject_control control; 2412 struct kvm_reinject_control control;
2047 r = -EFAULT; 2413 r = -EFAULT;
@@ -2075,35 +2441,23 @@ static void kvm_init_msr_list(void)
2075 num_msrs_to_save = j; 2441 num_msrs_to_save = j;
2076} 2442}
2077 2443
2078/* 2444static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
2079 * Only apic need an MMIO device hook, so shortcut now.. 2445 const void *v)
2080 */
2081static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
2082 gpa_t addr, int len,
2083 int is_write)
2084{ 2446{
2085 struct kvm_io_device *dev; 2447 if (vcpu->arch.apic &&
2448 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
2449 return 0;
2086 2450
2087 if (vcpu->arch.apic) { 2451 return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
2088 dev = &vcpu->arch.apic->dev;
2089 if (dev->in_range(dev, addr, len, is_write))
2090 return dev;
2091 }
2092 return NULL;
2093} 2452}
2094 2453
2095 2454static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
2096static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
2097 gpa_t addr, int len,
2098 int is_write)
2099{ 2455{
2100 struct kvm_io_device *dev; 2456 if (vcpu->arch.apic &&
2457 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
2458 return 0;
2101 2459
2102 dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write); 2460 return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
2103 if (dev == NULL)
2104 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
2105 is_write);
2106 return dev;
2107} 2461}
2108 2462
2109static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, 2463static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
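The rewritten helpers replace the old find-then-call pattern with a try-each-device dispatch in which a return of 0 means "handled". A minimal standalone model of that convention follows (not kernel code; the io_device/io_bus types are invented for illustration):

#include <stddef.h>

struct io_device {
        /* returns 0 if the device claimed [addr, addr + len) */
        int (*write)(struct io_device *dev, unsigned long addr,
                     int len, const void *val);
};

struct io_bus {
        int dev_count;
        struct io_device *devs[8];
};

/* Mirrors vcpu_mmio_write(): the per-vcpu APIC gets first shot,
 * then every device on the bus; nonzero means the MMIO is
 * unhandled and must exit to userspace. */
static int model_mmio_write(struct io_device *apic, struct io_bus *bus,
                            unsigned long addr, int len, const void *val)
{
        int i;

        if (apic && !apic->write(apic, addr, len, val))
                return 0;
        for (i = 0; i < bus->dev_count; i++)
                if (!bus->devs[i]->write(bus->devs[i], addr, len, val))
                        return 0;
        return -1;
}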
@@ -2172,11 +2526,12 @@ static int emulator_read_emulated(unsigned long addr,
2172 unsigned int bytes, 2526 unsigned int bytes,
2173 struct kvm_vcpu *vcpu) 2527 struct kvm_vcpu *vcpu)
2174{ 2528{
2175 struct kvm_io_device *mmio_dev;
2176 gpa_t gpa; 2529 gpa_t gpa;
2177 2530
2178 if (vcpu->mmio_read_completed) { 2531 if (vcpu->mmio_read_completed) {
2179 memcpy(val, vcpu->mmio_data, bytes); 2532 memcpy(val, vcpu->mmio_data, bytes);
2533 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
2534 vcpu->mmio_phys_addr, *(u64 *)val);
2180 vcpu->mmio_read_completed = 0; 2535 vcpu->mmio_read_completed = 0;
2181 return X86EMUL_CONTINUE; 2536 return X86EMUL_CONTINUE;
2182 } 2537 }
@@ -2197,14 +2552,12 @@ mmio:
2197 /* 2552 /*
2198 * Is this MMIO handled locally? 2553 * Is this MMIO handled locally?
2199 */ 2554 */
2200 mutex_lock(&vcpu->kvm->lock); 2555 if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
2201 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0); 2556 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
2202 if (mmio_dev) {
2203 kvm_iodevice_read(mmio_dev, gpa, bytes, val);
2204 mutex_unlock(&vcpu->kvm->lock);
2205 return X86EMUL_CONTINUE; 2557 return X86EMUL_CONTINUE;
2206 } 2558 }
2207 mutex_unlock(&vcpu->kvm->lock); 2559
2560 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
2208 2561
2209 vcpu->mmio_needed = 1; 2562 vcpu->mmio_needed = 1;
2210 vcpu->mmio_phys_addr = gpa; 2563 vcpu->mmio_phys_addr = gpa;
@@ -2231,7 +2584,6 @@ static int emulator_write_emulated_onepage(unsigned long addr,
2231 unsigned int bytes, 2584 unsigned int bytes,
2232 struct kvm_vcpu *vcpu) 2585 struct kvm_vcpu *vcpu)
2233{ 2586{
2234 struct kvm_io_device *mmio_dev;
2235 gpa_t gpa; 2587 gpa_t gpa;
2236 2588
2237 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 2589 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
@@ -2249,17 +2601,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
2249 return X86EMUL_CONTINUE; 2601 return X86EMUL_CONTINUE;
2250 2602
2251mmio: 2603mmio:
2604 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
2252 /* 2605 /*
2253 * Is this MMIO handled locally? 2606 * Is this MMIO handled locally?
2254 */ 2607 */
2255 mutex_lock(&vcpu->kvm->lock); 2608 if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
2256 mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
2257 if (mmio_dev) {
2258 kvm_iodevice_write(mmio_dev, gpa, bytes, val);
2259 mutex_unlock(&vcpu->kvm->lock);
2260 return X86EMUL_CONTINUE; 2609 return X86EMUL_CONTINUE;
2261 }
2262 mutex_unlock(&vcpu->kvm->lock);
2263 2610
2264 vcpu->mmio_needed = 1; 2611 vcpu->mmio_needed = 1;
2265 vcpu->mmio_phys_addr = gpa; 2612 vcpu->mmio_phys_addr = gpa;
@@ -2297,12 +2644,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
2297 unsigned int bytes, 2644 unsigned int bytes,
2298 struct kvm_vcpu *vcpu) 2645 struct kvm_vcpu *vcpu)
2299{ 2646{
2300 static int reported; 2647 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
2301
2302 if (!reported) {
2303 reported = 1;
2304 printk(KERN_WARNING "kvm: emulating exchange as write\n");
2305 }
2306#ifndef CONFIG_X86_64 2648#ifndef CONFIG_X86_64
2307 /* guests cmpxchg8b have to be emulated atomically */ 2649 /* guests cmpxchg8b have to be emulated atomically */
2308 if (bytes == 8) { 2650 if (bytes == 8) {
@@ -2348,7 +2690,6 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
2348 2690
2349int emulate_clts(struct kvm_vcpu *vcpu) 2691int emulate_clts(struct kvm_vcpu *vcpu)
2350{ 2692{
2351 KVMTRACE_0D(CLTS, vcpu, handler);
2352 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS); 2693 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
2353 return X86EMUL_CONTINUE; 2694 return X86EMUL_CONTINUE;
2354} 2695}
@@ -2425,7 +2766,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
2425 kvm_clear_exception_queue(vcpu); 2766 kvm_clear_exception_queue(vcpu);
2426 vcpu->arch.mmio_fault_cr2 = cr2; 2767 vcpu->arch.mmio_fault_cr2 = cr2;
2427 /* 2768 /*
2428 * TODO: fix x86_emulate.c to use guest_read/write_register 2769 * TODO: fix emulate.c to use guest_read/write_register
 2429 * instead of direct ->regs accesses, can save a hundred cycles 2770
 2430 * on Intel for instructions that don't read/change RSP, 2771
 2431 * for example. 2772
@@ -2449,14 +2790,33 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
2449 2790
2450 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops); 2791 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2451 2792
2452 /* Reject the instructions other than VMCALL/VMMCALL when 2793 /* Only allow emulation of specific instructions on #UD
 2453 * try to emulate invalid opcode */ 2794 * (namely VMMCALL, sysenter, sysexit, syscall) */
2454 c = &vcpu->arch.emulate_ctxt.decode; 2795 c = &vcpu->arch.emulate_ctxt.decode;
2455 if ((emulation_type & EMULTYPE_TRAP_UD) && 2796 if (emulation_type & EMULTYPE_TRAP_UD) {
2456 (!(c->twobyte && c->b == 0x01 && 2797 if (!c->twobyte)
2457 (c->modrm_reg == 0 || c->modrm_reg == 3) && 2798 return EMULATE_FAIL;
2458 c->modrm_mod == 3 && c->modrm_rm == 1))) 2799 switch (c->b) {
2459 return EMULATE_FAIL; 2800 case 0x01: /* VMMCALL */
2801 if (c->modrm_mod != 3 || c->modrm_rm != 1)
2802 return EMULATE_FAIL;
2803 break;
2804 case 0x34: /* sysenter */
2805 case 0x35: /* sysexit */
2806 if (c->modrm_mod != 0 || c->modrm_rm != 0)
2807 return EMULATE_FAIL;
2808 break;
2809 case 0x05: /* syscall */
2810 if (c->modrm_mod != 0 || c->modrm_rm != 0)
2811 return EMULATE_FAIL;
2812 break;
2813 default:
2814 return EMULATE_FAIL;
2815 }
2816
2817 if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
2818 return EMULATE_FAIL;
2819 }
2460 2820
2461 ++vcpu->stat.insn_emulation; 2821 ++vcpu->stat.insn_emulation;
2462 if (r) { 2822 if (r) {
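Restated outside the emulator context, the whitelist above reduces to the predicate below (a sketch only; the decode-cache field names are simplified from struct decode_cache):

/* 1 if a #UD-trapped instruction may be emulated: only the two-byte
 * opcodes 0f 01 (VMMCALL encoding), 0f 05 (syscall) and 0f 34/0f 35
 * (sysenter/sysexit) pass, with the ModRM constraints shown above. */
static int ud_emulation_allowed(int twobyte, unsigned char opcode,
                                int modrm_mod, int modrm_reg, int modrm_rm)
{
        if (!twobyte)
                return 0;
        switch (opcode) {
        case 0x01:                      /* VMMCALL */
                if (modrm_mod != 3 || modrm_rm != 1)
                        return 0;
                break;
        case 0x05:                      /* syscall */
        case 0x34:                      /* sysenter */
        case 0x35:                      /* sysexit */
                if (modrm_mod != 0 || modrm_rm != 0)
                        return 0;
                break;
        default:
                return 0;
        }
        return modrm_reg == 0 || modrm_reg == 3;
}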
@@ -2576,52 +2936,40 @@ int complete_pio(struct kvm_vcpu *vcpu)
2576 return 0; 2936 return 0;
2577} 2937}
2578 2938
2579static void kernel_pio(struct kvm_io_device *pio_dev, 2939static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
2580 struct kvm_vcpu *vcpu,
2581 void *pd)
2582{ 2940{
2583 /* TODO: String I/O for in kernel device */ 2941 /* TODO: String I/O for in kernel device */
2942 int r;
2584 2943
2585 mutex_lock(&vcpu->kvm->lock);
2586 if (vcpu->arch.pio.in) 2944 if (vcpu->arch.pio.in)
2587 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port, 2945 r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
2588 vcpu->arch.pio.size, 2946 vcpu->arch.pio.size, pd);
2589 pd);
2590 else 2947 else
2591 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port, 2948 r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
2592 vcpu->arch.pio.size, 2949 vcpu->arch.pio.size, pd);
2593 pd); 2950 return r;
2594 mutex_unlock(&vcpu->kvm->lock);
2595} 2951}
2596 2952
2597static void pio_string_write(struct kvm_io_device *pio_dev, 2953static int pio_string_write(struct kvm_vcpu *vcpu)
2598 struct kvm_vcpu *vcpu)
2599{ 2954{
2600 struct kvm_pio_request *io = &vcpu->arch.pio; 2955 struct kvm_pio_request *io = &vcpu->arch.pio;
2601 void *pd = vcpu->arch.pio_data; 2956 void *pd = vcpu->arch.pio_data;
2602 int i; 2957 int i, r = 0;
2603 2958
2604 mutex_lock(&vcpu->kvm->lock);
2605 for (i = 0; i < io->cur_count; i++) { 2959 for (i = 0; i < io->cur_count; i++) {
2606 kvm_iodevice_write(pio_dev, io->port, 2960 if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
2607 io->size, 2961 io->port, io->size, pd)) {
2608 pd); 2962 r = -EOPNOTSUPP;
2963 break;
2964 }
2609 pd += io->size; 2965 pd += io->size;
2610 } 2966 }
2611 mutex_unlock(&vcpu->kvm->lock); 2967 return r;
2612}
2613
2614static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2615 gpa_t addr, int len,
2616 int is_write)
2617{
2618 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
2619} 2968}
2620 2969
2621int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, 2970int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2622 int size, unsigned port) 2971 int size, unsigned port)
2623{ 2972{
2624 struct kvm_io_device *pio_dev;
2625 unsigned long val; 2973 unsigned long val;
2626 2974
2627 vcpu->run->exit_reason = KVM_EXIT_IO; 2975 vcpu->run->exit_reason = KVM_EXIT_IO;
@@ -2635,19 +2983,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2635 vcpu->arch.pio.down = 0; 2983 vcpu->arch.pio.down = 0;
2636 vcpu->arch.pio.rep = 0; 2984 vcpu->arch.pio.rep = 0;
2637 2985
2638 if (vcpu->run->io.direction == KVM_EXIT_IO_IN) 2986 trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
2639 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size, 2987 size, 1);
2640 handler);
2641 else
2642 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2643 handler);
2644 2988
2645 val = kvm_register_read(vcpu, VCPU_REGS_RAX); 2989 val = kvm_register_read(vcpu, VCPU_REGS_RAX);
2646 memcpy(vcpu->arch.pio_data, &val, 4); 2990 memcpy(vcpu->arch.pio_data, &val, 4);
2647 2991
2648 pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in); 2992 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
2649 if (pio_dev) {
2650 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2651 complete_pio(vcpu); 2993 complete_pio(vcpu);
2652 return 1; 2994 return 1;
2653 } 2995 }
@@ -2661,7 +3003,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2661{ 3003{
2662 unsigned now, in_page; 3004 unsigned now, in_page;
2663 int ret = 0; 3005 int ret = 0;
2664 struct kvm_io_device *pio_dev;
2665 3006
2666 vcpu->run->exit_reason = KVM_EXIT_IO; 3007 vcpu->run->exit_reason = KVM_EXIT_IO;
2667 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; 3008 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@@ -2674,12 +3015,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2674 vcpu->arch.pio.down = down; 3015 vcpu->arch.pio.down = down;
2675 vcpu->arch.pio.rep = rep; 3016 vcpu->arch.pio.rep = rep;
2676 3017
2677 if (vcpu->run->io.direction == KVM_EXIT_IO_IN) 3018 trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
2678 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size, 3019 size, count);
2679 handler);
2680 else
2681 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2682 handler);
2683 3020
2684 if (!count) { 3021 if (!count) {
2685 kvm_x86_ops->skip_emulated_instruction(vcpu); 3022 kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -2709,9 +3046,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2709 3046
2710 vcpu->arch.pio.guest_gva = address; 3047 vcpu->arch.pio.guest_gva = address;
2711 3048
2712 pio_dev = vcpu_find_pio_dev(vcpu, port,
2713 vcpu->arch.pio.cur_count,
2714 !vcpu->arch.pio.in);
2715 if (!vcpu->arch.pio.in) { 3049 if (!vcpu->arch.pio.in) {
2716 /* string PIO write */ 3050 /* string PIO write */
2717 ret = pio_copy_data(vcpu); 3051 ret = pio_copy_data(vcpu);
@@ -2719,16 +3053,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2719 kvm_inject_gp(vcpu, 0); 3053 kvm_inject_gp(vcpu, 0);
2720 return 1; 3054 return 1;
2721 } 3055 }
2722 if (ret == 0 && pio_dev) { 3056 if (ret == 0 && !pio_string_write(vcpu)) {
2723 pio_string_write(pio_dev, vcpu);
2724 complete_pio(vcpu); 3057 complete_pio(vcpu);
2725 if (vcpu->arch.pio.count == 0) 3058 if (vcpu->arch.pio.count == 0)
2726 ret = 1; 3059 ret = 1;
2727 } 3060 }
2728 } else if (pio_dev) 3061 }
2729 pr_unimpl(vcpu, "no string pio read support yet, " 3062 /* no string PIO read support yet */
2730 "port %x size %d count %ld\n",
2731 port, size, count);
2732 3063
2733 return ret; 3064 return ret;
2734} 3065}
@@ -2761,10 +3092,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
2761 3092
2762 spin_lock(&kvm_lock); 3093 spin_lock(&kvm_lock);
2763 list_for_each_entry(kvm, &vm_list, vm_list) { 3094 list_for_each_entry(kvm, &vm_list, vm_list) {
2764 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 3095 kvm_for_each_vcpu(i, vcpu, kvm) {
2765 vcpu = kvm->vcpus[i];
2766 if (!vcpu)
2767 continue;
2768 if (vcpu->cpu != freq->cpu) 3096 if (vcpu->cpu != freq->cpu)
2769 continue; 3097 continue;
2770 if (!kvm_request_guest_time_update(vcpu)) 3098 if (!kvm_request_guest_time_update(vcpu))
@@ -2857,7 +3185,6 @@ void kvm_arch_exit(void)
2857int kvm_emulate_halt(struct kvm_vcpu *vcpu) 3185int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2858{ 3186{
2859 ++vcpu->stat.halt_exits; 3187 ++vcpu->stat.halt_exits;
2860 KVMTRACE_0D(HLT, vcpu, handler);
2861 if (irqchip_in_kernel(vcpu->kvm)) { 3188 if (irqchip_in_kernel(vcpu->kvm)) {
2862 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; 3189 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
2863 return 1; 3190 return 1;
@@ -2888,7 +3215,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2888 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); 3215 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
2889 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); 3216 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
2890 3217
2891 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler); 3218 trace_kvm_hypercall(nr, a0, a1, a2, a3);
2892 3219
2893 if (!is_long_mode(vcpu)) { 3220 if (!is_long_mode(vcpu)) {
2894 nr &= 0xFFFFFFFF; 3221 nr &= 0xFFFFFFFF;
@@ -2898,6 +3225,11 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2898 a3 &= 0xFFFFFFFF; 3225 a3 &= 0xFFFFFFFF;
2899 } 3226 }
2900 3227
3228 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
3229 ret = -KVM_EPERM;
3230 goto out;
3231 }
3232
2901 switch (nr) { 3233 switch (nr) {
2902 case KVM_HC_VAPIC_POLL_IRQ: 3234 case KVM_HC_VAPIC_POLL_IRQ:
2903 ret = 0; 3235 ret = 0;
@@ -2909,6 +3241,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2909 ret = -KVM_ENOSYS; 3241 ret = -KVM_ENOSYS;
2910 break; 3242 break;
2911 } 3243 }
3244out:
2912 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); 3245 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
2913 ++vcpu->stat.hypercalls; 3246 ++vcpu->stat.hypercalls;
2914 return r; 3247 return r;
@@ -2988,8 +3321,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2988 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr); 3321 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2989 return 0; 3322 return 0;
2990 } 3323 }
2991 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2992 (u32)((u64)value >> 32), handler);
2993 3324
2994 return value; 3325 return value;
2995} 3326}
@@ -2997,9 +3328,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2997void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val, 3328void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2998 unsigned long *rflags) 3329 unsigned long *rflags)
2999{ 3330{
3000 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
3001 (u32)((u64)val >> 32), handler);
3002
3003 switch (cr) { 3331 switch (cr) {
3004 case 0: 3332 case 0:
3005 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val)); 3333 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
@@ -3109,11 +3437,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
3109 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx); 3437 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
3110 } 3438 }
3111 kvm_x86_ops->skip_emulated_instruction(vcpu); 3439 kvm_x86_ops->skip_emulated_instruction(vcpu);
3112 KVMTRACE_5D(CPUID, vcpu, function, 3440 trace_kvm_cpuid(function,
3113 (u32)kvm_register_read(vcpu, VCPU_REGS_RAX), 3441 kvm_register_read(vcpu, VCPU_REGS_RAX),
3114 (u32)kvm_register_read(vcpu, VCPU_REGS_RBX), 3442 kvm_register_read(vcpu, VCPU_REGS_RBX),
3115 (u32)kvm_register_read(vcpu, VCPU_REGS_RCX), 3443 kvm_register_read(vcpu, VCPU_REGS_RCX),
3116 (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler); 3444 kvm_register_read(vcpu, VCPU_REGS_RDX));
3117} 3445}
3118EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); 3446EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
3119 3447
@@ -3179,6 +3507,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3179 if (!kvm_x86_ops->update_cr8_intercept) 3507 if (!kvm_x86_ops->update_cr8_intercept)
3180 return; 3508 return;
3181 3509
3510 if (!vcpu->arch.apic)
3511 return;
3512
3182 if (!vcpu->arch.apic->vapic_addr) 3513 if (!vcpu->arch.apic->vapic_addr)
3183 max_irr = kvm_lapic_find_highest_irr(vcpu); 3514 max_irr = kvm_lapic_find_highest_irr(vcpu);
3184 else 3515 else
@@ -3192,12 +3523,16 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
3192 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); 3523 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
3193} 3524}
3194 3525
3195static void inject_pending_irq(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 3526static void inject_pending_event(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3196{ 3527{
3197 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3198 kvm_x86_ops->set_interrupt_shadow(vcpu, 0);
3199
3200 /* try to reinject previous events if any */ 3528 /* try to reinject previous events if any */
3529 if (vcpu->arch.exception.pending) {
3530 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
3531 vcpu->arch.exception.has_error_code,
3532 vcpu->arch.exception.error_code);
3533 return;
3534 }
3535
3201 if (vcpu->arch.nmi_injected) { 3536 if (vcpu->arch.nmi_injected) {
3202 kvm_x86_ops->set_nmi(vcpu); 3537 kvm_x86_ops->set_nmi(vcpu);
3203 return; 3538 return;
@@ -3271,16 +3606,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3271 smp_mb__after_clear_bit(); 3606 smp_mb__after_clear_bit();
3272 3607
3273 if (vcpu->requests || need_resched() || signal_pending(current)) { 3608 if (vcpu->requests || need_resched() || signal_pending(current)) {
3609 set_bit(KVM_REQ_KICK, &vcpu->requests);
3274 local_irq_enable(); 3610 local_irq_enable();
3275 preempt_enable(); 3611 preempt_enable();
3276 r = 1; 3612 r = 1;
3277 goto out; 3613 goto out;
3278 } 3614 }
3279 3615
3280 if (vcpu->arch.exception.pending) 3616 inject_pending_event(vcpu, kvm_run);
3281 __queue_exception(vcpu);
3282 else
3283 inject_pending_irq(vcpu, kvm_run);
3284 3617
3285 /* enable NMI/IRQ window open exits if needed */ 3618 /* enable NMI/IRQ window open exits if needed */
3286 if (vcpu->arch.nmi_pending) 3619 if (vcpu->arch.nmi_pending)
@@ -3297,14 +3630,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3297 3630
3298 kvm_guest_enter(); 3631 kvm_guest_enter();
3299 3632
3300 get_debugreg(vcpu->arch.host_dr6, 6);
3301 get_debugreg(vcpu->arch.host_dr7, 7);
3302 if (unlikely(vcpu->arch.switch_db_regs)) { 3633 if (unlikely(vcpu->arch.switch_db_regs)) {
3303 get_debugreg(vcpu->arch.host_db[0], 0);
3304 get_debugreg(vcpu->arch.host_db[1], 1);
3305 get_debugreg(vcpu->arch.host_db[2], 2);
3306 get_debugreg(vcpu->arch.host_db[3], 3);
3307
3308 set_debugreg(0, 7); 3634 set_debugreg(0, 7);
3309 set_debugreg(vcpu->arch.eff_db[0], 0); 3635 set_debugreg(vcpu->arch.eff_db[0], 0);
3310 set_debugreg(vcpu->arch.eff_db[1], 1); 3636 set_debugreg(vcpu->arch.eff_db[1], 1);
@@ -3312,18 +3638,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3312 set_debugreg(vcpu->arch.eff_db[3], 3); 3638 set_debugreg(vcpu->arch.eff_db[3], 3);
3313 } 3639 }
3314 3640
3315 KVMTRACE_0D(VMENTRY, vcpu, entryexit); 3641 trace_kvm_entry(vcpu->vcpu_id);
3316 kvm_x86_ops->run(vcpu, kvm_run); 3642 kvm_x86_ops->run(vcpu, kvm_run);
3317 3643
3318 if (unlikely(vcpu->arch.switch_db_regs)) { 3644 if (unlikely(vcpu->arch.switch_db_regs || test_thread_flag(TIF_DEBUG))) {
3319 set_debugreg(0, 7); 3645 set_debugreg(current->thread.debugreg0, 0);
3320 set_debugreg(vcpu->arch.host_db[0], 0); 3646 set_debugreg(current->thread.debugreg1, 1);
3321 set_debugreg(vcpu->arch.host_db[1], 1); 3647 set_debugreg(current->thread.debugreg2, 2);
3322 set_debugreg(vcpu->arch.host_db[2], 2); 3648 set_debugreg(current->thread.debugreg3, 3);
3323 set_debugreg(vcpu->arch.host_db[3], 3); 3649 set_debugreg(current->thread.debugreg6, 6);
3650 set_debugreg(current->thread.debugreg7, 7);
3324 } 3651 }
3325 set_debugreg(vcpu->arch.host_dr6, 6);
3326 set_debugreg(vcpu->arch.host_dr7, 7);
3327 3652
3328 set_bit(KVM_REQ_KICK, &vcpu->requests); 3653 set_bit(KVM_REQ_KICK, &vcpu->requests);
3329 local_irq_enable(); 3654 local_irq_enable();
@@ -3653,11 +3978,8 @@ static void kvm_set_segment(struct kvm_vcpu *vcpu,
3653static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector, 3978static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3654 struct kvm_segment *kvm_desct) 3979 struct kvm_segment *kvm_desct)
3655{ 3980{
3656 kvm_desct->base = seg_desc->base0; 3981 kvm_desct->base = get_desc_base(seg_desc);
3657 kvm_desct->base |= seg_desc->base1 << 16; 3982 kvm_desct->limit = get_desc_limit(seg_desc);
3658 kvm_desct->base |= seg_desc->base2 << 24;
3659 kvm_desct->limit = seg_desc->limit0;
3660 kvm_desct->limit |= seg_desc->limit << 16;
3661 if (seg_desc->g) { 3983 if (seg_desc->g) {
3662 kvm_desct->limit <<= 12; 3984 kvm_desct->limit <<= 12;
3663 kvm_desct->limit |= 0xfff; 3985 kvm_desct->limit |= 0xfff;
@@ -3701,7 +4023,6 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
3701static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 4023static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3702 struct desc_struct *seg_desc) 4024 struct desc_struct *seg_desc)
3703{ 4025{
3704 gpa_t gpa;
3705 struct descriptor_table dtable; 4026 struct descriptor_table dtable;
3706 u16 index = selector >> 3; 4027 u16 index = selector >> 3;
3707 4028
@@ -3711,16 +4032,13 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3711 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc); 4032 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3712 return 1; 4033 return 1;
3713 } 4034 }
3714 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base); 4035 return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
3715 gpa += index * 8;
3716 return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
3717} 4036}
3718 4037
 3719/* allowed just for 8-byte segments */ 4038
3720static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 4039static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3721 struct desc_struct *seg_desc) 4040 struct desc_struct *seg_desc)
3722{ 4041{
3723 gpa_t gpa;
3724 struct descriptor_table dtable; 4042 struct descriptor_table dtable;
3725 u16 index = selector >> 3; 4043 u16 index = selector >> 3;
3726 4044
@@ -3728,19 +4046,13 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3728 4046
3729 if (dtable.limit < index * 8 + 7) 4047 if (dtable.limit < index * 8 + 7)
3730 return 1; 4048 return 1;
3731 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base); 4049 return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
3732 gpa += index * 8;
3733 return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
3734} 4050}
3735 4051
3736static u32 get_tss_base_addr(struct kvm_vcpu *vcpu, 4052static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3737 struct desc_struct *seg_desc) 4053 struct desc_struct *seg_desc)
3738{ 4054{
3739 u32 base_addr; 4055 u32 base_addr = get_desc_base(seg_desc);
3740
3741 base_addr = seg_desc->base0;
3742 base_addr |= (seg_desc->base1 << 16);
3743 base_addr |= (seg_desc->base2 << 24);
3744 4056
3745 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr); 4057 return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
3746} 4058}
@@ -3785,12 +4097,19 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
3785 return 0; 4097 return 0;
3786} 4098}
3787 4099
4100static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
4101{
4102 return (seg != VCPU_SREG_LDTR) &&
4103 (seg != VCPU_SREG_TR) &&
4104 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_VM);
4105}
4106
3788int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, 4107int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3789 int type_bits, int seg) 4108 int type_bits, int seg)
3790{ 4109{
3791 struct kvm_segment kvm_seg; 4110 struct kvm_segment kvm_seg;
3792 4111
3793 if (!(vcpu->arch.cr0 & X86_CR0_PE)) 4112 if (is_vm86_segment(vcpu, seg) || !(vcpu->arch.cr0 & X86_CR0_PE))
3794 return kvm_load_realmode_segment(vcpu, selector, seg); 4113 return kvm_load_realmode_segment(vcpu, selector, seg);
3795 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg)) 4114 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3796 return 1; 4115 return 1;
@@ -4029,7 +4348,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
4029 } 4348 }
4030 } 4349 }
4031 4350
4032 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) { 4351 if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
4033 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc); 4352 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
4034 return 1; 4353 return 1;
4035 } 4354 }
@@ -4099,13 +4418,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4099 4418
4100 vcpu->arch.cr2 = sregs->cr2; 4419 vcpu->arch.cr2 = sregs->cr2;
4101 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; 4420 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
4102 4421 vcpu->arch.cr3 = sregs->cr3;
4103 down_read(&vcpu->kvm->slots_lock);
4104 if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT))
4105 vcpu->arch.cr3 = sregs->cr3;
4106 else
4107 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
4108 up_read(&vcpu->kvm->slots_lock);
4109 4422
4110 kvm_set_cr8(vcpu, sregs->cr8); 4423 kvm_set_cr8(vcpu, sregs->cr8);
4111 4424
@@ -4147,8 +4460,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
4147 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); 4460 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
4148 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); 4461 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
4149 4462
4463 update_cr8_intercept(vcpu);
4464
4150 /* Older userspace won't unhalt the vcpu on reset. */ 4465 /* Older userspace won't unhalt the vcpu on reset. */
4151 if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 && 4466 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
4152 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && 4467 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
4153 !(vcpu->arch.cr0 & X86_CR0_PE)) 4468 !(vcpu->arch.cr0 & X86_CR0_PE))
4154 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4469 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -4419,7 +4734,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
4419 kvm = vcpu->kvm; 4734 kvm = vcpu->kvm;
4420 4735
4421 vcpu->arch.mmu.root_hpa = INVALID_PAGE; 4736 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
4422 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0) 4737 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
4423 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 4738 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4424 else 4739 else
4425 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; 4740 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
@@ -4441,6 +4756,14 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
4441 goto fail_mmu_destroy; 4756 goto fail_mmu_destroy;
4442 } 4757 }
4443 4758
4759 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
4760 GFP_KERNEL);
4761 if (!vcpu->arch.mce_banks) {
4762 r = -ENOMEM;
4763 goto fail_mmu_destroy;
4764 }
4765 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
4766
4444 return 0; 4767 return 0;
4445 4768
4446fail_mmu_destroy: 4769fail_mmu_destroy:
@@ -4488,20 +4811,22 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4488static void kvm_free_vcpus(struct kvm *kvm) 4811static void kvm_free_vcpus(struct kvm *kvm)
4489{ 4812{
4490 unsigned int i; 4813 unsigned int i;
4814 struct kvm_vcpu *vcpu;
4491 4815
4492 /* 4816 /*
4493 * Unpin any mmu pages first. 4817 * Unpin any mmu pages first.
4494 */ 4818 */
4495 for (i = 0; i < KVM_MAX_VCPUS; ++i) 4819 kvm_for_each_vcpu(i, vcpu, kvm)
4496 if (kvm->vcpus[i]) 4820 kvm_unload_vcpu_mmu(vcpu);
4497 kvm_unload_vcpu_mmu(kvm->vcpus[i]); 4821 kvm_for_each_vcpu(i, vcpu, kvm)
4498 for (i = 0; i < KVM_MAX_VCPUS; ++i) { 4822 kvm_arch_vcpu_free(vcpu);
4499 if (kvm->vcpus[i]) { 4823
4500 kvm_arch_vcpu_free(kvm->vcpus[i]); 4824 mutex_lock(&kvm->lock);
4501 kvm->vcpus[i] = NULL; 4825 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
4502 } 4826 kvm->vcpus[i] = NULL;
4503 }
4504 4827
4828 atomic_set(&kvm->online_vcpus, 0);
4829 mutex_unlock(&kvm->lock);
4505} 4830}
4506 4831
4507void kvm_arch_sync_events(struct kvm *kvm) 4832void kvm_arch_sync_events(struct kvm *kvm)
@@ -4578,7 +4903,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
4578 4903
4579 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 4904 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
4580 spin_unlock(&kvm->mmu_lock); 4905 spin_unlock(&kvm->mmu_lock);
4581 kvm_flush_remote_tlbs(kvm);
4582 4906
4583 return 0; 4907 return 0;
4584} 4908}
@@ -4592,8 +4916,10 @@ void kvm_arch_flush_shadow(struct kvm *kvm)
4592int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) 4916int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4593{ 4917{
4594 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE 4918 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
4595 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED 4919 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
4596 || vcpu->arch.nmi_pending; 4920 || vcpu->arch.nmi_pending ||
4921 (kvm_arch_interrupt_allowed(vcpu) &&
4922 kvm_cpu_has_interrupt(vcpu));
4597} 4923}
4598 4924
4599void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 4925void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
@@ -4617,3 +4943,9 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
4617{ 4943{
4618 return kvm_x86_ops->interrupt_allowed(vcpu); 4944 return kvm_x86_ops->interrupt_allowed(vcpu);
4619} 4945}
4946
4947EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
4948EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
4949EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
4950EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
4951EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 4c8e10af78e8..5eadea585d2a 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -31,4 +31,8 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
31{ 31{
32 return (nr == BP_VECTOR) || (nr == OF_VECTOR); 32 return (nr == BP_VECTOR) || (nr == OF_VECTOR);
33} 33}
34
35struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
36 u32 function, u32 index);
37
34#endif 38#endif
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 07c31899c9c2..9e609206fac9 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -9,6 +9,8 @@ lib-y += thunk_$(BITS).o
9lib-y += usercopy_$(BITS).o getuser.o putuser.o 9lib-y += usercopy_$(BITS).o getuser.o putuser.o
10lib-y += memcpy_$(BITS).o 10lib-y += memcpy_$(BITS).o
11 11
12obj-y += msr-reg.o msr-reg-export.o
13
12ifeq ($(CONFIG_X86_32),y) 14ifeq ($(CONFIG_X86_32),y)
13 obj-y += atomic64_32.o 15 obj-y += atomic64_32.o
14 lib-y += checksum_32.o 16 lib-y += checksum_32.o
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
new file mode 100644
index 000000000000..a311cc59b65d
--- /dev/null
+++ b/arch/x86/lib/msr-reg-export.c
@@ -0,0 +1,5 @@
1#include <linux/module.h>
2#include <asm/msr.h>
3
4EXPORT_SYMBOL(native_rdmsr_safe_regs);
5EXPORT_SYMBOL(native_wrmsr_safe_regs);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
new file mode 100644
index 000000000000..69fa10623f21
--- /dev/null
+++ b/arch/x86/lib/msr-reg.S
@@ -0,0 +1,102 @@
1#include <linux/linkage.h>
2#include <linux/errno.h>
3#include <asm/dwarf2.h>
4#include <asm/asm.h>
5#include <asm/msr.h>
6
7#ifdef CONFIG_X86_64
8/*
9 * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
10 *
11 * reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
12 *
13 */
14.macro op_safe_regs op
15ENTRY(native_\op\()_safe_regs)
16 CFI_STARTPROC
17 pushq_cfi %rbx
18 pushq_cfi %rbp
19 movq %rdi, %r10 /* Save pointer */
20 xorl %r11d, %r11d /* Return value */
21 movl (%rdi), %eax
22 movl 4(%rdi), %ecx
23 movl 8(%rdi), %edx
24 movl 12(%rdi), %ebx
25 movl 20(%rdi), %ebp
26 movl 24(%rdi), %esi
27 movl 28(%rdi), %edi
28 CFI_REMEMBER_STATE
291: \op
302: movl %eax, (%r10)
31 movl %r11d, %eax /* Return value */
32 movl %ecx, 4(%r10)
33 movl %edx, 8(%r10)
34 movl %ebx, 12(%r10)
35 movl %ebp, 20(%r10)
36 movl %esi, 24(%r10)
37 movl %edi, 28(%r10)
38 popq_cfi %rbp
39 popq_cfi %rbx
40 ret
413:
42 CFI_RESTORE_STATE
43 movl $-EIO, %r11d
44 jmp 2b
45
46 _ASM_EXTABLE(1b, 3b)
47 CFI_ENDPROC
48ENDPROC(native_\op\()_safe_regs)
49.endm
50
51#else /* X86_32 */
52
53.macro op_safe_regs op
54ENTRY(native_\op\()_safe_regs)
55 CFI_STARTPROC
56 pushl_cfi %ebx
57 pushl_cfi %ebp
58 pushl_cfi %esi
59 pushl_cfi %edi
60 pushl_cfi $0 /* Return value */
61 pushl_cfi %eax
62 movl 4(%eax), %ecx
63 movl 8(%eax), %edx
64 movl 12(%eax), %ebx
65 movl 20(%eax), %ebp
66 movl 24(%eax), %esi
67 movl 28(%eax), %edi
68 movl (%eax), %eax
69 CFI_REMEMBER_STATE
701: \op
712: pushl_cfi %eax
72 movl 4(%esp), %eax
73 popl_cfi (%eax)
74 addl $4, %esp
75 CFI_ADJUST_CFA_OFFSET -4
76 movl %ecx, 4(%eax)
77 movl %edx, 8(%eax)
78 movl %ebx, 12(%eax)
79 movl %ebp, 20(%eax)
80 movl %esi, 24(%eax)
81 movl %edi, 28(%eax)
82 popl_cfi %eax
83 popl_cfi %edi
84 popl_cfi %esi
85 popl_cfi %ebp
86 popl_cfi %ebx
87 ret
883:
89 CFI_RESTORE_STATE
90 movl $-EIO, 4(%esp)
91 jmp 2b
92
93 _ASM_EXTABLE(1b, 3b)
94 CFI_ENDPROC
95ENDPROC(native_\op\()_safe_regs)
96.endm
97
98#endif
99
100op_safe_regs rdmsr
101op_safe_regs wrmsr
102
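The gprs[8] convention documented at the top of this file is easiest to see from a caller. A hedged kernel-context sketch (read_msr_via_regs is an invented name; only native_rdmsr_safe_regs and the array layout come from this file):

#include <linux/types.h>
#include <asm/msr.h>

/* gprs[] indices follow eax, ecx, edx, ebx, esp, ebp, esi, edi;
 * ecx carries the MSR number in, edx:eax carry the value out. */
static int read_msr_via_regs(u32 msr, u64 *val)
{
        u32 gprs[8] = { 0 };
        int err;

        gprs[1] = msr;                          /* ecx */
        err = native_rdmsr_safe_regs(gprs);
        if (!err)
                *val = ((u64)gprs[2] << 32) | gprs[0];  /* edx:eax */
        return err;
}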
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index caa24aca8115..33a1e3ca22d8 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -175,3 +175,52 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
175 return err ? err : rv.err; 175 return err ? err : rv.err;
176} 176}
177EXPORT_SYMBOL(wrmsr_safe_on_cpu); 177EXPORT_SYMBOL(wrmsr_safe_on_cpu);
178
179/*
180 * These variants are significantly slower, but allows control over
181 * the entire 32-bit GPR set.
182 */
183struct msr_regs_info {
184 u32 *regs;
185 int err;
186};
187
188static void __rdmsr_safe_regs_on_cpu(void *info)
189{
190 struct msr_regs_info *rv = info;
191
192 rv->err = rdmsr_safe_regs(rv->regs);
193}
194
195static void __wrmsr_safe_regs_on_cpu(void *info)
196{
197 struct msr_regs_info *rv = info;
198
199 rv->err = wrmsr_safe_regs(rv->regs);
200}
201
202int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
203{
204 int err;
205 struct msr_regs_info rv;
206
207 rv.regs = regs;
208 rv.err = -EIO;
209 err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
210
211 return err ? err : rv.err;
212}
213EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
214
215int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
216{
217 int err;
218 struct msr_regs_info rv;
219
220 rv.regs = regs;
221 rv.err = -EIO;
222 err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
223
224 return err ? err : rv.err;
225}
226EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
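The new cross-CPU wrappers follow the same register-array convention as the native routines. A hedged usage sketch (the caller name is illustrative; only rdmsr_safe_regs_on_cpu comes from this file):

#include <linux/types.h>
#include <asm/msr.h>

/* Read an MSR on another CPU with full GPR control; regs[1] (ecx)
 * selects the MSR, regs[2]:regs[0] (edx:eax) return the value. */
static int read_msr_on(unsigned int cpu, u32 msr, u64 *val)
{
        u32 regs[8] = { 0 };
        int err;

        regs[1] = msr;
        err = rdmsr_safe_regs_on_cpu(cpu, regs);
        if (!err)
                *val = ((u64)regs[2] << 32) | regs[0];
        return err;
}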
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index eefdeee8a871..9b5a9f59a478 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,5 +1,9 @@
1obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ 1obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
2 pat.o pgtable.o gup.o 2 pat.o pgtable.o physaddr.o gup.o
3
4# Make sure __phys_addr has no stackprotector
5nostackp := $(call cc-option, -fno-stack-protector)
6CFLAGS_physaddr.o := $(nostackp)
3 7
4obj-$(CONFIG_SMP) += tlb.o 8obj-$(CONFIG_SMP) += tlb.o
5 9
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index bfae139182ff..775a020990a5 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -285,26 +285,25 @@ check_v8086_mode(struct pt_regs *regs, unsigned long address,
285 tsk->thread.screen_bitmap |= 1 << bit; 285 tsk->thread.screen_bitmap |= 1 << bit;
286} 286}
287 287
288static void dump_pagetable(unsigned long address) 288static bool low_pfn(unsigned long pfn)
289{ 289{
290 __typeof__(pte_val(__pte(0))) page; 290 return pfn < max_low_pfn;
291}
291 292
292 page = read_cr3(); 293static void dump_pagetable(unsigned long address)
293 page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; 294{
295 pgd_t *base = __va(read_cr3());
296 pgd_t *pgd = &base[pgd_index(address)];
297 pmd_t *pmd;
298 pte_t *pte;
294 299
295#ifdef CONFIG_X86_PAE 300#ifdef CONFIG_X86_PAE
296 printk("*pdpt = %016Lx ", page); 301 printk("*pdpt = %016Lx ", pgd_val(*pgd));
297 if ((page >> PAGE_SHIFT) < max_low_pfn 302 if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
298 && page & _PAGE_PRESENT) { 303 goto out;
299 page &= PAGE_MASK;
300 page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
301 & (PTRS_PER_PMD - 1)];
302 printk(KERN_CONT "*pde = %016Lx ", page);
303 page &= ~_PAGE_NX;
304 }
305#else
306 printk("*pde = %08lx ", page);
307#endif 304#endif
305 pmd = pmd_offset(pud_offset(pgd, address), address);
306 printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
308 307
309 /* 308 /*
310 * We must not directly access the pte in the highpte 309 * We must not directly access the pte in the highpte
@@ -312,16 +311,12 @@ static void dump_pagetable(unsigned long address)
312 * And let's rather not kmap-atomic the pte, just in case 311 * And let's rather not kmap-atomic the pte, just in case
313 * it's allocated already: 312 * it's allocated already:
314 */ 313 */
315 if ((page >> PAGE_SHIFT) < max_low_pfn 314 if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
316 && (page & _PAGE_PRESENT) 315 goto out;
317 && !(page & _PAGE_PSE)) {
318
319 page &= PAGE_MASK;
320 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
321 & (PTRS_PER_PTE - 1)];
322 printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
323 }
324 316
317 pte = pte_offset_kernel(pmd, address);
318 printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
319out:
325 printk("\n"); 320 printk("\n");
326} 321}
327 322
@@ -450,16 +445,12 @@ static int bad_address(void *p)
450 445
451static void dump_pagetable(unsigned long address) 446static void dump_pagetable(unsigned long address)
452{ 447{
453 pgd_t *pgd; 448 pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
449 pgd_t *pgd = base + pgd_index(address);
454 pud_t *pud; 450 pud_t *pud;
455 pmd_t *pmd; 451 pmd_t *pmd;
456 pte_t *pte; 452 pte_t *pte;
457 453
458 pgd = (pgd_t *)read_cr3();
459
460 pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
461
462 pgd += pgd_index(address);
463 if (bad_address(pgd)) 454 if (bad_address(pgd))
464 goto bad; 455 goto bad;
465 456
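The rewritten dump_pagetable() is just a guarded top-down walk of the paging hierarchy. A condensed kernel-context sketch of the same traversal (no locking, debug use only; the helper name is invented):

#include <linux/mm.h>
#include <asm/pgtable.h>

static void dump_one_mapping(unsigned long address)
{
        /* read_cr3()/PHYSICAL_PAGE_MASK as in the 64-bit walk above */
        pgd_t *pgd = (pgd_t *)__va(read_cr3() & PHYSICAL_PAGE_MASK)
                        + pgd_index(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!pgd_present(*pgd))
                return;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;
        pmd = pmd_offset(pud, address);
        /* a large pmd maps the page directly; there is no pte level */
        if (!pmd_present(*pmd) || pmd_large(*pmd))
                return;
        pte = pte_offset_kernel(pmd, address);
        printk(KERN_DEBUG "pte = %016Lx\n", (u64)pte_val(*pte));
}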
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 2112ed55e7ea..63a6ba66cbe0 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -24,7 +24,7 @@ void kunmap(struct page *page)
24 * no global lock is needed and because the kmap code must perform a global TLB 24 * no global lock is needed and because the kmap code must perform a global TLB
25 * invalidation when the kmap pool wraps. 25 * invalidation when the kmap pool wraps.
26 * 26 *
27 * However when holding an atomic kmap is is not legal to sleep, so atomic 27 * However when holding an atomic kmap it is not legal to sleep, so atomic
28 * kmaps are appropriate for short, tight code paths only. 28 * kmaps are appropriate for short, tight code paths only.
29 */ 29 */
30void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) 30void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
@@ -104,6 +104,7 @@ EXPORT_SYMBOL(kunmap);
104EXPORT_SYMBOL(kmap_atomic); 104EXPORT_SYMBOL(kmap_atomic);
105EXPORT_SYMBOL(kunmap_atomic); 105EXPORT_SYMBOL(kunmap_atomic);
106EXPORT_SYMBOL(kmap_atomic_prot); 106EXPORT_SYMBOL(kmap_atomic_prot);
107EXPORT_SYMBOL(kmap_atomic_to_page);
107 108
108void __init set_highmem_pages_init(void) 109void __init set_highmem_pages_init(void)
109{ 110{
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index fe6f84ca121e..84e236ce76ba 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -21,7 +21,7 @@
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/highmem.h> 22#include <linux/highmem.h>
23 23
24int is_io_mapping_possible(resource_size_t base, unsigned long size) 24static int is_io_mapping_possible(resource_size_t base, unsigned long size)
25{ 25{
26#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT) 26#if !defined(CONFIG_X86_PAE) && defined(CONFIG_PHYS_ADDR_T_64BIT)
27 /* There is no way to map greater than 1 << 32 address without PAE */ 27 /* There is no way to map greater than 1 << 32 address without PAE */
@@ -30,7 +30,30 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size)
30#endif 30#endif
31 return 1; 31 return 1;
32} 32}
33EXPORT_SYMBOL_GPL(is_io_mapping_possible); 33
34int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
35{
36 unsigned long flag = _PAGE_CACHE_WC;
37 int ret;
38
39 if (!is_io_mapping_possible(base, size))
40 return -EINVAL;
41
42 ret = io_reserve_memtype(base, base + size, &flag);
43 if (ret)
44 return ret;
45
46 *prot = __pgprot(__PAGE_KERNEL | flag);
47 return 0;
48}
49EXPORT_SYMBOL_GPL(iomap_create_wc);
50
51void
52iomap_free(resource_size_t base, unsigned long size)
53{
54 io_free_memtype(base, base + size);
55}
56EXPORT_SYMBOL_GPL(iomap_free);
34 57
35void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) 58void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
36{ 59{
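Driver-side, the new pair brackets the lifetime of a write-combining mapping; a hedged sketch (map_bar_wc and the placement of the kmap call are illustrative, not from this patch):

#include <linux/types.h>
#include <asm/iomap.h>

static int map_bar_wc(resource_size_t base, unsigned long size)
{
        pgprot_t prot;
        int ret;

        /* reserves the range as WC in the PAT memtype tree */
        ret = iomap_create_wc(base, size, &prot);
        if (ret)
                return ret;

        /* ... kmap_atomic_prot_pfn(base >> PAGE_SHIFT, KM_USER0, prot)
         *     and the actual device I/O would go here ... */

        iomap_free(base, size);
        return 0;
}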
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8a450930834f..334e63ca7b2b 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -22,77 +22,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pat.h>
 
-static inline int phys_addr_valid(resource_size_t addr)
-{
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-	return !(addr >> boot_cpu_data.x86_phys_bits);
-#else
-	return 1;
-#endif
-}
-
-#ifdef CONFIG_X86_64
-
-unsigned long __phys_addr(unsigned long x)
-{
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
-		x += phys_base;
-	} else {
-		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-		x -= PAGE_OFFSET;
-		VIRTUAL_BUG_ON(!phys_addr_valid(x));
-	}
-	return x;
-}
-EXPORT_SYMBOL(__phys_addr);
-
-bool __virt_addr_valid(unsigned long x)
-{
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		if (x >= KERNEL_IMAGE_SIZE)
-			return false;
-		x += phys_base;
-	} else {
-		if (x < PAGE_OFFSET)
-			return false;
-		x -= PAGE_OFFSET;
-		if (!phys_addr_valid(x))
-			return false;
-	}
-
-	return pfn_valid(x >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(__virt_addr_valid);
-
-#else
-
-#ifdef CONFIG_DEBUG_VIRTUAL
-unsigned long __phys_addr(unsigned long x)
-{
-	/* VMALLOC_* aren't constants */
-	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
-	return x - PAGE_OFFSET;
-}
-EXPORT_SYMBOL(__phys_addr);
-#endif
-
-bool __virt_addr_valid(unsigned long x)
-{
-	if (x < PAGE_OFFSET)
-		return false;
-	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
-		return false;
-	if (x >= FIXADDR_START)
-		return false;
-	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(__virt_addr_valid);
-
-#endif
+#include "physaddr.h"
 
 int page_is_ram(unsigned long pagenr)
 {
@@ -228,24 +158,14 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
 						prot_val, &new_prot_val);
 	if (retval) {
-		pr_debug("Warning: reserve_memtype returned %d\n", retval);
+		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
 		return NULL;
 	}
 
 	if (prot_val != new_prot_val) {
-		/*
-		 * Do not fallback to certain memory types with certain
-		 * requested type:
-		 * - request is uc-, return cannot be write-back
-		 * - request is uc-, return cannot be write-combine
-		 * - request is write-combine, return cannot be write-back
-		 */
-		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
-		     (new_prot_val == _PAGE_CACHE_WB ||
-		      new_prot_val == _PAGE_CACHE_WC)) ||
-		    (prot_val == _PAGE_CACHE_WC &&
-		     new_prot_val == _PAGE_CACHE_WB)) {
-			pr_debug(
+		if (!is_new_memtype_allowed(phys_addr, size,
+					    prot_val, new_prot_val)) {
+			printk(KERN_ERR
 		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
 				(unsigned long long)phys_addr,
 				(unsigned long long)(phys_addr + size),
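
The open-coded fallback rules are now centralized in is_new_memtype_allowed(). The logic being replaced amounts to the following predicate (a sketch reconstructing the removed checks, not the helper's actual body):

	/* The reserved type may differ from the requested one only when
	 * the substitution is harmless. Getting WB or WC when UC- was
	 * requested, or WB when WC was requested, would hand back a more
	 * cacheable mapping than asked for, so those cases are rejected. */
	static bool fallback_ok(unsigned long req, unsigned long got)
	{
		if (req == _PAGE_CACHE_UC_MINUS &&
		    (got == _PAGE_CACHE_WB || got == _PAGE_CACHE_WC))
			return false;
		if (req == _PAGE_CACHE_WC && got == _PAGE_CACHE_WB)
			return false;
		return true;
	}
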
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 165829600566..c8191defc38a 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -29,13 +29,26 @@
 #include <linux/random.h>
 #include <linux/limits.h>
 #include <linux/sched.h>
+#include <asm/elf.h>
+
+static unsigned int stack_maxrandom_size(void)
+{
+	unsigned int max = 0;
+	if ((current->flags & PF_RANDOMIZE) &&
+		!(current->personality & ADDR_NO_RANDOMIZE)) {
+		max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
+	}
+
+	return max;
+}
+
 
 /*
  * Top of mmap area (just below the process stack).
  *
- * Leave an at least ~128 MB hole.
+ * Leave at least a ~128 MB hole with possible stack randomization.
  */
-#define MIN_GAP (128*1024*1024)
+#define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
 #define MAX_GAP (TASK_SIZE/6*5)
 
 /*
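
A worked example of the new gap floor (the numbers are illustrative; STACK_RND_MASK is architecture- and mode-dependent):

	/* With PAGE_SHIFT = 12 and a STACK_RND_MASK of 0x7ff: */
	max     = 0x7ffU << 12;			/* 0x7ff000, just under 8 MB */
	min_gap = 128*1024*1024UL + max;	/* ~136 MB instead of 128 MB */

so a fully randomized stack can no longer reach into the top of the mmap area.
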
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 7e600c1962db..24952fdc7e40 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -12,6 +12,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pfn.h>
+#include <linux/percpu.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -686,7 +687,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
 	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
-	unsigned long vaddr, remapped;
+	unsigned long vaddr;
 	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
@@ -744,24 +745,6 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	}
 #endif
 
-	/*
-	 * If the PMD page was partially used for per-cpu remapping,
-	 * the recycled area needs to be split and modified. Because
-	 * the area is always proper subset of a PMD page
-	 * cpa->numpages is guaranteed to be 1 for these areas, so
-	 * there's no need to loop over and check for further remaps.
-	 */
-	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
-	if (remapped) {
-		WARN_ON(cpa->numpages > 1);
-		alias_cpa = *cpa;
-		alias_cpa.vaddr = &remapped;
-		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
-		ret = __change_page_attr_set_clr(&alias_cpa, 0);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
@@ -822,6 +805,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
+	unsigned long baddr = 0;
 
 	/*
 	 * Check, if we are requested to change a not supported
@@ -853,6 +837,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 		 */
 		WARN_ON_ONCE(1);
 		}
+		/*
+		 * Save address for cache flush. *addr is modified in the call
+		 * to __change_page_attr_set_clr() below.
+		 */
+		baddr = *addr;
 	}
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -900,7 +889,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
 		} else
-			cpa_flush_range(*addr, numpages, cache);
+			cpa_flush_range(baddr, numpages, cache);
 	} else
 		cpa_flush_all(cache);
 
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 352aa9e927e2..7257cf3decf9 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -15,6 +15,7 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/rbtree.h>
 
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -148,11 +149,10 @@ static char *cattr_name(unsigned long flags)
  * areas). All the aliases have the same cache attributes of course.
  * Zero attributes are represented as holes.
  *
- * Currently the data structure is a list because the number of mappings
- * are expected to be relatively small. If this should be a problem
- * it could be changed to a rbtree or similar.
+ * The data structure is a list that is also organized as an rbtree
+ * sorted on the start address of the memtype range.
  *
- * memtype_lock protects the whole list.
+ * memtype_lock protects both the linear list and rbtree.
  */
 
 struct memtype {
@@ -160,11 +160,53 @@ struct memtype {
 	u64 end;
 	unsigned long type;
 	struct list_head nd;
+	struct rb_node rb;
 };
 
+static struct rb_root memtype_rbroot = RB_ROOT;
 static LIST_HEAD(memtype_list);
 static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */
 
+static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
+{
+	struct rb_node *node = root->rb_node;
+	struct memtype *last_lower = NULL;
+
+	while (node) {
+		struct memtype *data = container_of(node, struct memtype, rb);
+
+		if (data->start < start) {
+			last_lower = data;
+			node = node->rb_right;
+		} else if (data->start > start) {
+			node = node->rb_left;
+		} else
+			return data;
+	}
+
+	/* Will return NULL if there is no entry with its start <= start */
+	return last_lower;
+}
+
+static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
+{
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct memtype *this = container_of(*new, struct memtype, rb);
+
+		parent = *new;
+		if (data->start <= this->start)
+			new = &((*new)->rb_left);
+		else if (data->start > this->start)
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&data->rb, parent, new);
+	rb_insert_color(&data->rb, root);
+}
+
 /*
  * Does intersection of PAT memory type and MTRR memory type and returns
  * the resulting memory type as PAT understands it.
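
Note that the search helper returns either an exact match or the last entry whose start lies below the target, i.e. a predecessor-or-equal lookup. A standalone analogue of the same walk over a plain binary search tree (hypothetical struct, for illustration; the kernel version walks rb_node links):

	struct node { u64 start; struct node *left, *right; };

	static struct node *search_le(struct node *n, u64 start)
	{
		struct node *last_lower = NULL;

		while (n) {
			if (n->start < start) {
				last_lower = n;		/* candidate; look right */
				n = n->right;
			} else if (n->start > start) {
				n = n->left;
			} else {
				return n;		/* exact match */
			}
		}
		return last_lower;	/* NULL only if every key > start */
	}
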
@@ -218,9 +260,6 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
 	return -EBUSY;
 }
 
-static struct memtype *cached_entry;
-static u64 cached_start;
-
 static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 {
 	int ram_page = 0, not_rampage = 0;
@@ -249,63 +288,61 @@ static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
 }
 
 /*
- * For RAM pages, mark the pages as non WB memory type using
- * PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
- * set_memory_wc() on a RAM page at a time before marking it as WB again.
- * This is ok, because only one driver will be owning the page and
- * doing set_memory_*() calls.
+ * For RAM pages, we use page flags to mark the pages with appropriate type.
+ * Here we do two passes:
+ * - Find the memtype of all the pages in the range, look for any conflicts
+ * - In case of no conflicts, set the new memtype for pages in the range
  *
- * For now, we use PageNonWB to track that the RAM page is being mapped
- * as non WB. In future, we will have to use one more flag
- * (or some other mechanism in page_struct) to distinguish between
- * UC and WC mapping.
+ * Caller must hold memtype_lock for atomicity.
  */
 static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
 				  unsigned long *new_type)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
+
+	if (req_type == _PAGE_CACHE_UC) {
+		/* We do not support strong UC */
+		WARN_ON_ONCE(1);
+		req_type = _PAGE_CACHE_UC_MINUS;
+	}
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
-		page = pfn_to_page(pfn);
-		if (page_mapped(page) || PageNonWB(page))
-			goto out;
+		unsigned long type;
 
-		SetPageNonWB(page);
+		page = pfn_to_page(pfn);
+		type = get_page_memtype(page);
+		if (type != -1) {
+			printk(KERN_INFO "reserve_ram_pages_type failed "
+				"0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n",
+				start, end, type, req_type);
+			if (new_type)
+				*new_type = type;
+
+			return -EBUSY;
+		}
 	}
-	return 0;
 
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
+	if (new_type)
+		*new_type = req_type;
+
+	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		ClearPageNonWB(page);
+		set_page_memtype(page, req_type);
 	}
-
-	return -EINVAL;
+	return 0;
 }
 
 static int free_ram_pages_type(u64 start, u64 end)
 {
 	struct page *page;
-	u64 pfn, end_pfn;
+	u64 pfn;
 
 	for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
 		page = pfn_to_page(pfn);
-		if (page_mapped(page) || !PageNonWB(page))
-			goto out;
-
-		ClearPageNonWB(page);
+		set_page_memtype(page, -1);
 	}
 	return 0;
-
-out:
-	end_pfn = pfn;
-	for (pfn = (start >> PAGE_SHIFT); pfn < end_pfn; ++pfn) {
-		page = pfn_to_page(pfn);
-		SetPageNonWB(page);
-	}
-	return -EINVAL;
 }
 
 /*
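
The rewritten reserve path follows a check-then-commit shape: the first loop only detects conflicts and the second only writes, so a failure leaves no page half-updated. The generic pattern, as a sketch (get_type() and set_type() are stand-ins for get/set_page_memtype(), which keep the value in page flags):

	static int claim_range(u64 first_pfn, u64 last_pfn, unsigned long want)
	{
		u64 pfn;

		for (pfn = first_pfn; pfn < last_pfn; ++pfn)	/* pass 1: check */
			if (get_type(pfn) != -1)
				return -EBUSY;	/* nothing modified yet */

		for (pfn = first_pfn; pfn < last_pfn; ++pfn)	/* pass 2: commit */
			set_type(pfn, want);
		return 0;
	}

Both passes run under memtype_lock (taken by the caller, per the new comment), which is what makes the two loops atomic with respect to other reservations.
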
@@ -339,6 +376,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	if (new_type) {
 		if (req_type == -1)
 			*new_type = _PAGE_CACHE_WB;
+		else if (req_type == _PAGE_CACHE_WC)
+			*new_type = _PAGE_CACHE_UC_MINUS;
 		else
 			*new_type = req_type & _PAGE_CACHE_MASK;
 	}
@@ -364,11 +403,16 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		*new_type = actual_type;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type,
-					      new_type);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = reserve_ram_pages_type(start, end, req_type, new_type);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -380,17 +424,11 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
-	if (cached_entry && start >= cached_start)
-		entry = cached_entry;
-	else
-		entry = list_entry(&memtype_list, struct memtype, nd);
-
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry_continue(entry, &memtype_list, nd) {
+	list_for_each_entry(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
-			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -398,8 +436,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
-				cached_entry = list_entry(where,
-							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -407,8 +443,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				cached_entry = list_entry(entry->nd.prev,
-							struct memtype, nd);
 
 				/*
 				 * Move to right position in the linked
@@ -436,13 +470,13 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
-	cached_start = start;
-
 	if (where)
 		list_add(&new->nd, where);
 	else
 		list_add_tail(&new->nd, &memtype_list);
 
+	memtype_rb_insert(&memtype_rbroot, new);
+
 	spin_unlock(&memtype_lock);
 
 	dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
@@ -454,7 +488,7 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 int free_memtype(u64 start, u64 end)
 {
-	struct memtype *entry;
+	struct memtype *entry, *saved_entry;
 	int err = -EINVAL;
 	int is_range_ram;
 
@@ -466,23 +500,58 @@ int free_memtype(u64 start, u64 end)
 		return 0;
 
 	is_range_ram = pat_pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
+	if (is_range_ram == 1) {
+
+		spin_lock(&memtype_lock);
+		err = free_ram_pages_type(start, end);
+		spin_unlock(&memtype_lock);
+
+		return err;
+	} else if (is_range_ram < 0) {
 		return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
-	list_for_each_entry(entry, &memtype_list, nd) {
+
+	entry = memtype_rb_search(&memtype_rbroot, start);
+	if (unlikely(entry == NULL))
+		goto unlock_ret;
+
+	/*
+	 * Saved entry points to an entry with start same or less than what
+	 * we searched for. Now go through the list in both directions to look
+	 * for the entry that matches with both start and end, with the list
+	 * stored in sorted start-address order.
+	 */
+	saved_entry = entry;
+	list_for_each_entry_from(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
-			if (cached_entry == entry || cached_start == start)
-				cached_entry = NULL;
+			rb_erase(&entry->rb, &memtype_rbroot);
+			list_del(&entry->nd);
+			kfree(entry);
+			err = 0;
+			break;
+		} else if (entry->start > start) {
+			break;
+		}
+	}
+
+	if (!err)
+		goto unlock_ret;
 
+	entry = saved_entry;
+	list_for_each_entry_reverse(entry, &memtype_list, nd) {
+		if (entry->start == start && entry->end == end) {
+			rb_erase(&entry->rb, &memtype_rbroot);
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
 			break;
+		} else if (entry->start < start) {
+			break;
 		}
 	}
+unlock_ret:
 	spin_unlock(&memtype_lock);
 
 	if (err) {
@@ -496,6 +565,101 @@ int free_memtype(u64 start, u64 end)
 }
 
 
+/**
+ * lookup_memtype - Looks up the memory type for a physical address
+ * @paddr: physical address of which memory type needs to be looked up
+ *
+ * Only to be called when PAT is enabled
+ *
+ * Returns _PAGE_CACHE_WB, _PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS or
+ * _PAGE_CACHE_UC
+ */
+static unsigned long lookup_memtype(u64 paddr)
+{
+	int rettype = _PAGE_CACHE_WB;
+	struct memtype *entry;
+
+	if (is_ISA_range(paddr, paddr + PAGE_SIZE - 1))
+		return rettype;
+
+	if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
+		struct page *page;
+		spin_lock(&memtype_lock);
+		page = pfn_to_page(paddr >> PAGE_SHIFT);
+		rettype = get_page_memtype(page);
+		spin_unlock(&memtype_lock);
+		/*
+		 * -1 from get_page_memtype() implies RAM page is in its
+		 * default state and not reserved, and hence of type WB
+		 */
+		if (rettype == -1)
+			rettype = _PAGE_CACHE_WB;
+
+		return rettype;
+	}
+
+	spin_lock(&memtype_lock);
+
+	entry = memtype_rb_search(&memtype_rbroot, paddr);
+	if (entry != NULL)
+		rettype = entry->type;
+	else
+		rettype = _PAGE_CACHE_UC_MINUS;
+
+	spin_unlock(&memtype_lock);
+	return rettype;
+}
+
+/**
+ * io_reserve_memtype - Request a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ * @type: A pointer to memtype, with requested type. On success, requested
+ * or any other compatible type that was available for the region is returned
+ *
+ * On success, returns 0
+ * On failure, returns non-zero
+ */
+int io_reserve_memtype(resource_size_t start, resource_size_t end,
+			unsigned long *type)
+{
+	resource_size_t size = end - start;
+	unsigned long req_type = *type;
+	unsigned long new_type;
+	int ret;
+
+	WARN_ON_ONCE(iomem_map_sanity_check(start, size));
+
+	ret = reserve_memtype(start, end, req_type, &new_type);
+	if (ret)
+		goto out_err;
+
+	if (!is_new_memtype_allowed(start, size, req_type, new_type))
+		goto out_free;
+
+	if (kernel_map_sync_memtype(start, size, new_type) < 0)
+		goto out_free;
+
+	*type = new_type;
+	return 0;
+
+out_free:
+	free_memtype(start, end);
+	ret = -EBUSY;
+out_err:
+	return ret;
+}
+
+/**
+ * io_free_memtype - Release a memory type mapping for a region of memory
+ * @start: start (physical address) of the region
+ * @end: end (physical address) of the region
+ */
+void io_free_memtype(resource_size_t start, resource_size_t end)
+{
+	free_memtype(start, end);
+}
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
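
lookup_memtype() is file-local to pat.c, but its effect shows up in the callers below: they derive a pgprot whose cache bits match whatever PAT already tracks for the address. Roughly, as a hypothetical helper mirroring the track_pfn_vma_new() hunk further down (illustrative only):

	/* Build a prot for a pfn from its tracked memtype: WB for plain
	 * RAM, UC- for unlisted reserved space, or whatever was
	 * explicitly reserved for the range. */
	static pgprot_t prot_for_pfn(unsigned long pfn, pgprot_t base)
	{
		unsigned long flags = lookup_memtype((u64)pfn << PAGE_SHIFT);

		return __pgprot((pgprot_val(base) & ~_PAGE_CACHE_MASK) | flags);
	}
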
@@ -577,7 +741,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
 {
 	unsigned long id_sz;
 
-	if (!pat_enabled || base >= __pa(high_memory))
+	if (base >= __pa(high_memory))
 		return 0;
 
 	id_sz = (__pa(high_memory) < base + size) ?
@@ -612,11 +776,29 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 	is_ram = pat_pagerange_is_ram(paddr, paddr + size);
 
 	/*
-	 * reserve_pfn_range() doesn't support RAM pages. Maintain the current
-	 * behavior with RAM pages by returning success.
+	 * reserve_pfn_range() for RAM pages. We do not refcount to keep
+	 * track of the number of mappings of RAM pages. We can assert that
+	 * the type requested matches the type of the first page in the range.
 	 */
-	if (is_ram != 0)
+	if (is_ram) {
+		if (!pat_enabled)
+			return 0;
+
+		flags = lookup_memtype(paddr);
+		if (want_flags != flags) {
+			printk(KERN_WARNING
+			"%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
+				current->comm, current->pid,
+				cattr_name(want_flags),
+				(unsigned long long)paddr,
+				(unsigned long long)(paddr + size),
+				cattr_name(flags));
+			*vma_prot = __pgprot((pgprot_val(*vma_prot) &
+					      (~_PAGE_CACHE_MASK)) |
+					     flags);
+		}
 		return 0;
+	}
 
 	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
 	if (ret)
@@ -678,14 +860,6 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
@@ -713,23 +887,24 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
+	unsigned long flags;
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return 0;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
+	if (!pat_enabled)
+		return 0;
+
+	/* for vm_insert_pfn and friends, we set prot based on lookup */
+	flags = lookup_memtype(pfn << PAGE_SHIFT);
+	*prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+			 flags);
+
 	return 0;
 }
 
@@ -744,14 +919,6 @@ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 	resource_size_t paddr;
 	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
-	if (!pat_enabled)
-		return;
-
-	/*
-	 * For now, only handle remap_pfn_range() vmas where
-	 * is_linear_pfn_mapping() == TRUE. Handling of
-	 * vm_insert_pfn() is TBD.
-	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
@@ -827,7 +994,7 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
 	return 0;
 }
 
-static struct seq_operations memtype_seq_ops = {
+static const struct seq_operations memtype_seq_ops = {
 	.start = memtype_seq_start,
 	.next  = memtype_seq_next,
 	.stop  = memtype_seq_stop,
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
new file mode 100644
index 000000000000..d2e2735327b4
--- /dev/null
+++ b/arch/x86/mm/physaddr.c
@@ -0,0 +1,70 @@
+#include <linux/mmdebug.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+
+#include <asm/page.h>
+
+#include "physaddr.h"
+
+#ifdef CONFIG_X86_64
+
+unsigned long __phys_addr(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
+		x += phys_base;
+	} else {
+		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+		x -= PAGE_OFFSET;
+		VIRTUAL_BUG_ON(!phys_addr_valid(x));
+	}
+	return x;
+}
+EXPORT_SYMBOL(__phys_addr);
+
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x >= __START_KERNEL_map) {
+		x -= __START_KERNEL_map;
+		if (x >= KERNEL_IMAGE_SIZE)
+			return false;
+		x += phys_base;
+	} else {
+		if (x < PAGE_OFFSET)
+			return false;
+		x -= PAGE_OFFSET;
+		if (!phys_addr_valid(x))
+			return false;
+	}
+
+	return pfn_valid(x >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
+#else
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+unsigned long __phys_addr(unsigned long x)
+{
+	/* VMALLOC_* aren't constants */
+	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
+	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
+	return x - PAGE_OFFSET;
+}
+EXPORT_SYMBOL(__phys_addr);
+#endif
+
+bool __virt_addr_valid(unsigned long x)
+{
+	if (x < PAGE_OFFSET)
+		return false;
+	if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
+		return false;
+	if (x >= FIXADDR_START)
+		return false;
+	return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL(__virt_addr_valid);
+
+#endif	/* CONFIG_X86_64 */
diff --git a/arch/x86/mm/physaddr.h b/arch/x86/mm/physaddr.h
new file mode 100644
index 000000000000..a3cd5a0c97b3
--- /dev/null
+++ b/arch/x86/mm/physaddr.h
@@ -0,0 +1,10 @@
+#include <asm/processor.h>
+
+static inline int phys_addr_valid(resource_size_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	return !(addr >> boot_cpu_data.x86_phys_bits);
+#else
+	return 1;
+#endif
+}
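
As a concrete illustration of the header's check (the 36-bit figure is just an example; boot_cpu_data.x86_phys_bits is probed at boot):

	/* With x86_phys_bits == 36, only addresses below 64 GB pass: */
	phys_addr_valid(0x0000000fffffffffULL);	/* -> 1, bit 35 is the top */
	phys_addr_valid(0x0000001000000000ULL);	/* -> 0, needs bit 36      */
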
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 29a0e37114f8..6f8aa33031c7 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -215,7 +215,7 @@ int __init get_memcfg_from_srat(void)
 		goto out_fail;
 
 	if (num_memory_chunks == 0) {
-		printk(KERN_WARNING
+		printk(KERN_DEBUG
 			 "could not find any ACPI SRAT memory areas.\n");
 		goto out_fail;
 	}
@@ -277,7 +277,7 @@ int __init get_memcfg_from_srat(void)
 	}
 	return 1;
 out_fail:
-	printk(KERN_ERR "failed to get NUMA memory information from SRAT"
+	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
 			" table\n");
 	return 0;
 }
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 3ffa10df20b9..572ee9782f2a 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -15,63 +15,6 @@
  * also get peer root bus resource for io,mmio
  */
 
-#ifdef CONFIG_NUMA
-
-#define BUS_NR 256
-
-#ifdef CONFIG_X86_64
-
-static int mp_bus_to_node[BUS_NR];
-
-void set_mp_bus_to_node(int busnum, int node)
-{
-	if (busnum >= 0 && busnum < BUS_NR)
-		mp_bus_to_node[busnum] = node;
-}
-
-int get_mp_bus_to_node(int busnum)
-{
-	int node = -1;
-
-	if (busnum < 0 || busnum > (BUS_NR - 1))
-		return node;
-
-	node = mp_bus_to_node[busnum];
-
-	/*
-	 * let numa_node_id to decide it later in dma_alloc_pages
-	 * if there is no ram on that node
-	 */
-	if (node != -1 && !node_online(node))
-		node = -1;
-
-	return node;
-}
-
-#else /* CONFIG_X86_32 */
-
-static unsigned char mp_bus_to_node[BUS_NR];
-
-void set_mp_bus_to_node(int busnum, int node)
-{
-	if (busnum >= 0 && busnum < BUS_NR)
-		mp_bus_to_node[busnum] = (unsigned char) node;
-}
-
-int get_mp_bus_to_node(int busnum)
-{
-	int node;
-
-	if (busnum < 0 || busnum > (BUS_NR - 1))
-		return 0;
-	node = mp_bus_to_node[busnum];
-	return node;
-}
-
-#endif /* CONFIG_X86_32 */
-
-#endif /* CONFIG_NUMA */
-
 #ifdef CONFIG_X86_64
 
 /*
@@ -301,11 +244,6 @@ static int __init early_fill_mp_bus_info(void)
 	u64 val;
 	u32 address;
 
-#ifdef CONFIG_NUMA
-	for (i = 0; i < BUS_NR; i++)
-		mp_bus_to_node[i] = -1;
-#endif
-
 	if (!early_pci_allowed())
 		return -1;
 
@@ -346,7 +284,7 @@ static int __init early_fill_mp_bus_info(void)
 		node = (reg >> 4) & 0x07;
 #ifdef CONFIG_NUMA
 		for (j = min_bus; j <= max_bus; j++)
-			mp_bus_to_node[j] = (unsigned char) node;
+			set_mp_bus_to_node(j, node);
 #endif
 		link = (reg >> 8) & 0x03;
 
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2202b6257b82..5db96d4304de 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -600,3 +600,72 @@ struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
 {
 	return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
 }
+
+/*
+ * NUMA info for PCI busses
+ *
+ * Early arch code is responsible for filling in reasonable values here.
+ * A node id of "-1" means "use current node".  In other words, if a bus
+ * has a -1 node id, it's not tightly coupled to any particular chunk
+ * of memory (as is the case on some Nehalem systems).
+ */
+#ifdef CONFIG_NUMA
+
+#define BUS_NR 256
+
+#ifdef CONFIG_X86_64
+
+static int mp_bus_to_node[BUS_NR] = {
+	[0 ... BUS_NR - 1] = -1
+};
+
+void set_mp_bus_to_node(int busnum, int node)
+{
+	if (busnum >= 0 && busnum < BUS_NR)
+		mp_bus_to_node[busnum] = node;
+}
+
+int get_mp_bus_to_node(int busnum)
+{
+	int node = -1;
+
+	if (busnum < 0 || busnum > (BUS_NR - 1))
+		return node;
+
+	node = mp_bus_to_node[busnum];
+
+	/*
+	 * let numa_node_id to decide it later in dma_alloc_pages
+	 * if there is no ram on that node
+	 */
+	if (node != -1 && !node_online(node))
+		node = -1;
+
+	return node;
+}
+
+#else /* CONFIG_X86_32 */
+
+static unsigned char mp_bus_to_node[BUS_NR] = {
+	[0 ... BUS_NR - 1] = -1
+};
+
+void set_mp_bus_to_node(int busnum, int node)
+{
+	if (busnum >= 0 && busnum < BUS_NR)
+		mp_bus_to_node[busnum] = (unsigned char) node;
+}
+
+int get_mp_bus_to_node(int busnum)
+{
+	int node;
+
+	if (busnum < 0 || busnum > (BUS_NR - 1))
+		return 0;
+	node = mp_bus_to_node[busnum];
+	return node;
+}
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* CONFIG_NUMA */
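
The `[0 ... BUS_NR - 1] = -1` initializers use the GNU C designated-range extension, which is what lets the runtime initialization loop deleted from amd_bus.c go away. A small illustration of the construct:

	static int example[4] = { [0 ... 3] = -1 };	/* {-1, -1, -1, -1} */

One caveat worth noting: in the 32-bit variant the array is unsigned char, so the stored -1 reads back as 255, which mirrors what the deleted per-boot loop stored there as well.
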
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index b3d20b9cac63..417c9f5b4afa 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -242,7 +242,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 	fix_processor_context();
 
 	do_fpu_end();
-	mtrr_ap_init();
+	mtrr_bp_restore();
 
 #ifdef CONFIG_X86_OLD_MCE
 	mcheck_init(&boot_cpu_data);
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7410640db173..3bb4fc21f4f2 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -8,6 +8,7 @@ endif
 # Make sure early boot has no stackprotector
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_enlighten.o := $(nostackp)
+CFLAGS_mmu.o := $(nostackp)
 
 obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
 			time.o xen-asm.o xen-asm_$(BITS).o \
@@ -16,3 +17,4 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
+
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index eb33aaa8415d..0dd0c2c6cae0 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -51,6 +51,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
+#include <asm/stackprotector.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
@@ -330,18 +331,28 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 	unsigned long frames[pages];
 	int f;
 
-	/* A GDT can be up to 64k in size, which corresponds to 8192
-	   8-byte entries, or 16 4k pages.. */
+	/*
+	 * A GDT can be up to 64k in size, which corresponds to 8192
+	 * 8-byte entries, or 16 4k pages..
+	 */
 
 	BUG_ON(size > 65536);
 	BUG_ON(va & ~PAGE_MASK);
 
 	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
 		int level;
-		pte_t *ptep = lookup_address(va, &level);
+		pte_t *ptep;
 		unsigned long pfn, mfn;
 		void *virt;
 
+		/*
+		 * The GDT is per-cpu and is in the percpu data area.
+		 * That can be virtually mapped, so we need to do a
+		 * page-walk to get the underlying MFN for the
+		 * hypercall. The page can also be in the kernel's
+		 * linear range, so we need to RO that mapping too.
+		 */
+		ptep = lookup_address(va, &level);
 		BUG_ON(ptep == NULL);
 
 		pfn = pte_pfn(*ptep);
@@ -358,6 +369,44 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 		BUG();
 }
 
+/*
+ * load_gdt for early boot, when the gdt is only mapped once
+ */
+static __init void xen_load_gdt_boot(const struct desc_ptr *dtr)
+{
+	unsigned long va = dtr->address;
+	unsigned int size = dtr->size + 1;
+	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+	unsigned long frames[pages];
+	int f;
+
+	/*
+	 * A GDT can be up to 64k in size, which corresponds to 8192
+	 * 8-byte entries, or 16 4k pages..
+	 */
+
+	BUG_ON(size > 65536);
+	BUG_ON(va & ~PAGE_MASK);
+
+	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+		pte_t pte;
+		unsigned long pfn, mfn;
+
+		pfn = virt_to_pfn(va);
+		mfn = pfn_to_mfn(pfn);
+
+		pte = pfn_pte(pfn, PAGE_KERNEL_RO);
+
+		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
+			BUG();
+
+		frames[f] = mfn;
+	}
+
+	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+		BUG();
+}
+
 static void load_TLS_descriptor(struct thread_struct *t,
 				unsigned int cpu, unsigned int i)
 {
@@ -581,6 +630,29 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 	preempt_enable();
 }
 
+/*
+ * Version of write_gdt_entry for use at early boot-time needed to
+ * update an entry as simply as possible.
+ */
+static __init void xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
+					    const void *desc, int type)
+{
+	switch (type) {
+	case DESC_LDT:
+	case DESC_TSS:
+		/* ignore */
+		break;
+
+	default: {
+		xmaddr_t maddr = virt_to_machine(&dt[entry]);
+
+		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
+			dt[entry] = *(struct desc_struct *)desc;
+	}
+
+	}
+}
+
 static void xen_load_sp0(struct tss_struct *tss,
 			 struct thread_struct *thread)
 {
@@ -714,7 +786,7 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 	set:
 		base = ((u64)high << 32) | low;
 		if (HYPERVISOR_set_segment_base(which, base) != 0)
-			ret = -EFAULT;
+			ret = -EIO;
 		break;
 #endif
 
@@ -965,6 +1037,23 @@ static const struct machine_ops __initdata xen_machine_ops = {
 	.emergency_restart = xen_emergency_restart,
 };
 
+/*
+ * Set up the GDT and segment registers for -fstack-protector. Until
+ * we do this, we have to be careful not to call any stack-protected
+ * function, which is most of the kernel.
+ */
+static void __init xen_setup_stackprotector(void)
+{
+	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
+	pv_cpu_ops.load_gdt = xen_load_gdt_boot;
+
+	setup_stack_canary_segment(0);
+	switch_to_new_gdt(0);
+
+	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
+	pv_cpu_ops.load_gdt = xen_load_gdt;
+}
+
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -983,13 +1072,28 @@ asmlinkage void __init xen_start_kernel(void)
 	pv_apic_ops = xen_apic_ops;
 	pv_mmu_ops = xen_mmu_ops;
 
-#ifdef CONFIG_X86_64
 	/*
-	 * Setup percpu state. We only need to do this for 64-bit
-	 * because 32-bit already has %fs set properly.
+	 * Set up some pagetable state before starting to set any ptes.
 	 */
-	load_percpu_segment(0);
-#endif
+
+	/* Prevent unwanted bits from being set in PTEs. */
+	__supported_pte_mask &= ~_PAGE_GLOBAL;
+	if (!xen_initial_domain())
+		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
+	__supported_pte_mask |= _PAGE_IOMAP;
+
+	xen_setup_features();
+
+	/* Get mfn list */
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		xen_build_dynamic_phys_to_machine();
+
+	/*
+	 * Set up kernel GDT and segment registers, mainly so that
+	 * -fstack-protector code can be executed.
+	 */
+	xen_setup_stackprotector();
 
 	xen_init_irq_ops();
 	xen_init_cpuid_mask();
@@ -1001,8 +1105,6 @@ asmlinkage void __init xen_start_kernel(void)
 	set_xen_basic_apic_ops();
 #endif
 
-	xen_setup_features();
-
 	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
 		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
 		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
@@ -1019,17 +1121,8 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_smp_init();
 
-	/* Get mfn list */
-	if (!xen_feature(XENFEAT_auto_translated_physmap))
-		xen_build_dynamic_phys_to_machine();
-
 	pgd = (pgd_t *)xen_start_info->pt_base;
 
-	/* Prevent unwanted bits from being set in PTEs. */
-	__supported_pte_mask &= ~_PAGE_GLOBAL;
-	if (!xen_initial_domain())
-		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-
 #ifdef CONFIG_X86_64
 	/* Work out if we support NX */
 	check_efer();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 429834ec1687..fe03eeed7b48 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -236,6 +236,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
 	ctxt->user_regs.fs = __KERNEL_PERCPU;
+	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
 #else
 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 5601506f2dd9..36a5141108df 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 	struct xen_spinlock *prev;
 	int irq = __get_cpu_var(lock_kicker_irq);
 	int ret;
-	unsigned long flags;
 	u64 start;
 
 	/* If kicker interrupts not initialized yet, just spin */
@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
-	flags = __raw_local_save_flags();
-	if (irq_enable) {
-		ADD_STATS(taken_slow_irqenable, 1);
-		raw_local_irq_enable();
-	}
-
 	ADD_STATS(taken_slow, 1);
 	ADD_STATS(taken_slow_nested, prev != NULL);
 
 	do {
+		unsigned long flags;
+
 		/* clear pending */
 		xen_clear_irq_pending(irq);
 
@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 			goto out;
 		}
 
+		flags = __raw_local_save_flags();
+		if (irq_enable) {
+			ADD_STATS(taken_slow_irqenable, 1);
+			raw_local_irq_enable();
+		}
+
 		/*
 		 * Block until irq becomes pending. If we're
 		 * interrupted at this point (after the trylock but
@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 		 * pending.
 		 */
 		xen_poll_irq(irq);
+
+		raw_local_irq_restore(flags);
+
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
-	raw_local_irq_restore(flags);
 	unspinning_lock(xl, prev);
 	spin_time_accum_blocked(start);
 
@@ -323,8 +326,13 @@ static void xen_spin_unlock(struct raw_spinlock *lock)
 	smp_wmb();		/* make sure no writes get moved after unlock */
 	xl->lock = 0;		/* release lock */
 
-	/* make sure unlock happens before kick */
-	barrier();
+	/*
+	 * Make sure unlock happens before checking for waiting
+	 * spinners. We need a strong barrier to enforce the
+	 * write-read ordering to different memory locations, as the
+	 * CPU makes no implied guarantees about their ordering.
+	 */
+	mb();
 
 	if (unlikely(xl->spinners))
 		xen_spin_unlock_slow(xl);
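
Why mb() rather than barrier(): the unlock is a store to xl->lock and the waiter check is a load from xl->spinners, two different locations, so a compiler-only barrier cannot stop the CPU itself from letting the load complete before the store becomes visible. In miniature (kick_waiters() is a hypothetical stand-in for xen_spin_unlock_slow()):

	xl->lock = 0;			/* store: release the lock         */
	mb();				/* order the store before the load */
	if (unlikely(xl->spinners))	/* load: anyone left to kick?      */
		kick_waiters();

With only barrier(), x86's store buffer permits StoreLoad reordering, and the unlocker and a newly blocked waiter could each miss the other's write, leaving the waiter asleep forever.
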