Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 66
-rw-r--r--  arch/x86/Makefile | 4
-rw-r--r--  arch/x86/boot/Makefile | 4
-rw-r--r--  arch/x86/boot/compressed/eboot.c | 21
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 8
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 8
-rw-r--r--  arch/x86/boot/tools/build.c | 81
-rw-r--r--  arch/x86/configs/i386_defconfig | 1
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 37
-rw-r--r--  arch/x86/ia32/ia32entry.S | 4
-rw-r--r--  arch/x86/include/asm/acpi.h | 4
-rw-r--r--  arch/x86/include/asm/amd_nb.h | 17
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 2
-rw-r--r--  arch/x86/include/asm/efi.h | 1
-rw-r--r--  arch/x86/include/asm/ftrace.h | 1
-rw-r--r--  arch/x86/include/asm/hpet.h | 5
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 13
-rw-r--r--  arch/x86/include/asm/hypervisor.h | 13
-rw-r--r--  arch/x86/include/asm/io_apic.h | 28
-rw-r--r--  arch/x86/include/asm/irq_remapping.h | 40
-rw-r--r--  arch/x86/include/asm/irq_vectors.h | 4
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 8
-rw-r--r--  arch/x86/include/asm/linkage.h | 18
-rw-r--r--  arch/x86/include/asm/mce.h | 84
-rw-r--r--  arch/x86/include/asm/mshyperv.h | 4
-rw-r--r--  arch/x86/include/asm/mwait.h | 3
-rw-r--r--  arch/x86/include/asm/pci.h | 3
-rw-r--r--  arch/x86/include/asm/perf_event.h | 13
-rw-r--r--  arch/x86/include/asm/pgtable.h | 18
-rw-r--r--  arch/x86/include/asm/pgtable_32.h | 7
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 20
-rw-r--r--  arch/x86/include/asm/required-features.h | 8
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 44
-rw-r--r--  arch/x86/include/asm/uv/uv_mmrs.h | 1496
-rw-r--r--  arch/x86/include/asm/x86_init.h | 27
-rw-r--r--  arch/x86/include/asm/xor.h | 491
-rw-r--r--  arch/x86/include/asm/xor_32.h | 309
-rw-r--r--  arch/x86/include/asm/xor_64.h | 305
-rw-r--r--  arch/x86/include/uapi/asm/mce.h | 87
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 6
-rw-r--r--  arch/x86/kernel/Makefile | 3
-rw-r--r--  arch/x86/kernel/apb_timer.c | 10
-rw-r--r--  arch/x86/kernel/apic/apic.c | 28
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 457
-rw-r--r--  arch/x86/kernel/apic/ipi.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c | 21
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 206
-rw-r--r--  arch/x86/kernel/apm_32.c | 68
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 56
-rw-r--r--  arch/x86/kernel/cpu/bugs.c | 27
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 7
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 9
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mshyperv.c | 54
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 15
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 25
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 322
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p6.c | 2
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 2
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 13
-rw-r--r--  arch/x86/kernel/entry_32.S | 9
-rw-r--r--  arch/x86/kernel/entry_64.S | 14
-rw-r--r--  arch/x86/kernel/head_32.S | 102
-rw-r--r--  arch/x86/kernel/hpet.c | 2
-rw-r--r--  arch/x86/kernel/kprobes/Makefile | 7
-rw-r--r--  arch/x86/kernel/kprobes/common.h (renamed from arch/x86/kernel/kprobes-common.h) | 11
-rw-r--r--  arch/x86/kernel/kprobes/core.c (renamed from arch/x86/kernel/kprobes.c) | 76
-rw-r--r--  arch/x86/kernel/kprobes/ftrace.c | 93
-rw-r--r--  arch/x86/kernel/kprobes/opt.c (renamed from arch/x86/kernel/kprobes-opt.c) | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 1
-rw-r--r--  arch/x86/kernel/msr.c | 3
-rw-r--r--  arch/x86/kernel/pci-dma.c | 2
-rw-r--r--  arch/x86/kernel/process.c | 122
-rw-r--r--  arch/x86/kernel/process_64.c | 2
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/reboot.c | 2
-rw-r--r--  arch/x86/kernel/rtc.c | 1
-rw-r--r--  arch/x86/kernel/setup.c | 28
-rw-r--r--  arch/x86/kernel/smpboot.c | 2
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r--  arch/x86/kernel/tsc.c | 3
-rw-r--r--  arch/x86/kernel/uprobes.c | 4
-rw-r--r--  arch/x86/kernel/x86_init.c | 24
-rw-r--r--  arch/x86/kvm/x86.c | 16
-rw-r--r--  arch/x86/lguest/Kconfig | 1
-rw-r--r--  arch/x86/mm/fault.c | 8
-rw-r--r--  arch/x86/mm/init_64.c | 7
-rw-r--r--  arch/x86/mm/memtest.c | 10
-rw-r--r--  arch/x86/mm/srat.c | 29
-rw-r--r--  arch/x86/mm/tlb.c | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 40
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 3
-rw-r--r--  arch/x86/platform/Makefile | 2
-rw-r--r--  arch/x86/platform/efi/efi-bgrt.c | 7
-rw-r--r--  arch/x86/platform/efi/efi.c | 59
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 22
-rw-r--r--  arch/x86/platform/goldfish/Makefile | 1
-rw-r--r--  arch/x86/platform/goldfish/goldfish.c | 51
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c | 2
-rw-r--r--  arch/x86/platform/sfi/sfi.c | 2
-rw-r--r--  arch/x86/platform/ts5500/Makefile | 1
-rw-r--r--  arch/x86/platform/ts5500/ts5500.c | 339
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 14
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 13
-rw-r--r--  arch/x86/tools/insn_sanity.c | 10
-rw-r--r--  arch/x86/um/Kconfig | 3
-rw-r--r--  arch/x86/um/fault.c | 2
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 78
-rw-r--r--  arch/x86/xen/setup.c | 5
-rw-r--r--  arch/x86/xen/suspend.c | 2
-rw-r--r--  arch/x86/xen/xen-asm_32.S | 14
-rw-r--r--  arch/x86/xen/xen-ops.h | 2
117 files changed, 3736 insertions(+), 2192 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 108efcb21c9e..ff0e5f3c844e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1,7 +1,7 @@
 # Select 32 or 64 bit
 config 64BIT
 	bool "64-bit kernel" if ARCH = "x86"
-	default ARCH = "x86_64"
+	default ARCH != "i386"
 	---help---
 	  Say yes to build a 64-bit kernel - formerly known as x86_64
 	  Say no to build a 32-bit kernel - formerly known as i386
@@ -28,7 +28,6 @@ config X86
 	select HAVE_OPROFILE
 	select HAVE_PCSPKR_PLATFORM
 	select HAVE_PERF_EVENTS
-	select HAVE_IRQ_WORK
 	select HAVE_IOREMAP_PROT
 	select HAVE_KPROBES
 	select HAVE_MEMBLOCK
@@ -40,10 +39,12 @@ config X86
 	select HAVE_DMA_CONTIGUOUS if !SWIOTLB
 	select HAVE_KRETPROBES
 	select HAVE_OPTPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FENTRY if X86_64
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_DYNAMIC_FTRACE
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
@@ -106,6 +107,7 @@ config X86
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL if X86_64
 	select KTIME_SCALAR if X86_32
+	select ALWAYS_USE_PERSISTENT_CLOCK
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
@@ -114,6 +116,7 @@ config X86
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
 	select GENERIC_SIGALTSTACK
+	select ARCH_USE_BUILTIN_BSWAP
 
 config INSTRUCTION_DECODER
 	def_bool y
@@ -222,7 +225,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 
 config HAVE_INTEL_TXT
 	def_bool y
-	depends on EXPERIMENTAL && INTEL_IOMMU && ACPI
+	depends on INTEL_IOMMU && ACPI
 
 config X86_32_SMP
 	def_bool y
@@ -320,6 +323,10 @@ config X86_BIGSMP
 	---help---
 	  This option is needed for the systems that have more than 8 CPUs
 
+config GOLDFISH
+	def_bool y
+	depends on X86_GOLDFISH
+
 if X86_32
 config X86_EXTENDED_PLATFORM
 	bool "Support for extended (non-PC) x86 platforms"
@@ -402,6 +409,14 @@ config X86_UV
 # Following is an alphabetically sorted list of 32 bit extended platforms
 # Please maintain the alphabetic order if and when there are additions
 
+config X86_GOLDFISH
+	bool "Goldfish (Virtual Platform)"
+	depends on X86_32
+	---help---
+	  Enable support for the Goldfish virtual platform used primarily
+	  for Android development. Unless you are building for the Android
+	  Goldfish emulator say N here.
+
 config X86_INTEL_CE
 	bool "CE4100 TV platform"
 	depends on PCI
@@ -454,6 +469,16 @@ config X86_MDFLD
 
 endif
 
+config X86_INTEL_LPSS
+	bool "Intel Low Power Subsystem Support"
+	depends on ACPI
+	select COMMON_CLK
+	---help---
+	  Select to build support for Intel Low Power Subsystem such as
+	  found on Intel Lynxpoint PCH. Selecting this option enables
+	  things like clock tree (common clock framework) which are needed
+	  by the LPSS peripheral drivers.
+
 config X86_RDC321X
 	bool "RDC R-321x SoC"
 	depends on X86_32
@@ -617,7 +642,7 @@ config PARAVIRT
 
 config PARAVIRT_SPINLOCKS
 	bool "Paravirtualization layer for spinlocks"
-	depends on PARAVIRT && SMP && EXPERIMENTAL
+	depends on PARAVIRT && SMP
 	---help---
 	  Paravirtualized spinlocks allow a pvops backend to replace the
 	  spinlock implementation with something virtualization-friendly
@@ -729,7 +754,7 @@ config GART_IOMMU
 config CALGARY_IOMMU
 	bool "IBM Calgary IOMMU support"
 	select SWIOTLB
-	depends on X86_64 && PCI && EXPERIMENTAL
+	depends on X86_64 && PCI
 	---help---
 	  Support for hardware IOMMUs in IBM's xSeries x366 and x460
 	  systems. Needed to run systems with more than 3GB of memory
@@ -771,7 +796,7 @@ config IOMMU_HELPER
 
 config MAXSMP
 	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
-	depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+	depends on X86_64 && SMP && DEBUG_KERNEL
 	select CPUMASK_OFFSTACK
 	---help---
 	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
@@ -1107,7 +1132,6 @@ config HIGHMEM64G
 endchoice
 
 choice
-	depends on EXPERIMENTAL
 	prompt "Memory split" if EXPERT
 	default VMSPLIT_3G
 	depends on X86_32
@@ -1184,7 +1208,7 @@ config DIRECT_GBPAGES
 config NUMA
 	bool "Numa Memory Allocation and Scheduler Support"
 	depends on SMP
-	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
+	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI))
 	default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
 	---help---
 	  Enable NUMA (Non Uniform Memory Access) support.
@@ -1275,7 +1299,7 @@ config ARCH_DISCONTIGMEM_DEFAULT
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
-	depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
+	depends on X86_64 || NUMA || X86_32 || X86_32_NON_STANDARD
 	select SPARSEMEM_STATIC if X86_32
 	select SPARSEMEM_VMEMMAP_ENABLE if X86_64
 
@@ -1589,8 +1613,7 @@ config CRASH_DUMP
 	  For more details see Documentation/kdump/kdump.txt
 
 config KEXEC_JUMP
-	bool "kexec jump (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	bool "kexec jump"
 	depends on KEXEC && HIBERNATION
 	---help---
 	  Jump between original kernel and kexeced kernel and invoke
@@ -1695,7 +1718,7 @@ config HOTPLUG_CPU
 config BOOTPARAM_HOTPLUG_CPU0
 	bool "Set default setting of cpu0_hotpluggable"
 	default n
-	depends on HOTPLUG_CPU && EXPERIMENTAL
+	depends on HOTPLUG_CPU
 	---help---
 	  Set whether default state of cpu0_hotpluggable is on or off.
 
@@ -1724,7 +1747,7 @@ config BOOTPARAM_HOTPLUG_CPU0
 config DEBUG_HOTPLUG_CPU0
 	def_bool n
 	prompt "Debug CPU0 hotplug"
-	depends on HOTPLUG_CPU && EXPERIMENTAL
+	depends on HOTPLUG_CPU
 	---help---
 	  Enabling this option offlines CPU0 (if CPU0 can be offlined) as
 	  soon as possible and boots up userspace with CPU0 offlined. User
@@ -1908,6 +1931,7 @@ config APM_DO_ENABLE
 	  this feature.
 
 config APM_CPU_IDLE
+	depends on CPU_IDLE
 	bool "Make CPU Idle calls when idle"
 	---help---
 	  Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
@@ -2033,7 +2057,7 @@ config PCI_MMCONFIG
 
 config PCI_CNB20LE_QUIRK
 	bool "Read CNB20LE Host Bridge Windows" if EXPERT
-	depends on PCI && EXPERIMENTAL
+	depends on PCI
 	help
 	  Read the PCI windows out of the CNB20LE host bridge. This allows
 	  PCI hotplug to work on systems with the CNB20LE chipset which do
@@ -2134,6 +2158,7 @@ config OLPC_XO1_RTC
 config OLPC_XO1_SCI
 	bool "OLPC XO-1 SCI extras"
 	depends on OLPC && OLPC_XO1_PM
+	depends on INPUT=y
 	select POWER_SUPPLY
 	select GPIO_CS5535
 	select MFD_CORE
@@ -2183,6 +2208,15 @@ config GEOS
 	---help---
 	  This option enables system support for the Traverse Technologies GEOS.
 
+config TS5500
+	bool "Technologic Systems TS-5500 platform support"
+	depends on MELAN
+	select CHECK_SIGNATURE
+	select NEW_LEDS
+	select LEDS_CLASS
+	---help---
+	  This option enables system support for the Technologic Systems TS-5500.
+
 endif # X86_32
 
 config AMD_NB
@@ -2227,8 +2261,8 @@ config IA32_AOUT
 	  Support old a.out binaries in the 32bit emulation.
 
 config X86_X32
-	bool "x32 ABI for 64-bit mode (EXPERIMENTAL)"
-	depends on X86_64 && IA32_EMULATION && EXPERIMENTAL
+	bool "x32 ABI for 64-bit mode"
+	depends on X86_64 && IA32_EMULATION
 	---help---
 	  Include code to run binaries for the x32 native 32-bit ABI
 	  for 64-bit processors. An x32 process gets access to the
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index e71fc4279aab..5c477260294f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -2,7 +2,11 @@
 
 # select defconfig based on actual architecture
 ifeq ($(ARCH),x86)
+  ifeq ($(shell uname -m),x86_64)
+        KBUILD_DEFCONFIG := x86_64_defconfig
+  else
         KBUILD_DEFCONFIG := i386_defconfig
+  endif
 else
         KBUILD_DEFCONFIG := $(ARCH)_defconfig
 endif
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index ccce0ed67dde..379814bc41e3 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -71,7 +71,7 @@ GCOV_PROFILE := n
 $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
-cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@
 
 $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
 	$(call if_changed,image)
@@ -92,7 +92,7 @@ targets += voffset.h
 $(obj)/voffset.h: vmlinux FORCE
 	$(call if_changed,voffset)
 
-sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
+sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p'
 
 quiet_cmd_zoffset = ZOFFSET $@
       cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@
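
For reference, the zoffset.h that tools/build now consumes is just this sed rule applied to `nm vmlinux`: one #define per matched symbol. A hypothetical build might produce something like the following (addresses are illustrative and vary per build):

#define ZO_efi_pe_entry 0x00000120
#define ZO_efi_stub_entry 0x00000130
#define ZO_input_data 0x00003654
#define ZO_startup_32 0x00000000
#define ZO_startup_64 0x00000200

tools/build parses these lines (see parse_zoffset() later in this diff) rather than #including the header, so it does not have to be rebuilt whenever the addresses change.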
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 18e329ca108e..f8fa41190c35 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
 	int i;
 	struct setup_data *data;
 
-	data = (struct setup_data *)params->hdr.setup_data;
+	data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
 
 	while (data && data->next)
-		data = (struct setup_data *)data->next;
+		data = (struct setup_data *)(unsigned long)data->next;
 
 	status = efi_call_phys5(sys_table->boottime->locate_handle,
 				EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -295,16 +295,18 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
 		if (!pci)
 			continue;
 
+#ifdef CONFIG_X86_64
 		status = efi_call_phys4(pci->attributes, pci,
 					EfiPciIoAttributeOperationGet, 0,
 					&attributes);
-
+#else
+		status = efi_call_phys5(pci->attributes, pci,
+					EfiPciIoAttributeOperationGet, 0, 0,
+					&attributes);
+#endif
 		if (status != EFI_SUCCESS)
 			continue;
 
-		if (!(attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM))
-			continue;
-
 		if (!pci->romimage || !pci->romsize)
 			continue;
 
@@ -345,9 +347,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params)
 		memcpy(rom->romdata, pci->romimage, pci->romsize);
 
 		if (data)
-			data->next = (uint64_t)rom;
+			data->next = (unsigned long)rom;
 		else
-			params->hdr.setup_data = (uint64_t)rom;
+			params->hdr.setup_data = (unsigned long)rom;
 
 		data = (struct setup_data *)rom;
 
@@ -432,10 +434,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
 		 * Once we've found a GOP supporting ConOut,
 		 * don't bother looking any further.
 		 */
+		first_gop = gop;
 		if (conout_found)
 			break;
-
-		first_gop = gop;
 	}
 }
 
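
The cast-through-unsigned-long pattern above matters on 32-bit builds: setup_data links travel as u64 fields in the boot ABI, and converting a u64 straight to a 32-bit pointer draws a cast-to-pointer-from-integer-of-different-size warning. A minimal sketch of the idiom in isolation:

static struct setup_data *sd_from_u64(u64 v)
{
	/* narrow the 64-bit ABI field to the native word before
	 * forming a pointer; a no-op on 64-bit, a truncation on 32-bit */
	return (struct setup_data *)(unsigned long)v;
}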
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index aa4aaf1b2380..1e3184f6072f 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -35,11 +35,11 @@ ENTRY(startup_32)
 #ifdef CONFIG_EFI_STUB
 	jmp	preferred_addr
 
-	.balign	0x10
 	/*
 	 * We don't need the return address, so set up the stack so
-	 * efi_main() can find its arugments.
+	 * efi_main() can find its arguments.
 	 */
+ENTRY(efi_pe_entry)
 	add	$0x4, %esp
 
@@ -50,8 +50,10 @@ ENTRY(startup_32)
 	pushl	%eax
 	pushl	%esi
 	pushl	%ecx
+	sub	$0x4, %esp
 
-	.org 0x30,0x90
+ENTRY(efi_stub_entry)
+	add	$0x4, %esp
 	call	efi_main
 	cmpl	$0, %eax
 	movl	%eax, %esi
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index d9ae9a4ffcb9..c1d383d1fb7e 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -203,12 +203,12 @@ ENTRY(startup_64)
 	 */
 #ifdef CONFIG_EFI_STUB
 	/*
-	 * The entry point for the PE/COFF executable is 0x210, so only
-	 * legacy boot loaders will execute this jmp.
+	 * The entry point for the PE/COFF executable is efi_pe_entry, so
+	 * only legacy boot loaders will execute this jmp.
 	 */
 	jmp	preferred_addr
 
-	.org 0x210
+ENTRY(efi_pe_entry)
 	mov	%rcx, %rdi
 	mov	%rdx, %rsi
 	pushq	%rdi
@@ -220,7 +220,7 @@ ENTRY(startup_64)
 	popq	%rsi
 	popq	%rdi
 
-	.org 0x230,0x90
+ENTRY(efi_stub_entry)
 	call	efi_main
 	movq	%rax,%rsi
 	cmpq	$0,%rax
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 4b8e165ee572..94c544650020 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -52,6 +52,10 @@ int is_big_kernel;
 
 #define PECOFF_RELOC_RESERVE 0x20
 
+unsigned long efi_stub_entry;
+unsigned long efi_pe_entry;
+unsigned long startup_64;
+
 /*----------------------------------------------------------------------*/
 
 static const u32 crctab32[] = {
@@ -132,7 +136,7 @@ static void die(const char * str, ...)
 
 static void usage(void)
 {
-	die("Usage: build setup system [> image]");
+	die("Usage: build setup system [zoffset.h] [> image]");
 }
 
 #ifdef CONFIG_EFI_STUB
@@ -206,30 +210,54 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 	 */
 	put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
 
-#ifdef CONFIG_X86_32
 	/*
-	 * Address of entry point.
-	 *
-	 * The EFI stub entry point is +16 bytes from the start of
-	 * the .text section.
+	 * Address of entry point for PE/COFF executable
 	 */
-	put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
-#else
-	/*
-	 * Address of entry point. startup_32 is at the beginning and
-	 * the 64-bit entry point (startup_64) is always 512 bytes
-	 * after. The EFI stub entry point is 16 bytes after that, as
-	 * the first instruction allows legacy loaders to jump over
-	 * the EFI stub initialisation
-	 */
-	put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
-#endif /* CONFIG_X86_32 */
+	put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]);
 
 	update_pecoff_section_header(".text", text_start, text_sz);
 }
 
 #endif /* CONFIG_EFI_STUB */
 
+
+/*
+ * Parse zoffset.h and find the entry points. We could just #include zoffset.h
+ * but that would mean tools/build would have to be rebuilt every time. It's
+ * not as if parsing it is hard...
+ */
+#define PARSE_ZOFS(p, sym) do { \
+	if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym)))	\
+		sym = strtoul(p + 11 + sizeof(#sym), NULL, 16);		\
+} while (0)
+
+static void parse_zoffset(char *fname)
+{
+	FILE *file;
+	char *p;
+	int c;
+
+	file = fopen(fname, "r");
+	if (!file)
+		die("Unable to open `%s': %m", fname);
+	c = fread(buf, 1, sizeof(buf) - 1, file);
+	if (ferror(file))
+		die("read-error on `zoffset.h'");
+	buf[c] = 0;
+
+	p = (char *)buf;
+
+	while (p && *p) {
+		PARSE_ZOFS(p, efi_stub_entry);
+		PARSE_ZOFS(p, efi_pe_entry);
+		PARSE_ZOFS(p, startup_64);
+
+		p = strchr(p, '\n');
+		while (p && (*p == '\r' || *p == '\n'))
+			p++;
+	}
+}
+
 int main(int argc, char ** argv)
 {
 	unsigned int i, sz, setup_sectors;
@@ -241,7 +269,19 @@ int main(int argc, char ** argv)
 	void *kernel;
 	u32 crc = 0xffffffffUL;
 
-	if (argc != 3)
+	/* Defaults for old kernel */
+#ifdef CONFIG_X86_32
+	efi_pe_entry = 0x10;
+	efi_stub_entry = 0x30;
+#else
+	efi_pe_entry = 0x210;
+	efi_stub_entry = 0x230;
+	startup_64 = 0x200;
+#endif
+
+	if (argc == 4)
+		parse_zoffset(argv[3]);
+	else if (argc != 3)
 		usage();
 
 	/* Copy the setup code */
@@ -299,6 +339,11 @@ int main(int argc, char ** argv)
 
 #ifdef CONFIG_EFI_STUB
 	update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+
+#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */
+	efi_stub_entry -= 0x200;
+#endif
+	put_unaligned_le32(efi_stub_entry, &buf[0x264]);
 #endif
 
 	crc = partial_crc32(buf, i, crc);
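
The final put_unaligned_le32() stores the stub entry into the setup header at offset 0x264, the handover_offset field of the EFI handover protocol (boot protocol 2.11+). A hedged loader-side sketch of how that field is read back, assuming a bzImage already in memory:

#include <stdint.h>
#include <string.h>

/* Illustrative only: fetch handover_offset from a bzImage buffer.
 * For 64-bit firmware the actual entry is this offset plus 0x200,
 * which is why build.c subtracts 0x200 before writing the field. */
static uint32_t handover_offset(const uint8_t *bzimage)
{
	uint32_t off;

	memcpy(&off, bzimage + 0x264, sizeof(off));
	return off;
}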
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5598547281a7..94447086e551 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,3 +1,4 @@
+# CONFIG_64BIT is not set
 CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 1b9c22bea8a7..a0795da22c02 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -40,10 +40,6 @@
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 
-#if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
-#define HAS_CTR
-#endif
-
 #if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
 #define HAS_PCBC
 #endif
@@ -395,12 +391,6 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
 	return ablk_init_common(tfm, "__driver-ctr-aes-aesni");
 }
 
-#ifdef HAS_CTR
-static int ablk_rfc3686_ctr_init(struct crypto_tfm *tfm)
-{
-	return ablk_init_common(tfm, "rfc3686(__driver-ctr-aes-aesni)");
-}
-#endif
 #endif
 
 #ifdef HAS_PCBC
@@ -1158,33 +1148,6 @@ static struct crypto_alg aesni_algs[] = { {
 			.maxauthsize	= 16,
 		},
 	},
-#ifdef HAS_CTR
-}, {
-	.cra_name		= "rfc3686(ctr(aes))",
-	.cra_driver_name	= "rfc3686-ctr-aes-aesni",
-	.cra_priority		= 400,
-	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-	.cra_blocksize		= 1,
-	.cra_ctxsize		= sizeof(struct async_helper_ctx),
-	.cra_alignmask		= 0,
-	.cra_type		= &crypto_ablkcipher_type,
-	.cra_module		= THIS_MODULE,
-	.cra_init		= ablk_rfc3686_ctr_init,
-	.cra_exit		= ablk_exit,
-	.cra_u = {
-		.ablkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE +
-				       CTR_RFC3686_NONCE_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE +
-				       CTR_RFC3686_NONCE_SIZE,
-			.ivsize	     = CTR_RFC3686_IV_SIZE,
-			.setkey	     = ablk_set_key,
-			.encrypt     = ablk_encrypt,
-			.decrypt     = ablk_decrypt,
-			.geniv	     = "seqiv",
-		},
-	},
-#endif
 #endif
 #ifdef HAS_PCBC
 }, {
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 102ff7cb3e41..142c4ceff112 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -207,7 +207,7 @@ sysexit_from_sys_call:
 	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jnz ia32_ret_from_sys_call
 	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
 	movl %eax,%esi		/* second arg, syscall return value */
 	cmpl $-MAX_ERRNO,%eax	/* is it an error ? */
 	jbe	1f
@@ -217,7 +217,7 @@ sysexit_from_sys_call:
 	call __audit_syscall_exit
 	movq RAX-ARGOFFSET(%rsp),%rax	/* reload syscall return value */
 	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
 	jz \exit
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0c44630d1789..b31bf97775fc 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -49,10 +49,6 @@
 
 /* Asm macros */
 
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS()  local_irq_enable()
 #define ACPI_FLUSH_CPU_CACHE()	wbinvd()
 
 int __acpi_acquire_global_lock(unsigned int *lock);
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index b3341e9cd8fd..a54ee1d054d9 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -81,6 +81,23 @@ static inline struct amd_northbridge *node_to_amd_nb(int node)
 	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
 }
 
+static inline u16 amd_get_node_id(struct pci_dev *pdev)
+{
+	struct pci_dev *misc;
+	int i;
+
+	for (i = 0; i != amd_nb_num(); i++) {
+		misc = node_to_amd_nb(i)->misc;
+
+		if (pci_domain_nr(misc->bus) == pci_domain_nr(pdev->bus) &&
+		    PCI_SLOT(misc->devfn) == PCI_SLOT(pdev->devfn))
+			return i;
+	}
+
+	WARN(1, "Unable to find AMD Northbridge id for %s\n", pci_name(pdev));
+	return 0;
+}
+
 #else
 
 #define amd_nb_num(x)	0
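
The new amd_get_node_id() resolves which northbridge a PCI function belongs to by matching PCI domain and slot against the cached misc devices. A minimal caller sketch (illustrative only, not part of this patch):

static void example_report_node(struct pci_dev *pdev)
{
	u16 node = amd_get_node_id(pdev);	/* 0 plus a WARN on no match */

	dev_info(&pdev->dev, "handled by AMD NB node %u\n", node);
}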
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 2d9075e863a0..93fe929d1cee 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -167,6 +167,7 @@
 #define X86_FEATURE_TBM		(6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT	(6*32+22) /* topology extensions CPUID leafs */
 #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB	(6*32+24) /* NB performance counter extensions */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
@@ -309,6 +310,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_hypervisor	boot_cpu_has(X86_FEATURE_HYPERVISOR)
 #define cpu_has_pclmulqdq	boot_cpu_has(X86_FEATURE_PCLMULQDQ)
 #define cpu_has_perfctr_core	boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
+#define cpu_has_perfctr_nb	boot_cpu_has(X86_FEATURE_PERFCTR_NB)
 #define cpu_has_cx8		boot_cpu_has(X86_FEATURE_CX8)
 #define cpu_has_cx16		boot_cpu_has(X86_FEATURE_CX16)
 #define cpu_has_eager_fpu	boot_cpu_has(X86_FEATURE_EAGER_FPU)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 6e8fdf5ad113..28677c55113f 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -94,6 +94,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
+extern unsigned long x86_efi_facility;
 extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 9a25b522d377..86cb51e1ca96 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -44,7 +44,6 @@
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ARCH_SUPPORTS_FTRACE_OPS 1
-#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 434e2106cc87..b18df579c0e9 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -80,9 +80,9 @@ extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
 extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
 
 #ifdef CONFIG_PCI_MSI
-extern int arch_setup_hpet_msi(unsigned int irq, unsigned int id);
+extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
 #else
-static inline int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
+static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
 {
 	return -EINVAL;
 }
@@ -111,6 +111,7 @@ extern void hpet_unregister_irq_handler(rtc_irq_handler handler);
 static inline int hpet_enable(void) { return 0; }
 static inline int is_hpet_enabled(void) { return 0; }
 #define hpet_readl(a) 0
+#define default_setup_hpet_msi	NULL
 
 #endif
 #endif /* _ASM_X86_HPET_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index eb92a6ed2be7..10a78c3d3d5a 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -101,6 +101,7 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 	irq_attr->polarity	= polarity;
 }
 
+/* Intel specific interrupt remapping information */
 struct irq_2_iommu {
 	struct intel_iommu *iommu;
 	u16 irte_index;
@@ -108,6 +109,12 @@ struct irq_2_iommu {
 	u8  irte_mask;
 };
 
+/* AMD specific interrupt remapping information */
+struct irq_2_irte {
+	u16 devid; /* Device ID for IRTE table */
+	u16 index; /* Index into IRTE table*/
+};
+
 /*
  * This is performance-critical, we want to do it O(1)
  *
@@ -120,7 +127,11 @@ struct irq_cfg {
 	u8			vector;
 	u8			move_in_progress : 1;
 #ifdef CONFIG_IRQ_REMAP
-	struct irq_2_iommu irq_2_iommu;
+	u8			remapped : 1;
+	union {
+		struct irq_2_iommu irq_2_iommu;
+		struct irq_2_irte  irq_2_irte;
+	};
 #endif
 };
 
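
With the new remapped bit and the anonymous union, an irq_cfg carries either Intel (irq_2_iommu) or AMD (irq_2_irte) remapping state, never both. A hedged sketch of how a remapping driver might tag an entry (illustrative names, assumes CONFIG_IRQ_REMAP):

static void example_tag_amd_irte(struct irq_cfg *cfg, u16 devid, u16 index)
{
	cfg->remapped = 1;			/* entry is remapped */
	cfg->irq_2_irte.devid = devid;		/* AMD member of the union */
	cfg->irq_2_irte.index = index;
}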
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index b518c7509933..86095ed14135 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -25,6 +25,7 @@
 
 extern void init_hypervisor(struct cpuinfo_x86 *c);
 extern void init_hypervisor_platform(void);
+extern bool hypervisor_x2apic_available(void);
 
 /*
  * x86 hypervisor information
@@ -41,6 +42,9 @@ struct hypervisor_x86 {
 
 	/* Platform setup (run once per boot) */
 	void		(*init_platform)(void);
+
+	/* X2APIC detection (run once per boot) */
+	bool		(*x2apic_available)(void);
 };
 
 extern const struct hypervisor_x86 *x86_hyper;
@@ -51,13 +55,4 @@ extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
 extern const struct hypervisor_x86 x86_hyper_xen_hvm;
 extern const struct hypervisor_x86 x86_hyper_kvm;
 
-static inline bool hypervisor_x2apic_available(void)
-{
-	if (kvm_para_available())
-		return true;
-	if (xen_x2apic_para_available())
-		return true;
-	return false;
-}
-
 #endif
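
The hardcoded kvm/xen probe becomes a per-hypervisor callback, so each guest implementation reports its own x2apic capability. A sketch of what an instance now looks like (names illustrative; the real ones live in kvm.c, cpu/vmware.c, cpu/mshyperv.c and xen/enlighten.c, several of which are updated later in this diff):

static bool example_detect(void)
{
	return false;	/* e.g. probe a CPUID signature here */
}

static bool example_x2apic_available(void)
{
	return true;	/* e.g. consult a paravirt feature leaf */
}

static const struct hypervisor_x86 x86_hyper_example = {
	.name			= "Example",
	.detect			= example_detect,
	.x2apic_available	= example_x2apic_available,
};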
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 73d8c5398ea9..459e50a424d1 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -144,11 +144,24 @@ extern int timer_through_8259;
 	(mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
 struct io_apic_irq_attr;
+struct irq_cfg;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr);
 void setup_IO_APIC_irq_extra(u32 gsi);
 extern void ioapic_insert_resources(void);
 
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+				     unsigned int, int,
+				     struct io_apic_irq_attr *);
+extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
+				     unsigned int, int,
+				     struct io_apic_irq_attr *);
+extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
+
+extern void native_compose_msi_msg(struct pci_dev *pdev,
+				   unsigned int irq, unsigned int dest,
+				   struct msi_msg *msg, u8 hpet_id);
+extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
 int io_apic_setup_irq_pin_once(unsigned int irq, int node, struct io_apic_irq_attr *attr);
 
 extern int save_ioapic_entries(void);
@@ -179,6 +192,12 @@ extern void __init native_io_apic_init_mappings(void);
 extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
 extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
 extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
+extern void native_disable_io_apic(void);
+extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
+extern int native_ioapic_set_affinity(struct irq_data *,
+				      const struct cpumask *,
+				      bool);
 
 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 {
@@ -193,6 +212,9 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 {
 	x86_io_apic_ops.modify(apic, reg, value);
 }
+
+extern void io_apic_eoi(unsigned int apic, unsigned int vector);
+
 #else  /* !CONFIG_X86_IO_APIC */
 
 #define io_apic_assign_pci_irqs 0
@@ -223,6 +245,12 @@ static inline void disable_ioapic_support(void) { }
 #define native_io_apic_read		NULL
 #define native_io_apic_write		NULL
 #define native_io_apic_modify		NULL
+#define native_disable_io_apic		NULL
+#define native_io_apic_print_entries	NULL
+#define native_ioapic_set_affinity	NULL
+#define native_setup_ioapic_entry	NULL
+#define native_compose_msi_msg		NULL
+#define native_eoi_ioapic_pin		NULL
 #endif
 
 #endif /* _ASM_X86_IO_APIC_H */
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 5fb9bbbd2f14..95fd3527f632 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -26,8 +26,6 @@
 
 #ifdef CONFIG_IRQ_REMAP
 
-extern int irq_remapping_enabled;
-
 extern void setup_irq_remapping_ops(void);
 extern int irq_remapping_supported(void);
 extern int irq_remapping_prepare(void);
@@ -40,21 +38,19 @@ extern int setup_ioapic_remapped_entry(int irq,
 				       unsigned int destination,
 				       int vector,
 				       struct io_apic_irq_attr *attr);
-extern int set_remapped_irq_affinity(struct irq_data *data,
-				     const struct cpumask *mask,
-				     bool force);
 extern void free_remapped_irq(int irq);
 extern void compose_remapped_msi_msg(struct pci_dev *pdev,
 				     unsigned int irq, unsigned int dest,
 				     struct msi_msg *msg, u8 hpet_id);
-extern int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
-extern int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-				  int index, int sub_handle);
 extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
+extern void panic_if_irq_remap(const char *msg);
+extern bool setup_remapped_irq(int irq,
+			       struct irq_cfg *cfg,
+			       struct irq_chip *chip);
 
-#else /* CONFIG_IRQ_REMAP */
+void irq_remap_modify_chip_defaults(struct irq_chip *chip);
 
-#define irq_remapping_enabled 0
+#else /* CONFIG_IRQ_REMAP */
 
 static inline void setup_irq_remapping_ops(void) { }
 static inline int irq_remapping_supported(void) { return 0; }
@@ -71,30 +67,30 @@ static inline int setup_ioapic_remapped_entry(int irq,
 {
 	return -ENODEV;
 }
-static inline int set_remapped_irq_affinity(struct irq_data *data,
-					    const struct cpumask *mask,
-					    bool force)
-{
-	return 0;
-}
 static inline void free_remapped_irq(int irq) { }
 static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
 					    unsigned int irq, unsigned int dest,
 					    struct msi_msg *msg, u8 hpet_id)
 {
 }
-static inline int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
+static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
 {
 	return -ENODEV;
 }
-static inline int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-					 int index, int sub_handle)
+
+static inline void panic_if_irq_remap(const char *msg)
+{
+}
+
+static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
 {
-	return -ENODEV;
 }
-static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
+
+static inline bool setup_remapped_irq(int irq,
+				      struct irq_cfg *cfg,
+				      struct irq_chip *chip)
 {
-	return -ENODEV;
+	return false;
 }
 #endif /* CONFIG_IRQ_REMAP */
 
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 1508e518c7e3..aac5fa62a86c 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -109,8 +109,8 @@
 
 #define UV_BAU_MESSAGE			0xf5
 
-/* Xen vector callback to receive events in a HVM domain */
-#define XEN_HVM_EVTCHN_CALLBACK		0xf3
+/* Vector on which hypervisor callbacks will be delivered */
+#define HYPERVISOR_CALLBACK_VECTOR	0xf3
 
 /*
  * Local APIC timer IRQ vector is on a different priority level,
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 5ed1f16187be..65231e173baf 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,13 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
 	return ret;
 }
 
-static inline int kvm_para_available(void)
+static inline bool kvm_para_available(void)
 {
 	unsigned int eax, ebx, ecx, edx;
 	char signature[13];
 
 	if (boot_cpu_data.cpuid_level < 0)
-		return 0;	/* So we don't blow up on old processors */
+		return false;	/* So we don't blow up on old processors */
 
 	if (cpu_has_hypervisor) {
 		cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
@@ -101,10 +101,10 @@ static inline int kvm_para_available(void)
 		signature[12] = 0;
 
 		if (strcmp(signature, "KVMKVMKVM") == 0)
-			return 1;
+			return true;
 	}
 
-	return 0;
+	return false;
 }
 
 static inline unsigned int kvm_arch_para_features(void)
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 48142971b25d..79327e9483a3 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -27,20 +27,20 @@
 #define __asmlinkage_protect0(ret) \
 	__asmlinkage_protect_n(ret)
 #define __asmlinkage_protect1(ret, arg1) \
-	__asmlinkage_protect_n(ret, "g" (arg1))
+	__asmlinkage_protect_n(ret, "m" (arg1))
 #define __asmlinkage_protect2(ret, arg1, arg2) \
-	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2))
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
 #define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
-	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3))
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
 #define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
-	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
-			      "g" (arg4))
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4))
 #define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
-	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
-			      "g" (arg4), "g" (arg5))
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4), "m" (arg5))
 #define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
-	__asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
-			      "g" (arg4), "g" (arg5), "g" (arg6))
+	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+			      "m" (arg4), "m" (arg5), "m" (arg6))
 
 #endif /* CONFIG_X86_32 */
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ecdfee60ee4a..f4076af1f4ed 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -3,6 +3,90 @@
 
 #include <uapi/asm/mce.h>
 
+/*
+ * Machine Check support for x86
+ */
+
+/* MCG_CAP register defines */
+#define MCG_BANKCNT_MASK	0xff         /* Number of Banks */
+#define MCG_CTL_P		(1ULL<<8)    /* MCG_CTL register available */
+#define MCG_EXT_P		(1ULL<<9)    /* Extended registers available */
+#define MCG_CMCI_P		(1ULL<<10)   /* CMCI supported */
+#define MCG_EXT_CNT_MASK	0xff0000     /* Number of Extended registers */
+#define MCG_EXT_CNT_SHIFT	16
+#define MCG_EXT_CNT(c)		(((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
+#define MCG_SER_P		(1ULL<<24)   /* MCA recovery/new status bits */
+
+/* MCG_STATUS register defines */
+#define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */
+#define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */
+#define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */
+
+/* MCi_STATUS register defines */
+#define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
+#define MCI_STATUS_OVER  (1ULL<<62)  /* previous errors lost */
+#define MCI_STATUS_UC    (1ULL<<61)  /* uncorrected error */
+#define MCI_STATUS_EN    (1ULL<<60)  /* error enabled */
+#define MCI_STATUS_MISCV (1ULL<<59)  /* misc error reg. valid */
+#define MCI_STATUS_ADDRV (1ULL<<58)  /* addr reg. valid */
+#define MCI_STATUS_PCC   (1ULL<<57)  /* processor context corrupt */
+#define MCI_STATUS_S	 (1ULL<<56)  /* Signaled machine check */
+#define MCI_STATUS_AR	 (1ULL<<55)  /* Action required */
+#define MCACOD		  0xffff     /* MCA Error Code */
+
+/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
+#define MCACOD_SCRUB	0x00C0	/* 0xC0-0xCF Memory Scrubbing */
+#define MCACOD_SCRUBMSK	0xfff0
+#define MCACOD_L3WB	0x017A	/* L3 Explicit Writeback */
+#define MCACOD_DATA	0x0134	/* Data Load */
+#define MCACOD_INSTR	0x0150	/* Instruction Fetch */
+
+/* MCi_MISC register defines */
+#define MCI_MISC_ADDR_LSB(m)	((m) & 0x3f)
+#define MCI_MISC_ADDR_MODE(m)	(((m) >> 6) & 7)
+#define  MCI_MISC_ADDR_SEGOFF	0	/* segment offset */
+#define  MCI_MISC_ADDR_LINEAR	1	/* linear address */
+#define  MCI_MISC_ADDR_PHYS	2	/* physical address */
+#define  MCI_MISC_ADDR_MEM	3	/* memory address */
+#define  MCI_MISC_ADDR_GENERIC	7	/* generic */
+
+/* CTL2 register defines */
+#define MCI_CTL2_CMCI_EN		(1ULL << 30)
+#define MCI_CTL2_CMCI_THRESHOLD_MASK	0x7fffULL
+
+#define MCJ_CTX_MASK		3
+#define MCJ_CTX(flags)		((flags) & MCJ_CTX_MASK)
+#define MCJ_CTX_RANDOM		0    /* inject context: random */
+#define MCJ_CTX_PROCESS		0x1  /* inject context: process */
+#define MCJ_CTX_IRQ		0x2  /* inject context: IRQ */
+#define MCJ_NMI_BROADCAST	0x4  /* do NMI broadcasting */
+#define MCJ_EXCEPTION		0x8  /* raise as exception */
+#define MCJ_IRQ_BRAODCAST	0x10 /* do IRQ broadcasting */
+
+#define MCE_OVERFLOW 0		/* bit 0 in flags means overflow */
+
+/* Software defined banks */
+#define MCE_EXTENDED_BANK	128
+#define MCE_THERMAL_BANK	(MCE_EXTENDED_BANK + 0)
+#define K8_MCE_THRESHOLD_BASE	(MCE_EXTENDED_BANK + 1)
+
+#define MCE_LOG_LEN 32
+#define MCE_LOG_SIGNATURE	"MACHINECHECK"
+
+/*
+ * This structure contains all data related to the MCE log.  Also
+ * carries a signature to make it easier to find from external
+ * debugging tools.  Each entry is only valid when its finished flag
+ * is set.
+ */
+struct mce_log {
+	char signature[12]; /* "MACHINECHECK" */
+	unsigned len;	    /* = MCE_LOG_LEN */
+	unsigned next;
+	unsigned flags;
+	unsigned recordlen;	/* length of struct mce */
+	struct mce entry[MCE_LOG_LEN];
+};
 
 struct mca_config {
 	bool dont_log_ce;
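
These kernel-only definitions move in from the exported uapi header. A minimal decode sketch using them, assuming rdmsrl() and MSR_IA32_MCG_CAP from the usual msr headers (illustrative, not part of the patch):

static void example_dump_mcg_cap(void)
{
	u64 cap;

	rdmsrl(MSR_IA32_MCG_CAP, cap);
	pr_info("MCA banks: %llu\n", cap & MCG_BANKCNT_MASK);
	pr_info("extended MSR count: %llu\n", MCG_EXT_CNT(cap));
	if (cap & MCG_CMCI_P)
		pr_info("CMCI supported\n");
}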
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 79ce5685ab64..c2934be2446a 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -11,4 +11,8 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
+void hyperv_callback_vector(void);
+void hyperv_vector_handler(struct pt_regs *regs);
+void hv_register_vmbus_handler(int irq, irq_handler_t handler);
+
 #endif
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index bcdff997668c..2f366d0ac6b4 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -4,7 +4,8 @@
4#define MWAIT_SUBSTATE_MASK 0xf 4#define MWAIT_SUBSTATE_MASK 0xf
5#define MWAIT_CSTATE_MASK 0xf 5#define MWAIT_CSTATE_MASK 0xf
6#define MWAIT_SUBSTATE_SIZE 4 6#define MWAIT_SUBSTATE_SIZE 4
7#define MWAIT_MAX_NUM_CSTATES 8 7#define MWAIT_HINT2CSTATE(hint) (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
8#define MWAIT_HINT2SUBSTATE(hint) ((hint) & MWAIT_CSTATE_MASK)
8 9
9#define CPUID_MWAIT_LEAF 5 10#define CPUID_MWAIT_LEAF 5
10#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 11#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
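
The replaced MWAIT_MAX_NUM_CSTATES constant gives way to two decoding helpers: an MWAIT hint carries the C-state in its upper nibble and the sub-state in its lower nibble (both masks are 0xf), so the macros just split the byte. For example:

    /* Sketch: split an MWAIT hint into C-state and sub-state. */
    static inline void mwait_hint_decode(unsigned int hint,
                                         unsigned int *cstate,
                                         unsigned int *substate)
    {
            *cstate   = MWAIT_HINT2CSTATE(hint);    /* 0x21 -> 2 */
            *substate = MWAIT_HINT2SUBSTATE(hint);  /* 0x21 -> 1 */
    }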
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index dba7805176bf..c28fd02f4bf7 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -121,9 +121,12 @@ static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
121#define arch_teardown_msi_irq x86_teardown_msi_irq 121#define arch_teardown_msi_irq x86_teardown_msi_irq
122#define arch_restore_msi_irqs x86_restore_msi_irqs 122#define arch_restore_msi_irqs x86_restore_msi_irqs
123/* implemented in arch/x86/kernel/apic/io_apic.c */	123/* implemented in arch/x86/kernel/apic/io_apic.c */
124struct msi_desc;
124int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 125int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
125void native_teardown_msi_irq(unsigned int irq); 126void native_teardown_msi_irq(unsigned int irq);
126void native_restore_msi_irqs(struct pci_dev *dev, int irq); 127void native_restore_msi_irqs(struct pci_dev *dev, int irq);
128int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
129 unsigned int irq_base, unsigned int irq_offset);
127/* default to the implementation in drivers/pci/msi.c */	130/* default to the implementation in drivers/pci/msi.c */
128#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS 131#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
129#define HAVE_DEFAULT_MSI_RESTORE_IRQS 132#define HAVE_DEFAULT_MSI_RESTORE_IRQS
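
The forward declaration of struct msi_desc keeps this header free of an msi.h include, and the new setup_msi_irq() programs a single MSI message, with the descriptor's vector placed at irq_base + irq_offset. A hypothetical caller (names illustrative):

    /* Sketch: program the first vector of an MSI block at 'irq'. */
    static int msi_setup_first_sketch(struct pci_dev *pdev,
                                      struct msi_desc *desc, unsigned int irq)
    {
            return setup_msi_irq(pdev, desc, irq, 0);       /* offset 0 */
    }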
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 4fabcdf1cfa7..57cb63402213 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -29,8 +29,13 @@
29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) 29#define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23)
30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL 30#define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL
31 31
32#define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) 32#define AMD64_EVENTSEL_INT_CORE_ENABLE (1ULL << 36)
33#define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) 33#define AMD64_EVENTSEL_GUESTONLY (1ULL << 40)
34#define AMD64_EVENTSEL_HOSTONLY (1ULL << 41)
35
36#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT 37
37#define AMD64_EVENTSEL_INT_CORE_SEL_MASK \
38 (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)
34 39
35#define AMD64_EVENTSEL_EVENT \ 40#define AMD64_EVENTSEL_EVENT \
36 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) 41 (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
@@ -46,8 +51,12 @@
46#define AMD64_RAW_EVENT_MASK \ 51#define AMD64_RAW_EVENT_MASK \
47 (X86_RAW_EVENT_MASK | \ 52 (X86_RAW_EVENT_MASK | \
48 AMD64_EVENTSEL_EVENT) 53 AMD64_EVENTSEL_EVENT)
54#define AMD64_RAW_EVENT_MASK_NB \
55 (AMD64_EVENTSEL_EVENT | \
56 ARCH_PERFMON_EVENTSEL_UMASK)
49#define AMD64_NUM_COUNTERS 4 57#define AMD64_NUM_COUNTERS 4
50#define AMD64_NUM_COUNTERS_CORE 6 58#define AMD64_NUM_COUNTERS_CORE 6
59#define AMD64_NUM_COUNTERS_NB 4
51 60
52#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c 61#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
53#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) 62#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
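
The new symbols describe the family-15h northbridge counters: four of them, with an event select that carries an interrupt-enable bit (36) and a 4-bit core-select field (bits 37-40), and a raw-event mask reduced to event plus unit mask. A hedged sketch of how the bits combine (the 'config' value would come from perf_event_attr):

    /* Sketch: NB event select routing the counter interrupt to core 2. */
    static inline u64 amd_nb_evtsel_sketch(u64 config)
    {
            return (config & AMD64_RAW_EVENT_MASK_NB) |
                   AMD64_EVENTSEL_INT_CORE_ENABLE |
                   (2ULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT);
    }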
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b6e41b8cd659..1e672234c4ff 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -142,6 +142,11 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
142 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT; 142 return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
143} 143}
144 144
145static inline unsigned long pud_pfn(pud_t pud)
146{
147 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
148}
149
145#define pte_page(pte) pfn_to_page(pte_pfn(pte)) 150#define pte_page(pte) pfn_to_page(pte_pfn(pte))
146 151
147static inline int pmd_large(pmd_t pte) 152static inline int pmd_large(pmd_t pte)
@@ -798,6 +803,19 @@ static inline unsigned long page_level_mask(enum pg_level level)
798 return ~(page_level_size(level) - 1); 803 return ~(page_level_size(level) - 1);
799} 804}
800 805
806/*
807 * The x86 doesn't have any external MMU info: the kernel page
808 * tables contain all the necessary information.
809 */
810static inline void update_mmu_cache(struct vm_area_struct *vma,
811 unsigned long addr, pte_t *ptep)
812{
813}
814static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
815 unsigned long addr, pmd_t *pmd)
816{
817}
818
801#include <asm-generic/pgtable.h> 819#include <asm-generic/pgtable.h>
802#endif /* __ASSEMBLY__ */ 820#endif /* __ASSEMBLY__ */
803 821
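
pud_pfn() completes the pte/pmd/pud trio, and the update_mmu_cache*() stubs are hoisted into this common header from the 32- and 64-bit sub-headers (removed in the two hunks below). By analogy with pte_page(), a pud-level helper would be (hypothetical name):

    /* Sketch: struct page backing a 1GiB (pud-level) mapping. */
    #define pud_page_sketch(pud)    pfn_to_page(pud_pfn(pud))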
diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
index 8faa215a503e..9ee322103c6d 100644
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -66,13 +66,6 @@ do { \
66 __flush_tlb_one((vaddr)); \ 66 __flush_tlb_one((vaddr)); \
67} while (0) 67} while (0)
68 68
69/*
70 * The i386 doesn't have any external MMU info: the kernel page
71 * tables contain all the necessary information.
72 */
73#define update_mmu_cache(vma, address, ptep) do { } while (0)
74#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
75
76#endif /* !__ASSEMBLY__ */ 69#endif /* !__ASSEMBLY__ */
77 70
78/* 71/*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index b5d30ad39022..e22c1dbf7feb 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -142,9 +142,6 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
142#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) 142#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
143#define pte_unmap(pte) ((void)(pte))/* NOP */ 143#define pte_unmap(pte) ((void)(pte))/* NOP */
144 144
145#define update_mmu_cache(vma, address, ptep) do { } while (0)
146#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
147
148/* Encode and de-code a swap entry */ 145/* Encode and de-code a swap entry */
149#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE 146#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
150#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) 147#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index bdee8bd318ea..8277941cbe99 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -89,7 +89,6 @@ struct cpuinfo_x86 {
89 char wp_works_ok; /* It doesn't on 386's */ 89 char wp_works_ok; /* It doesn't on 386's */
90 90
91 /* Problems on some 486Dx4's and old 386's: */ 91 /* Problems on some 486Dx4's and old 386's: */
92 char hlt_works_ok;
93 char hard_math; 92 char hard_math;
94 char rfu; 93 char rfu;
95 char fdiv_bug; 94 char fdiv_bug;
@@ -165,15 +164,6 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
165 164
166extern const struct seq_operations cpuinfo_op; 165extern const struct seq_operations cpuinfo_op;
167 166
168static inline int hlt_works(int cpu)
169{
170#ifdef CONFIG_X86_32
171 return cpu_data(cpu).hlt_works_ok;
172#else
173 return 1;
174#endif
175}
176
177#define cache_line_size() (boot_cpu_data.x86_cache_alignment) 167#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
178 168
179extern void cpu_detect(struct cpuinfo_x86 *c); 169extern void cpu_detect(struct cpuinfo_x86 *c);
@@ -725,7 +715,7 @@ extern unsigned long boot_option_idle_override;
725extern bool amd_e400_c1e_detected; 715extern bool amd_e400_c1e_detected;
726 716
727enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, 717enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
728 IDLE_POLL, IDLE_FORCE_MWAIT}; 718 IDLE_POLL};
729 719
730extern void enable_sep_cpu(void); 720extern void enable_sep_cpu(void);
731extern int sysenter_setup(void); 721extern int sysenter_setup(void);
@@ -944,7 +934,7 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
944extern int get_tsc_mode(unsigned long adr); 934extern int get_tsc_mode(unsigned long adr);
945extern int set_tsc_mode(unsigned int val); 935extern int set_tsc_mode(unsigned int val);
946 936
947extern int amd_get_nb_id(int cpu); 937extern u16 amd_get_nb_id(int cpu);
948 938
949struct aperfmperf { 939struct aperfmperf {
950 u64 aperf, mperf; 940 u64 aperf, mperf;
@@ -999,7 +989,11 @@ extern unsigned long arch_align_stack(unsigned long sp);
999extern void free_init_pages(char *what, unsigned long begin, unsigned long end); 989extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
1000 990
1001void default_idle(void); 991void default_idle(void);
1002bool set_pm_idle_to_default(void); 992#ifdef CONFIG_XEN
993bool xen_set_default_idle(void);
994#else
995#define xen_set_default_idle 0
996#endif
1003 997
1004void stop_this_cpu(void *dummy); 998void stop_this_cpu(void *dummy);
1005 999
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 6c7fc25f2c34..5c6e4fb370f5 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -47,6 +47,12 @@
47# define NEED_NOPL 0 47# define NEED_NOPL 0
48#endif 48#endif
49 49
50#ifdef CONFIG_MATOM
51# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
52#else
53# define NEED_MOVBE 0
54#endif
55
50#ifdef CONFIG_X86_64 56#ifdef CONFIG_X86_64
51#ifdef CONFIG_PARAVIRT 57#ifdef CONFIG_PARAVIRT
52/* Paravirtualized systems may not have PSE or PGE available */ 58/* Paravirtualized systems may not have PSE or PGE available */
@@ -80,7 +86,7 @@
80 86
81#define REQUIRED_MASK2 0 87#define REQUIRED_MASK2 0
82#define REQUIRED_MASK3 (NEED_NOPL) 88#define REQUIRED_MASK3 (NEED_NOPL)
83#define REQUIRED_MASK4 0 89#define REQUIRED_MASK4 (NEED_MOVBE)
84#define REQUIRED_MASK5 0 90#define REQUIRED_MASK5 0
85#define REQUIRED_MASK6 0 91#define REQUIRED_MASK6 0
86#define REQUIRED_MASK7 0 92#define REQUIRED_MASK7 0
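
Building an Atom-optimized kernel (CONFIG_MATOM) now makes MOVBE a hard requirement, enforced by the boot-time required-features check. The placement in REQUIRED_MASK4 follows from the feature's word number; a sketch of the arithmetic, assuming the usual cpufeature.h encoding of X86_FEATURE_MOVBE as word 4, bit 22:

    /*
     * Assumed encoding: X86_FEATURE_MOVBE = (4*32 + 22).
     *
     *   NEED_MOVBE = 1 << (X86_FEATURE_MOVBE & 31)
     *              = 1 << 22
     *              = 0x00400000   -> bit 22 of REQUIRED_MASK4
     */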
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index b47c2a82ff15..062921ef34e9 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -16,7 +16,7 @@ extern void uv_system_init(void);
16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
17 struct mm_struct *mm, 17 struct mm_struct *mm,
18 unsigned long start, 18 unsigned long start,
19 unsigned end, 19 unsigned long end,
20 unsigned int cpu); 20 unsigned int cpu);
21 21
22#else /* X86_UV */ 22#else /* X86_UV */
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 21f7385badb8..2c32df95bb78 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV architectural definitions 6 * SGI UV architectural definitions
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_HUB_H 11#ifndef _ASM_X86_UV_UV_HUB_H
@@ -175,6 +175,7 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
175 */ 175 */
176#define UV1_HUB_REVISION_BASE 1 176#define UV1_HUB_REVISION_BASE 1
177#define UV2_HUB_REVISION_BASE 3 177#define UV2_HUB_REVISION_BASE 3
178#define UV3_HUB_REVISION_BASE 5
178 179
179static inline int is_uv1_hub(void) 180static inline int is_uv1_hub(void)
180{ 181{
@@ -183,6 +184,23 @@ static inline int is_uv1_hub(void)
183 184
184static inline int is_uv2_hub(void) 185static inline int is_uv2_hub(void)
185{ 186{
187 return ((uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE) &&
188 (uv_hub_info->hub_revision < UV3_HUB_REVISION_BASE));
189}
190
191static inline int is_uv3_hub(void)
192{
193 return uv_hub_info->hub_revision >= UV3_HUB_REVISION_BASE;
194}
195
196static inline int is_uv_hub(void)
197{
198 return uv_hub_info->hub_revision;
199}
200
201/* code common to uv2 and uv3 only */
202static inline int is_uvx_hub(void)
203{
186 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE; 204 return uv_hub_info->hub_revision >= UV2_HUB_REVISION_BASE;
187} 205}
188 206
@@ -230,14 +248,23 @@ union uvh_apicid {
230#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024) 248#define UV2_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
231#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024) 249#define UV2_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
232 250
233#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE \ 251#define UV3_LOCAL_MMR_BASE 0xfa000000UL
234 : UV2_LOCAL_MMR_BASE) 252#define UV3_GLOBAL_MMR32_BASE 0xfc000000UL
235#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE \ 253#define UV3_LOCAL_MMR_SIZE (32UL * 1024 * 1024)
236 : UV2_GLOBAL_MMR32_BASE) 254#define UV3_GLOBAL_MMR32_SIZE (32UL * 1024 * 1024)
237#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \ 255
238 UV2_LOCAL_MMR_SIZE) 256#define UV_LOCAL_MMR_BASE (is_uv1_hub() ? UV1_LOCAL_MMR_BASE : \
257 (is_uv2_hub() ? UV2_LOCAL_MMR_BASE : \
258 UV3_LOCAL_MMR_BASE))
259#define UV_GLOBAL_MMR32_BASE (is_uv1_hub() ? UV1_GLOBAL_MMR32_BASE :\
260 (is_uv2_hub() ? UV2_GLOBAL_MMR32_BASE :\
261 UV3_GLOBAL_MMR32_BASE))
262#define UV_LOCAL_MMR_SIZE (is_uv1_hub() ? UV1_LOCAL_MMR_SIZE : \
263 (is_uv2_hub() ? UV2_LOCAL_MMR_SIZE : \
264 UV3_LOCAL_MMR_SIZE))
239#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\ 265#define UV_GLOBAL_MMR32_SIZE (is_uv1_hub() ? UV1_GLOBAL_MMR32_SIZE :\
240 UV2_GLOBAL_MMR32_SIZE) 266 (is_uv2_hub() ? UV2_GLOBAL_MMR32_SIZE :\
267 UV3_GLOBAL_MMR32_SIZE))
241#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base) 268#define UV_GLOBAL_MMR64_BASE (uv_hub_info->global_mmr_base)
242 269
243#define UV_GLOBAL_GRU_MMR_BASE 0x4000000 270#define UV_GLOBAL_GRU_MMR_BASE 0x4000000
@@ -599,6 +626,7 @@ static inline void uv_hub_send_ipi(int pnode, int apicid, int vector)
599 * 1 - UV1 rev 1.0 initial silicon 626 * 1 - UV1 rev 1.0 initial silicon
600 * 2 - UV1 rev 2.0 production silicon 627 * 2 - UV1 rev 2.0 production silicon
601 * 3 - UV2 rev 1.0 initial silicon 628 * 3 - UV2 rev 1.0 initial silicon
629 * 5 - UV3 rev 1.0 initial silicon
602 */ 630 */
603static inline int uv_get_min_hub_revision_id(void) 631static inline int uv_get_min_hub_revision_id(void)
604{ 632{
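
Since hub_revision stays 0 until uv_system_init() fills it in, is_uv_hub() doubles as an "initialized and running on UV" test, while the other predicates carve the revision space into [1,3) for UV1, [3,5) for UV2, and 5 and up for UV3. For revision 5 (UV3 rev 1.0 in the table above) the helpers would report:

    /* Sketch: predicate results for hub_revision == 5 (UV3 rev 1.0). */
    /*   is_uv1_hub() -> 0   (5 >= UV2_HUB_REVISION_BASE)            */
    /*   is_uv2_hub() -> 0   (5 >= UV3_HUB_REVISION_BASE)            */
    /*   is_uv3_hub() -> 1                                           */
    /*   is_uvx_hub() -> 1   (shared UV2/UV3 code path)              */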
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index cf1d73643f60..bd5f80e58a23 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,16 +5,25 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
12#define _ASM_X86_UV_UV_MMRS_H 12#define _ASM_X86_UV_UV_MMRS_H
13 13
14/* 14/*
15 * This file contains MMR definitions for both UV1 & UV2 hubs.	15 * This file contains MMR definitions for all UV hub types.
16 * 16 *
17 * In general, MMR addresses and structures are identical on both hubs. 17 * To minimize coding differences between hub types, the symbols are
18 * grouped by architecture types.
19 *
20 * UVH - definitions common to all UV hub types.
21 * UVXH - definitions common to all UV eXtended hub types (currently 2 & 3).
22 * UV1H - definitions specific to UV type 1 hub.
23 * UV2H - definitions specific to UV type 2 hub.
24 * UV3H - definitions specific to UV type 3 hub.
25 *
26 * So in general, MMR addresses and structures are identical on all hub types.
18 * These MMRs are identified as: 27 * These MMRs are identified as:
19 * #define UVH_xxx <address> 28 * #define UVH_xxx <address>
20 * union uvh_xxx { 29 * union uvh_xxx {
@@ -23,24 +32,36 @@
23 * } s; 32 * } s;
24 * }; 33 * };
25 * 34 *
26 * If the MMR exists on both hub types but has different addresses or	35 * If the MMR exists on all hub types but has different addresses:
27 * contents, the MMR definition is similar to: 36 * #define UV1Hxxx a
28 * #define UV1H_xxx <uv1 address> 37 * #define UV2Hxxx b
29 * #define UV2H_xxx <uv2address> 38 * #define UV3Hxxx c
30 * #define UVH_xxx (is_uv1_hub() ? UV1H_xxx : UV2H_xxx) 39 * #define UVHxxx (is_uv1_hub() ? UV1Hxxx :
40 * (is_uv2_hub() ? UV2Hxxx :
41 * UV3Hxxx))
42 *
43 * If the MMR exists on all hub types > 1 but has different addresses:
44 * #define UV2Hxxx b
45 * #define UV3Hxxx c
46 * #define UVXHxxx (is_uv2_hub() ? UV2Hxxx :
47 * UV3Hxxx)
48 *
31 * union uvh_xxx { 49 * union uvh_xxx {
32 * unsigned long v; 50 * unsigned long v;
33 * struct uv1h_int_cmpd_s { (Common fields only) 51 * struct uvh_xxx_s { # Common fields only
34 * } s; 52 * } s;
35 * struct uv1h_int_cmpd_s { (Full UV1 definition) 53 * struct uv1h_xxx_s { # Full UV1 definition (*)
36 * } s1; 54 * } s1;
37 * struct uv2h_int_cmpd_s { (Full UV2 definition) 55 * struct uv2h_xxx_s { # Full UV2 definition (*)
38 * } s2; 56 * } s2;
57 * struct uv3h_xxx_s { # Full UV3 definition (*)
58 * } s3;
39 * }; 59 * };
60 * (* - if present and different from the common struct)
40 * 61 *
41 * Only essential differences are enumerated. For example, if the address is	62 * Only essential differences are enumerated. For example, if the address is
42 * the same for both UV1 & UV2, only a single #define is generated. Likewise,	63 * the same for all UVs, only a single #define is generated. Likewise,
43 * if the contents are the same for both hubs, only the "s" structure is	64 * if the contents are the same for all hubs, only the "s" structure is
44 * generated. 65 * generated.
45 * 66 *
46 * If the MMR exists on ONLY 1 type of hub, no generic definition is 67 * If the MMR exists on ONLY 1 type of hub, no generic definition is
@@ -51,6 +72,8 @@
51 * struct uvh_int_cmpd_s { 72 * struct uvh_int_cmpd_s {
52 * } sn; 73 * } sn;
53 * }; 74 * };
75 *
76 * (GEN Flags: mflags_opt= undefs=0 UV23=UVXH)
54 */ 77 */
55 78
56#define UV_MMR_ENABLE (1UL << 63) 79#define UV_MMR_ENABLE (1UL << 63)
@@ -58,15 +81,18 @@
58#define UV1_HUB_PART_NUMBER 0x88a5 81#define UV1_HUB_PART_NUMBER 0x88a5
59#define UV2_HUB_PART_NUMBER 0x8eb8 82#define UV2_HUB_PART_NUMBER 0x8eb8
60#define UV2_HUB_PART_NUMBER_X 0x1111 83#define UV2_HUB_PART_NUMBER_X 0x1111
84#define UV3_HUB_PART_NUMBER 0x9578
85#define UV3_HUB_PART_NUMBER_X 0x4321
61 86
62/* Compat: if this #define is present, UV headers support UV2 */ 87/* Compat: Indicate which UV Hubs are supported. */
63#define UV2_HUB_IS_SUPPORTED 1 88#define UV2_HUB_IS_SUPPORTED 1
89#define UV3_HUB_IS_SUPPORTED 1
64 90
65/* ========================================================================= */ 91/* ========================================================================= */
66/* UVH_BAU_DATA_BROADCAST */ 92/* UVH_BAU_DATA_BROADCAST */
67/* ========================================================================= */ 93/* ========================================================================= */
68#define UVH_BAU_DATA_BROADCAST 0x61688UL 94#define UVH_BAU_DATA_BROADCAST 0x61688UL
69#define UVH_BAU_DATA_BROADCAST_32 0x440 95#define UVH_BAU_DATA_BROADCAST_32 0x440
70 96
71#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0 97#define UVH_BAU_DATA_BROADCAST_ENABLE_SHFT 0
72#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL 98#define UVH_BAU_DATA_BROADCAST_ENABLE_MASK 0x0000000000000001UL
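
Every MMR in this file is published the same way: an address, per-field SHFT/MASK pairs, and a bitfield union, so field access is uniform. For instance:

    /* Sketch: extract the ENABLE field from a BAU_DATA_BROADCAST value. */
    static inline int bau_broadcast_enabled_sketch(unsigned long v)
    {
            return (v & UVH_BAU_DATA_BROADCAST_ENABLE_MASK)
                            >> UVH_BAU_DATA_BROADCAST_ENABLE_SHFT;
    }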
@@ -82,8 +108,8 @@ union uvh_bau_data_broadcast_u {
82/* ========================================================================= */ 108/* ========================================================================= */
83/* UVH_BAU_DATA_CONFIG */ 109/* UVH_BAU_DATA_CONFIG */
84/* ========================================================================= */ 110/* ========================================================================= */
85#define UVH_BAU_DATA_CONFIG 0x61680UL 111#define UVH_BAU_DATA_CONFIG 0x61680UL
86#define UVH_BAU_DATA_CONFIG_32 0x438 112#define UVH_BAU_DATA_CONFIG_32 0x438
87 113
88#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0 114#define UVH_BAU_DATA_CONFIG_VECTOR_SHFT 0
89#define UVH_BAU_DATA_CONFIG_DM_SHFT 8 115#define UVH_BAU_DATA_CONFIG_DM_SHFT 8
@@ -121,10 +147,14 @@ union uvh_bau_data_config_u {
121/* ========================================================================= */ 147/* ========================================================================= */
122/* UVH_EVENT_OCCURRED0 */ 148/* UVH_EVENT_OCCURRED0 */
123/* ========================================================================= */ 149/* ========================================================================= */
124#define UVH_EVENT_OCCURRED0 0x70000UL 150#define UVH_EVENT_OCCURRED0 0x70000UL
125#define UVH_EVENT_OCCURRED0_32 0x5e8 151#define UVH_EVENT_OCCURRED0_32 0x5e8
152
153#define UVH_EVENT_OCCURRED0_LB_HCERR_SHFT 0
154#define UVH_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
155#define UVH_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
156#define UVH_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
126 157
127#define UV1H_EVENT_OCCURRED0_LB_HCERR_SHFT 0
128#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1 158#define UV1H_EVENT_OCCURRED0_GR0_HCERR_SHFT 1
129#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2 159#define UV1H_EVENT_OCCURRED0_GR1_HCERR_SHFT 2
130#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3 160#define UV1H_EVENT_OCCURRED0_LH_HCERR_SHFT 3
@@ -135,7 +165,6 @@ union uvh_bau_data_config_u {
135#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8 165#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 8
136#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9 166#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 9
137#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10 167#define UV1H_EVENT_OCCURRED0_LH_AOERR0_SHFT 10
138#define UV1H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11
139#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12 168#define UV1H_EVENT_OCCURRED0_XN_AOERR0_SHFT 12
140#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13 169#define UV1H_EVENT_OCCURRED0_SI_AOERR0_SHFT 13
141#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14 170#define UV1H_EVENT_OCCURRED0_LB_AOERR1_SHFT 14
@@ -181,7 +210,6 @@ union uvh_bau_data_config_u {
181#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54 210#define UV1H_EVENT_OCCURRED0_RTC3_SHFT 54
182#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55 211#define UV1H_EVENT_OCCURRED0_BAU_DATA_SHFT 55
183#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56 212#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_SHFT 56
184#define UV1H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL
185#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL 213#define UV1H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000002UL
186#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL 214#define UV1H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000004UL
187#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL 215#define UV1H_EVENT_OCCURRED0_LH_HCERR_MASK 0x0000000000000008UL
@@ -192,7 +220,6 @@ union uvh_bau_data_config_u {
192#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL 220#define UV1H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000000100UL
193#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL 221#define UV1H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000000200UL
194#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL 222#define UV1H_EVENT_OCCURRED0_LH_AOERR0_MASK 0x0000000000000400UL
195#define UV1H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL
196#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL 223#define UV1H_EVENT_OCCURRED0_XN_AOERR0_MASK 0x0000000000001000UL
197#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL 224#define UV1H_EVENT_OCCURRED0_SI_AOERR0_MASK 0x0000000000002000UL
198#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL 225#define UV1H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000004000UL
@@ -239,188 +266,130 @@ union uvh_bau_data_config_u {
239#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL 266#define UV1H_EVENT_OCCURRED0_BAU_DATA_MASK 0x0080000000000000UL
240#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL 267#define UV1H_EVENT_OCCURRED0_POWER_MANAGEMENT_REQ_MASK 0x0100000000000000UL
241 268
242#define UV2H_EVENT_OCCURRED0_LB_HCERR_SHFT 0 269#define UVXH_EVENT_OCCURRED0_QP_HCERR_SHFT 1
243#define UV2H_EVENT_OCCURRED0_QP_HCERR_SHFT 1 270#define UVXH_EVENT_OCCURRED0_RH_HCERR_SHFT 2
244#define UV2H_EVENT_OCCURRED0_RH_HCERR_SHFT 2 271#define UVXH_EVENT_OCCURRED0_LH0_HCERR_SHFT 3
245#define UV2H_EVENT_OCCURRED0_LH0_HCERR_SHFT 3 272#define UVXH_EVENT_OCCURRED0_LH1_HCERR_SHFT 4
246#define UV2H_EVENT_OCCURRED0_LH1_HCERR_SHFT 4 273#define UVXH_EVENT_OCCURRED0_GR0_HCERR_SHFT 5
247#define UV2H_EVENT_OCCURRED0_GR0_HCERR_SHFT 5 274#define UVXH_EVENT_OCCURRED0_GR1_HCERR_SHFT 6
248#define UV2H_EVENT_OCCURRED0_GR1_HCERR_SHFT 6 275#define UVXH_EVENT_OCCURRED0_NI0_HCERR_SHFT 7
249#define UV2H_EVENT_OCCURRED0_NI0_HCERR_SHFT 7 276#define UVXH_EVENT_OCCURRED0_NI1_HCERR_SHFT 8
250#define UV2H_EVENT_OCCURRED0_NI1_HCERR_SHFT 8 277#define UVXH_EVENT_OCCURRED0_LB_AOERR0_SHFT 9
251#define UV2H_EVENT_OCCURRED0_LB_AOERR0_SHFT 9 278#define UVXH_EVENT_OCCURRED0_QP_AOERR0_SHFT 10
252#define UV2H_EVENT_OCCURRED0_QP_AOERR0_SHFT 10 279#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12
253#define UV2H_EVENT_OCCURRED0_RH_AOERR0_SHFT 11 280#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13
254#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_SHFT 12 281#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14
255#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_SHFT 13 282#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15
256#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_SHFT 14 283#define UVXH_EVENT_OCCURRED0_XB_AOERR0_SHFT 16
257#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_SHFT 15 284#define UVXH_EVENT_OCCURRED0_RT_AOERR0_SHFT 17
258#define UV2H_EVENT_OCCURRED0_XB_AOERR0_SHFT 16 285#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18
259#define UV2H_EVENT_OCCURRED0_RT_AOERR0_SHFT 17 286#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19
260#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_SHFT 18 287#define UVXH_EVENT_OCCURRED0_LB_AOERR1_SHFT 20
261#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_SHFT 19 288#define UVXH_EVENT_OCCURRED0_QP_AOERR1_SHFT 21
262#define UV2H_EVENT_OCCURRED0_LB_AOERR1_SHFT 20 289#define UVXH_EVENT_OCCURRED0_RH_AOERR1_SHFT 22
263#define UV2H_EVENT_OCCURRED0_QP_AOERR1_SHFT 21 290#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23
264#define UV2H_EVENT_OCCURRED0_RH_AOERR1_SHFT 22 291#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24
265#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_SHFT 23 292#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25
266#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_SHFT 24 293#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26
267#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_SHFT 25 294#define UVXH_EVENT_OCCURRED0_XB_AOERR1_SHFT 27
268#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_SHFT 26 295#define UVXH_EVENT_OCCURRED0_RT_AOERR1_SHFT 28
269#define UV2H_EVENT_OCCURRED0_XB_AOERR1_SHFT 27 296#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29
270#define UV2H_EVENT_OCCURRED0_RT_AOERR1_SHFT 28 297#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30
271#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_SHFT 29 298#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31
272#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_SHFT 30 299#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32
273#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_SHFT 31 300#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33
274#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_SHFT 32 301#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34
275#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_SHFT 33 302#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35
276#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_SHFT 34 303#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36
277#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_SHFT 35 304#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37
278#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_SHFT 36 305#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38
279#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_SHFT 37 306#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39
280#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_SHFT 38 307#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40
281#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_SHFT 39 308#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41
282#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_SHFT 40 309#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42
283#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_SHFT 41 310#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43
284#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_SHFT 42 311#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44
285#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_SHFT 43 312#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45
286#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_SHFT 44 313#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46
287#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_SHFT 45 314#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47
288#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_SHFT 46 315#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48
289#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_SHFT 47 316#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49
290#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_SHFT 48 317#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50
291#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_SHFT 49 318#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51
292#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_SHFT 50 319#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52
293#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_SHFT 51 320#define UVXH_EVENT_OCCURRED0_IPI_INT_SHFT 53
294#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_SHFT 52 321#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54
295#define UV2H_EVENT_OCCURRED0_IPI_INT_SHFT 53 322#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55
296#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_SHFT 54 323#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56
297#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_SHFT 55 324#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57
298#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_SHFT 56 325#define UVXH_EVENT_OCCURRED0_PROFILE_INT_SHFT 58
299#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_SHFT 57 326#define UVXH_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL
300#define UV2H_EVENT_OCCURRED0_PROFILE_INT_SHFT 58 327#define UVXH_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL
301#define UV2H_EVENT_OCCURRED0_LB_HCERR_MASK 0x0000000000000001UL 328#define UVXH_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL
302#define UV2H_EVENT_OCCURRED0_QP_HCERR_MASK 0x0000000000000002UL 329#define UVXH_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL
303#define UV2H_EVENT_OCCURRED0_RH_HCERR_MASK 0x0000000000000004UL 330#define UVXH_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL
304#define UV2H_EVENT_OCCURRED0_LH0_HCERR_MASK 0x0000000000000008UL 331#define UVXH_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL
305#define UV2H_EVENT_OCCURRED0_LH1_HCERR_MASK 0x0000000000000010UL 332#define UVXH_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL
306#define UV2H_EVENT_OCCURRED0_GR0_HCERR_MASK 0x0000000000000020UL 333#define UVXH_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL
307#define UV2H_EVENT_OCCURRED0_GR1_HCERR_MASK 0x0000000000000040UL 334#define UVXH_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL
308#define UV2H_EVENT_OCCURRED0_NI0_HCERR_MASK 0x0000000000000080UL 335#define UVXH_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL
309#define UV2H_EVENT_OCCURRED0_NI1_HCERR_MASK 0x0000000000000100UL 336#define UVXH_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL
310#define UV2H_EVENT_OCCURRED0_LB_AOERR0_MASK 0x0000000000000200UL 337#define UVXH_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL
311#define UV2H_EVENT_OCCURRED0_QP_AOERR0_MASK 0x0000000000000400UL 338#define UVXH_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL
312#define UV2H_EVENT_OCCURRED0_RH_AOERR0_MASK 0x0000000000000800UL 339#define UVXH_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL
313#define UV2H_EVENT_OCCURRED0_LH0_AOERR0_MASK 0x0000000000001000UL 340#define UVXH_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL
314#define UV2H_EVENT_OCCURRED0_LH1_AOERR0_MASK 0x0000000000002000UL 341#define UVXH_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL
315#define UV2H_EVENT_OCCURRED0_GR0_AOERR0_MASK 0x0000000000004000UL 342#define UVXH_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL
316#define UV2H_EVENT_OCCURRED0_GR1_AOERR0_MASK 0x0000000000008000UL 343#define UVXH_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL
317#define UV2H_EVENT_OCCURRED0_XB_AOERR0_MASK 0x0000000000010000UL 344#define UVXH_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL
318#define UV2H_EVENT_OCCURRED0_RT_AOERR0_MASK 0x0000000000020000UL 345#define UVXH_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL
319#define UV2H_EVENT_OCCURRED0_NI0_AOERR0_MASK 0x0000000000040000UL 346#define UVXH_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL
320#define UV2H_EVENT_OCCURRED0_NI1_AOERR0_MASK 0x0000000000080000UL 347#define UVXH_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL
321#define UV2H_EVENT_OCCURRED0_LB_AOERR1_MASK 0x0000000000100000UL 348#define UVXH_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL
322#define UV2H_EVENT_OCCURRED0_QP_AOERR1_MASK 0x0000000000200000UL 349#define UVXH_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL
323#define UV2H_EVENT_OCCURRED0_RH_AOERR1_MASK 0x0000000000400000UL 350#define UVXH_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL
324#define UV2H_EVENT_OCCURRED0_LH0_AOERR1_MASK 0x0000000000800000UL 351#define UVXH_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL
325#define UV2H_EVENT_OCCURRED0_LH1_AOERR1_MASK 0x0000000001000000UL 352#define UVXH_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL
326#define UV2H_EVENT_OCCURRED0_GR0_AOERR1_MASK 0x0000000002000000UL 353#define UVXH_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL
327#define UV2H_EVENT_OCCURRED0_GR1_AOERR1_MASK 0x0000000004000000UL 354#define UVXH_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL
328#define UV2H_EVENT_OCCURRED0_XB_AOERR1_MASK 0x0000000008000000UL 355#define UVXH_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL
329#define UV2H_EVENT_OCCURRED0_RT_AOERR1_MASK 0x0000000010000000UL 356#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL
330#define UV2H_EVENT_OCCURRED0_NI0_AOERR1_MASK 0x0000000020000000UL 357#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL
331#define UV2H_EVENT_OCCURRED0_NI1_AOERR1_MASK 0x0000000040000000UL 358#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL
332#define UV2H_EVENT_OCCURRED0_SYSTEM_SHUTDOWN_INT_MASK 0x0000000080000000UL 359#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL
333#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_0_MASK 0x0000000100000000UL 360#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL
334#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_1_MASK 0x0000000200000000UL 361#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL
335#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_2_MASK 0x0000000400000000UL 362#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL
336#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_3_MASK 0x0000000800000000UL 363#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL
337#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_4_MASK 0x0000001000000000UL 364#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL
338#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_5_MASK 0x0000002000000000UL 365#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL
339#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_6_MASK 0x0000004000000000UL 366#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL
340#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_7_MASK 0x0000008000000000UL 367#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL
341#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_8_MASK 0x0000010000000000UL 368#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL
342#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_9_MASK 0x0000020000000000UL 369#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL
343#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_10_MASK 0x0000040000000000UL 370#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL
344#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_11_MASK 0x0000080000000000UL 371#define UVXH_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL
345#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_12_MASK 0x0000100000000000UL 372#define UVXH_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL
346#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_13_MASK 0x0000200000000000UL 373#define UVXH_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL
347#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_14_MASK 0x0000400000000000UL 374#define UVXH_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL
348#define UV2H_EVENT_OCCURRED0_LB_IRQ_INT_15_MASK 0x0000800000000000UL 375#define UVXH_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL
349#define UV2H_EVENT_OCCURRED0_L1_NMI_INT_MASK 0x0001000000000000UL 376#define UVXH_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL
350#define UV2H_EVENT_OCCURRED0_STOP_CLOCK_MASK 0x0002000000000000UL 377#define UVXH_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL
351#define UV2H_EVENT_OCCURRED0_ASIC_TO_L1_MASK 0x0004000000000000UL 378#define UVXH_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL
352#define UV2H_EVENT_OCCURRED0_L1_TO_ASIC_MASK 0x0008000000000000UL 379#define UVXH_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
353#define UV2H_EVENT_OCCURRED0_LA_SEQ_TRIGGER_MASK 0x0010000000000000UL 380#define UVXH_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
354#define UV2H_EVENT_OCCURRED0_IPI_INT_MASK 0x0020000000000000UL 381#define UVXH_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
355#define UV2H_EVENT_OCCURRED0_EXTIO_INT0_MASK 0x0040000000000000UL 382#define UVXH_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
356#define UV2H_EVENT_OCCURRED0_EXTIO_INT1_MASK 0x0080000000000000UL
357#define UV2H_EVENT_OCCURRED0_EXTIO_INT2_MASK 0x0100000000000000UL
358#define UV2H_EVENT_OCCURRED0_EXTIO_INT3_MASK 0x0200000000000000UL
359#define UV2H_EVENT_OCCURRED0_PROFILE_INT_MASK 0x0400000000000000UL
360 383
361union uvh_event_occurred0_u { 384union uvh_event_occurred0_u {
362 unsigned long v; 385 unsigned long v;
363 struct uv1h_event_occurred0_s { 386 struct uvh_event_occurred0_s {
364 unsigned long lb_hcerr:1; /* RW, W1C */ 387 unsigned long lb_hcerr:1; /* RW, W1C */
365 unsigned long gr0_hcerr:1; /* RW, W1C */ 388 unsigned long rsvd_1_10:10;
366 unsigned long gr1_hcerr:1; /* RW, W1C */
367 unsigned long lh_hcerr:1; /* RW, W1C */
368 unsigned long rh_hcerr:1; /* RW, W1C */
369 unsigned long xn_hcerr:1; /* RW, W1C */
370 unsigned long si_hcerr:1; /* RW, W1C */
371 unsigned long lb_aoerr0:1; /* RW, W1C */
372 unsigned long gr0_aoerr0:1; /* RW, W1C */
373 unsigned long gr1_aoerr0:1; /* RW, W1C */
374 unsigned long lh_aoerr0:1; /* RW, W1C */
375 unsigned long rh_aoerr0:1; /* RW, W1C */ 389 unsigned long rh_aoerr0:1; /* RW, W1C */
376 unsigned long xn_aoerr0:1; /* RW, W1C */ 390 unsigned long rsvd_12_63:52;
377 unsigned long si_aoerr0:1; /* RW, W1C */ 391 } s;
378 unsigned long lb_aoerr1:1; /* RW, W1C */ 392 struct uvxh_event_occurred0_s {
379 unsigned long gr0_aoerr1:1; /* RW, W1C */
380 unsigned long gr1_aoerr1:1; /* RW, W1C */
381 unsigned long lh_aoerr1:1; /* RW, W1C */
382 unsigned long rh_aoerr1:1; /* RW, W1C */
383 unsigned long xn_aoerr1:1; /* RW, W1C */
384 unsigned long si_aoerr1:1; /* RW, W1C */
385 unsigned long rh_vpi_int:1; /* RW, W1C */
386 unsigned long system_shutdown_int:1; /* RW, W1C */
387 unsigned long lb_irq_int_0:1; /* RW, W1C */
388 unsigned long lb_irq_int_1:1; /* RW, W1C */
389 unsigned long lb_irq_int_2:1; /* RW, W1C */
390 unsigned long lb_irq_int_3:1; /* RW, W1C */
391 unsigned long lb_irq_int_4:1; /* RW, W1C */
392 unsigned long lb_irq_int_5:1; /* RW, W1C */
393 unsigned long lb_irq_int_6:1; /* RW, W1C */
394 unsigned long lb_irq_int_7:1; /* RW, W1C */
395 unsigned long lb_irq_int_8:1; /* RW, W1C */
396 unsigned long lb_irq_int_9:1; /* RW, W1C */
397 unsigned long lb_irq_int_10:1; /* RW, W1C */
398 unsigned long lb_irq_int_11:1; /* RW, W1C */
399 unsigned long lb_irq_int_12:1; /* RW, W1C */
400 unsigned long lb_irq_int_13:1; /* RW, W1C */
401 unsigned long lb_irq_int_14:1; /* RW, W1C */
402 unsigned long lb_irq_int_15:1; /* RW, W1C */
403 unsigned long l1_nmi_int:1; /* RW, W1C */
404 unsigned long stop_clock:1; /* RW, W1C */
405 unsigned long asic_to_l1:1; /* RW, W1C */
406 unsigned long l1_to_asic:1; /* RW, W1C */
407 unsigned long ltc_int:1; /* RW, W1C */
408 unsigned long la_seq_trigger:1; /* RW, W1C */
409 unsigned long ipi_int:1; /* RW, W1C */
410 unsigned long extio_int0:1; /* RW, W1C */
411 unsigned long extio_int1:1; /* RW, W1C */
412 unsigned long extio_int2:1; /* RW, W1C */
413 unsigned long extio_int3:1; /* RW, W1C */
414 unsigned long profile_int:1; /* RW, W1C */
415 unsigned long rtc0:1; /* RW, W1C */
416 unsigned long rtc1:1; /* RW, W1C */
417 unsigned long rtc2:1; /* RW, W1C */
418 unsigned long rtc3:1; /* RW, W1C */
419 unsigned long bau_data:1; /* RW, W1C */
420 unsigned long power_management_req:1; /* RW, W1C */
421 unsigned long rsvd_57_63:7;
422 } s1;
423 struct uv2h_event_occurred0_s {
424 unsigned long lb_hcerr:1; /* RW */ 393 unsigned long lb_hcerr:1; /* RW */
425 unsigned long qp_hcerr:1; /* RW */ 394 unsigned long qp_hcerr:1; /* RW */
426 unsigned long rh_hcerr:1; /* RW */ 395 unsigned long rh_hcerr:1; /* RW */
@@ -481,19 +450,20 @@ union uvh_event_occurred0_u {
481 unsigned long extio_int3:1; /* RW */ 450 unsigned long extio_int3:1; /* RW */
482 unsigned long profile_int:1; /* RW */ 451 unsigned long profile_int:1; /* RW */
483 unsigned long rsvd_59_63:5; 452 unsigned long rsvd_59_63:5;
484 } s2; 453 } sx;
485}; 454};
486 455
487/* ========================================================================= */ 456/* ========================================================================= */
488/* UVH_EVENT_OCCURRED0_ALIAS */ 457/* UVH_EVENT_OCCURRED0_ALIAS */
489/* ========================================================================= */ 458/* ========================================================================= */
490#define UVH_EVENT_OCCURRED0_ALIAS 0x0000000000070008UL 459#define UVH_EVENT_OCCURRED0_ALIAS 0x70008UL
491#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0 460#define UVH_EVENT_OCCURRED0_ALIAS_32 0x5f0
461
492 462
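
The _ALIAS register is the write-to-clear companion of EVENT_OCCURRED0, so acknowledging one event is a single MMR write of that event's mask. A hedged sketch using the uv_write_local_mmr() accessor from uv_hub.h:

    /* Sketch: acknowledge (clear) the RH_AOERR0 event bit. */
    static inline void uv_clear_rh_aoerr0_sketch(void)
    {
            uv_write_local_mmr(UVH_EVENT_OCCURRED0_ALIAS,
                               UVH_EVENT_OCCURRED0_RH_AOERR0_MASK);
    }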
493/* ========================================================================= */ 463/* ========================================================================= */
494/* UVH_GR0_TLB_INT0_CONFIG */ 464/* UVH_GR0_TLB_INT0_CONFIG */
495/* ========================================================================= */ 465/* ========================================================================= */
496#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL 466#define UVH_GR0_TLB_INT0_CONFIG 0x61b00UL
497 467
498#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0 468#define UVH_GR0_TLB_INT0_CONFIG_VECTOR_SHFT 0
499#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8 469#define UVH_GR0_TLB_INT0_CONFIG_DM_SHFT 8
@@ -531,7 +501,7 @@ union uvh_gr0_tlb_int0_config_u {
531/* ========================================================================= */ 501/* ========================================================================= */
532/* UVH_GR0_TLB_INT1_CONFIG */ 502/* UVH_GR0_TLB_INT1_CONFIG */
533/* ========================================================================= */ 503/* ========================================================================= */
534#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL 504#define UVH_GR0_TLB_INT1_CONFIG 0x61b40UL
535 505
536#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0 506#define UVH_GR0_TLB_INT1_CONFIG_VECTOR_SHFT 0
537#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8 507#define UVH_GR0_TLB_INT1_CONFIG_DM_SHFT 8
@@ -571,9 +541,11 @@ union uvh_gr0_tlb_int1_config_u {
571/* ========================================================================= */ 541/* ========================================================================= */
572#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL 542#define UV1H_GR0_TLB_MMR_CONTROL 0x401080UL
573#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL 543#define UV2H_GR0_TLB_MMR_CONTROL 0xc01080UL
574#define UVH_GR0_TLB_MMR_CONTROL (is_uv1_hub() ? \ 544#define UV3H_GR0_TLB_MMR_CONTROL 0xc01080UL
575 UV1H_GR0_TLB_MMR_CONTROL : \ 545#define UVH_GR0_TLB_MMR_CONTROL \
576 UV2H_GR0_TLB_MMR_CONTROL) 546 (is_uv1_hub() ? UV1H_GR0_TLB_MMR_CONTROL : \
547 (is_uv2_hub() ? UV2H_GR0_TLB_MMR_CONTROL : \
548 UV3H_GR0_TLB_MMR_CONTROL))
577 549
578#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 550#define UVH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
579#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 551#define UVH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -611,6 +583,21 @@ union uvh_gr0_tlb_int1_config_u {
611#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL 583#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
612#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL 584#define UV1H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
613 585
586#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
587#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
588#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
589#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
590#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
591#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
592#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
593#define UVXH_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
594#define UVXH_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
595#define UVXH_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
596#define UVXH_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
597#define UVXH_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
598#define UVXH_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
599#define UVXH_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
600
614#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0 601#define UV2H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
615#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 602#define UV2H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
616#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 603#define UV2H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -630,6 +617,23 @@ union uvh_gr0_tlb_int1_config_u {
630#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL 617#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
631#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL 618#define UV2H_GR0_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
632 619
620#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_SHFT 0
621#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
622#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
623#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
624#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
625#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
626#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_SHFT 31
627#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
628#define UV3H_GR0_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
629#define UV3H_GR0_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
630#define UV3H_GR0_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
631#define UV3H_GR0_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
632#define UV3H_GR0_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
633#define UV3H_GR0_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
634#define UV3H_GR0_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
635#define UV3H_GR0_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
636
633union uvh_gr0_tlb_mmr_control_u { 637union uvh_gr0_tlb_mmr_control_u {
634 unsigned long v; 638 unsigned long v;
635 struct uvh_gr0_tlb_mmr_control_s { 639 struct uvh_gr0_tlb_mmr_control_s {
@@ -642,7 +646,9 @@ union uvh_gr0_tlb_mmr_control_u {
642 unsigned long rsvd_21_29:9; 646 unsigned long rsvd_21_29:9;
643 unsigned long mmr_write:1; /* WP */ 647 unsigned long mmr_write:1; /* WP */
644 unsigned long mmr_read:1; /* WP */ 648 unsigned long mmr_read:1; /* WP */
645 unsigned long rsvd_32_63:32; 649 unsigned long rsvd_32_48:17;
650 unsigned long rsvd_49_51:3;
651 unsigned long rsvd_52_63:12;
646 } s; 652 } s;
647 struct uv1h_gr0_tlb_mmr_control_s { 653 struct uv1h_gr0_tlb_mmr_control_s {
648 unsigned long index:12; /* RW */ 654 unsigned long index:12; /* RW */
@@ -666,6 +672,23 @@ union uvh_gr0_tlb_mmr_control_u {
666 unsigned long mmr_inj_tlblruv:1; /* RW */ 672 unsigned long mmr_inj_tlblruv:1; /* RW */
667 unsigned long rsvd_61_63:3; 673 unsigned long rsvd_61_63:3;
668 } s1; 674 } s1;
675 struct uvxh_gr0_tlb_mmr_control_s {
676 unsigned long index:12; /* RW */
677 unsigned long mem_sel:2; /* RW */
678 unsigned long rsvd_14_15:2;
679 unsigned long auto_valid_en:1; /* RW */
680 unsigned long rsvd_17_19:3;
681 unsigned long mmr_hash_index_en:1; /* RW */
682 unsigned long rsvd_21_29:9;
683 unsigned long mmr_write:1; /* WP */
684 unsigned long mmr_read:1; /* WP */
685 unsigned long mmr_op_done:1; /* RW */
686 unsigned long rsvd_33_47:15;
687 unsigned long rsvd_48:1;
688 unsigned long rsvd_49_51:3;
689 unsigned long rsvd_52:1;
690 unsigned long rsvd_53_63:11;
691 } sx;
669 struct uv2h_gr0_tlb_mmr_control_s { 692 struct uv2h_gr0_tlb_mmr_control_s {
670 unsigned long index:12; /* RW */ 693 unsigned long index:12; /* RW */
671 unsigned long mem_sel:2; /* RW */ 694 unsigned long mem_sel:2; /* RW */
@@ -683,6 +706,24 @@ union uvh_gr0_tlb_mmr_control_u {
683 unsigned long mmr_inj_tlbram:1; /* RW */ 706 unsigned long mmr_inj_tlbram:1; /* RW */
684 unsigned long rsvd_53_63:11; 707 unsigned long rsvd_53_63:11;
685 } s2; 708 } s2;
709 struct uv3h_gr0_tlb_mmr_control_s {
710 unsigned long index:12; /* RW */
711 unsigned long mem_sel:2; /* RW */
712 unsigned long rsvd_14_15:2;
713 unsigned long auto_valid_en:1; /* RW */
714 unsigned long rsvd_17_19:3;
715 unsigned long mmr_hash_index_en:1; /* RW */
716 unsigned long ecc_sel:1; /* RW */
717 unsigned long rsvd_22_29:8;
718 unsigned long mmr_write:1; /* WP */
719 unsigned long mmr_read:1; /* WP */
720 unsigned long mmr_op_done:1; /* RW */
721 unsigned long rsvd_33_47:15;
722 unsigned long undef_48:1; /* Undefined */
723 unsigned long rsvd_49_51:3;
724 unsigned long undef_52:1; /* Undefined */
725 unsigned long rsvd_53_63:11;
726 } s3;
686}; 727};
687 728
688/* ========================================================================= */ 729/* ========================================================================= */
@@ -690,9 +731,11 @@ union uvh_gr0_tlb_mmr_control_u {
690/* ========================================================================= */ 731/* ========================================================================= */
691#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL 732#define UV1H_GR0_TLB_MMR_READ_DATA_HI 0x4010a0UL
692#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL 733#define UV2H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
693#define UVH_GR0_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \ 734#define UV3H_GR0_TLB_MMR_READ_DATA_HI 0xc010a0UL
694 UV1H_GR0_TLB_MMR_READ_DATA_HI : \ 735#define UVH_GR0_TLB_MMR_READ_DATA_HI \
695 UV2H_GR0_TLB_MMR_READ_DATA_HI) 736 (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_HI : \
737 (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_HI : \
738 UV3H_GR0_TLB_MMR_READ_DATA_HI))
696 739
697#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 740#define UVH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
698#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 741#define UVH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -703,6 +746,46 @@ union uvh_gr0_tlb_mmr_control_u {
703#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL 746#define UVH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
704#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL 747#define UVH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
705 748
749#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
750#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
751#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
752#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
753#define UV1H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
754#define UV1H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
755#define UV1H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
756#define UV1H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
757
758#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
759#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
760#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
761#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
762#define UVXH_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
763#define UVXH_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
764#define UVXH_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
765#define UVXH_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
766
767#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
768#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
769#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
770#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
771#define UV2H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
772#define UV2H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
773#define UV2H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
774#define UV2H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
775
776#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
777#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
778#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
779#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
780#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
781#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
782#define UV3H_GR0_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
783#define UV3H_GR0_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
784#define UV3H_GR0_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
785#define UV3H_GR0_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
786#define UV3H_GR0_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
787#define UV3H_GR0_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
788
706union uvh_gr0_tlb_mmr_read_data_hi_u { 789union uvh_gr0_tlb_mmr_read_data_hi_u {
707 unsigned long v; 790 unsigned long v;
708 struct uvh_gr0_tlb_mmr_read_data_hi_s { 791 struct uvh_gr0_tlb_mmr_read_data_hi_s {
@@ -712,6 +795,36 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
712 unsigned long larger:1; /* RO */ 795 unsigned long larger:1; /* RO */
713 unsigned long rsvd_45_63:19; 796 unsigned long rsvd_45_63:19;
714 } s; 797 } s;
798 struct uv1h_gr0_tlb_mmr_read_data_hi_s {
799 unsigned long pfn:41; /* RO */
800 unsigned long gaa:2; /* RO */
801 unsigned long dirty:1; /* RO */
802 unsigned long larger:1; /* RO */
803 unsigned long rsvd_45_63:19;
804 } s1;
805 struct uvxh_gr0_tlb_mmr_read_data_hi_s {
806 unsigned long pfn:41; /* RO */
807 unsigned long gaa:2; /* RO */
808 unsigned long dirty:1; /* RO */
809 unsigned long larger:1; /* RO */
810 unsigned long rsvd_45_63:19;
811 } sx;
812 struct uv2h_gr0_tlb_mmr_read_data_hi_s {
813 unsigned long pfn:41; /* RO */
814 unsigned long gaa:2; /* RO */
815 unsigned long dirty:1; /* RO */
816 unsigned long larger:1; /* RO */
817 unsigned long rsvd_45_63:19;
818 } s2;
819 struct uv3h_gr0_tlb_mmr_read_data_hi_s {
820 unsigned long pfn:41; /* RO */
821 unsigned long gaa:2; /* RO */
822 unsigned long dirty:1; /* RO */
823 unsigned long larger:1; /* RO */
824 unsigned long aa_ext:1; /* RO */
825 unsigned long undef_46_54:9; /* Undefined */
826 unsigned long way_ecc:9; /* RO */
827 } s3;
715}; 828};
716 829
717/* ========================================================================= */ 830/* ========================================================================= */
@@ -719,9 +832,11 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
719/* ========================================================================= */ 832/* ========================================================================= */
720#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL 833#define UV1H_GR0_TLB_MMR_READ_DATA_LO 0x4010a8UL
721#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL 834#define UV2H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
722#define UVH_GR0_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \ 835#define UV3H_GR0_TLB_MMR_READ_DATA_LO 0xc010a8UL
723 UV1H_GR0_TLB_MMR_READ_DATA_LO : \ 836#define UVH_GR0_TLB_MMR_READ_DATA_LO \
724 UV2H_GR0_TLB_MMR_READ_DATA_LO) 837 (is_uv1_hub() ? UV1H_GR0_TLB_MMR_READ_DATA_LO : \
838 (is_uv2_hub() ? UV2H_GR0_TLB_MMR_READ_DATA_LO : \
839 UV3H_GR0_TLB_MMR_READ_DATA_LO))
725 840
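The UVH_* accessor macros above now dispatch on the hub revision at run time instead of assuming UV1-vs-UV2. A minimal sketch (not kernel code) of how a caller picks up the right address; is_uv1_hub(), is_uv2_hub() and uv_read_local_mmr() are the existing helpers declared in <asm/uv/uv_hub.h>, while the wrapper name is illustrative:

/* Sketch only: read GR0 TLB read-data-low on whatever hub we booted on. */
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

static unsigned long read_gr0_tlb_data_lo(void)
{
	/* The UV1/UV2/UV3 MMR address is chosen inside the macro. */
	return uv_read_local_mmr(UVH_GR0_TLB_MMR_READ_DATA_LO);
}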
726#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 841#define UVH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
727#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 842#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -730,6 +845,34 @@ union uvh_gr0_tlb_mmr_read_data_hi_u {
730#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL 845#define UVH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
731#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL 846#define UVH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
732 847
848#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
849#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
850#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
851#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
852#define UV1H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
853#define UV1H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
854
855#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
856#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
857#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
858#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
859#define UVXH_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
860#define UVXH_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
861
862#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
863#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
864#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
865#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
866#define UV2H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
867#define UV2H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
868
869#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
870#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
871#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
872#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
873#define UV3H_GR0_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
874#define UV3H_GR0_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
875
733union uvh_gr0_tlb_mmr_read_data_lo_u { 876union uvh_gr0_tlb_mmr_read_data_lo_u {
734 unsigned long v; 877 unsigned long v;
735 struct uvh_gr0_tlb_mmr_read_data_lo_s { 878 struct uvh_gr0_tlb_mmr_read_data_lo_s {
@@ -737,12 +880,32 @@ union uvh_gr0_tlb_mmr_read_data_lo_u {
737 unsigned long asid:24; /* RO */ 880 unsigned long asid:24; /* RO */
738 unsigned long valid:1; /* RO */ 881 unsigned long valid:1; /* RO */
739 } s; 882 } s;
883 struct uv1h_gr0_tlb_mmr_read_data_lo_s {
884 unsigned long vpn:39; /* RO */
885 unsigned long asid:24; /* RO */
886 unsigned long valid:1; /* RO */
887 } s1;
888 struct uvxh_gr0_tlb_mmr_read_data_lo_s {
889 unsigned long vpn:39; /* RO */
890 unsigned long asid:24; /* RO */
891 unsigned long valid:1; /* RO */
892 } sx;
893 struct uv2h_gr0_tlb_mmr_read_data_lo_s {
894 unsigned long vpn:39; /* RO */
895 unsigned long asid:24; /* RO */
896 unsigned long valid:1; /* RO */
897 } s2;
898 struct uv3h_gr0_tlb_mmr_read_data_lo_s {
899 unsigned long vpn:39; /* RO */
900 unsigned long asid:24; /* RO */
901 unsigned long valid:1; /* RO */
902 } s3;
740}; 903};
741 904
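All four hub views of this register (s1, sx, s2, s3) lay VPN/ASID/VALID out identically, so a caller can decode either through the bitfields or through the SHFT/MASK pairs. A self-contained sketch showing the two decodings agree; the masks are copied from the defines above, the sample value is made up, and the bitfield layout assumes the GCC x86-64 rules this header already relies on:

#include <stdio.h>

#define VPN_SHFT  0
#define VPN_MASK  0x0000007fffffffffUL	/* bits 38:0  */
#define ASID_SHFT 39
#define ASID_MASK 0x7fffff8000000000UL	/* bits 62:39 */

union read_data_lo {
	unsigned long v;
	struct {
		unsigned long vpn:39;
		unsigned long asid:24;
		unsigned long valid:1;
	} s;
};

int main(void)
{
	union read_data_lo d = { .v = 0x8000001234567890UL }; /* made up */

	/* The bitfield view and the mask/shift arithmetic must agree. */
	printf("vpn  %lx == %lx\n", (unsigned long)d.s.vpn,
	       (d.v & VPN_MASK) >> VPN_SHFT);
	printf("asid %lx == %lx\n", (unsigned long)d.s.asid,
	       (d.v & ASID_MASK) >> ASID_SHFT);
	return 0;
}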
742/* ========================================================================= */ 905/* ========================================================================= */
743/* UVH_GR1_TLB_INT0_CONFIG */ 906/* UVH_GR1_TLB_INT0_CONFIG */
744/* ========================================================================= */ 907/* ========================================================================= */
745#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL 908#define UVH_GR1_TLB_INT0_CONFIG 0x61f00UL
746 909
747#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0 910#define UVH_GR1_TLB_INT0_CONFIG_VECTOR_SHFT 0
748#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8 911#define UVH_GR1_TLB_INT0_CONFIG_DM_SHFT 8
@@ -780,7 +943,7 @@ union uvh_gr1_tlb_int0_config_u {
780/* ========================================================================= */ 943/* ========================================================================= */
781/* UVH_GR1_TLB_INT1_CONFIG */ 944/* UVH_GR1_TLB_INT1_CONFIG */
782/* ========================================================================= */ 945/* ========================================================================= */
783#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL 946#define UVH_GR1_TLB_INT1_CONFIG 0x61f40UL
784 947
785#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0 948#define UVH_GR1_TLB_INT1_CONFIG_VECTOR_SHFT 0
786#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8 949#define UVH_GR1_TLB_INT1_CONFIG_DM_SHFT 8
@@ -820,9 +983,11 @@ union uvh_gr1_tlb_int1_config_u {
820/* ========================================================================= */ 983/* ========================================================================= */
821#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL 984#define UV1H_GR1_TLB_MMR_CONTROL 0x801080UL
822#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL 985#define UV2H_GR1_TLB_MMR_CONTROL 0x1001080UL
823#define UVH_GR1_TLB_MMR_CONTROL (is_uv1_hub() ? \ 986#define UV3H_GR1_TLB_MMR_CONTROL 0x1001080UL
824 UV1H_GR1_TLB_MMR_CONTROL : \ 987#define UVH_GR1_TLB_MMR_CONTROL \
825 UV2H_GR1_TLB_MMR_CONTROL) 988 (is_uv1_hub() ? UV1H_GR1_TLB_MMR_CONTROL : \
989 (is_uv2_hub() ? UV2H_GR1_TLB_MMR_CONTROL : \
990 UV3H_GR1_TLB_MMR_CONTROL))
826 991
827#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 992#define UVH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
828#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 993#define UVH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
@@ -860,6 +1025,21 @@ union uvh_gr1_tlb_int1_config_u {
860#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL 1025#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRREG_MASK 0x0100000000000000UL
861#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL 1026#define UV1H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBLRUV_MASK 0x1000000000000000UL
862 1027
1028#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
1029#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
1030#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
1031#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
1032#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
1033#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
1034#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
1035#define UVXH_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
1036#define UVXH_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
1037#define UVXH_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
1038#define UVXH_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
1039#define UVXH_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
1040#define UVXH_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
1041#define UVXH_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
1042
863#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0 1043#define UV2H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
864#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12 1044#define UV2H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
865#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16 1045#define UV2H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
@@ -879,6 +1059,23 @@ union uvh_gr1_tlb_int1_config_u {
879#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL 1059#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_CON_MASK 0x0001000000000000UL
880#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL 1060#define UV2H_GR1_TLB_MMR_CONTROL_MMR_INJ_TLBRAM_MASK 0x0010000000000000UL
881 1061
1062#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_SHFT 0
1063#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_SHFT 12
1064#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_SHFT 16
1065#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_SHFT 20
1066#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_SHFT 21
1067#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_SHFT 30
1068#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_SHFT 31
1069#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_SHFT 32
1070#define UV3H_GR1_TLB_MMR_CONTROL_INDEX_MASK 0x0000000000000fffUL
1071#define UV3H_GR1_TLB_MMR_CONTROL_MEM_SEL_MASK 0x0000000000003000UL
1072#define UV3H_GR1_TLB_MMR_CONTROL_AUTO_VALID_EN_MASK 0x0000000000010000UL
1073#define UV3H_GR1_TLB_MMR_CONTROL_MMR_HASH_INDEX_EN_MASK 0x0000000000100000UL
1074#define UV3H_GR1_TLB_MMR_CONTROL_ECC_SEL_MASK 0x0000000000200000UL
1075#define UV3H_GR1_TLB_MMR_CONTROL_MMR_WRITE_MASK 0x0000000040000000UL
1076#define UV3H_GR1_TLB_MMR_CONTROL_MMR_READ_MASK 0x0000000080000000UL
1077#define UV3H_GR1_TLB_MMR_CONTROL_MMR_OP_DONE_MASK 0x0000000100000000UL
1078
882union uvh_gr1_tlb_mmr_control_u { 1079union uvh_gr1_tlb_mmr_control_u {
883 unsigned long v; 1080 unsigned long v;
884 struct uvh_gr1_tlb_mmr_control_s { 1081 struct uvh_gr1_tlb_mmr_control_s {
@@ -891,7 +1088,9 @@ union uvh_gr1_tlb_mmr_control_u {
891 unsigned long rsvd_21_29:9; 1088 unsigned long rsvd_21_29:9;
892 unsigned long mmr_write:1; /* WP */ 1089 unsigned long mmr_write:1; /* WP */
893 unsigned long mmr_read:1; /* WP */ 1090 unsigned long mmr_read:1; /* WP */
894 unsigned long rsvd_32_63:32; 1091 unsigned long rsvd_32_48:17;
1092 unsigned long rsvd_49_51:3;
1093 unsigned long rsvd_52_63:12;
895 } s; 1094 } s;
896 struct uv1h_gr1_tlb_mmr_control_s { 1095 struct uv1h_gr1_tlb_mmr_control_s {
897 unsigned long index:12; /* RW */ 1096 unsigned long index:12; /* RW */
@@ -915,6 +1114,23 @@ union uvh_gr1_tlb_mmr_control_u {
915 unsigned long mmr_inj_tlblruv:1; /* RW */ 1114 unsigned long mmr_inj_tlblruv:1; /* RW */
916 unsigned long rsvd_61_63:3; 1115 unsigned long rsvd_61_63:3;
917 } s1; 1116 } s1;
1117 struct uvxh_gr1_tlb_mmr_control_s {
1118 unsigned long index:12; /* RW */
1119 unsigned long mem_sel:2; /* RW */
1120 unsigned long rsvd_14_15:2;
1121 unsigned long auto_valid_en:1; /* RW */
1122 unsigned long rsvd_17_19:3;
1123 unsigned long mmr_hash_index_en:1; /* RW */
1124 unsigned long rsvd_21_29:9;
1125 unsigned long mmr_write:1; /* WP */
1126 unsigned long mmr_read:1; /* WP */
1127 unsigned long mmr_op_done:1; /* RW */
1128 unsigned long rsvd_33_47:15;
1129 unsigned long rsvd_48:1;
1130 unsigned long rsvd_49_51:3;
1131 unsigned long rsvd_52:1;
1132 unsigned long rsvd_53_63:11;
1133 } sx;
918 struct uv2h_gr1_tlb_mmr_control_s { 1134 struct uv2h_gr1_tlb_mmr_control_s {
919 unsigned long index:12; /* RW */ 1135 unsigned long index:12; /* RW */
920 unsigned long mem_sel:2; /* RW */ 1136 unsigned long mem_sel:2; /* RW */
@@ -932,6 +1148,24 @@ union uvh_gr1_tlb_mmr_control_u {
932 unsigned long mmr_inj_tlbram:1; /* RW */ 1148 unsigned long mmr_inj_tlbram:1; /* RW */
933 unsigned long rsvd_53_63:11; 1149 unsigned long rsvd_53_63:11;
934 } s2; 1150 } s2;
1151 struct uv3h_gr1_tlb_mmr_control_s {
1152 unsigned long index:12; /* RW */
1153 unsigned long mem_sel:2; /* RW */
1154 unsigned long rsvd_14_15:2;
1155 unsigned long auto_valid_en:1; /* RW */
1156 unsigned long rsvd_17_19:3;
1157 unsigned long mmr_hash_index_en:1; /* RW */
1158 unsigned long ecc_sel:1; /* RW */
1159 unsigned long rsvd_22_29:8;
1160 unsigned long mmr_write:1; /* WP */
1161 unsigned long mmr_read:1; /* WP */
1162 unsigned long mmr_op_done:1; /* RW */
1163 unsigned long rsvd_33_47:15;
1164 unsigned long undef_48:1; /* Undefined */
1165 unsigned long rsvd_49_51:3;
1166 unsigned long undef_52:1; /* Undefined */
1167 unsigned long rsvd_53_63:11;
1168 } s3;
935}; 1169};
936 1170
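Only the UV2/UV3 views (sx, s2, s3) carry the mmr_op_done status bit; it does not exist on UV1. A hypothetical sketch of driving a GR1 TLB entry read through this register on UV2/UV3 — uv_read_local_mmr()/uv_write_local_mmr() are the real uv_hub.h accessors, but the sequencing and the helper name are inferred from the field names, not taken from kernel code:

/* Illustrative only: fetch a GR1 TLB entry, polling the UVX-common
 * mmr_op_done bit (valid on UV2 and UV3, absent on UV1). */
static unsigned long gr1_tlb_read(unsigned int index)
{
	union uvh_gr1_tlb_mmr_control_u ctl = { .v = 0 };

	ctl.sx.index    = index;	/* which TLB entry to fetch */
	ctl.sx.mmr_read = 1;		/* trigger the read */
	uv_write_local_mmr(UVH_GR1_TLB_MMR_CONTROL, ctl.v);

	do {
		ctl.v = uv_read_local_mmr(UVH_GR1_TLB_MMR_CONTROL);
	} while (!ctl.sx.mmr_op_done);

	return uv_read_local_mmr(UVH_GR1_TLB_MMR_READ_DATA_LO);
}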
937/* ========================================================================= */ 1171/* ========================================================================= */
@@ -939,9 +1173,11 @@ union uvh_gr1_tlb_mmr_control_u {
939/* ========================================================================= */ 1173/* ========================================================================= */
940#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL 1174#define UV1H_GR1_TLB_MMR_READ_DATA_HI 0x8010a0UL
941#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL 1175#define UV2H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
942#define UVH_GR1_TLB_MMR_READ_DATA_HI (is_uv1_hub() ? \ 1176#define UV3H_GR1_TLB_MMR_READ_DATA_HI 0x10010a0UL
943 UV1H_GR1_TLB_MMR_READ_DATA_HI : \ 1177#define UVH_GR1_TLB_MMR_READ_DATA_HI \
944 UV2H_GR1_TLB_MMR_READ_DATA_HI) 1178 (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_HI : \
1179 (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_HI : \
1180 UV3H_GR1_TLB_MMR_READ_DATA_HI))
945 1181
946#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0 1182#define UVH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
947#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41 1183#define UVH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
@@ -952,6 +1188,46 @@ union uvh_gr1_tlb_mmr_control_u {
952#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL 1188#define UVH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
953#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL 1189#define UVH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
954 1190
1191#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1192#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1193#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1194#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1195#define UV1H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1196#define UV1H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1197#define UV1H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1198#define UV1H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1199
1200#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1201#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1202#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1203#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1204#define UVXH_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1205#define UVXH_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1206#define UVXH_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1207#define UVXH_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1208
1209#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1210#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1211#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1212#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1213#define UV2H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1214#define UV2H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1215#define UV2H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1216#define UV2H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1217
1218#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_SHFT 0
1219#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_SHFT 41
1220#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_SHFT 43
1221#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_SHFT 44
1222#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_SHFT 45
1223#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_SHFT 55
1224#define UV3H_GR1_TLB_MMR_READ_DATA_HI_PFN_MASK 0x000001ffffffffffUL
1225#define UV3H_GR1_TLB_MMR_READ_DATA_HI_GAA_MASK 0x0000060000000000UL
1226#define UV3H_GR1_TLB_MMR_READ_DATA_HI_DIRTY_MASK 0x0000080000000000UL
1227#define UV3H_GR1_TLB_MMR_READ_DATA_HI_LARGER_MASK 0x0000100000000000UL
1228#define UV3H_GR1_TLB_MMR_READ_DATA_HI_AA_EXT_MASK 0x0000200000000000UL
1229#define UV3H_GR1_TLB_MMR_READ_DATA_HI_WAY_ECC_MASK 0xff80000000000000UL
1230
955union uvh_gr1_tlb_mmr_read_data_hi_u { 1231union uvh_gr1_tlb_mmr_read_data_hi_u {
956 unsigned long v; 1232 unsigned long v;
957 struct uvh_gr1_tlb_mmr_read_data_hi_s { 1233 struct uvh_gr1_tlb_mmr_read_data_hi_s {
@@ -961,6 +1237,36 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
961 unsigned long larger:1; /* RO */ 1237 unsigned long larger:1; /* RO */
962 unsigned long rsvd_45_63:19; 1238 unsigned long rsvd_45_63:19;
963 } s; 1239 } s;
1240 struct uv1h_gr1_tlb_mmr_read_data_hi_s {
1241 unsigned long pfn:41; /* RO */
1242 unsigned long gaa:2; /* RO */
1243 unsigned long dirty:1; /* RO */
1244 unsigned long larger:1; /* RO */
1245 unsigned long rsvd_45_63:19;
1246 } s1;
1247 struct uvxh_gr1_tlb_mmr_read_data_hi_s {
1248 unsigned long pfn:41; /* RO */
1249 unsigned long gaa:2; /* RO */
1250 unsigned long dirty:1; /* RO */
1251 unsigned long larger:1; /* RO */
1252 unsigned long rsvd_45_63:19;
1253 } sx;
1254 struct uv2h_gr1_tlb_mmr_read_data_hi_s {
1255 unsigned long pfn:41; /* RO */
1256 unsigned long gaa:2; /* RO */
1257 unsigned long dirty:1; /* RO */
1258 unsigned long larger:1; /* RO */
1259 unsigned long rsvd_45_63:19;
1260 } s2;
1261 struct uv3h_gr1_tlb_mmr_read_data_hi_s {
1262 unsigned long pfn:41; /* RO */
1263 unsigned long gaa:2; /* RO */
1264 unsigned long dirty:1; /* RO */
1265 unsigned long larger:1; /* RO */
1266 unsigned long aa_ext:1; /* RO */
1267 unsigned long undef_46_54:9; /* Undefined */
1268 unsigned long way_ecc:9; /* RO */
1269 } s3;
964}; 1270};
965 1271
966/* ========================================================================= */ 1272/* ========================================================================= */
@@ -968,9 +1274,11 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
968/* ========================================================================= */ 1274/* ========================================================================= */
969#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL 1275#define UV1H_GR1_TLB_MMR_READ_DATA_LO 0x8010a8UL
970#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL 1276#define UV2H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
971#define UVH_GR1_TLB_MMR_READ_DATA_LO (is_uv1_hub() ? \ 1277#define UV3H_GR1_TLB_MMR_READ_DATA_LO 0x10010a8UL
972 UV1H_GR1_TLB_MMR_READ_DATA_LO : \ 1278#define UVH_GR1_TLB_MMR_READ_DATA_LO \
973 UV2H_GR1_TLB_MMR_READ_DATA_LO) 1279 (is_uv1_hub() ? UV1H_GR1_TLB_MMR_READ_DATA_LO : \
1280 (is_uv2_hub() ? UV2H_GR1_TLB_MMR_READ_DATA_LO : \
1281 UV3H_GR1_TLB_MMR_READ_DATA_LO))
974 1282
975#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0 1283#define UVH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
976#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39 1284#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
@@ -979,6 +1287,34 @@ union uvh_gr1_tlb_mmr_read_data_hi_u {
979#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL 1287#define UVH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
980#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL 1288#define UVH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
981 1289
1290#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1291#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1292#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1293#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1294#define UV1H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1295#define UV1H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1296
1297#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1298#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1299#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1300#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1301#define UVXH_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1302#define UVXH_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1303
1304#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1305#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1306#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1307#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1308#define UV2H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1309#define UV2H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1310
1311#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_SHFT 0
1312#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_SHFT 39
1313#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_SHFT 63
1314#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VPN_MASK 0x0000007fffffffffUL
1315#define UV3H_GR1_TLB_MMR_READ_DATA_LO_ASID_MASK 0x7fffff8000000000UL
1316#define UV3H_GR1_TLB_MMR_READ_DATA_LO_VALID_MASK 0x8000000000000000UL
1317
982union uvh_gr1_tlb_mmr_read_data_lo_u { 1318union uvh_gr1_tlb_mmr_read_data_lo_u {
983 unsigned long v; 1319 unsigned long v;
984 struct uvh_gr1_tlb_mmr_read_data_lo_s { 1320 struct uvh_gr1_tlb_mmr_read_data_lo_s {
@@ -986,12 +1322,32 @@ union uvh_gr1_tlb_mmr_read_data_lo_u {
986 unsigned long asid:24; /* RO */ 1322 unsigned long asid:24; /* RO */
987 unsigned long valid:1; /* RO */ 1323 unsigned long valid:1; /* RO */
988 } s; 1324 } s;
1325 struct uv1h_gr1_tlb_mmr_read_data_lo_s {
1326 unsigned long vpn:39; /* RO */
1327 unsigned long asid:24; /* RO */
1328 unsigned long valid:1; /* RO */
1329 } s1;
1330 struct uvxh_gr1_tlb_mmr_read_data_lo_s {
1331 unsigned long vpn:39; /* RO */
1332 unsigned long asid:24; /* RO */
1333 unsigned long valid:1; /* RO */
1334 } sx;
1335 struct uv2h_gr1_tlb_mmr_read_data_lo_s {
1336 unsigned long vpn:39; /* RO */
1337 unsigned long asid:24; /* RO */
1338 unsigned long valid:1; /* RO */
1339 } s2;
1340 struct uv3h_gr1_tlb_mmr_read_data_lo_s {
1341 unsigned long vpn:39; /* RO */
1342 unsigned long asid:24; /* RO */
1343 unsigned long valid:1; /* RO */
1344 } s3;
989}; 1345};
990 1346
991/* ========================================================================= */ 1347/* ========================================================================= */
992/* UVH_INT_CMPB */ 1348/* UVH_INT_CMPB */
993/* ========================================================================= */ 1349/* ========================================================================= */
994#define UVH_INT_CMPB 0x22080UL 1350#define UVH_INT_CMPB 0x22080UL
995 1351
996#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0 1352#define UVH_INT_CMPB_REAL_TIME_CMPB_SHFT 0
997#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL 1353#define UVH_INT_CMPB_REAL_TIME_CMPB_MASK 0x00ffffffffffffffUL
@@ -1007,10 +1363,13 @@ union uvh_int_cmpb_u {
1007/* ========================================================================= */ 1363/* ========================================================================= */
1008/* UVH_INT_CMPC */ 1364/* UVH_INT_CMPC */
1009/* ========================================================================= */ 1365/* ========================================================================= */
1010#define UVH_INT_CMPC 0x22100UL 1366#define UVH_INT_CMPC 0x22100UL
1367
1368#define UV1H_INT_CMPC_REAL_TIME_CMPC_SHFT 0
1369#define UV1H_INT_CMPC_REAL_TIME_CMPC_MASK 0x00ffffffffffffffUL
1011 1370
1012#define UVH_INT_CMPC_REAL_TIME_CMPC_SHFT 0 1371#define UVXH_INT_CMPC_REAL_TIME_CMP_2_SHFT 0
1013#define UVH_INT_CMPC_REAL_TIME_CMPC_MASK 0xffffffffffffffUL 1372#define UVXH_INT_CMPC_REAL_TIME_CMP_2_MASK 0x00ffffffffffffffUL
1014 1373
1015union uvh_int_cmpc_u { 1374union uvh_int_cmpc_u {
1016 unsigned long v; 1375 unsigned long v;
@@ -1023,10 +1382,13 @@ union uvh_int_cmpc_u {
1023/* ========================================================================= */ 1382/* ========================================================================= */
1024/* UVH_INT_CMPD */ 1383/* UVH_INT_CMPD */
1025/* ========================================================================= */ 1384/* ========================================================================= */
1026#define UVH_INT_CMPD 0x22180UL 1385#define UVH_INT_CMPD 0x22180UL
1027 1386
1028#define UVH_INT_CMPD_REAL_TIME_CMPD_SHFT 0 1387#define UV1H_INT_CMPD_REAL_TIME_CMPD_SHFT 0
1029#define UVH_INT_CMPD_REAL_TIME_CMPD_MASK 0xffffffffffffffUL 1388#define UV1H_INT_CMPD_REAL_TIME_CMPD_MASK 0x00ffffffffffffffUL
1389
1390#define UVXH_INT_CMPD_REAL_TIME_CMP_3_SHFT 0
1391#define UVXH_INT_CMPD_REAL_TIME_CMP_3_MASK 0x00ffffffffffffffUL
1030 1392
1031union uvh_int_cmpd_u { 1393union uvh_int_cmpd_u {
1032 unsigned long v; 1394 unsigned long v;
@@ -1039,8 +1401,8 @@ union uvh_int_cmpd_u {
1039/* ========================================================================= */ 1401/* ========================================================================= */
1040/* UVH_IPI_INT */ 1402/* UVH_IPI_INT */
1041/* ========================================================================= */ 1403/* ========================================================================= */
1042#define UVH_IPI_INT 0x60500UL 1404#define UVH_IPI_INT 0x60500UL
1043#define UVH_IPI_INT_32 0x348 1405#define UVH_IPI_INT_32 0x348
1044 1406
1045#define UVH_IPI_INT_VECTOR_SHFT 0 1407#define UVH_IPI_INT_VECTOR_SHFT 0
1046#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8 1408#define UVH_IPI_INT_DELIVERY_MODE_SHFT 8
@@ -1069,8 +1431,8 @@ union uvh_ipi_int_u {
1069/* ========================================================================= */ 1431/* ========================================================================= */
1070/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */ 1432/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST */
1071/* ========================================================================= */ 1433/* ========================================================================= */
1072#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL 1434#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST 0x320050UL
1073#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0 1435#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_32 0x9c0
1074 1436
1075#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4 1437#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_ADDRESS_SHFT 4
1076#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49 1438#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST_NODE_ID_SHFT 49
@@ -1091,8 +1453,8 @@ union uvh_lb_bau_intd_payload_queue_first_u {
1091/* ========================================================================= */ 1453/* ========================================================================= */
1092/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */ 1454/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST */
1093/* ========================================================================= */ 1455/* ========================================================================= */
1094#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL 1456#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST 0x320060UL
1095#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8 1457#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_32 0x9c8
1096 1458
1097#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4 1459#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_SHFT 4
1098#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL 1460#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1109,8 +1471,8 @@ union uvh_lb_bau_intd_payload_queue_last_u {
1109/* ========================================================================= */ 1471/* ========================================================================= */
1110/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */ 1472/* UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL */
1111/* ========================================================================= */ 1473/* ========================================================================= */
1112#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL 1474#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL 0x320070UL
1113#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0 1475#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_32 0x9d0
1114 1476
1115#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4 1477#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_SHFT 4
1116#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL 1478#define UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL_ADDRESS_MASK 0x000007fffffffff0UL
@@ -1127,8 +1489,8 @@ union uvh_lb_bau_intd_payload_queue_tail_u {
1127/* ========================================================================= */ 1489/* ========================================================================= */
1128/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */ 1490/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE */
1129/* ========================================================================= */ 1491/* ========================================================================= */
1130#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL 1492#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE 0x320080UL
1131#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68 1493#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_32 0xa68
1132 1494
1133#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0 1495#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_0_SHFT 0
1134#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1 1496#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_PENDING_1_SHFT 1
@@ -1189,14 +1551,21 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1189/* ========================================================================= */ 1551/* ========================================================================= */
1190/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */ 1552/* UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS */
1191/* ========================================================================= */ 1553/* ========================================================================= */
1192#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x0000000000320088UL 1554#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS 0x320088UL
1193#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70 1555#define UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS_32 0xa70
1556
1194 1557
1195/* ========================================================================= */ 1558/* ========================================================================= */
1196/* UVH_LB_BAU_MISC_CONTROL */ 1559/* UVH_LB_BAU_MISC_CONTROL */
1197/* ========================================================================= */ 1560/* ========================================================================= */
1198#define UVH_LB_BAU_MISC_CONTROL 0x320170UL 1561#define UVH_LB_BAU_MISC_CONTROL 0x320170UL
1199#define UVH_LB_BAU_MISC_CONTROL_32 0xa10 1562#define UV1H_LB_BAU_MISC_CONTROL 0x320170UL
1563#define UV2H_LB_BAU_MISC_CONTROL 0x320170UL
1564#define UV3H_LB_BAU_MISC_CONTROL 0x320170UL
1565#define UVH_LB_BAU_MISC_CONTROL_32 0xa10
 1566#define UV1H_LB_BAU_MISC_CONTROL_32			0xa10
 1567#define UV2H_LB_BAU_MISC_CONTROL_32			0xa10
 1568#define UV3H_LB_BAU_MISC_CONTROL_32			0xa10
1200 1569
1201#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1570#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1202#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1571#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1213,6 +1582,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1213#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24 1582#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
1214#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27 1583#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
1215#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28 1584#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1585#define UVH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1216#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL 1586#define UVH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1217#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL 1587#define UVH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1218#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL 1588#define UVH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
@@ -1228,6 +1598,7 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1228#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL 1598#define UVH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1229#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL 1599#define UVH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1230#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 1600#define UVH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1601#define UVH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1231 1602
1232#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1603#define UV1H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1233#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1604#define UV1H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
@@ -1262,6 +1633,53 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1262#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL 1633#define UV1H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1263#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL 1634#define UV1H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1264 1635
1636#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1637#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
1638#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
1639#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
1640#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
1641#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
1642#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
1643#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
1644#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
1645#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
1646#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
1647#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
1648#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
1649#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
1650#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1651#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
1652#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
1653#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
1654#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
1655#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
1656#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
1657#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
1658#define UVXH_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1659#define UVXH_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1660#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1661#define UVXH_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1662#define UVXH_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1663#define UVXH_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1664#define UVXH_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1665#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1666#define UVXH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1667#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1668#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1669#define UVXH_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1670#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1671#define UVXH_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1672#define UVXH_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1673#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1674#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
1675#define UVXH_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
1676#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
1677#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
1678#define UVXH_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
1679#define UVXH_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
1680#define UVXH_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
1681#define UVXH_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1682
1265#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0 1683#define UV2H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1266#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8 1684#define UV2H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
1267#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9 1685#define UV2H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
@@ -1309,6 +1727,59 @@ union uvh_lb_bau_intd_software_acknowledge_u {
1309#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL 1727#define UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
1310#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL 1728#define UV2H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1311 1729
1730#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_SHFT 0
1731#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_SHFT 8
1732#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_SHFT 9
1733#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_SHFT 10
1734#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_SHFT 11
1735#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_SHFT 14
1736#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT 15
1737#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT 16
1738#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_SHFT 20
1739#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_SHFT 21
1740#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_SHFT 22
1741#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_SHFT 23
1742#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_SHFT 24
1743#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_SHFT 27
1744#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_SHFT 28
1745#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_SHFT 29
1746#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_SHFT 30
1747#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_SHFT 31
1748#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_SHFT 32
1749#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 33
1750#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_SHFT 34
1751#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT 35
1752#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_SHFT 36
1753#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_SHFT 37
1754#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_SHFT 38
1755#define UV3H_LB_BAU_MISC_CONTROL_FUN_SHFT 48
1756#define UV3H_LB_BAU_MISC_CONTROL_REJECTION_DELAY_MASK 0x00000000000000ffUL
1757#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_MASK 0x0000000000000100UL
1758#define UV3H_LB_BAU_MISC_CONTROL_FORCE_BROADCAST_MASK 0x0000000000000200UL
1759#define UV3H_LB_BAU_MISC_CONTROL_FORCE_LOCK_NOP_MASK 0x0000000000000400UL
1760#define UV3H_LB_BAU_MISC_CONTROL_QPI_AGENT_PRESENCE_VECTOR_MASK 0x0000000000003800UL
1761#define UV3H_LB_BAU_MISC_CONTROL_DESCRIPTOR_FETCH_MODE_MASK 0x0000000000004000UL
1762#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_MASK 0x0000000000008000UL
1763#define UV3H_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK 0x00000000000f0000UL
1764#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_DUAL_MAPPING_MODE_MASK 0x0000000000100000UL
1765#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_DECODE_ENABLE_MASK 0x0000000000200000UL
1766#define UV3H_LB_BAU_MISC_CONTROL_VGA_IO_PORT_16_BIT_DECODE_MASK 0x0000000000400000UL
1767#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_DEST_REGISTRATION_MASK 0x0000000000800000UL
1768#define UV3H_LB_BAU_MISC_CONTROL_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000007000000UL
1769#define UV3H_LB_BAU_MISC_CONTROL_USE_INCOMING_PRIORITY_MASK 0x0000000008000000UL
1770#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_PROGRAMMED_INITIAL_PRIORITY_MASK 0x0000000010000000UL
1771#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_AUTOMATIC_APIC_MODE_SELECTION_MASK 0x0000000020000000UL
1772#define UV3H_LB_BAU_MISC_CONTROL_APIC_MODE_STATUS_MASK 0x0000000040000000UL
1773#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INTERRUPTS_TO_SELF_MASK 0x0000000080000000UL
1774#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_LOCK_BASED_SYSTEM_FLUSH_MASK 0x0000000100000000UL
1775#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_MASK 0x0000000200000000UL
1776#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_INT_PRIO_UDT_TO_SELF_MASK 0x0000000400000000UL
1777#define UV3H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_MASK 0x0000000800000000UL
1778#define UV3H_LB_BAU_MISC_CONTROL_SUPPRESS_QUIESCE_MSGS_TO_QPI_MASK 0x0000001000000000UL
1779#define UV3H_LB_BAU_MISC_CONTROL_ENABLE_INTD_PREFETCH_HINT_MASK 0x0000002000000000UL
1780#define UV3H_LB_BAU_MISC_CONTROL_THREAD_KILL_TIMEBASE_MASK 0x00003fc000000000UL
1781#define UV3H_LB_BAU_MISC_CONTROL_FUN_MASK 0xffff000000000000UL
1782
1312union uvh_lb_bau_misc_control_u { 1783union uvh_lb_bau_misc_control_u {
1313 unsigned long v; 1784 unsigned long v;
1314 struct uvh_lb_bau_misc_control_s { 1785 struct uvh_lb_bau_misc_control_s {
@@ -1327,7 +1798,8 @@ union uvh_lb_bau_misc_control_u {
1327 unsigned long programmed_initial_priority:3; /* RW */ 1798 unsigned long programmed_initial_priority:3; /* RW */
1328 unsigned long use_incoming_priority:1; /* RW */ 1799 unsigned long use_incoming_priority:1; /* RW */
1329 unsigned long enable_programmed_initial_priority:1;/* RW */ 1800 unsigned long enable_programmed_initial_priority:1;/* RW */
1330 unsigned long rsvd_29_63:35; 1801 unsigned long rsvd_29_47:19;
1802 unsigned long fun:16; /* RW */
1331 } s; 1803 } s;
1332 struct uv1h_lb_bau_misc_control_s { 1804 struct uv1h_lb_bau_misc_control_s {
1333 unsigned long rejection_delay:8; /* RW */ 1805 unsigned long rejection_delay:8; /* RW */
@@ -1348,6 +1820,32 @@ union uvh_lb_bau_misc_control_u {
1348 unsigned long rsvd_29_47:19; 1820 unsigned long rsvd_29_47:19;
1349 unsigned long fun:16; /* RW */ 1821 unsigned long fun:16; /* RW */
1350 } s1; 1822 } s1;
1823 struct uvxh_lb_bau_misc_control_s {
1824 unsigned long rejection_delay:8; /* RW */
1825 unsigned long apic_mode:1; /* RW */
1826 unsigned long force_broadcast:1; /* RW */
1827 unsigned long force_lock_nop:1; /* RW */
1828 unsigned long qpi_agent_presence_vector:3; /* RW */
1829 unsigned long descriptor_fetch_mode:1; /* RW */
1830 unsigned long enable_intd_soft_ack_mode:1; /* RW */
1831 unsigned long intd_soft_ack_timeout_period:4; /* RW */
1832 unsigned long enable_dual_mapping_mode:1; /* RW */
1833 unsigned long vga_io_port_decode_enable:1; /* RW */
1834 unsigned long vga_io_port_16_bit_decode:1; /* RW */
1835 unsigned long suppress_dest_registration:1; /* RW */
1836 unsigned long programmed_initial_priority:3; /* RW */
1837 unsigned long use_incoming_priority:1; /* RW */
1838 unsigned long enable_programmed_initial_priority:1;/* RW */
1839 unsigned long enable_automatic_apic_mode_selection:1;/* RW */
1840 unsigned long apic_mode_status:1; /* RO */
1841 unsigned long suppress_interrupts_to_self:1; /* RW */
1842 unsigned long enable_lock_based_system_flush:1;/* RW */
1843 unsigned long enable_extended_sb_status:1; /* RW */
1844 unsigned long suppress_int_prio_udt_to_self:1;/* RW */
1845 unsigned long use_legacy_descriptor_formats:1;/* RW */
1846 unsigned long rsvd_36_47:12;
1847 unsigned long fun:16; /* RW */
1848 } sx;
1351 struct uv2h_lb_bau_misc_control_s { 1849 struct uv2h_lb_bau_misc_control_s {
1352 unsigned long rejection_delay:8; /* RW */ 1850 unsigned long rejection_delay:8; /* RW */
1353 unsigned long apic_mode:1; /* RW */ 1851 unsigned long apic_mode:1; /* RW */
@@ -1374,13 +1872,42 @@ union uvh_lb_bau_misc_control_u {
1374 unsigned long rsvd_36_47:12; 1872 unsigned long rsvd_36_47:12;
1375 unsigned long fun:16; /* RW */ 1873 unsigned long fun:16; /* RW */
1376 } s2; 1874 } s2;
1875 struct uv3h_lb_bau_misc_control_s {
1876 unsigned long rejection_delay:8; /* RW */
1877 unsigned long apic_mode:1; /* RW */
1878 unsigned long force_broadcast:1; /* RW */
1879 unsigned long force_lock_nop:1; /* RW */
1880 unsigned long qpi_agent_presence_vector:3; /* RW */
1881 unsigned long descriptor_fetch_mode:1; /* RW */
1882 unsigned long enable_intd_soft_ack_mode:1; /* RW */
1883 unsigned long intd_soft_ack_timeout_period:4; /* RW */
1884 unsigned long enable_dual_mapping_mode:1; /* RW */
1885 unsigned long vga_io_port_decode_enable:1; /* RW */
1886 unsigned long vga_io_port_16_bit_decode:1; /* RW */
1887 unsigned long suppress_dest_registration:1; /* RW */
1888 unsigned long programmed_initial_priority:3; /* RW */
1889 unsigned long use_incoming_priority:1; /* RW */
1890 unsigned long enable_programmed_initial_priority:1;/* RW */
1891 unsigned long enable_automatic_apic_mode_selection:1;/* RW */
1892 unsigned long apic_mode_status:1; /* RO */
1893 unsigned long suppress_interrupts_to_self:1; /* RW */
1894 unsigned long enable_lock_based_system_flush:1;/* RW */
1895 unsigned long enable_extended_sb_status:1; /* RW */
1896 unsigned long suppress_int_prio_udt_to_self:1;/* RW */
1897 unsigned long use_legacy_descriptor_formats:1;/* RW */
1898 unsigned long suppress_quiesce_msgs_to_qpi:1; /* RW */
1899 unsigned long enable_intd_prefetch_hint:1; /* RW */
1900 unsigned long thread_kill_timebase:8; /* RW */
1901 unsigned long rsvd_46_47:2;
1902 unsigned long fun:16; /* RW */
1903 } s3;
1377}; 1904};
1378 1905
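The s3 view is the only one exposing the new UV3 knobs (suppress_quiesce_msgs_to_qpi, enable_intd_prefetch_hint, thread_kill_timebase), so any code touching them has to gate on the hub revision first. A hypothetical read-modify-write sketch; is_uv3_hub() and the MMR accessors are real uv_hub.h helpers, while the particular tweak is illustrative:

/* Illustrative only: set a BAU control bit that exists only on UV3. */
static void bau_enable_intd_prefetch(void)
{
	union uvh_lb_bau_misc_control_u mc;

	if (!is_uv3_hub())	/* field is undefined on UV1/UV2 */
		return;

	mc.v = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
	mc.s3.enable_intd_prefetch_hint = 1;
	uv_write_local_mmr(UVH_LB_BAU_MISC_CONTROL, mc.v);
}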
1379/* ========================================================================= */ 1906/* ========================================================================= */
1380/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */ 1907/* UVH_LB_BAU_SB_ACTIVATION_CONTROL */
1381/* ========================================================================= */ 1908/* ========================================================================= */
1382#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL 1909#define UVH_LB_BAU_SB_ACTIVATION_CONTROL 0x320020UL
1383#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8 1910#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_32 0x9a8
1384 1911
1385#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0 1912#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_INDEX_SHFT 0
1386#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62 1913#define UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT 62
@@ -1402,8 +1929,8 @@ union uvh_lb_bau_sb_activation_control_u {
1402/* ========================================================================= */ 1929/* ========================================================================= */
1403/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */ 1930/* UVH_LB_BAU_SB_ACTIVATION_STATUS_0 */
1404/* ========================================================================= */ 1931/* ========================================================================= */
1405#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL 1932#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0 0x320030UL
1406#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0 1933#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_32 0x9b0
1407 1934
1408#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0 1935#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_SHFT 0
1409#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL 1936#define UVH_LB_BAU_SB_ACTIVATION_STATUS_0_STATUS_MASK 0xffffffffffffffffUL
@@ -1418,8 +1945,8 @@ union uvh_lb_bau_sb_activation_status_0_u {
1418/* ========================================================================= */ 1945/* ========================================================================= */
1419/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */ 1946/* UVH_LB_BAU_SB_ACTIVATION_STATUS_1 */
1420/* ========================================================================= */ 1947/* ========================================================================= */
1421#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL 1948#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1 0x320040UL
1422#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8 1949#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_32 0x9b8
1423 1950
1424#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0 1951#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_SHFT 0
1425#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL 1952#define UVH_LB_BAU_SB_ACTIVATION_STATUS_1_STATUS_MASK 0xffffffffffffffffUL
@@ -1434,8 +1961,8 @@ union uvh_lb_bau_sb_activation_status_1_u {
1434/* ========================================================================= */ 1961/* ========================================================================= */
1435/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */ 1962/* UVH_LB_BAU_SB_DESCRIPTOR_BASE */
1436/* ========================================================================= */ 1963/* ========================================================================= */
1437#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL 1964#define UVH_LB_BAU_SB_DESCRIPTOR_BASE 0x320010UL
1438#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0 1965#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_32 0x9a0
1439 1966
1440#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12 1967#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_PAGE_ADDRESS_SHFT 12
1441#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49 1968#define UVH_LB_BAU_SB_DESCRIPTOR_BASE_NODE_ID_SHFT 49
@@ -1456,7 +1983,10 @@ union uvh_lb_bau_sb_descriptor_base_u {
1456/* ========================================================================= */ 1983/* ========================================================================= */
1457/* UVH_NODE_ID */ 1984/* UVH_NODE_ID */
1458/* ========================================================================= */ 1985/* ========================================================================= */
1459#define UVH_NODE_ID 0x0UL 1986#define UVH_NODE_ID 0x0UL
1987#define UV1H_NODE_ID 0x0UL
1988#define UV2H_NODE_ID 0x0UL
1989#define UV3H_NODE_ID 0x0UL
1460 1990
1461#define UVH_NODE_ID_FORCE1_SHFT 0 1991#define UVH_NODE_ID_FORCE1_SHFT 0
1462#define UVH_NODE_ID_MANUFACTURER_SHFT 1 1992#define UVH_NODE_ID_MANUFACTURER_SHFT 1
@@ -1484,6 +2014,21 @@ union uvh_lb_bau_sb_descriptor_base_u {
1484#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL 2014#define UV1H_NODE_ID_NODES_PER_BIT_MASK 0x007f000000000000UL
1485#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL 2015#define UV1H_NODE_ID_NI_PORT_MASK 0x0f00000000000000UL
1486 2016
2017#define UVXH_NODE_ID_FORCE1_SHFT 0
2018#define UVXH_NODE_ID_MANUFACTURER_SHFT 1
2019#define UVXH_NODE_ID_PART_NUMBER_SHFT 12
2020#define UVXH_NODE_ID_REVISION_SHFT 28
2021#define UVXH_NODE_ID_NODE_ID_SHFT 32
2022#define UVXH_NODE_ID_NODES_PER_BIT_SHFT 50
2023#define UVXH_NODE_ID_NI_PORT_SHFT 57
2024#define UVXH_NODE_ID_FORCE1_MASK 0x0000000000000001UL
2025#define UVXH_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
2026#define UVXH_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
2027#define UVXH_NODE_ID_REVISION_MASK 0x00000000f0000000UL
2028#define UVXH_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
2029#define UVXH_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
2030#define UVXH_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
2031
1487#define UV2H_NODE_ID_FORCE1_SHFT 0 2032#define UV2H_NODE_ID_FORCE1_SHFT 0
1488#define UV2H_NODE_ID_MANUFACTURER_SHFT 1 2033#define UV2H_NODE_ID_MANUFACTURER_SHFT 1
1489#define UV2H_NODE_ID_PART_NUMBER_SHFT 12 2034#define UV2H_NODE_ID_PART_NUMBER_SHFT 12
@@ -1499,6 +2044,25 @@ union uvh_lb_bau_sb_descriptor_base_u {
1499#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL 2044#define UV2H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
1500#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL 2045#define UV2H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
1501 2046
2047#define UV3H_NODE_ID_FORCE1_SHFT 0
2048#define UV3H_NODE_ID_MANUFACTURER_SHFT 1
2049#define UV3H_NODE_ID_PART_NUMBER_SHFT 12
2050#define UV3H_NODE_ID_REVISION_SHFT 28
2051#define UV3H_NODE_ID_NODE_ID_SHFT 32
2052#define UV3H_NODE_ID_ROUTER_SELECT_SHFT 48
2053#define UV3H_NODE_ID_RESERVED_2_SHFT 49
2054#define UV3H_NODE_ID_NODES_PER_BIT_SHFT 50
2055#define UV3H_NODE_ID_NI_PORT_SHFT 57
2056#define UV3H_NODE_ID_FORCE1_MASK 0x0000000000000001UL
2057#define UV3H_NODE_ID_MANUFACTURER_MASK 0x0000000000000ffeUL
2058#define UV3H_NODE_ID_PART_NUMBER_MASK 0x000000000ffff000UL
2059#define UV3H_NODE_ID_REVISION_MASK 0x00000000f0000000UL
2060#define UV3H_NODE_ID_NODE_ID_MASK 0x00007fff00000000UL
2061#define UV3H_NODE_ID_ROUTER_SELECT_MASK 0x0001000000000000UL
2062#define UV3H_NODE_ID_RESERVED_2_MASK 0x0002000000000000UL
2063#define UV3H_NODE_ID_NODES_PER_BIT_MASK 0x01fc000000000000UL
2064#define UV3H_NODE_ID_NI_PORT_MASK 0x3e00000000000000UL
2065
1502union uvh_node_id_u { 2066union uvh_node_id_u {
1503 unsigned long v; 2067 unsigned long v;
1504 struct uvh_node_id_s { 2068 struct uvh_node_id_s {
@@ -1521,6 +2085,17 @@ union uvh_node_id_u {
1521 unsigned long ni_port:4; /* RO */ 2085 unsigned long ni_port:4; /* RO */
1522 unsigned long rsvd_60_63:4; 2086 unsigned long rsvd_60_63:4;
1523 } s1; 2087 } s1;
2088 struct uvxh_node_id_s {
2089 unsigned long force1:1; /* RO */
2090 unsigned long manufacturer:11; /* RO */
2091 unsigned long part_number:16; /* RO */
2092 unsigned long revision:4; /* RO */
2093 unsigned long node_id:15; /* RW */
2094 unsigned long rsvd_47_49:3;
2095 unsigned long nodes_per_bit:7; /* RO */
2096 unsigned long ni_port:5; /* RO */
2097 unsigned long rsvd_62_63:2;
2098 } sx;
1524 struct uv2h_node_id_s { 2099 struct uv2h_node_id_s {
1525 unsigned long force1:1; /* RO */ 2100 unsigned long force1:1; /* RO */
1526 unsigned long manufacturer:11; /* RO */ 2101 unsigned long manufacturer:11; /* RO */
@@ -1532,13 +2107,26 @@ union uvh_node_id_u {
1532 unsigned long ni_port:5; /* RO */ 2107 unsigned long ni_port:5; /* RO */
1533 unsigned long rsvd_62_63:2; 2108 unsigned long rsvd_62_63:2;
1534 } s2; 2109 } s2;
2110 struct uv3h_node_id_s {
2111 unsigned long force1:1; /* RO */
2112 unsigned long manufacturer:11; /* RO */
2113 unsigned long part_number:16; /* RO */
2114 unsigned long revision:4; /* RO */
2115 unsigned long node_id:15; /* RW */
2116 unsigned long rsvd_47:1;
2117 unsigned long router_select:1; /* RO */
2118 unsigned long rsvd_49:1;
2119 unsigned long nodes_per_bit:7; /* RO */
2120 unsigned long ni_port:5; /* RO */
2121 unsigned long rsvd_62_63:2;
2122 } s3;
1535}; 2123};
1536 2124
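FORCE1 through NODE_ID and NI_PORT sit at the same offsets on UV2 and UV3, so the common sx view decodes both; only router_select needs the s3 view. A short hypothetical sketch, assuming the usual uv_hub.h accessors; the function name is illustrative:

/* Sketch: report this hub's node id and, on UV3, its router select. */
static void uv_print_node_id(void)
{
	union uvh_node_id_u nid;

	nid.v = uv_read_local_mmr(UVH_NODE_ID);
	pr_info("node_id %u rev %u\n",
		(unsigned)nid.sx.node_id, (unsigned)nid.sx.revision);
	if (is_uv3_hub())
		pr_info("router_select %u\n",
			(unsigned)nid.s3.router_select);
}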
1537/* ========================================================================= */ 2125/* ========================================================================= */
1538/* UVH_NODE_PRESENT_TABLE */ 2126/* UVH_NODE_PRESENT_TABLE */
1539/* ========================================================================= */ 2127/* ========================================================================= */
1540#define UVH_NODE_PRESENT_TABLE 0x1400UL 2128#define UVH_NODE_PRESENT_TABLE 0x1400UL
1541#define UVH_NODE_PRESENT_TABLE_DEPTH 16 2129#define UVH_NODE_PRESENT_TABLE_DEPTH 16
1542 2130
1543#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0 2131#define UVH_NODE_PRESENT_TABLE_NODES_SHFT 0
1544#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL 2132#define UVH_NODE_PRESENT_TABLE_NODES_MASK 0xffffffffffffffffUL
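The present table is UVH_NODE_PRESENT_TABLE_DEPTH consecutive 64-bit MMRs with one present bit per node. A sketch of the usual walk over it, assuming uv_read_local_mmr() and hweight64() from <linux/bitops.h>; the function name is illustrative:

/* Sketch: count the nodes marked present across all 16 table words. */
static int uv_count_present_nodes(void)
{
	int i, n = 0;

	for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
		n += hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE +
						 i * 8));
	return n;
}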
@@ -1553,7 +2141,7 @@ union uvh_node_present_table_u {
1553/* ========================================================================= */ 2141/* ========================================================================= */
1554/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */ 2142/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR */
1555/* ========================================================================= */ 2143/* ========================================================================= */
1556#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL 2144#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
1557 2145
1558#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24 2146#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
1559#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48 2147#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
@@ -1577,7 +2165,7 @@ union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
1577/* ========================================================================= */ 2165/* ========================================================================= */
1578/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */ 2166/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR */
1579/* ========================================================================= */ 2167/* ========================================================================= */
1580#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL 2168#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
1581 2169
1582#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24 2170#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
1583#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48 2171#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
@@ -1601,7 +2189,7 @@ union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
1601/* ========================================================================= */ 2189/* ========================================================================= */
1602/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */ 2190/* UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR */
1603/* ========================================================================= */ 2191/* ========================================================================= */
1604#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL 2192#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
1605 2193
1606#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24 2194#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
1607#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48 2195#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
@@ -1625,7 +2213,7 @@ union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
1625/* ========================================================================= */ 2213/* ========================================================================= */
1626/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */ 2214/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR */
1627/* ========================================================================= */ 2215/* ========================================================================= */
1628#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL 2216#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
1629 2217
1630#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24 2218#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT 24
1631#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL 2219#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1642,7 +2230,7 @@ union uvh_rh_gam_alias210_redirect_config_0_mmr_u {
1642/* ========================================================================= */ 2230/* ========================================================================= */
1643/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */ 2231/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR */
1644/* ========================================================================= */ 2232/* ========================================================================= */
1645#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL 2233#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR 0x16000e0UL
1646 2234
1647#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24 2235#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_SHFT 24
1648#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL 2236#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1659,7 +2247,7 @@ union uvh_rh_gam_alias210_redirect_config_1_mmr_u {
1659/* ========================================================================= */ 2247/* ========================================================================= */
1660/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */ 2248/* UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR */
1661/* ========================================================================= */ 2249/* ========================================================================= */
1662#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL 2250#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR 0x16000f0UL
1663 2251
1664#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24 2252#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_SHFT 24
1665#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL 2253#define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR_DEST_BASE_MASK 0x00003fffff000000UL
@@ -1676,7 +2264,10 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1676/* ========================================================================= */ 2264/* ========================================================================= */
1677/* UVH_RH_GAM_CONFIG_MMR */ 2265/* UVH_RH_GAM_CONFIG_MMR */
1678/* ========================================================================= */ 2266/* ========================================================================= */
1679#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL 2267#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
2268#define UV1H_RH_GAM_CONFIG_MMR 0x1600000UL
2269#define UV2H_RH_GAM_CONFIG_MMR 0x1600000UL
2270#define UV3H_RH_GAM_CONFIG_MMR 0x1600000UL
1680 2271
1681#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 2272#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1682#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 2273#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
@@ -1690,11 +2281,21 @@ union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
1690#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 2281#define UV1H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1691#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL 2282#define UV1H_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
1692 2283
2284#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
2285#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
2286#define UVXH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
2287#define UVXH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
2288
1693#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0 2289#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
1694#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6 2290#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
1695#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL 2291#define UV2H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
1696#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL 2292#define UV2H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
1697 2293
2294#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
2295#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
2296#define UV3H_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
2297#define UV3H_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
2298
1698union uvh_rh_gam_config_mmr_u { 2299union uvh_rh_gam_config_mmr_u {
1699 unsigned long v; 2300 unsigned long v;
1700 struct uvh_rh_gam_config_mmr_s { 2301 struct uvh_rh_gam_config_mmr_s {
@@ -1709,20 +2310,37 @@ union uvh_rh_gam_config_mmr_u {
1709 unsigned long mmiol_cfg:1; /* RW */ 2310 unsigned long mmiol_cfg:1; /* RW */
1710 unsigned long rsvd_13_63:51; 2311 unsigned long rsvd_13_63:51;
1711 } s1; 2312 } s1;
2313 struct uvxh_rh_gam_config_mmr_s {
2314 unsigned long m_skt:6; /* RW */
2315 unsigned long n_skt:4; /* RW */
2316 unsigned long rsvd_10_63:54;
2317 } sx;
1712 struct uv2h_rh_gam_config_mmr_s { 2318 struct uv2h_rh_gam_config_mmr_s {
1713 unsigned long m_skt:6; /* RW */ 2319 unsigned long m_skt:6; /* RW */
1714 unsigned long n_skt:4; /* RW */ 2320 unsigned long n_skt:4; /* RW */
1715 unsigned long rsvd_10_63:54; 2321 unsigned long rsvd_10_63:54;
1716 } s2; 2322 } s2;
2323 struct uv3h_rh_gam_config_mmr_s {
2324 unsigned long m_skt:6; /* RW */
2325 unsigned long n_skt:4; /* RW */
2326 unsigned long rsvd_10_63:54;
2327 } s3;
1717}; 2328};
1718 2329
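m_skt and n_skt split the global address map: M low bits of per-socket offset with N node-id bits above them. A minimal sketch of reading the geometry through the common view:

    static void example_gam_geometry(int *m_skt, int *n_skt)
    {
            union uvh_rh_gam_config_mmr_u cfg;

            cfg.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
            *m_skt = cfg.s.m_skt;           /* socket-offset bits */
            *n_skt = cfg.s.n_skt;           /* node-id bits above them */
    }
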
1719/* ========================================================================= */ 2330/* ========================================================================= */
1720/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */ 2331/* UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR */
1721/* ========================================================================= */ 2332/* ========================================================================= */
1722#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL 2333#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
2334#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
2335#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
2336#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
1723 2337
1724#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 2338#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
2339#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
2340#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1725#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL 2341#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
2342#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
2343#define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1726 2344
1727#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 2345#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1728#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48 2346#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_GR4_SHFT 48
@@ -1733,6 +2351,13 @@ union uvh_rh_gam_config_mmr_u {
1733#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 2351#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1734#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2352#define UV1H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1735 2353
2354#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
2355#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
2356#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2357#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
2358#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
2359#define UVXH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2360
1736#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28 2361#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
1737#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52 2362#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
1738#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 2363#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
@@ -1740,12 +2365,23 @@ union uvh_rh_gam_config_mmr_u {
1740#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL 2365#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
1741#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2366#define UV2H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1742 2367
2368#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT 28
2369#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_SHFT 52
2370#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_SHFT 62
2371#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2372#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffff0000000UL
2373#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_N_GRU_MASK 0x00f0000000000000UL
2374#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_MODE_MASK 0x4000000000000000UL
2375#define UV3H_RH_GAM_GRU_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2376
1743union uvh_rh_gam_gru_overlay_config_mmr_u { 2377union uvh_rh_gam_gru_overlay_config_mmr_u {
1744 unsigned long v; 2378 unsigned long v;
1745 struct uvh_rh_gam_gru_overlay_config_mmr_s { 2379 struct uvh_rh_gam_gru_overlay_config_mmr_s {
1746 unsigned long rsvd_0_27:28; 2380 unsigned long rsvd_0_27:28;
1747 unsigned long base:18; /* RW */ 2381 unsigned long base:18; /* RW */
1748 unsigned long rsvd_46_62:17; 2382 unsigned long rsvd_46_51:6;
2383 unsigned long n_gru:4; /* RW */
2384 unsigned long rsvd_56_62:7;
1749 unsigned long enable:1; /* RW */ 2385 unsigned long enable:1; /* RW */
1750 } s; 2386 } s;
1751 struct uv1h_rh_gam_gru_overlay_config_mmr_s { 2387 struct uv1h_rh_gam_gru_overlay_config_mmr_s {
@@ -1758,6 +2394,14 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
1758 unsigned long rsvd_56_62:7; 2394 unsigned long rsvd_56_62:7;
1759 unsigned long enable:1; /* RW */ 2395 unsigned long enable:1; /* RW */
1760 } s1; 2396 } s1;
2397 struct uvxh_rh_gam_gru_overlay_config_mmr_s {
2398 unsigned long rsvd_0_27:28;
2399 unsigned long base:18; /* RW */
2400 unsigned long rsvd_46_51:6;
2401 unsigned long n_gru:4; /* RW */
2402 unsigned long rsvd_56_62:7;
2403 unsigned long enable:1; /* RW */
2404 } sx;
1761 struct uv2h_rh_gam_gru_overlay_config_mmr_s { 2405 struct uv2h_rh_gam_gru_overlay_config_mmr_s {
1762 unsigned long rsvd_0_27:28; 2406 unsigned long rsvd_0_27:28;
1763 unsigned long base:18; /* RW */ 2407 unsigned long base:18; /* RW */
@@ -1766,12 +2410,22 @@ union uvh_rh_gam_gru_overlay_config_mmr_u {
1766 unsigned long rsvd_56_62:7; 2410 unsigned long rsvd_56_62:7;
1767 unsigned long enable:1; /* RW */ 2411 unsigned long enable:1; /* RW */
1768 } s2; 2412 } s2;
2413 struct uv3h_rh_gam_gru_overlay_config_mmr_s {
2414 unsigned long rsvd_0_27:28;
2415 unsigned long base:18; /* RW */
2416 unsigned long rsvd_46_51:6;
2417 unsigned long n_gru:4; /* RW */
2418 unsigned long rsvd_56_61:6;
2419 unsigned long mode:1; /* RW */
2420 unsigned long enable:1; /* RW */
2421 } s3;
1769}; 2422};
1770 2423
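A hedged sketch of decoding the GRU window: base carries physical address bits 45:28 and is only meaningful while enable is set (the UV3-only mode bit is ignored here).

    static unsigned long example_gru_base(void)
    {
            union uvh_rh_gam_gru_overlay_config_mmr_u gru;

            gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
            if (!gru.s.enable)
                    return 0UL;
            return (unsigned long)gru.s.base <<
                   UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
    }
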
1771/* ========================================================================= */ 2424/* ========================================================================= */
1772/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */ 2425/* UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR */
1773/* ========================================================================= */ 2426/* ========================================================================= */
1774#define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL 2427#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
2428#define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR 0x1600030UL
1775 2429
1776#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30 2430#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT 30
1777#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46 2431#define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_M_IO_SHFT 46
@@ -1814,10 +2468,15 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1814/* ========================================================================= */ 2468/* ========================================================================= */
1815/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */ 2469/* UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR */
1816/* ========================================================================= */ 2470/* ========================================================================= */
1817#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL 2471#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
2472#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
2473#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
2474#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR 0x1600028UL
1818 2475
1819#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 2476#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
2477#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1820#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 2478#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
2479#define UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1821 2480
1822#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 2481#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1823#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46 2482#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_SHFT 46
@@ -1826,11 +2485,21 @@ union uvh_rh_gam_mmioh_overlay_config_mmr_u {
1826#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL 2485#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_DUAL_HUB_MASK 0x0000400000000000UL
1827#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2486#define UV1H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1828 2487
2488#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
2489#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2490#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
2491#define UVXH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2492
1829#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26 2493#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
1830#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63 2494#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
1831#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL 2495#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
1832#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL 2496#define UV2H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
1833 2497
2498#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT 26
2499#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_SHFT 63
2500#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_MASK 0x00003ffffc000000UL
2501#define UV3H_RH_GAM_MMR_OVERLAY_CONFIG_MMR_ENABLE_MASK 0x8000000000000000UL
2502
1834union uvh_rh_gam_mmr_overlay_config_mmr_u { 2503union uvh_rh_gam_mmr_overlay_config_mmr_u {
1835 unsigned long v; 2504 unsigned long v;
1836 struct uvh_rh_gam_mmr_overlay_config_mmr_s { 2505 struct uvh_rh_gam_mmr_overlay_config_mmr_s {
@@ -1846,18 +2515,30 @@ union uvh_rh_gam_mmr_overlay_config_mmr_u {
1846 unsigned long rsvd_47_62:16; 2515 unsigned long rsvd_47_62:16;
1847 unsigned long enable:1; /* RW */ 2516 unsigned long enable:1; /* RW */
1848 } s1; 2517 } s1;
2518 struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
2519 unsigned long rsvd_0_25:26;
2520 unsigned long base:20; /* RW */
2521 unsigned long rsvd_46_62:17;
2522 unsigned long enable:1; /* RW */
2523 } sx;
1849 struct uv2h_rh_gam_mmr_overlay_config_mmr_s { 2524 struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
1850 unsigned long rsvd_0_25:26; 2525 unsigned long rsvd_0_25:26;
1851 unsigned long base:20; /* RW */ 2526 unsigned long base:20; /* RW */
1852 unsigned long rsvd_46_62:17; 2527 unsigned long rsvd_46_62:17;
1853 unsigned long enable:1; /* RW */ 2528 unsigned long enable:1; /* RW */
1854 } s2; 2529 } s2;
2530 struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
2531 unsigned long rsvd_0_25:26;
2532 unsigned long base:20; /* RW */
2533 unsigned long rsvd_46_62:17;
2534 unsigned long enable:1; /* RW */
2535 } s3;
1855}; 2536};
1856 2537
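The MMR overlay decodes the same way, with base at bit 26; a sketch under the same assumptions:

    static unsigned long example_mmr_overlay_base(void)
    {
            union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;

            mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
            if (!mmr.s.enable)
                    return 0UL;
            return (unsigned long)mmr.s.base <<
                   UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
    }
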
1857/* ========================================================================= */ 2538/* ========================================================================= */
1858/* UVH_RTC */ 2539/* UVH_RTC */
1859/* ========================================================================= */ 2540/* ========================================================================= */
1860#define UVH_RTC 0x340000UL 2541#define UVH_RTC 0x340000UL
1861 2542
1862#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0 2543#define UVH_RTC_REAL_TIME_CLOCK_SHFT 0
1863#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL 2544#define UVH_RTC_REAL_TIME_CLOCK_MASK 0x00ffffffffffffffUL
@@ -1873,7 +2554,7 @@ union uvh_rtc_u {
1873/* ========================================================================= */ 2554/* ========================================================================= */
1874/* UVH_RTC1_INT_CONFIG */ 2555/* UVH_RTC1_INT_CONFIG */
1875/* ========================================================================= */ 2556/* ========================================================================= */
1876#define UVH_RTC1_INT_CONFIG 0x615c0UL 2557#define UVH_RTC1_INT_CONFIG 0x615c0UL
1877 2558
1878#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0 2559#define UVH_RTC1_INT_CONFIG_VECTOR_SHFT 0
1879#define UVH_RTC1_INT_CONFIG_DM_SHFT 8 2560#define UVH_RTC1_INT_CONFIG_DM_SHFT 8
@@ -1911,8 +2592,8 @@ union uvh_rtc1_int_config_u {
1911/* ========================================================================= */ 2592/* ========================================================================= */
1912/* UVH_SCRATCH5 */ 2593/* UVH_SCRATCH5 */
1913/* ========================================================================= */ 2594/* ========================================================================= */
1914#define UVH_SCRATCH5 0x2d0200UL 2595#define UVH_SCRATCH5 0x2d0200UL
1915#define UVH_SCRATCH5_32 0x778 2596#define UVH_SCRATCH5_32 0x778
1916 2597
1917#define UVH_SCRATCH5_SCRATCH5_SHFT 0 2598#define UVH_SCRATCH5_SCRATCH5_SHFT 0
1918#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL 2599#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
@@ -1925,79 +2606,79 @@ union uvh_scratch5_u {
1925}; 2606};
1926 2607
1927/* ========================================================================= */ 2608/* ========================================================================= */
1928/* UV2H_EVENT_OCCURRED2 */ 2609/* UVXH_EVENT_OCCURRED2 */
1929/* ========================================================================= */ 2610/* ========================================================================= */
1930#define UV2H_EVENT_OCCURRED2 0x70100UL 2611#define UVXH_EVENT_OCCURRED2 0x70100UL
1931#define UV2H_EVENT_OCCURRED2_32 0xb68 2612#define UVXH_EVENT_OCCURRED2_32 0xb68
1932 2613
1933#define UV2H_EVENT_OCCURRED2_RTC_0_SHFT 0 2614#define UVXH_EVENT_OCCURRED2_RTC_0_SHFT 0
1934#define UV2H_EVENT_OCCURRED2_RTC_1_SHFT 1 2615#define UVXH_EVENT_OCCURRED2_RTC_1_SHFT 1
1935#define UV2H_EVENT_OCCURRED2_RTC_2_SHFT 2 2616#define UVXH_EVENT_OCCURRED2_RTC_2_SHFT 2
1936#define UV2H_EVENT_OCCURRED2_RTC_3_SHFT 3 2617#define UVXH_EVENT_OCCURRED2_RTC_3_SHFT 3
1937#define UV2H_EVENT_OCCURRED2_RTC_4_SHFT 4 2618#define UVXH_EVENT_OCCURRED2_RTC_4_SHFT 4
1938#define UV2H_EVENT_OCCURRED2_RTC_5_SHFT 5 2619#define UVXH_EVENT_OCCURRED2_RTC_5_SHFT 5
1939#define UV2H_EVENT_OCCURRED2_RTC_6_SHFT 6 2620#define UVXH_EVENT_OCCURRED2_RTC_6_SHFT 6
1940#define UV2H_EVENT_OCCURRED2_RTC_7_SHFT 7 2621#define UVXH_EVENT_OCCURRED2_RTC_7_SHFT 7
1941#define UV2H_EVENT_OCCURRED2_RTC_8_SHFT 8 2622#define UVXH_EVENT_OCCURRED2_RTC_8_SHFT 8
1942#define UV2H_EVENT_OCCURRED2_RTC_9_SHFT 9 2623#define UVXH_EVENT_OCCURRED2_RTC_9_SHFT 9
1943#define UV2H_EVENT_OCCURRED2_RTC_10_SHFT 10 2624#define UVXH_EVENT_OCCURRED2_RTC_10_SHFT 10
1944#define UV2H_EVENT_OCCURRED2_RTC_11_SHFT 11 2625#define UVXH_EVENT_OCCURRED2_RTC_11_SHFT 11
1945#define UV2H_EVENT_OCCURRED2_RTC_12_SHFT 12 2626#define UVXH_EVENT_OCCURRED2_RTC_12_SHFT 12
1946#define UV2H_EVENT_OCCURRED2_RTC_13_SHFT 13 2627#define UVXH_EVENT_OCCURRED2_RTC_13_SHFT 13
1947#define UV2H_EVENT_OCCURRED2_RTC_14_SHFT 14 2628#define UVXH_EVENT_OCCURRED2_RTC_14_SHFT 14
1948#define UV2H_EVENT_OCCURRED2_RTC_15_SHFT 15 2629#define UVXH_EVENT_OCCURRED2_RTC_15_SHFT 15
1949#define UV2H_EVENT_OCCURRED2_RTC_16_SHFT 16 2630#define UVXH_EVENT_OCCURRED2_RTC_16_SHFT 16
1950#define UV2H_EVENT_OCCURRED2_RTC_17_SHFT 17 2631#define UVXH_EVENT_OCCURRED2_RTC_17_SHFT 17
1951#define UV2H_EVENT_OCCURRED2_RTC_18_SHFT 18 2632#define UVXH_EVENT_OCCURRED2_RTC_18_SHFT 18
1952#define UV2H_EVENT_OCCURRED2_RTC_19_SHFT 19 2633#define UVXH_EVENT_OCCURRED2_RTC_19_SHFT 19
1953#define UV2H_EVENT_OCCURRED2_RTC_20_SHFT 20 2634#define UVXH_EVENT_OCCURRED2_RTC_20_SHFT 20
1954#define UV2H_EVENT_OCCURRED2_RTC_21_SHFT 21 2635#define UVXH_EVENT_OCCURRED2_RTC_21_SHFT 21
1955#define UV2H_EVENT_OCCURRED2_RTC_22_SHFT 22 2636#define UVXH_EVENT_OCCURRED2_RTC_22_SHFT 22
1956#define UV2H_EVENT_OCCURRED2_RTC_23_SHFT 23 2637#define UVXH_EVENT_OCCURRED2_RTC_23_SHFT 23
1957#define UV2H_EVENT_OCCURRED2_RTC_24_SHFT 24 2638#define UVXH_EVENT_OCCURRED2_RTC_24_SHFT 24
1958#define UV2H_EVENT_OCCURRED2_RTC_25_SHFT 25 2639#define UVXH_EVENT_OCCURRED2_RTC_25_SHFT 25
1959#define UV2H_EVENT_OCCURRED2_RTC_26_SHFT 26 2640#define UVXH_EVENT_OCCURRED2_RTC_26_SHFT 26
1960#define UV2H_EVENT_OCCURRED2_RTC_27_SHFT 27 2641#define UVXH_EVENT_OCCURRED2_RTC_27_SHFT 27
1961#define UV2H_EVENT_OCCURRED2_RTC_28_SHFT 28 2642#define UVXH_EVENT_OCCURRED2_RTC_28_SHFT 28
1962#define UV2H_EVENT_OCCURRED2_RTC_29_SHFT 29 2643#define UVXH_EVENT_OCCURRED2_RTC_29_SHFT 29
1963#define UV2H_EVENT_OCCURRED2_RTC_30_SHFT 30 2644#define UVXH_EVENT_OCCURRED2_RTC_30_SHFT 30
1964#define UV2H_EVENT_OCCURRED2_RTC_31_SHFT 31 2645#define UVXH_EVENT_OCCURRED2_RTC_31_SHFT 31
1965#define UV2H_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL 2646#define UVXH_EVENT_OCCURRED2_RTC_0_MASK 0x0000000000000001UL
1966#define UV2H_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL 2647#define UVXH_EVENT_OCCURRED2_RTC_1_MASK 0x0000000000000002UL
1967#define UV2H_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL 2648#define UVXH_EVENT_OCCURRED2_RTC_2_MASK 0x0000000000000004UL
1968#define UV2H_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL 2649#define UVXH_EVENT_OCCURRED2_RTC_3_MASK 0x0000000000000008UL
1969#define UV2H_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL 2650#define UVXH_EVENT_OCCURRED2_RTC_4_MASK 0x0000000000000010UL
1970#define UV2H_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL 2651#define UVXH_EVENT_OCCURRED2_RTC_5_MASK 0x0000000000000020UL
1971#define UV2H_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL 2652#define UVXH_EVENT_OCCURRED2_RTC_6_MASK 0x0000000000000040UL
1972#define UV2H_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL 2653#define UVXH_EVENT_OCCURRED2_RTC_7_MASK 0x0000000000000080UL
1973#define UV2H_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL 2654#define UVXH_EVENT_OCCURRED2_RTC_8_MASK 0x0000000000000100UL
1974#define UV2H_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL 2655#define UVXH_EVENT_OCCURRED2_RTC_9_MASK 0x0000000000000200UL
1975#define UV2H_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL 2656#define UVXH_EVENT_OCCURRED2_RTC_10_MASK 0x0000000000000400UL
1976#define UV2H_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL 2657#define UVXH_EVENT_OCCURRED2_RTC_11_MASK 0x0000000000000800UL
1977#define UV2H_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL 2658#define UVXH_EVENT_OCCURRED2_RTC_12_MASK 0x0000000000001000UL
1978#define UV2H_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL 2659#define UVXH_EVENT_OCCURRED2_RTC_13_MASK 0x0000000000002000UL
1979#define UV2H_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL 2660#define UVXH_EVENT_OCCURRED2_RTC_14_MASK 0x0000000000004000UL
1980#define UV2H_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL 2661#define UVXH_EVENT_OCCURRED2_RTC_15_MASK 0x0000000000008000UL
1981#define UV2H_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL 2662#define UVXH_EVENT_OCCURRED2_RTC_16_MASK 0x0000000000010000UL
1982#define UV2H_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL 2663#define UVXH_EVENT_OCCURRED2_RTC_17_MASK 0x0000000000020000UL
1983#define UV2H_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL 2664#define UVXH_EVENT_OCCURRED2_RTC_18_MASK 0x0000000000040000UL
1984#define UV2H_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL 2665#define UVXH_EVENT_OCCURRED2_RTC_19_MASK 0x0000000000080000UL
1985#define UV2H_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL 2666#define UVXH_EVENT_OCCURRED2_RTC_20_MASK 0x0000000000100000UL
1986#define UV2H_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL 2667#define UVXH_EVENT_OCCURRED2_RTC_21_MASK 0x0000000000200000UL
1987#define UV2H_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL 2668#define UVXH_EVENT_OCCURRED2_RTC_22_MASK 0x0000000000400000UL
1988#define UV2H_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL 2669#define UVXH_EVENT_OCCURRED2_RTC_23_MASK 0x0000000000800000UL
1989#define UV2H_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL 2670#define UVXH_EVENT_OCCURRED2_RTC_24_MASK 0x0000000001000000UL
1990#define UV2H_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL 2671#define UVXH_EVENT_OCCURRED2_RTC_25_MASK 0x0000000002000000UL
1991#define UV2H_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL 2672#define UVXH_EVENT_OCCURRED2_RTC_26_MASK 0x0000000004000000UL
1992#define UV2H_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL 2673#define UVXH_EVENT_OCCURRED2_RTC_27_MASK 0x0000000008000000UL
1993#define UV2H_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL 2674#define UVXH_EVENT_OCCURRED2_RTC_28_MASK 0x0000000010000000UL
1994#define UV2H_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL 2675#define UVXH_EVENT_OCCURRED2_RTC_29_MASK 0x0000000020000000UL
1995#define UV2H_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL 2676#define UVXH_EVENT_OCCURRED2_RTC_30_MASK 0x0000000040000000UL
1996#define UV2H_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL 2677#define UVXH_EVENT_OCCURRED2_RTC_31_MASK 0x0000000080000000UL
1997 2678
1998union uv2h_event_occurred2_u { 2679union uvxh_event_occurred2_u {
1999 unsigned long v; 2680 unsigned long v;
2000 struct uv2h_event_occurred2_s { 2681 struct uvxh_event_occurred2_s {
2001 unsigned long rtc_0:1; /* RW */ 2682 unsigned long rtc_0:1; /* RW */
2002 unsigned long rtc_1:1; /* RW */ 2683 unsigned long rtc_1:1; /* RW */
2003 unsigned long rtc_2:1; /* RW */ 2684 unsigned long rtc_2:1; /* RW */
@@ -2031,29 +2712,46 @@ union uv2h_event_occurred2_u {
2031 unsigned long rtc_30:1; /* RW */ 2712 unsigned long rtc_30:1; /* RW */
2032 unsigned long rtc_31:1; /* RW */ 2713 unsigned long rtc_31:1; /* RW */
2033 unsigned long rsvd_32_63:32; 2714 unsigned long rsvd_32_63:32;
2034 } s1; 2715 } sx;
2035}; 2716};
2036 2717
2037/* ========================================================================= */ 2718/* ========================================================================= */
2038/* UV2H_EVENT_OCCURRED2_ALIAS */ 2719/* UVXH_EVENT_OCCURRED2_ALIAS */
2039/* ========================================================================= */ 2720/* ========================================================================= */
2040#define UV2H_EVENT_OCCURRED2_ALIAS 0x70108UL 2721#define UVXH_EVENT_OCCURRED2_ALIAS 0x70108UL
2041#define UV2H_EVENT_OCCURRED2_ALIAS_32 0xb70 2722#define UVXH_EVENT_OCCURRED2_ALIAS_32 0xb70
2723
2042 2724
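Assuming the _ALIAS offset is the usual write-to-clear mirror (as with the EVENT_OCCURRED0/1 aliases; worth checking against the hub spec), acking one RTC source would look like:

    static void example_ack_rtc_event(int rtc)
    {
            /* assumed write-to-clear: only the bits written are cleared */
            uv_write_local_mmr(UVXH_EVENT_OCCURRED2_ALIAS, 1UL << rtc);
    }
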
2043/* ========================================================================= */ 2725/* ========================================================================= */
2044/* UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 */ 2726/* UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 */
2045/* ========================================================================= */ 2727/* ========================================================================= */
2046#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL 2728#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
2047#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0 2729#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
2730#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2 0x320130UL
2731#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x9f0
2732#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
2733#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_32 0x320130UL
2734
2735#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
2736#define UVXH_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
2048 2737
2049#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0 2738#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
2050#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL 2739#define UV2H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
2051 2740
2052union uv2h_lb_bau_sb_activation_status_2_u { 2741#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_SHFT 0
2742#define UV3H_LB_BAU_SB_ACTIVATION_STATUS_2_AUX_ERROR_MASK 0xffffffffffffffffUL
2743
2744union uvxh_lb_bau_sb_activation_status_2_u {
2053 unsigned long v; 2745 unsigned long v;
2746 struct uvxh_lb_bau_sb_activation_status_2_s {
2747 unsigned long aux_error:64; /* RW */
2748 } sx;
2054 struct uv2h_lb_bau_sb_activation_status_2_s { 2749 struct uv2h_lb_bau_sb_activation_status_2_s {
2055 unsigned long aux_error:64; /* RW */ 2750 unsigned long aux_error:64; /* RW */
2056 } s1; 2751 } s2;
2752 struct uv3h_lb_bau_sb_activation_status_2_s {
2753 unsigned long aux_error:64; /* RW */
2754 } s3;
2057}; 2755};
2058 2756
2059/* ========================================================================= */ 2757/* ========================================================================= */
@@ -2073,5 +2771,87 @@ union uv1h_lb_target_physical_apic_id_mask_u {
2073 } s1; 2771 } s1;
2074}; 2772};
2075 2773
2774/* ========================================================================= */
2775/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR */
2776/* ========================================================================= */
2777#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR 0x1603000UL
2778
2779#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT 26
2780#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_SHFT 46
2781#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_SHFT 63
2782#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_MASK 0x00003ffffc000000UL
2783#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_M_IO_MASK 0x000fc00000000000UL
2784#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_ENABLE_MASK 0x8000000000000000UL
2785
2786union uv3h_rh_gam_mmioh_overlay_config0_mmr_u {
2787 unsigned long v;
2788 struct uv3h_rh_gam_mmioh_overlay_config0_mmr_s {
2789 unsigned long rsvd_0_25:26;
2790 unsigned long base:20; /* RW */
2791 unsigned long m_io:6; /* RW */
2792 unsigned long n_io:4;
2793 unsigned long rsvd_56_62:7;
2794 unsigned long enable:1; /* RW */
2795 } s3;
2796};
2797
2798/* ========================================================================= */
2799/* UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR */
2800/* ========================================================================= */
2801#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1604000UL
2802
2803#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_SHFT 26
2804#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_SHFT 46
2805#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_SHFT 63
2806#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_BASE_MASK 0x00003ffffc000000UL
2807#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_M_IO_MASK 0x000fc00000000000UL
2808#define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR_ENABLE_MASK 0x8000000000000000UL
2809
2810union uv3h_rh_gam_mmioh_overlay_config1_mmr_u {
2811 unsigned long v;
2812 struct uv3h_rh_gam_mmioh_overlay_config1_mmr_s {
2813 unsigned long rsvd_0_25:26;
2814 unsigned long base:20; /* RW */
2815 unsigned long m_io:6; /* RW */
2816 unsigned long n_io:4;
2817 unsigned long rsvd_56_62:7;
2818 unsigned long enable:1; /* RW */
2819 } s3;
2820};
2821
2822/* ========================================================================= */
2823/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR */
2824/* ========================================================================= */
2825#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR 0x1603800UL
2826#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH 128
2827
2828#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_SHFT 0
2829#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK 0x0000000000007fffUL
2830
2831union uv3h_rh_gam_mmioh_redirect_config0_mmr_u {
2832 unsigned long v;
2833 struct uv3h_rh_gam_mmioh_redirect_config0_mmr_s {
2834 unsigned long nasid:15; /* RW */
2835 unsigned long rsvd_15_63:49;
2836 } s3;
2837};
2838
2839/* ========================================================================= */
2840/* UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR */
2841/* ========================================================================= */
2842#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR 0x1604800UL
2843#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_DEPTH 128
2844
2845#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_SHFT 0
2846#define UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR_NASID_MASK 0x0000000000007fffUL
2847
2848union uv3h_rh_gam_mmioh_redirect_config1_mmr_u {
2849 unsigned long v;
2850 struct uv3h_rh_gam_mmioh_redirect_config1_mmr_s {
2851 unsigned long nasid:15; /* RW */
2852 unsigned long rsvd_15_63:49;
2853 } s3;
2854};
2855
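Each UV3 MMIOH overlay pairs with its 128-deep redirect table, mapping 2^m_io-byte slices of the window to a destination nasid. A sketch of the lookup under that assumption (slice index taken as (paddr - base) >> m_io):

    static int example_mmioh0_nasid(unsigned long paddr)
    {
            union uv3h_rh_gam_mmioh_overlay_config0_mmr_u ov;
            unsigned long base, idx;

            ov.v = uv_read_local_mmr(UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR);
            if (!ov.s3.enable)
                    return -1;
            base = (unsigned long)ov.s3.base <<
                   UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
            idx = (paddr - base) >> ov.s3.m_io;
            if (idx >= UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH)
                    return -1;
            return uv_read_local_mmr(UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR +
                                     idx * 8) &
                   UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_NASID_MASK;
    }
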
2076 2856
2077#endif /* _ASM_X86_UV_UV_MMRS_H */ 2857#endif /* _ASM_X86_UV_UV_MMRS_H */
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 3b2ce8fc995a..d8d99222b36a 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -169,19 +169,38 @@ struct x86_platform_ops {
169}; 169};
170 170
171struct pci_dev; 171struct pci_dev;
172struct msi_msg;
172 173
173struct x86_msi_ops { 174struct x86_msi_ops {
174 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); 175 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
176 void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
177 unsigned int dest, struct msi_msg *msg,
178 u8 hpet_id);
175 void (*teardown_msi_irq)(unsigned int irq); 179 void (*teardown_msi_irq)(unsigned int irq);
176 void (*teardown_msi_irqs)(struct pci_dev *dev); 180 void (*teardown_msi_irqs)(struct pci_dev *dev);
177 void (*restore_msi_irqs)(struct pci_dev *dev, int irq); 181 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
182 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
178}; 183};
179 184
185struct IO_APIC_route_entry;
186struct io_apic_irq_attr;
187struct irq_data;
188struct cpumask;
189
180struct x86_io_apic_ops { 190struct x86_io_apic_ops {
181 void (*init) (void); 191 void (*init) (void);
182 unsigned int (*read) (unsigned int apic, unsigned int reg); 192 unsigned int (*read) (unsigned int apic, unsigned int reg);
183 void (*write) (unsigned int apic, unsigned int reg, unsigned int value); 193 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
184 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value); 194 void (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
195 void (*disable)(void);
196 void (*print_entries)(unsigned int apic, unsigned int nr_entries);
197 int (*set_affinity)(struct irq_data *data,
198 const struct cpumask *mask,
199 bool force);
200 int (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
201 unsigned int destination, int vector,
202 struct io_apic_irq_attr *attr);
203 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
185}; 204};
186 205
187extern struct x86_init_ops x86_init; 206extern struct x86_init_ops x86_init;
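The new hooks exist so interrupt remapping (or a hypervisor layer) can interpose without #ifdefs in the IO-APIC/MSI core. A sketch of such an override; the irq_remapping_* names are purely illustrative, not functions declared by this header:

    static void __init example_install_remap_ops(void)
    {
            x86_msi.compose_msi_msg  = irq_remapping_compose_msi_msg;  /* hypothetical */
            x86_msi.setup_hpet_msi   = irq_remapping_setup_hpet_msi;   /* hypothetical */
            x86_io_apic.set_affinity = irq_remapping_set_affinity;     /* hypothetical */
    }
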
diff --git a/arch/x86/include/asm/xor.h b/arch/x86/include/asm/xor.h
index f8fde90bc45e..d8829751b3f8 100644
--- a/arch/x86/include/asm/xor.h
+++ b/arch/x86/include/asm/xor.h
@@ -1,10 +1,499 @@
1#ifdef CONFIG_KMEMCHECK 1#ifdef CONFIG_KMEMCHECK
2/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */ 2/* kmemcheck doesn't handle MMX/SSE/SSE2 instructions */
3# include <asm-generic/xor.h> 3# include <asm-generic/xor.h>
4#elif !defined(_ASM_X86_XOR_H)
5#define _ASM_X86_XOR_H
6
7/*
8 * Optimized RAID-5 checksumming functions for SSE.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * You should have received a copy of the GNU General Public License
16 * (for example /usr/src/linux/COPYING); if not, write to the Free
17 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/*
21 * Cache avoiding checksumming functions utilizing KNI instructions
22 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
23 */
24
25/*
26 * Based on
27 * High-speed RAID5 checksumming functions utilizing SSE instructions.
28 * Copyright (C) 1998 Ingo Molnar.
29 */
30
31/*
32 * x86-64 changes / gcc fixes from Andi Kleen.
33 * Copyright 2002 Andi Kleen, SuSE Labs.
34 *
35 * This hasn't been optimized for the hammer yet, but there are likely
36 * no advantages to be gotten from x86-64 here anyways.
37 */
38
39#include <asm/i387.h>
40
41#ifdef CONFIG_X86_32
42/* reduce register pressure */
43# define XOR_CONSTANT_CONSTRAINT "i"
4#else 44#else
45# define XOR_CONSTANT_CONSTRAINT "re"
46#endif
47
48#define OFFS(x) "16*("#x")"
49#define PF_OFFS(x) "256+16*("#x")"
50#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
51#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
52#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
53#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
54#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
55#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
56#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
57#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
58#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
59#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
60#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
61#define NOP(x)
62
63#define BLK64(pf, op, i) \
64 pf(i) \
65 op(i, 0) \
66 op(i + 1, 1) \
67 op(i + 2, 2) \
68 op(i + 3, 3)
69
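For reference, one BLK64() instance covers 64 bytes (four 16-byte movaps slots) behind a single prefetch; expanding the macros above:

    BLK64(PF0, LD, 0)
        => PF0(0) LD(0, 0) LD(1, 1) LD(2, 2) LD(3, 3)

so the pf64 variants below issue one prefetchnta per 64-byte chunk, versus one per 32 bytes in the interleaved BLOCK() bodies.
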
70static void
71xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
72{
73 unsigned long lines = bytes >> 8;
74
75 kernel_fpu_begin();
76
77 asm volatile(
78#undef BLOCK
79#define BLOCK(i) \
80 LD(i, 0) \
81 LD(i + 1, 1) \
82 PF1(i) \
83 PF1(i + 2) \
84 LD(i + 2, 2) \
85 LD(i + 3, 3) \
86 PF0(i + 4) \
87 PF0(i + 6) \
88 XO1(i, 0) \
89 XO1(i + 1, 1) \
90 XO1(i + 2, 2) \
91 XO1(i + 3, 3) \
92 ST(i, 0) \
93 ST(i + 1, 1) \
94 ST(i + 2, 2) \
95 ST(i + 3, 3) \
96
97
98 PF0(0)
99 PF0(2)
100
101 " .align 32 ;\n"
102 " 1: ;\n"
103
104 BLOCK(0)
105 BLOCK(4)
106 BLOCK(8)
107 BLOCK(12)
108
109 " add %[inc], %[p1] ;\n"
110 " add %[inc], %[p2] ;\n"
111 " dec %[cnt] ;\n"
112 " jnz 1b ;\n"
113 : [cnt] "+r" (lines),
114 [p1] "+r" (p1), [p2] "+r" (p2)
115 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
116 : "memory");
117
118 kernel_fpu_end();
119}
120
121static void
122xor_sse_2_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2)
123{
124 unsigned long lines = bytes >> 8;
125
126 kernel_fpu_begin();
127
128 asm volatile(
129#undef BLOCK
130#define BLOCK(i) \
131 BLK64(PF0, LD, i) \
132 BLK64(PF1, XO1, i) \
133 BLK64(NOP, ST, i) \
134
135 " .align 32 ;\n"
136 " 1: ;\n"
137
138 BLOCK(0)
139 BLOCK(4)
140 BLOCK(8)
141 BLOCK(12)
142
143 " add %[inc], %[p1] ;\n"
144 " add %[inc], %[p2] ;\n"
145 " dec %[cnt] ;\n"
146 " jnz 1b ;\n"
147 : [cnt] "+r" (lines),
148 [p1] "+r" (p1), [p2] "+r" (p2)
149 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
150 : "memory");
151
152 kernel_fpu_end();
153}
154
155static void
156xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
157 unsigned long *p3)
158{
159 unsigned long lines = bytes >> 8;
160
161 kernel_fpu_begin();
162
163 asm volatile(
164#undef BLOCK
165#define BLOCK(i) \
166 PF1(i) \
167 PF1(i + 2) \
168 LD(i, 0) \
169 LD(i + 1, 1) \
170 LD(i + 2, 2) \
171 LD(i + 3, 3) \
172 PF2(i) \
173 PF2(i + 2) \
174 PF0(i + 4) \
175 PF0(i + 6) \
176 XO1(i, 0) \
177 XO1(i + 1, 1) \
178 XO1(i + 2, 2) \
179 XO1(i + 3, 3) \
180 XO2(i, 0) \
181 XO2(i + 1, 1) \
182 XO2(i + 2, 2) \
183 XO2(i + 3, 3) \
184 ST(i, 0) \
185 ST(i + 1, 1) \
186 ST(i + 2, 2) \
187 ST(i + 3, 3) \
188
189
190 PF0(0)
191 PF0(2)
192
193 " .align 32 ;\n"
194 " 1: ;\n"
195
196 BLOCK(0)
197 BLOCK(4)
198 BLOCK(8)
199 BLOCK(12)
200
201 " add %[inc], %[p1] ;\n"
202 " add %[inc], %[p2] ;\n"
203 " add %[inc], %[p3] ;\n"
204 " dec %[cnt] ;\n"
205 " jnz 1b ;\n"
206 : [cnt] "+r" (lines),
207 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
208 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
209 : "memory");
210
211 kernel_fpu_end();
212}
213
214static void
215xor_sse_3_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
216 unsigned long *p3)
217{
218 unsigned long lines = bytes >> 8;
219
220 kernel_fpu_begin();
221
222 asm volatile(
223#undef BLOCK
224#define BLOCK(i) \
225 BLK64(PF0, LD, i) \
226 BLK64(PF1, XO1, i) \
227 BLK64(PF2, XO2, i) \
228 BLK64(NOP, ST, i) \
229
230 " .align 32 ;\n"
231 " 1: ;\n"
232
233 BLOCK(0)
234 BLOCK(4)
235 BLOCK(8)
236 BLOCK(12)
237
238 " add %[inc], %[p1] ;\n"
239 " add %[inc], %[p2] ;\n"
240 " add %[inc], %[p3] ;\n"
241 " dec %[cnt] ;\n"
242 " jnz 1b ;\n"
243 : [cnt] "+r" (lines),
244 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
245 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
246 : "memory");
247
248 kernel_fpu_end();
249}
250
251static void
252xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
253 unsigned long *p3, unsigned long *p4)
254{
255 unsigned long lines = bytes >> 8;
256
257 kernel_fpu_begin();
258
259 asm volatile(
260#undef BLOCK
261#define BLOCK(i) \
262 PF1(i) \
263 PF1(i + 2) \
264 LD(i, 0) \
265 LD(i + 1, 1) \
266 LD(i + 2, 2) \
267 LD(i + 3, 3) \
268 PF2(i) \
269 PF2(i + 2) \
270 XO1(i, 0) \
271 XO1(i + 1, 1) \
272 XO1(i + 2, 2) \
273 XO1(i + 3, 3) \
274 PF3(i) \
275 PF3(i + 2) \
276 PF0(i + 4) \
277 PF0(i + 6) \
278 XO2(i, 0) \
279 XO2(i + 1, 1) \
280 XO2(i + 2, 2) \
281 XO2(i + 3, 3) \
282 XO3(i, 0) \
283 XO3(i + 1, 1) \
284 XO3(i + 2, 2) \
285 XO3(i + 3, 3) \
286 ST(i, 0) \
287 ST(i + 1, 1) \
288 ST(i + 2, 2) \
289 ST(i + 3, 3) \
290
291
292 PF0(0)
293 PF0(2)
294
295 " .align 32 ;\n"
296 " 1: ;\n"
297
298 BLOCK(0)
299 BLOCK(4)
300 BLOCK(8)
301 BLOCK(12)
302
303 " add %[inc], %[p1] ;\n"
304 " add %[inc], %[p2] ;\n"
305 " add %[inc], %[p3] ;\n"
306 " add %[inc], %[p4] ;\n"
307 " dec %[cnt] ;\n"
308 " jnz 1b ;\n"
309 : [cnt] "+r" (lines), [p1] "+r" (p1),
310 [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
311 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
312 : "memory");
313
314 kernel_fpu_end();
315}
316
317static void
318xor_sse_4_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
319 unsigned long *p3, unsigned long *p4)
320{
321 unsigned long lines = bytes >> 8;
322
323 kernel_fpu_begin();
324
325 asm volatile(
326#undef BLOCK
327#define BLOCK(i) \
328 BLK64(PF0, LD, i) \
329 BLK64(PF1, XO1, i) \
330 BLK64(PF2, XO2, i) \
331 BLK64(PF3, XO3, i) \
332 BLK64(NOP, ST, i) \
333
334 " .align 32 ;\n"
335 " 1: ;\n"
336
337 BLOCK(0)
338 BLOCK(4)
339 BLOCK(8)
340 BLOCK(12)
341
342 " add %[inc], %[p1] ;\n"
343 " add %[inc], %[p2] ;\n"
344 " add %[inc], %[p3] ;\n"
345 " add %[inc], %[p4] ;\n"
346 " dec %[cnt] ;\n"
347 " jnz 1b ;\n"
348 : [cnt] "+r" (lines), [p1] "+r" (p1),
349 [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
350 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
351 : "memory");
352
353 kernel_fpu_end();
354}
355
356static void
357xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
358 unsigned long *p3, unsigned long *p4, unsigned long *p5)
359{
360 unsigned long lines = bytes >> 8;
361
362 kernel_fpu_begin();
363
364 asm volatile(
365#undef BLOCK
366#define BLOCK(i) \
367 PF1(i) \
368 PF1(i + 2) \
369 LD(i, 0) \
370 LD(i + 1, 1) \
371 LD(i + 2, 2) \
372 LD(i + 3, 3) \
373 PF2(i) \
374 PF2(i + 2) \
375 XO1(i, 0) \
376 XO1(i + 1, 1) \
377 XO1(i + 2, 2) \
378 XO1(i + 3, 3) \
379 PF3(i) \
380 PF3(i + 2) \
381 XO2(i, 0) \
382 XO2(i + 1, 1) \
383 XO2(i + 2, 2) \
384 XO2(i + 3, 3) \
385 PF4(i) \
386 PF4(i + 2) \
387 PF0(i + 4) \
388 PF0(i + 6) \
389 XO3(i, 0) \
390 XO3(i + 1, 1) \
391 XO3(i + 2, 2) \
392 XO3(i + 3, 3) \
393 XO4(i, 0) \
394 XO4(i + 1, 1) \
395 XO4(i + 2, 2) \
396 XO4(i + 3, 3) \
397 ST(i, 0) \
398 ST(i + 1, 1) \
399 ST(i + 2, 2) \
400 ST(i + 3, 3) \
401
402
403 PF0(0)
404 PF0(2)
405
406 " .align 32 ;\n"
407 " 1: ;\n"
408
409 BLOCK(0)
410 BLOCK(4)
411 BLOCK(8)
412 BLOCK(12)
413
414 " add %[inc], %[p1] ;\n"
415 " add %[inc], %[p2] ;\n"
416 " add %[inc], %[p3] ;\n"
417 " add %[inc], %[p4] ;\n"
418 " add %[inc], %[p5] ;\n"
419 " dec %[cnt] ;\n"
420 " jnz 1b ;\n"
421 : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
422 [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
423 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
424 : "memory");
425
426 kernel_fpu_end();
427}
428
429static void
430xor_sse_5_pf64(unsigned long bytes, unsigned long *p1, unsigned long *p2,
431 unsigned long *p3, unsigned long *p4, unsigned long *p5)
432{
433 unsigned long lines = bytes >> 8;
434
435 kernel_fpu_begin();
436
437 asm volatile(
438#undef BLOCK
439#define BLOCK(i) \
440 BLK64(PF0, LD, i) \
441 BLK64(PF1, XO1, i) \
442 BLK64(PF2, XO2, i) \
443 BLK64(PF3, XO3, i) \
444 BLK64(PF4, XO4, i) \
445 BLK64(NOP, ST, i) \
446
447 " .align 32 ;\n"
448 " 1: ;\n"
449
450 BLOCK(0)
451 BLOCK(4)
452 BLOCK(8)
453 BLOCK(12)
454
455 " add %[inc], %[p1] ;\n"
456 " add %[inc], %[p2] ;\n"
457 " add %[inc], %[p3] ;\n"
458 " add %[inc], %[p4] ;\n"
459 " add %[inc], %[p5] ;\n"
460 " dec %[cnt] ;\n"
461 " jnz 1b ;\n"
462 : [cnt] "+r" (lines), [p1] "+r" (p1), [p2] "+r" (p2),
463 [p3] "+r" (p3), [p4] "+r" (p4), [p5] "+r" (p5)
464 : [inc] XOR_CONSTANT_CONSTRAINT (256UL)
465 : "memory");
466
467 kernel_fpu_end();
468}
469
470static struct xor_block_template xor_block_sse_pf64 = {
471 .name = "prefetch64-sse",
472 .do_2 = xor_sse_2_pf64,
473 .do_3 = xor_sse_3_pf64,
474 .do_4 = xor_sse_4_pf64,
475 .do_5 = xor_sse_5_pf64,
476};
477
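Every do_N routine consumes whole 256-byte lines (bytes >> 8), so callers must pass a multiple of 256. A usage sketch through the template; BUG_ON() is from <linux/bug.h>:

    static void example_xor_dst_src(unsigned long *dst, unsigned long *src,
                                    unsigned long bytes)
    {
            BUG_ON(bytes & 255);            /* whole 256-byte lines only */
            xor_block_sse_pf64.do_2(bytes, dst, src);      /* dst ^= src */
    }
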
478#undef LD
479#undef XO1
480#undef XO2
481#undef XO3
482#undef XO4
483#undef ST
484#undef NOP
485#undef BLK64
486#undef BLOCK
487
488#undef XOR_CONSTANT_CONSTRAINT
489
5#ifdef CONFIG_X86_32 490#ifdef CONFIG_X86_32
6# include <asm/xor_32.h> 491# include <asm/xor_32.h>
7#else 492#else
8# include <asm/xor_64.h> 493# include <asm/xor_64.h>
9#endif 494#endif
10#endif 495
496#define XOR_SELECT_TEMPLATE(FASTEST) \
497 AVX_SELECT(FASTEST)
498
499#endif /* _ASM_X86_XOR_H */
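The generic layer applies this after benchmarking, letting AVX_SELECT() (from <asm/xor_avx.h>) substitute the AVX template on capable CPUs; roughly what calibrate_xor_blocks() in crypto/xor.c does:

    #ifdef XOR_SELECT_TEMPLATE
            fastest = XOR_SELECT_TEMPLATE(fastest);
    #endif
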
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index f79cb7ec0e06..ce05722e3c68 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -2,7 +2,7 @@
2#define _ASM_X86_XOR_32_H 2#define _ASM_X86_XOR_32_H
3 3
4/* 4/*
5 * Optimized RAID-5 checksumming functions for MMX and SSE. 5 * Optimized RAID-5 checksumming functions for MMX.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -529,290 +529,6 @@ static struct xor_block_template xor_block_p5_mmx = {
529 .do_5 = xor_p5_mmx_5, 529 .do_5 = xor_p5_mmx_5,
530}; 530};
531 531
532/*
533 * Cache avoiding checksumming functions utilizing KNI instructions
534 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
535 */
536
537#define OFFS(x) "16*("#x")"
538#define PF_OFFS(x) "256+16*("#x")"
539#define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n"
540#define LD(x, y) " movaps "OFFS(x)"(%1), %%xmm"#y" ;\n"
541#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%1) ;\n"
542#define PF1(x) " prefetchnta "PF_OFFS(x)"(%2) ;\n"
543#define PF2(x) " prefetchnta "PF_OFFS(x)"(%3) ;\n"
544#define PF3(x) " prefetchnta "PF_OFFS(x)"(%4) ;\n"
545#define PF4(x) " prefetchnta "PF_OFFS(x)"(%5) ;\n"
546#define PF5(x) " prefetchnta "PF_OFFS(x)"(%6) ;\n"
547#define XO1(x, y) " xorps "OFFS(x)"(%2), %%xmm"#y" ;\n"
548#define XO2(x, y) " xorps "OFFS(x)"(%3), %%xmm"#y" ;\n"
549#define XO3(x, y) " xorps "OFFS(x)"(%4), %%xmm"#y" ;\n"
550#define XO4(x, y) " xorps "OFFS(x)"(%5), %%xmm"#y" ;\n"
551#define XO5(x, y) " xorps "OFFS(x)"(%6), %%xmm"#y" ;\n"
552
553
554static void
555xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
556{
557 unsigned long lines = bytes >> 8;
558
559 kernel_fpu_begin();
560
561 asm volatile(
562#undef BLOCK
563#define BLOCK(i) \
564 LD(i, 0) \
565 LD(i + 1, 1) \
566 PF1(i) \
567 PF1(i + 2) \
568 LD(i + 2, 2) \
569 LD(i + 3, 3) \
570 PF0(i + 4) \
571 PF0(i + 6) \
572 XO1(i, 0) \
573 XO1(i + 1, 1) \
574 XO1(i + 2, 2) \
575 XO1(i + 3, 3) \
576 ST(i, 0) \
577 ST(i + 1, 1) \
578 ST(i + 2, 2) \
579 ST(i + 3, 3) \
580
581
582 PF0(0)
583 PF0(2)
584
585 " .align 32 ;\n"
586 " 1: ;\n"
587
588 BLOCK(0)
589 BLOCK(4)
590 BLOCK(8)
591 BLOCK(12)
592
593 " addl $256, %1 ;\n"
594 " addl $256, %2 ;\n"
595 " decl %0 ;\n"
596 " jnz 1b ;\n"
597 : "+r" (lines),
598 "+r" (p1), "+r" (p2)
599 :
600 : "memory");
601
602 kernel_fpu_end();
603}
604
605static void
606xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
607 unsigned long *p3)
608{
609 unsigned long lines = bytes >> 8;
610
611 kernel_fpu_begin();
612
613 asm volatile(
614#undef BLOCK
615#define BLOCK(i) \
616 PF1(i) \
617 PF1(i + 2) \
618 LD(i,0) \
619 LD(i + 1, 1) \
620 LD(i + 2, 2) \
621 LD(i + 3, 3) \
622 PF2(i) \
623 PF2(i + 2) \
624 PF0(i + 4) \
625 PF0(i + 6) \
626 XO1(i,0) \
627 XO1(i + 1, 1) \
628 XO1(i + 2, 2) \
629 XO1(i + 3, 3) \
630 XO2(i,0) \
631 XO2(i + 1, 1) \
632 XO2(i + 2, 2) \
633 XO2(i + 3, 3) \
634 ST(i,0) \
635 ST(i + 1, 1) \
636 ST(i + 2, 2) \
637 ST(i + 3, 3) \
638
639
640 PF0(0)
641 PF0(2)
642
643 " .align 32 ;\n"
644 " 1: ;\n"
645
646 BLOCK(0)
647 BLOCK(4)
648 BLOCK(8)
649 BLOCK(12)
650
651 " addl $256, %1 ;\n"
652 " addl $256, %2 ;\n"
653 " addl $256, %3 ;\n"
654 " decl %0 ;\n"
655 " jnz 1b ;\n"
656 : "+r" (lines),
657 "+r" (p1), "+r"(p2), "+r"(p3)
658 :
659 : "memory" );
660
661 kernel_fpu_end();
662}
663
664static void
665xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
666 unsigned long *p3, unsigned long *p4)
667{
668 unsigned long lines = bytes >> 8;
669
670 kernel_fpu_begin();
671
672 asm volatile(
673#undef BLOCK
674#define BLOCK(i) \
675 PF1(i) \
676 PF1(i + 2) \
677 LD(i,0) \
678 LD(i + 1, 1) \
679 LD(i + 2, 2) \
680 LD(i + 3, 3) \
681 PF2(i) \
682 PF2(i + 2) \
683 XO1(i,0) \
684 XO1(i + 1, 1) \
685 XO1(i + 2, 2) \
686 XO1(i + 3, 3) \
687 PF3(i) \
688 PF3(i + 2) \
689 PF0(i + 4) \
690 PF0(i + 6) \
691 XO2(i,0) \
692 XO2(i + 1, 1) \
693 XO2(i + 2, 2) \
694 XO2(i + 3, 3) \
695 XO3(i,0) \
696 XO3(i + 1, 1) \
697 XO3(i + 2, 2) \
698 XO3(i + 3, 3) \
699 ST(i,0) \
700 ST(i + 1, 1) \
701 ST(i + 2, 2) \
702 ST(i + 3, 3) \
703
704
705 PF0(0)
706 PF0(2)
707
708 " .align 32 ;\n"
709 " 1: ;\n"
710
711 BLOCK(0)
712 BLOCK(4)
713 BLOCK(8)
714 BLOCK(12)
715
716 " addl $256, %1 ;\n"
717 " addl $256, %2 ;\n"
718 " addl $256, %3 ;\n"
719 " addl $256, %4 ;\n"
720 " decl %0 ;\n"
721 " jnz 1b ;\n"
722 : "+r" (lines),
723 "+r" (p1), "+r" (p2), "+r" (p3), "+r" (p4)
724 :
725 : "memory" );
726
727 kernel_fpu_end();
728}
729
730static void
731xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
732 unsigned long *p3, unsigned long *p4, unsigned long *p5)
733{
734 unsigned long lines = bytes >> 8;
735
736 kernel_fpu_begin();
737
738 /* Make sure GCC forgets anything it knows about p4 or p5,
739 such that it won't pass to the asm volatile below a
740 register that is shared with any other variable. That's
741 because we modify p4 and p5 there, but we can't mark them
742 as read/write, otherwise we'd overflow the 10-asm-operands
743 limit of GCC < 3.1. */
744 asm("" : "+r" (p4), "+r" (p5));
745
746 asm volatile(
747#undef BLOCK
748#define BLOCK(i) \
749 PF1(i) \
750 PF1(i + 2) \
751 LD(i,0) \
752 LD(i + 1, 1) \
753 LD(i + 2, 2) \
754 LD(i + 3, 3) \
755 PF2(i) \
756 PF2(i + 2) \
757 XO1(i,0) \
758 XO1(i + 1, 1) \
759 XO1(i + 2, 2) \
760 XO1(i + 3, 3) \
761 PF3(i) \
762 PF3(i + 2) \
763 XO2(i,0) \
764 XO2(i + 1, 1) \
765 XO2(i + 2, 2) \
766 XO2(i + 3, 3) \
767 PF4(i) \
768 PF4(i + 2) \
769 PF0(i + 4) \
770 PF0(i + 6) \
771 XO3(i,0) \
772 XO3(i + 1, 1) \
773 XO3(i + 2, 2) \
774 XO3(i + 3, 3) \
775 XO4(i,0) \
776 XO4(i + 1, 1) \
777 XO4(i + 2, 2) \
778 XO4(i + 3, 3) \
779 ST(i,0) \
780 ST(i + 1, 1) \
781 ST(i + 2, 2) \
782 ST(i + 3, 3) \
783
784
785 PF0(0)
786 PF0(2)
787
788 " .align 32 ;\n"
789 " 1: ;\n"
790
791 BLOCK(0)
792 BLOCK(4)
793 BLOCK(8)
794 BLOCK(12)
795
796 " addl $256, %1 ;\n"
797 " addl $256, %2 ;\n"
798 " addl $256, %3 ;\n"
799 " addl $256, %4 ;\n"
800 " addl $256, %5 ;\n"
801 " decl %0 ;\n"
802 " jnz 1b ;\n"
803 : "+r" (lines),
804 "+r" (p1), "+r" (p2), "+r" (p3)
805 : "r" (p4), "r" (p5)
806 : "memory");
807
808 /* p4 and p5 were modified, and now the variables are dead.
809 Clobber them just to be sure nobody does something stupid
810 like assuming they have some legal value. */
811 asm("" : "=r" (p4), "=r" (p5));
812
813 kernel_fpu_end();
814}
815
816static struct xor_block_template xor_block_pIII_sse = { 532static struct xor_block_template xor_block_pIII_sse = {
817 .name = "pIII_sse", 533 .name = "pIII_sse",
818 .do_2 = xor_sse_2, 534 .do_2 = xor_sse_2,
@@ -827,26 +543,25 @@ static struct xor_block_template xor_block_pIII_sse = {
827/* Also try the generic routines. */ 543/* Also try the generic routines. */
828#include <asm-generic/xor.h> 544#include <asm-generic/xor.h>
829 545
546/* We force the use of the SSE xor block because it can write around L2.
547 We may also be able to load into the L1 only depending on how the cpu
548 deals with a load to a line that is being prefetched. */
830#undef XOR_TRY_TEMPLATES 549#undef XOR_TRY_TEMPLATES
831#define XOR_TRY_TEMPLATES \ 550#define XOR_TRY_TEMPLATES \
832do { \ 551do { \
833 xor_speed(&xor_block_8regs); \
834 xor_speed(&xor_block_8regs_p); \
835 xor_speed(&xor_block_32regs); \
836 xor_speed(&xor_block_32regs_p); \
837 AVX_XOR_SPEED; \ 552 AVX_XOR_SPEED; \
838 if (cpu_has_xmm) \ 553 if (cpu_has_xmm) { \
839 xor_speed(&xor_block_pIII_sse); \ 554 xor_speed(&xor_block_pIII_sse); \
840 if (cpu_has_mmx) { \ 555 xor_speed(&xor_block_sse_pf64); \
556 } else if (cpu_has_mmx) { \
841 xor_speed(&xor_block_pII_mmx); \ 557 xor_speed(&xor_block_pII_mmx); \
842 xor_speed(&xor_block_p5_mmx); \ 558 xor_speed(&xor_block_p5_mmx); \
559 } else { \
560 xor_speed(&xor_block_8regs); \
561 xor_speed(&xor_block_8regs_p); \
562 xor_speed(&xor_block_32regs); \
563 xor_speed(&xor_block_32regs_p); \
843 } \ 564 } \
844} while (0) 565} while (0)
845 566
846/* We force the use of the SSE xor block because it can write around L2.
847 We may also be able to load into the L1 only depending on how the cpu
848 deals with a load to a line that is being prefetched. */
849#define XOR_SELECT_TEMPLATE(FASTEST) \
850 AVX_SELECT(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
851
852#endif /* _ASM_X86_XOR_32_H */ 567#endif /* _ASM_X86_XOR_32_H */
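The XOR_TRY_TEMPLATES / XOR_SELECT_TEMPLATE pair implements a benchmark-then-choose scheme: each candidate routine is timed by xor_speed() at boot, and the selector may still override the measured winner (here forcing the SSE block because its non-temporal stores bypass L2). A simplified, self-contained user-space sketch of the pattern, with invented names and a single plain-C candidate:

        #include <stdio.h>
        #include <time.h>

        struct xor_tmpl {
                const char *name;
                void (*do_2)(unsigned long n, unsigned long *a, unsigned long *b);
        };

        static void xor_c(unsigned long n, unsigned long *a, unsigned long *b)
        {
                while (n--)
                        a[n] ^= b[n];
        }

        static struct xor_tmpl candidates[] = {
                { "plain_c", xor_c },   /* a real port would also list SSE/AVX bodies */
        };

        int main(void)
        {
                static unsigned long a[1024], b[1024];
                struct xor_tmpl *best = NULL;
                double best_t = 1e9;

                for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
                        clock_t t0 = clock();

                        for (int r = 0; r < 10000; r++)
                                candidates[i].do_2(1024, a, b);
                        double t = (double)(clock() - t0) / CLOCKS_PER_SEC;
                        if (t < best_t) {
                                best_t = t;
                                best = &candidates[i];
                        }
                }
                /* XOR_SELECT_TEMPLATE can veto the measurement; here we
                 * simply report the winner. */
                printf("fastest: %s (%.4f s)\n", best->name, best_t);
                return 0;
        }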
diff --git a/arch/x86/include/asm/xor_64.h b/arch/x86/include/asm/xor_64.h
index 87ac522c4af5..546f1e3b87cc 100644
--- a/arch/x86/include/asm/xor_64.h
+++ b/arch/x86/include/asm/xor_64.h
@@ -1,301 +1,6 @@
1#ifndef _ASM_X86_XOR_64_H 1#ifndef _ASM_X86_XOR_64_H
2#define _ASM_X86_XOR_64_H 2#define _ASM_X86_XOR_64_H
3 3
4/*
5 * Optimized RAID-5 checksumming functions for MMX and SSE.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option)
10 * any later version.
11 *
12 * You should have received a copy of the GNU General Public License
13 * (for example /usr/src/linux/COPYING); if not, write to the Free
14 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17
18/*
19 * Cache avoiding checksumming functions utilizing KNI instructions
20 * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo)
21 */
22
23/*
24 * Based on
25 * High-speed RAID5 checksumming functions utilizing SSE instructions.
26 * Copyright (C) 1998 Ingo Molnar.
27 */
28
29/*
30 * x86-64 changes / gcc fixes from Andi Kleen.
31 * Copyright 2002 Andi Kleen, SuSE Labs.
32 *
33 * This hasn't been optimized for the hammer yet, but there are likely
34 * no advantages to be gotten from x86-64 here anyways.
35 */
36
37#include <asm/i387.h>
38
39#define OFFS(x) "16*("#x")"
40#define PF_OFFS(x) "256+16*("#x")"
41#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
42#define LD(x, y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
43#define ST(x, y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
44#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
45#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
46#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
47#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
48#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
49#define XO1(x, y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
50#define XO2(x, y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
51#define XO3(x, y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
52#define XO4(x, y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
53#define XO5(x, y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
54
55
56static void
57xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
58{
59 unsigned int lines = bytes >> 8;
60
61 kernel_fpu_begin();
62
63 asm volatile(
64#undef BLOCK
65#define BLOCK(i) \
66 LD(i, 0) \
67 LD(i + 1, 1) \
68 PF1(i) \
69 PF1(i + 2) \
70 LD(i + 2, 2) \
71 LD(i + 3, 3) \
72 PF0(i + 4) \
73 PF0(i + 6) \
74 XO1(i, 0) \
75 XO1(i + 1, 1) \
76 XO1(i + 2, 2) \
77 XO1(i + 3, 3) \
78 ST(i, 0) \
79 ST(i + 1, 1) \
80 ST(i + 2, 2) \
81 ST(i + 3, 3) \
82
83
84 PF0(0)
85 PF0(2)
86
87 " .align 32 ;\n"
88 " 1: ;\n"
89
90 BLOCK(0)
91 BLOCK(4)
92 BLOCK(8)
93 BLOCK(12)
94
95 " addq %[inc], %[p1] ;\n"
96 " addq %[inc], %[p2] ;\n"
97 " decl %[cnt] ; jnz 1b"
98 : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
99 : [inc] "r" (256UL)
100 : "memory");
101
102 kernel_fpu_end();
103}
104
105static void
106xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
107 unsigned long *p3)
108{
109 unsigned int lines = bytes >> 8;
110
111 kernel_fpu_begin();
112 asm volatile(
113#undef BLOCK
114#define BLOCK(i) \
115 PF1(i) \
116 PF1(i + 2) \
117 LD(i, 0) \
118 LD(i + 1, 1) \
119 LD(i + 2, 2) \
120 LD(i + 3, 3) \
121 PF2(i) \
122 PF2(i + 2) \
123 PF0(i + 4) \
124 PF0(i + 6) \
125 XO1(i, 0) \
126 XO1(i + 1, 1) \
127 XO1(i + 2, 2) \
128 XO1(i + 3, 3) \
129 XO2(i, 0) \
130 XO2(i + 1, 1) \
131 XO2(i + 2, 2) \
132 XO2(i + 3, 3) \
133 ST(i, 0) \
134 ST(i + 1, 1) \
135 ST(i + 2, 2) \
136 ST(i + 3, 3) \
137
138
139 PF0(0)
140 PF0(2)
141
142 " .align 32 ;\n"
143 " 1: ;\n"
144
145 BLOCK(0)
146 BLOCK(4)
147 BLOCK(8)
148 BLOCK(12)
149
150 " addq %[inc], %[p1] ;\n"
151 " addq %[inc], %[p2] ;\n"
152 " addq %[inc], %[p3] ;\n"
153 " decl %[cnt] ; jnz 1b"
154 : [cnt] "+r" (lines),
155 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
156 : [inc] "r" (256UL)
157 : "memory");
158 kernel_fpu_end();
159}
160
161static void
162xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
163 unsigned long *p3, unsigned long *p4)
164{
165 unsigned int lines = bytes >> 8;
166
167 kernel_fpu_begin();
168
169 asm volatile(
170#undef BLOCK
171#define BLOCK(i) \
172 PF1(i) \
173 PF1(i + 2) \
174 LD(i, 0) \
175 LD(i + 1, 1) \
176 LD(i + 2, 2) \
177 LD(i + 3, 3) \
178 PF2(i) \
179 PF2(i + 2) \
180 XO1(i, 0) \
181 XO1(i + 1, 1) \
182 XO1(i + 2, 2) \
183 XO1(i + 3, 3) \
184 PF3(i) \
185 PF3(i + 2) \
186 PF0(i + 4) \
187 PF0(i + 6) \
188 XO2(i, 0) \
189 XO2(i + 1, 1) \
190 XO2(i + 2, 2) \
191 XO2(i + 3, 3) \
192 XO3(i, 0) \
193 XO3(i + 1, 1) \
194 XO3(i + 2, 2) \
195 XO3(i + 3, 3) \
196 ST(i, 0) \
197 ST(i + 1, 1) \
198 ST(i + 2, 2) \
199 ST(i + 3, 3) \
200
201
202 PF0(0)
203 PF0(2)
204
205 " .align 32 ;\n"
206 " 1: ;\n"
207
208 BLOCK(0)
209 BLOCK(4)
210 BLOCK(8)
211 BLOCK(12)
212
213 " addq %[inc], %[p1] ;\n"
214 " addq %[inc], %[p2] ;\n"
215 " addq %[inc], %[p3] ;\n"
216 " addq %[inc], %[p4] ;\n"
217 " decl %[cnt] ; jnz 1b"
218 : [cnt] "+c" (lines),
219 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
220 : [inc] "r" (256UL)
221 : "memory" );
222
223 kernel_fpu_end();
224}
225
226static void
227xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
228 unsigned long *p3, unsigned long *p4, unsigned long *p5)
229{
230 unsigned int lines = bytes >> 8;
231
232 kernel_fpu_begin();
233
234 asm volatile(
235#undef BLOCK
236#define BLOCK(i) \
237 PF1(i) \
238 PF1(i + 2) \
239 LD(i, 0) \
240 LD(i + 1, 1) \
241 LD(i + 2, 2) \
242 LD(i + 3, 3) \
243 PF2(i) \
244 PF2(i + 2) \
245 XO1(i, 0) \
246 XO1(i + 1, 1) \
247 XO1(i + 2, 2) \
248 XO1(i + 3, 3) \
249 PF3(i) \
250 PF3(i + 2) \
251 XO2(i, 0) \
252 XO2(i + 1, 1) \
253 XO2(i + 2, 2) \
254 XO2(i + 3, 3) \
255 PF4(i) \
256 PF4(i + 2) \
257 PF0(i + 4) \
258 PF0(i + 6) \
259 XO3(i, 0) \
260 XO3(i + 1, 1) \
261 XO3(i + 2, 2) \
262 XO3(i + 3, 3) \
263 XO4(i, 0) \
264 XO4(i + 1, 1) \
265 XO4(i + 2, 2) \
266 XO4(i + 3, 3) \
267 ST(i, 0) \
268 ST(i + 1, 1) \
269 ST(i + 2, 2) \
270 ST(i + 3, 3) \
271
272
273 PF0(0)
274 PF0(2)
275
276 " .align 32 ;\n"
277 " 1: ;\n"
278
279 BLOCK(0)
280 BLOCK(4)
281 BLOCK(8)
282 BLOCK(12)
283
284 " addq %[inc], %[p1] ;\n"
285 " addq %[inc], %[p2] ;\n"
286 " addq %[inc], %[p3] ;\n"
287 " addq %[inc], %[p4] ;\n"
288 " addq %[inc], %[p5] ;\n"
289 " decl %[cnt] ; jnz 1b"
290 : [cnt] "+c" (lines),
291 [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
292 [p5] "+r" (p5)
293 : [inc] "r" (256UL)
294 : "memory");
295
296 kernel_fpu_end();
297}
298
299static struct xor_block_template xor_block_sse = { 4static struct xor_block_template xor_block_sse = {
300 .name = "generic_sse", 5 .name = "generic_sse",
301 .do_2 = xor_sse_2, 6 .do_2 = xor_sse_2,
@@ -308,17 +13,15 @@ static struct xor_block_template xor_block_sse = {
308/* Also try the AVX routines */ 13/* Also try the AVX routines */
309#include <asm/xor_avx.h> 14#include <asm/xor_avx.h>
310 15
16/* We force the use of the SSE xor block because it can write around L2.
17 We may also be able to load into the L1 only depending on how the cpu
18 deals with a load to a line that is being prefetched. */
311#undef XOR_TRY_TEMPLATES 19#undef XOR_TRY_TEMPLATES
312#define XOR_TRY_TEMPLATES \ 20#define XOR_TRY_TEMPLATES \
313do { \ 21do { \
314 AVX_XOR_SPEED; \ 22 AVX_XOR_SPEED; \
23 xor_speed(&xor_block_sse_pf64); \
315 xor_speed(&xor_block_sse); \ 24 xor_speed(&xor_block_sse); \
316} while (0) 25} while (0)
317 26
318/* We force the use of the SSE xor block because it can write around L2.
319 We may also be able to load into the L1 only depending on how the cpu
320 deals with a load to a line that is being prefetched. */
321#define XOR_SELECT_TEMPLATE(FASTEST) \
322 AVX_SELECT(&xor_block_sse)
323
324#endif /* _ASM_X86_XOR_64_H */ 27#endif /* _ASM_X86_XOR_64_H */
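Unlike the 32-bit file, the 64-bit routines use GCC's named asm operands ([p1], [cnt], referenced as %[p1]) instead of positional %0/%1, which keeps long asm bodies readable and robust against operand reordering. A trivial runnable sketch of the syntax, assuming an x86-64 target:

        #include <stdio.h>

        int main(void)
        {
                unsigned long x = 40, inc = 2;

                /* Named operands read better than %0/%1 in long asm bodies. */
                asm("addq %[inc], %[x]"
                    : [x] "+r" (x)
                    : [inc] "r" (inc));

                printf("%lu\n", x);     /* prints 42 */
                return 0;
        }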
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 58c829871c31..a0eab85ce7b8 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -4,66 +4,6 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <asm/ioctls.h> 5#include <asm/ioctls.h>
6 6
7/*
8 * Machine Check support for x86
9 */
10
11/* MCG_CAP register defines */
12#define MCG_BANKCNT_MASK 0xff /* Number of Banks */
13#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
14#define MCG_EXT_P (1ULL<<9) /* Extended registers available */
15#define MCG_CMCI_P (1ULL<<10) /* CMCI supported */
16#define MCG_EXT_CNT_MASK 0xff0000 /* Number of Extended registers */
17#define MCG_EXT_CNT_SHIFT 16
18#define MCG_EXT_CNT(c) (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
19#define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */
20
21/* MCG_STATUS register defines */
22#define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */
23#define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */
24#define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */
25
26/* MCi_STATUS register defines */
27#define MCI_STATUS_VAL (1ULL<<63) /* valid error */
28#define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */
29#define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */
30#define MCI_STATUS_EN (1ULL<<60) /* error enabled */
31#define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */
32#define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */
33#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
34#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
35#define MCI_STATUS_AR (1ULL<<55) /* Action required */
36#define MCACOD 0xffff /* MCA Error Code */
37
38/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
39#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
40#define MCACOD_SCRUBMSK 0xfff0
41#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
42#define MCACOD_DATA 0x0134 /* Data Load */
43#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
44
45/* MCi_MISC register defines */
46#define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f)
47#define MCI_MISC_ADDR_MODE(m) (((m) >> 6) & 7)
48#define MCI_MISC_ADDR_SEGOFF 0 /* segment offset */
49#define MCI_MISC_ADDR_LINEAR 1 /* linear address */
50#define MCI_MISC_ADDR_PHYS 2 /* physical address */
51#define MCI_MISC_ADDR_MEM 3 /* memory address */
52#define MCI_MISC_ADDR_GENERIC 7 /* generic */
53
54/* CTL2 register defines */
55#define MCI_CTL2_CMCI_EN (1ULL << 30)
56#define MCI_CTL2_CMCI_THRESHOLD_MASK 0x7fffULL
57
58#define MCJ_CTX_MASK 3
59#define MCJ_CTX(flags) ((flags) & MCJ_CTX_MASK)
60#define MCJ_CTX_RANDOM 0 /* inject context: random */
61#define MCJ_CTX_PROCESS 0x1 /* inject context: process */
62#define MCJ_CTX_IRQ 0x2 /* inject context: IRQ */
63#define MCJ_NMI_BROADCAST 0x4 /* do NMI broadcasting */
64#define MCJ_EXCEPTION 0x8 /* raise as exception */
65#define MCJ_IRQ_BRAODCAST 0x10 /* do IRQ broadcasting */
66
67/* Fields are zero when not available */ 7/* Fields are zero when not available */
68struct mce { 8struct mce {
69 __u64 status; 9 __u64 status;
@@ -87,35 +27,8 @@ struct mce {
87 __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ 27 __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
88}; 28};
89 29
90/*
91 * This structure contains all data related to the MCE log. Also
92 * carries a signature to make it easier to find from external
93 * debugging tools. Each entry is only valid when its finished flag
94 * is set.
95 */
96
97#define MCE_LOG_LEN 32
98
99struct mce_log {
100 char signature[12]; /* "MACHINECHECK" */
101 unsigned len; /* = MCE_LOG_LEN */
102 unsigned next;
103 unsigned flags;
104 unsigned recordlen; /* length of struct mce */
105 struct mce entry[MCE_LOG_LEN];
106};
107
108#define MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
109
110#define MCE_LOG_SIGNATURE "MACHINECHECK"
111
112#define MCE_GET_RECORD_LEN _IOR('M', 1, int) 30#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
113#define MCE_GET_LOG_LEN _IOR('M', 2, int) 31#define MCE_GET_LOG_LEN _IOR('M', 2, int)
114#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int) 32#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
115 33
116/* Software defined banks */
117#define MCE_EXTENDED_BANK 128
118#define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0
119#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1)
120
121#endif /* _UAPI_ASM_X86_MCE_H */ 34#endif /* _UAPI_ASM_X86_MCE_H */
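The MCG_CAP macros this patch moves out of the userspace-visible header decode bit-fields of a single 64-bit capability value. A self-contained sketch of that decoding, with the needed macros copied in so the example stands alone and an invented sample value:

        #include <stdio.h>

        #define MCG_BANKCNT_MASK        0xff
        #define MCG_CTL_P               (1ULL << 8)
        #define MCG_EXT_CNT_MASK        0xff0000
        #define MCG_EXT_CNT_SHIFT       16
        #define MCG_EXT_CNT(c)  (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)

        int main(void)
        {
                unsigned long long mcg_cap = 0x0a0906; /* invented sample value */

                printf("banks: %llu\n", mcg_cap & MCG_BANKCNT_MASK);
                printf("MCG_CTL present: %s\n",
                       (mcg_cap & MCG_CTL_P) ? "yes" : "no");
                printf("extended regs: %llu\n", MCG_EXT_CNT(mcg_cap));
                return 0;
        }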
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 433a59fb1a74..892ce40a7470 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -103,6 +103,8 @@
103#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10) 103#define DEBUGCTLMSR_BTS_OFF_USR (1UL << 10)
104#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11) 104#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
105 105
106#define MSR_IA32_POWER_CTL 0x000001fc
107
106#define MSR_IA32_MC0_CTL 0x00000400 108#define MSR_IA32_MC0_CTL 0x00000400
107#define MSR_IA32_MC0_STATUS 0x00000401 109#define MSR_IA32_MC0_STATUS 0x00000401
108#define MSR_IA32_MC0_ADDR 0x00000402 110#define MSR_IA32_MC0_ADDR 0x00000402
@@ -173,6 +175,7 @@
173#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140 175#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
174#define MSR_AMD64_OSVW_STATUS 0xc0010141 176#define MSR_AMD64_OSVW_STATUS 0xc0010141
175#define MSR_AMD64_DC_CFG 0xc0011022 177#define MSR_AMD64_DC_CFG 0xc0011022
178#define MSR_AMD64_BU_CFG2 0xc001102a
176#define MSR_AMD64_IBSFETCHCTL 0xc0011030 179#define MSR_AMD64_IBSFETCHCTL 0xc0011030
177#define MSR_AMD64_IBSFETCHLINAD 0xc0011031 180#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
178#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032 181#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
@@ -194,6 +197,8 @@
194/* Fam 15h MSRs */ 197/* Fam 15h MSRs */
195#define MSR_F15H_PERF_CTL 0xc0010200 198#define MSR_F15H_PERF_CTL 0xc0010200
196#define MSR_F15H_PERF_CTR 0xc0010201 199#define MSR_F15H_PERF_CTR 0xc0010201
200#define MSR_F15H_NB_PERF_CTL 0xc0010240
201#define MSR_F15H_NB_PERF_CTR 0xc0010241
197 202
198/* Fam 10h MSRs */ 203/* Fam 10h MSRs */
199#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058 204#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
@@ -272,6 +277,7 @@
272#define MSR_IA32_PLATFORM_ID 0x00000017 277#define MSR_IA32_PLATFORM_ID 0x00000017
273#define MSR_IA32_EBL_CR_POWERON 0x0000002a 278#define MSR_IA32_EBL_CR_POWERON 0x0000002a
274#define MSR_EBC_FREQUENCY_ID 0x0000002c 279#define MSR_EBC_FREQUENCY_ID 0x0000002c
280#define MSR_SMI_COUNT 0x00000034
275#define MSR_IA32_FEATURE_CONTROL 0x0000003a 281#define MSR_IA32_FEATURE_CONTROL 0x0000003a
276#define MSR_IA32_TSC_ADJUST 0x0000003b 282#define MSR_IA32_TSC_ADJUST 0x0000003b
277 283
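The new MSR_SMI_COUNT define (0x34) names the SMI counter MSR on CPUs that implement it. A hedged user-space sketch of reading it through the msr driver's /dev/cpu/N/msr interface, where the file offset selects the MSR number (requires root and a loaded msr module):

        #include <stdio.h>
        #include <stdint.h>
        #include <fcntl.h>
        #include <unistd.h>

        #define MSR_SMI_COUNT 0x00000034

        int main(void)
        {
                uint64_t smi;
                int fd = open("/dev/cpu/0/msr", O_RDONLY);

                if (fd < 0) {
                        perror("open /dev/cpu/0/msr");
                        return 1;
                }
                /* The MSR number doubles as the read offset. */
                if (pread(fd, &smi, sizeof(smi), MSR_SMI_COUNT) != sizeof(smi)) {
                        perror("pread");
                        close(fd);
                        return 1;
                }
                printf("SMIs since reset: %llu\n", (unsigned long long)smi);
                close(fd);
                return 0;
        }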
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 34e923a53762..ac3b3d002833 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -65,8 +65,7 @@ obj-$(CONFIG_X86_TSC) += trace_clock.o
65obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 65obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
66obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 66obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
67obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 67obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
68obj-$(CONFIG_KPROBES) += kprobes.o 68obj-y += kprobes/
69obj-$(CONFIG_OPTPROBES) += kprobes-opt.o
70obj-$(CONFIG_MODULES) += module.o 69obj-$(CONFIG_MODULES) += module.o
71obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o 70obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
72obj-$(CONFIG_KGDB) += kgdb.o 71obj-$(CONFIG_KGDB) += kgdb.o
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index afdc3f756dea..c9876efecafb 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -240,7 +240,7 @@ static int apbt_cpuhp_notify(struct notifier_block *n,
240 dw_apb_clockevent_pause(adev->timer); 240 dw_apb_clockevent_pause(adev->timer);
241 if (system_state == SYSTEM_RUNNING) { 241 if (system_state == SYSTEM_RUNNING) {
242 pr_debug("skipping APBT CPU %lu offline\n", cpu); 242 pr_debug("skipping APBT CPU %lu offline\n", cpu);
243 } else if (adev) { 243 } else {
244 pr_debug("APBT clockevent for cpu %lu offline\n", cpu); 244 pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
245 dw_apb_clockevent_stop(adev->timer); 245 dw_apb_clockevent_stop(adev->timer);
246 } 246 }
@@ -311,7 +311,6 @@ void __init apbt_time_init(void)
311#ifdef CONFIG_SMP 311#ifdef CONFIG_SMP
312 int i; 312 int i;
313 struct sfi_timer_table_entry *p_mtmr; 313 struct sfi_timer_table_entry *p_mtmr;
314 unsigned int percpu_timer;
315 struct apbt_dev *adev; 314 struct apbt_dev *adev;
316#endif 315#endif
317 316
@@ -346,13 +345,10 @@ void __init apbt_time_init(void)
346 return; 345 return;
347 } 346 }
348 pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus()); 347 pr_debug("%s: %d CPUs online\n", __func__, num_online_cpus());
349 if (num_possible_cpus() <= sfi_mtimer_num) { 348 if (num_possible_cpus() <= sfi_mtimer_num)
350 percpu_timer = 1;
351 apbt_num_timers_used = num_possible_cpus(); 349 apbt_num_timers_used = num_possible_cpus();
352 } else { 350 else
353 percpu_timer = 0;
354 apbt_num_timers_used = 1; 351 apbt_num_timers_used = 1;
355 }
356 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used); 352 pr_debug("%s: %d APB timers used\n", __func__, apbt_num_timers_used);
357 353
358 /* here we set up per CPU timer data structure */ 354 /* here we set up per CPU timer data structure */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b994cc84aa7e..a5b4dce1b7ac 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1477,8 +1477,7 @@ void __init bsp_end_local_APIC_setup(void)
1477 * Now that local APIC setup is completed for BP, configure the fault 1477 * Now that local APIC setup is completed for BP, configure the fault
1478 * handling for interrupt remapping. 1478 * handling for interrupt remapping.
1479 */ 1479 */
1480 if (irq_remapping_enabled) 1480 irq_remap_enable_fault_handling();
1481 irq_remap_enable_fault_handling();
1482 1481
1483} 1482}
1484 1483
@@ -2251,8 +2250,7 @@ static int lapic_suspend(void)
2251 local_irq_save(flags); 2250 local_irq_save(flags);
2252 disable_local_APIC(); 2251 disable_local_APIC();
2253 2252
2254 if (irq_remapping_enabled) 2253 irq_remapping_disable();
2255 irq_remapping_disable();
2256 2254
2257 local_irq_restore(flags); 2255 local_irq_restore(flags);
2258 return 0; 2256 return 0;
@@ -2268,16 +2266,15 @@ static void lapic_resume(void)
2268 return; 2266 return;
2269 2267
2270 local_irq_save(flags); 2268 local_irq_save(flags);
2271 if (irq_remapping_enabled) { 2269
2272 /* 2270 /*
2273 * IO-APIC and PIC have their own resume routines. 2271 * IO-APIC and PIC have their own resume routines.
2274 * We just mask them here to make sure the interrupt 2272 * We just mask them here to make sure the interrupt
2275 * subsystem is completely quiet while we enable x2apic 2273 * subsystem is completely quiet while we enable x2apic
2276 * and interrupt-remapping. 2274 * and interrupt-remapping.
2277 */ 2275 */
2278 mask_ioapic_entries(); 2276 mask_ioapic_entries();
2279 legacy_pic->mask_all(); 2277 legacy_pic->mask_all();
2280 }
2281 2278
2282 if (x2apic_mode) 2279 if (x2apic_mode)
2283 enable_x2apic(); 2280 enable_x2apic();
@@ -2320,8 +2317,7 @@ static void lapic_resume(void)
2320 apic_write(APIC_ESR, 0); 2317 apic_write(APIC_ESR, 0);
2321 apic_read(APIC_ESR); 2318 apic_read(APIC_ESR);
2322 2319
2323 if (irq_remapping_enabled) 2320 irq_remapping_reenable(x2apic_mode);
2324 irq_remapping_reenable(x2apic_mode);
2325 2321
2326 local_irq_restore(flags); 2322 local_irq_restore(flags);
2327} 2323}
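The apic.c hunks all follow one refactoring pattern: the irq_remapping_enabled test moves from every call site into the callee (or its stub), so the callers become unconditional. A small sketch of the idea, with illustrative names standing in for the kernel's:

        #include <stdio.h>
        #include <stdbool.h>

        static bool irq_remapping_enabled;      /* stand-in for the kernel global */

        /* After the refactor, the guard lives here, not at each call site. */
        static void irq_remapping_disable(void)
        {
                if (!irq_remapping_enabled)
                        return;
                printf("disabling interrupt remapping\n");
        }

        static void lapic_suspend_path(void)
        {
                /* No more "if (irq_remapping_enabled)" at the call site. */
                irq_remapping_disable();
        }

        int main(void)
        {
                lapic_suspend_path();           /* remapping off: silent no-op */
                irq_remapping_enabled = true;
                lapic_suspend_path();           /* remapping on: does the work */
                return 0;
        }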
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index b739d398bb29..9ed796ccc32c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -68,22 +68,6 @@
68#define for_each_irq_pin(entry, head) \ 68#define for_each_irq_pin(entry, head) \
69 for (entry = head; entry; entry = entry->next) 69 for (entry = head; entry; entry = entry->next)
70 70
71#ifdef CONFIG_IRQ_REMAP
72static void irq_remap_modify_chip_defaults(struct irq_chip *chip);
73static inline bool irq_remapped(struct irq_cfg *cfg)
74{
75 return cfg->irq_2_iommu.iommu != NULL;
76}
77#else
78static inline bool irq_remapped(struct irq_cfg *cfg)
79{
80 return false;
81}
82static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
83{
84}
85#endif
86
87/* 71/*
88 * Is the SiS APIC rmw bug present ? 72 * Is the SiS APIC rmw bug present ?
89 * -1 = don't know, 0 = no, 1 = yes 73 * -1 = don't know, 0 = no, 1 = yes
@@ -300,9 +284,9 @@ static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
300 return cfg; 284 return cfg;
301} 285}
302 286
303static int alloc_irq_from(unsigned int from, int node) 287static int alloc_irqs_from(unsigned int from, unsigned int count, int node)
304{ 288{
305 return irq_alloc_desc_from(from, node); 289 return irq_alloc_descs_from(from, count, node);
306} 290}
307 291
308static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 292static void free_irq_at(unsigned int at, struct irq_cfg *cfg)
@@ -326,7 +310,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
326 + (mpc_ioapic_addr(idx) & ~PAGE_MASK); 310 + (mpc_ioapic_addr(idx) & ~PAGE_MASK);
327} 311}
328 312
329static inline void io_apic_eoi(unsigned int apic, unsigned int vector) 313void io_apic_eoi(unsigned int apic, unsigned int vector)
330{ 314{
331 struct io_apic __iomem *io_apic = io_apic_base(apic); 315 struct io_apic __iomem *io_apic = io_apic_base(apic);
332 writel(vector, &io_apic->eoi); 316 writel(vector, &io_apic->eoi);
@@ -573,19 +557,10 @@ static void unmask_ioapic_irq(struct irq_data *data)
573 * Otherwise, we simulate the EOI message manually by changing the trigger 557 * Otherwise, we simulate the EOI message manually by changing the trigger
574 * mode to edge and then back to level, with RTE being masked during this. 558 * mode to edge and then back to level, with RTE being masked during this.
575 */ 559 */
576static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg) 560void native_eoi_ioapic_pin(int apic, int pin, int vector)
577{ 561{
578 if (mpc_ioapic_ver(apic) >= 0x20) { 562 if (mpc_ioapic_ver(apic) >= 0x20) {
579 /* 563 io_apic_eoi(apic, vector);
580 * Intr-remapping uses pin number as the virtual vector
581 * in the RTE. Actual vector is programmed in
582 * intr-remapping table entry. Hence for the io-apic
583 * EOI we use the pin number.
584 */
585 if (cfg && irq_remapped(cfg))
586 io_apic_eoi(apic, pin);
587 else
588 io_apic_eoi(apic, vector);
589 } else { 564 } else {
590 struct IO_APIC_route_entry entry, entry1; 565 struct IO_APIC_route_entry entry, entry1;
591 566
@@ -606,14 +581,15 @@ static void __eoi_ioapic_pin(int apic, int pin, int vector, struct irq_cfg *cfg)
606 } 581 }
607} 582}
608 583
609static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 584void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
610{ 585{
611 struct irq_pin_list *entry; 586 struct irq_pin_list *entry;
612 unsigned long flags; 587 unsigned long flags;
613 588
614 raw_spin_lock_irqsave(&ioapic_lock, flags); 589 raw_spin_lock_irqsave(&ioapic_lock, flags);
615 for_each_irq_pin(entry, cfg->irq_2_pin) 590 for_each_irq_pin(entry, cfg->irq_2_pin)
616 __eoi_ioapic_pin(entry->apic, entry->pin, cfg->vector, cfg); 591 x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
592 cfg->vector);
617 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 593 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
618} 594}
619 595
@@ -650,7 +626,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
650 } 626 }
651 627
652 raw_spin_lock_irqsave(&ioapic_lock, flags); 628 raw_spin_lock_irqsave(&ioapic_lock, flags);
653 __eoi_ioapic_pin(apic, pin, entry.vector, NULL); 629 x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
654 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 630 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
655 } 631 }
656 632
@@ -1304,25 +1280,18 @@ static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
1304 fasteoi = false; 1280 fasteoi = false;
1305 } 1281 }
1306 1282
1307 if (irq_remapped(cfg)) { 1283 if (setup_remapped_irq(irq, cfg, chip))
1308 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
1309 irq_remap_modify_chip_defaults(chip);
1310 fasteoi = trigger != 0; 1284 fasteoi = trigger != 0;
1311 }
1312 1285
1313 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq; 1286 hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
1314 irq_set_chip_and_handler_name(irq, chip, hdl, 1287 irq_set_chip_and_handler_name(irq, chip, hdl,
1315 fasteoi ? "fasteoi" : "edge"); 1288 fasteoi ? "fasteoi" : "edge");
1316} 1289}
1317 1290
1318static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, 1291int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
1319 unsigned int destination, int vector, 1292 unsigned int destination, int vector,
1320 struct io_apic_irq_attr *attr) 1293 struct io_apic_irq_attr *attr)
1321{ 1294{
1322 if (irq_remapping_enabled)
1323 return setup_ioapic_remapped_entry(irq, entry, destination,
1324 vector, attr);
1325
1326 memset(entry, 0, sizeof(*entry)); 1295 memset(entry, 0, sizeof(*entry));
1327 1296
1328 entry->delivery_mode = apic->irq_delivery_mode; 1297 entry->delivery_mode = apic->irq_delivery_mode;
@@ -1370,8 +1339,8 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
1370 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin, 1339 attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
1371 cfg->vector, irq, attr->trigger, attr->polarity, dest); 1340 cfg->vector, irq, attr->trigger, attr->polarity, dest);
1372 1341
1373 if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) { 1342 if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
1374 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1343 pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1375 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin); 1344 mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
1376 __clear_irq_vector(irq, cfg); 1345 __clear_irq_vector(irq, cfg);
1377 1346
@@ -1479,9 +1448,6 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1479 struct IO_APIC_route_entry entry; 1448 struct IO_APIC_route_entry entry;
1480 unsigned int dest; 1449 unsigned int dest;
1481 1450
1482 if (irq_remapping_enabled)
1483 return;
1484
1485 memset(&entry, 0, sizeof(entry)); 1451 memset(&entry, 0, sizeof(entry));
1486 1452
1487 /* 1453 /*
@@ -1513,9 +1479,63 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
1513 ioapic_write_entry(ioapic_idx, pin, entry); 1479 ioapic_write_entry(ioapic_idx, pin, entry);
1514} 1480}
1515 1481
1516__apicdebuginit(void) print_IO_APIC(int ioapic_idx) 1482void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
1517{ 1483{
1518 int i; 1484 int i;
1485
1486 pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");
1487
1488 for (i = 0; i <= nr_entries; i++) {
1489 struct IO_APIC_route_entry entry;
1490
1491 entry = ioapic_read_entry(apic, i);
1492
1493 pr_debug(" %02x %02X ", i, entry.dest);
1494 pr_cont("%1d %1d %1d %1d %1d "
1495 "%1d %1d %02X\n",
1496 entry.mask,
1497 entry.trigger,
1498 entry.irr,
1499 entry.polarity,
1500 entry.delivery_status,
1501 entry.dest_mode,
1502 entry.delivery_mode,
1503 entry.vector);
1504 }
1505}
1506
1507void intel_ir_io_apic_print_entries(unsigned int apic,
1508 unsigned int nr_entries)
1509{
1510 int i;
1511
1512 pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");
1513
1514 for (i = 0; i <= nr_entries; i++) {
1515 struct IR_IO_APIC_route_entry *ir_entry;
1516 struct IO_APIC_route_entry entry;
1517
1518 entry = ioapic_read_entry(apic, i);
1519
1520 ir_entry = (struct IR_IO_APIC_route_entry *)&entry;
1521
1522 pr_debug(" %02x %04X ", i, ir_entry->index);
1523 pr_cont("%1d %1d %1d %1d %1d "
1524 "%1d %1d %X %02X\n",
1525 ir_entry->format,
1526 ir_entry->mask,
1527 ir_entry->trigger,
1528 ir_entry->irr,
1529 ir_entry->polarity,
1530 ir_entry->delivery_status,
1531 ir_entry->index2,
1532 ir_entry->zero,
1533 ir_entry->vector);
1534 }
1535}
1536
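intel_ir_io_apic_print_entries() reinterprets the same 64-bit RTE through a second bit-field layout; the kernel does this with a pointer cast. A sketch of the idea using a union instead, with toy field layouts that are not the real RTE formats:

        #include <stdio.h>
        #include <stdint.h>

        struct plain_entry {
                uint64_t vector : 8;    /* toy layout, not the real RTE */
                uint64_t rest   : 56;
        };

        struct remapped_entry {
                uint64_t index : 15;    /* toy layout, not the real IR entry */
                uint64_t rest  : 49;
        };

        union rte_view {
                struct plain_entry plain;
                struct remapped_entry ir;
        };

        int main(void)
        {
                union rte_view v = { .plain = { .vector = 0x31, .rest = 0 } };

                printf("as plain: vector=0x%02x\n", (unsigned)v.plain.vector);
                printf("as remapped: index=0x%04x\n", (unsigned)v.ir.index);
                return 0;
        }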
1537__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1538{
1519 union IO_APIC_reg_00 reg_00; 1539 union IO_APIC_reg_00 reg_00;
1520 union IO_APIC_reg_01 reg_01; 1540 union IO_APIC_reg_01 reg_01;
1521 union IO_APIC_reg_02 reg_02; 1541 union IO_APIC_reg_02 reg_02;
@@ -1568,58 +1588,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1568 1588
1569 printk(KERN_DEBUG ".... IRQ redirection table:\n"); 1589 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1570 1590
1571 if (irq_remapping_enabled) { 1591 x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
1572 printk(KERN_DEBUG " NR Indx Fmt Mask Trig IRR"
1573 " Pol Stat Indx2 Zero Vect:\n");
1574 } else {
1575 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1576 " Stat Dmod Deli Vect:\n");
1577 }
1578
1579 for (i = 0; i <= reg_01.bits.entries; i++) {
1580 if (irq_remapping_enabled) {
1581 struct IO_APIC_route_entry entry;
1582 struct IR_IO_APIC_route_entry *ir_entry;
1583
1584 entry = ioapic_read_entry(ioapic_idx, i);
1585 ir_entry = (struct IR_IO_APIC_route_entry *) &entry;
1586 printk(KERN_DEBUG " %02x %04X ",
1587 i,
1588 ir_entry->index
1589 );
1590 pr_cont("%1d %1d %1d %1d %1d "
1591 "%1d %1d %X %02X\n",
1592 ir_entry->format,
1593 ir_entry->mask,
1594 ir_entry->trigger,
1595 ir_entry->irr,
1596 ir_entry->polarity,
1597 ir_entry->delivery_status,
1598 ir_entry->index2,
1599 ir_entry->zero,
1600 ir_entry->vector
1601 );
1602 } else {
1603 struct IO_APIC_route_entry entry;
1604
1605 entry = ioapic_read_entry(ioapic_idx, i);
1606 printk(KERN_DEBUG " %02x %02X ",
1607 i,
1608 entry.dest
1609 );
1610 pr_cont("%1d %1d %1d %1d %1d "
1611 "%1d %1d %02X\n",
1612 entry.mask,
1613 entry.trigger,
1614 entry.irr,
1615 entry.polarity,
1616 entry.delivery_status,
1617 entry.dest_mode,
1618 entry.delivery_mode,
1619 entry.vector
1620 );
1621 }
1622 }
1623} 1592}
1624 1593
1625__apicdebuginit(void) print_IO_APICs(void) 1594__apicdebuginit(void) print_IO_APICs(void)
@@ -1921,30 +1890,14 @@ void __init enable_IO_APIC(void)
1921 clear_IO_APIC(); 1890 clear_IO_APIC();
1922} 1891}
1923 1892
1924/* 1893void native_disable_io_apic(void)
1925 * Not an __init, needed by the reboot code
1926 */
1927void disable_IO_APIC(void)
1928{ 1894{
1929 /* 1895 /*
1930 * Clear the IO-APIC before rebooting:
1931 */
1932 clear_IO_APIC();
1933
1934 if (!legacy_pic->nr_legacy_irqs)
1935 return;
1936
1937 /*
1938 * If the i8259 is routed through an IOAPIC 1896 * If the i8259 is routed through an IOAPIC
1939 * Put that IOAPIC in virtual wire mode 1897 * Put that IOAPIC in virtual wire mode
1940 * so legacy interrupts can be delivered. 1898 * so legacy interrupts can be delivered.
1941 *
1942 * With interrupt-remapping, for now we will use virtual wire A mode,
1943 * as virtual wire B is little complex (need to configure both
1944 * IOAPIC RTE as well as interrupt-remapping table entry).
1945 * As this gets called during crash dump, keep this simple for now.
1946 */ 1899 */
1947 if (ioapic_i8259.pin != -1 && !irq_remapping_enabled) { 1900 if (ioapic_i8259.pin != -1) {
1948 struct IO_APIC_route_entry entry; 1901 struct IO_APIC_route_entry entry;
1949 1902
1950 memset(&entry, 0, sizeof(entry)); 1903 memset(&entry, 0, sizeof(entry));
@@ -1964,12 +1917,25 @@ void disable_IO_APIC(void)
1964 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); 1917 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
1965 } 1918 }
1966 1919
1920 if (cpu_has_apic || apic_from_smp_config())
1921 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1922
1923}
1924
1925/*
1926 * Not an __init, needed by the reboot code
1927 */
1928void disable_IO_APIC(void)
1929{
1967 /* 1930 /*
1968 * Use virtual wire A mode when interrupt remapping is enabled. 1931 * Clear the IO-APIC before rebooting:
1969 */ 1932 */
1970 if (cpu_has_apic || apic_from_smp_config()) 1933 clear_IO_APIC();
1971 disconnect_bsp_APIC(!irq_remapping_enabled && 1934
1972 ioapic_i8259.pin != -1); 1935 if (!legacy_pic->nr_legacy_irqs)
1936 return;
1937
1938 x86_io_apic_ops.disable();
1973} 1939}
1974 1940
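The disable_IO_APIC() split shows the series' central mechanism: chip-specific work is routed through the x86_io_apic_ops function-pointer table, which the interrupt-remapping layer can repoint at init time. A minimal sketch of such an ops table, with invented names:

        #include <stdio.h>

        struct io_apic_ops {
                void (*disable)(void);
        };

        static void native_disable(void)
        {
                printf("native IO-APIC disable\n");
        }

        static struct io_apic_ops io_apic_ops = {
                .disable = native_disable,      /* default: native path */
        };

        static void remapped_disable(void)
        {
                printf("IRQ-remapping aware disable\n");
        }

        int main(void)
        {
                io_apic_ops.disable();                  /* native */
                io_apic_ops.disable = remapped_disable; /* override at init */
                io_apic_ops.disable();                  /* remapped */
                return 0;
        }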
1975#ifdef CONFIG_X86_32 1941#ifdef CONFIG_X86_32
@@ -2322,12 +2288,8 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
2322 2288
2323 apic = entry->apic; 2289 apic = entry->apic;
2324 pin = entry->pin; 2290 pin = entry->pin;
2325 /* 2291
2326 * With interrupt-remapping, destination information comes 2292 io_apic_write(apic, 0x11 + pin*2, dest);
2327 * from interrupt-remapping table entry.
2328 */
2329 if (!irq_remapped(cfg))
2330 io_apic_write(apic, 0x11 + pin*2, dest);
2331 reg = io_apic_read(apic, 0x10 + pin*2); 2293 reg = io_apic_read(apic, 0x10 + pin*2);
2332 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2294 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2333 reg |= vector; 2295 reg |= vector;
@@ -2369,9 +2331,10 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
2369 return 0; 2331 return 0;
2370} 2332}
2371 2333
2372static int 2334
2373ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2335int native_ioapic_set_affinity(struct irq_data *data,
2374 bool force) 2336 const struct cpumask *mask,
2337 bool force)
2375{ 2338{
2376 unsigned int dest, irq = data->irq; 2339 unsigned int dest, irq = data->irq;
2377 unsigned long flags; 2340 unsigned long flags;
@@ -2548,33 +2511,6 @@ static void ack_apic_level(struct irq_data *data)
2548 ioapic_irqd_unmask(data, cfg, masked); 2511 ioapic_irqd_unmask(data, cfg, masked);
2549} 2512}
2550 2513
2551#ifdef CONFIG_IRQ_REMAP
2552static void ir_ack_apic_edge(struct irq_data *data)
2553{
2554 ack_APIC_irq();
2555}
2556
2557static void ir_ack_apic_level(struct irq_data *data)
2558{
2559 ack_APIC_irq();
2560 eoi_ioapic_irq(data->irq, data->chip_data);
2561}
2562
2563static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
2564{
2565 seq_printf(p, " IR-%s", data->chip->name);
2566}
2567
2568static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
2569{
2570 chip->irq_print_chip = ir_print_prefix;
2571 chip->irq_ack = ir_ack_apic_edge;
2572 chip->irq_eoi = ir_ack_apic_level;
2573
2574 chip->irq_set_affinity = set_remapped_irq_affinity;
2575}
2576#endif /* CONFIG_IRQ_REMAP */
2577
2578static struct irq_chip ioapic_chip __read_mostly = { 2514static struct irq_chip ioapic_chip __read_mostly = {
2579 .name = "IO-APIC", 2515 .name = "IO-APIC",
2580 .irq_startup = startup_ioapic_irq, 2516 .irq_startup = startup_ioapic_irq,
@@ -2582,7 +2518,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
2582 .irq_unmask = unmask_ioapic_irq, 2518 .irq_unmask = unmask_ioapic_irq,
2583 .irq_ack = ack_apic_edge, 2519 .irq_ack = ack_apic_edge,
2584 .irq_eoi = ack_apic_level, 2520 .irq_eoi = ack_apic_level,
2585 .irq_set_affinity = ioapic_set_affinity, 2521 .irq_set_affinity = native_ioapic_set_affinity,
2586 .irq_retrigger = ioapic_retrigger_irq, 2522 .irq_retrigger = ioapic_retrigger_irq,
2587}; 2523};
2588 2524
@@ -2781,8 +2717,7 @@ static inline void __init check_timer(void)
2781 * 8259A. 2717 * 8259A.
2782 */ 2718 */
2783 if (pin1 == -1) { 2719 if (pin1 == -1) {
2784 if (irq_remapping_enabled) 2720 panic_if_irq_remap("BIOS bug: timer not connected to IO-APIC");
2785 panic("BIOS bug: timer not connected to IO-APIC");
2786 pin1 = pin2; 2721 pin1 = pin2;
2787 apic1 = apic2; 2722 apic1 = apic2;
2788 no_pin1 = 1; 2723 no_pin1 = 1;
@@ -2814,8 +2749,7 @@ static inline void __init check_timer(void)
2814 clear_IO_APIC_pin(0, pin1); 2749 clear_IO_APIC_pin(0, pin1);
2815 goto out; 2750 goto out;
2816 } 2751 }
2817 if (irq_remapping_enabled) 2752 panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
2818 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2819 local_irq_disable(); 2753 local_irq_disable();
2820 clear_IO_APIC_pin(apic1, pin1); 2754 clear_IO_APIC_pin(apic1, pin1);
2821 if (!no_pin1) 2755 if (!no_pin1)
@@ -2982,37 +2916,58 @@ device_initcall(ioapic_init_ops);
2982/* 2916/*
2983 * Dynamic irq allocate and deallocation 2917 * Dynamic irq allocate and deallocation
2984 */ 2918 */
2985unsigned int create_irq_nr(unsigned int from, int node) 2919unsigned int __create_irqs(unsigned int from, unsigned int count, int node)
2986{ 2920{
2987 struct irq_cfg *cfg; 2921 struct irq_cfg **cfg;
2988 unsigned long flags; 2922 unsigned long flags;
2989 unsigned int ret = 0; 2923 int irq, i;
2990 int irq;
2991 2924
2992 if (from < nr_irqs_gsi) 2925 if (from < nr_irqs_gsi)
2993 from = nr_irqs_gsi; 2926 from = nr_irqs_gsi;
2994 2927
2995 irq = alloc_irq_from(from, node); 2928 cfg = kzalloc_node(count * sizeof(cfg[0]), GFP_KERNEL, node);
2996 if (irq < 0) 2929 if (!cfg)
2997 return 0;
2998 cfg = alloc_irq_cfg(irq, node);
2999 if (!cfg) {
3000 free_irq_at(irq, NULL);
3001 return 0; 2930 return 0;
2931
2932 irq = alloc_irqs_from(from, count, node);
2933 if (irq < 0)
2934 goto out_cfgs;
2935
2936 for (i = 0; i < count; i++) {
2937 cfg[i] = alloc_irq_cfg(irq + i, node);
2938 if (!cfg[i])
2939 goto out_irqs;
3002 } 2940 }
3003 2941
3004 raw_spin_lock_irqsave(&vector_lock, flags); 2942 raw_spin_lock_irqsave(&vector_lock, flags);
3005 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 2943 for (i = 0; i < count; i++)
3006 ret = irq; 2944 if (__assign_irq_vector(irq + i, cfg[i], apic->target_cpus()))
2945 goto out_vecs;
3007 raw_spin_unlock_irqrestore(&vector_lock, flags); 2946 raw_spin_unlock_irqrestore(&vector_lock, flags);
3008 2947
3009 if (ret) { 2948 for (i = 0; i < count; i++) {
3010 irq_set_chip_data(irq, cfg); 2949 irq_set_chip_data(irq + i, cfg[i]);
3011 irq_clear_status_flags(irq, IRQ_NOREQUEST); 2950 irq_clear_status_flags(irq + i, IRQ_NOREQUEST);
3012 } else {
3013 free_irq_at(irq, cfg);
3014 } 2951 }
3015 return ret; 2952
2953 kfree(cfg);
2954 return irq;
2955
2956out_vecs:
2957 for (i--; i >= 0; i--)
2958 __clear_irq_vector(irq + i, cfg[i]);
2959 raw_spin_unlock_irqrestore(&vector_lock, flags);
2960out_irqs:
2961 for (i = 0; i < count; i++)
2962 free_irq_at(irq + i, cfg[i]);
2963out_cfgs:
2964 kfree(cfg);
2965 return 0;
2966}
2967
2968unsigned int create_irq_nr(unsigned int from, int node)
2969{
2970 return __create_irqs(from, 1, node);
3016} 2971}
3017 2972
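__create_irqs() above uses the classic goto-unwind idiom: each failure label releases everything acquired so far, in reverse order, so one exit chain covers every partial-failure case. A self-contained sketch of the idiom:

        #include <stdio.h>
        #include <stdlib.h>

        static int *alloc_step(void)
        {
                return malloc(sizeof(int));     /* assume any step can fail */
        }

        static int create_many(int count)
        {
                int **objs;
                int i;

                objs = calloc(count, sizeof(*objs));
                if (!objs)
                        return -1;

                for (i = 0; i < count; i++) {
                        objs[i] = alloc_step();
                        if (!objs[i])
                                goto out_objs;  /* unwind partial progress */
                }
                printf("allocated %d objects\n", count);
                for (i = 0; i < count; i++)     /* demo teardown */
                        free(objs[i]);
                free(objs);
                return 0;

        out_objs:
                for (i--; i >= 0; i--)          /* release in reverse order */
                        free(objs[i]);
                free(objs);
                return -1;
        }

        int main(void)
        {
                return create_many(4) ? 1 : 0;
        }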
3018int create_irq(void) 2973int create_irq(void)
@@ -3037,48 +2992,35 @@ void destroy_irq(unsigned int irq)
3037 2992
3038 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 2993 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE);
3039 2994
3040 if (irq_remapped(cfg)) 2995 free_remapped_irq(irq);
3041 free_remapped_irq(irq); 2996
3042 raw_spin_lock_irqsave(&vector_lock, flags); 2997 raw_spin_lock_irqsave(&vector_lock, flags);
3043 __clear_irq_vector(irq, cfg); 2998 __clear_irq_vector(irq, cfg);
3044 raw_spin_unlock_irqrestore(&vector_lock, flags); 2999 raw_spin_unlock_irqrestore(&vector_lock, flags);
3045 free_irq_at(irq, cfg); 3000 free_irq_at(irq, cfg);
3046} 3001}
3047 3002
3003void destroy_irqs(unsigned int irq, unsigned int count)
3004{
3005 unsigned int i;
3006
3007 for (i = 0; i < count; i++)
3008 destroy_irq(irq + i);
3009}
3010
3048/* 3011/*
3049 * MSI message composition 3012 * MSI message composition
3050 */ 3013 */
3051#ifdef CONFIG_PCI_MSI 3014void native_compose_msi_msg(struct pci_dev *pdev,
3052static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3015 unsigned int irq, unsigned int dest,
3053 struct msi_msg *msg, u8 hpet_id) 3016 struct msi_msg *msg, u8 hpet_id)
3054{ 3017{
3055 struct irq_cfg *cfg; 3018 struct irq_cfg *cfg = irq_cfg(irq);
3056 int err;
3057 unsigned dest;
3058
3059 if (disable_apic)
3060 return -ENXIO;
3061
3062 cfg = irq_cfg(irq);
3063 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3064 if (err)
3065 return err;
3066 3019
3067 err = apic->cpu_mask_to_apicid_and(cfg->domain, 3020 msg->address_hi = MSI_ADDR_BASE_HI;
3068 apic->target_cpus(), &dest);
3069 if (err)
3070 return err;
3071
3072 if (irq_remapped(cfg)) {
3073 compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
3074 return err;
3075 }
3076 3021
3077 if (x2apic_enabled()) 3022 if (x2apic_enabled())
3078 msg->address_hi = MSI_ADDR_BASE_HI | 3023 msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
3079 MSI_ADDR_EXT_DEST_ID(dest);
3080 else
3081 msg->address_hi = MSI_ADDR_BASE_HI;
3082 3024
3083 msg->address_lo = 3025 msg->address_lo =
3084 MSI_ADDR_BASE_LO | 3026 MSI_ADDR_BASE_LO |
@@ -3097,8 +3039,32 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3097 MSI_DATA_DELIVERY_FIXED: 3039 MSI_DATA_DELIVERY_FIXED:
3098 MSI_DATA_DELIVERY_LOWPRI) | 3040 MSI_DATA_DELIVERY_LOWPRI) |
3099 MSI_DATA_VECTOR(cfg->vector); 3041 MSI_DATA_VECTOR(cfg->vector);
3042}
3100 3043
3101 return err; 3044#ifdef CONFIG_PCI_MSI
3045static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3046 struct msi_msg *msg, u8 hpet_id)
3047{
3048 struct irq_cfg *cfg;
3049 int err;
3050 unsigned dest;
3051
3052 if (disable_apic)
3053 return -ENXIO;
3054
3055 cfg = irq_cfg(irq);
3056 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3057 if (err)
3058 return err;
3059
3060 err = apic->cpu_mask_to_apicid_and(cfg->domain,
3061 apic->target_cpus(), &dest);
3062 if (err)
3063 return err;
3064
3065 x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
3066
3067 return 0;
3102} 3068}
3103 3069
3104static int 3070static int
@@ -3136,23 +3102,28 @@ static struct irq_chip msi_chip = {
3136 .irq_retrigger = ioapic_retrigger_irq, 3102 .irq_retrigger = ioapic_retrigger_irq,
3137}; 3103};
3138 3104
3139static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3105int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
3106 unsigned int irq_base, unsigned int irq_offset)
3140{ 3107{
3141 struct irq_chip *chip = &msi_chip; 3108 struct irq_chip *chip = &msi_chip;
3142 struct msi_msg msg; 3109 struct msi_msg msg;
3110 unsigned int irq = irq_base + irq_offset;
3143 int ret; 3111 int ret;
3144 3112
3145 ret = msi_compose_msg(dev, irq, &msg, -1); 3113 ret = msi_compose_msg(dev, irq, &msg, -1);
3146 if (ret < 0) 3114 if (ret < 0)
3147 return ret; 3115 return ret;
3148 3116
3149 irq_set_msi_desc(irq, msidesc); 3117 irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
3150 write_msi_msg(irq, &msg);
3151 3118
3152 if (irq_remapped(irq_get_chip_data(irq))) { 3119 /*
3153 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3120 * MSI-X message is written per-IRQ, the offset is always 0.
3154 irq_remap_modify_chip_defaults(chip); 3121 * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
3155 } 3122 */
3123 if (!irq_offset)
3124 write_msi_msg(irq, &msg);
3125
3126 setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
3156 3127
3157 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3128 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
3158 3129
@@ -3163,46 +3134,26 @@ static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3163 3134
3164int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3135int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3165{ 3136{
3166 int node, ret, sub_handle, index = 0;
3167 unsigned int irq, irq_want; 3137 unsigned int irq, irq_want;
3168 struct msi_desc *msidesc; 3138 struct msi_desc *msidesc;
3139 int node, ret;
3169 3140
3170 /* x86 doesn't support multiple MSI yet */ 3141 /* Multiple MSI vectors only supported with interrupt remapping */
3171 if (type == PCI_CAP_ID_MSI && nvec > 1) 3142 if (type == PCI_CAP_ID_MSI && nvec > 1)
3172 return 1; 3143 return 1;
3173 3144
3174 node = dev_to_node(&dev->dev); 3145 node = dev_to_node(&dev->dev);
3175 irq_want = nr_irqs_gsi; 3146 irq_want = nr_irqs_gsi;
3176 sub_handle = 0;
3177 list_for_each_entry(msidesc, &dev->msi_list, list) { 3147 list_for_each_entry(msidesc, &dev->msi_list, list) {
3178 irq = create_irq_nr(irq_want, node); 3148 irq = create_irq_nr(irq_want, node);
3179 if (irq == 0) 3149 if (irq == 0)
3180 return -1; 3150 return -ENOSPC;
3151
3181 irq_want = irq + 1; 3152 irq_want = irq + 1;
3182 if (!irq_remapping_enabled)
3183 goto no_ir;
3184 3153
3185 if (!sub_handle) { 3154 ret = setup_msi_irq(dev, msidesc, irq, 0);
3186 /*
3187 * allocate the consecutive block of IRTE's
3188 * for 'nvec'
3189 */
3190 index = msi_alloc_remapped_irq(dev, irq, nvec);
3191 if (index < 0) {
3192 ret = index;
3193 goto error;
3194 }
3195 } else {
3196 ret = msi_setup_remapped_irq(dev, irq, index,
3197 sub_handle);
3198 if (ret < 0)
3199 goto error;
3200 }
3201no_ir:
3202 ret = setup_msi_irq(dev, msidesc, irq);
3203 if (ret < 0) 3155 if (ret < 0)
3204 goto error; 3156 goto error;
3205 sub_handle++;
3206 } 3157 }
3207 return 0; 3158 return 0;
3208 3159
@@ -3298,26 +3249,19 @@ static struct irq_chip hpet_msi_type = {
3298 .irq_retrigger = ioapic_retrigger_irq, 3249 .irq_retrigger = ioapic_retrigger_irq,
3299}; 3250};
3300 3251
3301int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3252int default_setup_hpet_msi(unsigned int irq, unsigned int id)
3302{ 3253{
3303 struct irq_chip *chip = &hpet_msi_type; 3254 struct irq_chip *chip = &hpet_msi_type;
3304 struct msi_msg msg; 3255 struct msi_msg msg;
3305 int ret; 3256 int ret;
3306 3257
3307 if (irq_remapping_enabled) {
3308 ret = setup_hpet_msi_remapped(irq, id);
3309 if (ret)
3310 return ret;
3311 }
3312
3313 ret = msi_compose_msg(NULL, irq, &msg, id); 3258 ret = msi_compose_msg(NULL, irq, &msg, id);
3314 if (ret < 0) 3259 if (ret < 0)
3315 return ret; 3260 return ret;
3316 3261
3317 hpet_msi_write(irq_get_handler_data(irq), &msg); 3262 hpet_msi_write(irq_get_handler_data(irq), &msg);
3318 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3263 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
3319 if (irq_remapped(irq_get_chip_data(irq))) 3264 setup_remapped_irq(irq, irq_get_chip_data(irq), chip);
3320 irq_remap_modify_chip_defaults(chip);
3321 3265
3322 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge"); 3266 irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
3323 return 0; 3267 return 0;
@@ -3683,10 +3627,7 @@ void __init setup_ioapic_dest(void)
3683 else 3627 else
3684 mask = apic->target_cpus(); 3628 mask = apic->target_cpus();
3685 3629
3686 if (irq_remapping_enabled) 3630 x86_io_apic_ops.set_affinity(idata, mask, false);
3687 set_remapped_irq_affinity(idata, mask, false);
3688 else
3689 ioapic_set_affinity(idata, mask, false);
3690 } 3631 }
3691 3632
3692} 3633}
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index cce91bf26676..7434d8556d09 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -106,7 +106,7 @@ void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
106 unsigned long mask = cpumask_bits(cpumask)[0]; 106 unsigned long mask = cpumask_bits(cpumask)[0];
107 unsigned long flags; 107 unsigned long flags;
108 108
109 if (WARN_ONCE(!mask, "empty IPI mask")) 109 if (!mask)
110 return; 110 return;
111 111
112 local_irq_save(flags); 112 local_irq_save(flags);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index e03a1e180e81..562a76d433c8 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -20,18 +20,19 @@ static int set_x2apic_phys_mode(char *arg)
20} 20}
21early_param("x2apic_phys", set_x2apic_phys_mode); 21early_param("x2apic_phys", set_x2apic_phys_mode);
22 22
23static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 23static bool x2apic_fadt_phys(void)
24{ 24{
25 if (x2apic_phys) 25 if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
26 return x2apic_enabled(); 26 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
27 else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
28 (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
29 x2apic_enabled()) {
30 printk(KERN_DEBUG "System requires x2apic physical mode\n"); 27 printk(KERN_DEBUG "System requires x2apic physical mode\n");
31 return 1; 28 return true;
32 } 29 }
33 else 30 return false;
34 return 0; 31}
32
33static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
34{
35 return x2apic_enabled() && (x2apic_phys || x2apic_fadt_phys());
35} 36}
36 37
37static void 38static void
@@ -82,7 +83,7 @@ static void init_x2apic_ldr(void)
82 83
83static int x2apic_phys_probe(void) 84static int x2apic_phys_probe(void)
84{ 85{
85 if (x2apic_mode && x2apic_phys) 86 if (x2apic_mode && (x2apic_phys || x2apic_fadt_phys()))
86 return 1; 87 return 1;
87 88
88 return apic == &apic_x2apic_phys; 89 return apic == &apic_x2apic_phys;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 8cfade9510a4..794f6eb54cd3 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV APIC functions (note: not an Intel compatible APIC) 6 * SGI UV APIC functions (note: not an Intel compatible APIC)
7 * 7 *
8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2013 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h> 11#include <linux/hardirq.h>
@@ -91,10 +91,16 @@ static int __init early_get_pnodeid(void)
91 m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR); 91 m_n_config.v = uv_early_read_mmr(UVH_RH_GAM_CONFIG_MMR);
92 uv_min_hub_revision_id = node_id.s.revision; 92 uv_min_hub_revision_id = node_id.s.revision;
93 93
94 if (node_id.s.part_number == UV2_HUB_PART_NUMBER) 94 switch (node_id.s.part_number) {
95 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; 95 case UV2_HUB_PART_NUMBER:
96 if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X) 96 case UV2_HUB_PART_NUMBER_X:
97 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1; 97 uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
98 break;
99 case UV3_HUB_PART_NUMBER:
100 case UV3_HUB_PART_NUMBER_X:
101 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
102 break;
103 }
98 104
99 uv_hub_info->hub_revision = uv_min_hub_revision_id; 105 uv_hub_info->hub_revision = uv_min_hub_revision_id;
100 pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1); 106 pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
@@ -130,13 +136,16 @@ static void __init uv_set_apicid_hibit(void)
130 136
131static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 137static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
132{ 138{
133 int pnodeid, is_uv1, is_uv2; 139 int pnodeid, is_uv1, is_uv2, is_uv3;
134 140
135 is_uv1 = !strcmp(oem_id, "SGI"); 141 is_uv1 = !strcmp(oem_id, "SGI");
136 is_uv2 = !strcmp(oem_id, "SGI2"); 142 is_uv2 = !strcmp(oem_id, "SGI2");
137 if (is_uv1 || is_uv2) { 143 is_uv3 = !strncmp(oem_id, "SGI3", 4); /* there are varieties of UV3 */
144 if (is_uv1 || is_uv2 || is_uv3) {
138 uv_hub_info->hub_revision = 145 uv_hub_info->hub_revision =
139 is_uv1 ? UV1_HUB_REVISION_BASE : UV2_HUB_REVISION_BASE; 146 (is_uv1 ? UV1_HUB_REVISION_BASE :
147 (is_uv2 ? UV2_HUB_REVISION_BASE :
148 UV3_HUB_REVISION_BASE));
140 pnodeid = early_get_pnodeid(); 149 pnodeid = early_get_pnodeid();
141 early_get_apic_pnode_shift(); 150 early_get_apic_pnode_shift();
142 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range; 151 x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -450,14 +459,17 @@ static __init void map_high(char *id, unsigned long base, int pshift,
450 459
451 paddr = base << pshift; 460 paddr = base << pshift;
452 bytes = (1UL << bshift) * (max_pnode + 1); 461 bytes = (1UL << bshift) * (max_pnode + 1);
453 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 462 if (!paddr) {
454 paddr + bytes); 463 pr_info("UV: Map %s_HI base address NULL\n", id);
464 return;
465 }
466 pr_info("UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, paddr + bytes);
455 if (map_type == map_uc) 467 if (map_type == map_uc)
456 init_extra_mapping_uc(paddr, bytes); 468 init_extra_mapping_uc(paddr, bytes);
457 else 469 else
458 init_extra_mapping_wb(paddr, bytes); 470 init_extra_mapping_wb(paddr, bytes);
459
460} 471}
472
461static __init void map_gru_high(int max_pnode) 473static __init void map_gru_high(int max_pnode)
462{ 474{
463 union uvh_rh_gam_gru_overlay_config_mmr_u gru; 475 union uvh_rh_gam_gru_overlay_config_mmr_u gru;
@@ -468,7 +480,8 @@ static __init void map_gru_high(int max_pnode)
468 map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb); 480 map_high("GRU", gru.s.base, shift, shift, max_pnode, map_wb);
469 gru_start_paddr = ((u64)gru.s.base << shift); 481 gru_start_paddr = ((u64)gru.s.base << shift);
470 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1); 482 gru_end_paddr = gru_start_paddr + (1UL << shift) * (max_pnode + 1);
471 483 } else {
484 pr_info("UV: GRU disabled\n");
472 } 485 }
473} 486}
474 487
@@ -480,23 +493,146 @@ static __init void map_mmr_high(int max_pnode)
480 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR); 493 mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
481 if (mmr.s.enable) 494 if (mmr.s.enable)
482 map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc); 495 map_high("MMR", mmr.s.base, shift, shift, max_pnode, map_uc);
496 else
497 pr_info("UV: MMR disabled\n");
498}
499
500/*
501 * The 0 and 1 versions of the MMIOH OVERLAY and REDIRECT MMR regs
502 * have identical layouts on UV3, so one config struct describes both.
503 */
504struct mmioh_config {
505 unsigned long overlay;
506 unsigned long redirect;
507 char *id;
508};
509
510static __initdata struct mmioh_config mmiohs[] = {
511 {
512 UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR,
513 UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR,
514 "MMIOH0"
515 },
516 {
517 UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR,
518 UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG1_MMR,
519 "MMIOH1"
520 },
521};
522
523static __init void map_mmioh_high_uv3(int index, int min_pnode, int max_pnode)
524{
525 union uv3h_rh_gam_mmioh_overlay_config0_mmr_u overlay;
526 unsigned long mmr;
527 unsigned long base;
528 int i, n, shift, m_io, max_io;
529 int nasid, lnasid, fi, li;
530 char *id;
531
532 id = mmiohs[index].id;
533 overlay.v = uv_read_local_mmr(mmiohs[index].overlay);
534 pr_info("UV: %s overlay 0x%lx base:0x%x m_io:%d\n",
535 id, overlay.v, overlay.s3.base, overlay.s3.m_io);
536 if (!overlay.s3.enable) {
537 pr_info("UV: %s disabled\n", id);
538 return;
539 }
540
541 shift = UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG0_MMR_BASE_SHFT;
542 base = (unsigned long)overlay.s3.base;
543 m_io = overlay.s3.m_io;
544 mmr = mmiohs[index].redirect;
545 n = UV3H_RH_GAM_MMIOH_REDIRECT_CONFIG0_MMR_DEPTH;
546 min_pnode *= 2; /* convert to NASID */
547 max_pnode *= 2;
548 max_io = lnasid = fi = li = -1;
549
550 for (i = 0; i < n; i++) {
551 union uv3h_rh_gam_mmioh_redirect_config0_mmr_u redirect;
552
553 redirect.v = uv_read_local_mmr(mmr + i * 8);
554 nasid = redirect.s3.nasid;
555 if (nasid < min_pnode || max_pnode < nasid)
556 nasid = -1; /* invalid NASID */
557
558 if (nasid == lnasid) {
559 li = i;
560 if (i != n-1) /* last entry check */
561 continue;
562 }
563
564 /* check if we have a cached (or last) redirect to print */
565 if (lnasid != -1 || (i == n-1 && nasid != -1)) {
566 unsigned long addr1, addr2;
567 int f, l;
568
569 if (lnasid == -1) {
570 f = l = i;
571 lnasid = nasid;
572 } else {
573 f = fi;
574 l = li;
575 }
576 addr1 = (base << shift) +
577 f * (unsigned long)(1 << m_io);
578 addr2 = (base << shift) +
579 (l + 1) * (unsigned long)(1 << m_io);
580 pr_info("UV: %s[%03d..%03d] NASID 0x%04x ADDR 0x%016lx - 0x%016lx\n",
581 id, fi, li, lnasid, addr1, addr2);
582 if (max_io < l)
583 max_io = l;
584 }
585 fi = li = i;
586 lnasid = nasid;
587 }
588
589 pr_info("UV: %s base:0x%lx shift:%d M_IO:%d MAX_IO:%d\n",
590 id, base, shift, m_io, max_io);
591
592 if (max_io >= 0)
593 map_high(id, base, shift, m_io, max_io, map_uc);
483} 594}
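The redirect loop in map_mmioh_high_uv3() is a run-length coalescer: consecutive redirect entries that target the same NASID are reported (and mapped) as one range. A self-contained sketch of the coalescing, with an invented redirect table:

        #include <stdio.h>

        int main(void)
        {
                int nasid[] = { 4, 4, 4, 6, 6, -1, 8 };  /* -1 = invalid */
                int n = sizeof(nasid) / sizeof(nasid[0]);
                int first = 0;

                for (int i = 0; i < n; i++) {
                        /* extend the current run while the target repeats */
                        if (i + 1 < n && nasid[i + 1] == nasid[i])
                                continue;
                        if (nasid[i] != -1)
                                printf("entries %d..%d -> NASID 0x%04x\n",
                                       first, i, nasid[i]);
                        first = i + 1;
                }
                return 0;
        }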
484 595
485static __init void map_mmioh_high(int max_pnode) 596static __init void map_mmioh_high(int min_pnode, int max_pnode)
486{ 597{
487 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh; 598 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
488 int shift; 599 unsigned long mmr, base;
600 int shift, enable, m_io, n_io;
489 601
490 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 602 if (is_uv3_hub()) {
491 if (is_uv1_hub() && mmioh.s1.enable) { 603 /* Map both MMIOH Regions */
492 shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 604 map_mmioh_high_uv3(0, min_pnode, max_pnode);
493 map_high("MMIOH", mmioh.s1.base, shift, mmioh.s1.m_io, 605 map_mmioh_high_uv3(1, min_pnode, max_pnode);
494 max_pnode, map_uc); 606 return;
495 } 607 }
496 if (is_uv2_hub() && mmioh.s2.enable) { 608
609 if (is_uv1_hub()) {
610 mmr = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
611 shift = UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
612 mmioh.v = uv_read_local_mmr(mmr);
613 enable = !!mmioh.s1.enable;
614 base = mmioh.s1.base;
615 m_io = mmioh.s1.m_io;
616 n_io = mmioh.s1.n_io;
617 } else if (is_uv2_hub()) {
618 mmr = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR;
497 shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT; 619 shift = UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
498 map_high("MMIOH", mmioh.s2.base, shift, mmioh.s2.m_io, 620 mmioh.v = uv_read_local_mmr(mmr);
499 max_pnode, map_uc); 621 enable = !!mmioh.s2.enable;
622 base = mmioh.s2.base;
623 m_io = mmioh.s2.m_io;
624 n_io = mmioh.s2.n_io;
625 } else
626 return;
627
628 if (enable) {
629 max_pnode &= (1 << n_io) - 1;
630 pr_info(
631 "UV: base:0x%lx shift:%d N_IO:%d M_IO:%d max_pnode:0x%x\n",
632 base, shift, n_io, m_io, max_pnode);
633 map_high("MMIOH", base, shift, m_io, max_pnode, map_uc);
634 } else {
635 pr_info("UV: MMIOH disabled\n");
500 } 636 }
501} 637}
502 638
@@ -724,42 +860,41 @@ void uv_nmi_init(void)
724void __init uv_system_init(void) 860void __init uv_system_init(void)
725{ 861{
726 union uvh_rh_gam_config_mmr_u m_n_config; 862 union uvh_rh_gam_config_mmr_u m_n_config;
727 union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
728 union uvh_node_id_u node_id; 863 union uvh_node_id_u node_id;
729 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size; 864 unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
730 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val, n_io; 865 int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
731 int gnode_extra, max_pnode = 0; 866 int gnode_extra, min_pnode = 999999, max_pnode = -1;
732 unsigned long mmr_base, present, paddr; 867 unsigned long mmr_base, present, paddr;
733 unsigned short pnode_mask, pnode_io_mask; 868 unsigned short pnode_mask;
869 char *hub = (is_uv1_hub() ? "UV1" :
870 (is_uv2_hub() ? "UV2" :
871 "UV3"));
734 872
735 printk(KERN_INFO "UV: Found %s hub\n", is_uv1_hub() ? "UV1" : "UV2"); 873 pr_info("UV: Found %s hub\n", hub);
736 map_low_mmrs(); 874 map_low_mmrs();
737 875
738 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); 876 m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
739 m_val = m_n_config.s.m_skt; 877 m_val = m_n_config.s.m_skt;
740 n_val = m_n_config.s.n_skt; 878 n_val = m_n_config.s.n_skt;
741 mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR); 879 pnode_mask = (1 << n_val) - 1;
742 n_io = is_uv1_hub() ? mmioh.s1.n_io : mmioh.s2.n_io;
743 mmr_base = 880 mmr_base =
744 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & 881 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
745 ~UV_MMR_ENABLE; 882 ~UV_MMR_ENABLE;
746 pnode_mask = (1 << n_val) - 1;
747 pnode_io_mask = (1 << n_io) - 1;
748 883
749 node_id.v = uv_read_local_mmr(UVH_NODE_ID); 884 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
750 gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1; 885 gnode_extra = (node_id.s.node_id & ~((1 << n_val) - 1)) >> 1;
751 gnode_upper = ((unsigned long)gnode_extra << m_val); 886 gnode_upper = ((unsigned long)gnode_extra << m_val);
752 printk(KERN_INFO "UV: N %d, M %d, N_IO: %d, gnode_upper 0x%lx, gnode_extra 0x%x, pnode_mask 0x%x, pnode_io_mask 0x%x\n", 887 pr_info("UV: N:%d M:%d pnode_mask:0x%x gnode_upper/extra:0x%lx/0x%x\n",
753 n_val, m_val, n_io, gnode_upper, gnode_extra, pnode_mask, pnode_io_mask); 888 n_val, m_val, pnode_mask, gnode_upper, gnode_extra);
754 889
755 printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); 890 pr_info("UV: global MMR base 0x%lx\n", mmr_base);
756 891
757 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) 892 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
758 uv_possible_blades += 893 uv_possible_blades +=
759 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8)); 894 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
760 895
761 /* uv_num_possible_blades() is really the hub count */ 896 /* uv_num_possible_blades() is really the hub count */
762 printk(KERN_INFO "UV: Found %d blades, %d hubs\n", 897 pr_info("UV: Found %d blades, %d hubs\n",
763 is_uv1_hub() ? uv_num_possible_blades() : 898 is_uv1_hub() ? uv_num_possible_blades() :
764 (uv_num_possible_blades() + 1) / 2, 899 (uv_num_possible_blades() + 1) / 2,
765 uv_num_possible_blades()); 900 uv_num_possible_blades());
@@ -794,6 +929,7 @@ void __init uv_system_init(void)
794 uv_blade_info[blade].nr_possible_cpus = 0; 929 uv_blade_info[blade].nr_possible_cpus = 0;
795 uv_blade_info[blade].nr_online_cpus = 0; 930 uv_blade_info[blade].nr_online_cpus = 0;
796 spin_lock_init(&uv_blade_info[blade].nmi_lock); 931 spin_lock_init(&uv_blade_info[blade].nmi_lock);
932 min_pnode = min(pnode, min_pnode);
797 max_pnode = max(pnode, max_pnode); 933 max_pnode = max(pnode, max_pnode);
798 blade++; 934 blade++;
799 } 935 }
@@ -856,7 +992,7 @@ void __init uv_system_init(void)
856 992
857 map_gru_high(max_pnode); 993 map_gru_high(max_pnode);
858 map_mmr_high(max_pnode); 994 map_mmr_high(max_pnode);
859 map_mmioh_high(max_pnode & pnode_io_mask); 995 map_mmioh_high(min_pnode, max_pnode);
860 996
861 uv_cpu_init(); 997 uv_cpu_init();
862 uv_scir_register_cpu_notifier(); 998 uv_scir_register_cpu_notifier();
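
The coalescing loop in map_mmioh_high_uv3() above is the subtle part of this change: it folds consecutive redirect entries that resolve to the same NASID into one printed range. A minimal userspace sketch of that idea, assuming a made-up redirect table rather than real UV MMR reads:

/* Hypothetical sketch of the range-coalescing idea in
 * map_mmioh_high_uv3(): walk the redirect table and print one line per
 * run of consecutive entries that point at the same NASID.  The table
 * below holds invented values, not real UV redirect MMR contents. */
#include <stdio.h>

int main(void)
{
	int nasid[] = { 0, 0, 2, 2, 2, -1, 4 };	/* -1 = invalid entry */
	int n = sizeof(nasid) / sizeof(nasid[0]);
	int first = 0;

	for (int i = 1; i <= n; i++) {
		/* flush the current run when the NASID changes or at the end */
		if (i == n || nasid[i] != nasid[first]) {
			if (nasid[first] != -1)
				printf("entries [%03d..%03d] -> NASID 0x%04x\n",
				       first, i - 1, nasid[first]);
			first = i;
		}
	}
	return 0;
}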
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d65464e43503..66b5faffe14a 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -232,6 +232,7 @@
232#include <linux/acpi.h> 232#include <linux/acpi.h>
233#include <linux/syscore_ops.h> 233#include <linux/syscore_ops.h>
234#include <linux/i8253.h> 234#include <linux/i8253.h>
235#include <linux/cpuidle.h>
235 236
236#include <asm/uaccess.h> 237#include <asm/uaccess.h>
237#include <asm/desc.h> 238#include <asm/desc.h>
@@ -360,13 +361,35 @@ struct apm_user {
360 * idle percentage above which bios idle calls are done 361 * idle percentage above which bios idle calls are done
361 */ 362 */
362#ifdef CONFIG_APM_CPU_IDLE 363#ifdef CONFIG_APM_CPU_IDLE
363#warning deprecated CONFIG_APM_CPU_IDLE will be deleted in 2012
364#define DEFAULT_IDLE_THRESHOLD 95 364#define DEFAULT_IDLE_THRESHOLD 95
365#else 365#else
366#define DEFAULT_IDLE_THRESHOLD 100 366#define DEFAULT_IDLE_THRESHOLD 100
367#endif 367#endif
368#define DEFAULT_IDLE_PERIOD (100 / 3) 368#define DEFAULT_IDLE_PERIOD (100 / 3)
369 369
370static int apm_cpu_idle(struct cpuidle_device *dev,
371 struct cpuidle_driver *drv, int index);
372
373static struct cpuidle_driver apm_idle_driver = {
374 .name = "apm_idle",
375 .owner = THIS_MODULE,
376 .en_core_tk_irqen = 1,
377 .states = {
378 { /* entry 0 is for polling */ },
379 { /* entry 1 is for APM idle */
380 .name = "APM",
381 .desc = "APM idle",
382 .flags = CPUIDLE_FLAG_TIME_VALID,
383 .exit_latency = 250, /* WAG */
384 .target_residency = 500, /* WAG */
385 .enter = &apm_cpu_idle
386 },
387 },
388 .state_count = 2,
389};
390
391static struct cpuidle_device apm_cpuidle_device;
392
370/* 393/*
371 * Local variables 394 * Local variables
372 */ 395 */
@@ -377,7 +400,6 @@ static struct {
377static int clock_slowed; 400static int clock_slowed;
378static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD; 401static int idle_threshold __read_mostly = DEFAULT_IDLE_THRESHOLD;
379static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD; 402static int idle_period __read_mostly = DEFAULT_IDLE_PERIOD;
380static int set_pm_idle;
381static int suspends_pending; 403static int suspends_pending;
382static int standbys_pending; 404static int standbys_pending;
383static int ignore_sys_suspend; 405static int ignore_sys_suspend;
@@ -884,8 +906,6 @@ static void apm_do_busy(void)
884#define IDLE_CALC_LIMIT (HZ * 100) 906#define IDLE_CALC_LIMIT (HZ * 100)
885#define IDLE_LEAKY_MAX 16 907#define IDLE_LEAKY_MAX 16
886 908
887static void (*original_pm_idle)(void) __read_mostly;
888
889/** 909/**
890 * apm_cpu_idle - cpu idling for APM capable Linux 910 * apm_cpu_idle - cpu idling for APM capable Linux
891 * 911 *
@@ -894,35 +914,36 @@ static void (*original_pm_idle)(void) __read_mostly;
894 * Furthermore it calls the system default idle routine. 914 * Furthermore it calls the system default idle routine.
895 */ 915 */
896 916
897static void apm_cpu_idle(void) 917static int apm_cpu_idle(struct cpuidle_device *dev,
918 struct cpuidle_driver *drv, int index)
898{ 919{
899 static int use_apm_idle; /* = 0 */ 920 static int use_apm_idle; /* = 0 */
900 static unsigned int last_jiffies; /* = 0 */ 921 static unsigned int last_jiffies; /* = 0 */
901 static unsigned int last_stime; /* = 0 */ 922 static unsigned int last_stime; /* = 0 */
923 cputime_t stime;
902 924
903 int apm_idle_done = 0; 925 int apm_idle_done = 0;
904 unsigned int jiffies_since_last_check = jiffies - last_jiffies; 926 unsigned int jiffies_since_last_check = jiffies - last_jiffies;
905 unsigned int bucket; 927 unsigned int bucket;
906 928
907 WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
908recalc: 929recalc:
930 task_cputime(current, NULL, &stime);
909 if (jiffies_since_last_check > IDLE_CALC_LIMIT) { 931 if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
910 use_apm_idle = 0; 932 use_apm_idle = 0;
911 last_jiffies = jiffies;
912 last_stime = current->stime;
913 } else if (jiffies_since_last_check > idle_period) { 933 } else if (jiffies_since_last_check > idle_period) {
914 unsigned int idle_percentage; 934 unsigned int idle_percentage;
915 935
916 idle_percentage = current->stime - last_stime; 936 idle_percentage = stime - last_stime;
917 idle_percentage *= 100; 937 idle_percentage *= 100;
918 idle_percentage /= jiffies_since_last_check; 938 idle_percentage /= jiffies_since_last_check;
919 use_apm_idle = (idle_percentage > idle_threshold); 939 use_apm_idle = (idle_percentage > idle_threshold);
920 if (apm_info.forbid_idle) 940 if (apm_info.forbid_idle)
921 use_apm_idle = 0; 941 use_apm_idle = 0;
922 last_jiffies = jiffies;
923 last_stime = current->stime;
924 } 942 }
925 943
944 last_jiffies = jiffies;
945 last_stime = stime;
946
926 bucket = IDLE_LEAKY_MAX; 947 bucket = IDLE_LEAKY_MAX;
927 948
928 while (!need_resched()) { 949 while (!need_resched()) {
@@ -950,10 +971,7 @@ recalc:
950 break; 971 break;
951 } 972 }
952 } 973 }
953 if (original_pm_idle) 974 default_idle();
954 original_pm_idle();
955 else
956 default_idle();
957 local_irq_disable(); 975 local_irq_disable();
958 jiffies_since_last_check = jiffies - last_jiffies; 976 jiffies_since_last_check = jiffies - last_jiffies;
959 if (jiffies_since_last_check > idle_period) 977 if (jiffies_since_last_check > idle_period)
@@ -963,7 +981,7 @@ recalc:
963 if (apm_idle_done) 981 if (apm_idle_done)
964 apm_do_busy(); 982 apm_do_busy();
965 983
966 local_irq_enable(); 984 return index;
967} 985}
968 986
969/** 987/**
@@ -2381,9 +2399,9 @@ static int __init apm_init(void)
2381 if (HZ != 100) 2399 if (HZ != 100)
2382 idle_period = (idle_period * HZ) / 100; 2400 idle_period = (idle_period * HZ) / 100;
2383 if (idle_threshold < 100) { 2401 if (idle_threshold < 100) {
2384 original_pm_idle = pm_idle; 2402 if (!cpuidle_register_driver(&apm_idle_driver))
2385 pm_idle = apm_cpu_idle; 2403 if (cpuidle_register_device(&apm_cpuidle_device))
2386 set_pm_idle = 1; 2404 cpuidle_unregister_driver(&apm_idle_driver);
2387 } 2405 }
2388 2406
2389 return 0; 2407 return 0;
@@ -2393,15 +2411,9 @@ static void __exit apm_exit(void)
2393{ 2411{
2394 int error; 2412 int error;
2395 2413
2396 if (set_pm_idle) { 2414 cpuidle_unregister_device(&apm_cpuidle_device);
2397 pm_idle = original_pm_idle; 2415 cpuidle_unregister_driver(&apm_idle_driver);
2398 /* 2416
2399 * We are about to unload the current idle thread pm callback
2400 * (pm_idle), Wait for all processors to update cached/local
2401 * copies of pm_idle before proceeding.
2402 */
2403 kick_all_cpus_sync();
2404 }
2405 if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) 2417 if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
2406 && (apm_info.connection_version > 0x0100)) { 2418 && (apm_info.connection_version > 0x0100)) {
2407 error = apm_engage_power_management(APM_DEVICE_ALL, 0); 2419 error = apm_engage_power_management(APM_DEVICE_ALL, 0);
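
The registration sequence in apm_init() above follows the usual register-then-roll-back idiom: the cpuidle driver goes in first, and a failure to register the device undoes it so nothing is left half-initialized. A stand-alone sketch of that idiom, with stand-in helpers rather than the real cpuidle API:

/* Hypothetical sketch of the register-then-roll-back pattern used in
 * apm_init(): register_driver()/register_device() are fakes that return
 * 0 on success, mirroring the kernel convention. */
#include <stdio.h>

static int register_driver(void)    { puts("driver registered");   return 0; }
static int register_device(void)    { puts("device failed");       return -1; }
static void unregister_driver(void) { puts("driver unregistered"); }

int main(void)
{
	if (!register_driver())		/* 0 == success */
		if (register_device())	/* non-zero == failure */
			unregister_driver();	/* roll back the first step */
	return 0;
}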
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eafb084e80f8..edd77e7508b3 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -363,9 +363,9 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
363#endif 363#endif
364} 364}
365 365
366int amd_get_nb_id(int cpu) 366u16 amd_get_nb_id(int cpu)
367{ 367{
368 int id = 0; 368 u16 id = 0;
369#ifdef CONFIG_SMP 369#ifdef CONFIG_SMP
370 id = per_cpu(cpu_llc_id, cpu); 370 id = per_cpu(cpu_llc_id, cpu);
371#endif 371#endif
@@ -517,10 +517,9 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
517static void __cpuinit init_amd(struct cpuinfo_x86 *c) 517static void __cpuinit init_amd(struct cpuinfo_x86 *c)
518{ 518{
519 u32 dummy; 519 u32 dummy;
520
521#ifdef CONFIG_SMP
522 unsigned long long value; 520 unsigned long long value;
523 521
522#ifdef CONFIG_SMP
524 /* 523 /*
525 * Disable TLB flush filter by setting HWCR.FFDIS on K8 524 * Disable TLB flush filter by setting HWCR.FFDIS on K8
526 * bit 6 of msr C001_0015 525 * bit 6 of msr C001_0015
@@ -558,12 +557,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
558 * (AMD Erratum #110, docId: 25759). 557 * (AMD Erratum #110, docId: 25759).
559 */ 558 */
560 if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { 559 if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
561 u64 val;
562
563 clear_cpu_cap(c, X86_FEATURE_LAHF_LM); 560 clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
564 if (!rdmsrl_amd_safe(0xc001100d, &val)) { 561 if (!rdmsrl_amd_safe(0xc001100d, &value)) {
565 val &= ~(1ULL << 32); 562 value &= ~(1ULL << 32);
566 wrmsrl_amd_safe(0xc001100d, val); 563 wrmsrl_amd_safe(0xc001100d, value);
567 } 564 }
568 } 565 }
569 566
@@ -616,13 +613,12 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
616 if ((c->x86 == 0x15) && 613 if ((c->x86 == 0x15) &&
617 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && 614 (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
618 !cpu_has(c, X86_FEATURE_TOPOEXT)) { 615 !cpu_has(c, X86_FEATURE_TOPOEXT)) {
619 u64 val;
620 616
621 if (!rdmsrl_safe(0xc0011005, &val)) { 617 if (!rdmsrl_safe(0xc0011005, &value)) {
622 val |= 1ULL << 54; 618 value |= 1ULL << 54;
623 wrmsrl_safe(0xc0011005, val); 619 wrmsrl_safe(0xc0011005, value);
624 rdmsrl(0xc0011005, val); 620 rdmsrl(0xc0011005, value);
625 if (val & (1ULL << 54)) { 621 if (value & (1ULL << 54)) {
626 set_cpu_cap(c, X86_FEATURE_TOPOEXT); 622 set_cpu_cap(c, X86_FEATURE_TOPOEXT);
627 printk(KERN_INFO FW_INFO "CPU: Re-enabling " 623 printk(KERN_INFO FW_INFO "CPU: Re-enabling "
628 "disabled Topology Extensions Support\n"); 624 "disabled Topology Extensions Support\n");
@@ -636,11 +632,10 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
636 */ 632 */
637 if ((c->x86 == 0x15) && 633 if ((c->x86 == 0x15) &&
638 (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { 634 (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
639 u64 val;
640 635
641 if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { 636 if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) {
642 val |= 0x1E; 637 value |= 0x1E;
643 wrmsrl_safe(0xc0011021, val); 638 wrmsrl_safe(0xc0011021, value);
644 } 639 }
645 } 640 }
646 641
@@ -700,13 +695,11 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
700 if (c->x86 > 0x11) 695 if (c->x86 > 0x11)
701 set_cpu_cap(c, X86_FEATURE_ARAT); 696 set_cpu_cap(c, X86_FEATURE_ARAT);
702 697
703 /*
704 * Disable GART TLB Walk Errors on Fam10h. We do this here
705 * because this is always needed when GART is enabled, even in a
706 * kernel which has no MCE support built in.
707 */
708 if (c->x86 == 0x10) { 698 if (c->x86 == 0x10) {
709 /* 699 /*
700 * Disable GART TLB Walk Errors on Fam10h. We do this here
701 * because this is always needed when GART is enabled, even in a
702 * kernel which has no MCE support built in.
710 * BIOS should disable GartTlbWlk Errors itself. If 703 * BIOS should disable GartTlbWlk Errors itself. If
711 * it doesn't, do it here as suggested by the BKDG. 704 * it doesn't, do it here as suggested by the BKDG.
712 * 705 *
@@ -720,6 +713,21 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
720 mask |= (1 << 10); 713 mask |= (1 << 10);
721 wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask); 714 wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
722 } 715 }
716
717 /*
718 * On family 10h BIOS may not have properly enabled WC+ support,
719 * causing it to be converted to CD memtype. This may result in
720 * performance degradation for certain nested-paging guests.
721 * Prevent this conversion by clearing bit 24 in
722 * MSR_AMD64_BU_CFG2.
723 *
724 * NOTE: we want to use the _safe accessors so as not to #GP kvm
725 * guests on older kvm hosts.
726 */
727
728 rdmsrl_safe(MSR_AMD64_BU_CFG2, &value);
729 value &= ~(1ULL << 24);
730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
723 } 731 }
724 732
725 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 733 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
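
The BU_CFG2 hunk above is a standard fallible read-modify-write on an MSR: read with a _safe accessor, clear one bit, write back, and skip the update entirely if the read faulted. A small sketch of the pattern against a faked MSR (the *_sim helpers are inventions for illustration, standing in for rdmsrl_safe()/wrmsrl_safe()):

/* Hypothetical sketch of the safe MSR read-modify-write used for
 * MSR_AMD64_BU_CFG2 above.  A #GP in a guest would make the "read"
 * fail, and the code then simply leaves the register alone. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr = (1ULL << 24) | 0xabc;

static int rdmsrl_safe_sim(uint64_t *val) { *val = fake_msr; return 0; }
static int wrmsrl_safe_sim(uint64_t val)  { fake_msr = val;  return 0; }

int main(void)
{
	uint64_t value;

	if (!rdmsrl_safe_sim(&value)) {		/* 0 == read succeeded */
		value &= ~(1ULL << 24);		/* clear the WC+ -> CD bit */
		wrmsrl_safe_sim(value);
	}
	printf("msr now 0x%llx\n", (unsigned long long)fake_msr);
	return 0;
}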
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 92dfec986a48..af6455e3fcc9 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -17,15 +17,6 @@
17#include <asm/paravirt.h> 17#include <asm/paravirt.h>
18#include <asm/alternative.h> 18#include <asm/alternative.h>
19 19
20static int __init no_halt(char *s)
21{
22 WARN_ONCE(1, "\"no-hlt\" is deprecated, please use \"idle=poll\"\n");
23 boot_cpu_data.hlt_works_ok = 0;
24 return 1;
25}
26
27__setup("no-hlt", no_halt);
28
29static int __init no_387(char *s) 20static int __init no_387(char *s)
30{ 21{
31 boot_cpu_data.hard_math = 0; 22 boot_cpu_data.hard_math = 0;
@@ -89,23 +80,6 @@ static void __init check_fpu(void)
89 pr_warn("Hmm, FPU with FDIV bug\n"); 80 pr_warn("Hmm, FPU with FDIV bug\n");
90} 81}
91 82
92static void __init check_hlt(void)
93{
94 if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
95 return;
96
97 pr_info("Checking 'hlt' instruction... ");
98 if (!boot_cpu_data.hlt_works_ok) {
99 pr_cont("disabled\n");
100 return;
101 }
102 halt();
103 halt();
104 halt();
105 halt();
106 pr_cont("OK\n");
107}
108
109/* 83/*
110 * Check whether we are able to run this kernel safely on SMP. 84 * Check whether we are able to run this kernel safely on SMP.
111 * 85 *
@@ -129,7 +103,6 @@ void __init check_bugs(void)
129 print_cpu_info(&boot_cpu_data); 103 print_cpu_info(&boot_cpu_data);
130#endif 104#endif
131 check_config(); 105 check_config();
132 check_hlt();
133 init_utsname()->machine[1] = 106 init_utsname()->machine[1] =
134 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); 107 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
135 alternative_instructions(); 108 alternative_instructions();
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index a8f8fa9769d6..1e7e84a02eba 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -79,3 +79,10 @@ void __init init_hypervisor_platform(void)
79 if (x86_hyper->init_platform) 79 if (x86_hyper->init_platform)
80 x86_hyper->init_platform(); 80 x86_hyper->init_platform();
81} 81}
82
83bool __init hypervisor_x2apic_available(void)
84{
85 return x86_hyper &&
86 x86_hyper->x2apic_available &&
87 x86_hyper->x2apic_available();
88}
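
hypervisor_x2apic_available() above is the guarded-callback idiom: each link of the chain is tested before the call, so a hypervisor driver that never filled in .x2apic_available simply reads as "not available" instead of dereferencing NULL. A self-contained sketch, assuming toy types rather than the real x86_hyper structures:

/* Hypothetical sketch of the guarded optional-callback chain. */
#include <stdbool.h>
#include <stdio.h>

struct hypervisor {
	const char *name;
	bool (*x2apic_available)(void);	/* optional hook */
};

static bool yes(void) { return true; }

static const struct hypervisor *hyper;	/* NULL on bare metal */

static bool x2apic_available(void)
{
	return hyper && hyper->x2apic_available && hyper->x2apic_available();
}

int main(void)
{
	static const struct hypervisor vmware = { "VMware", yes };

	printf("bare metal: %d\n", x2apic_available());	/* 0 */
	hyper = &vmware;
	printf("vmware:     %d\n", x2apic_available());	/* 1 */
	return 0;
}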
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index fe9edec6698a..7c6f7d548c0f 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -298,8 +298,7 @@ struct _cache_attr {
298 unsigned int); 298 unsigned int);
299}; 299};
300 300
301#ifdef CONFIG_AMD_NB 301#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
302
303/* 302/*
304 * L3 cache descriptors 303 * L3 cache descriptors
305 */ 304 */
@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
524static struct _cache_attr subcaches = 523static struct _cache_attr subcaches =
525 __ATTR(subcaches, 0644, show_subcaches, store_subcaches); 524 __ATTR(subcaches, 0644, show_subcaches, store_subcaches);
526 525
527#else /* CONFIG_AMD_NB */ 526#else
528#define amd_init_l3_cache(x, y) 527#define amd_init_l3_cache(x, y)
529#endif /* CONFIG_AMD_NB */ 528#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
530 529
531static int 530static int
532__cpuinit cpuid4_cache_lookup_regs(int index, 531__cpuinit cpuid4_cache_lookup_regs(int index,
@@ -1227,7 +1226,7 @@ static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1227 .notifier_call = cacheinfo_cpu_callback, 1226 .notifier_call = cacheinfo_cpu_callback,
1228}; 1227};
1229 1228
1230static int __cpuinit cache_sysfs_init(void) 1229static int __init cache_sysfs_init(void)
1231{ 1230{
1232 int i; 1231 int i;
1233 1232
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 80dbda84f1c3..fc7608a89d93 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -512,11 +512,8 @@ int mce_available(struct cpuinfo_x86 *c)
512 512
513static void mce_schedule_work(void) 513static void mce_schedule_work(void)
514{ 514{
515 if (!mce_ring_empty()) { 515 if (!mce_ring_empty())
516 struct work_struct *work = &__get_cpu_var(mce_work); 516 schedule_work(&__get_cpu_var(mce_work));
517 if (!work_pending(work))
518 schedule_work(work);
519 }
520} 517}
521 518
522DEFINE_PER_CPU(struct irq_work, mce_irq_work); 519DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -1351,12 +1348,7 @@ int mce_notify_irq(void)
1351 /* wake processes polling /dev/mcelog */ 1348 /* wake processes polling /dev/mcelog */
1352 wake_up_interruptible(&mce_chrdev_wait); 1349 wake_up_interruptible(&mce_chrdev_wait);
1353 1350
1354 /* 1351 if (mce_helper[0])
1355 * There is no risk of missing notifications because
1356 * work_pending is always cleared before the function is
1357 * executed.
1358 */
1359 if (mce_helper[0] && !work_pending(&mce_trigger_work))
1360 schedule_work(&mce_trigger_work); 1352 schedule_work(&mce_trigger_work);
1361 1353
1362 if (__ratelimit(&ratelimit)) 1354 if (__ratelimit(&ratelimit))
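
Both mce hunks above drop explicit work_pending() tests because schedule_work() already performs an atomic test-and-set on the work item's PENDING bit and becomes a no-op when the work is queued. A rough C11 model of that behaviour, with enqueue() standing in for a real workqueue:

/* Hypothetical sketch of why the removed work_pending() checks were
 * redundant: the scheduling call itself is idempotent. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct work {
	atomic_flag pending;
};

static void enqueue(struct work *w) { (void)w; puts("queued"); }

static bool schedule_work_sim(struct work *w)
{
	if (atomic_flag_test_and_set(&w->pending))
		return false;		/* already pending: no-op */
	enqueue(w);
	return true;
}

int main(void)
{
	struct work w = { ATOMIC_FLAG_INIT };

	schedule_work_sim(&w);		/* queues the work */
	schedule_work_sim(&w);		/* silently does nothing */
	return 0;
}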
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 0a630dd4b620..a7d26d83fb70 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -14,10 +14,15 @@
14#include <linux/time.h> 14#include <linux/time.h>
15#include <linux/clocksource.h> 15#include <linux/clocksource.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/hardirq.h>
18#include <linux/interrupt.h>
17#include <asm/processor.h> 19#include <asm/processor.h>
18#include <asm/hypervisor.h> 20#include <asm/hypervisor.h>
19#include <asm/hyperv.h> 21#include <asm/hyperv.h>
20#include <asm/mshyperv.h> 22#include <asm/mshyperv.h>
23#include <asm/desc.h>
24#include <asm/idle.h>
25#include <asm/irq_regs.h>
21 26
22struct ms_hyperv_info ms_hyperv; 27struct ms_hyperv_info ms_hyperv;
23EXPORT_SYMBOL_GPL(ms_hyperv); 28EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -30,6 +35,13 @@ static bool __init ms_hyperv_platform(void)
30 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 35 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
31 return false; 36 return false;
32 37
38 /*
39 * Xen emulates Hyper-V to support enlightened Windows.
 40 * First check whether we are on a Xen hypervisor.
41 */
42 if (xen_cpuid_base())
43 return false;
44
33 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 45 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
34 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); 46 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
35 47
@@ -68,7 +80,14 @@ static void __init ms_hyperv_init_platform(void)
68 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n", 80 printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
69 ms_hyperv.features, ms_hyperv.hints); 81 ms_hyperv.features, ms_hyperv.hints);
70 82
71 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100); 83 if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
84 clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
85#if IS_ENABLED(CONFIG_HYPERV)
86 /*
87 * Setup the IDT for hypervisor callback.
88 */
89 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
90#endif
72} 91}
73 92
74const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = { 93const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -77,3 +96,36 @@ const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
77 .init_platform = ms_hyperv_init_platform, 96 .init_platform = ms_hyperv_init_platform,
78}; 97};
79EXPORT_SYMBOL(x86_hyper_ms_hyperv); 98EXPORT_SYMBOL(x86_hyper_ms_hyperv);
99
100#if IS_ENABLED(CONFIG_HYPERV)
101static int vmbus_irq = -1;
102static irq_handler_t vmbus_isr;
103
104void hv_register_vmbus_handler(int irq, irq_handler_t handler)
105{
106 vmbus_irq = irq;
107 vmbus_isr = handler;
108}
109
110void hyperv_vector_handler(struct pt_regs *regs)
111{
112 struct pt_regs *old_regs = set_irq_regs(regs);
113 struct irq_desc *desc;
114
115 irq_enter();
116 exit_idle();
117
118 desc = irq_to_desc(vmbus_irq);
119
120 if (desc)
121 generic_handle_irq_desc(vmbus_irq, desc);
122
123 irq_exit();
124 set_irq_regs(old_regs);
125}
126#else
127void hv_register_vmbus_handler(int irq, irq_handler_t handler)
128{
129}
130#endif
131EXPORT_SYMBOL_GPL(hv_register_vmbus_handler);
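
The Xen check above relies on the hypervisor CPUID range: leaf 1 ECX bit 31 advertises "running under a hypervisor", and leaf 0x40000000 carries the vendor signature that lets ms_hyperv_platform() tell genuine Hyper-V apart from Xen's Hyper-V emulation. A userspace sketch of that probe (x86 only; only informative when run inside a VM):

/* Hypothetical sketch of hypervisor signature probing via CPUID. */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int a, b, c, d;
	char sig[13] = { 0 };

	__cpuid(1, a, b, c, d);
	if (!(c & (1u << 31))) {
		puts("no hypervisor bit: bare metal");
		return 0;
	}
	__cpuid(0x40000000, a, b, c, d);
	memcpy(sig + 0, &b, 4);
	memcpy(sig + 4, &c, 4);
	memcpy(sig + 8, &d, 4);
	printf("hypervisor signature: \"%s\"\n", sig);	/* e.g. "Microsoft Hv" */
	return 0;
}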
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6774c17a5576..bf0f01aea994 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -829,7 +829,7 @@ static inline void x86_assign_hw_event(struct perf_event *event,
829 } else { 829 } else {
830 hwc->config_base = x86_pmu_config_addr(hwc->idx); 830 hwc->config_base = x86_pmu_config_addr(hwc->idx);
831 hwc->event_base = x86_pmu_event_addr(hwc->idx); 831 hwc->event_base = x86_pmu_event_addr(hwc->idx);
832 hwc->event_base_rdpmc = hwc->idx; 832 hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
833 } 833 }
834} 834}
835 835
@@ -1310,11 +1310,6 @@ static struct attribute_group x86_pmu_format_group = {
1310 .attrs = NULL, 1310 .attrs = NULL,
1311}; 1311};
1312 1312
1313struct perf_pmu_events_attr {
1314 struct device_attribute attr;
1315 u64 id;
1316};
1317
1318/* 1313/*
1319 * Remove all undefined events (x86_pmu.event_map(id) == 0) 1314 * Remove all undefined events (x86_pmu.event_map(id) == 0)
1320 * out of events_attr attributes. 1315 * out of events_attr attributes.
@@ -1348,11 +1343,9 @@ static ssize_t events_sysfs_show(struct device *dev, struct device_attribute *at
1348#define EVENT_VAR(_id) event_attr_##_id 1343#define EVENT_VAR(_id) event_attr_##_id
1349#define EVENT_PTR(_id) &event_attr_##_id.attr.attr 1344#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
1350 1345
1351#define EVENT_ATTR(_name, _id) \ 1346#define EVENT_ATTR(_name, _id) \
1352static struct perf_pmu_events_attr EVENT_VAR(_id) = { \ 1347 PMU_EVENT_ATTR(_name, EVENT_VAR(_id), PERF_COUNT_HW_##_id, \
1353 .attr = __ATTR(_name, 0444, events_sysfs_show, NULL), \ 1348 events_sysfs_show)
1354 .id = PERF_COUNT_HW_##_id, \
1355};
1356 1349
1357EVENT_ATTR(cpu-cycles, CPU_CYCLES ); 1350EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1358EVENT_ATTR(instructions, INSTRUCTIONS ); 1351EVENT_ATTR(instructions, INSTRUCTIONS );
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 115c1ea97746..7f5c75c2afdd 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -325,6 +325,8 @@ struct x86_pmu {
325 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); 325 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
326 unsigned eventsel; 326 unsigned eventsel;
327 unsigned perfctr; 327 unsigned perfctr;
328 int (*addr_offset)(int index, bool eventsel);
329 int (*rdpmc_index)(int index);
328 u64 (*event_map)(int); 330 u64 (*event_map)(int);
329 int max_events; 331 int max_events;
330 int num_counters; 332 int num_counters;
@@ -446,28 +448,21 @@ extern u64 __read_mostly hw_cache_extra_regs
446 448
447u64 x86_perf_event_update(struct perf_event *event); 449u64 x86_perf_event_update(struct perf_event *event);
448 450
449static inline int x86_pmu_addr_offset(int index) 451static inline unsigned int x86_pmu_config_addr(int index)
450{ 452{
451 int offset; 453 return x86_pmu.eventsel + (x86_pmu.addr_offset ?
452 454 x86_pmu.addr_offset(index, true) : index);
453 /* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
454 alternative_io(ASM_NOP2,
455 "shll $1, %%eax",
456 X86_FEATURE_PERFCTR_CORE,
457 "=a" (offset),
458 "a" (index));
459
460 return offset;
461} 455}
462 456
463static inline unsigned int x86_pmu_config_addr(int index) 457static inline unsigned int x86_pmu_event_addr(int index)
464{ 458{
465 return x86_pmu.eventsel + x86_pmu_addr_offset(index); 459 return x86_pmu.perfctr + (x86_pmu.addr_offset ?
460 x86_pmu.addr_offset(index, false) : index);
466} 461}
467 462
468static inline unsigned int x86_pmu_event_addr(int index) 463static inline int x86_pmu_rdpmc_index(int index)
469{ 464{
470 return x86_pmu.perfctr + x86_pmu_addr_offset(index); 465 return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
471} 466}
472 467
473int x86_setup_perfctr(struct perf_event *event); 468int x86_setup_perfctr(struct perf_event *event);
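
The new addr_offset/rdpmc_index hooks above keep the common case branch-free: no callback installed means "offset equals index", while a vendor callback can impose a different stride. A compact sketch of how the helper composes a base MSR with an optional per-vendor offset (the constants are real MSR bases, shown here purely for illustration):

/* Hypothetical sketch of the optional addr_offset hook. */
#include <stdio.h>

#define MSR_K7_EVNTSEL0		0xc0010000u	/* legacy: stride 1 */
#define MSR_F15H_PERF_CTL	0xc0010200u	/* core ext: stride 2 */

struct pmu {
	unsigned int eventsel;
	int (*addr_offset)(int index);	/* optional */
};

static int amd_core_offset(int index) { return index << 1; }

static unsigned int config_addr(const struct pmu *pmu, int index)
{
	return pmu->eventsel +
	       (pmu->addr_offset ? pmu->addr_offset(index) : index);
}

int main(void)
{
	struct pmu legacy = { MSR_K7_EVNTSEL0,   NULL };
	struct pmu core   = { MSR_F15H_PERF_CTL, amd_core_offset };

	printf("legacy ctl1: 0x%x\n", config_addr(&legacy, 1));	/* 0xc0010001 */
	printf("core   ctl1: 0x%x\n", config_addr(&core, 1));	/* 0xc0010202 */
	return 0;
}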
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index c93bc4e813a0..dfdab42aed27 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,21 +132,102 @@ static u64 amd_pmu_event_map(int hw_event)
132 return amd_perfmon_event_map[hw_event]; 132 return amd_perfmon_event_map[hw_event];
133} 133}
134 134
135static int amd_pmu_hw_config(struct perf_event *event) 135static struct event_constraint *amd_nb_event_constraint;
136
137/*
138 * Previously calculated offsets
139 */
140static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
141static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
142static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
143
144/*
145 * Legacy CPUs:
146 * 4 counters starting at 0xc0010000 each offset by 1
147 *
148 * CPUs with core performance counter extensions:
149 * 6 counters starting at 0xc0010200 each offset by 2
150 *
151 * CPUs with north bridge performance counter extensions:
152 * 4 additional counters starting at 0xc0010240 each offset by 2
153 * (indexed right above either one of the above core counters)
154 */
155static inline int amd_pmu_addr_offset(int index, bool eventsel)
136{ 156{
137 int ret; 157 int offset, first, base;
138 158
139 /* pass precise event sampling to ibs: */ 159 if (!index)
140 if (event->attr.precise_ip && get_ibs_caps()) 160 return index;
141 return -ENOENT; 161
162 if (eventsel)
163 offset = event_offsets[index];
164 else
165 offset = count_offsets[index];
166
167 if (offset)
168 return offset;
169
170 if (amd_nb_event_constraint &&
171 test_bit(index, amd_nb_event_constraint->idxmsk)) {
172 /*
173 * calculate the offset of NB counters with respect to
174 * base eventsel or perfctr
175 */
176
177 first = find_first_bit(amd_nb_event_constraint->idxmsk,
178 X86_PMC_IDX_MAX);
179
180 if (eventsel)
181 base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
182 else
183 base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
184
185 offset = base + ((index - first) << 1);
186 } else if (!cpu_has_perfctr_core)
187 offset = index;
188 else
189 offset = index << 1;
190
191 if (eventsel)
192 event_offsets[index] = offset;
193 else
194 count_offsets[index] = offset;
195
196 return offset;
197}
198
199static inline int amd_pmu_rdpmc_index(int index)
200{
201 int ret, first;
202
203 if (!index)
204 return index;
205
206 ret = rdpmc_indexes[index];
142 207
143 ret = x86_pmu_hw_config(event);
144 if (ret) 208 if (ret)
145 return ret; 209 return ret;
146 210
147 if (has_branch_stack(event)) 211 if (amd_nb_event_constraint &&
148 return -EOPNOTSUPP; 212 test_bit(index, amd_nb_event_constraint->idxmsk)) {
213 /*
 214 * according to the manual, the ECX value of the NB counters is
215 * the index of the NB counter (0, 1, 2 or 3) plus 6
216 */
217
218 first = find_first_bit(amd_nb_event_constraint->idxmsk,
219 X86_PMC_IDX_MAX);
220 ret = index - first + 6;
221 } else
222 ret = index;
223
224 rdpmc_indexes[index] = ret;
225
226 return ret;
227}
149 228
229static int amd_core_hw_config(struct perf_event *event)
230{
150 if (event->attr.exclude_host && event->attr.exclude_guest) 231 if (event->attr.exclude_host && event->attr.exclude_guest)
151 /* 232 /*
152 * When HO == GO == 1 the hardware treats that as GO == HO == 0 233 * When HO == GO == 1 the hardware treats that as GO == HO == 0
@@ -156,14 +237,37 @@ static int amd_pmu_hw_config(struct perf_event *event)
156 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | 237 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
157 ARCH_PERFMON_EVENTSEL_OS); 238 ARCH_PERFMON_EVENTSEL_OS);
158 else if (event->attr.exclude_host) 239 else if (event->attr.exclude_host)
159 event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; 240 event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
160 else if (event->attr.exclude_guest) 241 else if (event->attr.exclude_guest)
161 event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; 242 event->hw.config |= AMD64_EVENTSEL_HOSTONLY;
243
244 return 0;
245}
246
247/*
248 * NB counters do not support the following event select bits:
249 * Host/Guest only
250 * Counter mask
251 * Invert counter mask
252 * Edge detect
253 * OS/User mode
254 */
255static int amd_nb_hw_config(struct perf_event *event)
256{
257 /* for NB, we only allow system wide counting mode */
258 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
259 return -EINVAL;
260
261 if (event->attr.exclude_user || event->attr.exclude_kernel ||
262 event->attr.exclude_host || event->attr.exclude_guest)
263 return -EINVAL;
162 264
163 if (event->attr.type != PERF_TYPE_RAW) 265 event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
164 return 0; 266 ARCH_PERFMON_EVENTSEL_OS);
165 267
166 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK; 268 if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
269 ARCH_PERFMON_EVENTSEL_INT))
270 return -EINVAL;
167 271
168 return 0; 272 return 0;
169} 273}
@@ -181,6 +285,11 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
181 return (hwc->config & 0xe0) == 0xe0; 285 return (hwc->config & 0xe0) == 0xe0;
182} 286}
183 287
288static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
289{
290 return amd_nb_event_constraint && amd_is_nb_event(hwc);
291}
292
184static inline int amd_has_nb(struct cpu_hw_events *cpuc) 293static inline int amd_has_nb(struct cpu_hw_events *cpuc)
185{ 294{
186 struct amd_nb *nb = cpuc->amd_nb; 295 struct amd_nb *nb = cpuc->amd_nb;
@@ -188,20 +297,37 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
188 return nb && nb->nb_id != -1; 297 return nb && nb->nb_id != -1;
189} 298}
190 299
191static void amd_put_event_constraints(struct cpu_hw_events *cpuc, 300static int amd_pmu_hw_config(struct perf_event *event)
192 struct perf_event *event) 301{
302 int ret;
303
304 /* pass precise event sampling to ibs: */
305 if (event->attr.precise_ip && get_ibs_caps())
306 return -ENOENT;
307
308 if (has_branch_stack(event))
309 return -EOPNOTSUPP;
310
311 ret = x86_pmu_hw_config(event);
312 if (ret)
313 return ret;
314
315 if (event->attr.type == PERF_TYPE_RAW)
316 event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
317
318 if (amd_is_perfctr_nb_event(&event->hw))
319 return amd_nb_hw_config(event);
320
321 return amd_core_hw_config(event);
322}
323
324static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
325 struct perf_event *event)
193{ 326{
194 struct hw_perf_event *hwc = &event->hw;
195 struct amd_nb *nb = cpuc->amd_nb; 327 struct amd_nb *nb = cpuc->amd_nb;
196 int i; 328 int i;
197 329
198 /* 330 /*
199 * only care about NB events
200 */
201 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
202 return;
203
204 /*
205 * need to scan whole list because event may not have 331 * need to scan whole list because event may not have
206 * been assigned during scheduling 332 * been assigned during scheduling
207 * 333 *
@@ -215,6 +341,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
215 } 341 }
216} 342}
217 343
344static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
345{
346 int core_id = cpu_data(smp_processor_id()).cpu_core_id;
347
348 /* deliver interrupts only to this core */
349 if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
350 hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
351 hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
352 hwc->config |= (u64)(core_id) <<
353 AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
354 }
355}
356
218 /* 357 /*
219 * AMD64 NorthBridge events need special treatment because 358 * AMD64 NorthBridge events need special treatment because
220 * counter access needs to be synchronized across all cores 359 * counter access needs to be synchronized across all cores
@@ -247,24 +386,24 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
247 * 386 *
248 * Given that resources are allocated (cmpxchg), they must be 387 * Given that resources are allocated (cmpxchg), they must be
249 * eventually freed for others to use. This is accomplished by 388 * eventually freed for others to use. This is accomplished by
250 * calling amd_put_event_constraints(). 389 * calling __amd_put_nb_event_constraints()
251 * 390 *
252 * Non NB events are not impacted by this restriction. 391 * Non NB events are not impacted by this restriction.
253 */ 392 */
254static struct event_constraint * 393static struct event_constraint *
255amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event) 394__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
395 struct event_constraint *c)
256{ 396{
257 struct hw_perf_event *hwc = &event->hw; 397 struct hw_perf_event *hwc = &event->hw;
258 struct amd_nb *nb = cpuc->amd_nb; 398 struct amd_nb *nb = cpuc->amd_nb;
259 struct perf_event *old = NULL; 399 struct perf_event *old;
260 int max = x86_pmu.num_counters; 400 int idx, new = -1;
261 int i, j, k = -1;
262 401
263 /* 402 if (!c)
264 * if not NB event or no NB, then no constraints 403 c = &unconstrained;
265 */ 404
266 if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc))) 405 if (cpuc->is_fake)
267 return &unconstrained; 406 return c;
268 407
269 /* 408 /*
270 * detect if already present, if so reuse 409 * detect if already present, if so reuse
@@ -276,48 +415,36 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
276 * because of successive calls to x86_schedule_events() from 415 * because of successive calls to x86_schedule_events() from
277 * hw_perf_group_sched_in() without hw_perf_enable() 416 * hw_perf_group_sched_in() without hw_perf_enable()
278 */ 417 */
279 for (i = 0; i < max; i++) { 418 for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
280 /* 419 if (new == -1 || hwc->idx == idx)
281 * keep track of first free slot 420 /* assign free slot, prefer hwc->idx */
282 */ 421 old = cmpxchg(nb->owners + idx, NULL, event);
283 if (k == -1 && !nb->owners[i]) 422 else if (nb->owners[idx] == event)
284 k = i; 423 /* event already present */
424 old = event;
425 else
426 continue;
427
428 if (old && old != event)
429 continue;
430
431 /* reassign to this slot */
432 if (new != -1)
433 cmpxchg(nb->owners + new, event, NULL);
434 new = idx;
285 435
286 /* already present, reuse */ 436 /* already present, reuse */
287 if (nb->owners[i] == event) 437 if (old == event)
288 goto done;
289 }
290 /*
291 * not present, so grab a new slot
292 * starting either at:
293 */
294 if (hwc->idx != -1) {
295 /* previous assignment */
296 i = hwc->idx;
297 } else if (k != -1) {
298 /* start from free slot found */
299 i = k;
300 } else {
301 /*
302 * event not found, no slot found in
303 * first pass, try again from the
304 * beginning
305 */
306 i = 0;
307 }
308 j = i;
309 do {
310 old = cmpxchg(nb->owners+i, NULL, event);
311 if (!old)
312 break; 438 break;
313 if (++i == max) 439 }
314 i = 0; 440
315 } while (i != j); 441 if (new == -1)
316done: 442 return &emptyconstraint;
317 if (!old) 443
318 return &nb->event_constraints[i]; 444 if (amd_is_perfctr_nb_event(hwc))
319 445 amd_nb_interrupt_hw_config(hwc);
320 return &emptyconstraint; 446
447 return &nb->event_constraints[new];
321} 448}
322 449
323static struct amd_nb *amd_alloc_nb(int cpu) 450static struct amd_nb *amd_alloc_nb(int cpu)
@@ -364,7 +491,7 @@ static void amd_pmu_cpu_starting(int cpu)
364 struct amd_nb *nb; 491 struct amd_nb *nb;
365 int i, nb_id; 492 int i, nb_id;
366 493
367 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; 494 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
368 495
369 if (boot_cpu_data.x86_max_cores < 2) 496 if (boot_cpu_data.x86_max_cores < 2)
370 return; 497 return;
@@ -407,6 +534,26 @@ static void amd_pmu_cpu_dead(int cpu)
407 } 534 }
408} 535}
409 536
537static struct event_constraint *
538amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
539{
540 /*
541 * if not NB event or no NB, then no constraints
542 */
543 if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
544 return &unconstrained;
545
546 return __amd_get_nb_event_constraints(cpuc, event,
547 amd_nb_event_constraint);
548}
549
550static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
551 struct perf_event *event)
552{
553 if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
554 __amd_put_nb_event_constraints(cpuc, event);
555}
556
410PMU_FORMAT_ATTR(event, "config:0-7,32-35"); 557PMU_FORMAT_ATTR(event, "config:0-7,32-35");
411PMU_FORMAT_ATTR(umask, "config:8-15" ); 558PMU_FORMAT_ATTR(umask, "config:8-15" );
412PMU_FORMAT_ATTR(edge, "config:18" ); 559PMU_FORMAT_ATTR(edge, "config:18" );
@@ -496,6 +643,9 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
496static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0); 643static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
497static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0); 644static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
498 645
646static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
647static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
648
499static struct event_constraint * 649static struct event_constraint *
500amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event) 650amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
501{ 651{
@@ -561,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
561 return &amd_f15_PMC20; 711 return &amd_f15_PMC20;
562 } 712 }
563 case AMD_EVENT_NB: 713 case AMD_EVENT_NB:
564 /* not yet implemented */ 714 return __amd_get_nb_event_constraints(cpuc, event,
565 return &emptyconstraint; 715 amd_nb_event_constraint);
566 default: 716 default:
567 return &emptyconstraint; 717 return &emptyconstraint;
568 } 718 }
@@ -587,6 +737,8 @@ static __initconst const struct x86_pmu amd_pmu = {
587 .schedule_events = x86_schedule_events, 737 .schedule_events = x86_schedule_events,
588 .eventsel = MSR_K7_EVNTSEL0, 738 .eventsel = MSR_K7_EVNTSEL0,
589 .perfctr = MSR_K7_PERFCTR0, 739 .perfctr = MSR_K7_PERFCTR0,
740 .addr_offset = amd_pmu_addr_offset,
741 .rdpmc_index = amd_pmu_rdpmc_index,
590 .event_map = amd_pmu_event_map, 742 .event_map = amd_pmu_event_map,
591 .max_events = ARRAY_SIZE(amd_perfmon_event_map), 743 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
592 .num_counters = AMD64_NUM_COUNTERS, 744 .num_counters = AMD64_NUM_COUNTERS,
@@ -608,7 +760,7 @@ static __initconst const struct x86_pmu amd_pmu = {
608 760
609static int setup_event_constraints(void) 761static int setup_event_constraints(void)
610{ 762{
611 if (boot_cpu_data.x86 >= 0x15) 763 if (boot_cpu_data.x86 == 0x15)
612 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; 764 x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
613 return 0; 765 return 0;
614} 766}
@@ -638,6 +790,23 @@ static int setup_perfctr_core(void)
638 return 0; 790 return 0;
639} 791}
640 792
793static int setup_perfctr_nb(void)
794{
795 if (!cpu_has_perfctr_nb)
796 return -ENODEV;
797
798 x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
799
800 if (cpu_has_perfctr_core)
801 amd_nb_event_constraint = &amd_NBPMC96;
802 else
803 amd_nb_event_constraint = &amd_NBPMC74;
804
805 printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
806
807 return 0;
808}
809
641__init int amd_pmu_init(void) 810__init int amd_pmu_init(void)
642{ 811{
643 /* Performance-monitoring supported from K7 and later: */ 812 /* Performance-monitoring supported from K7 and later: */
@@ -648,6 +817,7 @@ __init int amd_pmu_init(void)
648 817
649 setup_event_constraints(); 818 setup_event_constraints();
650 setup_perfctr_core(); 819 setup_perfctr_core();
820 setup_perfctr_nb();
651 821
652 /* Events are common for all AMDs */ 822 /* Events are common for all AMDs */
653 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, 823 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
@@ -678,7 +848,7 @@ void amd_pmu_disable_virt(void)
678 * SVM is disabled the Guest-only bits still get set and the counter 848 * SVM is disabled the Guest-only bits still get set and the counter
679 * will not count anything. 849 * will not count anything.
680 */ 850 */
681 cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY; 851 cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
682 852
683 /* Reload all events */ 853 /* Reload all events */
684 x86_pmu_disable_all(); 854 x86_pmu_disable_all();
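
The rewritten __amd_get_nb_event_constraints() above replaces the old two-pass search with a single for_each_set_bit() walk that claims counter slots via cmpxchg(), preferring the event's previous index and releasing any earlier claim when a better slot turns up. A stand-alone sketch of that claiming logic using C11 atomics (toy events, no real perf structures):

/* Hypothetical sketch of lock-free slot claiming with compare-exchange. */
#include <stdatomic.h>
#include <stdio.h>

#define NCOUNTERS 4

static _Atomic(void *) owners[NCOUNTERS];

static int claim_slot(void *event, int prev_idx)
{
	int new = -1;

	for (int idx = 0; idx < NCOUNTERS; idx++) {
		void *old = NULL;

		if (new == -1 || idx == prev_idx) {
			/* try to grab a free slot, preferring prev_idx;
			 * on failure, old ends up holding the current owner */
			atomic_compare_exchange_strong(&owners[idx], &old, event);
		} else if (atomic_load(&owners[idx]) == event) {
			old = event;	/* already ours */
		} else {
			continue;
		}

		if (old && old != event)
			continue;	/* someone else owns it */

		if (new != -1)		/* moving: release the earlier claim */
			atomic_store(&owners[new], NULL);
		new = idx;

		if (old == event)	/* reused a previous slot: done */
			break;
	}
	return new;			/* -1 means every slot is taken */
}

int main(void)
{
	int e1 = 1, e2 = 2;

	printf("e1 -> slot %d\n", claim_slot(&e1, -1));	/* 0 */
	printf("e2 -> slot %d\n", claim_slot(&e2, 1));	/* 1 */
	return 0;
}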
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 6336bcbd0618..5f0581e713c2 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -528,7 +528,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
528 if (!test_bit(IBS_STARTED, pcpu->state)) { 528 if (!test_bit(IBS_STARTED, pcpu->state)) {
529 /* 529 /*
530 * Catch spurious interrupts after stopping IBS: After 530 * Catch spurious interrupts after stopping IBS: After
531 * disabling IBS there could be still incomming NMIs 531 * disabling IBS there could be still incoming NMIs
532 * with samples that even have the valid bit cleared. 532 * with samples that even have the valid bit cleared.
533 * Mark all these NMIs as handled. 533 * Mark all these NMIs as handled.
534 */ 534 */
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 93b9e1181f83..4914e94ad6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
2019 break; 2019 break;
2020 2020
2021 case 28: /* Atom */ 2021 case 28: /* Atom */
2022 case 54: /* Cedariew */ 2022 case 38: /* Lincroft */
2023 case 39: /* Penwell */
2024 case 53: /* Cloverview */
2025 case 54: /* Cedarview */
2023 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, 2026 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2024 sizeof(hw_cache_event_ids)); 2027 sizeof(hw_cache_event_ids));
2025 2028
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
2084 pr_cont("SandyBridge events, "); 2087 pr_cont("SandyBridge events, ");
2085 break; 2088 break;
2086 case 58: /* IvyBridge */ 2089 case 58: /* IvyBridge */
2090 case 62: /* IvyBridge EP */
2087 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 2091 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
2088 sizeof(hw_cache_event_ids)); 2092 sizeof(hw_cache_event_ids));
2089 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, 2093 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index f2af39f5dc3d..4820c232a0b9 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
19 19
20}; 20};
21 21
22static __initconst u64 p6_hw_cache_event_ids 22static u64 p6_hw_cache_event_ids
23 [PERF_COUNT_HW_CACHE_MAX] 23 [PERF_COUNT_HW_CACHE_MAX]
24 [PERF_COUNT_HW_CACHE_OP_MAX] 24 [PERF_COUNT_HW_CACHE_OP_MAX]
25 [PERF_COUNT_HW_CACHE_RESULT_MAX] = 25 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 3286a92e662a..e280253f6f94 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -28,7 +28,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
28{ 28{
29 seq_printf(m, 29 seq_printf(m,
30 "fdiv_bug\t: %s\n" 30 "fdiv_bug\t: %s\n"
31 "hlt_bug\t\t: %s\n"
32 "f00f_bug\t: %s\n" 31 "f00f_bug\t: %s\n"
33 "coma_bug\t: %s\n" 32 "coma_bug\t: %s\n"
34 "fpu\t\t: %s\n" 33 "fpu\t\t: %s\n"
@@ -36,7 +35,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
36 "cpuid level\t: %d\n" 35 "cpuid level\t: %d\n"
37 "wp\t\t: %s\n", 36 "wp\t\t: %s\n",
38 c->fdiv_bug ? "yes" : "no", 37 c->fdiv_bug ? "yes" : "no",
39 c->hlt_works_ok ? "no" : "yes",
40 c->f00f_bug ? "yes" : "no", 38 c->f00f_bug ? "yes" : "no",
41 c->coma_bug ? "yes" : "no", 39 c->coma_bug ? "yes" : "no",
42 c->hard_math ? "yes" : "no", 40 c->hard_math ? "yes" : "no",
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index d22d0c4edcfd..03a36321ec54 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -33,6 +33,9 @@
33 33
34#define VMWARE_PORT_CMD_GETVERSION 10 34#define VMWARE_PORT_CMD_GETVERSION 10
35#define VMWARE_PORT_CMD_GETHZ 45 35#define VMWARE_PORT_CMD_GETHZ 45
36#define VMWARE_PORT_CMD_GETVCPU_INFO 68
37#define VMWARE_PORT_CMD_LEGACY_X2APIC 3
38#define VMWARE_PORT_CMD_VCPU_RESERVED 31
36 39
37#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \ 40#define VMWARE_PORT(cmd, eax, ebx, ecx, edx) \
38 __asm__("inl (%%dx)" : \ 41 __asm__("inl (%%dx)" : \
@@ -125,10 +128,20 @@ static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
125 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); 128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
126} 129}
127 130
131/* Checks if hypervisor supports x2apic without VT-D interrupt remapping. */
132static bool __init vmware_legacy_x2apic_available(void)
133{
134 uint32_t eax, ebx, ecx, edx;
135 VMWARE_PORT(GETVCPU_INFO, eax, ebx, ecx, edx);
136 return (eax & (1 << VMWARE_PORT_CMD_VCPU_RESERVED)) == 0 &&
137 (eax & (1 << VMWARE_PORT_CMD_LEGACY_X2APIC)) != 0;
138}
139
128const __refconst struct hypervisor_x86 x86_hyper_vmware = { 140const __refconst struct hypervisor_x86 x86_hyper_vmware = {
129 .name = "VMware", 141 .name = "VMware",
130 .detect = vmware_platform, 142 .detect = vmware_platform,
131 .set_cpu_features = vmware_set_cpu_features, 143 .set_cpu_features = vmware_set_cpu_features,
132 .init_platform = vmware_platform_setup, 144 .init_platform = vmware_platform_setup,
145 .x2apic_available = vmware_legacy_x2apic_available,
133}; 146};
134EXPORT_SYMBOL(x86_hyper_vmware); 147EXPORT_SYMBOL(x86_hyper_vmware);
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 6ed91d9980e2..8831176aa5ef 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1091,11 +1091,18 @@ ENTRY(xen_failsafe_callback)
1091 _ASM_EXTABLE(4b,9b) 1091 _ASM_EXTABLE(4b,9b)
1092ENDPROC(xen_failsafe_callback) 1092ENDPROC(xen_failsafe_callback)
1093 1093
1094BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK, 1094BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1095 xen_evtchn_do_upcall) 1095 xen_evtchn_do_upcall)
1096 1096
1097#endif /* CONFIG_XEN */ 1097#endif /* CONFIG_XEN */
1098 1098
1099#if IS_ENABLED(CONFIG_HYPERV)
1100
1101BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
1102 hyperv_vector_handler)
1103
1104#endif /* CONFIG_HYPERV */
1105
1099#ifdef CONFIG_FUNCTION_TRACER 1106#ifdef CONFIG_FUNCTION_TRACER
1100#ifdef CONFIG_DYNAMIC_FTRACE 1107#ifdef CONFIG_DYNAMIC_FTRACE
1101 1108
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 07a7a04529bc..048f2240f8e6 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1454,11 +1454,16 @@ ENTRY(xen_failsafe_callback)
1454 CFI_ENDPROC 1454 CFI_ENDPROC
1455END(xen_failsafe_callback) 1455END(xen_failsafe_callback)
1456 1456
1457apicinterrupt XEN_HVM_EVTCHN_CALLBACK \ 1457apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
1458 xen_hvm_callback_vector xen_evtchn_do_upcall 1458 xen_hvm_callback_vector xen_evtchn_do_upcall
1459 1459
1460#endif /* CONFIG_XEN */ 1460#endif /* CONFIG_XEN */
1461 1461
1462#if IS_ENABLED(CONFIG_HYPERV)
1463apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
1464 hyperv_callback_vector hyperv_vector_handler
1465#endif /* CONFIG_HYPERV */
1466
1462/* 1467/*
1463 * Some functions should be protected against kprobes 1468 * Some functions should be protected against kprobes
1464 */ 1469 */
@@ -1781,6 +1786,7 @@ first_nmi:
1781 * Leave room for the "copied" frame 1786 * Leave room for the "copied" frame
1782 */ 1787 */
1783 subq $(5*8), %rsp 1788 subq $(5*8), %rsp
1789 CFI_ADJUST_CFA_OFFSET 5*8
1784 1790
1785 /* Copy the stack frame to the Saved frame */ 1791 /* Copy the stack frame to the Saved frame */
1786 .rept 5 1792 .rept 5
@@ -1863,10 +1869,8 @@ end_repeat_nmi:
1863nmi_swapgs: 1869nmi_swapgs:
1864 SWAPGS_UNSAFE_STACK 1870 SWAPGS_UNSAFE_STACK
1865nmi_restore: 1871nmi_restore:
1866 RESTORE_ALL 8 1872 /* Pop the extra iret frame at once */
1867 1873 RESTORE_ALL 6*8
1868 /* Pop the extra iret frame */
1869 addq $(5*8), %rsp
1870 1874
1871 /* Clear the NMI executing stack variable */ 1875 /* Clear the NMI executing stack variable */
1872 movq $0, 5*8(%rsp) 1876 movq $0, 5*8(%rsp)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 8e7f6556028f..3c3f58a0808f 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -300,37 +300,52 @@ ENTRY(startup_32_smp)
300 leal -__PAGE_OFFSET(%ecx),%esp 300 leal -__PAGE_OFFSET(%ecx),%esp
301 301
302default_entry: 302default_entry:
303#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
304 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
305 X86_CR0_PG)
306 movl $(CR0_STATE & ~X86_CR0_PG),%eax
307 movl %eax,%cr0
308
309/*
310 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
311 * bits like NT set. This would confuse the debugger if this code is traced. So
312 * initialize them properly now before switching to protected mode. That means
313 * DF in particular (even though we have cleared it earlier after copying the
314 * command line) because GCC expects it.
315 */
316 pushl $0
317 popfl
318
303/* 319/*
304 * New page tables may be in 4Mbyte page mode and may 320 * New page tables may be in 4Mbyte page mode and may be using the global pages.
305 * be using the global pages.
306 * 321 *
307 * NOTE! If we are on a 486 we may have no cr4 at all! 322 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
308 * Specifically, cr4 exists if and only if CPUID exists 323 * if and only if CPUID exists and has flags other than the FPU flag set.
309 * and has flags other than the FPU flag set.
310 */ 324 */
325 movl $-1,pa(X86_CPUID) # preset CPUID level
311 movl $X86_EFLAGS_ID,%ecx 326 movl $X86_EFLAGS_ID,%ecx
312 pushl %ecx 327 pushl %ecx
313 popfl 328 popfl # set EFLAGS=ID
314 pushfl 329 pushfl
315 popl %eax 330 popl %eax # get EFLAGS
316 pushl $0 331 testl $X86_EFLAGS_ID,%eax # did EFLAGS.ID remain set?
317 popfl 332 jz enable_paging # hw disallowed setting of ID bit
318 pushfl 333 # which means no CPUID and no CR4
319 popl %edx 334
320 xorl %edx,%eax 335 xorl %eax,%eax
321 testl %ecx,%eax 336 cpuid
322 jz 6f # No ID flag = no CPUID = no CR4 337 movl %eax,pa(X86_CPUID) # save largest std CPUID function
323 338
324 movl $1,%eax 339 movl $1,%eax
325 cpuid 340 cpuid
326 andl $~1,%edx # Ignore CPUID.FPU 341 andl $~1,%edx # Ignore CPUID.FPU
327 jz 6f # No flags or only CPUID.FPU = no CR4 342 jz enable_paging # No flags or only CPUID.FPU = no CR4
328 343
329 movl pa(mmu_cr4_features),%eax 344 movl pa(mmu_cr4_features),%eax
330 movl %eax,%cr4 345 movl %eax,%cr4
331 346
332 testb $X86_CR4_PAE, %al # check if PAE is enabled 347 testb $X86_CR4_PAE, %al # check if PAE is enabled
333 jz 6f 348 jz enable_paging
334 349
335 /* Check if extended functions are implemented */ 350 /* Check if extended functions are implemented */
336 movl $0x80000000, %eax 351 movl $0x80000000, %eax
@@ -338,7 +353,7 @@ default_entry:
338 /* Value must be in the range 0x80000001 to 0x8000ffff */ 353 /* Value must be in the range 0x80000001 to 0x8000ffff */
339 subl $0x80000001, %eax 354 subl $0x80000001, %eax
340 cmpl $(0x8000ffff-0x80000001), %eax 355 cmpl $(0x8000ffff-0x80000001), %eax
341 ja 6f 356 ja enable_paging
342 357
343 /* Clear bogus XD_DISABLE bits */ 358 /* Clear bogus XD_DISABLE bits */
344 call verify_cpu 359 call verify_cpu
@@ -347,7 +362,7 @@ default_entry:
347 cpuid 362 cpuid
348 /* Execute Disable bit supported? */ 363 /* Execute Disable bit supported? */
349 btl $(X86_FEATURE_NX & 31), %edx 364 btl $(X86_FEATURE_NX & 31), %edx
350 jnc 6f 365 jnc enable_paging
351 366
352 /* Setup EFER (Extended Feature Enable Register) */ 367 /* Setup EFER (Extended Feature Enable Register) */
353 movl $MSR_EFER, %ecx 368 movl $MSR_EFER, %ecx
@@ -357,15 +372,14 @@ default_entry:
357 /* Make changes effective */ 372 /* Make changes effective */
358 wrmsr 373 wrmsr
359 374
3606: 375enable_paging:
361 376
362/* 377/*
363 * Enable paging 378 * Enable paging
364 */ 379 */
365 movl $pa(initial_page_table), %eax 380 movl $pa(initial_page_table), %eax
366 movl %eax,%cr3 /* set the page table pointer.. */ 381 movl %eax,%cr3 /* set the page table pointer.. */
367 movl %cr0,%eax 382 movl $CR0_STATE,%eax
368 orl $X86_CR0_PG,%eax
369 movl %eax,%cr0 /* ..and set paging (PG) bit */ 383 movl %eax,%cr0 /* ..and set paging (PG) bit */
370 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */ 384 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
3711: 3851:
@@ -373,14 +387,6 @@ default_entry:
373 addl $__PAGE_OFFSET, %esp 387 addl $__PAGE_OFFSET, %esp
374 388
375/* 389/*
376 * Initialize eflags. Some BIOS's leave bits like NT set. This would
377 * confuse the debugger if this code is traced.
378 * XXX - best to initialize before switching to protected mode.
379 */
380 pushl $0
381 popfl
382
383/*
384 * start system 32-bit setup. We need to re-do some of the things done 390 * start system 32-bit setup. We need to re-do some of the things done
385 * in 16-bit mode for the "real" operations. 391 * in 16-bit mode for the "real" operations.
386 */ 392 */
@@ -389,31 +395,11 @@ default_entry:
389 jz 1f # Did we do this already? 395 jz 1f # Did we do this already?
390 call *%eax 396 call *%eax
3911: 3971:
392 398
393/* check if it is 486 or 386. */
394/* 399/*
395 * XXX - this does a lot of unnecessary setup. Alignment checks don't 400 * Check if it is 486
396 * apply at our cpl of 0 and the stack ought to be aligned already, and
397 * we don't need to preserve eflags.
398 */ 401 */
399 movl $-1,X86_CPUID # -1 for no CPUID initially 402 cmpl $-1,X86_CPUID
400 movb $3,X86 # at least 386
401 pushfl # push EFLAGS
402 popl %eax # get EFLAGS
403 movl %eax,%ecx # save original EFLAGS
404 xorl $0x240000,%eax # flip AC and ID bits in EFLAGS
405 pushl %eax # copy to EFLAGS
406 popfl # set EFLAGS
407 pushfl # get new EFLAGS
408 popl %eax # put it in eax
409 xorl %ecx,%eax # change in flags
410 pushl %ecx # restore original EFLAGS
411 popfl
412 testl $0x40000,%eax # check if AC bit changed
413 je is386
414
415 movb $4,X86 # at least 486
416 testl $0x200000,%eax # check if ID bit changed
417 je is486 403 je is486
418 404
419 /* get vendor info */ 405 /* get vendor info */
@@ -439,11 +425,10 @@ default_entry:
439 movb %cl,X86_MASK 425 movb %cl,X86_MASK
440 movl %edx,X86_CAPABILITY 426 movl %edx,X86_CAPABILITY
441 427
442is486: movl $0x50022,%ecx # set AM, WP, NE and MP 428is486:
443 jmp 2f 429 movb $4,X86
444 430 movl $0x50022,%ecx # set AM, WP, NE and MP
445is386: movl $2,%ecx # set MP 431 movl %cr0,%eax
4462: movl %cr0,%eax
447 andl $0x80000011,%eax # Save PG,PE,ET 432 andl $0x80000011,%eax # Save PG,PE,ET
448 orl %ecx,%eax 433 orl %ecx,%eax
449 movl %eax,%cr0 434 movl %eax,%cr0
@@ -468,7 +453,6 @@ is386: movl $2,%ecx # set MP
468 xorl %eax,%eax # Clear LDT 453 xorl %eax,%eax # Clear LDT
469 lldt %ax 454 lldt %ax
470 455
471 cld # gcc2 wants the direction flag cleared at all times
472 pushl $0 # fake return address for unwinder 456 pushl $0 # fake return address for unwinder
473 jmp *(initial_code) 457 jmp *(initial_code)
474 458
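The EFLAGS.ID dance above is the canonical CPUID-presence test on 32-bit x86: if a write to bit 21 of EFLAGS sticks, the CPUID instruction exists. A minimal user-space sketch of the same probe (GCC inline asm, build with -m32; illustrative only, not the kernel's code):

    #include <stdio.h>

    /* Returns nonzero if writes to EFLAGS.ID (bit 21) stick, i.e. the
     * CPUID instruction is available. 32-bit x86 only; build with -m32. */
    static int have_cpuid(void)
    {
        unsigned long before, after;

        __asm__ volatile(
            "pushfl\n\t"
            "popl %0\n\t"               /* before = original EFLAGS */
            "movl %0, %1\n\t"
            "xorl $0x200000, %1\n\t"    /* flip ID in the copy */
            "pushl %1\n\t"
            "popfl\n\t"                 /* try to install the flipped value */
            "pushfl\n\t"
            "popl %1\n\t"               /* after = what the CPU actually kept */
            : "=&r"(before), "=&r"(after));

        return (before ^ after) & 0x200000; /* bit toggled => CPUID exists */
    }

    int main(void)
    {
        printf("CPUID %savailable\n", have_cpuid() ? "" : "not ");
        return 0;
    }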
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index e28670f9a589..da85a8e830a1 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -478,7 +478,7 @@ static int hpet_msi_next_event(unsigned long delta,
478 478
479static int hpet_setup_msi_irq(unsigned int irq) 479static int hpet_setup_msi_irq(unsigned int irq)
480{ 480{
481 if (arch_setup_hpet_msi(irq, hpet_blockid)) { 481 if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
482 destroy_irq(irq); 482 destroy_irq(irq);
483 return -EINVAL; 483 return -EINVAL;
484 } 484 }
diff --git a/arch/x86/kernel/kprobes/Makefile b/arch/x86/kernel/kprobes/Makefile
new file mode 100644
index 000000000000..0d33169cc1a2
--- /dev/null
+++ b/arch/x86/kernel/kprobes/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for kernel probes
3#
4
5obj-$(CONFIG_KPROBES) += core.o
6obj-$(CONFIG_OPTPROBES) += opt.o
7obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes/common.h
index 3230b68ef29a..2e9d4b5af036 100644
--- a/arch/x86/kernel/kprobes-common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -99,4 +99,15 @@ static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsig
99 return addr; 99 return addr;
100} 100}
101#endif 101#endif
102
103#ifdef CONFIG_KPROBES_ON_FTRACE
104extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
105 struct kprobe_ctlblk *kcb);
106#else
107static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
108 struct kprobe_ctlblk *kcb)
109{
110 return 0;
111}
112#endif
102#endif 113#endif
diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes/core.c
index 57916c0d3cf6..e124554598ee 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -58,7 +58,7 @@
58#include <asm/insn.h> 58#include <asm/insn.h>
59#include <asm/debugreg.h> 59#include <asm/debugreg.h>
60 60
61#include "kprobes-common.h" 61#include "common.h"
62 62
63void jprobe_return_end(void); 63void jprobe_return_end(void);
64 64
@@ -78,7 +78,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
78 * Groups, and some special opcodes can not boost. 78 * Groups, and some special opcodes can not boost.
79 * This is non-const and volatile to keep gcc from statically 79 * This is non-const and volatile to keep gcc from statically
80 * optimizing it out, as variable_test_bit makes gcc think only 80 * optimizing it out, as variable_test_bit makes gcc think only
81 * *(unsigned long*) is used. 81 * *(unsigned long*) is used.
82 */ 82 */
83static volatile u32 twobyte_is_boostable[256 / 32] = { 83static volatile u32 twobyte_is_boostable[256 / 32] = {
84 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ 84 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
@@ -117,7 +117,7 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
117 struct __arch_relative_insn { 117 struct __arch_relative_insn {
118 u8 op; 118 u8 op;
119 s32 raddr; 119 s32 raddr;
120 } __attribute__((packed)) *insn; 120 } __packed *insn;
121 121
122 insn = (struct __arch_relative_insn *)from; 122 insn = (struct __arch_relative_insn *)from;
123 insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); 123 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
541 return 1; 541 return 1;
542} 542}
543 543
544#ifdef KPROBES_CAN_USE_FTRACE
545static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
546 struct kprobe_ctlblk *kcb)
547{
548 /*
549 * Emulate singlestep (and also recover regs->ip)
550 * as if there is a 5byte nop
551 */
552 regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
553 if (unlikely(p->post_handler)) {
554 kcb->kprobe_status = KPROBE_HIT_SSDONE;
555 p->post_handler(p, regs, 0);
556 }
557 __this_cpu_write(current_kprobe, NULL);
558}
559#endif
560
561/* 544/*
562 * Interrupts are disabled on entry as trap3 is an interrupt gate and they 545 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
563 * remain disabled throughout this function. 546 * remain disabled throughout this function.
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
616 } else if (kprobe_running()) { 599 } else if (kprobe_running()) {
617 p = __this_cpu_read(current_kprobe); 600 p = __this_cpu_read(current_kprobe);
618 if (p->break_handler && p->break_handler(p, regs)) { 601 if (p->break_handler && p->break_handler(p, regs)) {
619#ifdef KPROBES_CAN_USE_FTRACE 602 if (!skip_singlestep(p, regs, kcb))
620 if (kprobe_ftrace(p)) { 603 setup_singlestep(p, regs, kcb, 0);
621 skip_singlestep(p, regs, kcb);
622 return 1;
623 }
624#endif
625 setup_singlestep(p, regs, kcb, 0);
626 return 1; 604 return 1;
627 } 605 }
628 } /* else: not a kprobe fault; let the kernel handle it */ 606 } /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1075 return 0; 1053 return 0;
1076} 1054}
1077 1055
1078#ifdef KPROBES_CAN_USE_FTRACE
1079/* Ftrace callback handler for kprobes */
1080void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
1081 struct ftrace_ops *ops, struct pt_regs *regs)
1082{
1083 struct kprobe *p;
1084 struct kprobe_ctlblk *kcb;
1085 unsigned long flags;
1086
1087 /* Disable irq for emulating a breakpoint and avoiding preempt */
1088 local_irq_save(flags);
1089
1090 p = get_kprobe((kprobe_opcode_t *)ip);
1091 if (unlikely(!p) || kprobe_disabled(p))
1092 goto end;
1093
1094 kcb = get_kprobe_ctlblk();
1095 if (kprobe_running()) {
1096 kprobes_inc_nmissed_count(p);
1097 } else {
1098 /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
1099 regs->ip = ip + sizeof(kprobe_opcode_t);
1100
1101 __this_cpu_write(current_kprobe, p);
1102 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1103 if (!p->pre_handler || !p->pre_handler(p, regs))
1104 skip_singlestep(p, regs, kcb);
1105 /*
1106 * If pre_handler returns !0, it sets regs->ip and
1107 * resets current kprobe.
1108 */
1109 }
1110end:
1111 local_irq_restore(flags);
1112}
1113
1114int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
1115{
1116 p->ainsn.insn = NULL;
1117 p->ainsn.boostable = -1;
1118 return 0;
1119}
1120#endif
1121
1122int __init arch_init_kprobes(void) 1056int __init arch_init_kprobes(void)
1123{ 1057{
1124 return arch_init_optprobes(); 1058 return arch_init_optprobes();
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
new file mode 100644
index 000000000000..23ef5c556f06
--- /dev/null
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -0,0 +1,93 @@
1/*
2 * Dynamic Ftrace based Kprobes Optimization
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright (C) Hitachi Ltd., 2012
19 */
20#include <linux/kprobes.h>
21#include <linux/ptrace.h>
22#include <linux/hardirq.h>
23#include <linux/preempt.h>
24#include <linux/ftrace.h>
25
26#include "common.h"
27
28static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
29 struct kprobe_ctlblk *kcb)
30{
31 /*
32 * Emulate singlestep (and also recover regs->ip)
33 * as if there is a 5byte nop
34 */
35 regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
36 if (unlikely(p->post_handler)) {
37 kcb->kprobe_status = KPROBE_HIT_SSDONE;
38 p->post_handler(p, regs, 0);
39 }
40 __this_cpu_write(current_kprobe, NULL);
41 return 1;
42}
43
44int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
45 struct kprobe_ctlblk *kcb)
46{
47 if (kprobe_ftrace(p))
48 return __skip_singlestep(p, regs, kcb);
49 else
50 return 0;
51}
52
53/* Ftrace callback handler for kprobes */
54void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
55 struct ftrace_ops *ops, struct pt_regs *regs)
56{
57 struct kprobe *p;
58 struct kprobe_ctlblk *kcb;
59 unsigned long flags;
60
61 /* Disable irq for emulating a breakpoint and avoiding preempt */
62 local_irq_save(flags);
63
64 p = get_kprobe((kprobe_opcode_t *)ip);
65 if (unlikely(!p) || kprobe_disabled(p))
66 goto end;
67
68 kcb = get_kprobe_ctlblk();
69 if (kprobe_running()) {
70 kprobes_inc_nmissed_count(p);
71 } else {
72 /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
73 regs->ip = ip + sizeof(kprobe_opcode_t);
74
75 __this_cpu_write(current_kprobe, p);
76 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
77 if (!p->pre_handler || !p->pre_handler(p, regs))
78 __skip_singlestep(p, regs, kcb);
79 /*
80 * If pre_handler returns !0, it sets regs->ip and
81 * resets current kprobe.
82 */
83 }
84end:
85 local_irq_restore(flags);
86}
87
88int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
89{
90 p->ainsn.insn = NULL;
91 p->ainsn.boostable = -1;
92 return 0;
93}
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes/opt.c
index c5e410eed403..76dc6f095724 100644
--- a/arch/x86/kernel/kprobes-opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -37,7 +37,7 @@
37#include <asm/insn.h> 37#include <asm/insn.h>
38#include <asm/debugreg.h> 38#include <asm/debugreg.h>
39 39
40#include "kprobes-common.h" 40#include "common.h"
41 41
42unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr) 42unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
43{ 43{
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 9cec20253093..b686a904d7c3 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -506,6 +506,7 @@ static bool __init kvm_detect(void)
506const struct hypervisor_x86 x86_hyper_kvm __refconst = { 506const struct hypervisor_x86 x86_hyper_kvm __refconst = {
507 .name = "KVM", 507 .name = "KVM",
508 .detect = kvm_detect, 508 .detect = kvm_detect,
509 .x2apic_available = kvm_para_available,
509}; 510};
510EXPORT_SYMBOL_GPL(x86_hyper_kvm); 511EXPORT_SYMBOL_GPL(x86_hyper_kvm);
511 512
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index a7c5661f8496..4929502c1372 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -174,6 +174,9 @@ static int msr_open(struct inode *inode, struct file *file)
174 unsigned int cpu; 174 unsigned int cpu;
175 struct cpuinfo_x86 *c; 175 struct cpuinfo_x86 *c;
176 176
177 if (!capable(CAP_SYS_RAWIO))
178 return -EPERM;
179
177 cpu = iminor(file->f_path.dentry->d_inode); 180 cpu = iminor(file->f_path.dentry->d_inode);
178 if (cpu >= nr_cpu_ids || !cpu_online(cpu)) 181 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
179 return -ENXIO; /* No such CPU */ 182 return -ENXIO; /* No such CPU */
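With the added capable() check, opening /dev/cpu/*/msr now fails with -EPERM at open() time unless the caller holds CAP_SYS_RAWIO, instead of succeeding and failing only on access. A hedged user-space sketch of how the msr device is read; the driver treats the file offset as the MSR index (0x10 is IA32_TIME_STAMP_COUNTER):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0) {                       /* EPERM without CAP_SYS_RAWIO */
            perror("open /dev/cpu/0/msr");
            return 1;
        }
        /* The driver uses the file offset as the MSR index. */
        if (pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
            perror("pread");
            close(fd);
            return 1;
        }
        printf("TSC = %llu\n", (unsigned long long)val);
        close(fd);
        return 0;
    }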
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 0f5dec5c80e0..872079a67e4d 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -56,7 +56,7 @@ struct device x86_dma_fallback_dev = {
56EXPORT_SYMBOL(x86_dma_fallback_dev); 56EXPORT_SYMBOL(x86_dma_fallback_dev);
57 57
58/* Number of entries preallocated for DMA-API debugging */ 58/* Number of entries preallocated for DMA-API debugging */
59#define PREALLOC_DMA_DEBUG_ENTRIES 32768 59#define PREALLOC_DMA_DEBUG_ENTRIES 65536
60 60
61int dma_set_mask(struct device *dev, u64 mask) 61int dma_set_mask(struct device *dev, u64 mask)
62{ 62{
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2ed787f15bf0..14ae10031ff0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -268,13 +268,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
268unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE; 268unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
269EXPORT_SYMBOL(boot_option_idle_override); 269EXPORT_SYMBOL(boot_option_idle_override);
270 270
271/* 271static void (*x86_idle)(void);
272 * Powermanagement idle function, if any..
273 */
274void (*pm_idle)(void);
275#ifdef CONFIG_APM_MODULE
276EXPORT_SYMBOL(pm_idle);
277#endif
278 272
279#ifndef CONFIG_SMP 273#ifndef CONFIG_SMP
280static inline void play_dead(void) 274static inline void play_dead(void)
@@ -351,7 +345,7 @@ void cpu_idle(void)
351 rcu_idle_enter(); 345 rcu_idle_enter();
352 346
353 if (cpuidle_idle_call()) 347 if (cpuidle_idle_call())
354 pm_idle(); 348 x86_idle();
355 349
356 rcu_idle_exit(); 350 rcu_idle_exit();
357 start_critical_timings(); 351 start_critical_timings();
@@ -375,7 +369,6 @@ void cpu_idle(void)
375 */ 369 */
376void default_idle(void) 370void default_idle(void)
377{ 371{
378 trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
379 trace_cpu_idle_rcuidle(1, smp_processor_id()); 372 trace_cpu_idle_rcuidle(1, smp_processor_id());
380 current_thread_info()->status &= ~TS_POLLING; 373 current_thread_info()->status &= ~TS_POLLING;
381 /* 374 /*
@@ -389,21 +382,22 @@ void default_idle(void)
389 else 382 else
390 local_irq_enable(); 383 local_irq_enable();
391 current_thread_info()->status |= TS_POLLING; 384 current_thread_info()->status |= TS_POLLING;
392 trace_power_end_rcuidle(smp_processor_id());
393 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 385 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
394} 386}
395#ifdef CONFIG_APM_MODULE 387#ifdef CONFIG_APM_MODULE
396EXPORT_SYMBOL(default_idle); 388EXPORT_SYMBOL(default_idle);
397#endif 389#endif
398 390
399bool set_pm_idle_to_default(void) 391#ifdef CONFIG_XEN
392bool xen_set_default_idle(void)
400{ 393{
401 bool ret = !!pm_idle; 394 bool ret = !!x86_idle;
402 395
403 pm_idle = default_idle; 396 x86_idle = default_idle;
404 397
405 return ret; 398 return ret;
406} 399}
400#endif
407void stop_this_cpu(void *dummy) 401void stop_this_cpu(void *dummy)
408{ 402{
409 local_irq_disable(); 403 local_irq_disable();
@@ -413,31 +407,8 @@ void stop_this_cpu(void *dummy)
413 set_cpu_online(smp_processor_id(), false); 407 set_cpu_online(smp_processor_id(), false);
414 disable_local_APIC(); 408 disable_local_APIC();
415 409
416 for (;;) { 410 for (;;)
417 if (hlt_works(smp_processor_id())) 411 halt();
418 halt();
419 }
420}
421
422/* Default MONITOR/MWAIT with no hints, used for default C1 state */
423static void mwait_idle(void)
424{
425 if (!need_resched()) {
426 trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
427 trace_cpu_idle_rcuidle(1, smp_processor_id());
428 if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
429 clflush((void *)&current_thread_info()->flags);
430
431 __monitor((void *)&current_thread_info()->flags, 0, 0);
432 smp_mb();
433 if (!need_resched())
434 __sti_mwait(0, 0);
435 else
436 local_irq_enable();
437 trace_power_end_rcuidle(smp_processor_id());
438 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
439 } else
440 local_irq_enable();
441} 412}
442 413
443/* 414/*
@@ -447,62 +418,13 @@ static void mwait_idle(void)
447 */ 418 */
448static void poll_idle(void) 419static void poll_idle(void)
449{ 420{
450 trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
451 trace_cpu_idle_rcuidle(0, smp_processor_id()); 421 trace_cpu_idle_rcuidle(0, smp_processor_id());
452 local_irq_enable(); 422 local_irq_enable();
453 while (!need_resched()) 423 while (!need_resched())
454 cpu_relax(); 424 cpu_relax();
455 trace_power_end_rcuidle(smp_processor_id());
456 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 425 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
457} 426}
458 427
459/*
460 * mwait selection logic:
461 *
462 * It depends on the CPU. For AMD CPUs that support MWAIT this is
463 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
464 * then depend on a clock divisor and current Pstate of the core. If
465 * all cores of a processor are in halt state (C1) the processor can
466 * enter the C1E (C1 enhanced) state. If mwait is used this will never
467 * happen.
468 *
469 * idle=mwait overrides this decision and forces the usage of mwait.
470 */
471
472#define MWAIT_INFO 0x05
473#define MWAIT_ECX_EXTENDED_INFO 0x01
474#define MWAIT_EDX_C1 0xf0
475
476int mwait_usable(const struct cpuinfo_x86 *c)
477{
478 u32 eax, ebx, ecx, edx;
479
480 /* Use mwait if idle=mwait boot option is given */
481 if (boot_option_idle_override == IDLE_FORCE_MWAIT)
482 return 1;
483
484 /*
485 * Any idle= boot option other than idle=mwait means that we must not
486 * use mwait. Eg: idle=halt or idle=poll or idle=nomwait
487 */
488 if (boot_option_idle_override != IDLE_NO_OVERRIDE)
489 return 0;
490
491 if (c->cpuid_level < MWAIT_INFO)
492 return 0;
493
494 cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
495 /* Check, whether EDX has extended info about MWAIT */
496 if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
497 return 1;
498
499 /*
500 * edx enumerates MONITOR/MWAIT extensions. Check, whether
501 * C1 supports MWAIT
502 */
503 return (edx & MWAIT_EDX_C1);
504}
505
506bool amd_e400_c1e_detected; 428bool amd_e400_c1e_detected;
507EXPORT_SYMBOL(amd_e400_c1e_detected); 429EXPORT_SYMBOL(amd_e400_c1e_detected);
508 430
@@ -567,31 +489,24 @@ static void amd_e400_idle(void)
567void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 489void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
568{ 490{
569#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
570 if (pm_idle == poll_idle && smp_num_siblings > 1) { 492 if (x86_idle == poll_idle && smp_num_siblings > 1)
571 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); 493 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
572 }
573#endif 494#endif
574 if (pm_idle) 495 if (x86_idle)
575 return; 496 return;
576 497
577 if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) { 498 if (cpu_has_amd_erratum(amd_erratum_400)) {
578 /*
579 * One CPU supports mwait => All CPUs supports mwait
580 */
581 pr_info("using mwait in idle threads\n");
582 pm_idle = mwait_idle;
583 } else if (cpu_has_amd_erratum(amd_erratum_400)) {
584 /* E400: APIC timer interrupt does not wake up CPU from C1e */ 499 /* E400: APIC timer interrupt does not wake up CPU from C1e */
585 pr_info("using AMD E400 aware idle routine\n"); 500 pr_info("using AMD E400 aware idle routine\n");
586 pm_idle = amd_e400_idle; 501 x86_idle = amd_e400_idle;
587 } else 502 } else
588 pm_idle = default_idle; 503 x86_idle = default_idle;
589} 504}
590 505
591void __init init_amd_e400_c1e_mask(void) 506void __init init_amd_e400_c1e_mask(void)
592{ 507{
593 /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */ 508 /* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
594 if (pm_idle == amd_e400_idle) 509 if (x86_idle == amd_e400_idle)
595 zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL); 510 zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
596} 511}
597 512
@@ -602,11 +517,8 @@ static int __init idle_setup(char *str)
602 517
603 if (!strcmp(str, "poll")) { 518 if (!strcmp(str, "poll")) {
604 pr_info("using polling idle threads\n"); 519 pr_info("using polling idle threads\n");
605 pm_idle = poll_idle; 520 x86_idle = poll_idle;
606 boot_option_idle_override = IDLE_POLL; 521 boot_option_idle_override = IDLE_POLL;
607 } else if (!strcmp(str, "mwait")) {
608 boot_option_idle_override = IDLE_FORCE_MWAIT;
609 WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
610 } else if (!strcmp(str, "halt")) { 522 } else if (!strcmp(str, "halt")) {
611 /* 523 /*
612 * When the boot option of idle=halt is added, halt is 524 * When the boot option of idle=halt is added, halt is
@@ -615,7 +527,7 @@ static int __init idle_setup(char *str)
615 * To continue to load the CPU idle driver, don't touch 527 * To continue to load the CPU idle driver, don't touch
616 * the boot_option_idle_override. 528 * the boot_option_idle_override.
617 */ 529 */
618 pm_idle = default_idle; 530 x86_idle = default_idle;
619 boot_option_idle_override = IDLE_HALT; 531 boot_option_idle_override = IDLE_HALT;
620 } else if (!strcmp(str, "nomwait")) { 532 } else if (!strcmp(str, "nomwait")) {
621 /* 533 /*
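The process.c refactor retires the globally exported pm_idle hook in favour of a file-local x86_idle pointer that is chosen once at boot and then only dispatched through. A stand-alone sketch of that select-then-dispatch pattern (names are illustrative, not the kernel's):

    #include <stdio.h>

    static void default_idle(void) { puts("hlt"); }
    static void poll_idle(void)    { puts("poll"); }

    /* File-local, like x86_idle after this patch: nothing outside
     * this translation unit can repoint it. */
    static void (*idle_fn)(void);

    static void select_idle_routine(int forced_poll)
    {
        if (idle_fn)          /* already chosen (e.g. via a boot option) */
            return;
        idle_fn = forced_poll ? poll_idle : default_idle;
    }

    int main(void)
    {
        select_idle_routine(0);
        idle_fn();            /* dispatch through the single chosen routine */
        return 0;
    }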
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6e68a6194965..0f49677da51e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -117,7 +117,7 @@ void release_thread(struct task_struct *dead_task)
117{ 117{
118 if (dead_task->mm) { 118 if (dead_task->mm) {
119 if (dead_task->mm->context.size) { 119 if (dead_task->mm->context.size) {
120 pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n", 120 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
121 dead_task->comm, 121 dead_task->comm,
122 dead_task->mm->context.ldt, 122 dead_task->mm->context.ldt,
123 dead_task->mm->context.size); 123 dead_task->mm->context.size);
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index b629bbe0d9bd..29a8120e6fe8 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -22,7 +22,7 @@
22#include <linux/perf_event.h> 22#include <linux/perf_event.h>
23#include <linux/hw_breakpoint.h> 23#include <linux/hw_breakpoint.h>
24#include <linux/rcupdate.h> 24#include <linux/rcupdate.h>
25#include <linux/module.h> 25#include <linux/export.h>
26#include <linux/context_tracking.h> 26#include <linux/context_tracking.h>
27 27
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4e8ba39eaf0f..76fa1e9a2b39 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -584,7 +584,7 @@ static void native_machine_emergency_restart(void)
584 break; 584 break;
585 585
586 case BOOT_EFI: 586 case BOOT_EFI:
587 if (efi_enabled) 587 if (efi_enabled(EFI_RUNTIME_SERVICES))
588 efi.reset_system(reboot_mode ? 588 efi.reset_system(reboot_mode ?
589 EFI_RESET_WARM : 589 EFI_RESET_WARM :
590 EFI_RESET_COLD, 590 EFI_RESET_COLD,
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 801602b5d745..2e8f3d3b5641 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -149,7 +149,6 @@ unsigned long mach_get_cmos_time(void)
149 if (century) { 149 if (century) {
150 century = bcd2bin(century); 150 century = bcd2bin(century);
151 year += century * 100; 151 year += century * 100;
152 printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
153 } else 152 } else
154 year += CMOS_YEARS_OFFS; 153 year += CMOS_YEARS_OFFS;
155 154
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 878cf1d326e5..915f5efefcf5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -879,15 +879,15 @@ void __init setup_arch(char **cmdline_p)
879#ifdef CONFIG_EFI 879#ifdef CONFIG_EFI
880 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, 880 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
881 "EL32", 4)) { 881 "EL32", 4)) {
882 efi_enabled = 1; 882 set_bit(EFI_BOOT, &x86_efi_facility);
883 efi_64bit = false;
884 } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, 883 } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
885 "EL64", 4)) { 884 "EL64", 4)) {
886 efi_enabled = 1; 885 set_bit(EFI_BOOT, &x86_efi_facility);
887 efi_64bit = true; 886 set_bit(EFI_64BIT, &x86_efi_facility);
888 } 887 }
889 if (efi_enabled && efi_memblock_x86_reserve_range()) 888
890 efi_enabled = 0; 889 if (efi_enabled(EFI_BOOT))
890 efi_memblock_x86_reserve_range();
891#endif 891#endif
892 892
893 x86_init.oem.arch_setup(); 893 x86_init.oem.arch_setup();
@@ -960,7 +960,7 @@ void __init setup_arch(char **cmdline_p)
960 960
961 finish_e820_parsing(); 961 finish_e820_parsing();
962 962
963 if (efi_enabled) 963 if (efi_enabled(EFI_BOOT))
964 efi_init(); 964 efi_init();
965 965
966 dmi_scan_machine(); 966 dmi_scan_machine();
@@ -1046,7 +1046,7 @@ void __init setup_arch(char **cmdline_p)
1046 * The EFI specification says that boot service code won't be called 1046 * The EFI specification says that boot service code won't be called
1047 * after ExitBootServices(). This is, in fact, a lie. 1047 * after ExitBootServices(). This is, in fact, a lie.
1048 */ 1048 */
1049 if (efi_enabled) 1049 if (efi_enabled(EFI_MEMMAP))
1050 efi_reserve_boot_services(); 1050 efi_reserve_boot_services();
1051 1051
1052 /* preallocate 4k for mptable mpc */ 1052 /* preallocate 4k for mptable mpc */
@@ -1170,7 +1170,7 @@ void __init setup_arch(char **cmdline_p)
1170 1170
1171#ifdef CONFIG_VT 1171#ifdef CONFIG_VT
1172#if defined(CONFIG_VGA_CONSOLE) 1172#if defined(CONFIG_VGA_CONSOLE)
1173 if (!efi_enabled || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY)) 1173 if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1174 conswitchp = &vga_con; 1174 conswitchp = &vga_con;
1175#elif defined(CONFIG_DUMMY_CONSOLE) 1175#elif defined(CONFIG_DUMMY_CONSOLE)
1176 conswitchp = &dummy_con; 1176 conswitchp = &dummy_con;
@@ -1187,14 +1187,14 @@ void __init setup_arch(char **cmdline_p)
1187 register_refined_jiffies(CLOCK_TICK_RATE); 1187 register_refined_jiffies(CLOCK_TICK_RATE);
1188 1188
1189#ifdef CONFIG_EFI 1189#ifdef CONFIG_EFI
1190 /* Once setup is done above, disable efi_enabled on mismatched 1190 /* Once setup is done above, unmap the EFI memory map on
1191 * firmware/kernel architectures since there is no 1191 * mismatched firmware/kernel architectures since there is no
1192 * runtime services. 1192 * support for runtime services.
1193 */ 1193 */
1194 if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) { 1194 if (efi_enabled(EFI_BOOT) &&
1195 IS_ENABLED(CONFIG_X86_64) != efi_enabled(EFI_64BIT)) {
1195 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); 1196 pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
1196 efi_unmap_memmap(); 1197 efi_unmap_memmap();
1197 efi_enabled = 0;
1198 } 1198 }
1199#endif 1199#endif
1200} 1200}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index ed0fe385289d..a6ceaedc396a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1369,7 +1369,7 @@ static inline void mwait_play_dead(void)
1369 void *mwait_ptr; 1369 void *mwait_ptr;
1370 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info); 1370 struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);
1371 1371
1372 if (!(this_cpu_has(X86_FEATURE_MWAIT) && mwait_usable(c))) 1372 if (!this_cpu_has(X86_FEATURE_MWAIT))
1373 return; 1373 return;
1374 if (!this_cpu_has(X86_FEATURE_CLFLSH)) 1374 if (!this_cpu_has(X86_FEATURE_CLFLSH))
1375 return; 1375 return;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 97ef74b88e0f..dbded5aedb81 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -157,7 +157,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
157 if (flags & MAP_FIXED) 157 if (flags & MAP_FIXED)
158 return addr; 158 return addr;
159 159
160 /* for MAP_32BIT mappings we force the legact mmap base */ 160 /* for MAP_32BIT mappings we force the legacy mmap base */
161 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) 161 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
162 goto bottomup; 162 goto bottomup;
163 163
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 06ccb5073a3f..4b9ea101fe3b 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -623,7 +623,8 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
623 ns_now = __cycles_2_ns(tsc_now); 623 ns_now = __cycles_2_ns(tsc_now);
624 624
625 if (cpu_khz) { 625 if (cpu_khz) {
626 *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz; 626 *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
627 cpu_khz / 2) / cpu_khz;
627 *offset = ns_now - mult_frac(tsc_now, *scale, 628 *offset = ns_now - mult_frac(tsc_now, *scale,
628 (1UL << CYC2NS_SCALE_FACTOR)); 629 (1UL << CYC2NS_SCALE_FACTOR));
629 } 630 }
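The tsc.c change converts a truncating division into round-to-nearest by adding half the divisor before dividing, which halves the worst-case error in the cycles-to-nanoseconds scale factor. The idiom in isolation (values illustrative):

    #include <stdio.h>

    #define SCALE_FACTOR 10  /* stand-in for CYC2NS_SCALE_FACTOR */

    int main(void)
    {
        unsigned long cpu_khz = 2933000;  /* 2.933 GHz, illustrative */
        unsigned long long num = 1000000ULL << SCALE_FACTOR; /* NSEC_PER_MSEC << shift */

        unsigned long long truncated = num / cpu_khz;
        unsigned long long rounded   = (num + cpu_khz / 2) / cpu_khz;

        /* Truncation always errs low; rounding errs by at most half a unit. */
        printf("truncated=%llu rounded=%llu\n", truncated, rounded);
        return 0;
    }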
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index c71025b67462..0ba4cfb4f412 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -680,8 +680,10 @@ static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
680 if (auprobe->insn[i] == 0x66) 680 if (auprobe->insn[i] == 0x66)
681 continue; 681 continue;
682 682
683 if (auprobe->insn[i] == 0x90) 683 if (auprobe->insn[i] == 0x90) {
684 regs->ip += i + 1;
684 return true; 685 return true;
686 }
685 687
686 break; 688 break;
687 } 689 }
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 50cf83ecd32e..45a14dbbddaf 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -19,6 +19,7 @@
19#include <asm/time.h> 19#include <asm/time.h>
20#include <asm/irq.h> 20#include <asm/irq.h>
21#include <asm/io_apic.h> 21#include <asm/io_apic.h>
22#include <asm/hpet.h>
22#include <asm/pat.h> 23#include <asm/pat.h>
23#include <asm/tsc.h> 24#include <asm/tsc.h>
24#include <asm/iommu.h> 25#include <asm/iommu.h>
@@ -107,15 +108,22 @@ struct x86_platform_ops x86_platform = {
107 108
108EXPORT_SYMBOL_GPL(x86_platform); 109EXPORT_SYMBOL_GPL(x86_platform);
109struct x86_msi_ops x86_msi = { 110struct x86_msi_ops x86_msi = {
110 .setup_msi_irqs = native_setup_msi_irqs, 111 .setup_msi_irqs = native_setup_msi_irqs,
111 .teardown_msi_irq = native_teardown_msi_irq, 112 .compose_msi_msg = native_compose_msi_msg,
112 .teardown_msi_irqs = default_teardown_msi_irqs, 113 .teardown_msi_irq = native_teardown_msi_irq,
113 .restore_msi_irqs = default_restore_msi_irqs, 114 .teardown_msi_irqs = default_teardown_msi_irqs,
115 .restore_msi_irqs = default_restore_msi_irqs,
116 .setup_hpet_msi = default_setup_hpet_msi,
114}; 117};
115 118
116struct x86_io_apic_ops x86_io_apic_ops = { 119struct x86_io_apic_ops x86_io_apic_ops = {
117 .init = native_io_apic_init_mappings, 120 .init = native_io_apic_init_mappings,
118 .read = native_io_apic_read, 121 .read = native_io_apic_read,
119 .write = native_io_apic_write, 122 .write = native_io_apic_write,
120 .modify = native_io_apic_modify, 123 .modify = native_io_apic_modify,
124 .disable = native_disable_io_apic,
125 .print_entries = native_io_apic_print_entries,
126 .set_affinity = native_ioapic_set_affinity,
127 .setup_entry = native_setup_ioapic_entry,
128 .eoi_ioapic_pin = native_eoi_ioapic_pin,
121}; 129};
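x86_msi and x86_io_apic_ops are ops tables: structs of function pointers preloaded with native defaults that hypervisor or interrupt-remapping code can repoint at boot. A self-contained sketch of the idiom (names are illustrative):

    #include <stdio.h>

    struct io_ops {
        unsigned int (*read)(unsigned int reg);
        void (*write)(unsigned int reg, unsigned int val);
    };

    static unsigned int native_read(unsigned int reg)
    { printf("native read %u\n", reg); return 0; }
    static void native_write(unsigned int reg, unsigned int val)
    { printf("native write %u=%u\n", reg, val); }

    /* Defaults filled in at compile time, like x86_io_apic_ops. */
    static struct io_ops ops = {
        .read  = native_read,
        .write = native_write,
    };

    static unsigned int remapped_read(unsigned int reg)
    { printf("remapped read %u\n", reg); return 0; }

    int main(void)
    {
        ops.read(3);               /* native path */
        ops.read = remapped_read;  /* e.g. irq-remapping override at boot */
        ops.read(3);
        return 0;
    }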
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c243b81e3c74..37040079cd6b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1881,6 +1881,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1881 u64 data = msr_info->data; 1881 u64 data = msr_info->data;
1882 1882
1883 switch (msr) { 1883 switch (msr) {
1884 case MSR_AMD64_NB_CFG:
1885 case MSR_IA32_UCODE_REV:
1886 case MSR_IA32_UCODE_WRITE:
1887 case MSR_VM_HSAVE_PA:
1888 case MSR_AMD64_PATCH_LOADER:
1889 case MSR_AMD64_BU_CFG2:
1890 break;
1891
1884 case MSR_EFER: 1892 case MSR_EFER:
1885 return set_efer(vcpu, data); 1893 return set_efer(vcpu, data);
1886 case MSR_K7_HWCR: 1894 case MSR_K7_HWCR:
@@ -1900,8 +1908,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1900 return 1; 1908 return 1;
1901 } 1909 }
1902 break; 1910 break;
1903 case MSR_AMD64_NB_CFG:
1904 break;
1905 case MSR_IA32_DEBUGCTLMSR: 1911 case MSR_IA32_DEBUGCTLMSR:
1906 if (!data) { 1912 if (!data) {
1907 /* We support the non-activated case already */ 1913 /* We support the non-activated case already */
@@ -1914,11 +1920,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1914 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", 1920 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1915 __func__, data); 1921 __func__, data);
1916 break; 1922 break;
1917 case MSR_IA32_UCODE_REV:
1918 case MSR_IA32_UCODE_WRITE:
1919 case MSR_VM_HSAVE_PA:
1920 case MSR_AMD64_PATCH_LOADER:
1921 break;
1922 case 0x200 ... 0x2ff: 1923 case 0x200 ... 0x2ff:
1923 return set_msr_mtrr(vcpu, msr, data); 1924 return set_msr_mtrr(vcpu, msr, data);
1924 case MSR_IA32_APICBASE: 1925 case MSR_IA32_APICBASE:
@@ -2253,6 +2254,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2253 case MSR_K8_INT_PENDING_MSG: 2254 case MSR_K8_INT_PENDING_MSG:
2254 case MSR_AMD64_NB_CFG: 2255 case MSR_AMD64_NB_CFG:
2255 case MSR_FAM10H_MMIO_CONF_BASE: 2256 case MSR_FAM10H_MMIO_CONF_BASE:
2257 case MSR_AMD64_BU_CFG2:
2256 data = 0; 2258 data = 0;
2257 break; 2259 break;
2258 case MSR_P6_PERFCTR0: 2260 case MSR_P6_PERFCTR0:
diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig
index 7872a3330fb5..29043d2048a0 100644
--- a/arch/x86/lguest/Kconfig
+++ b/arch/x86/lguest/Kconfig
@@ -2,6 +2,7 @@ config LGUEST_GUEST
2 bool "Lguest guest support" 2 bool "Lguest guest support"
3 select PARAVIRT 3 select PARAVIRT
4 depends on X86_32 4 depends on X86_32
5 select TTY
5 select VIRTUALIZATION 6 select VIRTUALIZATION
6 select VIRTIO 7 select VIRTIO
7 select VIRTIO_CONSOLE 8 select VIRTIO_CONSOLE
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 027088f2f7dd..fb674fd3fc22 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -748,13 +748,15 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
748 return; 748 return;
749 } 749 }
750#endif 750#endif
751 /* Kernel addresses are always protection faults: */
752 if (address >= TASK_SIZE)
753 error_code |= PF_PROT;
751 754
752 if (unlikely(show_unhandled_signals)) 755 if (likely(show_unhandled_signals))
753 show_signal_msg(regs, error_code, address, tsk); 756 show_signal_msg(regs, error_code, address, tsk);
754 757
755 /* Kernel addresses are always protection faults: */
756 tsk->thread.cr2 = address; 758 tsk->thread.cr2 = address;
757 tsk->thread.error_code = error_code | (address >= TASK_SIZE); 759 tsk->thread.error_code = error_code;
758 tsk->thread.trap_nr = X86_TRAP_PF; 760 tsk->thread.trap_nr = X86_TRAP_PF;
759 761
760 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0); 762 force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index edaa2daf4b37..3eba7f429880 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -632,7 +632,7 @@ kernel_physical_mapping_init(unsigned long start,
632 } 632 }
633 633
634 if (pgd_changed) 634 if (pgd_changed)
635 sync_global_pgds(addr, end); 635 sync_global_pgds(addr, end - 1);
636 636
637 __flush_tlb_all(); 637 __flush_tlb_all();
638 638
@@ -862,6 +862,9 @@ int kern_addr_valid(unsigned long addr)
862 if (pud_none(*pud)) 862 if (pud_none(*pud))
863 return 0; 863 return 0;
864 864
865 if (pud_large(*pud))
866 return pfn_valid(pud_pfn(*pud));
867
865 pmd = pmd_offset(pud, addr); 868 pmd = pmd_offset(pud, addr);
866 if (pmd_none(*pmd)) 869 if (pmd_none(*pmd))
867 return 0; 870 return 0;
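The pud_large() check added to kern_addr_valid() matters because a 1 GiB mapping terminates the page-table walk at the PUD level: there is no pmd/pte underneath it, and descending further would misread the mapped frame address as a table pointer. A schematic sketch of the terminal-large-entry pattern (types and helpers are stand-ins, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in page-table entry; bit 7 (PSE) marks a large mapping. */
    typedef struct { uint64_t val; } pud_t;

    static bool pud_none(pud_t p)       { return p.val == 0; }
    static bool pud_large(pud_t p)      { return p.val & (1ULL << 7); }
    static uint64_t pud_pfn(pud_t p)    { return p.val >> 12; }
    static bool pfn_valid(uint64_t pfn) { return pfn != 0; }  /* illustrative */

    /* A large PUD is terminal: report validity here, never walk below it. */
    static bool addr_valid_at_pud(pud_t pud)
    {
        if (pud_none(pud))
            return false;
        if (pud_large(pud))                 /* 1 GiB page: no pmd/pte below */
            return pfn_valid(pud_pfn(pud));
        /* ...otherwise descend to the pmd level as before... */
        return true;
    }

    int main(void)
    {
        pud_t huge = { (0x40000ULL << 12) | (1ULL << 7) };
        printf("huge pud valid: %d\n", addr_valid_at_pud(huge));
        return 0;
    }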
@@ -1012,7 +1015,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
1012 } 1015 }
1013 1016
1014 } 1017 }
1015 sync_global_pgds((unsigned long)start_page, end); 1018 sync_global_pgds((unsigned long)start_page, end - 1);
1016 return 0; 1019 return 0;
1017} 1020}
1018 1021
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index c80b9fb95734..8dabbed409ee 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,6 +9,7 @@
9#include <linux/memblock.h> 9#include <linux/memblock.h>
10 10
11static u64 patterns[] __initdata = { 11static u64 patterns[] __initdata = {
12 /* The first entry has to be 0 to leave memtest with zeroed memory */
12 0, 13 0,
13 0xffffffffffffffffULL, 14 0xffffffffffffffffULL,
14 0x5555555555555555ULL, 15 0x5555555555555555ULL,
@@ -110,15 +111,8 @@ void __init early_memtest(unsigned long start, unsigned long end)
110 return; 111 return;
111 112
112 printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern); 113 printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
113 for (i = 0; i < memtest_pattern; i++) { 114 for (i = memtest_pattern-1; i < UINT_MAX; --i) {
114 idx = i % ARRAY_SIZE(patterns); 115 idx = i % ARRAY_SIZE(patterns);
115 do_one_pass(patterns[idx], start, end); 116 do_one_pass(patterns[idx], start, end);
116 } 117 }
117
118 if (idx > 0) {
119 printk(KERN_INFO "early_memtest: wipe out "
120 "test pattern from memory\n");
121 /* additional test with pattern 0 will do this */
122 do_one_pass(0, start, end);
123 }
124} 118}
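The rewritten memtest loop counts the pass index down from memtest_pattern-1; because the counter is unsigned, decrementing past zero wraps to UINT_MAX, which fails the i < UINT_MAX test and ends the loop. Index 0 — the all-zeroes pattern — therefore always runs last and leaves memory wiped without the separate cleanup pass. The idiom in isolation:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        const unsigned long long patterns[] = { 0, ~0ULL, 0x5555555555555555ULL };
        unsigned int npatterns = 5;  /* like memtest_pattern; may exceed the array */
        unsigned int i;

        /* Counts ..., 2, 1, 0; decrementing 0 wraps to UINT_MAX, which
         * fails i < UINT_MAX and ends the loop. Pattern 0 runs last. */
        for (i = npatterns - 1; i < UINT_MAX; --i)
            printf("pass %u: pattern %#llx\n", i,
                   patterns[i % (sizeof(patterns) / sizeof(patterns[0]))]);
        return 0;
    }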
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 4ddf497ca65b..cdd0da9dd530 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -149,39 +149,40 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
149 int node, pxm; 149 int node, pxm;
150 150
151 if (srat_disabled()) 151 if (srat_disabled())
152 return -1; 152 goto out_err;
153 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { 153 if (ma->header.length != sizeof(struct acpi_srat_mem_affinity))
154 bad_srat(); 154 goto out_err_bad_srat;
155 return -1;
156 }
157 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) 155 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
158 return -1; 156 goto out_err;
159
160 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) 157 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
161 return -1; 158 goto out_err;
159
162 start = ma->base_address; 160 start = ma->base_address;
163 end = start + ma->length; 161 end = start + ma->length;
164 pxm = ma->proximity_domain; 162 pxm = ma->proximity_domain;
165 if (acpi_srat_revision <= 1) 163 if (acpi_srat_revision <= 1)
166 pxm &= 0xff; 164 pxm &= 0xff;
165
167 node = setup_node(pxm); 166 node = setup_node(pxm);
168 if (node < 0) { 167 if (node < 0) {
169 printk(KERN_ERR "SRAT: Too many proximity domains.\n"); 168 printk(KERN_ERR "SRAT: Too many proximity domains.\n");
170 bad_srat(); 169 goto out_err_bad_srat;
171 return -1;
172 } 170 }
173 171
174 if (numa_add_memblk(node, start, end) < 0) { 172 if (numa_add_memblk(node, start, end) < 0)
175 bad_srat(); 173 goto out_err_bad_srat;
176 return -1;
177 }
178 174
179 node_set(node, numa_nodes_parsed); 175 node_set(node, numa_nodes_parsed);
180 176
181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 177 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
182 node, pxm, 178 node, pxm,
183 (unsigned long long) start, (unsigned long long) end - 1); 179 (unsigned long long) start, (unsigned long long) end - 1);
180
184 return 0; 181 return 0;
182out_err_bad_srat:
183 bad_srat();
184out_err:
185 return -1;
185} 186}
186 187
187void __init acpi_numa_arch_fixup(void) {} 188void __init acpi_numa_arch_fixup(void) {}
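The srat.c change is a pure control-flow cleanup: every duplicated bad_srat(); return -1; pair collapses into two shared labels, with the cleanup label falling through into the plain error return. The general pattern as a minimal sketch:

    #include <stdio.h>

    static void undo_side_effects(void) { puts("cleanup"); }

    static int parse(int a, int b)
    {
        if (a < 0)
            goto out_err;            /* plain failure, nothing to undo */
        if (b < 0)
            goto out_err_cleanup;    /* failure that must also clean up */

        puts("ok");
        return 0;

    out_err_cleanup:
        undo_side_effects();         /* one copy of the cleanup call */
    out_err:
        return -1;                   /* one copy of the error return */
    }

    int main(void)
    {
        return parse(1, -1) == -1 ? 0 : 1;
    }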
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 13a6b29e2e5d..282375f13c7e 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -335,7 +335,7 @@ static const struct file_operations fops_tlbflush = {
335 .llseek = default_llseek, 335 .llseek = default_llseek,
336}; 336};
337 337
338static int __cpuinit create_tlb_flushall_shift(void) 338static int __init create_tlb_flushall_shift(void)
339{ 339{
340 debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR, 340 debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
341 arch_debugfs_dir, NULL, &fops_tlbflush); 341 arch_debugfs_dir, NULL, &fops_tlbflush);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d11a47099d33..3cbe45381bbb 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1,6 +1,6 @@
1/* bpf_jit_comp.c : BPF JIT compiler 1/* bpf_jit_comp.c : BPF JIT compiler
2 * 2 *
3 * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com) 3 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License 6 * modify it under the terms of the GNU General Public License
@@ -124,6 +124,26 @@ static inline void bpf_flush_icache(void *start, void *end)
124#define CHOOSE_LOAD_FUNC(K, func) \ 124#define CHOOSE_LOAD_FUNC(K, func) \
125 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) 125 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
126 126
127/* Helper to find the offset of pkt_type in sk_buff
128 * We want to make sure it's still a 3-bit field starting at a byte boundary.
129 */
130#define PKT_TYPE_MAX 7
131static int pkt_type_offset(void)
132{
133 struct sk_buff skb_probe = {
134 .pkt_type = ~0,
135 };
136 char *ct = (char *)&skb_probe;
137 unsigned int off;
138
139 for (off = 0; off < sizeof(struct sk_buff); off++) {
140 if (ct[off] == PKT_TYPE_MAX)
141 return off;
142 }
143 pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n");
144 return -1;
145}
146
127void bpf_jit_compile(struct sk_filter *fp) 147void bpf_jit_compile(struct sk_filter *fp)
128{ 148{
129 u8 temp[64]; 149 u8 temp[64];
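pkt_type is a bitfield, so offsetof() cannot name it; pkt_type_offset() instead sets the field to all-ones in an otherwise zeroed struct and scans for the byte that reads back as 0x7. A reduced sketch of the probing trick on a made-up struct (bitfield layout is ABI-dependent, which is exactly why a runtime probe is used):

    #include <stdio.h>
    #include <string.h>

    struct probe {
        int before;
        unsigned char kind:3;  /* offsetof(struct probe, kind) is illegal */
        unsigned char rest:5;
    };

    #define KIND_MAX 7         /* all three bits set */

    static int kind_offset(void)
    {
        struct probe p;
        unsigned char *ct = (unsigned char *)&p;
        unsigned int off;

        memset(&p, 0, sizeof(p));
        p.kind = KIND_MAX;     /* only the probed field becomes nonzero */
        for (off = 0; off < sizeof(p); off++)
            if (ct[off] == KIND_MAX)
                return (int)off;
        return -1;             /* not byte-aligned, or wider than expected */
    }

    int main(void)
    {
        printf("kind lives at byte offset %d\n", kind_offset());
        return 0;
    }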
@@ -216,6 +236,7 @@ void bpf_jit_compile(struct sk_filter *fp)
216 case BPF_S_ANC_VLAN_TAG: 236 case BPF_S_ANC_VLAN_TAG:
217 case BPF_S_ANC_VLAN_TAG_PRESENT: 237 case BPF_S_ANC_VLAN_TAG_PRESENT:
218 case BPF_S_ANC_QUEUE: 238 case BPF_S_ANC_QUEUE:
239 case BPF_S_ANC_PKTTYPE:
219 case BPF_S_LD_W_ABS: 240 case BPF_S_LD_W_ABS:
220 case BPF_S_LD_H_ABS: 241 case BPF_S_LD_H_ABS:
221 case BPF_S_LD_B_ABS: 242 case BPF_S_LD_B_ABS:
@@ -536,6 +557,23 @@ void bpf_jit_compile(struct sk_filter *fp)
536 EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */ 557 EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */
537 } 558 }
538 break; 559 break;
560 case BPF_S_ANC_PKTTYPE:
561 {
562 int off = pkt_type_offset();
563
564 if (off < 0)
565 goto out;
566 if (is_imm8(off)) {
567 /* movzbl off8(%rdi),%eax */
568 EMIT4(0x0f, 0xb6, 0x47, off);
569 } else {
570 /* movzbl off32(%rdi),%eax */
571 EMIT3(0x0f, 0xb6, 0x87);
572 EMIT(off, 4);
573 }
574 EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */
575 break;
576 }
539 case BPF_S_LD_W_ABS: 577 case BPF_S_LD_W_ABS:
540 func = CHOOSE_LOAD_FUNC(K, sk_load_word); 578 func = CHOOSE_LOAD_FUNC(K, sk_load_word);
541common_load: seen |= SEEN_DATAREF; 579common_load: seen |= SEEN_DATAREF;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index fb29968a7cd5..082e88129712 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -548,8 +548,7 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
548 if (cfg->address < 0xFFFFFFFF) 548 if (cfg->address < 0xFFFFFFFF)
549 return 0; 549 return 0;
550 550
551 if (!strcmp(mcfg->header.oem_id, "SGI") || 551 if (!strncmp(mcfg->header.oem_id, "SGI", 3))
552 !strcmp(mcfg->header.oem_id, "SGI2"))
553 return 0; 552 return 0;
554 553
555 if (mcfg->header.revision >= 1) { 554 if (mcfg->header.revision >= 1) {
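ACPI header fields such as oem_id are fixed-width and padded rather than NUL-terminated, so the old exact strcmp() can fail on a padded field; the bounded prefix compare handles both "SGI" and "SGI2" boxes with one test. A small sketch of the difference (the padded ID value is illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Fixed 6-byte ACPI-style ID, padded rather than NUL-terminated. */
        char oem_id[6] = { 'S', 'G', 'I', '2', ' ', ' ' };

        /* strcmp(oem_id, "SGI2") fails here: byte 4 is ' ', not '\0'.
         * A bounded prefix compare matches both "SGI" and "SGI2". */
        printf("strcmp:  %s\n", !strcmp(oem_id, "SGI2") ? "match" : "no match");
        printf("strncmp: %s\n", !strncmp(oem_id, "SGI", 3) ? "match" : "no match");
        return 0;
    }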
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
index 8d874396cb29..01e0231a113e 100644
--- a/arch/x86/platform/Makefile
+++ b/arch/x86/platform/Makefile
@@ -2,10 +2,12 @@
2obj-y += ce4100/ 2obj-y += ce4100/
3obj-y += efi/ 3obj-y += efi/
4obj-y += geode/ 4obj-y += geode/
5obj-y += goldfish/
5obj-y += iris/ 6obj-y += iris/
6obj-y += mrst/ 7obj-y += mrst/
7obj-y += olpc/ 8obj-y += olpc/
8obj-y += scx200/ 9obj-y += scx200/
9obj-y += sfi/ 10obj-y += sfi/
11obj-y += ts5500/
10obj-y += visws/ 12obj-y += visws/
11obj-y += uv/ 13obj-y += uv/
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index d9c1b95af17c..7145ec63c520 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -11,20 +11,21 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h>
14#include <linux/acpi.h> 15#include <linux/acpi.h>
15#include <linux/efi.h> 16#include <linux/efi.h>
16#include <linux/efi-bgrt.h> 17#include <linux/efi-bgrt.h>
17 18
18struct acpi_table_bgrt *bgrt_tab; 19struct acpi_table_bgrt *bgrt_tab;
19void *bgrt_image; 20void *__initdata bgrt_image;
20size_t bgrt_image_size; 21size_t __initdata bgrt_image_size;
21 22
22struct bmp_header { 23struct bmp_header {
23 u16 id; 24 u16 id;
24 u32 size; 25 u32 size;
25} __packed; 26} __packed;
26 27
27void efi_bgrt_init(void) 28void __init efi_bgrt_init(void)
28{ 29{
29 acpi_status status; 30 acpi_status status;
30 void __iomem *image; 31 void __iomem *image;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1743c1c92411..70b2a3a305d6 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -51,9 +51,6 @@
51 51
52#define EFI_DEBUG 1 52#define EFI_DEBUG 1
53 53
54int efi_enabled;
55EXPORT_SYMBOL(efi_enabled);
56
57struct efi __read_mostly efi = { 54struct efi __read_mostly efi = {
58 .mps = EFI_INVALID_TABLE_ADDR, 55 .mps = EFI_INVALID_TABLE_ADDR,
59 .acpi = EFI_INVALID_TABLE_ADDR, 56 .acpi = EFI_INVALID_TABLE_ADDR,
@@ -69,19 +66,28 @@ EXPORT_SYMBOL(efi);
69 66
70struct efi_memory_map memmap; 67struct efi_memory_map memmap;
71 68
72bool efi_64bit;
73
74static struct efi efi_phys __initdata; 69static struct efi efi_phys __initdata;
75static efi_system_table_t efi_systab __initdata; 70static efi_system_table_t efi_systab __initdata;
76 71
77static inline bool efi_is_native(void) 72static inline bool efi_is_native(void)
78{ 73{
79 return IS_ENABLED(CONFIG_X86_64) == efi_64bit; 74 return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
75}
76
77unsigned long x86_efi_facility;
78
79/*
80 * Returns 1 if 'facility' is enabled, 0 otherwise.
81 */
82int efi_enabled(int facility)
83{
84 return test_bit(facility, &x86_efi_facility) != 0;
80} 85}
86EXPORT_SYMBOL(efi_enabled);
81 87
82static int __init setup_noefi(char *arg) 88static int __init setup_noefi(char *arg)
83{ 89{
84 efi_enabled = 0; 90 clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
85 return 0; 91 return 0;
86} 92}
87early_param("noefi", setup_noefi); 93early_param("noefi", setup_noefi);
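The new x86_efi_facility word replaces the single efi_enabled flag with one bit per EFI facility, so losing one capability (say, unmapping the memory map) no longer disables EFI wholesale. A user-space sketch of the same pattern with plain bit operations (the kernel uses the atomic set_bit/clear_bit/test_bit helpers):

    #include <stdio.h>

    enum { EFI_BOOT, EFI_64BIT, EFI_CONFIG_TABLES, EFI_MEMMAP, EFI_RUNTIME_SERVICES };

    static unsigned long efi_facility;  /* like x86_efi_facility */

    static void facility_set(int bit)     { efi_facility |= 1UL << bit; }
    static void facility_clear(int bit)   { efi_facility &= ~(1UL << bit); }
    static int  facility_enabled(int bit) { return !!(efi_facility & (1UL << bit)); }

    int main(void)
    {
        facility_set(EFI_BOOT);
        facility_set(EFI_MEMMAP);
        facility_clear(EFI_MEMMAP);      /* e.g. after efi_unmap_memmap() */

        /* Queries are now per-facility, not one global yes/no. */
        printf("boot=%d memmap=%d\n",
               facility_enabled(EFI_BOOT), facility_enabled(EFI_MEMMAP));
        return 0;
    }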
@@ -426,6 +432,7 @@ void __init efi_reserve_boot_services(void)
426 432
427void __init efi_unmap_memmap(void) 433void __init efi_unmap_memmap(void)
428{ 434{
435 clear_bit(EFI_MEMMAP, &x86_efi_facility);
429 if (memmap.map) { 436 if (memmap.map) {
430 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); 437 early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
431 memmap.map = NULL; 438 memmap.map = NULL;
@@ -460,7 +467,7 @@ void __init efi_free_boot_services(void)
460 467
461static int __init efi_systab_init(void *phys) 468static int __init efi_systab_init(void *phys)
462{ 469{
463 if (efi_64bit) { 470 if (efi_enabled(EFI_64BIT)) {
464 efi_system_table_64_t *systab64; 471 efi_system_table_64_t *systab64;
465 u64 tmp = 0; 472 u64 tmp = 0;
466 473
@@ -552,7 +559,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
552 void *config_tables, *tablep; 559 void *config_tables, *tablep;
553 int i, sz; 560 int i, sz;
554 561
555 if (efi_64bit) 562 if (efi_enabled(EFI_64BIT))
556 sz = sizeof(efi_config_table_64_t); 563 sz = sizeof(efi_config_table_64_t);
557 else 564 else
558 sz = sizeof(efi_config_table_32_t); 565 sz = sizeof(efi_config_table_32_t);
@@ -572,7 +579,7 @@ static int __init efi_config_init(u64 tables, int nr_tables)
572 efi_guid_t guid; 579 efi_guid_t guid;
573 unsigned long table; 580 unsigned long table;
574 581
575 if (efi_64bit) { 582 if (efi_enabled(EFI_64BIT)) {
576 u64 table64; 583 u64 table64;
577 guid = ((efi_config_table_64_t *)tablep)->guid; 584 guid = ((efi_config_table_64_t *)tablep)->guid;
578 table64 = ((efi_config_table_64_t *)tablep)->table; 585 table64 = ((efi_config_table_64_t *)tablep)->table;
@@ -684,7 +691,6 @@ void __init efi_init(void)
684 if (boot_params.efi_info.efi_systab_hi || 691 if (boot_params.efi_info.efi_systab_hi ||
685 boot_params.efi_info.efi_memmap_hi) { 692 boot_params.efi_info.efi_memmap_hi) {
686 pr_info("Table located above 4GB, disabling EFI.\n"); 693 pr_info("Table located above 4GB, disabling EFI.\n");
687 efi_enabled = 0;
688 return; 694 return;
689 } 695 }
690 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab; 696 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
@@ -694,10 +700,10 @@ void __init efi_init(void)
694 ((__u64)boot_params.efi_info.efi_systab_hi<<32)); 700 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
695#endif 701#endif
696 702
697 if (efi_systab_init(efi_phys.systab)) { 703 if (efi_systab_init(efi_phys.systab))
698 efi_enabled = 0;
699 return; 704 return;
700 } 705
706 set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
701 707
702 /* 708 /*
703 * Show what we know for posterity 709 * Show what we know for posterity
@@ -715,10 +721,10 @@ void __init efi_init(void)
715 efi.systab->hdr.revision >> 16, 721 efi.systab->hdr.revision >> 16,
716 efi.systab->hdr.revision & 0xffff, vendor); 722 efi.systab->hdr.revision & 0xffff, vendor);
717 723
718 if (efi_config_init(efi.systab->tables, efi.systab->nr_tables)) { 724 if (efi_config_init(efi.systab->tables, efi.systab->nr_tables))
719 efi_enabled = 0;
720 return; 725 return;
721 } 726
727 set_bit(EFI_CONFIG_TABLES, &x86_efi_facility);
722 728
723 /* 729 /*
724 * Note: We currently don't support runtime services on an EFI 730 * Note: We currently don't support runtime services on an EFI
@@ -727,15 +733,17 @@ void __init efi_init(void)
727 733
728 if (!efi_is_native()) 734 if (!efi_is_native())
729 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 735 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
730 else if (efi_runtime_init()) { 736 else {
731 efi_enabled = 0; 737 if (efi_runtime_init())
732 return; 738 return;
739 set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
733 } 740 }
734 741
735 if (efi_memmap_init()) { 742 if (efi_memmap_init())
736 efi_enabled = 0;
737 return; 743 return;
738 } 744
745 set_bit(EFI_MEMMAP, &x86_efi_facility);
746
739#ifdef CONFIG_X86_32 747#ifdef CONFIG_X86_32
740 if (efi_is_native()) { 748 if (efi_is_native()) {
741 x86_platform.get_wallclock = efi_get_time; 749 x86_platform.get_wallclock = efi_get_time;
@@ -940,7 +948,7 @@ void __init efi_enter_virtual_mode(void)
940 * 948 *
941 * Call EFI services through wrapper functions. 949 * Call EFI services through wrapper functions.
942 */ 950 */
943 efi.runtime_version = efi_systab.fw_revision; 951 efi.runtime_version = efi_systab.hdr.revision;
944 efi.get_time = virt_efi_get_time; 952 efi.get_time = virt_efi_get_time;
945 efi.set_time = virt_efi_set_time; 953 efi.set_time = virt_efi_set_time;
946 efi.get_wakeup_time = virt_efi_get_wakeup_time; 954 efi.get_wakeup_time = virt_efi_get_wakeup_time;
@@ -968,6 +976,9 @@ u32 efi_mem_type(unsigned long phys_addr)
968 efi_memory_desc_t *md; 976 efi_memory_desc_t *md;
969 void *p; 977 void *p;
970 978
979 if (!efi_enabled(EFI_MEMMAP))
980 return 0;
981
971 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 982 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
972 md = p; 983 md = p;
973 if ((md->phys_addr <= phys_addr) && 984 if ((md->phys_addr <= phys_addr) &&
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 95fd505dfeb6..2b2003860615 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -38,7 +38,7 @@
38#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
39#include <asm/fixmap.h> 39#include <asm/fixmap.h>
40 40
41static pgd_t save_pgd __initdata; 41static pgd_t *save_pgd __initdata;
42static unsigned long efi_flags __initdata; 42static unsigned long efi_flags __initdata;
43 43
44static void __init early_code_mapping_set_exec(int executable) 44static void __init early_code_mapping_set_exec(int executable)
@@ -61,12 +61,20 @@ static void __init early_code_mapping_set_exec(int executable)
 void __init efi_call_phys_prelog(void)
 {
 	unsigned long vaddress;
+	int pgd;
+	int n_pgds;
 
 	early_code_mapping_set_exec(1);
 	local_irq_save(efi_flags);
-	vaddress = (unsigned long)__va(0x0UL);
-	save_pgd = *pgd_offset_k(0x0UL);
-	set_pgd(pgd_offset_k(0x0UL), *pgd_offset_k(vaddress));
+
+	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
+	save_pgd = kmalloc(n_pgds * sizeof(pgd_t), GFP_KERNEL);
+
+	for (pgd = 0; pgd < n_pgds; pgd++) {
+		save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
+		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+	}
 	__flush_tlb_all();
 }
 
@@ -75,7 +83,11 @@ void __init efi_call_phys_epilog(void)
 	/*
 	 * After the lock is released, the original page table is restored.
 	 */
-	set_pgd(pgd_offset_k(0x0UL), save_pgd);
+	int pgd;
+	int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+	for (pgd = 0; pgd < n_pgds; pgd++)
+		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
+	kfree(save_pgd);
 	__flush_tlb_all();
 	local_irq_restore(efi_flags);
 	early_code_mapping_set_exec(0);
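The prelog/epilog change above generalizes the old single-entry hack: instead
of saving and aliasing only the pgd covering address 0, it saves every kernel
pgd covering physical memory and installs 1:1 mappings, so firmware called in
physical mode can reach all of RAM. A rough sketch of how the pair is used,
following the shape of phys_efi_set_virtual_address_map() in efi.c (argument
names illustrative):

	efi_call_phys_prelog();		/* kmalloc save_pgd[], map 1:1 */
	status = efi_call_phys4(efi_phys.set_virtual_address_map,
				memory_map_size, descriptor_size,
				descriptor_version, virtual_map);
	efi_call_phys_epilog();		/* restore save_pgd[], kfree it */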
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile
new file mode 100644
index 000000000000..f030b532fdf3
--- /dev/null
+++ b/arch/x86/platform/goldfish/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_GOLDFISH) += goldfish.o
diff --git a/arch/x86/platform/goldfish/goldfish.c b/arch/x86/platform/goldfish/goldfish.c
new file mode 100644
index 000000000000..1693107a518e
--- /dev/null
+++ b/arch/x86/platform/goldfish/goldfish.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2007 Google, Inc.
3 * Copyright (C) 2011 Intel, Inc.
4 * Copyright (C) 2013 Intel, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/kernel.h>
18#include <linux/irq.h>
19#include <linux/platform_device.h>
20
21/*
22 * Where in virtual device memory the IO devices (timers, system controllers
23 * and so on) live
24 */
25
26#define GOLDFISH_PDEV_BUS_BASE (0xff001000)
27#define GOLDFISH_PDEV_BUS_END (0xff7fffff)
28#define GOLDFISH_PDEV_BUS_IRQ (4)
29
30#define GOLDFISH_TTY_BASE (0x2000)
31
32static struct resource goldfish_pdev_bus_resources[] = {
33 {
34 .start = GOLDFISH_PDEV_BUS_BASE,
35 .end = GOLDFISH_PDEV_BUS_END,
36 .flags = IORESOURCE_MEM,
37 },
38 {
39 .start = GOLDFISH_PDEV_BUS_IRQ,
40 .end = GOLDFISH_PDEV_BUS_IRQ,
41 .flags = IORESOURCE_IRQ,
42 }
43};
44
45static int __init goldfish_init(void)
46{
47 platform_device_register_simple("goldfish_pdev_bus", -1,
48 goldfish_pdev_bus_resources, 2);
49 return 0;
50}
51device_initcall(goldfish_init);
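The file above only registers a device; a matching platform_driver (the
goldfish pdev bus driver proper, not part of this patch) binds to it by name.
A hypothetical skeleton of that consumer side, for illustration:

	static int goldfish_pdev_bus_probe(struct platform_device *pdev)
	{
		struct resource *r;

		/* The MEM resource is the 0xff001000..0xff7fffff window. */
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!r)
			return -ENODEV;
		/* ... ioremap(r->start, resource_size(r)), request the
		 * IORESOURCE_IRQ line, then enumerate child devices ... */
		return 0;
	}

	static struct platform_driver goldfish_pdev_bus_driver = {
		.probe	= goldfish_pdev_bus_probe,
		.driver	= {
			.name = "goldfish_pdev_bus"
		}
	};
	module_platform_driver(goldfish_pdev_bus_driver);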
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 2fdca25905ae..fef7d0ba7e3a 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -195,7 +195,7 @@ err_sysfs:
 	return r;
 }
 
-static int xo15_sci_remove(struct acpi_device *device, int type)
+static int xo15_sci_remove(struct acpi_device *device)
 {
 	acpi_disable_gpe(NULL, xo15_sci_gpe);
 	acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
diff --git a/arch/x86/platform/sfi/sfi.c b/arch/x86/platform/sfi/sfi.c
index 7785b72ecc3a..bcd1a703e3e6 100644
--- a/arch/x86/platform/sfi/sfi.c
+++ b/arch/x86/platform/sfi/sfi.c
@@ -35,7 +35,7 @@
 static unsigned long sfi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 
 /* All CPUs enumerated by SFI must be present and enabled */
-static void __cpuinit mp_sfi_register_lapic(u8 id)
+static void __init mp_sfi_register_lapic(u8 id)
 {
 	if (MAX_LOCAL_APIC - id <= 0) {
 		pr_warning("Processor #%d invalid (max %d)\n",
diff --git a/arch/x86/platform/ts5500/Makefile b/arch/x86/platform/ts5500/Makefile
new file mode 100644
index 000000000000..c54e348c96a7
--- /dev/null
+++ b/arch/x86/platform/ts5500/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_TS5500) += ts5500.o
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
new file mode 100644
index 000000000000..39febb214e8c
--- /dev/null
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -0,0 +1,339 @@
1/*
2 * Technologic Systems TS-5500 Single Board Computer support
3 *
4 * Copyright (C) 2013 Savoir-faire Linux Inc.
5 * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it under
8 * the terms of the GNU General Public License as published by the Free Software
9 * Foundation; either version 2 of the License, or (at your option) any later
10 * version.
11 *
12 *
13 * This driver registers the Technologic Systems TS-5500 Single Board Computer
14 * (SBC) and its devices, and exposes information to userspace such as jumpers'
15 * state or available options. For further information about sysfs entries, see
16 * Documentation/ABI/testing/sysfs-platform-ts5500.
17 *
18 * This code currently supports the TS-5500 platform, but it may be extended to
19 * support similar Technologic Systems x86-based platforms, such as the TS-5600.
20 */
21
22#include <linux/delay.h>
23#include <linux/io.h>
24#include <linux/kernel.h>
25#include <linux/leds.h>
26#include <linux/module.h>
27#include <linux/platform_data/gpio-ts5500.h>
28#include <linux/platform_data/max197.h>
29#include <linux/platform_device.h>
30#include <linux/slab.h>
31
32/* Product code register */
33#define TS5500_PRODUCT_CODE_ADDR 0x74
34#define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */
35
36/* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */
37#define TS5500_SRAM_RS485_ADC_ADDR 0x75
38#define TS5500_SRAM BIT(0) /* SRAM option */
39#define TS5500_RS485 BIT(1) /* RS-485 option */
40#define TS5500_ADC BIT(2) /* A/D converter option */
41#define TS5500_RS485_RTS BIT(6) /* RTS for RS-485 */
42#define TS5500_RS485_AUTO BIT(7) /* Automatic RS-485 */
43
44/* External Reset/Industrial Temperature Range options register */
45#define TS5500_ERESET_ITR_ADDR 0x76
46#define TS5500_ERESET BIT(0) /* External Reset option */
47#define TS5500_ITR BIT(1) /* Indust. Temp. Range option */
48
49/* LED/Jumpers register */
50#define TS5500_LED_JP_ADDR 0x77
51#define TS5500_LED BIT(0) /* LED flag */
52#define TS5500_JP1 BIT(1) /* Automatic CMOS */
53#define TS5500_JP2 BIT(2) /* Enable Serial Console */
54#define TS5500_JP3 BIT(3) /* Write Enable Drive A */
55#define TS5500_JP4 BIT(4) /* Fast Console (115K baud) */
56#define TS5500_JP5 BIT(5) /* User Jumper */
57#define TS5500_JP6 BIT(6) /* Console on COM1 (req. JP2) */
58#define TS5500_JP7 BIT(7) /* Undocumented (Unused) */
59
60/* A/D Converter registers */
61#define TS5500_ADC_CONV_BUSY_ADDR 0x195 /* Conversion state register */
62#define TS5500_ADC_CONV_BUSY BIT(0)
63#define TS5500_ADC_CONV_INIT_LSB_ADDR 0x196 /* Start conv. / LSB register */
64#define TS5500_ADC_CONV_MSB_ADDR 0x197 /* MSB register */
65#define TS5500_ADC_CONV_DELAY 12 /* usec */
66
67/**
68 * struct ts5500_sbc - TS-5500 board description
69 * @id: Board product ID.
70 * @sram: Flag for SRAM option.
71 * @rs485: Flag for RS-485 option.
72 * @adc: Flag for Analog/Digital converter option.
73 * @ereset: Flag for External Reset option.
74 * @itr: Flag for Industrial Temperature Range option.
75 * @jumpers: Bitfield for jumpers' state.
76 */
77struct ts5500_sbc {
78 int id;
79 bool sram;
80 bool rs485;
81 bool adc;
82 bool ereset;
83 bool itr;
84 u8 jumpers;
85};
86
87/* Board signatures in BIOS shadow RAM */
88static const struct {
89 const char * const string;
90 const ssize_t offset;
91} ts5500_signatures[] __initdata = {
92 { "TS-5x00 AMD Elan", 0xb14 },
93};
94
95static int __init ts5500_check_signature(void)
96{
97 void __iomem *bios;
98 int i, ret = -ENODEV;
99
100 bios = ioremap(0xf0000, 0x10000);
101 if (!bios)
102 return -ENOMEM;
103
104 for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) {
105 if (check_signature(bios + ts5500_signatures[i].offset,
106 ts5500_signatures[i].string,
107 strlen(ts5500_signatures[i].string))) {
108 ret = 0;
109 break;
110 }
111 }
112
113 iounmap(bios);
114 return ret;
115}
116
117static int __init ts5500_detect_config(struct ts5500_sbc *sbc)
118{
119 u8 tmp;
120 int ret = 0;
121
122 if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500"))
123 return -EBUSY;
124
125 tmp = inb(TS5500_PRODUCT_CODE_ADDR);
126 if (tmp != TS5500_PRODUCT_CODE) {
127 pr_err("This platform is not a TS-5500 (found ID 0x%x)\n", tmp);
128 ret = -ENODEV;
129 goto cleanup;
130 }
131 sbc->id = tmp;
132
133 tmp = inb(TS5500_SRAM_RS485_ADC_ADDR);
134 sbc->sram = tmp & TS5500_SRAM;
135 sbc->rs485 = tmp & TS5500_RS485;
136 sbc->adc = tmp & TS5500_ADC;
137
138 tmp = inb(TS5500_ERESET_ITR_ADDR);
139 sbc->ereset = tmp & TS5500_ERESET;
140 sbc->itr = tmp & TS5500_ITR;
141
142 tmp = inb(TS5500_LED_JP_ADDR);
143 sbc->jumpers = tmp & ~TS5500_LED;
144
145cleanup:
146 release_region(TS5500_PRODUCT_CODE_ADDR, 4);
147 return ret;
148}
149
150static ssize_t ts5500_show_id(struct device *dev,
151 struct device_attribute *attr, char *buf)
152{
153 struct ts5500_sbc *sbc = dev_get_drvdata(dev);
154
155 return sprintf(buf, "0x%.2x\n", sbc->id);
156}
157
158static ssize_t ts5500_show_jumpers(struct device *dev,
159 struct device_attribute *attr,
160 char *buf)
161{
162 struct ts5500_sbc *sbc = dev_get_drvdata(dev);
163
164 return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1);
165}
166
167#define TS5500_SHOW(field) \
168 static ssize_t ts5500_show_##field(struct device *dev, \
169 struct device_attribute *attr, \
170 char *buf) \
171 { \
172 struct ts5500_sbc *sbc = dev_get_drvdata(dev); \
173 return sprintf(buf, "%d\n", sbc->field); \
174 }
175
176TS5500_SHOW(sram)
177TS5500_SHOW(rs485)
178TS5500_SHOW(adc)
179TS5500_SHOW(ereset)
180TS5500_SHOW(itr)
181
182static DEVICE_ATTR(id, S_IRUGO, ts5500_show_id, NULL);
183static DEVICE_ATTR(jumpers, S_IRUGO, ts5500_show_jumpers, NULL);
184static DEVICE_ATTR(sram, S_IRUGO, ts5500_show_sram, NULL);
185static DEVICE_ATTR(rs485, S_IRUGO, ts5500_show_rs485, NULL);
186static DEVICE_ATTR(adc, S_IRUGO, ts5500_show_adc, NULL);
187static DEVICE_ATTR(ereset, S_IRUGO, ts5500_show_ereset, NULL);
188static DEVICE_ATTR(itr, S_IRUGO, ts5500_show_itr, NULL);
189
190static struct attribute *ts5500_attributes[] = {
191 &dev_attr_id.attr,
192 &dev_attr_jumpers.attr,
193 &dev_attr_sram.attr,
194 &dev_attr_rs485.attr,
195 &dev_attr_adc.attr,
196 &dev_attr_ereset.attr,
197 &dev_attr_itr.attr,
198 NULL
199};
200
201static const struct attribute_group ts5500_attr_group = {
202 .attrs = ts5500_attributes,
203};
204
205static struct resource ts5500_dio1_resource[] = {
206 DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"),
207};
208
209static struct platform_device ts5500_dio1_pdev = {
210 .name = "ts5500-dio1",
211 .id = -1,
212 .resource = ts5500_dio1_resource,
213 .num_resources = 1,
214};
215
216static struct resource ts5500_dio2_resource[] = {
217 DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"),
218};
219
220static struct platform_device ts5500_dio2_pdev = {
221 .name = "ts5500-dio2",
222 .id = -1,
223 .resource = ts5500_dio2_resource,
224 .num_resources = 1,
225};
226
227static void ts5500_led_set(struct led_classdev *led_cdev,
228 enum led_brightness brightness)
229{
230 outb(!!brightness, TS5500_LED_JP_ADDR);
231}
232
233static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev)
234{
235 return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF;
236}
237
238static struct led_classdev ts5500_led_cdev = {
239 .name = "ts5500:green:",
240 .brightness_set = ts5500_led_set,
241 .brightness_get = ts5500_led_get,
242};
243
244static int ts5500_adc_convert(u8 ctrl)
245{
246 u8 lsb, msb;
247
248 /* Start conversion (ensure the 3 MSB are set to 0) */
249 outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR);
250
251 /*
252 * The platform has CPLD logic driving the A/D converter.
253 * The conversion must complete within 11 microseconds,
254 * otherwise we have to re-initiate a conversion.
255 */
256 udelay(TS5500_ADC_CONV_DELAY);
257 if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY)
258 return -EBUSY;
259
260 /* Read the raw data */
261 lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR);
262 msb = inb(TS5500_ADC_CONV_MSB_ADDR);
263
264 return (msb << 8) | lsb;
265}
266
267static struct max197_platform_data ts5500_adc_pdata = {
268 .convert = ts5500_adc_convert,
269};
270
271static struct platform_device ts5500_adc_pdev = {
272 .name = "max197",
273 .id = -1,
274 .dev = {
275 .platform_data = &ts5500_adc_pdata,
276 },
277};
278
279static int __init ts5500_init(void)
280{
281 struct platform_device *pdev;
282 struct ts5500_sbc *sbc;
283 int err;
284
285 /*
286	 * There is no DMI or PCI bridge subvendor info available, and the
287	 * BIOS only provides a 16-bit identification call.
288 * It is safer to find a signature in the BIOS shadow RAM.
289 */
290 err = ts5500_check_signature();
291 if (err)
292 return err;
293
294 pdev = platform_device_register_simple("ts5500", -1, NULL, 0);
295 if (IS_ERR(pdev))
296 return PTR_ERR(pdev);
297
298 sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL);
299 if (!sbc) {
300 err = -ENOMEM;
301 goto error;
302 }
303
304 err = ts5500_detect_config(sbc);
305 if (err)
306 goto error;
307
308 platform_set_drvdata(pdev, sbc);
309
310 err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group);
311 if (err)
312 goto error;
313
314 ts5500_dio1_pdev.dev.parent = &pdev->dev;
315 if (platform_device_register(&ts5500_dio1_pdev))
316 dev_warn(&pdev->dev, "DIO1 block registration failed\n");
317 ts5500_dio2_pdev.dev.parent = &pdev->dev;
318 if (platform_device_register(&ts5500_dio2_pdev))
319 dev_warn(&pdev->dev, "DIO2 block registration failed\n");
320
321 if (led_classdev_register(&pdev->dev, &ts5500_led_cdev))
322 dev_warn(&pdev->dev, "LED registration failed\n");
323
324 if (sbc->adc) {
325 ts5500_adc_pdev.dev.parent = &pdev->dev;
326 if (platform_device_register(&ts5500_adc_pdev))
327 dev_warn(&pdev->dev, "ADC registration failed\n");
328 }
329
330 return 0;
331error:
332 platform_device_unregister(pdev);
333 return err;
334}
335device_initcall(ts5500_init);
336
337MODULE_LICENSE("GPL");
338MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>");
339MODULE_DESCRIPTION("Technologic Systems TS-5500 platform driver");
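Two implementation notes on the file above. Each TS5500_SHOW(x) invocation
expands to a complete ts5500_show_x() sysfs handler, which the DEVICE_ATTR()
lines then reference. And ts5500_adc_convert() returns -EBUSY when the CPLD
has not finished within the 12 us wait, in which case the conversion must be
re-initiated by the caller; a hypothetical retry wrapper (the helper and its
retry budget are illustrative, not part of this patch):

	static int ts5500_adc_convert_retry(u8 ctrl)
	{
		int i, ret;

		for (i = 0; i < 3; i++) {	/* arbitrary retry budget */
			ret = ts5500_adc_convert(ctrl);
			if (ret != -EBUSY)
				return ret;	/* raw sample or hard error */
		}
		return -EBUSY;			/* CPLD never settled */
	}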
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index b8b3a37c80cd..0f92173a12b6 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1034,7 +1034,8 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  * globally purge translation cache of a virtual address or all TLB's
  * @cpumask: mask of all cpu's in which the address is to be removed
  * @mm: mm_struct containing virtual address range
- * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
+ * @start: start virtual address to be removed from TLB
+ * @end: end virtual address to be removed from TLB
  * @cpu: the current cpu
  *
  * This is the entry point for initiating any UV global TLB shootdown.
@@ -1056,7 +1057,7 @@ static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
  */
 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 				struct mm_struct *mm, unsigned long start,
-				unsigned end, unsigned int cpu)
+				unsigned long end, unsigned int cpu)
 {
 	int locals = 0;
 	int remotes = 0;
@@ -1113,7 +1114,10 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 
 	record_send_statistics(stat, locals, hubs, remotes, bau_desc);
 
-	bau_desc->payload.address = start;
+	if (!end || (end - start) <= PAGE_SIZE)
+		bau_desc->payload.address = start;
+	else
+		bau_desc->payload.address = TLB_FLUSH_ALL;
 	bau_desc->payload.sending_cpu = cpu;
 	/*
 	 * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
@@ -1463,7 +1467,7 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
 	}
 
 	if (input_arg == 0) {
-		elements = sizeof(stat_description)/sizeof(*stat_description);
+		elements = ARRAY_SIZE(stat_description);
 		printk(KERN_DEBUG "# cpu: cpu number\n");
 		printk(KERN_DEBUG "Sender statistics:\n");
 		for (i = 0; i < elements; i++)
@@ -1504,7 +1508,7 @@ static int parse_tunables_write(struct bau_control *bcp, char *instr,
 	char *q;
 	int cnt = 0;
 	int val;
-	int e = sizeof(tunables) / sizeof(*tunables);
+	int e = ARRAY_SIZE(tunables);
 
 	p = instr + strspn(instr, WHITESPACE);
 	q = p;
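The two conversions above swap open-coded element counts for the kernel's
standard ARRAY_SIZE() macro, which in simplified form is:

	/* Simplified; the real include/linux/kernel.h definition also
	 * adds a __must_be_array() check so it breaks the build when
	 * handed a pointer instead of an array. */
	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))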
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5032e0d19b86..98718f604eb6 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -15,7 +15,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
- * Copyright (c) 2009 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
  * Copyright (c) Dimitri Sivanich
  */
 #include <linux/clockchips.h>
@@ -102,9 +102,10 @@ static int uv_intr_pending(int pnode)
 	if (is_uv1_hub())
 		return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
 			UV1H_EVENT_OCCURRED0_RTC1_MASK;
-	else
-		return uv_read_global_mmr64(pnode, UV2H_EVENT_OCCURRED2) &
-			UV2H_EVENT_OCCURRED2_RTC_1_MASK;
+	else if (is_uvx_hub())
+		return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
+			UVXH_EVENT_OCCURRED2_RTC_1_MASK;
+	return 0;
 }
 
 /* Setup interrupt and return non-zero if early expiration occurred. */
@@ -122,8 +123,8 @@ static int uv_setup_intr(int cpu, u64 expires)
 		uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
 				UV1H_EVENT_OCCURRED0_RTC1_MASK);
 	else
-		uv_write_global_mmr64(pnode, UV2H_EVENT_OCCURRED2_ALIAS,
-				UV2H_EVENT_OCCURRED2_RTC_1_MASK);
+		uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
+				UVXH_EVENT_OCCURRED2_RTC_1_MASK);
 
 	val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
 		((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index cc2f8c131286..872eb60e7806 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */
 static void usage(const char *err)
 {
 	if (err)
-		fprintf(stderr, "Error: %s\n\n", err);
+		fprintf(stderr, "%s: Error: %s\n\n", prog, err);
 	fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
 	fprintf(stderr, "\t-y	64bit mode\n");
 	fprintf(stderr, "\t-n	32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
 		insns++;
 	}
 
-	fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+	fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+		prog,
+		(errors) ? "Failure" : "Success",
+		insns,
+		(input_file) ? "given" : "random",
+		errors,
+		seed);
 
 	return errors ? 1 : 0;
 }
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 53c90fd412d1..21a13ce1d751 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -37,9 +37,8 @@ config RWSEM_GENERIC_SPINLOCK
 	def_bool !RWSEM_XCHGADD_ALGORITHM
 
 config 3_LEVEL_PGTABLES
-	bool "Three-level pagetables (EXPERIMENTAL)" if !64BIT
+	bool "Three-level pagetables" if !64BIT
 	default 64BIT
-	depends on EXPERIMENTAL
 	help
 	  Three-level pagetables will let UML have more than 4G of physical
 	  memory.  All the memory that can't be mapped directly will be treated
diff --git a/arch/x86/um/fault.c b/arch/x86/um/fault.c
index 8784ab30d91b..84ac7f7b0257 100644
--- a/arch/x86/um/fault.c
+++ b/arch/x86/um/fault.c
@@ -20,7 +20,7 @@ int arch_fixup(unsigned long address, struct uml_pt_regs *regs)
 	const struct exception_table_entry *fixup;
 
 	fixup = search_exception_tables(address);
-	if (fixup != 0) {
+	if (fixup) {
 		UPT_IP(regs) = fixup->fixup;
 		return 1;
 	}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 205ad328aa52..c74436e687bf 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -60,7 +60,7 @@ notrace static cycle_t vread_tsc(void)
 
 static notrace cycle_t vread_hpet(void)
 {
-	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + HPET_COUNTER);
 }
 
 #ifdef CONFIG_PARAVIRT_CLOCK
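The vread_hpet() change is behavior-neutral: HPET_COUNTER resolves to the
same main-counter offset the magic constant encoded, per asm/hpet.h
(comment added here for clarity):

	#define HPET_COUNTER	0x0f0	/* main counter register offset */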
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 138e5667409a..39928d16be3b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1517,72 +1517,51 @@ asmlinkage void __init xen_start_kernel(void)
 #endif
 }
 
-#ifdef CONFIG_XEN_PVHVM
-#define HVM_SHARED_INFO_ADDR 0xFE700000UL
-static struct shared_info *xen_hvm_shared_info;
-static unsigned long xen_hvm_sip_phys;
-static int xen_major, xen_minor;
-
-static void xen_hvm_connect_shared_info(unsigned long pfn)
+void __ref xen_hvm_init_shared_info(void)
 {
+	int cpu;
 	struct xen_add_to_physmap xatp;
+	static struct shared_info *shared_info_page = 0;
 
+	if (!shared_info_page)
+		shared_info_page = (struct shared_info *)
+			extend_brk(PAGE_SIZE, PAGE_SIZE);
 	xatp.domid = DOMID_SELF;
 	xatp.idx = 0;
 	xatp.space = XENMAPSPACE_shared_info;
-	xatp.gpfn = pfn;
+	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
 	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
 		BUG();
 
-}
-static void __init xen_hvm_set_shared_info(struct shared_info *sip)
-{
-	int cpu;
-
-	HYPERVISOR_shared_info = sip;
+	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
 
 	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
 	 * page, we use it in the event channel upcall and in some pvclock
 	 * related functions. We don't need the vcpu_info placement
 	 * optimizations because we don't use any pv_mmu or pv_irq op on
-	 * HVM. */
-	for_each_online_cpu(cpu)
+	 * HVM.
+	 * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
+	 * online but xen_hvm_init_shared_info is run at resume time too and
+	 * in that case multiple vcpus might be online. */
+	for_each_online_cpu(cpu) {
 		per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
-}
-
-/* Reconnect the shared_info pfn to a (new) mfn */
-void xen_hvm_resume_shared_info(void)
-{
-	xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-}
-
-/* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage.
- * On these old tools the shared info page will be placed in E820_Ram.
- * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects
- * that nothing is mapped up to HVM_SHARED_INFO_ADDR.
- * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used
- * here for the shared info page. */
-static void __init xen_hvm_init_shared_info(void)
-{
-	if (xen_major < 4) {
-		xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		xen_hvm_sip_phys = __pa(xen_hvm_shared_info);
-	} else {
-		xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR;
-		set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys);
-		xen_hvm_shared_info =
-			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
 	}
-	xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT);
-	xen_hvm_set_shared_info(xen_hvm_shared_info);
 }
 
+#ifdef CONFIG_XEN_PVHVM
 static void __init init_hvm_pv_info(void)
 {
-	uint32_t ecx, edx, pages, msr, base;
+	int major, minor;
+	uint32_t eax, ebx, ecx, edx, pages, msr, base;
 	u64 pfn;
 
 	base = xen_cpuid_base();
+	cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
+	major = eax >> 16;
+	minor = eax & 0xffff;
+	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
 	cpuid(base + 2, &pages, &msr, &ecx, &edx);
 
 	pfn = __pa(hypercall_page);
@@ -1633,22 +1612,12 @@ static void __init xen_hvm_guest_init(void)
 
 static bool __init xen_hvm_platform(void)
 {
-	uint32_t eax, ebx, ecx, edx, base;
-
 	if (xen_pv_domain())
 		return false;
 
-	base = xen_cpuid_base();
-	if (!base)
+	if (!xen_cpuid_base())
 		return false;
 
-	cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
-	xen_major = eax >> 16;
-	xen_minor = eax & 0xffff;
-
-	printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor);
-
 	return true;
 }
 
@@ -1668,6 +1637,7 @@ const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
 	.name			= "Xen HVM",
 	.detect			= xen_hvm_platform,
 	.init_platform		= xen_hvm_guest_init,
+	.x2apic_available	= xen_x2apic_para_available,
 };
 EXPORT_SYMBOL(x86_hyper_xen_hvm);
 #endif
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 8971a26d21ab..94eac5c85cdc 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -556,12 +556,9 @@ void __init xen_arch_setup(void)
 		       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
 
 	/* Set up idle, making sure it calls safe_halt() pvop */
-#ifdef CONFIG_X86_32
-	boot_cpu_data.hlt_works_ok = 1;
-#endif
 	disable_cpuidle();
 	disable_cpufreq();
-	WARN_ON(set_pm_idle_to_default());
+	WARN_ON(xen_set_default_idle());
 	fiddle_vdso();
 #ifdef CONFIG_NUMA
 	numa_off = 1;
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index ae8a00c39de4..45329c8c226e 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
 {
 #ifdef CONFIG_XEN_PVHVM
 	int cpu;
-	xen_hvm_resume_shared_info();
+	xen_hvm_init_shared_info();
 	xen_callback_vector();
 	xen_unplug_emulated_devices();
 	if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index f9643fc50de5..33ca6e42a4ca 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -89,11 +89,11 @@ ENTRY(xen_iret)
 	 */
 #ifdef CONFIG_SMP
 	GET_THREAD_INFO(%eax)
-	movl TI_cpu(%eax), %eax
-	movl __per_cpu_offset(,%eax,4), %eax
-	mov xen_vcpu(%eax), %eax
+	movl %ss:TI_cpu(%eax), %eax
+	movl %ss:__per_cpu_offset(,%eax,4), %eax
+	mov %ss:xen_vcpu(%eax), %eax
 #else
-	movl xen_vcpu, %eax
+	movl %ss:xen_vcpu, %eax
 #endif
 
 	/* check IF state we're restoring */
@@ -106,11 +106,11 @@ ENTRY(xen_iret)
 	 * resuming the code, so we don't have to be worried about
 	 * being preempted to another CPU.
 	 */
-	setz XEN_vcpu_info_mask(%eax)
+	setz %ss:XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
 	/* check for unmasked and pending */
-	cmpw $0x0001, XEN_vcpu_info_pending(%eax)
+	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)
 
 	/*
 	 * If there's something pending, mask events again so we can
@@ -118,7 +118,7 @@ xen_iret_start_crit:
 	 * touch XEN_vcpu_info_mask.
 	 */
 	jne 1f
-	movb $1, XEN_vcpu_info_mask(%eax)
+	movb $1, %ss:XEN_vcpu_info_mask(%eax)
 
 1:	popl %eax
 
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index d2e73d19d366..a95b41744ad0 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -40,7 +40,7 @@ void xen_enable_syscall(void);
 void xen_vcpu_restore(void);
 
 void xen_callback_vector(void);
-void xen_hvm_resume_shared_info(void);
+void xen_hvm_init_shared_info(void);
 void xen_unplug_emulated_devices(void);
 
 void __init xen_build_dynamic_phys_to_machine(void);