Diffstat (limited to 'arch/powerpc')
120 files changed, 5183 insertions, 2278 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3bad6d0f..00b9874e2240 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -27,15 +27,6 @@ config MMU
 	bool
 	default y
 
-config GENERIC_CMOS_UPDATE
-	def_bool y
-
-config GENERIC_TIME_VSYSCALL
-	def_bool y
-
-config GENERIC_CLOCKEVENTS
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool PPC64
 
@@ -87,10 +78,6 @@ config ARCH_HAS_ILOG2_U64
 	bool
 	default y if 64BIT
 
-config ARCH_HAS_CPU_IDLE_WAIT
-	bool
-	default y
-
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -141,9 +128,13 @@ config PPC
 	select IRQ_FORCED_THREADING
 	select HAVE_RCU_TABLE_FREE if SMP
 	select HAVE_SYSCALL_TRACEPOINTS
-	select HAVE_BPF_JIT if (PPC64 && NET)
+	select HAVE_BPF_JIT if PPC64
 	select HAVE_ARCH_JUMP_LABEL
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_CMOS_UPDATE
+	select GENERIC_TIME_VSYSCALL
+	select GENERIC_CLOCKEVENTS
 
 config EARLY_PRINTK
 	bool
@@ -284,7 +275,6 @@ config HIGHMEM
 	bool "High memory support"
 	depends on PPC32
 
-source kernel/time/Kconfig
 source kernel/Kconfig.hz
 source kernel/Kconfig.preempt
 source "fs/Kconfig.binfmt"
@@ -353,7 +343,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
 
 config KEXEC
 	bool "kexec system call (EXPERIMENTAL)"
-	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
+	depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
@@ -370,7 +360,7 @@ config KEXEC
 
 config CRASH_DUMP
 	bool "Build a kdump crash kernel"
-	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP && !PPC_47x)
+	depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
 	select RELOCATABLE if PPC64 || 44x
 	select DYNAMIC_MEMSTART if FSL_BOOKE
 	help
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 6524c6e21896..950d1f7a5a39 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -69,6 +69,16 @@ LDFLAGS_vmlinux	:= $(LDFLAGS_vmlinux-y)
 
 CFLAGS-$(CONFIG_PPC64)	:= -mminimal-toc -mtraceback=no -mcall-aixdesc
 CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 -mmultiple
+
+CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
+CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
+CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
+CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
+CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
+CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
+
+CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
+
 KBUILD_CPPFLAGS	+= -Iarch/$(ARCH)
 KBUILD_AFLAGS	+= -Iarch/$(ARCH)
 KBUILD_CFLAGS	+= -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
@@ -76,32 +86,11 @@ CPP		= $(CC) -E $(KBUILD_CFLAGS)
 
 CHECKFLAGS	+= -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
 
-ifeq ($(CONFIG_PPC64),y)
-GCC_BROKEN_VEC	:= $(call cc-ifversion, -lt, 0400, y)
-
-ifeq ($(CONFIG_POWER4_ONLY),y)
-ifeq ($(CONFIG_ALTIVEC),y)
-ifeq ($(GCC_BROKEN_VEC),y)
-KBUILD_CFLAGS	+= $(call cc-option,-mcpu=970)
-else
-KBUILD_CFLAGS	+= $(call cc-option,-mcpu=power4)
-endif
-else
-KBUILD_CFLAGS	+= $(call cc-option,-mcpu=power4)
-endif
-else
-KBUILD_CFLAGS	+= $(call cc-option,-mtune=power4)
-endif
-endif
-
 KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
 
-ifeq ($(CONFIG_TUNE_CELL),y)
-KBUILD_CFLAGS += $(call cc-option,-mtune=cell)
-endif
-
-# No AltiVec instruction when building kernel
+# No AltiVec or VSX instructions when building kernel
 KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
+KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
 
 # No SPE instruction when building kernel
 # (We use all available options to help semi-broken compilers)
@@ -160,6 +149,7 @@ core-$(CONFIG_KVM)		+= arch/powerpc/kvm/
 core-$(CONFIG_PERF_EVENTS)	+= arch/powerpc/perf/
 
 drivers-$(CONFIG_OPROFILE)	+= arch/powerpc/oprofile/
+drivers-$(CONFIG_CRYPTO_DEV_NX) += drivers/crypto/nx/
 
 # Default to zImage, override when needed
 all: zImage
@@ -234,10 +224,11 @@ archprepare: checkbin
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
 TOUT	:= .tmp_gas_check
-# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
-# instructions.
-# gcc-3.4 and binutils-2.14 are a fatal combination.
 
+# Check gcc and binutils versions:
+# - gcc-3.4 and binutils-2.14 are a fatal combination
+# - Require gcc 4.0 or above on 64-bit
+# - gcc-4.2.0 has issues compiling modules on 64-bit
 checkbin:
 	@if test "$(call cc-version)" = "0304" ; then \
 		if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
@@ -247,6 +238,12 @@ checkbin:
 			false; \
 		fi ; \
 	fi
+	@if test "$(call cc-version)" -lt "0400" \
+	    && test "x${CONFIG_PPC64}" = "xy" ; then \
+		echo -n "Sorry, GCC v4.0 or above is required to build " ; \
+		echo "the 64-bit powerpc kernel." ; \
+		false ; \
+	fi
	@if test "$(call cc-fullversion)" = "040200" \
	    && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
		echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 7bda373f10ef..9d4917aebe6b 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -373,5 +373,30 @@
 				0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
 				0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
 		};
+
+		MSI: ppc4xx-msi@C10000000 {
+			compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
+			reg = < 0xC 0x10000000 0x100
+				0xC 0x10000000 0x100>;
+			sdr-base = <0x36C>;
+			msi-data = <0x00004440>;
+			msi-mask = <0x0000ffe0>;
+			interrupts =<0 1 2 3 4 5 6 7>;
+			interrupt-parent = <&MSI>;
+			#interrupt-cells = <1>;
+			#address-cells = <0>;
+			#size-cells = <0>;
+			msi-available-ranges = <0x0 0x100>;
+			interrupt-map = <
+				0 &UIC3 0x18 1
+				1 &UIC3 0x19 1
+				2 &UIC3 0x1A 1
+				3 &UIC3 0x1B 1
+				4 &UIC3 0x1C 1
+				5 &UIC3 0x1D 1
+				6 &UIC3 0x1E 1
+				7 &UIC3 0x1F 1
+			>;
+		};
 	};
 };
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 0db9ba0423ff..c09598b31de1 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -100,6 +100,7 @@ CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 # CONFIG_SND_SUPPORT_OLD_API is not set
 CONFIG_SND_SOC=y
+CONFIG_SND_POWERPC_SOC=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index f104ccde6b53..b1f9597fe312 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -32,7 +32,7 @@ CONFIG_RD_LZMA=y
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
 CONFIG_KALLSYMS_ALL=y
 CONFIG_EMBEDDED=y
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 CONFIG_PROFILING=y
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1196c34163b7..07b7f2af2dca 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,5 +1,4 @@
 CONFIG_PPC64=y
-CONFIG_POWER4_ONLY=y
 CONFIG_ALTIVEC=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index e74d3a483705..9ef2cc13e1b4 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 2244d370f24d..02ac96b679b8 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -1,5 +1,4 @@
 CONFIG_PPC64=y
-CONFIG_POWER4_ONLY=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4
 CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d6b6df5e8743..62bb723c5b54 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -141,6 +141,7 @@ CONFIG_SND_INTEL8X0=y
 # CONFIG_SND_PPC is not set
 # CONFIG_SND_USB is not set
 CONFIG_SND_SOC=y
+CONFIG_SND_POWERPC_SOC=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 5b0e2926becd..d1828427ae55 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -143,6 +143,7 @@ CONFIG_SND_INTEL8X0=y
 # CONFIG_SND_PPC is not set
 # CONFIG_SND_USB is not set
 CONFIG_SND_SOC=y
+CONFIG_SND_POWERPC_SOC=y
 CONFIG_HID_A4TECH=y
 CONFIG_HID_APPLE=y
 CONFIG_HID_BELKIN=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index f4deb0b78cf0..840a2c2d0430 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -1,5 +1,4 @@
 CONFIG_PPC64=y
-CONFIG_POWER4_ONLY=y
 CONFIG_ALTIVEC=y
 # CONFIG_VIRT_CPU_ACCOUNTING is not set
 CONFIG_SMP=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index ded867871e97..c2f4b4a86ece 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -6,7 +6,6 @@ CONFIG_NR_CPUS=2
 CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
-CONFIG_SPARSE_IRQ=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_EMBEDDED=y
@@ -25,7 +24,6 @@ CONFIG_PS3_DISK=y
 CONFIG_PS3_ROM=y
 CONFIG_PS3_FLASH=y
 CONFIG_PS3_VRAM=m
-CONFIG_PS3_LPM=m
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
 CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -53,8 +51,6 @@ CONFIG_IP_PNP_DHCP=y
 # CONFIG_INET_DIAG is not set
 CONFIG_IPV6=y
 CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
 CONFIG_BT_RFCOMM=m
 CONFIG_BT_RFCOMM_TTY=y
 CONFIG_BT_BNEP=m
@@ -63,7 +59,6 @@ CONFIG_BT_BNEP_PROTO_FILTER=y
 CONFIG_BT_HIDP=m
 CONFIG_BT_HCIBTUSB=m
 CONFIG_CFG80211=m
-# CONFIG_WIRELESS_EXT_SYSFS is not set
 CONFIG_MAC80211=m
 CONFIG_MAC80211_RC_PID=y
 # CONFIG_MAC80211_RC_MINSTREL is not set
@@ -181,7 +176,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_LIST=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_CRYPTO_CCM=m
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 175295fbf4f3..1e2b7d062aa4 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -9,7 +9,7 @@ CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 # CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 CONFIG_SLAB=y
 CONFIG_MODULES=y
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index decad950f11a..5d7fbe1950f9 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -29,18 +29,9 @@
 #define PPC_LLARX(t, a, b, eh)	PPC_LDARX(t, a, b, eh)
 #define PPC_STLCX	stringify_in_c(stdcx.)
 #define PPC_CNTLZL	stringify_in_c(cntlzd)
+#define PPC_MTOCRF(FXM, RS)	MTOCRF((FXM), (RS))
 #define PPC_LR_STKOFF	16
 #define PPC_MIN_STKFRM	112
-
-/* Move to CR, single-entry optimized version. Only available
- * on POWER4 and later.
- */
-#ifdef CONFIG_POWER4_ONLY
-#define PPC_MTOCRF	stringify_in_c(mtocrf)
-#else
-#define PPC_MTOCRF	stringify_in_c(mtcrf)
-#endif
-
 #else /* 32-bit */
 
 /* operations for longs and pointers */
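For reference, a minimal sketch of how the stringify_in_c() definitions above are consumed from C. PPC_CNTLZL expands to the width-appropriate mnemonic (cntlzd on 64-bit, cntlzw on 32-bit), so one inline-asm body serves both builds; the function name below is illustrative, not from this patch:

/* Sketch only: count leading zeros via the width-selected mnemonic. */
static inline int count_leading_zeros(unsigned long x)
{
	int lz;

	asm(PPC_CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
	return lz;
}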
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b9219e99bd2a..50d82c8a037f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -168,6 +168,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_LWSYNC			ASM_CONST(0x0000000008000000)
 #define CPU_FTR_NOEXECUTE		ASM_CONST(0x0000000010000000)
 #define CPU_FTR_INDEXED_DCR		ASM_CONST(0x0000000020000000)
+#define CPU_FTR_EMB_HV			ASM_CONST(0x0000000040000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -376,7 +377,8 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_47X	(CPU_FTRS_440x6)
 #define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
	    CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
-	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
+	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
+	    CPU_FTR_DEBUG_LVL_EXC)
 #define CPU_FTRS_E500	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
	    CPU_FTR_NOEXECUTE)
@@ -385,15 +387,15 @@ extern const char *powerpc_base_platform;
	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500MC	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
-	    CPU_FTR_DBELL)
+	    CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_E5500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_DEBUG_LVL_EXC)
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_E6500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_DEBUG_LVL_EXC)
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
@@ -486,8 +488,10 @@ enum {
	    CPU_FTRS_E200 |
 #endif
 #ifdef CONFIG_E500
-	    CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
-	    CPU_FTRS_E5500 | CPU_FTRS_E6500 |
+	    CPU_FTRS_E500 | CPU_FTRS_E500_2 |
+#endif
+#ifdef CONFIG_PPC_E500MC
+	    CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
 #endif
	    0,
 };
@@ -531,9 +535,12 @@ enum {
	    CPU_FTRS_E200 &
 #endif
 #ifdef CONFIG_E500
-	    CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
-	    CPU_FTRS_E5500 & CPU_FTRS_E6500 &
+	    CPU_FTRS_E500 & CPU_FTRS_E500_2 &
+#endif
+#ifdef CONFIG_PPC_E500MC
+	    CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
 #endif
+	    ~CPU_FTR_EMB_HV &	/* can be removed at runtime */
	    CPU_FTRS_POSSIBLE,
 };
 #endif /* __powerpc64__ */
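For context, a hedged sketch of why the POSSIBLE/ALWAYS masks are kept as the OR and the AND of the selected feature sets: a feature test can then often fold to a compile-time constant, and masking CPU_FTR_EMB_HV out of the ALWAYS set (the "can be removed at runtime" line above) keeps that particular test a genuine runtime check. The helper name is illustrative:

/* Sketch of the constant-folding idea behind CPU_FTRS_POSSIBLE/ALWAYS. */
static inline int example_cpu_has_feature(unsigned long cur_cpu_ftrs,
					  unsigned long feature)
{
	if (CPU_FTRS_ALWAYS & feature)		/* every supported CPU has it */
		return 1;
	if (!(CPU_FTRS_POSSIBLE & feature))	/* no supported CPU has it */
		return 0;
	return (cur_cpu_ftrs & feature) != 0;	/* real runtime dependence */
}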
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index ce516e5eb0d3..ac3eedb9b74a 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -9,7 +9,7 @@
 * Note: This implementation is limited to a power of 2 number of
 * threads per core and the same number for each core in the system
 * (though it would work if some processors had less threads as long
- * as the CPU numbers are still allocated, just not brought offline).
+ * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index efa74ac44a35..154c067761b1 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -19,6 +19,9 @@
 
 #define PPC_DBELL_MSG_BRDCAST	(0x04000000)
 #define PPC_DBELL_TYPE(x)	(((x) & 0xf) << (63-36))
+#define PPC_DBELL_TYPE_MASK	PPC_DBELL_TYPE(0xf)
+#define PPC_DBELL_LPID(x)	((x) << (63 - 49))
+#define PPC_DBELL_PIR_MASK	0x3fff
 enum ppc_dbell {
	PPC_DBELL = 0,		/* doorbell */
	PPC_DBELL_CRIT = 1,	/* critical doorbell */
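A hedged sketch of how the new macros compose a directed doorbell message payload (type, target LPID, and the low PIR bits of the target thread); the function and parameter names are ours, not from this patch:

/* Sketch: pack a doorbell type, LPID, and PIR tag into one msgsnd value. */
static inline unsigned long build_dbell_msg(enum ppc_dbell type,
					    unsigned long lpid,
					    unsigned long pir)
{
	return PPC_DBELL_TYPE(type) | PPC_DBELL_LPID(lpid) |
	       (pir & PPC_DBELL_PIR_MASK);
}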
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3aa0a30..d58fc4e4149c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@ label##_hv:						\
 /* Exception addition: Hard disable interrupts */
 #define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
 
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS				\
-	ld	r11,PACAKMSR(r13);		\
-	ld	r12,_MSR(r1);			\
-	rlwimi	r11,r12,0,MSR_EE;		\
-	mtmsrd	r11,1
-
 #define ADD_NVGPRS				\
	bl	.save_nvgprs
 
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h
index 38762edb5e58..b3799d88ffcf 100644
--- a/arch/powerpc/include/asm/gpio.h
+++ b/arch/powerpc/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for PowerPC.
- *
- * Copyright (c) 2007-2008  MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_POWERPC_GPIO_H
-#define __ASM_POWERPC_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
-	return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
-	__gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
-	return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
-	return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
-	return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* __ASM_POWERPC_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 1c324ff55ea8..423cf9eaf4a4 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -77,8 +77,27 @@
 #define H_MR_CONDITION		-43
 #define H_NOT_ENOUGH_RESOURCES	-44
 #define H_R_STATE		-45
-#define H_RESCINDEND		-46
-#define H_MULTI_THREADS_ACTIVE	-9005
+#define H_RESCINDED		-46
+#define H_P2			-55
+#define H_P3			-56
+#define H_P4			-57
+#define H_P5			-58
+#define H_P6			-59
+#define H_P7			-60
+#define H_P8			-61
+#define H_P9			-62
+#define H_TOO_BIG		-64
+#define H_OVERLAP		-68
+#define H_INTERRUPT		-69
+#define H_BAD_DATA		-70
+#define H_NOT_ACTIVE		-71
+#define H_SG_LIST		-72
+#define H_OP_MODE		-73
+#define H_COP_HW		-74
+#define H_UNSUPPORTED_FLAG_START	-256
+#define H_UNSUPPORTED_FLAG_END		-511
+#define H_MULTI_THREADS_ACTIVE	-9005
+#define H_OUTSTANDING_COP_OPS	-9006
 
 
 /* Long Busy is a condition that can be returned by the firmware
@@ -114,6 +133,16 @@
 #define H_PP1			(1UL<<(63-62))
 #define H_PP2			(1UL<<(63-63))
 
+/* Flags for H_REGISTER_VPA subfunction field */
+#define H_VPA_FUNC_SHIFT	(63-18)	/* Bit posn of subfunction code */
+#define H_VPA_FUNC_MASK		7UL
+#define H_VPA_REG_VPA		1UL	/* Register Virtual Processor Area */
+#define H_VPA_REG_DTL		2UL	/* Register Dispatch Trace Log */
+#define H_VPA_REG_SLB		3UL	/* Register SLB shadow buffer */
+#define H_VPA_DEREG_VPA		5UL	/* Deregister Virtual Processor Area */
+#define H_VPA_DEREG_DTL		6UL	/* Deregister Dispatch Trace Log */
+#define H_VPA_DEREG_SLB		7UL	/* Deregister SLB shadow buffer */
+
 /* VASI States */
 #define H_VASI_INVALID		0
 #define H_VASI_ENABLED		1
@@ -240,6 +269,8 @@
 #define H_GET_MPP		0x2D4
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY		0x2F4
+#define H_RANDOM		0x300
+#define H_COP			0x304
 #define H_GET_MPP_X		0x314
 #define MAX_HCALL_OPCODE	H_GET_MPP_X
 
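For reference, a minimal sketch of how the new H_VPA_* definitions are meant to be used when decoding the flags argument of an H_REGISTER_VPA hcall; the helper name is illustrative, not from this patch:

/* Sketch: extract the subfunction code from the hcall flags word. */
static inline unsigned long h_vpa_subfunc(unsigned long flags)
{
	return (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
}
/* e.g. h_vpa_subfunc(flags) == H_VPA_REG_DTL selects "register a DTL" */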
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 51010bfc792e..c9aac24b02e2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -33,6 +33,7 @@
 extern void __replay_interrupt(unsigned int vector);
 
 extern void timer_interrupt(struct pt_regs *);
+extern void performance_monitor_exception(struct pt_regs *regs);
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index b921c3f48928..1bea4d8ea6f4 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -277,6 +277,7 @@ struct kvm_sync_regs {
 #define KVM_CPU_E500V2		2
 #define KVM_CPU_3S_32		3
 #define KVM_CPU_3S_64		4
+#define KVM_CPU_E500MC		5
 
 /* for KVM_CAP_SPAPR_TCE */
 struct kvm_create_spapr_tce {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 7b1f0e0fc653..76fdcfef0889 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,16 @@
 #ifndef __POWERPC_KVM_ASM_H__
 #define __POWERPC_KVM_ASM_H__
 
+#ifdef __ASSEMBLY__
+#ifdef CONFIG_64BIT
+#define PPC_STD(sreg, offset, areg)  std sreg, (offset)(areg)
+#define PPC_LD(treg, offset, areg)   ld treg, (offset)(areg)
+#else
+#define PPC_STD(sreg, offset, areg)  stw sreg, (offset+4)(areg)
+#define PPC_LD(treg, offset, areg)   lwz treg, (offset+4)(areg)
+#endif
+#endif
+
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
@@ -48,6 +58,14 @@
 #define BOOKE_INTERRUPT_SPE_FP_DATA 33
 #define BOOKE_INTERRUPT_SPE_FP_ROUND 34
 #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
+#define BOOKE_INTERRUPT_DOORBELL 36
+#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
+
+/* booke_hv */
+#define BOOKE_INTERRUPT_GUEST_DBELL 38
+#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
+#define BOOKE_INTERRUPT_HV_SYSCALL 40
+#define BOOKE_INTERRUPT_HV_PRIV 41
 
 /* book3s */
 
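A short sketch of why the 32-bit variants of PPC_STD/PPC_LD add 4 to the offset: these builds are big-endian, so the least-significant word of a 64-bit field sits at byte offset +4, and a 32-bit access there reaches the same data a 64-bit ld/std would reach at the base offset. The union below is illustrative only:

/* Sketch: layout of a u64 field on a big-endian 32-bit PowerPC build. */
union u64_halves {
	u64 whole;
	struct {
		u32 hi;		/* byte offset +0 */
		u32 lo;		/* byte offset +4, what PPC_LD/PPC_STD touch */
	} half;
};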
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795ccef294..f0e0c6a66d97 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
-	u64 vsid_next;
 #ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
+	u32 vsid_next;
 #else
-	u64 vsid_first;
-	u64 vsid_max;
+	u64 proto_vsid_first;
+	u64 proto_vsid_max;
+	u64 proto_vsid_next;
 #endif
	int context_id[SID_CONTEXTS];
 
@@ -452,4 +453,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 
 #define INS_DCBZ			0x7c0007ec
 
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)
+
 #endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 1f2f5b6156bd..88609b23b775 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,9 @@ struct kvmppc_host_state {
	u8 napping;
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+	u8 hwthread_req;
+	u8 hwthread_state;
+
	struct kvm_vcpu *kvm_vcpu;
	struct kvmppc_vcore *kvm_vcore;
	unsigned long xics_phys;
@@ -122,4 +125,9 @@ struct kvmppc_book3s_shadow_vcpu {
 
 #endif /*__ASSEMBLY__ */
 
+/* Values for kvm_state */
+#define KVM_HWTHREAD_IN_KERNEL	0
+#define KVM_HWTHREAD_IN_NAP	1
+#define KVM_HWTHREAD_IN_KVM	2
+
 #endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index a90e09188777..b7cd3356a532 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,6 +23,9 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS			64
+
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
	vcpu->arch.gpr[num] = val;
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 000000000000..30a600fa1b6a
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_KVM_BOOKE_HV_ASM_H
+#define ASM_KVM_BOOKE_HV_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * All exceptions from guest state must go through KVM
+ * (except for those which are delivered directly to the guest) --
+ * there are no exceptions for which we fall through directly to
+ * the normal host handler.
+ *
+ * Expected inputs (normal exceptions):
+ *   SCRATCH0 = saved r10
+ *   r10 = thread struct
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   r13 = saved CR
+ *   *(r10 + THREAD_NORMSAVE(0)) = saved r11
+ *   *(r10 + THREAD_NORMSAVE(2)) = saved r13
+ *
+ * Expected inputs (crit/mcheck/debug exceptions):
+ *   appropriate SCRATCH = saved r8
+ *   r8 = exception level stack frame
+ *   r9 = *(r8 + _CCR) = saved CR
+ *   r11 = appropriate SRR1 variant (currently used as scratch)
+ *   *(r8 + GPR9) = saved r9
+ *   *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
+ *   *(r8 + GPR11) = saved r11
+ */
+.macro DO_KVM intno srr1
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+	mtocrf	0x80, r11	/* check MSR[GS] without clobbering reg */
+	bf	3, kvmppc_resume_\intno\()_\srr1
+	b	kvmppc_handler_\intno\()_\srr1
+kvmppc_resume_\intno\()_\srr1:
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+.endm
+
+#endif /*__ASSEMBLY__ */
+#endif /* ASM_KVM_BOOKE_HV_ASM_H */
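In C terms the DO_KVM macro above makes a single decision; a hedged sketch of that logic (ours, not from the patch):

/*
 * Sketch: if the saved SRR1 shows MSR[GS] (guest state) set, the
 * exception came from a guest and is routed to the KVM handler for
 * that vector; otherwise execution falls through to the normal host
 * handler.  The mtocrf/bf pair in the macro tests that one bit
 * without needing a scratch GPR.
 */
static inline int exception_from_guest(unsigned long srr1)
{
	return (srr1 & MSR_GS) != 0;
}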
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
deleted file mode 100644
index 8cd50a514271..000000000000
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Author: Yu Liu, <yu.liu@freescale.com>
- *
- * Description:
- * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
- * by Hollis Blanchard <hollisb@us.ibm.com>.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_KVM_E500_H__
-#define __ASM_KVM_E500_H__
-
-#include <linux/kvm_host.h>
-
-#define BOOKE_INTERRUPT_SIZE 36
-
-#define E500_PID_NUM   3
-#define E500_TLB_NUM   2
-
-#define E500_TLB_VALID 1
-#define E500_TLB_DIRTY 2
-
-struct tlbe_ref {
-	pfn_t pfn;
-	unsigned int flags; /* E500_TLB_* */
-};
-
-struct tlbe_priv {
-	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
-};
-
-struct vcpu_id_table;
-
-struct kvmppc_e500_tlb_params {
-	int entries, ways, sets;
-};
-
-struct kvmppc_vcpu_e500 {
-	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
-	struct kvm_book3e_206_tlb_entry *gtlb_arch;
-
-	/* Starting entry number in gtlb_arch[] */
-	int gtlb_offset[E500_TLB_NUM];
-
-	/* KVM internal information associated with each guest TLB entry */
-	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
-
-	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
-
-	unsigned int gtlb_nv[E500_TLB_NUM];
-
-	/*
-	 * information associated with each host TLB entry --
-	 * TLB1 only for now.  If/when guest TLB1 entries can be
-	 * mapped with host TLB0, this will be used for that too.
-	 *
-	 * We don't want to use this for guest TLB0 because then we'd
-	 * have the overhead of doing the translation again even if
-	 * the entry is still in the guest TLB (e.g. we swapped out
-	 * and back, and our host TLB entries got evicted).
-	 */
-	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
-	unsigned int host_tlb1_nv;
-
-	u32 host_pid[E500_PID_NUM];
-	u32 pid[E500_PID_NUM];
-	u32 svr;
-
-	/* vcpu id table */
-	struct vcpu_id_table *idt;
-
-	u32 l1csr0;
-	u32 l1csr1;
-	u32 hid0;
-	u32 hid1;
-	u32 tlb0cfg;
-	u32 tlb1cfg;
-	u64 mcar;
-
-	struct page **shared_tlb_pages;
-	int num_shared_tlb_pages;
-
-	struct kvm_vcpu vcpu;
-};
-
-static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
-{
-	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
-}
-
-#endif /* __ASM_KVM_E500_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 52eb9c1f4fe0..d848cdc49715 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -82,7 +82,7 @@ struct kvm_vcpu;
 
 struct lppaca;
 struct slb_shadow;
-struct dtl;
+struct dtl_entry;
 
 struct kvm_vm_stat {
	u32 remote_tlb_flush;
@@ -106,6 +106,8 @@ struct kvm_vcpu_stat {
	u32 dec_exits;
	u32 ext_intr_exits;
	u32 halt_wakeup;
+	u32 dbell_exits;
+	u32 gdbell_exits;
 #ifdef CONFIG_PPC_BOOK3S
	u32 pf_storage;
	u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
	EMULATED_TLBSX_EXITS,
	EMULATED_TLBWE_EXITS,
	EMULATED_RFI_EXITS,
+	EMULATED_RFCI_EXITS,
	DEC_EXITS,
	EXT_INTR_EXITS,
	HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
	FP_UNAVAIL,
	DEBUG_EXITS,
	TIMEINGUEST,
+	DBELL_EXITS,
+	GDBELL_EXITS,
	__NUMBER_OF_KVM_EXIT_TYPES
 };
 
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
 };
 
 struct kvm_arch {
+	unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_64_HV
	unsigned long hpt_virt;
	struct revmap_entry *revmap;
-	unsigned int lpid;
	unsigned int host_lpid;
	unsigned long host_lpcr;
	unsigned long sdr1;
@@ -232,7 +237,6 @@ struct kvm_arch {
	unsigned long vrma_slb_v;
	int rma_setup_done;
	int using_mmu_notifiers;
-	struct list_head spapr_tce_tables;
	spinlock_t slot_phys_lock;
	unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
	int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -240,6 +244,9 @@ struct kvm_arch {
	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
	struct kvmppc_linear_info *hpt_li;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct list_head spapr_tce_tables;
+#endif
 };
 
 /*
@@ -263,6 +270,9 @@ struct kvmppc_vcore {
	struct list_head runnable_threads;
	spinlock_t lock;
	wait_queue_head_t wq;
+	u64 stolen_tb;
+	u64 preempt_tb;
+	struct kvm_vcpu *runner;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -274,6 +284,19 @@ struct kvmppc_vcore {
 #define VCORE_EXITING	2
 #define VCORE_SLEEPING	3
 
+/*
+ * Struct used to manage memory for a virtual processor area
+ * registered by a PAPR guest.  There are three types of area
+ * that a guest can register.
+ */
+struct kvmppc_vpa {
+	void *pinned_addr;	/* Address in kernel linear mapping */
+	void *pinned_end;	/* End of region */
+	unsigned long next_gpa;	/* Guest phys addr for update */
+	unsigned long len;	/* Number of bytes required */
+	u8 update_pending;	/* 1 => update pinned_addr from next_gpa */
+};
+
 struct kvmppc_pte {
	ulong eaddr;
	u64 vpage;
@@ -345,6 +368,17 @@ struct kvm_vcpu_arch {
	u64 vsr[64];
 #endif
 
+#ifdef CONFIG_KVM_BOOKE_HV
+	u32 host_mas4;
+	u32 host_mas6;
+	u32 shadow_epcr;
+	u32 epcr;
+	u32 shadow_msrp;
+	u32 eplc;
+	u32 epsc;
+	u32 oldpir;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S
	/* For Gekko paired singles */
	u32 qpr[32];
@@ -370,6 +404,7 @@ struct kvm_vcpu_arch {
 #endif
	u32 vrsave; /* also USPRG0 */
	u32 mmucr;
+	/* shadow_msr is unused for BookE HV */
	ulong shadow_msr;
	ulong csrr0;
	ulong csrr1;
@@ -426,8 +461,12 @@ struct kvm_vcpu_arch {
	ulong fault_esr;
	ulong queued_dear;
	ulong queued_esr;
+	u32 tlbcfg[4];
+	u32 mmucfg;
+	u32 epr;
 #endif
	gpa_t paddr_accessed;
+	gva_t vaddr_accessed;
 
	u8 io_gpr; /* GPR used as IO source/target */
	u8 mmio_is_bigendian;
@@ -453,11 +492,6 @@ struct kvm_vcpu_arch {
	u8 prodded;
	u32 last_inst;
 
-	struct lppaca *vpa;
-	struct slb_shadow *slb_shadow;
-	struct dtl *dtl;
-	struct dtl *dtl_end;
-
	wait_queue_head_t *wqp;
	struct kvmppc_vcore *vcore;
	int ret;
@@ -482,6 +516,14 @@ struct kvm_vcpu_arch {
	struct task_struct *run_task;
	struct kvm_run *kvm_run;
	pgd_t *pgdir;
+
+	spinlock_t vpa_update_lock;
+	struct kvmppc_vpa vpa;
+	struct kvmppc_vpa dtl;
+	struct dtl_entry *dtl_ptr;
+	unsigned long dtl_index;
+	u64 stolen_logged;
+	struct kvmppc_vpa slb_shadow;
 #endif
 };
 
@@ -498,4 +540,6 @@ struct kvm_vcpu_arch {
 #define KVM_MMIO_REG_QPR	0x0040
 #define KVM_MMIO_REG_FQPR	0x0060
 
+#define __KVM_HAVE_ARCH_WQP
+
 #endif /* __POWERPC_KVM_HOST_H__ */
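A hedged sketch of the deferred-update pattern the new kvmppc_vpa struct supports: the hcall path records next_gpa/len and sets update_pending, and the vcpu-run path applies the change at a point where pinning guest memory is safe. The function below is illustrative, not the series' actual code:

/* Sketch: consume a pending VPA update recorded by the hcall path. */
static void example_apply_vpa_update(struct kvmppc_vpa *v)
{
	if (!v->update_pending)
		return;
	/* unpin the old region, then pin [next_gpa, next_gpa + len) */
	v->update_pending = 0;
}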
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 7b754e743003..c18916bff689 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
	return r;
 }
 
+static inline bool kvm_check_and_clear_guest_paused(void)
+{
+	return false;
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9d6dee0f7d48..f68c22fa2fce 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
 extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
 
-extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
+extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
 extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
 
 extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
+extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong val);
+extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
+                                     ulong *val);
 
 extern int kvmppc_booke_init(void);
 extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
 extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
 extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce *args);
+extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
				struct kvm_allocate_rma *rma);
 extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,11 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
 extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
+extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
+				      struct kvm_ppc_smmu_info *info);
+
+extern int kvmppc_bookehv_init(void);
+extern void kvmppc_bookehv_exit(void);
 
 /*
  * Cuts out inst bits with ordering according to spec.
@@ -204,4 +213,9 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);
 
+long kvmppc_alloc_lpid(void);
+void kvmppc_claim_lpid(long lpid);
+void kvmppc_free_lpid(long lpid);
+void kvmppc_init_lpid(unsigned long nr_lpids);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
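The kvmppc_*_lpid() declarations above suggest a small partition-ID allocator shared by the KVM flavours; a bitmap-backed sketch consistent with that API (an assumed shape using standard kernel bitmap helpers -- the real implementation lives elsewhere in the series):

static DECLARE_BITMAP(lpid_inuse, KVMPPC_NR_LPIDS);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids)
			return -ENOMEM;		/* all partition IDs in use */
	} while (test_and_set_bit(lpid, lpid_inuse));
	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);	/* e.g. reserve the host's own LPID */
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr)
{
	nr_lpids = min_t(unsigned long, nr, KVMPPC_NR_LPIDS);
}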
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index a76254af0aaa..531fe0c3108f 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -20,18 +20,16 @@
 #define _ASM_POWERPC_LPPACA_H
 #ifdef __KERNEL__
 
-/* These definitions relate to hypervisors that only exist when using
+/*
+ * These definitions relate to hypervisors that only exist when using
 * a server type processor
 */
 #ifdef CONFIG_PPC_BOOK3S
 
-//=============================================================================
-//
-// This control block contains the data that is shared between the
-// hypervisor (PLIC) and the OS.
-//
-//
-//----------------------------------------------------------------------------
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor and the OS.
+ */
 #include <linux/cache.h>
 #include <linux/threads.h>
 #include <asm/types.h>
@@ -43,123 +41,65 @@
 */
 #define NR_LPPACAS	1
 
-
-/* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
- * alignment is sufficient to prevent this */
+/*
+ * The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
+ * alignment is sufficient to prevent this
+ */
 struct lppaca {
-//=============================================================================
-// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
-// NOTE: The xDynXyz fields are fields that will be dynamically changed by
-// PLIC when preparing to bring a processor online or when dispatching a
-// virtual processor!
-//=============================================================================
-	u32	desc;			// Eye catcher 0xD397D781	x00-x03
-	u16	size;			// Size of this struct		x04-x05
-	u16	reserved1;		// Reserved			x06-x07
-	u16	reserved2:14;		// Reserved			x08-x09
-	u8	shared_proc:1;		// Shared processor indicator	...
-	u8	secondary_thread:1;	// Secondary thread indicator	...
-	volatile u8 dyn_proc_status:8;	// Dynamic Status of this proc	x0A-x0A
-	u8	secondary_thread_count;	// Secondary thread count	x0B-x0B
-	volatile u16 dyn_hv_phys_proc_index;// Dynamic HV Physical Proc Index0C-x0D
-	volatile u16 dyn_hv_log_proc_index;// Dynamic HV Logical Proc Indexx0E-x0F
-	u32	decr_val;		// Value for Decr programming	x10-x13
-	u32	pmc_val;		// Value for PMC regs		x14-x17
-	volatile u32 dyn_hw_node_id;	// Dynamic Hardware Node id	x18-x1B
-	volatile u32 dyn_hw_proc_id;	// Dynamic Hardware Proc Id	x1C-x1F
-	volatile u32 dyn_pir;		// Dynamic ProcIdReg value	x20-x23
-	u32	dsei_data;		// DSEI data			x24-x27
+	/* cacheline 1 contains read-only data */
+
+	u32	desc;			/* Eye catcher 0xD397D781 */
+	u16	size;			/* Size of this struct */
+	u16	reserved1;
+	u16	reserved2:14;
+	u8	shared_proc:1;		/* Shared processor indicator */
+	u8	secondary_thread:1;	/* Secondary thread indicator */
+	u8	reserved3[14];
+	volatile u32 dyn_hw_node_id;	/* Dynamic hardware node id */
+	volatile u32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
+	u8	reserved4[56];
+	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
+					  /* associativity change counters */
+	u8	reserved5[32];
+
+	/* cacheline 2 contains local read-write data */
+
+	u8	reserved6[48];
+	u8	cede_latency_hint;
+	u8	reserved7[7];
+	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
72 | u64 sprg3; // SPRG3 value x28-x2F | 71 | u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */ |
73 | u8 reserved3[40]; // Reserved x30-x57 | 72 | u8 fpregs_in_use; |
74 | volatile u8 vphn_assoc_counts[8]; // Virtual processor home node | 73 | u8 pmcregs_in_use; |
75 | // associativity change counters x58-x5F | 74 | u8 reserved8[28]; |
76 | u8 reserved4[32]; // Reserved x60-x7F | 75 | u64 wait_state_cycles; /* Wait cycles for this proc */ |
77 | 76 | u8 reserved9[28]; | |
78 | //============================================================================= | 77 | u16 slb_count; /* # of SLBs to maintain */ |
79 | // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data | 78 | u8 idle; /* Indicate OS is idle */ |
80 | //============================================================================= | 79 | u8 vmxregs_in_use; |
81 | // This Dword contains a byte for each type of interrupt that can occur. | 80 | |
82 | // The IPI is a count while the others are just a binary 1 or 0. | 81 | /* cacheline 3 is shared with other processors */ |
83 | union { | 82 | |
84 | u64 any_int; | 83 | /* |
85 | struct { | 84 | * This is the yield_count. An "odd" value (low bit on) means that |
86 | u16 reserved; // Reserved - cleared by #mpasmbl | 85 | * the processor is yielded (either because of an OS yield or a |
87 | u8 xirr_int; // Indicates xXirrValue is valid or Immed IO | 86 | * hypervisor preempt). An even value implies that the processor is |
88 | u8 ipi_cnt; // IPI Count | 87 | * currently executing. |
89 | u8 decr_int; // DECR interrupt occurred | 88 | * NOTE: This value will ALWAYS be zero for dedicated processors and |
90 | u8 pdc_int; // PDC interrupt occurred | 89 | * will NEVER be zero for shared processors (ie, initialized to a 1). |
91 | u8 quantum_int; // Interrupt quantum reached | 90 | */ |
92 | u8 old_plic_deferred_ext_int; // Old PLIC has a deferred XIRR pending | 91 | volatile u32 yield_count; |
93 | } fields; | 92 | volatile u32 dispersion_count; /* dispatch changed physical cpu */ |
94 | } int_dword; | 93 | volatile u64 cmo_faults; /* CMO page fault count */ |
95 | 94 | volatile u64 cmo_fault_time; /* CMO page fault time */ | |
96 | // Whenever any fields in this Dword are set then PLIC will defer the | 95 | u8 reserved10[104]; |
97 | // processing of external interrupts. Note that PLIC will store the | 96 | |
98 | // XIRR directly into the xXirrValue field so that another XIRR will | 97 | /* cacheline 4-5 */ |
99 | // not be presented until this one clears. The layout of the low | 98 | |
100 | // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the | 99 | u32 page_ins; /* CMO Hint - # page ins by OS */ |
101 | // entire Dword is zero or not. A non-zero value in the low order | 100 | u8 reserved11[148]; |
102 | // 2-bytes will result in SLIC being granted the highest thread | 101 | volatile u64 dtl_idx; /* Dispatch Trace Log head index */ |
103 | // priority upon return. A 0 will return to SLIC as medium priority. | 102 | u8 reserved12[96]; |
104 | u64 plic_defer_ints_area; // Entire Dword | ||
105 | |||
106 | // Used to pass the real SRR0/1 from PLIC to SLIC as well as to | ||
107 | // pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid. | ||
108 | u64 saved_srr0; // Saved SRR0 x10-x17 | ||
109 | u64 saved_srr1; // Saved SRR1 x18-x1F | ||
110 | |||
111 | // Used to pass parms from the OS to PLIC for SetAsrAndRfid | ||
112 | u64 saved_gpr3; // Saved GPR3 x20-x27 | ||
113 | u64 saved_gpr4; // Saved GPR4 x28-x2F | ||
114 | union { | ||
115 | u64 saved_gpr5; /* Saved GPR5 x30-x37 */ | ||
116 | struct { | ||
117 | u8 cede_latency_hint; /* x30 */ | ||
118 | u8 reserved[7]; /* x31-x36 */ | ||
119 | } fields; | ||
120 | } gpr5_dword; | ||
121 | |||
122 | |||
123 | u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38 | ||
124 | u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39 | ||
125 | u8 fpregs_in_use; // FP regs in use x3A-x3A | ||
126 | u8 pmcregs_in_use; // PMC regs in use x3B-x3B | ||
127 | volatile u32 saved_decr; // Saved Decr Value x3C-x3F | ||
128 | volatile u64 emulated_time_base;// Emulated TB for this thread x40-x47 | ||
129 | volatile u64 cur_plic_latency; // Unaccounted PLIC latency x48-x4F | ||
130 | u64 tot_plic_latency; // Accumulated PLIC latency x50-x57 | ||
131 | u64 wait_state_cycles; // Wait cycles for this proc x58-x5F | ||
132 | u64 end_of_quantum; // TB at end of quantum x60-x67 | ||
133 | u64 pdc_saved_sprg1; // Saved SPRG1 for PMC int x68-x6F | ||
134 | u64 pdc_saved_srr0; // Saved SRR0 for PMC int x70-x77 | ||
135 | volatile u32 virtual_decr; // Virtual DECR for shared procsx78-x7B | ||
136 | u16 slb_count; // # of SLBs to maintain x7C-x7D | ||
137 | u8 idle; // Indicate OS is idle x7E | ||
138 | u8 vmxregs_in_use; // VMX registers in use x7F | ||
139 | |||
140 | |||
141 | //============================================================================= | ||
142 | // CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors | ||
143 | //============================================================================= | ||
144 | // This is the yield_count. An "odd" value (low bit on) means that | ||
145 | // the processor is yielded (either because of an OS yield or a PLIC | ||
146 | // preempt). An even value implies that the processor is currently | ||
147 | // executing. | ||
148 | // NOTE: This value will ALWAYS be zero for dedicated processors and | ||
149 | // will NEVER be zero for shared processors (ie, initialized to a 1). | ||
150 | volatile u32 yield_count; // PLIC increments each dispatchx00-x03 | ||
151 | volatile u32 dispersion_count; // dispatch changed phys cpu x04-x07 | ||
152 | volatile u64 cmo_faults; // CMO page fault count x08-x0F | ||
153 | volatile u64 cmo_fault_time; // CMO page fault time x10-x17 | ||
154 | u8 reserved7[104]; // Reserved x18-x7F | ||
155 | |||
156 | //============================================================================= | ||
157 | // CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data | ||
158 | //============================================================================= | ||
159 | u32 page_ins; // CMO Hint - # page ins by OS x00-x03 | ||
160 | u8 reserved8[148]; // Reserved x04-x97 | ||
161 | volatile u64 dtl_idx; // Dispatch Trace Log head idx x98-x9F | ||
162 | u8 reserved9[96]; // Reserved xA0-xFF | ||
163 | } __attribute__((__aligned__(0x400))); | 103 | } __attribute__((__aligned__(0x400))); |
164 | 104 | ||
165 | extern struct lppaca lppaca[]; | 105 | extern struct lppaca lppaca[]; |
@@ -172,13 +112,13 @@ extern struct lppaca lppaca[]; | |||
172 | * ESID is stored in the lower 64bits, then the VSID. | 112 | * ESID is stored in the lower 64bits, then the VSID. |
173 | */ | 113 | */ |
174 | struct slb_shadow { | 114 | struct slb_shadow { |
175 | u32 persistent; // Number of persistent SLBs x00-x03 | 115 | u32 persistent; /* Number of persistent SLBs */ |
176 | u32 buffer_length; // Total shadow buffer length x04-x07 | 116 | u32 buffer_length; /* Total shadow buffer length */ |
177 | u64 reserved; // Alignment x08-x0f | 117 | u64 reserved; |
178 | struct { | 118 | struct { |
179 | u64 esid; | 119 | u64 esid; |
180 | u64 vsid; | 120 | u64 vsid; |
181 | } save_area[SLB_NUM_BOLTED]; // x10-x40 | 121 | } save_area[SLB_NUM_BOLTED]; |
182 | } ____cacheline_aligned; | 122 | } ____cacheline_aligned; |
183 | 123 | ||
184 | extern struct slb_shadow slb_shadow[]; | 124 | extern struct slb_shadow slb_shadow[]; |
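Editor's note: the rewritten cacheline-3 comment pins down a small protocol: an odd yield_count means the virtual processor is yielded, an even one means it is running, and dedicated processors always read zero. A minimal sketch of the parity test a consumer (for instance lock-spinning code) could perform; the helper name is invented:

    #include <stdio.h>
    #include <stdint.h>

    struct lppaca_view { volatile uint32_t yield_count; };

    /* Odd (low bit set) => the virtual processor is yielded;
     * even => it is executing; dedicated processors always read 0. */
    static int vcpu_yielded(const struct lppaca_view *lp)
    {
        return lp->yield_count & 1;
    }

    int main(void)
    {
        struct lppaca_view lp = { .yield_count = 3 };
        printf("yielded: %d\n", vcpu_yielded(&lp));  /* prints 1 */
        return 0;
    }

Spinning on a lock whose holder reads as yielded wastes cycles, which is why the parity convention is worth stating explicitly in the header.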
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h index 233f9ecae761..f5117674bf92 100644 --- a/arch/powerpc/include/asm/lv1call.h +++ b/arch/powerpc/include/asm/lv1call.h | |||
@@ -265,8 +265,8 @@ LV1_CALL(get_spe_irq_outlet, 2, 1, 78 ) | |||
265 | LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 ) | 265 | LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 ) |
266 | LV1_CALL(create_repository_node, 6, 0, 90 ) | 266 | LV1_CALL(create_repository_node, 6, 0, 90 ) |
267 | LV1_CALL(read_repository_node, 5, 2, 91 ) | 267 | LV1_CALL(read_repository_node, 5, 2, 91 ) |
268 | LV1_CALL(modify_repository_node_value, 6, 0, 92 ) | 268 | LV1_CALL(write_repository_node, 6, 0, 92 ) |
269 | LV1_CALL(remove_repository_node, 4, 0, 93 ) | 269 | LV1_CALL(delete_repository_node, 4, 0, 93 ) |
270 | LV1_CALL(read_htab_entries, 2, 5, 95 ) | 270 | LV1_CALL(read_htab_entries, 2, 5, 95 ) |
271 | LV1_CALL(set_dabr, 2, 0, 96 ) | 271 | LV1_CALL(set_dabr, 2, 0, 96 ) |
272 | LV1_CALL(get_total_execution_time, 2, 1, 103 ) | 272 | LV1_CALL(get_total_execution_time, 2, 1, 103 ) |
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index cdb5421877e2..eeabcdbc30f7 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h | |||
@@ -104,6 +104,8 @@ | |||
104 | #define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ | 104 | #define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ |
105 | #define MAS4_TSIZED_SHIFT 7 | 105 | #define MAS4_TSIZED_SHIFT 7 |
106 | 106 | ||
107 | #define MAS5_SGS 0x80000000 | ||
108 | |||
107 | #define MAS6_SPID0 0x3FFF0000 | 109 | #define MAS6_SPID0 0x3FFF0000 |
108 | #define MAS6_SPID1 0x00007FFE | 110 | #define MAS6_SPID1 0x00007FFE |
109 | #define MAS6_ISIZE(x) MAS1_TSIZE(x) | 111 | #define MAS6_ISIZE(x) MAS1_TSIZE(x) |
@@ -118,6 +120,10 @@ | |||
118 | 120 | ||
119 | #define MAS7_RPN 0xFFFFFFFF | 121 | #define MAS7_RPN 0xFFFFFFFF |
120 | 122 | ||
123 | #define MAS8_TGS 0x80000000 /* Guest space */ | ||
124 | #define MAS8_VF 0x40000000 /* Virtualization Fault */ | ||
125 | #define MAS8_TLPID 0x000000ff | ||
126 | |||
121 | /* Bit definitions for MMUCFG */ | 127 | /* Bit definitions for MMUCFG */ |
122 | #define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ | 128 | #define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ |
123 | #define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ | 129 | #define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ |
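Editor's note: MAS5_SGS and the new MAS8 bits let an embedded hypervisor tag TLB entries with a guest space and logical partition. A hedged sketch of composing a MAS8 value for a guest mapping (defines copied from the hunk; the surrounding tlbwe sequence is not shown here):

    #include <stdio.h>
    #include <stdint.h>

    #define MAS8_TGS   0x80000000u  /* Guest space */
    #define MAS8_VF    0x40000000u  /* Virtualization Fault */
    #define MAS8_TLPID 0x000000ffu

    /* Tag a TLB entry as belonging to guest partition 'lpid'. */
    static uint32_t mas8_for_guest(uint32_t lpid)
    {
        return MAS8_TGS | (lpid & MAS8_TLPID);
    }

    int main(void)
    {
        printf("MAS8 = 0x%08x\n", mas8_for_guest(1));  /* 0x80000001 */
        return 0;
    }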
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h index 23cd6cc30bcf..c07edfe98b98 100644 --- a/arch/powerpc/include/asm/pSeries_reconfig.h +++ b/arch/powerpc/include/asm/pSeries_reconfig.h | |||
@@ -13,6 +13,18 @@ | |||
13 | #define PSERIES_RECONFIG_REMOVE 0x0002 | 13 | #define PSERIES_RECONFIG_REMOVE 0x0002 |
14 | #define PSERIES_DRCONF_MEM_ADD 0x0003 | 14 | #define PSERIES_DRCONF_MEM_ADD 0x0003 |
15 | #define PSERIES_DRCONF_MEM_REMOVE 0x0004 | 15 | #define PSERIES_DRCONF_MEM_REMOVE 0x0004 |
16 | #define PSERIES_UPDATE_PROPERTY 0x0005 | ||
17 | |||
18 | /** | ||
19 | * struct pSeries_reconfig_prop_update - Notifier value structure for OFDT property updates | ||
20 | * | ||
21 | * @node: Device tree node which owns the property being updated | ||
22 | * @property: Updated property | ||
23 | */ | ||
24 | struct pSeries_reconfig_prop_update { | ||
25 | struct device_node *node; | ||
26 | struct property *property; | ||
27 | }; | ||
16 | 28 | ||
17 | #ifdef CONFIG_PPC_PSERIES | 29 | #ifdef CONFIG_PPC_PSERIES |
18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); | 30 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); |
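Editor's note: PSERIES_UPDATE_PROPERTY extends the existing reconfig notifier chain, with pSeries_reconfig_prop_update as the notifier payload. A sketch of a consumer, assuming the usual notifier_block pattern (the handler name and message are invented for illustration):

    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <linux/of.h>
    #include <asm/pSeries_reconfig.h>

    /* Illustrative consumer: log device-tree property updates
     * delivered through the reconfig chain. */
    static int example_prop_update(struct notifier_block *nb,
                                   unsigned long action, void *data)
    {
        if (action == PSERIES_UPDATE_PROPERTY) {
            struct pSeries_reconfig_prop_update *upd = data;

            pr_info("property %s updated under %s\n",
                    upd->property->name, upd->node->full_name);
        }
        return NOTIFY_OK;
    }

    static struct notifier_block example_prop_update_nb = {
        .notifier_call = example_prop_update,
    };

    /* registration, e.g. from an initcall:
     *     pSeries_reconfig_notifier_register(&example_prop_update_nb);
     */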
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 50f73aa2ba21..15444204a3a1 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -369,7 +369,15 @@ BEGIN_FTR_SECTION \ | |||
369 | END_FTR_SECTION_IFCLR(CPU_FTR_601) | 369 | END_FTR_SECTION_IFCLR(CPU_FTR_601) |
370 | #endif | 370 | #endif |
371 | 371 | ||
372 | 372 | #ifdef CONFIG_PPC64 | |
373 | #define MTOCRF(FXM, RS) \ | ||
374 | BEGIN_FTR_SECTION_NESTED(848); \ | ||
375 | mtcrf (FXM), (RS); \ | ||
376 | FTR_SECTION_ELSE_NESTED(848); \ | ||
377 | mtocrf (FXM), (RS); \ | ||
378 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848) | ||
379 | #endif | ||
380 | |||
373 | /* | 381 | /* |
374 | * This instruction is not implemented on the PPC 603 or 601; however, on | 382 | * This instruction is not implemented on the PPC 603 or 601; however, on |
375 | * the 403GCX and 405GP tlbia IS defined and tlbie is not. | 383 | * the 403GCX and 405GP tlbia IS defined and tlbie is not. |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 8e2d0371fe1e..413a5eaef56c 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -74,9 +74,6 @@ struct task_struct; | |||
74 | void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); | 74 | void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp); |
75 | void release_thread(struct task_struct *); | 75 | void release_thread(struct task_struct *); |
76 | 76 | ||
77 | /* Prepare to copy thread state - unlazy all lazy status */ | ||
78 | extern void prepare_to_copy(struct task_struct *tsk); | ||
79 | |||
80 | /* Create a new kernel thread. */ | 77 | /* Create a new kernel thread. */ |
81 | extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | 78 | extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); |
82 | 79 | ||
@@ -243,6 +240,9 @@ struct thread_struct { | |||
243 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER | 240 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
244 | void* kvm_shadow_vcpu; /* KVM internal data */ | 241 | void* kvm_shadow_vcpu; /* KVM internal data */ |
245 | #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ | 242 | #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ |
243 | #if defined(CONFIG_KVM) && defined(CONFIG_BOOKE) | ||
244 | struct kvm_vcpu *kvm_vcpu; | ||
245 | #endif | ||
246 | #ifdef CONFIG_PPC64 | 246 | #ifdef CONFIG_PPC64 |
247 | unsigned long dscr; | 247 | unsigned long dscr; |
248 | int dscr_inherit; | 248 | int dscr_inherit; |
@@ -386,7 +386,6 @@ extern unsigned long cpuidle_disable; | |||
386 | enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; | 386 | enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; |
387 | 387 | ||
388 | extern int powersave_nap; /* set if nap mode can be used in idle loop */ | 388 | extern int powersave_nap; /* set if nap mode can be used in idle loop */ |
389 | void cpu_idle_wait(void); | ||
390 | 389 | ||
391 | #ifdef CONFIG_PSERIES_IDLE | 390 | #ifdef CONFIG_PSERIES_IDLE |
392 | extern void update_smt_snooze_delay(int snooze); | 391 | extern void update_smt_snooze_delay(int snooze); |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 84cc7840cd18..9c21ed42aba6 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
@@ -354,12 +354,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | |||
354 | #define PTRACE_GETREGS64 22 | 354 | #define PTRACE_GETREGS64 22 |
355 | #define PTRACE_SETREGS64 23 | 355 | #define PTRACE_SETREGS64 23 |
356 | 356 | ||
357 | /* (old) PTRACE requests with inverted arguments */ | ||
358 | #define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */ | ||
359 | #define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */ | ||
360 | #define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */ | ||
361 | #define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */ | ||
362 | |||
363 | /* Calls to trace a 64bit program from a 32bit program */ | 357 | /* Calls to trace a 64bit program from a 32bit program */ |
364 | #define PPC_PTRACE_PEEKTEXT_3264 0x95 | 358 | #define PPC_PTRACE_PEEKTEXT_3264 0x95 |
365 | #define PPC_PTRACE_PEEKDATA_3264 0x94 | 359 | #define PPC_PTRACE_PEEKDATA_3264 0x94 |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 9d7f0fb69028..f0cb7f461b9d 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -257,7 +257,9 @@ | |||
257 | #define LPCR_LPES_SH 2 | 257 | #define LPCR_LPES_SH 2 |
258 | #define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ | 258 | #define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ |
259 | #define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ | 259 | #define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ |
260 | #ifndef SPRN_LPID | ||
260 | #define SPRN_LPID 0x13F /* Logical Partition Identifier */ | 261 | #define SPRN_LPID 0x13F /* Logical Partition Identifier */ |
262 | #endif | ||
261 | #define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ | 263 | #define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ |
262 | #define SPRN_HMER 0x150 /* Hardware m? error recovery */ | 264 | #define SPRN_HMER 0x150 /* Hardware m? error recovery */ |
263 | #define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ | 265 | #define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ |
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 8a97aa7289d3..2d916c4982c5 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h | |||
@@ -56,18 +56,30 @@ | |||
56 | #define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ | 56 | #define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ |
57 | #define SPRN_EPCR 0x133 /* Embedded Processor Control Register */ | 57 | #define SPRN_EPCR 0x133 /* Embedded Processor Control Register */ |
58 | #define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ | 58 | #define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ |
59 | #define SPRN_MSRP 0x137 /* MSR Protect Register */ | ||
59 | #define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ | 60 | #define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ |
60 | #define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ | 61 | #define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ |
61 | #define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */ | 62 | #define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */ |
62 | #define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */ | 63 | #define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */ |
64 | #define SPRN_LPID 0x152 /* Logical Partition ID */ | ||
63 | #define SPRN_MAS8 0x155 /* MMU Assist Register 8 */ | 65 | #define SPRN_MAS8 0x155 /* MMU Assist Register 8 */ |
64 | #define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */ | 66 | #define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */ |
65 | #define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */ | 67 | #define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */ |
66 | #define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */ | 68 | #define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */ |
67 | #define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */ | 69 | #define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */ |
68 | #define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */ | 70 | #define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */ |
71 | #define SPRN_GSPRG0 0x170 /* Guest SPRG0 */ | ||
72 | #define SPRN_GSPRG1 0x171 /* Guest SPRG1 */ | ||
73 | #define SPRN_GSPRG2 0x172 /* Guest SPRG2 */ | ||
74 | #define SPRN_GSPRG3 0x173 /* Guest SPRG3 */ | ||
69 | #define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */ | 75 | #define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */ |
70 | #define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */ | 76 | #define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */ |
77 | #define SPRN_GSRR0 0x17A /* Guest SRR0 */ | ||
78 | #define SPRN_GSRR1 0x17B /* Guest SRR1 */ | ||
79 | #define SPRN_GEPR 0x17C /* Guest EPR */ | ||
80 | #define SPRN_GDEAR 0x17D /* Guest DEAR */ | ||
81 | #define SPRN_GPIR 0x17E /* Guest PIR */ | ||
82 | #define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */ | ||
71 | #define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */ | 83 | #define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */ |
72 | #define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */ | 84 | #define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */ |
73 | #define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */ | 85 | #define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */ |
@@ -88,6 +100,13 @@ | |||
88 | #define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ | 100 | #define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ |
89 | #define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ | 101 | #define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ |
90 | #define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ | 102 | #define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ |
103 | #define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */ | ||
104 | #define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */ | ||
105 | #define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */ | ||
106 | #define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */ | ||
107 | #define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */ | ||
108 | #define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */ | ||
109 | #define SPRN_GIVPR 0x1BF /* Guest IVPR */ | ||
91 | #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ | 110 | #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ |
92 | #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ | 111 | #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ |
93 | #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ | 112 | #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ |
@@ -240,6 +259,10 @@ | |||
240 | #define MCSR_LDG 0x00002000UL /* Guarded Load */ | 259 | #define MCSR_LDG 0x00002000UL /* Guarded Load */ |
241 | #define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */ | 260 | #define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */ |
242 | #define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */ | 261 | #define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */ |
262 | |||
263 | #define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */ | ||
264 | #define MSRP_DEP 0x00000200 /* Protect MSR[DE] */ | ||
265 | #define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */ | ||
243 | #endif | 266 | #endif |
244 | 267 | ||
245 | #ifdef CONFIG_E200 | 268 | #ifdef CONFIG_E200 |
@@ -594,6 +617,17 @@ | |||
594 | #define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates | 617 | #define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates |
595 | * for hypervisor */ | 618 | * for hypervisor */ |
596 | 619 | ||
620 | /* Bit definitions for EPLC/EPSC */ | ||
621 | #define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */ | ||
622 | #define EPC_EPR_SHIFT 31 | ||
623 | #define EPC_EAS 0x40000000 /* Address Space */ | ||
624 | #define EPC_EAS_SHIFT 30 | ||
625 | #define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */ | ||
626 | #define EPC_EGS_SHIFT 29 | ||
627 | #define EPC_ELPID 0x00ff0000 | ||
628 | #define EPC_ELPID_SHIFT 16 | ||
629 | #define EPC_EPID 0x00003fff | ||
630 | #define EPC_EPID_SHIFT 0 | ||
597 | 631 | ||
598 | /* | 632 | /* |
599 | * The IBM-403 is an even more odd special case, as it is much | 633 | * The IBM-403 is an even more odd special case, as it is much |
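Editor's note: the EPLC/EPSC bit definitions describe the external-PID load/store context: privilege, address space, guest state, LPID and PID. A small illustrative composition in C (defines copied from the hunk; which combinations are architecturally valid is not addressed here):

    #include <stdio.h>
    #include <stdint.h>

    #define EPC_EPR         0x80000000u  /* 1 = user, 0 = kernel */
    #define EPC_EGS         0x20000000u  /* 1 = guest, 0 = hypervisor */
    #define EPC_ELPID       0x00ff0000u
    #define EPC_ELPID_SHIFT 16
    #define EPC_EPID        0x00003fffu

    /* EPLC for external-PID access to guest user memory of
     * partition 'lpid' under process id 'pid'. */
    static uint32_t eplc_guest_user(uint32_t lpid, uint32_t pid)
    {
        return EPC_EPR | EPC_EGS |
               ((lpid << EPC_ELPID_SHIFT) & EPC_ELPID) |
               (pid & EPC_EPID);
    }

    int main(void)
    {
        printf("EPLC = 0x%08x\n", eplc_guest_user(1, 42));  /* 0xa001002a */
        return 0;
    }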
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index caf82d0a00de..200d763a0a67 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
@@ -17,11 +17,11 @@ extern struct task_struct *_switch(struct thread_struct *prev, | |||
17 | struct thread_struct *next); | 17 | struct thread_struct *next); |
18 | 18 | ||
19 | extern void giveup_fpu(struct task_struct *); | 19 | extern void giveup_fpu(struct task_struct *); |
20 | extern void load_up_fpu(void); | ||
20 | extern void disable_kernel_fp(void); | 21 | extern void disable_kernel_fp(void); |
21 | extern void enable_kernel_fp(void); | 22 | extern void enable_kernel_fp(void); |
22 | extern void flush_fp_to_thread(struct task_struct *); | 23 | extern void flush_fp_to_thread(struct task_struct *); |
23 | extern void enable_kernel_altivec(void); | 24 | extern void enable_kernel_altivec(void); |
24 | extern void giveup_altivec(struct task_struct *); | ||
25 | extern void load_up_altivec(struct task_struct *); | 25 | extern void load_up_altivec(struct task_struct *); |
26 | extern int emulate_altivec(struct pt_regs *); | 26 | extern int emulate_altivec(struct pt_regs *); |
27 | extern void __giveup_vsx(struct task_struct *); | 27 | extern void __giveup_vsx(struct task_struct *); |
@@ -40,10 +40,15 @@ static inline void discard_lazy_cpu_state(void) | |||
40 | 40 | ||
41 | #ifdef CONFIG_ALTIVEC | 41 | #ifdef CONFIG_ALTIVEC |
42 | extern void flush_altivec_to_thread(struct task_struct *); | 42 | extern void flush_altivec_to_thread(struct task_struct *); |
43 | extern void giveup_altivec(struct task_struct *); | ||
44 | extern void giveup_altivec_notask(void); | ||
43 | #else | 45 | #else |
44 | static inline void flush_altivec_to_thread(struct task_struct *t) | 46 | static inline void flush_altivec_to_thread(struct task_struct *t) |
45 | { | 47 | { |
46 | } | 48 | } |
49 | static inline void giveup_altivec(struct task_struct *t) | ||
50 | { | ||
51 | } | ||
47 | #endif | 52 | #endif |
48 | 53 | ||
49 | #ifdef CONFIG_VSX | 54 | #ifdef CONFIG_VSX |
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 4a741c7efd02..a556ccc16b58 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -62,21 +62,8 @@ struct thread_info { | |||
62 | #define init_thread_info (init_thread_union.thread_info) | 62 | #define init_thread_info (init_thread_union.thread_info) |
63 | #define init_stack (init_thread_union.stack) | 63 | #define init_stack (init_thread_union.stack) |
64 | 64 | ||
65 | /* thread information allocation */ | ||
66 | |||
67 | #if THREAD_SHIFT >= PAGE_SHIFT | ||
68 | |||
69 | #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) | 65 | #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) |
70 | 66 | ||
71 | #else /* THREAD_SHIFT < PAGE_SHIFT */ | ||
72 | |||
73 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR | ||
74 | |||
75 | extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); | ||
76 | extern void free_thread_info(struct thread_info *ti); | ||
77 | |||
78 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | ||
79 | |||
80 | /* how to get the thread information struct from C */ | 67 | /* how to get the thread information struct from C */ |
81 | static inline struct thread_info *current_thread_info(void) | 68 | static inline struct thread_info *current_thread_info(void) |
82 | { | 69 | { |
@@ -126,7 +113,6 @@ static inline struct thread_info *current_thread_info(void) | |||
126 | #define _TIF_NOERROR (1<<TIF_NOERROR) | 113 | #define _TIF_NOERROR (1<<TIF_NOERROR) |
127 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | 114 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) |
128 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | 115 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
129 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) | ||
130 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 116 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
131 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) | 117 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) |
132 | 118 | ||
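Editor's note, worked example for the simplified stack allocation above: THREAD_SIZE_ORDER is now unconditionally THREAD_SHIFT - PAGE_SHIFT, so a 16K kernel stack (THREAD_SHIFT = 14) on 4K pages (PAGE_SHIFT = 12) is an order-2, four-page allocation. The removal of the THREAD_SHIFT < PAGE_SHIFT allocator presumably reflects that no remaining configuration has stacks smaller than a page.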
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 2136f58a54e8..3b4b4a8da922 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h | |||
@@ -23,6 +23,7 @@ | |||
23 | extern unsigned long tb_ticks_per_jiffy; | 23 | extern unsigned long tb_ticks_per_jiffy; |
24 | extern unsigned long tb_ticks_per_usec; | 24 | extern unsigned long tb_ticks_per_usec; |
25 | extern unsigned long tb_ticks_per_sec; | 25 | extern unsigned long tb_ticks_per_sec; |
26 | extern struct clock_event_device decrementer_clockevent; | ||
26 | 27 | ||
27 | struct rtc_time; | 28 | struct rtc_time; |
28 | extern void to_tm(int tim, struct rtc_time * tm); | 29 | extern void to_tm(int tim, struct rtc_time * tm); |
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index c97185885c6d..852ed1b384f6 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h | |||
@@ -18,12 +18,6 @@ struct device_node; | |||
18 | */ | 18 | */ |
19 | #define RECLAIM_DISTANCE 10 | 19 | #define RECLAIM_DISTANCE 10 |
20 | 20 | ||
21 | /* | ||
22 | * Avoid creating an extra level of balancing (SD_ALLNODES) on the largest | ||
23 | * POWER7 boxes which have a maximum of 32 nodes. | ||
24 | */ | ||
25 | #define SD_NODES_PER_DOMAIN 32 | ||
26 | |||
27 | #include <asm/mmzone.h> | 21 | #include <asm/mmzone.h> |
28 | 22 | ||
29 | static inline int cpu_to_node(int cpu) | 23 | static inline int cpu_to_node(int cpu) |
@@ -51,36 +45,6 @@ static inline int pcibus_to_node(struct pci_bus *bus) | |||
51 | cpu_all_mask : \ | 45 | cpu_all_mask : \ |
52 | cpumask_of_node(pcibus_to_node(bus))) | 46 | cpumask_of_node(pcibus_to_node(bus))) |
53 | 47 | ||
54 | /* sched_domains SD_NODE_INIT for PPC64 machines */ | ||
55 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
56 | .min_interval = 8, \ | ||
57 | .max_interval = 32, \ | ||
58 | .busy_factor = 32, \ | ||
59 | .imbalance_pct = 125, \ | ||
60 | .cache_nice_tries = 1, \ | ||
61 | .busy_idx = 3, \ | ||
62 | .idle_idx = 1, \ | ||
63 | .newidle_idx = 0, \ | ||
64 | .wake_idx = 0, \ | ||
65 | .forkexec_idx = 0, \ | ||
66 | \ | ||
67 | .flags = 1*SD_LOAD_BALANCE \ | ||
68 | | 0*SD_BALANCE_NEWIDLE \ | ||
69 | | 1*SD_BALANCE_EXEC \ | ||
70 | | 1*SD_BALANCE_FORK \ | ||
71 | | 0*SD_BALANCE_WAKE \ | ||
72 | | 1*SD_WAKE_AFFINE \ | ||
73 | | 0*SD_PREFER_LOCAL \ | ||
74 | | 0*SD_SHARE_CPUPOWER \ | ||
75 | | 0*SD_POWERSAVINGS_BALANCE \ | ||
76 | | 0*SD_SHARE_PKG_RESOURCES \ | ||
77 | | 1*SD_SERIALIZE \ | ||
78 | | 0*SD_PREFER_SIBLING \ | ||
79 | , \ | ||
80 | .last_balance = jiffies, \ | ||
81 | .balance_interval = 1, \ | ||
82 | } | ||
83 | |||
84 | extern int __node_distance(int, int); | 48 | extern int __node_distance(int, int); |
85 | #define node_distance(a, b) __node_distance(a, b) | 49 | #define node_distance(a, b) __node_distance(a, b) |
86 | 50 | ||
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h index 6bfd5ffe1d4f..b19adf751dd9 100644 --- a/arch/powerpc/include/asm/vio.h +++ b/arch/powerpc/include/asm/vio.h | |||
@@ -46,6 +46,48 @@ | |||
46 | 46 | ||
47 | struct iommu_table; | 47 | struct iommu_table; |
48 | 48 | ||
49 | /* | ||
50 | * Platform Facilities Option (PFO)-specific data | ||
51 | */ | ||
52 | |||
53 | /* Starting unit address for PFO devices on the VIO BUS */ | ||
54 | #define VIO_BASE_PFO_UA 0x50000000 | ||
55 | |||
56 | /** | ||
57 | * vio_pfo_op - PFO operation parameters | ||
58 | * | ||
59 | * @flags: h_call subfunctions and modifiers | ||
60 | * @in: Input data block logical real address | ||
61 | * @inlen: If non-negative, the length of the input data block. If negative, | ||
62 | * the length of the input data descriptor list in bytes. | ||
63 | * @out: Output data block logical real address | ||
64 | * @outlen: If non-negative, the length of the output data block. If negative, | ||
65 | * the length of the output data descriptor list in bytes. | ||
66 | * @csbcpb: Logical real address of the 4k naturally-aligned storage block | ||
67 | * containing the CSB & optional FC field specific CPB | ||
68 | * @timeout: # of milliseconds to retry h_call, 0 for no timeout. | ||
69 | * @hcall_err: the h_call return value is stored here | ||
70 | */ | ||
71 | struct vio_pfo_op { | ||
72 | u64 flags; | ||
73 | s64 in; | ||
74 | s64 inlen; | ||
75 | s64 out; | ||
76 | s64 outlen; | ||
77 | u64 csbcpb; | ||
78 | void *done; | ||
79 | unsigned long handle; | ||
80 | unsigned int timeout; | ||
81 | long hcall_err; | ||
82 | }; | ||
83 | |||
84 | /* End PFO specific data */ | ||
85 | |||
86 | enum vio_dev_family { | ||
87 | VDEVICE, /* The OF node is a child of /vdevice */ | ||
88 | PFO, /* The OF node is a child of /ibm,platform-facilities */ | ||
89 | }; | ||
90 | |||
49 | /** | 91 | /** |
50 | * vio_dev - This structure is used to describe virtual I/O devices. | 92 | * vio_dev - This structure is used to describe virtual I/O devices. |
51 | * | 93 | * |
@@ -58,6 +100,7 @@ struct vio_dev { | |||
58 | const char *name; | 100 | const char *name; |
59 | const char *type; | 101 | const char *type; |
60 | uint32_t unit_address; | 102 | uint32_t unit_address; |
103 | uint32_t resource_id; | ||
61 | unsigned int irq; | 104 | unsigned int irq; |
62 | struct { | 105 | struct { |
63 | size_t desired; | 106 | size_t desired; |
@@ -65,6 +108,7 @@ struct vio_dev { | |||
65 | size_t allocated; | 108 | size_t allocated; |
66 | atomic_t allocs_failed; | 109 | atomic_t allocs_failed; |
67 | } cmo; | 110 | } cmo; |
111 | enum vio_dev_family family; | ||
68 | struct device dev; | 112 | struct device dev; |
69 | }; | 113 | }; |
70 | 114 | ||
@@ -95,6 +139,8 @@ extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired); | |||
95 | 139 | ||
96 | extern void __devinit vio_unregister_device(struct vio_dev *dev); | 140 | extern void __devinit vio_unregister_device(struct vio_dev *dev); |
97 | 141 | ||
142 | extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op); | ||
143 | |||
98 | struct device_node; | 144 | struct device_node; |
99 | 145 | ||
100 | extern struct vio_dev *vio_register_device_node( | 146 | extern struct vio_dev *vio_register_device_node( |
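Editor's note: vio_h_cop_sync is the synchronous entry point for the PFO machinery documented above. A hedged driver-style sketch of one call (the helper and its field values are invented; a real user such as a platform-facilities crypto driver derives them from its co-processor block):

    #include <linux/kernel.h>
    #include <asm/vio.h>

    /* Illustrative synchronous PFO call: the logical real addresses
     * and lengths are assumed to have been prepared by the caller. */
    static int example_pfo_op(struct vio_dev *vdev,
                              u64 in_lra, s64 inlen,
                              u64 out_lra, s64 outlen, u64 csbcpb_lra)
    {
        struct vio_pfo_op op = {
            .flags   = 0,          /* subfunction bits are device specific */
            .in      = in_lra,
            .inlen   = inlen,      /* negative would mean a descriptor list */
            .out     = out_lra,
            .outlen  = outlen,
            .csbcpb  = csbcpb_lra,
            .timeout = 2,          /* retry busy h_calls for up to 2ms */
        };
        int rc;

        rc = vio_h_cop_sync(vdev, &op);
        if (rc)
            pr_err("PFO op failed: rc %d hcall_err %ld\n",
                   rc, op.hcall_err);
        return rc;
    }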
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index f5808a35688c..83afacd3ba7b 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -28,7 +28,7 @@ endif | |||
28 | 28 | ||
29 | obj-y := cputable.o ptrace.o syscalls.o \ | 29 | obj-y := cputable.o ptrace.o syscalls.o \ |
30 | irq.o align.o signal_32.o pmc.o vdso.o \ | 30 | irq.o align.o signal_32.o pmc.o vdso.o \ |
31 | init_task.o process.o systbl.o idle.o \ | 31 | process.o systbl.o idle.o \ |
32 | signal.o sysfs.o cacheinfo.o time.o \ | 32 | signal.o sysfs.o cacheinfo.o time.o \ |
33 | prom.o traps.o setup-common.o \ | 33 | prom.o traps.o setup-common.o \ |
34 | udbg.o misc.o io.o dma.o \ | 34 | udbg.o misc.o io.o dma.o \ |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 34b8afe94a50..52c7ad78242e 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -116,6 +116,9 @@ int main(void) | |||
116 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER | 116 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
117 | DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); | 117 | DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); |
118 | #endif | 118 | #endif |
119 | #ifdef CONFIG_KVM_BOOKE_HV | ||
120 | DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu)); | ||
121 | #endif | ||
119 | 122 | ||
120 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 123 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
121 | DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); | 124 | DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); |
@@ -188,10 +191,6 @@ int main(void) | |||
188 | DEFINE(SLBSHADOW_STACKESID, | 191 | DEFINE(SLBSHADOW_STACKESID, |
189 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); | 192 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); |
190 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); | 193 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); |
191 | DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); | ||
192 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); | ||
193 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); | ||
194 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); | ||
195 | DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); | 194 | DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use)); |
196 | DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); | 195 | DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); |
197 | DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); | 196 | DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count)); |
@@ -387,6 +386,7 @@ int main(void) | |||
387 | #ifdef CONFIG_KVM | 386 | #ifdef CONFIG_KVM |
388 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); | 387 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); |
389 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); | 388 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); |
389 | DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); | ||
390 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); | 390 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); |
391 | DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); | 391 | DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); |
392 | DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); | 392 | DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); |
@@ -429,9 +429,11 @@ int main(void) | |||
429 | DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); | 429 | DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); |
430 | DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); | 430 | DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); |
431 | 431 | ||
432 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | ||
433 | DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); | ||
434 | |||
432 | /* book3s */ | 435 | /* book3s */ |
433 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 436 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
434 | DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); | ||
435 | DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); | 437 | DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); |
436 | DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); | 438 | DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); |
437 | DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); | 439 | DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); |
@@ -444,9 +446,9 @@ int main(void) | |||
444 | DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); | 446 | DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); |
445 | DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); | 447 | DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); |
446 | DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); | 448 | DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); |
449 | DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); | ||
447 | #endif | 450 | #endif |
448 | #ifdef CONFIG_PPC_BOOK3S | 451 | #ifdef CONFIG_PPC_BOOK3S |
449 | DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); | ||
450 | DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); | 452 | DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); |
451 | DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); | 453 | DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); |
452 | DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); | 454 | DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); |
@@ -461,7 +463,6 @@ int main(void) | |||
461 | DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); | 463 | DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); |
462 | DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); | 464 | DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); |
463 | DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); | 465 | DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); |
464 | DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa)); | ||
465 | DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); | 466 | DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); |
466 | DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); | 467 | DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); |
467 | DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); | 468 | DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); |
@@ -537,6 +538,8 @@ int main(void) | |||
537 | HSTATE_FIELD(HSTATE_NAPPING, napping); | 538 | HSTATE_FIELD(HSTATE_NAPPING, napping); |
538 | 539 | ||
539 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 540 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
541 | HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req); | ||
542 | HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state); | ||
540 | HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); | 543 | HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); |
541 | HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); | 544 | HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); |
542 | HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); | 545 | HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); |
@@ -597,6 +600,12 @@ int main(void) | |||
597 | DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); | 600 | DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); |
598 | #endif | 601 | #endif |
599 | 602 | ||
603 | #ifdef CONFIG_KVM_BOOKE_HV | ||
604 | DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); | ||
605 | DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); | ||
606 | DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc)); | ||
607 | #endif | ||
608 | |||
600 | #ifdef CONFIG_KVM_EXIT_TIMING | 609 | #ifdef CONFIG_KVM_EXIT_TIMING |
601 | DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, | 610 | DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, |
602 | arch.timing_exit.tv32.tbu)); | 611 | arch.timing_exit.tv32.tbu)); |
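Editor's note, for readers unfamiliar with asm-offsets.c: the DEFINE() entries are not runtime code. Each one emits a "->NAME value" marker into the compiler's assembly output, which the build scrapes into a generated header of #defines consumed by .S files. A minimal standalone model of the mechanism (the macro mirrors the one in include/linux/kbuild.h; the struct is a stand-in):

    #include <stddef.h>

    /* Emit a "->NAME value" marker into the compiler's asm output. */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    struct example_vcpu { long host_stack; long pid; };

    int main(void)
    {
        DEFINE(VCPU_HOST_STACK, offsetof(struct example_vcpu, host_stack));
        DEFINE(VCPU_GUEST_PID,  offsetof(struct example_vcpu, pid));
        return 0;
    }

Compiling this with gcc -S and grepping the .s file for "->" shows the markers that the kbuild sed script turns into #define lines in the generated asm-offsets.h.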
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 8053db02b85e..69fdd2322a66 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S | |||
@@ -73,6 +73,7 @@ _GLOBAL(__setup_cpu_e500v2) | |||
73 | mtlr r4 | 73 | mtlr r4 |
74 | blr | 74 | blr |
75 | _GLOBAL(__setup_cpu_e500mc) | 75 | _GLOBAL(__setup_cpu_e500mc) |
76 | mr r5, r4 | ||
76 | mflr r4 | 77 | mflr r4 |
77 | bl __e500_icache_setup | 78 | bl __e500_icache_setup |
78 | bl __e500_dcache_setup | 79 | bl __e500_dcache_setup |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index f8a7a1a1a9f4..ed1718feb9d9 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -63,15 +63,9 @@ system_call_common: | |||
63 | std r0,GPR0(r1) | 63 | std r0,GPR0(r1) |
64 | std r10,GPR1(r1) | 64 | std r10,GPR1(r1) |
65 | ACCOUNT_CPU_USER_ENTRY(r10, r11) | 65 | ACCOUNT_CPU_USER_ENTRY(r10, r11) |
66 | /* | ||
67 | * This "crclr so" clears CR0.SO, which is the error indication on | ||
68 | * return from this system call. There must be no cmp instruction | ||
69 | * between it and the "mfcr r9" below, otherwise if XER.SO is set, | ||
70 | * CR0.SO will get set, causing all system calls to appear to fail. | ||
71 | */ | ||
72 | crclr so | ||
73 | std r2,GPR2(r1) | 66 | std r2,GPR2(r1) |
74 | std r3,GPR3(r1) | 67 | std r3,GPR3(r1) |
68 | mfcr r2 | ||
75 | std r4,GPR4(r1) | 69 | std r4,GPR4(r1) |
76 | std r5,GPR5(r1) | 70 | std r5,GPR5(r1) |
77 | std r6,GPR6(r1) | 71 | std r6,GPR6(r1) |
@@ -82,18 +76,20 @@ system_call_common: | |||
82 | std r11,GPR10(r1) | 76 | std r11,GPR10(r1) |
83 | std r11,GPR11(r1) | 77 | std r11,GPR11(r1) |
84 | std r11,GPR12(r1) | 78 | std r11,GPR12(r1) |
79 | std r11,_XER(r1) | ||
80 | std r11,_CTR(r1) | ||
85 | std r9,GPR13(r1) | 81 | std r9,GPR13(r1) |
86 | mfcr r9 | ||
87 | mflr r10 | 82 | mflr r10 |
83 | /* | ||
84 | * This clears CR0.SO (bit 28), which is the error indication on | ||
85 | * return from this system call. | ||
86 | */ | ||
87 | rldimi r2,r11,28,(63-28) | ||
88 | li r11,0xc01 | 88 | li r11,0xc01 |
89 | std r9,_CCR(r1) | ||
90 | std r10,_LINK(r1) | 89 | std r10,_LINK(r1) |
91 | std r11,_TRAP(r1) | 90 | std r11,_TRAP(r1) |
92 | mfxer r9 | ||
93 | mfctr r10 | ||
94 | std r9,_XER(r1) | ||
95 | std r10,_CTR(r1) | ||
96 | std r3,ORIG_GPR3(r1) | 91 | std r3,ORIG_GPR3(r1) |
92 | std r2,_CCR(r1) | ||
97 | ld r2,PACATOC(r13) | 93 | ld r2,PACATOC(r13) |
98 | addi r9,r1,STACK_FRAME_OVERHEAD | 94 | addi r9,r1,STACK_FRAME_OVERHEAD |
99 | ld r11,exception_marker@toc(r2) | 95 | ld r11,exception_marker@toc(r2) |
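Editor's note: the rldimi in the rewritten prologue folds the old crclr into the CR save. With r11 known to be zero, rldimi r2,r11,28,(63-28) deposits a single zero bit at position 28 of the saved CR image; bit 28 of the 32-bit CR value is CR0.SO, the syscall error flag. In C terms the operation is simply (illustrative model):

    #include <stdint.h>

    /* Model of "rldimi r2,r11,28,(63-28)" with r11 == 0: clear only
     * bit 28 (CR0.SO) of the saved CR image, leaving the other CR
     * fields untouched so they restore correctly on exit. */
    static uint64_t clear_cr0_so(uint64_t ccr)
    {
        return ccr & ~(1ULL << 28);
    }

Clearing the bit in the saved image rather than in the live CR means a successful syscall returns with SO clear, while the error path sets it back by or-ing 0x10000000 into _CCR(r1) (the "oris r5,r5,0x1000" in syscall_error below).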
@@ -154,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
154 | ld r10,TI_FLAGS(r11) | 150 | ld r10,TI_FLAGS(r11) |
155 | andi. r11,r10,_TIF_SYSCALL_T_OR_A | 151 | andi. r11,r10,_TIF_SYSCALL_T_OR_A |
156 | bne- syscall_dotrace | 152 | bne- syscall_dotrace |
157 | syscall_dotrace_cont: | 153 | .Lsyscall_dotrace_cont: |
158 | cmpldi 0,r0,NR_syscalls | 154 | cmpldi 0,r0,NR_syscalls |
159 | bge- syscall_enosys | 155 | bge- syscall_enosys |
160 | 156 | ||
@@ -211,7 +207,7 @@ syscall_exit: | |||
211 | cmpld r3,r11 | 207 | cmpld r3,r11 |
212 | ld r5,_CCR(r1) | 208 | ld r5,_CCR(r1) |
213 | bge- syscall_error | 209 | bge- syscall_error |
214 | syscall_error_cont: | 210 | .Lsyscall_error_cont: |
215 | ld r7,_NIP(r1) | 211 | ld r7,_NIP(r1) |
216 | BEGIN_FTR_SECTION | 212 | BEGIN_FTR_SECTION |
217 | stdcx. r0,0,r1 /* to clear the reservation */ | 213 | stdcx. r0,0,r1 /* to clear the reservation */ |
@@ -246,7 +242,7 @@ syscall_error: | |||
246 | oris r5,r5,0x1000 /* Set SO bit in CR */ | 242 | oris r5,r5,0x1000 /* Set SO bit in CR */ |
247 | neg r3,r3 | 243 | neg r3,r3 |
248 | std r5,_CCR(r1) | 244 | std r5,_CCR(r1) |
249 | b syscall_error_cont | 245 | b .Lsyscall_error_cont |
250 | 246 | ||
251 | /* Traced system call support */ | 247 | /* Traced system call support */ |
252 | syscall_dotrace: | 248 | syscall_dotrace: |
@@ -268,7 +264,7 @@ syscall_dotrace: | |||
268 | addi r9,r1,STACK_FRAME_OVERHEAD | 264 | addi r9,r1,STACK_FRAME_OVERHEAD |
269 | clrrdi r10,r1,THREAD_SHIFT | 265 | clrrdi r10,r1,THREAD_SHIFT |
270 | ld r10,TI_FLAGS(r10) | 266 | ld r10,TI_FLAGS(r10) |
271 | b syscall_dotrace_cont | 267 | b .Lsyscall_dotrace_cont |
272 | 268 | ||
273 | syscall_enosys: | 269 | syscall_enosys: |
274 | li r3,-ENOSYS | 270 | li r3,-ENOSYS |
@@ -588,23 +584,19 @@ _GLOBAL(ret_from_except_lite) | |||
588 | fast_exc_return_irq: | 584 | fast_exc_return_irq: |
589 | restore: | 585 | restore: |
590 | /* | 586 | /* |
591 | * This is the main kernel exit path, we first check if we | 587 | * This is the main kernel exit path. First we check if we |
592 | * have to change our interrupt state. | 588 | * are about to re-enable interrupts |
593 | */ | 589 | */ |
594 | ld r5,SOFTE(r1) | 590 | ld r5,SOFTE(r1) |
595 | lbz r6,PACASOFTIRQEN(r13) | 591 | lbz r6,PACASOFTIRQEN(r13) |
596 | cmpwi cr1,r5,0 | 592 | cmpwi cr0,r5,0 |
597 | cmpw cr0,r5,r6 | 593 | beq restore_irq_off |
598 | beq cr0,4f | ||
599 | 594 | ||
600 | /* We do, handle disable first, which is easy */ | 595 | /* We are enabling, were we already enabled ? Yes, just return */ |
601 | bne cr1,3f; | 596 | cmpwi cr0,r6,1 |
602 | li r0,0 | 597 | beq cr0,do_restore |
603 | stb r0,PACASOFTIRQEN(r13); | ||
604 | TRACE_DISABLE_INTS | ||
605 | b 4f | ||
606 | 598 | ||
607 | 3: /* | 599 | /* |
608 | * We are about to soft-enable interrupts (we are hard disabled | 600 | * We are about to soft-enable interrupts (we are hard disabled |
609 | * at this point). We check if there's anything that needs to | 601 | * at this point). We check if there's anything that needs to |
610 | * be replayed first. | 602 | * be replayed first. |
@@ -626,7 +618,7 @@ restore_no_replay: | |||
626 | /* | 618 | /* |
627 | * Final return path. BookE is handled in a different file | 619 | * Final return path. BookE is handled in a different file |
628 | */ | 620 | */ |
629 | 4: | 621 | do_restore: |
630 | #ifdef CONFIG_PPC_BOOK3E | 622 | #ifdef CONFIG_PPC_BOOK3E |
631 | b .exception_return_book3e | 623 | b .exception_return_book3e |
632 | #else | 624 | #else |
@@ -700,6 +692,25 @@ fast_exception_return: | |||
700 | #endif /* CONFIG_PPC_BOOK3E */ | 692 | #endif /* CONFIG_PPC_BOOK3E */ |
701 | 693 | ||
702 | /* | 694 | /* |
695 | * We are returning to a context with interrupts soft disabled. | ||
696 | * | ||
697 | * However, we may also be about to hard enable, so we need to | ||
698 | * make sure that in this case, we also clear PACA_IRQ_HARD_DIS | ||
699 | * or that bit can get out of sync and bad things will happen | ||
700 | */ | ||
701 | restore_irq_off: | ||
702 | ld r3,_MSR(r1) | ||
703 | lbz r7,PACAIRQHAPPENED(r13) | ||
704 | andi. r0,r3,MSR_EE | ||
705 | beq 1f | ||
706 | rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS | ||
707 | stb r7,PACAIRQHAPPENED(r13) | ||
708 | 1: li r0,0 | ||
709 | stb r0,PACASOFTIRQEN(r13); | ||
710 | TRACE_DISABLE_INTS | ||
711 | b do_restore | ||
712 | |||
713 | /* | ||
703 | * Something did happen, check if a re-emit is needed | 714 | * Something did happen, check if a re-emit is needed |
704 | * (this also clears paca->irq_happened) | 715 | * (this also clears paca->irq_happened) |
705 | */ | 716 | */ |
@@ -748,6 +759,9 @@ restore_check_irq_replay: | |||
748 | #endif /* CONFIG_PPC_BOOK3E */ | 759 | #endif /* CONFIG_PPC_BOOK3E */ |
749 | 1: b .ret_from_except /* What else to do here ? */ | 760 | 1: b .ret_from_except /* What else to do here ? */ |
750 | 761 | ||
762 | |||
763 | |||
764 | 3: | ||
751 | do_work: | 765 | do_work: |
752 | #ifdef CONFIG_PREEMPT | 766 | #ifdef CONFIG_PREEMPT |
753 | andi. r0,r3,MSR_PR /* Returning to user mode? */ | 767 | andi. r0,r3,MSR_PR /* Returning to user mode? */ |
@@ -767,16 +781,6 @@ do_work: | |||
767 | SOFT_DISABLE_INTS(r3,r4) | 781 | SOFT_DISABLE_INTS(r3,r4) |
768 | 1: bl .preempt_schedule_irq | 782 | 1: bl .preempt_schedule_irq |
769 | 783 | ||
770 | /* Hard-disable interrupts again (and update PACA) */ | ||
771 | #ifdef CONFIG_PPC_BOOK3E | ||
772 | wrteei 0 | ||
773 | #else | ||
774 | ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ | ||
775 | mtmsrd r10,1 | ||
776 | #endif /* CONFIG_PPC_BOOK3E */ | ||
777 | li r0,PACA_IRQ_HARD_DIS | ||
778 | stb r0,PACAIRQHAPPENED(r13) | ||
779 | |||
780 | /* Re-test flags and eventually loop */ | 784 | /* Re-test flags and eventually loop */ |
781 | clrrdi r9,r1,THREAD_SHIFT | 785 | clrrdi r9,r1,THREAD_SHIFT |
782 | ld r4,TI_FLAGS(r9) | 786 | ld r4,TI_FLAGS(r9) |
@@ -787,14 +791,6 @@ do_work: | |||
787 | user_work: | 791 | user_work: |
788 | #endif /* CONFIG_PREEMPT */ | 792 | #endif /* CONFIG_PREEMPT */ |
789 | 793 | ||
790 | /* Enable interrupts */ | ||
791 | #ifdef CONFIG_PPC_BOOK3E | ||
792 | wrteei 1 | ||
793 | #else | ||
794 | ori r10,r10,MSR_EE | ||
795 | mtmsrd r10,1 | ||
796 | #endif /* CONFIG_PPC_BOOK3E */ | ||
797 | |||
798 | andi. r0,r4,_TIF_NEED_RESCHED | 794 | andi. r0,r4,_TIF_NEED_RESCHED |
799 | beq 1f | 795 | beq 1f |
800 | bl .restore_interrupts | 796 | bl .restore_interrupts |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index cb705fdbb458..1c06d2971545 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -63,11 +63,13 @@ BEGIN_FTR_SECTION | |||
63 | GET_PACA(r13) | 63 | GET_PACA(r13) |
64 | 64 | ||
65 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 65 | #ifdef CONFIG_KVM_BOOK3S_64_HV |
66 | lbz r0,PACAPROCSTART(r13) | 66 | li r0,KVM_HWTHREAD_IN_KERNEL |
67 | cmpwi r0,0x80 | 67 | stb r0,HSTATE_HWTHREAD_STATE(r13) |
68 | bne 1f | 68 | /* Order setting hwthread_state vs. testing hwthread_req */ |
69 | li r0,1 | 69 | sync |
70 | stb r0,PACAPROCSTART(r13) | 70 | lbz r0,HSTATE_HWTHREAD_REQ(r13) |
71 | cmpwi r0,0 | ||
72 | beq 1f | ||
71 | b kvm_start_guest | 73 | b kvm_start_guest |
72 | 1: | 74 | 1: |
73 | #endif | 75 | #endif |
@@ -94,12 +96,10 @@ machine_check_pSeries_1: | |||
94 | data_access_pSeries: | 96 | data_access_pSeries: |
95 | HMT_MEDIUM | 97 | HMT_MEDIUM |
96 | SET_SCRATCH0(r13) | 98 | SET_SCRATCH0(r13) |
97 | #ifndef CONFIG_POWER4_ONLY | ||
98 | BEGIN_FTR_SECTION | 99 | BEGIN_FTR_SECTION |
99 | b data_access_check_stab | 100 | b data_access_check_stab |
100 | data_access_not_stab: | 101 | data_access_not_stab: |
101 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) | 102 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) |
102 | #endif | ||
103 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD, | 103 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD, |
104 | KVMTEST, 0x300) | 104 | KVMTEST, 0x300) |
105 | 105 | ||
@@ -301,7 +301,6 @@ machine_check_fwnmi: | |||
301 | EXC_STD, KVMTEST, 0x200) | 301 | EXC_STD, KVMTEST, 0x200) |
302 | KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200) | 302 | KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200) |
303 | 303 | ||
304 | #ifndef CONFIG_POWER4_ONLY | ||
305 | /* moved from 0x300 */ | 304 | /* moved from 0x300 */ |
306 | data_access_check_stab: | 305 | data_access_check_stab: |
307 | GET_PACA(r13) | 306 | GET_PACA(r13) |
@@ -328,7 +327,6 @@ do_stab_bolted_pSeries: | |||
328 | GET_SCRATCH0(r10) | 327 | GET_SCRATCH0(r10) |
329 | std r10,PACA_EXSLB+EX_R13(r13) | 328 | std r10,PACA_EXSLB+EX_R13(r13) |
330 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) | 329 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) |
331 | #endif /* CONFIG_POWER4_ONLY */ | ||
332 | 330 | ||
333 | KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) | 331 | KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) |
334 | KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) | 332 | KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) |
@@ -768,8 +766,8 @@ alignment_common: | |||
768 | std r3,_DAR(r1) | 766 | std r3,_DAR(r1) |
769 | std r4,_DSISR(r1) | 767 | std r4,_DSISR(r1) |
770 | bl .save_nvgprs | 768 | bl .save_nvgprs |
769 | DISABLE_INTS | ||
771 | addi r3,r1,STACK_FRAME_OVERHEAD | 770 | addi r3,r1,STACK_FRAME_OVERHEAD |
772 | ENABLE_INTS | ||
773 | bl .alignment_exception | 771 | bl .alignment_exception |
774 | b .ret_from_except | 772 | b .ret_from_except |
775 | 773 | ||
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 7dd2981bcc50..7a2e5e421abf 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -248,10 +248,11 @@ _ENTRY(_start); | |||
248 | 248 | ||
249 | interrupt_base: | 249 | interrupt_base: |
250 | /* Critical Input Interrupt */ | 250 | /* Critical Input Interrupt */ |
251 | CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) | 251 | CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception) |
252 | 252 | ||
253 | /* Machine Check Interrupt */ | 253 | /* Machine Check Interrupt */ |
254 | CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) | 254 | CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \ |
255 | machine_check_exception) | ||
255 | MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) | 256 | MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) |
256 | 257 | ||
257 | /* Data Storage Interrupt */ | 258 | /* Data Storage Interrupt */ |
@@ -261,7 +262,8 @@ interrupt_base: | |||
261 | INSTRUCTION_STORAGE_EXCEPTION | 262 | INSTRUCTION_STORAGE_EXCEPTION |
262 | 263 | ||
263 | /* External Input Interrupt */ | 264 | /* External Input Interrupt */ |
264 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | 265 | EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \ |
266 | do_IRQ, EXC_XFER_LITE) | ||
265 | 267 | ||
266 | /* Alignment Interrupt */ | 268 | /* Alignment Interrupt */ |
267 | ALIGNMENT_EXCEPTION | 269 | ALIGNMENT_EXCEPTION |
@@ -273,29 +275,32 @@ interrupt_base: | |||
273 | #ifdef CONFIG_PPC_FPU | 275 | #ifdef CONFIG_PPC_FPU |
274 | FP_UNAVAILABLE_EXCEPTION | 276 | FP_UNAVAILABLE_EXCEPTION |
275 | #else | 277 | #else |
276 | EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) | 278 | EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \ |
279 | FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) | ||
277 | #endif | 280 | #endif |
278 | /* System Call Interrupt */ | 281 | /* System Call Interrupt */ |
279 | START_EXCEPTION(SystemCall) | 282 | START_EXCEPTION(SystemCall) |
280 | NORMAL_EXCEPTION_PROLOG | 283 | NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL) |
281 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | 284 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) |
282 | 285 | ||
283 | /* Auxiliary Processor Unavailable Interrupt */ | 286 | /* Auxiliary Processor Unavailable Interrupt */ |
284 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | 287 | EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \ |
288 | AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | ||
285 | 289 | ||
286 | /* Decrementer Interrupt */ | 290 | /* Decrementer Interrupt */ |
287 | DECREMENTER_EXCEPTION | 291 | DECREMENTER_EXCEPTION |
288 | 292 | ||
289 | /* Fixed Internal Timer Interrupt */ | 293 | /* Fixed Internal Timer Interrupt */ |
290 | /* TODO: Add FIT support */ | 294 | /* TODO: Add FIT support */ |
291 | EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) | 295 | EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \ |
296 | unknown_exception, EXC_XFER_EE) | ||
292 | 297 | ||
293 | /* Watchdog Timer Interrupt */ | 298 | /* Watchdog Timer Interrupt */ |
294 | /* TODO: Add watchdog support */ | 299 | /* TODO: Add watchdog support */ |
295 | #ifdef CONFIG_BOOKE_WDT | 300 | #ifdef CONFIG_BOOKE_WDT |
296 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) | 301 | CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException) |
297 | #else | 302 | #else |
298 | CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception) | 303 | CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception) |
299 | #endif | 304 | #endif |
300 | 305 | ||
301 | /* Data TLB Error Interrupt */ | 306 | /* Data TLB Error Interrupt */ |
@@ -778,14 +783,6 @@ _GLOBAL(__fixup_440A_mcheck) | |||
778 | blr | 783 | blr |
779 | 784 | ||
780 | /* | 785 | /* |
781 | * extern void giveup_altivec(struct task_struct *prev) | ||
782 | * | ||
783 | * The 44x core does not have an AltiVec unit. | ||
784 | */ | ||
785 | _GLOBAL(giveup_altivec) | ||
786 | blr | ||
787 | |||
788 | /* | ||
789 | * extern void giveup_fpu(struct task_struct *prev) | 786 | * extern void giveup_fpu(struct task_struct *prev) |
790 | * | 787 | * |
791 | * The 44x core does not have an FPU. | 788 | * The 44x core does not have an FPU. |
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 0e4175388f47..5f051eeb93a2 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h | |||
@@ -2,6 +2,9 @@ | |||
2 | #define __HEAD_BOOKE_H__ | 2 | #define __HEAD_BOOKE_H__ |
3 | 3 | ||
4 | #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */ | 4 | #include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */ |
5 | #include <asm/kvm_asm.h> | ||
6 | #include <asm/kvm_booke_hv_asm.h> | ||
7 | |||
5 | /* | 8 | /* |
6 | * Macros used for common Book-e exception handling | 9 | * Macros used for common Book-e exception handling |
7 | */ | 10 | */ |
@@ -28,14 +31,15 @@ | |||
28 | */ | 31 | */ |
29 | #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) | 32 | #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) |
30 | 33 | ||
31 | #define NORMAL_EXCEPTION_PROLOG \ | 34 | #define NORMAL_EXCEPTION_PROLOG(intno) \ |
32 | mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ | 35 | mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ |
33 | mfspr r10, SPRN_SPRG_THREAD; \ | 36 | mfspr r10, SPRN_SPRG_THREAD; \ |
34 | stw r11, THREAD_NORMSAVE(0)(r10); \ | 37 | stw r11, THREAD_NORMSAVE(0)(r10); \ |
35 | stw r13, THREAD_NORMSAVE(2)(r10); \ | 38 | stw r13, THREAD_NORMSAVE(2)(r10); \ |
36 | mfcr r13; /* save CR in r13 for now */\ | 39 | mfcr r13; /* save CR in r13 for now */\ |
37 | mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ | 40 | mfspr r11, SPRN_SRR1; \ |
38 | andi. r11,r11,MSR_PR; \ | 41 | DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \ |
42 | andi. r11, r11, MSR_PR; /* check whether user or kernel */\ | ||
39 | mr r11, r1; \ | 43 | mr r11, r1; \ |
40 | beq 1f; \ | 44 | beq 1f; \ |
41 | /* if from user, start at top of this thread's kernel stack */ \ | 45 | /* if from user, start at top of this thread's kernel stack */ \ |
@@ -113,7 +117,7 @@ | |||
113 | * registers as the normal prolog above. Instead we use a portion of the | 117 | * registers as the normal prolog above. Instead we use a portion of the |
114 | * critical/machine check exception stack at low physical addresses. | 118 | * critical/machine check exception stack at low physical addresses. |
115 | */ | 119 | */ |
116 | #define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ | 120 | #define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \ |
117 | mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \ | 121 | mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \ |
118 | BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ | 122 | BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ |
119 | stw r9,GPR9(r8); /* save various registers */\ | 123 | stw r9,GPR9(r8); /* save various registers */\ |
@@ -121,8 +125,9 @@ | |||
121 | stw r10,GPR10(r8); \ | 125 | stw r10,GPR10(r8); \ |
122 | stw r11,GPR11(r8); \ | 126 | stw r11,GPR11(r8); \ |
123 | stw r9,_CCR(r8); /* save CR on stack */\ | 127 | stw r9,_CCR(r8); /* save CR on stack */\ |
124 | mfspr r10,exc_level_srr1; /* check whether user or kernel */\ | 128 | mfspr r11,exc_level_srr1; /* check whether user or kernel */\ |
125 | andi. r10,r10,MSR_PR; \ | 129 | DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \ |
130 | andi. r11,r11,MSR_PR; \ | ||
126 | mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ | 131 | mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ |
127 | lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ | 132 | lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ |
128 | addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ | 133 | addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ |
@@ -162,12 +167,30 @@ | |||
162 | SAVE_4GPRS(3, r11); \ | 167 | SAVE_4GPRS(3, r11); \ |
163 | SAVE_2GPRS(7, r11) | 168 | SAVE_2GPRS(7, r11) |
164 | 169 | ||
165 | #define CRITICAL_EXCEPTION_PROLOG \ | 170 | #define CRITICAL_EXCEPTION_PROLOG(intno) \ |
166 | EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1) | 171 | EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1) |
167 | #define DEBUG_EXCEPTION_PROLOG \ | 172 | #define DEBUG_EXCEPTION_PROLOG \ |
168 | EXC_LEVEL_EXCEPTION_PROLOG(DBG, SPRN_DSRR0, SPRN_DSRR1) | 173 | EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1) |
169 | #define MCHECK_EXCEPTION_PROLOG \ | 174 | #define MCHECK_EXCEPTION_PROLOG \ |
170 | EXC_LEVEL_EXCEPTION_PROLOG(MC, SPRN_MCSRR0, SPRN_MCSRR1) | 175 | EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \ |
176 | SPRN_MCSRR0, SPRN_MCSRR1) | ||
177 | |||
178 | /* | ||
179 | * Guest Doorbell -- this is a bit odd in that it uses GSRR0/1 despite | ||
180 | * being delivered to the host. This exception can only happen | ||
181 | * inside a KVM guest -- so we just handle up to the DO_KVM rather | ||
182 | * than try to fit this into one of the existing prolog macros. | ||
183 | */ | ||
184 | #define GUEST_DOORBELL_EXCEPTION \ | ||
185 | START_EXCEPTION(GuestDoorbell); \ | ||
186 | mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ | ||
187 | mfspr r10, SPRN_SPRG_THREAD; \ | ||
188 | stw r11, THREAD_NORMSAVE(0)(r10); \ | ||
189 | mfspr r11, SPRN_SRR1; \ | ||
190 | stw r13, THREAD_NORMSAVE(2)(r10); \ | ||
191 | mfcr r13; /* save CR in r13 for now */\ | ||
192 | DO_KVM BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \ | ||
193 | trap | ||
171 | 194 | ||
172 | /* | 195 | /* |
173 | * Exception vectors. | 196 | * Exception vectors. |
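The head_booke.h rework threads an interrupt number through every prolog macro so a single DO_KVM hook can decide, per vector, whether the event belongs to a guest. In rough C terms (all names below are stand-ins for this sketch, not the kernel's macros, and the MSR_GS bit position is an assumption):

    #include <stdio.h>

    #define MSR_GS (1u << 28)  /* assumed guest-state bit for this sketch */

    static void kvmppc_handler(int intno)  { printf("to KVM: intno %d\n", intno); }
    static void kernel_handler(int intno)  { printf("to kernel: intno %d\n", intno); }

    /* DO_KVM: if the interrupted context was a guest, divert to KVM */
    #define DO_KVM(intno, srr1)          \
        do {                             \
            if ((srr1) & MSR_GS) {       \
                kvmppc_handler(intno);   \
                return;                  \
            }                            \
        } while (0)

    #define NORMAL_EXCEPTION_PROLOG(intno, srr1) \
        DO_KVM(intno, srr1)

    static void exception_0x500(unsigned int srr1)
    {
        NORMAL_EXCEPTION_PROLOG(5, srr1);  /* external input, by way of example */
        kernel_handler(5);
    }

    int main(void)
    {
        exception_0x500(0);       /* from kernel: normal path */
        exception_0x500(MSR_GS);  /* from guest: routed to KVM */
        return 0;
    }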
@@ -181,16 +204,16 @@ label: | |||
181 | .long func; \ | 204 | .long func; \ |
182 | .long ret_from_except_full | 205 | .long ret_from_except_full |
183 | 206 | ||
184 | #define EXCEPTION(n, label, hdlr, xfer) \ | 207 | #define EXCEPTION(n, intno, label, hdlr, xfer) \ |
185 | START_EXCEPTION(label); \ | 208 | START_EXCEPTION(label); \ |
186 | NORMAL_EXCEPTION_PROLOG; \ | 209 | NORMAL_EXCEPTION_PROLOG(intno); \ |
187 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 210 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
188 | xfer(n, hdlr) | 211 | xfer(n, hdlr) |
189 | 212 | ||
190 | #define CRITICAL_EXCEPTION(n, label, hdlr) \ | 213 | #define CRITICAL_EXCEPTION(n, intno, label, hdlr) \ |
191 | START_EXCEPTION(label); \ | 214 | START_EXCEPTION(label); \ |
192 | CRITICAL_EXCEPTION_PROLOG; \ | 215 | CRITICAL_EXCEPTION_PROLOG(intno); \ |
193 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 216 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
194 | EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ | 217 | EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ |
195 | NOCOPY, crit_transfer_to_handler, \ | 218 | NOCOPY, crit_transfer_to_handler, \ |
196 | ret_from_crit_exc) | 219 | ret_from_crit_exc) |
@@ -302,7 +325,7 @@ label: | |||
302 | 325 | ||
303 | #define DEBUG_CRIT_EXCEPTION \ | 326 | #define DEBUG_CRIT_EXCEPTION \ |
304 | START_EXCEPTION(DebugCrit); \ | 327 | START_EXCEPTION(DebugCrit); \ |
305 | CRITICAL_EXCEPTION_PROLOG; \ | 328 | CRITICAL_EXCEPTION_PROLOG(DEBUG); \ |
306 | \ | 329 | \ |
307 | /* \ | 330 | /* \ |
308 | * If there is a single step or branch-taken exception in an \ | 331 | * If there is a single step or branch-taken exception in an \ |
@@ -355,7 +378,7 @@ label: | |||
355 | 378 | ||
356 | #define DATA_STORAGE_EXCEPTION \ | 379 | #define DATA_STORAGE_EXCEPTION \ |
357 | START_EXCEPTION(DataStorage) \ | 380 | START_EXCEPTION(DataStorage) \ |
358 | NORMAL_EXCEPTION_PROLOG; \ | 381 | NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \ |
359 | mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ | 382 | mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ |
360 | stw r5,_ESR(r11); \ | 383 | stw r5,_ESR(r11); \ |
361 | mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ | 384 | mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ |
@@ -363,7 +386,7 @@ label: | |||
363 | 386 | ||
364 | #define INSTRUCTION_STORAGE_EXCEPTION \ | 387 | #define INSTRUCTION_STORAGE_EXCEPTION \ |
365 | START_EXCEPTION(InstructionStorage) \ | 388 | START_EXCEPTION(InstructionStorage) \ |
366 | NORMAL_EXCEPTION_PROLOG; \ | 389 | NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \ |
367 | mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ | 390 | mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ |
368 | stw r5,_ESR(r11); \ | 391 | stw r5,_ESR(r11); \ |
369 | mr r4,r12; /* Pass SRR0 as arg2 */ \ | 392 | mr r4,r12; /* Pass SRR0 as arg2 */ \ |
@@ -372,7 +395,7 @@ label: | |||
372 | 395 | ||
373 | #define ALIGNMENT_EXCEPTION \ | 396 | #define ALIGNMENT_EXCEPTION \ |
374 | START_EXCEPTION(Alignment) \ | 397 | START_EXCEPTION(Alignment) \ |
375 | NORMAL_EXCEPTION_PROLOG; \ | 398 | NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \ |
376 | mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ | 399 | mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ |
377 | stw r4,_DEAR(r11); \ | 400 | stw r4,_DEAR(r11); \ |
378 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 401 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
@@ -380,7 +403,7 @@ label: | |||
380 | 403 | ||
381 | #define PROGRAM_EXCEPTION \ | 404 | #define PROGRAM_EXCEPTION \ |
382 | START_EXCEPTION(Program) \ | 405 | START_EXCEPTION(Program) \ |
383 | NORMAL_EXCEPTION_PROLOG; \ | 406 | NORMAL_EXCEPTION_PROLOG(PROGRAM); \ |
384 | mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ | 407 | mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ |
385 | stw r4,_ESR(r11); \ | 408 | stw r4,_ESR(r11); \ |
386 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 409 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
@@ -388,7 +411,7 @@ label: | |||
388 | 411 | ||
389 | #define DECREMENTER_EXCEPTION \ | 412 | #define DECREMENTER_EXCEPTION \ |
390 | START_EXCEPTION(Decrementer) \ | 413 | START_EXCEPTION(Decrementer) \ |
391 | NORMAL_EXCEPTION_PROLOG; \ | 414 | NORMAL_EXCEPTION_PROLOG(DECREMENTER); \ |
392 | lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \ | 415 | lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \ |
393 | mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \ | 416 | mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \ |
394 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | 417 | addi r3,r1,STACK_FRAME_OVERHEAD; \ |
@@ -396,7 +419,7 @@ label: | |||
396 | 419 | ||
397 | #define FP_UNAVAILABLE_EXCEPTION \ | 420 | #define FP_UNAVAILABLE_EXCEPTION \ |
398 | START_EXCEPTION(FloatingPointUnavailable) \ | 421 | START_EXCEPTION(FloatingPointUnavailable) \ |
399 | NORMAL_EXCEPTION_PROLOG; \ | 422 | NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \ |
400 | beq 1f; \ | 423 | beq 1f; \ |
401 | bl load_up_fpu; /* if from user, just load it up */ \ | 424 | bl load_up_fpu; /* if from user, just load it up */ \ |
402 | b fast_exception_return; \ | 425 | b fast_exception_return; \ |
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 28e62598d0e8..1f4434a38608 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -301,19 +301,20 @@ _ENTRY(__early_start) | |||
301 | 301 | ||
302 | interrupt_base: | 302 | interrupt_base: |
303 | /* Critical Input Interrupt */ | 303 | /* Critical Input Interrupt */ |
304 | CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) | 304 | CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception) |
305 | 305 | ||
306 | /* Machine Check Interrupt */ | 306 | /* Machine Check Interrupt */ |
307 | #ifdef CONFIG_E200 | 307 | #ifdef CONFIG_E200 |
308 | /* no RFMCI, MCSRRs on E200 */ | 308 | /* no RFMCI, MCSRRs on E200 */ |
309 | CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) | 309 | CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \ |
310 | machine_check_exception) | ||
310 | #else | 311 | #else |
311 | MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) | 312 | MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) |
312 | #endif | 313 | #endif |
313 | 314 | ||
314 | /* Data Storage Interrupt */ | 315 | /* Data Storage Interrupt */ |
315 | START_EXCEPTION(DataStorage) | 316 | START_EXCEPTION(DataStorage) |
316 | NORMAL_EXCEPTION_PROLOG | 317 | NORMAL_EXCEPTION_PROLOG(DATA_STORAGE) |
317 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ | 318 | mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ |
318 | stw r5,_ESR(r11) | 319 | stw r5,_ESR(r11) |
319 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ | 320 | mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ |
@@ -328,7 +329,7 @@ interrupt_base: | |||
328 | INSTRUCTION_STORAGE_EXCEPTION | 329 | INSTRUCTION_STORAGE_EXCEPTION |
329 | 330 | ||
330 | /* External Input Interrupt */ | 331 | /* External Input Interrupt */ |
331 | EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) | 332 | EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE) |
332 | 333 | ||
333 | /* Alignment Interrupt */ | 334 | /* Alignment Interrupt */ |
334 | ALIGNMENT_EXCEPTION | 335 | ALIGNMENT_EXCEPTION |
@@ -342,32 +343,36 @@ interrupt_base: | |||
342 | #else | 343 | #else |
343 | #ifdef CONFIG_E200 | 344 | #ifdef CONFIG_E200 |
344 | /* E200 treats 'normal' floating point instructions as FP Unavail exception */ | 345 | /* E200 treats 'normal' floating point instructions as FP Unavail exception */ |
345 | EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE) | 346 | EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \ |
347 | program_check_exception, EXC_XFER_EE) | ||
346 | #else | 348 | #else |
347 | EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) | 349 | EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \ |
350 | unknown_exception, EXC_XFER_EE) | ||
348 | #endif | 351 | #endif |
349 | #endif | 352 | #endif |
350 | 353 | ||
351 | /* System Call Interrupt */ | 354 | /* System Call Interrupt */ |
352 | START_EXCEPTION(SystemCall) | 355 | START_EXCEPTION(SystemCall) |
353 | NORMAL_EXCEPTION_PROLOG | 356 | NORMAL_EXCEPTION_PROLOG(SYSCALL) |
354 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | 357 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) |
355 | 358 | ||
356 | /* Auxiliary Processor Unavailable Interrupt */ | 359 | /* Auxiliary Processor Unavailable Interrupt */ |
357 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | 360 | EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \ |
361 | unknown_exception, EXC_XFER_EE) | ||
358 | 362 | ||
359 | /* Decrementer Interrupt */ | 363 | /* Decrementer Interrupt */ |
360 | DECREMENTER_EXCEPTION | 364 | DECREMENTER_EXCEPTION |
361 | 365 | ||
362 | /* Fixed Internal Timer Interrupt */ | 366 | /* Fixed Internal Timer Interrupt */ |
363 | /* TODO: Add FIT support */ | 367 | /* TODO: Add FIT support */ |
364 | EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) | 368 | EXCEPTION(0x3100, FIT, FixedIntervalTimer, \ |
369 | unknown_exception, EXC_XFER_EE) | ||
365 | 370 | ||
366 | /* Watchdog Timer Interrupt */ | 371 | /* Watchdog Timer Interrupt */ |
367 | #ifdef CONFIG_BOOKE_WDT | 372 | #ifdef CONFIG_BOOKE_WDT |
368 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) | 373 | CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException) |
369 | #else | 374 | #else |
370 | CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception) | 375 | CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception) |
371 | #endif | 376 | #endif |
372 | 377 | ||
373 | /* Data TLB Error Interrupt */ | 378 | /* Data TLB Error Interrupt */ |
@@ -375,10 +380,16 @@ interrupt_base: | |||
375 | mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ | 380 | mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ |
376 | mfspr r10, SPRN_SPRG_THREAD | 381 | mfspr r10, SPRN_SPRG_THREAD |
377 | stw r11, THREAD_NORMSAVE(0)(r10) | 382 | stw r11, THREAD_NORMSAVE(0)(r10) |
383 | #ifdef CONFIG_KVM_BOOKE_HV | ||
384 | BEGIN_FTR_SECTION | ||
385 | mfspr r11, SPRN_SRR1 | ||
386 | END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) | ||
387 | #endif | ||
378 | stw r12, THREAD_NORMSAVE(1)(r10) | 388 | stw r12, THREAD_NORMSAVE(1)(r10) |
379 | stw r13, THREAD_NORMSAVE(2)(r10) | 389 | stw r13, THREAD_NORMSAVE(2)(r10) |
380 | mfcr r13 | 390 | mfcr r13 |
381 | stw r13, THREAD_NORMSAVE(3)(r10) | 391 | stw r13, THREAD_NORMSAVE(3)(r10) |
392 | DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1 | ||
382 | mfspr r10, SPRN_DEAR /* Get faulting address */ | 393 | mfspr r10, SPRN_DEAR /* Get faulting address */ |
383 | 394 | ||
384 | /* If we are faulting a kernel address, we have to use the | 395 | /* If we are faulting a kernel address, we have to use the |
@@ -463,10 +474,16 @@ interrupt_base: | |||
463 | mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ | 474 | mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ |
464 | mfspr r10, SPRN_SPRG_THREAD | 475 | mfspr r10, SPRN_SPRG_THREAD |
465 | stw r11, THREAD_NORMSAVE(0)(r10) | 476 | stw r11, THREAD_NORMSAVE(0)(r10) |
477 | #ifdef CONFIG_KVM_BOOKE_HV | ||
478 | BEGIN_FTR_SECTION | ||
479 | mfspr r11, SPRN_SRR1 | ||
480 | END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV) | ||
481 | #endif | ||
466 | stw r12, THREAD_NORMSAVE(1)(r10) | 482 | stw r12, THREAD_NORMSAVE(1)(r10) |
467 | stw r13, THREAD_NORMSAVE(2)(r10) | 483 | stw r13, THREAD_NORMSAVE(2)(r10) |
468 | mfcr r13 | 484 | mfcr r13 |
469 | stw r13, THREAD_NORMSAVE(3)(r10) | 485 | stw r13, THREAD_NORMSAVE(3)(r10) |
486 | DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1 | ||
470 | mfspr r10, SPRN_SRR0 /* Get faulting address */ | 487 | mfspr r10, SPRN_SRR0 /* Get faulting address */ |
471 | 488 | ||
472 | /* If we are faulting a kernel address, we have to use the | 489 | /* If we are faulting a kernel address, we have to use the |
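In both TLB-miss fast paths the extra mfspr of SRR1 is wrapped in BEGIN_FTR_SECTION/END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV): the instruction is assembled into the image but nopped out at boot on CPUs without embedded-hypervisor support, so the hot path pays nothing there. A C analogue of the intent (the real mechanism is binary patching, not a per-exception branch; the flag value is assumed):

    #define CPU_FTR_EMB_HV (1u << 3)  /* assumed flag value for the sketch */

    static unsigned long read_srr1(void) { return 0; /* mfspr SPRN_SRR1 */ }

    static unsigned long maybe_read_srr1(unsigned int cpu_features)
    {
        /* on real hardware this test is resolved once at boot by
         * patching the mfspr to a nop, not evaluated per exception */
        if (cpu_features & CPU_FTR_EMB_HV)
            return read_srr1();
        return 0;
    }

    int main(void) { return (int)maybe_read_srr1(CPU_FTR_EMB_HV); }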
@@ -538,36 +555,54 @@ interrupt_base: | |||
538 | #ifdef CONFIG_SPE | 555 | #ifdef CONFIG_SPE |
539 | /* SPE Unavailable */ | 556 | /* SPE Unavailable */ |
540 | START_EXCEPTION(SPEUnavailable) | 557 | START_EXCEPTION(SPEUnavailable) |
541 | NORMAL_EXCEPTION_PROLOG | 558 | NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL) |
542 | bne load_up_spe | 559 | bne load_up_spe |
543 | addi r3,r1,STACK_FRAME_OVERHEAD | 560 | addi r3,r1,STACK_FRAME_OVERHEAD |
544 | EXC_XFER_EE_LITE(0x2010, KernelSPE) | 561 | EXC_XFER_EE_LITE(0x2010, KernelSPE) |
545 | #else | 562 | #else |
546 | EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE) | 563 | EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \ |
564 | unknown_exception, EXC_XFER_EE) | ||
547 | #endif /* CONFIG_SPE */ | 565 | #endif /* CONFIG_SPE */ |
548 | 566 | ||
549 | /* SPE Floating Point Data */ | 567 | /* SPE Floating Point Data */ |
550 | #ifdef CONFIG_SPE | 568 | #ifdef CONFIG_SPE |
551 | EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); | 569 | EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \ |
570 | SPEFloatingPointException, EXC_XFER_EE); | ||
552 | 571 | ||
553 | /* SPE Floating Point Round */ | 572 | /* SPE Floating Point Round */ |
554 | EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE) | 573 | EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ |
574 | SPEFloatingPointRoundException, EXC_XFER_EE) | ||
555 | #else | 575 | #else |
556 | EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) | 576 | EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \ |
557 | EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) | 577 | unknown_exception, EXC_XFER_EE) |
578 | EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \ | ||
579 | unknown_exception, EXC_XFER_EE) | ||
558 | #endif /* CONFIG_SPE */ | 580 | #endif /* CONFIG_SPE */ |
559 | 581 | ||
560 | /* Performance Monitor */ | 582 | /* Performance Monitor */ |
561 | EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) | 583 | EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \ |
584 | performance_monitor_exception, EXC_XFER_STD) | ||
562 | 585 | ||
563 | EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD) | 586 | EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD) |
564 | 587 | ||
565 | CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception) | 588 | CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \ |
589 | CriticalDoorbell, unknown_exception) | ||
566 | 590 | ||
567 | /* Debug Interrupt */ | 591 | /* Debug Interrupt */ |
568 | DEBUG_DEBUG_EXCEPTION | 592 | DEBUG_DEBUG_EXCEPTION |
569 | DEBUG_CRIT_EXCEPTION | 593 | DEBUG_CRIT_EXCEPTION |
570 | 594 | ||
595 | GUEST_DOORBELL_EXCEPTION | ||
596 | |||
597 | CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \ | ||
598 | unknown_exception) | ||
599 | |||
600 | /* Hypercall */ | ||
601 | EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE) | ||
602 | |||
603 | /* Embedded Hypervisor Privilege */ | ||
604 | EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE) | ||
605 | |||
571 | /* | 606 | /* |
572 | * Local functions | 607 | * Local functions |
573 | */ | 608 | */ |
@@ -871,16 +906,31 @@ _GLOBAL(__setup_e500mc_ivors) | |||
871 | mtspr SPRN_IVOR36,r3 | 906 | mtspr SPRN_IVOR36,r3 |
872 | li r3,CriticalDoorbell@l | 907 | li r3,CriticalDoorbell@l |
873 | mtspr SPRN_IVOR37,r3 | 908 | mtspr SPRN_IVOR37,r3 |
874 | sync | ||
875 | blr | ||
876 | 909 | ||
877 | /* | 910 | /* |
878 | * extern void giveup_altivec(struct task_struct *prev) | 911 | * We only want to touch IVOR38-41 if we're running on hardware |
879 | * | 912 | * that supports category E.HV. The architectural way to determine |
880 | * The e500 core does not have an AltiVec unit. | 913 | * this is MMUCFG[LPIDSIZE]. |
881 | */ | 914 | */ |
882 | _GLOBAL(giveup_altivec) | 915 | mfspr r3, SPRN_MMUCFG |
916 | andis. r3, r3, MMUCFG_LPIDSIZE@h | ||
917 | beq no_hv | ||
918 | li r3,GuestDoorbell@l | ||
919 | mtspr SPRN_IVOR38,r3 | ||
920 | li r3,CriticalGuestDoorbell@l | ||
921 | mtspr SPRN_IVOR39,r3 | ||
922 | li r3,Hypercall@l | ||
923 | mtspr SPRN_IVOR40,r3 | ||
924 | li r3,Ehvpriv@l | ||
925 | mtspr SPRN_IVOR41,r3 | ||
926 | skip_hv_ivors: | ||
927 | sync | ||
883 | blr | 928 | blr |
929 | no_hv: | ||
930 | lwz r3, CPU_SPEC_FEATURES(r5) | ||
931 | rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV | ||
932 | stw r3, CPU_SPEC_FEATURES(r5) | ||
933 | b skip_hv_ivors | ||
884 | 934 | ||
885 | #ifdef CONFIG_SPE | 935 | #ifdef CONFIG_SPE |
886 | /* | 936 | /* |
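__setup_e500mc_ivors now probes for category E.HV before touching IVOR38-41: a zero MMUCFG[LPIDSIZE] field means no hypervisor support, in which case CPU_FTR_EMB_HV is cleared from the CPU feature word instead. The same decision in C (both register masks here are assumptions of the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define MMUCFG_LPIDSIZE 0x00e00000u  /* assumed field mask   */
    #define CPU_FTR_EMB_HV  0x00000008u  /* assumed feature bit  */

    static void setup_hv_ivors(uint32_t mmucfg, uint32_t *cpu_features)
    {
        if (mmucfg & MMUCFG_LPIDSIZE) {
            /* program IVOR38..41: guest/critical doorbell, hypercall, ehvpriv */
            printf("E.HV present, hypervisor IVORs programmed\n");
        } else {
            *cpu_features &= ~CPU_FTR_EMB_HV;  /* stop advertising E.HV */
        }
    }

    int main(void)
    {
        uint32_t features = CPU_FTR_EMB_HV;
        setup_hv_ivors(0, &features);  /* LPIDSIZE == 0: feature bit cleared */
        printf("features = %#x\n", features);
        return 0;
    }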
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index 6d2209ac0c44..2099d9a879e8 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c | |||
@@ -113,29 +113,6 @@ void cpu_idle(void) | |||
113 | } | 113 | } |
114 | } | 114 | } |
115 | 115 | ||
116 | |||
117 | /* | ||
118 | * cpu_idle_wait - Used to ensure that all the CPUs come out of the old | ||
119 | * idle loop and start using the new idle loop. | ||
120 | * Required while changing idle handler on SMP systems. | ||
121 | * Caller must have changed idle handler to the new value before the call. | ||
122 | * This window may be larger on shared systems. | ||
123 | */ | ||
124 | void cpu_idle_wait(void) | ||
125 | { | ||
126 | int cpu; | ||
127 | smp_mb(); | ||
128 | |||
129 | /* kick all the CPUs so that they exit out of old idle routine */ | ||
130 | get_online_cpus(); | ||
131 | for_each_online_cpu(cpu) { | ||
132 | if (cpu != smp_processor_id()) | ||
133 | smp_send_reschedule(cpu); | ||
134 | } | ||
135 | put_online_cpus(); | ||
136 | } | ||
137 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | ||
138 | |||
139 | int powersave_nap; | 116 | int powersave_nap; |
140 | 117 | ||
141 | #ifdef CONFIG_SYSCTL | 118 | #ifdef CONFIG_SYSCTL |
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index 0cdc9a392839..7140d838339e 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/asm-offsets.h> | 16 | #include <asm/asm-offsets.h> |
17 | #include <asm/ppc-opcode.h> | 17 | #include <asm/ppc-opcode.h> |
18 | #include <asm/hw_irq.h> | 18 | #include <asm/hw_irq.h> |
19 | #include <asm/kvm_book3s_asm.h> | ||
19 | 20 | ||
20 | #undef DEBUG | 21 | #undef DEBUG |
21 | 22 | ||
@@ -81,6 +82,12 @@ _GLOBAL(power7_idle) | |||
81 | std r9,_MSR(r1) | 82 | std r9,_MSR(r1) |
82 | std r1,PACAR1(r13) | 83 | std r1,PACAR1(r13) |
83 | 84 | ||
85 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
86 | /* Tell KVM we're napping */ | ||
87 | li r4,KVM_HWTHREAD_IN_NAP | ||
88 | stb r4,HSTATE_HWTHREAD_STATE(r13) | ||
89 | #endif | ||
90 | |||
84 | /* Magic NAP mode enter sequence */ | 91 | /* Magic NAP mode enter sequence */ |
85 | std r0,0(r1) | 92 | std r0,0(r1) |
86 | ptesync | 93 | ptesync |
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c deleted file mode 100644 index d076d465dbd1..000000000000 --- a/arch/powerpc/kernel/init_task.c +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | #include <linux/mm.h> | ||
2 | #include <linux/export.h> | ||
3 | #include <linux/sched.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/init_task.h> | ||
6 | #include <linux/fs.h> | ||
7 | #include <linux/mqueue.h> | ||
8 | #include <asm/uaccess.h> | ||
9 | |||
10 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
11 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
12 | /* | ||
13 | * Initial thread structure. | ||
14 | * | ||
15 | * We need to make sure that this is 16384-byte aligned due to the | ||
16 | * way process stacks are handled. This is done by having a special | ||
17 | * "init_task" linker map entry.. | ||
18 | */ | ||
19 | union thread_union init_thread_union __init_task_data = | ||
20 | { INIT_THREAD_INFO(init_task) }; | ||
21 | |||
22 | /* | ||
23 | * Initial task structure. | ||
24 | * | ||
25 | * All other task structs will be allocated on slabs in fork.c | ||
26 | */ | ||
27 | struct task_struct init_task = INIT_TASK(init_task); | ||
28 | |||
29 | EXPORT_SYMBOL(init_task); | ||
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 43eb74fcedde..7835a5e1ea5f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en) | |||
229 | */ | 229 | */ |
230 | if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) | 230 | if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) |
231 | __hard_irq_disable(); | 231 | __hard_irq_disable(); |
232 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
233 | else { | ||
234 | /* | ||
235 | * We should already be hard disabled here. We had bugs | ||
236 | * where that wasn't the case so let's dbl check it and | ||
237 | * warn if we are wrong. Only do that when IRQ tracing | ||
238 | * is enabled as mfmsr() can be costly. | ||
239 | */ | ||
240 | if (WARN_ON(mfmsr() & MSR_EE)) | ||
241 | __hard_irq_disable(); | ||
242 | } | ||
243 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
244 | |||
232 | set_soft_enabled(0); | 245 | set_soft_enabled(0); |
233 | 246 | ||
234 | /* | 247 | /* |
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore); | |||
260 | * if they are currently disabled. This is typically called before | 273 | * if they are currently disabled. This is typically called before |
261 | * schedule() or do_signal() when returning to userspace. We do it | 274 | * schedule() or do_signal() when returning to userspace. We do it |
262 | * in C to avoid the burden of dealing with lockdep etc... | 275 | * in C to avoid the burden of dealing with lockdep etc... |
276 | * | ||
277 | * NOTE: This is called with interrupts hard disabled but not marked | ||
278 | * as such in paca->irq_happened, so we need to resync this. | ||
263 | */ | 279 | */ |
264 | void restore_interrupts(void) | 280 | void restore_interrupts(void) |
265 | { | 281 | { |
266 | if (irqs_disabled()) | 282 | if (irqs_disabled()) { |
283 | local_paca->irq_happened |= PACA_IRQ_HARD_DIS; | ||
267 | local_irq_enable(); | 284 | local_irq_enable(); |
285 | } else | ||
286 | __hard_irq_enable(); | ||
268 | } | 287 | } |
269 | 288 | ||
270 | #endif /* CONFIG_PPC64 */ | 289 | #endif /* CONFIG_PPC64 */ |
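Two related fixes land here: arch_local_irq_restore() gains a CONFIG_TRACE_IRQFLAGS-only WARN_ON that catches MSR[EE] being unexpectedly on, and restore_interrupts() resyncs paca->irq_happened before soft-enabling, since its callers arrive with EE hard-off but the lazy state not yet recording that. A condensed C model of the restore_interrupts() fix (field names simplified for the sketch):

    #define PACA_IRQ_HARD_DIS 0x01

    struct paca { int soft_enabled; unsigned char irq_happened; };

    static void hard_irq_enable(struct paca *paca) { paca->irq_happened = 0; }

    static void restore_interrupts(struct paca *paca)
    {
        if (!paca->soft_enabled) {                 /* irqs_disabled() */
            /* record the hard-disable our caller already performed */
            paca->irq_happened |= PACA_IRQ_HARD_DIS;
            /* local_irq_enable(): replay pending events, then unmask */
            paca->soft_enabled = 1;
            hard_irq_enable(paca);
        } else {
            hard_irq_enable(paca);                 /* __hard_irq_enable() */
        }
    }

    int main(void)
    {
        struct paca paca = { 0, 0 };
        restore_interrupts(&paca);
        return paca.irq_happened;  /* 0: state resynced, then re-enabled */
    }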
@@ -568,7 +587,7 @@ int irq_choose_cpu(const struct cpumask *mask) | |||
568 | { | 587 | { |
569 | int cpuid; | 588 | int cpuid; |
570 | 589 | ||
571 | if (cpumask_equal(mask, cpu_all_mask)) { | 590 | if (cpumask_equal(mask, cpu_online_mask)) { |
572 | static int irq_rover; | 591 | static int irq_rover; |
573 | static DEFINE_RAW_SPINLOCK(irq_rover_lock); | 592 | static DEFINE_RAW_SPINLOCK(irq_rover_lock); |
574 | unsigned long flags; | 593 | unsigned long flags; |
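The irq_choose_cpu() change matters because the mask handed in is typically the default affinity already clamped to online CPUs; testing it against cpu_all_mask therefore never matched once any possible CPU was not online, leaving the round-robin rover dead and interrupts piled on the first CPU of the mask. A toy version of the corrected test (8-bit words stand in for cpumasks):

    #include <stdio.h>

    static int irq_choose_cpu(unsigned mask, unsigned online_mask, int *rover)
    {
        if (mask == online_mask) {   /* "no real restriction": round-robin */
            do {
                *rover = (*rover + 1) % 8;
            } while (!(online_mask & (1u << *rover)));
            return *rover;
        }
        /* explicit affinity: first online CPU in the mask */
        for (int cpu = 0; cpu < 8; cpu++)
            if (mask & online_mask & (1u << cpu))
                return cpu;
        return 0;                    /* fallback: mask had no online CPU */
    }

    int main(void)
    {
        int rover = -1;
        unsigned online = 0x0f;      /* CPUs 0-3 online of 8 possible */
        /* before the fix the compare was against all *possible* CPUs (0xff
         * here) and never matched, so the rover path was unreachable */
        printf("%d\n", irq_choose_cpu(online, online, &rover));
        printf("%d\n", irq_choose_cpu(0x04, online, &rover));
        return 0;
    }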
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 7cd07b42ca1a..386d57f66f28 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -738,8 +738,23 @@ relocate_new_kernel: | |||
738 | mr r5, r31 | 738 | mr r5, r31 |
739 | 739 | ||
740 | li r0, 0 | 740 | li r0, 0 |
741 | #elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x) | 741 | #elif defined(CONFIG_44x) |
742 | 742 | ||
743 | /* Save our parameters */ | ||
744 | mr r29, r3 | ||
745 | mr r30, r4 | ||
746 | mr r31, r5 | ||
747 | |||
748 | #ifdef CONFIG_PPC_47x | ||
749 | /* Check for 47x cores */ | ||
750 | mfspr r3,SPRN_PVR | ||
751 | srwi r3,r3,16 | ||
752 | cmplwi cr0,r3,PVR_476@h | ||
753 | beq setup_map_47x | ||
754 | cmplwi cr0,r3,PVR_476_ISS@h | ||
755 | beq setup_map_47x | ||
756 | #endif /* CONFIG_PPC_47x */ | ||
757 | |||
743 | /* | 758 | /* |
744 | * Code for setting up 1:1 mapping for PPC440x for KEXEC | 759 | * Code for setting up 1:1 mapping for PPC440x for KEXEC |
745 | * | 760 | * |
@@ -753,16 +768,15 @@ relocate_new_kernel: | |||
753 | * 5) Invalidate the tmp mapping. | 768 | * 5) Invalidate the tmp mapping. |
754 | * | 769 | * |
755 | * - Based on the kexec support code for FSL BookE | 770 | * - Based on the kexec support code for FSL BookE |
756 | * - Doesn't support 47x yet. | ||
757 | * | 771 | * |
758 | */ | 772 | */ |
759 | /* Save our parameters */ | ||
760 | mr r29, r3 | ||
761 | mr r30, r4 | ||
762 | mr r31, r5 | ||
763 | 773 | ||
764 | /* Load our MSR_IS and TID to MMUCR for TLB search */ | 774 | /* |
765 | mfspr r3,SPRN_PID | 775 | * Load the PID with kernel PID (0). |
776 | * Also load our MSR_IS and TID to MMUCR for TLB search. | ||
777 | */ | ||
778 | li r3, 0 | ||
779 | mtspr SPRN_PID, r3 | ||
766 | mfmsr r4 | 780 | mfmsr r4 |
767 | andi. r4,r4,MSR_IS@l | 781 | andi. r4,r4,MSR_IS@l |
768 | beq wmmucr | 782 | beq wmmucr |
@@ -900,6 +914,179 @@ next_tlb: | |||
900 | li r3, 0 | 914 | li r3, 0 |
901 | tlbwe r3, r24, PPC44x_TLB_PAGEID | 915 | tlbwe r3, r24, PPC44x_TLB_PAGEID |
902 | sync | 916 | sync |
917 | b ppc44x_map_done | ||
918 | |||
919 | #ifdef CONFIG_PPC_47x | ||
920 | |||
921 | /* 1:1 mapping for 47x */ | ||
922 | |||
923 | setup_map_47x: | ||
924 | |||
925 | /* | ||
926 | * Load the kernel pid (0) to PID and also to MMUCR[TID]. | ||
927 | * Also set the MSR IS->MMUCR STS | ||
928 | */ | ||
929 | li r3, 0 | ||
930 | mtspr SPRN_PID, r3 /* Set PID */ | ||
931 | mfmsr r4 /* Get MSR */ | ||
932 | andi. r4, r4, MSR_IS@l /* TS=1? */ | ||
933 | beq 1f /* If not, leave STS=0 */ | ||
934 | oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */ | ||
935 | 1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */ | ||
936 | sync | ||
937 | |||
938 | /* Find the entry we are running from */ | ||
939 | bl 2f | ||
940 | 2: mflr r23 | ||
941 | tlbsx r23, 0, r23 | ||
942 | tlbre r24, r23, 0 /* TLB Word 0 */ | ||
943 | tlbre r25, r23, 1 /* TLB Word 1 */ | ||
944 | tlbre r26, r23, 2 /* TLB Word 2 */ | ||
945 | |||
946 | |||
947 | /* | ||
948 | * Invalidates all the tlb entries by writing to 256 RPNs(r4) | ||
949 | * of 4k page size in all 4 ways (0-3 in r3). | ||
950 | * This would invalidate the entire UTLB including the one we are | ||
951 | * running from. However the shadow TLB entries would help us | ||
952 | * to continue the execution, until we flush them (rfi/isync). | ||
953 | */ | ||
954 | addis r3, 0, 0x8000 /* specify the way */ | ||
955 | addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */ | ||
956 | addi r5, 0, 0 | ||
957 | b clear_utlb_entry | ||
958 | |||
959 | /* Align the loop to speed things up. from head_44x.S */ | ||
960 | .align 6 | ||
961 | |||
962 | clear_utlb_entry: | ||
963 | |||
964 | tlbwe r4, r3, 0 | ||
965 | tlbwe r5, r3, 1 | ||
966 | tlbwe r5, r3, 2 | ||
967 | addis r3, r3, 0x2000 /* Increment the way */ | ||
968 | cmpwi r3, 0 | ||
969 | bne clear_utlb_entry | ||
970 | addis r3, 0, 0x8000 | ||
971 | addis r4, r4, 0x100 /* Increment the EPN */ | ||
972 | cmpwi r4, 0 | ||
973 | bne clear_utlb_entry | ||
974 | |||
975 | /* Create the entries in the other address space */ | ||
976 | mfmsr r5 | ||
977 | rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */ | ||
978 | xori r7, r7, 1 /* r7 = !TS */ | ||
979 | |||
980 | insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */ | ||
981 | |||
982 | /* | ||
983 | * write out the TLB entries for the tmp mapping | ||
984 | * Use way '0' so that we could easily invalidate it later. | ||
985 | */ | ||
986 | lis r3, 0x8000 /* Way '0' */ | ||
987 | |||
988 | tlbwe r24, r3, 0 | ||
989 | tlbwe r25, r3, 1 | ||
990 | tlbwe r26, r3, 2 | ||
991 | |||
992 | /* Update the msr to the new TS */ | ||
993 | insrwi r5, r7, 1, 26 | ||
994 | |||
995 | bl 1f | ||
996 | 1: mflr r6 | ||
997 | addi r6, r6, (2f-1b) | ||
998 | |||
999 | mtspr SPRN_SRR0, r6 | ||
1000 | mtspr SPRN_SRR1, r5 | ||
1001 | rfi | ||
1002 | |||
1003 | /* | ||
1004 | * Now we are in the tmp address space. | ||
1005 | * Create a 1:1 mapping for 0-2GiB in the original TS. | ||
1006 | */ | ||
1007 | 2: | ||
1008 | li r3, 0 | ||
1009 | li r4, 0 /* TLB Word 0 */ | ||
1010 | li r5, 0 /* TLB Word 1 */ | ||
1011 | li r6, 0 | ||
1012 | ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */ | ||
1013 | |||
1014 | li r8, 0 /* PageIndex */ | ||
1015 | |||
1016 | xori r7, r7, 1 /* revert back to original TS */ | ||
1017 | |||
1018 | write_utlb: | ||
1019 | rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */ | ||
1020 | /* ERPN = 0 as we don't use memory above 2G */ | ||
1021 | |||
1022 | mr r4, r5 /* EPN = RPN */ | ||
1023 | ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M) | ||
1024 | insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */ | ||
1025 | |||
1026 | tlbwe r4, r3, 0 /* Write out the entries */ | ||
1027 | tlbwe r5, r3, 1 | ||
1028 | tlbwe r6, r3, 2 | ||
1029 | addi r8, r8, 1 | ||
1030 | cmpwi r8, 8 /* Have we completed ? */ | ||
1031 | bne write_utlb | ||
1032 | |||
1033 | /* make sure we complete the TLB write up */ | ||
1034 | isync | ||
1035 | |||
1036 | /* | ||
1037 | * Prepare to jump to the 1:1 mapping. | ||
1038 | * 1) Extract page size of the tmp mapping | ||
1039 | * DSIZ = TLB_Word0[22:27] | ||
1040 | * 2) Calculate the physical address of the address | ||
1041 | * to jump to. | ||
1042 | */ | ||
1043 | rlwinm r10, r24, 0, 22, 27 | ||
1044 | |||
1045 | cmpwi r10, PPC47x_TLB0_4K | ||
1046 | bne 0f | ||
1047 | li r10, 0x1000 /* r10 = 4k */ | ||
1048 | bl 1f | ||
1049 | |||
1050 | 0: | ||
1051 | /* Defaults to 256M */ | ||
1052 | lis r10, 0x1000 | ||
1053 | |||
1054 | bl 1f | ||
1055 | 1: mflr r4 | ||
1056 | addi r4, r4, (2f-1b) /* virtual address of 2f */ | ||
1057 | |||
1058 | subi r11, r10, 1 /* offsetmask = Pagesize - 1 */ | ||
1059 | not r10, r11 /* Pagemask = ~(offsetmask) */ | ||
1060 | |||
1061 | and r5, r25, r10 /* Physical page */ | ||
1062 | and r6, r4, r11 /* offset within the current page */ | ||
1063 | |||
1064 | or r5, r5, r6 /* Physical address for 2f */ | ||
1065 | |||
1066 | /* Switch the TS in MSR to the original one */ | ||
1067 | mfmsr r8 | ||
1068 | insrwi r8, r7, 1, 26 | ||
1069 | |||
1070 | mtspr SPRN_SRR1, r8 | ||
1071 | mtspr SPRN_SRR0, r5 | ||
1072 | rfi | ||
1073 | |||
1074 | 2: | ||
1075 | /* Invalidate the tmp mapping */ | ||
1076 | lis r3, 0x8000 /* Way '0' */ | ||
1077 | |||
1078 | clrrwi r24, r24, 12 /* Clear the valid bit */ | ||
1079 | tlbwe r24, r3, 0 | ||
1080 | tlbwe r25, r3, 1 | ||
1081 | tlbwe r26, r3, 2 | ||
1082 | |||
1083 | /* Make sure we complete the TLB write and flush the shadow TLB */ | ||
1084 | isync | ||
1085 | |||
1086 | #endif | ||
1087 | |||
1088 | ppc44x_map_done: | ||
1089 | |||
903 | 1090 | ||
904 | /* Restore the parameters */ | 1091 | /* Restore the parameters */ |
905 | mr r3, r29 | 1092 | mr r3, r29 |
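The 47x kexec trampoline above locates its physical branch target with plain mask arithmetic: the page size of the temporary mapping yields an offset mask, the physical page comes from TLB word 1, and the in-page offset from the current virtual address. The same computation worked in C (the addresses are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t page_size  = 0x10000000u;      /* 256M tmp mapping  */
        uint32_t offsetmask = page_size - 1;    /* subi r11, r10, 1  */
        uint32_t pagemask   = ~offsetmask;      /* not  r10, r11     */

        uint32_t tlb_word1  = 0x20000000u;      /* RPN: physical page */
        uint32_t virt_2f    = 0xc0001234u;      /* virtual addr of 2f */

        uint32_t phys_page  = tlb_word1 & pagemask;   /* and r5, r25, r10 */
        uint32_t offset     = virt_2f  & offsetmask;  /* and r6, r4,  r11 */
        uint32_t srr0       = phys_page | offset;     /* or  r5, r5,  r6  */

        printf("rfi to %#x\n", srr0);           /* 0x20001234 */
        return 0;
    }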
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 0bb1f98613ba..fbe1a12dc7f1 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -36,10 +36,7 @@ struct lppaca lppaca[] = { | |||
36 | [0 ... (NR_LPPACAS-1)] = { | 36 | [0 ... (NR_LPPACAS-1)] = { |
37 | .desc = 0xd397d781, /* "LpPa" */ | 37 | .desc = 0xd397d781, /* "LpPa" */ |
38 | .size = sizeof(struct lppaca), | 38 | .size = sizeof(struct lppaca), |
39 | .dyn_proc_status = 2, | ||
40 | .decr_val = 0x00ff0000, | ||
41 | .fpregs_in_use = 1, | 39 | .fpregs_in_use = 1, |
42 | .end_of_quantum = 0xfffffffffffffffful, | ||
43 | .slb_count = 64, | 40 | .slb_count = 64, |
44 | .vmxregs_in_use = 0, | 41 | .vmxregs_in_use = 0, |
45 | .page_ins = 0, | 42 | .page_ins = 0, |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 786a2700ec2d..d1f2aafcbe8c 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -190,3 +190,7 @@ EXPORT_SYMBOL(__arch_hweight16); | |||
190 | EXPORT_SYMBOL(__arch_hweight32); | 190 | EXPORT_SYMBOL(__arch_hweight32); |
191 | EXPORT_SYMBOL(__arch_hweight64); | 191 | EXPORT_SYMBOL(__arch_hweight64); |
192 | #endif | 192 | #endif |
193 | |||
194 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
195 | EXPORT_SYMBOL_GPL(mmu_psize_defs); | ||
196 | #endif | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 4937c9690090..710f400476de 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -124,7 +124,7 @@ void enable_kernel_altivec(void) | |||
124 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | 124 | if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) |
125 | giveup_altivec(current); | 125 | giveup_altivec(current); |
126 | else | 126 | else |
127 | giveup_altivec(NULL); /* just enable AltiVec for kernel - force */ | 127 | giveup_altivec_notask(); |
128 | #else | 128 | #else |
129 | giveup_altivec(last_task_used_altivec); | 129 | giveup_altivec(last_task_used_altivec); |
130 | #endif /* CONFIG_SMP */ | 130 | #endif /* CONFIG_SMP */ |
@@ -711,18 +711,21 @@ release_thread(struct task_struct *t) | |||
711 | } | 711 | } |
712 | 712 | ||
713 | /* | 713 | /* |
714 | * This gets called before we allocate a new thread and copy | 714 | * this gets called so that we can store coprocessor state into memory and |
715 | * the current task into it. | 715 | * copy the current task into the new thread. |
716 | */ | 716 | */ |
717 | void prepare_to_copy(struct task_struct *tsk) | 717 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) |
718 | { | 718 | { |
719 | flush_fp_to_thread(current); | 719 | flush_fp_to_thread(src); |
720 | flush_altivec_to_thread(current); | 720 | flush_altivec_to_thread(src); |
721 | flush_vsx_to_thread(current); | 721 | flush_vsx_to_thread(src); |
722 | flush_spe_to_thread(current); | 722 | flush_spe_to_thread(src); |
723 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 723 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
724 | flush_ptrace_hw_breakpoint(tsk); | 724 | flush_ptrace_hw_breakpoint(src); |
725 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 725 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
726 | |||
727 | *dst = *src; | ||
728 | return 0; | ||
726 | } | 729 | } |
727 | 730 | ||
728 | /* | 731 | /* |
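With prepare_to_copy() gone, arch_dup_task_struct() does both halves in one place, and the ordering matters: the live FP/Altivec/VSX/SPE state must be flushed into the source task's memory image before *dst = *src, or the child would inherit a stale register snapshot. A stand-in C illustration of why the flush precedes the copy:

    struct task { int fp_dirty; unsigned long fpr[32]; };

    static void flush_fp_to_thread(struct task *t)
    {
        if (t->fp_dirty) {
            /* giveup_fpu() equivalent: dump live FPRs into t->fpr */
            t->fp_dirty = 0;
        }
    }

    static int arch_dup_task_struct(struct task *dst, struct task *src)
    {
        flush_fp_to_thread(src);  /* before the copy, or dst sees stale FPRs */
        *dst = *src;
        return 0;
    }

    int main(void)
    {
        struct task parent = { 1, { 0 } }, child;
        return arch_dup_task_struct(&child, &parent);  /* child.fp_dirty == 0 */
    }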
@@ -1252,37 +1255,6 @@ void __ppc64_runlatch_off(void) | |||
1252 | } | 1255 | } |
1253 | #endif /* CONFIG_PPC64 */ | 1256 | #endif /* CONFIG_PPC64 */ |
1254 | 1257 | ||
1255 | #if THREAD_SHIFT < PAGE_SHIFT | ||
1256 | |||
1257 | static struct kmem_cache *thread_info_cache; | ||
1258 | |||
1259 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) | ||
1260 | { | ||
1261 | struct thread_info *ti; | ||
1262 | |||
1263 | ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); | ||
1264 | if (unlikely(ti == NULL)) | ||
1265 | return NULL; | ||
1266 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
1267 | memset(ti, 0, THREAD_SIZE); | ||
1268 | #endif | ||
1269 | return ti; | ||
1270 | } | ||
1271 | |||
1272 | void free_thread_info(struct thread_info *ti) | ||
1273 | { | ||
1274 | kmem_cache_free(thread_info_cache, ti); | ||
1275 | } | ||
1276 | |||
1277 | void thread_info_cache_init(void) | ||
1278 | { | ||
1279 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | ||
1280 | THREAD_SIZE, 0, NULL); | ||
1281 | BUG_ON(thread_info_cache == NULL); | ||
1282 | } | ||
1283 | |||
1284 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | ||
1285 | |||
1286 | unsigned long arch_align_stack(unsigned long sp) | 1258 | unsigned long arch_align_stack(unsigned long sp) |
1287 | { | 1259 | { |
1288 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 1260 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 99860273211b..1b488e5305c5 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -680,6 +680,9 @@ static void __init early_cmdline_parse(void) | |||
680 | #define OV3_VMX 0x40 /* VMX/Altivec */ | 680 | #define OV3_VMX 0x40 /* VMX/Altivec */ |
681 | #define OV3_DFP 0x20 /* decimal FP */ | 681 | #define OV3_DFP 0x20 /* decimal FP */ |
682 | 682 | ||
683 | /* Option vector 4: IBM PAPR implementation */ | ||
684 | #define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */ | ||
685 | |||
683 | /* Option vector 5: PAPR/OF options supported */ | 686 | /* Option vector 5: PAPR/OF options supported */ |
684 | #define OV5_LPAR 0x80 /* logical partitioning supported */ | 687 | #define OV5_LPAR 0x80 /* logical partitioning supported */ |
685 | #define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */ | 688 | #define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */ |
@@ -701,6 +704,8 @@ static void __init early_cmdline_parse(void) | |||
701 | #define OV5_XCMO 0x00 | 704 | #define OV5_XCMO 0x00 |
702 | #endif | 705 | #endif |
703 | #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ | 706 | #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ |
707 | #define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */ | ||
708 | #define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */ | ||
704 | 709 | ||
705 | /* Option Vector 6: IBM PAPR hints */ | 710 | /* Option Vector 6: IBM PAPR hints */ |
706 | #define OV6_LINUX 0x02 /* Linux is our OS */ | 711 | #define OV6_LINUX 0x02 /* Linux is our OS */ |
@@ -744,11 +749,12 @@ static unsigned char ibm_architecture_vec[] = { | |||
744 | OV3_FP | OV3_VMX | OV3_DFP, | 749 | OV3_FP | OV3_VMX | OV3_DFP, |
745 | 750 | ||
746 | /* option vector 4: IBM PAPR implementation */ | 751 | /* option vector 4: IBM PAPR implementation */ |
747 | 2 - 2, /* length */ | 752 | 3 - 2, /* length */ |
748 | 0, /* don't halt */ | 753 | 0, /* don't halt */ |
754 | OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */ | ||
749 | 755 | ||
750 | /* option vector 5: PAPR/OF options */ | 756 | /* option vector 5: PAPR/OF options */ |
751 | 13 - 2, /* length */ | 757 | 18 - 2, /* length */ |
752 | 0, /* don't ignore, don't halt */ | 758 | 0, /* don't ignore, don't halt */ |
753 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | | 759 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | |
754 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, | 760 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, |
@@ -762,8 +768,13 @@ static unsigned char ibm_architecture_vec[] = { | |||
762 | * must match by the macro below. Update the definition if | 768 | * must match by the macro below. Update the definition if |
763 | * the structure layout changes. | 769 | * the structure layout changes. |
764 | */ | 770 | */ |
765 | #define IBM_ARCH_VEC_NRCORES_OFFSET 100 | 771 | #define IBM_ARCH_VEC_NRCORES_OFFSET 101 |
766 | W(NR_CPUS), /* number of cores supported */ | 772 | W(NR_CPUS), /* number of cores supported */ |
773 | 0, | ||
774 | 0, | ||
775 | 0, | ||
776 | 0, | ||
777 | OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR, | ||
767 | 778 | ||
768 | /* option vector 6: IBM PAPR hints */ | 779 | /* option vector 6: IBM PAPR hints */ |
769 | 4 - 2, /* length */ | 780 | 4 - 2, /* length */ |
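IBM_ARCH_VEC_NRCORES_OFFSET moves from 100 to 101 because option vector 4 grew by exactly one payload byte (OV4_MIN_ENT_CAP): every field after it in the flat byte array shifts by one, which is what the "must match" warning in the comment is about. The bookkeeping, with illustrative (not real) sizes:

    #include <stdio.h>

    int main(void)
    {
        int before_ov4 = 90;            /* bytes preceding OV4 (illustrative) */
        int ov4_old = 2, ov4_new = 3;   /* OV4 gains OV4_MIN_ENT_CAP          */
        int into_ov5_nrcores = 8;       /* OV5 start to W(NR_CPUS) (illustrative) */

        printf("old offset %d, new offset %d\n",
               before_ov4 + ov4_old + into_ov5_nrcores,   /* 100 */
               before_ov4 + ov4_new + into_ov5_nrcores);  /* 101 */
        return 0;
    }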
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 8d8e028893be..c10fc28b9092 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -1432,40 +1432,6 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) | |||
1432 | #endif | 1432 | #endif |
1433 | } | 1433 | } |
1434 | 1434 | ||
1435 | /* | ||
1436 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | ||
1437 | * we mark them as obsolete now, they will be removed in a future version | ||
1438 | */ | ||
1439 | static long arch_ptrace_old(struct task_struct *child, long request, | ||
1440 | unsigned long addr, unsigned long data) | ||
1441 | { | ||
1442 | void __user *datavp = (void __user *) data; | ||
1443 | |||
1444 | switch (request) { | ||
1445 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ | ||
1446 | return copy_regset_to_user(child, &user_ppc_native_view, | ||
1447 | REGSET_GPR, 0, 32 * sizeof(long), | ||
1448 | datavp); | ||
1449 | |||
1450 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ | ||
1451 | return copy_regset_from_user(child, &user_ppc_native_view, | ||
1452 | REGSET_GPR, 0, 32 * sizeof(long), | ||
1453 | datavp); | ||
1454 | |||
1455 | case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ | ||
1456 | return copy_regset_to_user(child, &user_ppc_native_view, | ||
1457 | REGSET_FPR, 0, 32 * sizeof(double), | ||
1458 | datavp); | ||
1459 | |||
1460 | case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ | ||
1461 | return copy_regset_from_user(child, &user_ppc_native_view, | ||
1462 | REGSET_FPR, 0, 32 * sizeof(double), | ||
1463 | datavp); | ||
1464 | } | ||
1465 | |||
1466 | return -EPERM; | ||
1467 | } | ||
1468 | |||
1469 | long arch_ptrace(struct task_struct *child, long request, | 1435 | long arch_ptrace(struct task_struct *child, long request, |
1470 | unsigned long addr, unsigned long data) | 1436 | unsigned long addr, unsigned long data) |
1471 | { | 1437 | { |
@@ -1687,14 +1653,6 @@ long arch_ptrace(struct task_struct *child, long request, | |||
1687 | datavp); | 1653 | datavp); |
1688 | #endif | 1654 | #endif |
1689 | 1655 | ||
1690 | /* Old reverse args ptrace calls */ | ||
1691 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ | ||
1692 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ | ||
1693 | case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ | ||
1694 | case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ | ||
1695 | ret = arch_ptrace_old(child, request, addr, data); | ||
1696 | break; | ||
1697 | |||
1698 | default: | 1656 | default: |
1699 | ret = ptrace_request(child, request, addr, data); | 1657 | ret = ptrace_request(child, request, addr, data); |
1700 | break; | 1658 | break; |
@@ -1710,7 +1668,7 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1710 | { | 1668 | { |
1711 | long ret = 0; | 1669 | long ret = 0; |
1712 | 1670 | ||
1713 | secure_computing(regs->gpr[0]); | 1671 | secure_computing_strict(regs->gpr[0]); |
1714 | 1672 | ||
1715 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 1673 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
1716 | tracehook_report_syscall_entry(regs)) | 1674 | tracehook_report_syscall_entry(regs)) |
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 469349d14a97..8c21658719d9 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c | |||
@@ -39,30 +39,6 @@ | |||
39 | * in exit.c or in signal.c. | 39 | * in exit.c or in signal.c. |
40 | */ | 40 | */ |
41 | 41 | ||
42 | /* | ||
43 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | ||
44 | * we mark them as obsolete now, they will be removed in a future version | ||
45 | */ | ||
46 | static long compat_ptrace_old(struct task_struct *child, long request, | ||
47 | long addr, long data) | ||
48 | { | ||
49 | switch (request) { | ||
50 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ | ||
51 | return copy_regset_to_user(child, | ||
52 | task_user_regset_view(current), 0, | ||
53 | 0, 32 * sizeof(compat_long_t), | ||
54 | compat_ptr(data)); | ||
55 | |||
56 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ | ||
57 | return copy_regset_from_user(child, | ||
58 | task_user_regset_view(current), 0, | ||
59 | 0, 32 * sizeof(compat_long_t), | ||
60 | compat_ptr(data)); | ||
61 | } | ||
62 | |||
63 | return -EPERM; | ||
64 | } | ||
65 | |||
66 | /* Macros to workout the correct index for the FPR in the thread struct */ | 42 | /* Macros to workout the correct index for the FPR in the thread struct */ |
67 | #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) | 43 | #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1) |
68 | #define FPRHALF(i) (((i) - PT_FPR0) & 1) | 44 | #define FPRHALF(i) (((i) - PT_FPR0) & 1) |
@@ -308,8 +284,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
308 | case PTRACE_SETVSRREGS: | 284 | case PTRACE_SETVSRREGS: |
309 | case PTRACE_GETREGS64: | 285 | case PTRACE_GETREGS64: |
310 | case PTRACE_SETREGS64: | 286 | case PTRACE_SETREGS64: |
311 | case PPC_PTRACE_GETFPREGS: | ||
312 | case PPC_PTRACE_SETFPREGS: | ||
313 | case PTRACE_KILL: | 287 | case PTRACE_KILL: |
314 | case PTRACE_SINGLESTEP: | 288 | case PTRACE_SINGLESTEP: |
315 | case PTRACE_DETACH: | 289 | case PTRACE_DETACH: |
@@ -322,12 +296,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
322 | ret = arch_ptrace(child, request, addr, data); | 296 | ret = arch_ptrace(child, request, addr, data); |
323 | break; | 297 | break; |
324 | 298 | ||
325 | /* Old reverse args ptrace calls */ | ||
326 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ | ||
327 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ | ||
328 | ret = compat_ptrace_old(child, request, addr, data); | ||
329 | break; | ||
330 | |||
331 | default: | 299 | default: |
332 | ret = compat_ptrace_request(child, request, addr, data); | 300 | ret = compat_ptrace_request(child, request, addr, data); |
333 | break; | 301 | break; |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 45eb998557f8..61f6aff25edc 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -204,10 +204,10 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, | |||
204 | 204 | ||
205 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | 205 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || |
206 | __get_user(new_ka->sa.sa_handler, &act->sa_handler) || | 206 | __get_user(new_ka->sa.sa_handler, &act->sa_handler) || |
207 | __get_user(new_ka->sa.sa_restorer, &act->sa_restorer)) | 207 | __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) || |
208 | __get_user(new_ka->sa.sa_flags, &act->sa_flags) || | ||
209 | __get_user(mask, &act->sa_mask)) | ||
208 | return -EFAULT; | 210 | return -EFAULT; |
209 | __get_user(new_ka->sa.sa_flags, &act->sa_flags); | ||
210 | __get_user(mask, &act->sa_mask); | ||
211 | siginitset(&new_ka->sa.sa_mask, mask); | 211 | siginitset(&new_ka->sa.sa_mask, mask); |
212 | return 0; | 212 | return 0; |
213 | } | 213 | } |
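The get_old_sigaction() change is a straight bug fix: the __get_user() calls for sa_flags and sa_mask had their return values dropped, so a fault while reading those fields returned success with uninitialized data. The safe pattern chains every copy into one short-circuit test, sketched here with a stand-in copy helper:

    #include <stddef.h>

    /* stand-in for __get_user(): 0 on success, -14 (-EFAULT) on fault */
    static int copyin(unsigned long *dst, const unsigned long *usrc)
    {
        if (usrc == NULL)
            return -14;
        *dst = *usrc;
        return 0;
    }

    static int get_old_sigaction(unsigned long *handler, unsigned long *flags,
                                 unsigned long *mask, const unsigned long uact[3])
    {
        /* every copy participates in the || chain; the first fault wins */
        if (copyin(handler, uact ? &uact[0] : NULL) ||
            copyin(flags,   uact ? &uact[1] : NULL) ||
            copyin(mask,    uact ? &uact[2] : NULL))
            return -14;
        return 0;
    }

    int main(void)
    {
        unsigned long h, f, m;
        return get_old_sigaction(&h, &f, &m, NULL) ? 1 : 0;  /* NULL act faults */
    }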
@@ -244,17 +244,8 @@ static inline int restore_general_regs(struct pt_regs *regs, | |||
244 | long sys_sigsuspend(old_sigset_t mask) | 244 | long sys_sigsuspend(old_sigset_t mask) |
245 | { | 245 | { |
246 | sigset_t blocked; | 246 | sigset_t blocked; |
247 | |||
248 | current->saved_sigmask = current->blocked; | ||
249 | |||
250 | mask &= _BLOCKABLE; | ||
251 | siginitset(&blocked, mask); | 247 | siginitset(&blocked, mask); |
252 | set_current_blocked(&blocked); | 248 | return sigsuspend(&blocked); |
253 | |||
254 | current->state = TASK_INTERRUPTIBLE; | ||
255 | schedule(); | ||
256 | set_restore_sigmask(); | ||
257 | return -ERESTARTNOHAND; | ||
258 | } | 249 | } |
259 | 250 | ||
260 | long sys_sigaction(int sig, struct old_sigaction __user *act, | 251 | long sys_sigaction(int sig, struct old_sigaction __user *act, |
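The sys_sigsuspend() rewrite is a straight replacement of the open-coded mask/sleep/restore sequence with the common sigsuspend() helper. A minimal sketch of what that generic helper does, reconstructed from the lines removed here (the helper itself lives in kernel/signal.c and is not part of this hunk):

    long sigsuspend(sigset_t *set)
    {
            current->saved_sigmask = current->blocked;  /* save for restore */
            set_current_blocked(set);                   /* install new mask */

            current->state = TASK_INTERRUPTIBLE;
            schedule();                                 /* sleep until a signal */
            set_restore_sigmask();                      /* old mask back on return */
            return -ERESTARTNOHAND;
    }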
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index d9f94410fd7f..e4cb34322de4 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -57,27 +57,9 @@ | |||
57 | #define DBG(fmt...) | 57 | #define DBG(fmt...) |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | |||
61 | /* Store all idle threads, this can be reused instead of creating | ||
62 | * a new thread. Also avoids complicated thread destroy functionality | ||
63 | * for idle threads. | ||
64 | */ | ||
65 | #ifdef CONFIG_HOTPLUG_CPU | 60 | #ifdef CONFIG_HOTPLUG_CPU |
66 | /* | ||
67 | * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is | ||
68 | * removed after init for !CONFIG_HOTPLUG_CPU. | ||
69 | */ | ||
70 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | ||
71 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | ||
72 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | ||
73 | |||
74 | /* State of each CPU during hotplug phases */ | 61 | /* State of each CPU during hotplug phases */ |
75 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; | 62 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
76 | |||
77 | #else | ||
78 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | ||
79 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
80 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | ||
81 | #endif | 63 | #endif |
82 | 64 | ||
83 | struct thread_info *secondary_ti; | 65 | struct thread_info *secondary_ti; |
@@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu) | |||
429 | } | 411 | } |
430 | #endif | 412 | #endif |
431 | 413 | ||
432 | struct create_idle { | 414 | static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) |
433 | struct work_struct work; | ||
434 | struct task_struct *idle; | ||
435 | struct completion done; | ||
436 | int cpu; | ||
437 | }; | ||
438 | |||
439 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
440 | { | 415 | { |
441 | struct create_idle *c_idle = | 416 | struct thread_info *ti = task_thread_info(idle); |
442 | container_of(work, struct create_idle, work); | ||
443 | |||
444 | c_idle->idle = fork_idle(c_idle->cpu); | ||
445 | complete(&c_idle->done); | ||
446 | } | ||
447 | |||
448 | static int __cpuinit create_idle(unsigned int cpu) | ||
449 | { | ||
450 | struct thread_info *ti; | ||
451 | struct create_idle c_idle = { | ||
452 | .cpu = cpu, | ||
453 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
454 | }; | ||
455 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
456 | |||
457 | c_idle.idle = get_idle_for_cpu(cpu); | ||
458 | |||
459 | /* We can't use kernel_thread since we must not | ||
460 | * reschedule the child. We use a workqueue because | ||
461 | * we want to fork from a kernel thread, not whatever | ||
462 | * userspace process happens to be trying to online us. | ||
463 | */ | ||
464 | if (!c_idle.idle) { | ||
465 | schedule_work(&c_idle.work); | ||
466 | wait_for_completion(&c_idle.done); | ||
467 | } else | ||
468 | init_idle(c_idle.idle, cpu); | ||
469 | if (IS_ERR(c_idle.idle)) { | ||
470 | pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); | ||
471 | return PTR_ERR(c_idle.idle); | ||
472 | } | ||
473 | ti = task_thread_info(c_idle.idle); | ||
474 | 417 | ||
475 | #ifdef CONFIG_PPC64 | 418 | #ifdef CONFIG_PPC64 |
476 | paca[cpu].__current = c_idle.idle; | 419 | paca[cpu].__current = idle; |
477 | paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; | 420 | paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; |
478 | #endif | 421 | #endif |
479 | ti->cpu = cpu; | 422 | ti->cpu = cpu; |
480 | current_set[cpu] = ti; | 423 | secondary_ti = current_set[cpu] = ti; |
481 | |||
482 | return 0; | ||
483 | } | 424 | } |
484 | 425 | ||
485 | int __cpuinit __cpu_up(unsigned int cpu) | 426 | int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) |
486 | { | 427 | { |
487 | int rc, c; | 428 | int rc, c; |
488 | 429 | ||
@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
490 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) | 431 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) |
491 | return -EINVAL; | 432 | return -EINVAL; |
492 | 433 | ||
493 | /* Make sure we have an idle thread */ | 434 | cpu_idle_thread_init(cpu, tidle); |
494 | rc = create_idle(cpu); | ||
495 | if (rc) | ||
496 | return rc; | ||
497 | |||
498 | secondary_ti = current_set[cpu]; | ||
499 | 435 | ||
500 | /* Make sure callin-map entry is 0 (can be leftover from a CPU | 436 |
501 | * hotplug | 437 | * hotplug |
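This hunk is the powerpc side of the generic idle-thread rework: with GENERIC_SMP_IDLE_THREAD selected (see the Kconfig hunk earlier), forking and caching the idle task moves into common code, and __cpu_up() now receives the task as a parameter. A sketch of the resulting call path in the generic hotplug code (names as used by that series, not part of this diff):

    /* kernel/cpu.c, _cpu_up() (sketch): */
    struct task_struct *idle = idle_thread_get(cpu);  /* forked once, reused */
    if (IS_ERR(idle))
            return PTR_ERR(idle);
    ret = __cpu_up(cpu, idle);                        /* arch hook shown above */

The arch hook is reduced to cpu_idle_thread_init(), which just points the paca and secondary_ti at the supplied task.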
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 2c42cd72d0f5..99a995c2a3f2 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt, | |||
100 | static void decrementer_set_mode(enum clock_event_mode mode, | 100 | static void decrementer_set_mode(enum clock_event_mode mode, |
101 | struct clock_event_device *dev); | 101 | struct clock_event_device *dev); |
102 | 102 | ||
103 | static struct clock_event_device decrementer_clockevent = { | 103 | struct clock_event_device decrementer_clockevent = { |
104 | .name = "decrementer", | 104 | .name = "decrementer", |
105 | .rating = 200, | 105 | .rating = 200, |
106 | .irq = 0, | 106 | .irq = 0, |
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = { | |||
108 | .set_mode = decrementer_set_mode, | 108 | .set_mode = decrementer_set_mode, |
109 | .features = CLOCK_EVT_FEAT_ONESHOT, | 109 | .features = CLOCK_EVT_FEAT_ONESHOT, |
110 | }; | 110 | }; |
111 | EXPORT_SYMBOL(decrementer_clockevent); | ||
111 | 112 | ||
112 | DEFINE_PER_CPU(u64, decrementers_next_tb); | 113 | DEFINE_PER_CPU(u64, decrementers_next_tb); |
113 | static DEFINE_PER_CPU(struct clock_event_device, decrementers); | 114 | static DEFINE_PER_CPU(struct clock_event_device, decrementers); |
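decrementer_clockevent is made non-static and exported so that modular code can reuse its calibrated mult/shift rather than re-deriving the timebase conversion, presumably for the KVM changes elsewhere in this series. A sketch of such a use (hypothetical caller):

    #include <linux/clockchips.h>

    /* convert a decrementer delta (timebase ticks) to nanoseconds */
    u64 ns = clockevent_delta2ns(delta, &decrementer_clockevent);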
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6aa0c663e247..158972341a2d 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
248 | addr, regs->nip, regs->link, code); | 248 | addr, regs->nip, regs->link, code); |
249 | } | 249 | } |
250 | 250 | ||
251 | if (!arch_irq_disabled_regs(regs)) | 251 | if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) |
252 | local_irq_enable(); | 252 | local_irq_enable(); |
253 | 253 | ||
254 | memset(&info, 0, sizeof(info)); | 254 | memset(&info, 0, sizeof(info)); |
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1019 | return; | 1019 | return; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | local_irq_enable(); | 1022 | /* We restore the interrupt state now */ |
1023 | if (!arch_irq_disabled_regs(regs)) | ||
1024 | local_irq_enable(); | ||
1023 | 1025 | ||
1024 | #ifdef CONFIG_MATH_EMULATION | 1026 | #ifdef CONFIG_MATH_EMULATION |
1025 | /* (reason & REASON_ILLEGAL) would be the obvious thing here, | 1027 | /* (reason & REASON_ILLEGAL) would be the obvious thing here, |
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs) | |||
1069 | { | 1071 | { |
1070 | int sig, code, fixed = 0; | 1072 | int sig, code, fixed = 0; |
1071 | 1073 | ||
1074 | /* We restore the interrupt state now */ | ||
1075 | if (!arch_irq_disabled_regs(regs)) | ||
1076 | local_irq_enable(); | ||
1077 | |||
1072 | /* we don't implement logging of alignment exceptions */ | 1078 | /* we don't implement logging of alignment exceptions */ |
1073 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) | 1079 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) |
1074 | fixed = fix_alignment(regs); | 1080 | fixed = fix_alignment(regs); |
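All three hunks apply the same rule: an exception handler should restore the interrupt state of the interrupted context rather than unconditionally enabling interrupts, otherwise a fault taken with IRQs soft-disabled would turn them back on behind the kernel's back. The pattern, written out as a hypothetical helper:

    /* hypothetical helper capturing the pattern used above */
    static inline void restore_irq_state(struct pt_regs *regs)
    {
            if (!arch_irq_disabled_regs(regs))  /* IRQs were on before the trap */
                    local_irq_enable();
    }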
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 4d5a3edff49e..e830289d2e48 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -89,6 +89,16 @@ _GLOBAL(load_up_altivec) | |||
89 | /* restore registers and return */ | 89 | /* restore registers and return */ |
90 | blr | 90 | blr |
91 | 91 | ||
92 | _GLOBAL(giveup_altivec_notask) | ||
93 | mfmsr r3 | ||
94 | andis. r4,r3,MSR_VEC@h | ||
95 | bnelr /* Already enabled? */ | ||
96 | oris r3,r3,MSR_VEC@h | ||
97 | SYNC | ||
98 | MTMSRD(r3) /* enable use of VMX now */ | ||
99 | isync | ||
100 | blr | ||
101 | |||
92 | /* | 102 | /* |
93 | * giveup_altivec(tsk) | 103 | * giveup_altivec(tsk) |
94 | * Disable VMX for the task given as the argument, | 104 | * Disable VMX for the task given as the argument, |
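giveup_altivec_notask enables VMX in the current MSR without touching any task state, for callers that only need the unit live. A C-level sketch of what the assembly does (illustrative only; the real code stays in assembly to control the SYNC/isync sequencing):

    unsigned long msr = mfmsr();
    if (!(msr & MSR_VEC))         /* bnelr: return if already enabled */
            mtmsr(msr | MSR_VEC); /* SYNC / MTMSRD / isync in the asm */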
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index a3a99901c8ec..cb87301ccd55 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -14,7 +14,9 @@ | |||
14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/cpu.h> | ||
17 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/delay.h> | ||
18 | #include <linux/stat.h> | 20 | #include <linux/stat.h> |
19 | #include <linux/device.h> | 21 | #include <linux/device.h> |
20 | #include <linux/init.h> | 22 | #include <linux/init.h> |
@@ -709,13 +711,26 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev) | |||
709 | struct vio_driver *viodrv = to_vio_driver(dev->driver); | 711 | struct vio_driver *viodrv = to_vio_driver(dev->driver); |
710 | unsigned long flags; | 712 | unsigned long flags; |
711 | size_t size; | 713 | size_t size; |
714 | bool dma_capable = false; | ||
715 | |||
716 | /* A device requires entitlement if it has a DMA window property */ | ||
717 | switch (viodev->family) { | ||
718 | case VDEVICE: | ||
719 | if (of_get_property(viodev->dev.of_node, | ||
720 | "ibm,my-dma-window", NULL)) | ||
721 | dma_capable = true; | ||
722 | break; | ||
723 | case PFO: | ||
724 | dma_capable = false; | ||
725 | break; | ||
726 | default: | ||
727 | dev_warn(dev, "unknown device family: %d\n", viodev->family); | ||
728 | BUG(); | ||
729 | break; | ||
730 | } | ||
712 | 731 | ||
713 | /* | 732 | /* Configure entitlement for the device. */ |
714 | * Check to see that device has a DMA window and configure | 733 | if (dma_capable) { |
715 | * entitlement for the device. | ||
716 | */ | ||
717 | if (of_get_property(viodev->dev.of_node, | ||
718 | "ibm,my-dma-window", NULL)) { | ||
719 | /* Check that the driver is CMO enabled and get desired DMA */ | 734 | /* Check that the driver is CMO enabled and get desired DMA */ |
720 | if (!viodrv->get_desired_dma) { | 735 | if (!viodrv->get_desired_dma) { |
721 | dev_err(dev, "%s: device driver does not support CMO\n", | 736 | dev_err(dev, "%s: device driver does not support CMO\n", |
@@ -1050,6 +1065,94 @@ static void vio_cmo_sysfs_init(void) { } | |||
1050 | EXPORT_SYMBOL(vio_cmo_entitlement_update); | 1065 | EXPORT_SYMBOL(vio_cmo_entitlement_update); |
1051 | EXPORT_SYMBOL(vio_cmo_set_dev_desired); | 1066 | EXPORT_SYMBOL(vio_cmo_set_dev_desired); |
1052 | 1067 | ||
1068 | |||
1069 | /* | ||
1070 | * Platform Facilities Option (PFO) support | ||
1071 | */ | ||
1072 | |||
1073 | /** | ||
1074 | * vio_h_cop_sync - Perform a synchronous PFO co-processor operation | ||
1075 | * | ||
1076 | * @vdev - Pointer to a struct vio_dev for device | ||
1077 | * @op - Pointer to a struct vio_pfo_op for the operation parameters | ||
1078 | * | ||
1079 | * Calls the hypervisor to synchronously perform the PFO operation | ||
1080 | * described in @op. In the case of a busy response from the hypervisor, | ||
1081 | * the operation will be re-submitted indefinitely unless a non-zero timeout | ||
1082 | * is specified or an error occurs. The timeout places a limit on when to | ||
1083 | * stop re-submitting a operation, the total time can be exceeded if an | ||
1084 | * operation is in progress. | ||
1085 | * | ||
1086 | * On return, op->hcall_err is set to the return from the last | ||
1087 | * h_cop_op call, or to 0 if an error not involving the h_call | ||
1088 | * was encountered. | ||
1089 | * | ||
1090 | * Returns: | ||
1091 | * 0 on success, | ||
1092 | * -EINVAL if the h_call fails due to an invalid parameter, | ||
1093 | * -E2BIG if the h_call cannot be performed synchronously, | ||
1094 | * -EBUSY if a timeout is specified and has elapsed, | ||
1095 | * -EACCES if the memory area for data/status has been rescinded, or | ||
1096 | * -EPERM if a hardware fault has been indicated | ||
1097 | */ | ||
1098 | int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op) | ||
1099 | { | ||
1100 | struct device *dev = &vdev->dev; | ||
1101 | unsigned long deadline = 0; | ||
1102 | long hret = 0; | ||
1103 | int ret = 0; | ||
1104 | |||
1105 | if (op->timeout) | ||
1106 | deadline = jiffies + msecs_to_jiffies(op->timeout); | ||
1107 | |||
1108 | while (true) { | ||
1109 | hret = plpar_hcall_norets(H_COP, op->flags, | ||
1110 | vdev->resource_id, | ||
1111 | op->in, op->inlen, op->out, | ||
1112 | op->outlen, op->csbcpb); | ||
1113 | |||
1114 | if (hret == H_SUCCESS || | ||
1115 | (hret != H_NOT_ENOUGH_RESOURCES && | ||
1116 | hret != H_BUSY && hret != H_RESOURCE) || | ||
1117 | (op->timeout && time_after(jiffies, deadline))) | ||
1118 | break; | ||
1119 | |||
1120 | dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret); | ||
1121 | } | ||
1122 | |||
1123 | switch (hret) { | ||
1124 | case H_SUCCESS: | ||
1125 | ret = 0; | ||
1126 | break; | ||
1127 | case H_OP_MODE: | ||
1128 | case H_TOO_BIG: | ||
1129 | ret = -E2BIG; | ||
1130 | break; | ||
1131 | case H_RESCINDED: | ||
1132 | ret = -EACCES; | ||
1133 | break; | ||
1134 | case H_HARDWARE: | ||
1135 | ret = -EPERM; | ||
1136 | break; | ||
1137 | case H_NOT_ENOUGH_RESOURCES: | ||
1138 | case H_RESOURCE: | ||
1139 | case H_BUSY: | ||
1140 | ret = -EBUSY; | ||
1141 | break; | ||
1142 | default: | ||
1143 | ret = -EINVAL; | ||
1144 | break; | ||
1145 | } | ||
1146 | |||
1147 | if (ret) | ||
1148 | dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n", | ||
1149 | __func__, ret, hret); | ||
1150 | |||
1151 | op->hcall_err = hret; | ||
1152 | return ret; | ||
1153 | } | ||
1154 | EXPORT_SYMBOL(vio_h_cop_sync); | ||
1155 | |||
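A usage sketch for vio_h_cop_sync(), modeled on the PFO drivers this interface is added for (buffer setup and field types here are illustrative, not from this patch; the struct fields match the code above):

    struct vio_pfo_op op = {
            .flags   = 0,
            .in      = in_paddr,        /* real address of input buffer */
            .inlen   = in_len,
            .out     = out_paddr,       /* real address of output buffer */
            .outlen  = out_len,
            .csbcpb  = __pa(csbcpb),    /* co-processor status/control block */
            .timeout = 500,             /* ms; 0 means retry indefinitely */
    };
    int rc = vio_h_cop_sync(viodev, &op);
    if (rc)
            dev_err(&viodev->dev, "H_COP failed: rc=%d hcall_err=%ld\n",
                    rc, op.hcall_err);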
1053 | static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) | 1156 | static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) |
1054 | { | 1157 | { |
1055 | const unsigned char *dma_window; | 1158 | const unsigned char *dma_window; |
@@ -1211,35 +1314,87 @@ static void __devinit vio_dev_release(struct device *dev) | |||
1211 | struct vio_dev *vio_register_device_node(struct device_node *of_node) | 1314 | struct vio_dev *vio_register_device_node(struct device_node *of_node) |
1212 | { | 1315 | { |
1213 | struct vio_dev *viodev; | 1316 | struct vio_dev *viodev; |
1317 | struct device_node *parent_node; | ||
1214 | const unsigned int *unit_address; | 1318 | const unsigned int *unit_address; |
1319 | const unsigned int *pfo_resid = NULL; | ||
1320 | enum vio_dev_family family; | ||
1321 | const char *of_node_name = of_node->name ? of_node->name : "<unknown>"; | ||
1215 | 1322 | ||
1216 | /* we need the 'device_type' property, in order to match with drivers */ | 1323 | /* |
1217 | if (of_node->type == NULL) { | 1324 | * Determine if this node is under the /vdevice node or under the |
1218 | printk(KERN_WARNING "%s: node %s missing 'device_type'\n", | 1325 | * /ibm,platform-facilities node. This decides the device's family. |
1219 | __func__, | 1326 | */ |
1220 | of_node->name ? of_node->name : "<unknown>"); | 1327 | parent_node = of_get_parent(of_node); |
1328 | if (parent_node) { | ||
1329 | if (!strcmp(parent_node->full_name, "/ibm,platform-facilities")) | ||
1330 | family = PFO; | ||
1331 | else if (!strcmp(parent_node->full_name, "/vdevice")) | ||
1332 | family = VDEVICE; | ||
1333 | else { | ||
1334 | pr_warn("%s: parent(%s) of %s not recognized.\n", | ||
1335 | __func__, | ||
1336 | parent_node->full_name, | ||
1337 | of_node_name); | ||
1338 | of_node_put(parent_node); | ||
1339 | return NULL; | ||
1340 | } | ||
1341 | of_node_put(parent_node); | ||
1342 | } else { | ||
1343 | pr_warn("%s: could not determine the parent of node %s.\n", | ||
1344 | __func__, of_node_name); | ||
1221 | return NULL; | 1345 | return NULL; |
1222 | } | 1346 | } |
1223 | 1347 | ||
1224 | unit_address = of_get_property(of_node, "reg", NULL); | 1348 | if (family == PFO) { |
1225 | if (unit_address == NULL) { | 1349 | if (of_get_property(of_node, "interrupt-controller", NULL)) { |
1226 | printk(KERN_WARNING "%s: node %s missing 'reg'\n", | 1350 | pr_debug("%s: Skipping the interrupt controller %s.\n", |
1227 | __func__, | 1351 | __func__, of_node_name); |
1228 | of_node->name ? of_node->name : "<unknown>"); | 1352 | return NULL; |
1229 | return NULL; | 1353 | } |
1230 | } | 1354 | } |
1231 | 1355 | ||
1232 | /* allocate a vio_dev for this node */ | 1356 | /* allocate a vio_dev for this node */ |
1233 | viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); | 1357 | viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL); |
1234 | if (viodev == NULL) | 1358 | if (viodev == NULL) { |
1359 | pr_warn("%s: allocation failure for VIO device.\n", __func__); | ||
1235 | return NULL; | 1360 | return NULL; |
1361 | } | ||
1236 | 1362 | ||
1237 | viodev->irq = irq_of_parse_and_map(of_node, 0); | 1363 | /* we need the 'device_type' property, in order to match with drivers */ |
1364 | viodev->family = family; | ||
1365 | if (viodev->family == VDEVICE) { | ||
1366 | if (of_node->type != NULL) | ||
1367 | viodev->type = of_node->type; | ||
1368 | else { | ||
1369 | pr_warn("%s: node %s is missing the 'device_type' " | ||
1370 | "property.\n", __func__, of_node_name); | ||
1371 | goto out; | ||
1372 | } | ||
1373 | |||
1374 | unit_address = of_get_property(of_node, "reg", NULL); | ||
1375 | if (unit_address == NULL) { | ||
1376 | pr_warn("%s: node %s missing 'reg'\n", | ||
1377 | __func__, of_node_name); | ||
1378 | goto out; | ||
1379 | } | ||
1380 | dev_set_name(&viodev->dev, "%x", *unit_address); | ||
1381 | viodev->irq = irq_of_parse_and_map(of_node, 0); | ||
1382 | viodev->unit_address = *unit_address; | ||
1383 | } else { | ||
1384 | /* PFO devices need their resource_id for submitting COP_OPs. | ||
1385 | * This is an optional field for devices, but is required when | ||
1386 | * performing synchronous ops */ | ||
1387 | pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL); | ||
1388 | if (pfo_resid != NULL) | ||
1389 | viodev->resource_id = *pfo_resid; | ||
1390 | |||
1391 | unit_address = NULL; | ||
1392 | dev_set_name(&viodev->dev, "%s", of_node_name); | ||
1393 | viodev->type = of_node_name; | ||
1394 | viodev->irq = 0; | ||
1395 | } | ||
1238 | 1396 | ||
1239 | dev_set_name(&viodev->dev, "%x", *unit_address); | ||
1240 | viodev->name = of_node->name; | 1397 | viodev->name = of_node->name; |
1241 | viodev->type = of_node->type; | ||
1242 | viodev->unit_address = *unit_address; | ||
1243 | viodev->dev.of_node = of_node_get(of_node); | 1398 | viodev->dev.of_node = of_node_get(of_node); |
1244 | 1399 | ||
1245 | if (firmware_has_feature(FW_FEATURE_CMO)) | 1400 | if (firmware_has_feature(FW_FEATURE_CMO)) |
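One visible consequence of the two branches above is the device naming on the vio bus (a sketch; the example names are illustrative, not from this patch):

    /* VDEVICE: named by unit address from the "reg" property */
    dev_set_name(&viodev->dev, "%x", *unit_address);  /* e.g. "71000002" */

    /* PFO: named after the device-tree node itself */
    dev_set_name(&viodev->dev, "%s", of_node_name);   /* e.g. "ibm,compression-v1" */

vio_find_node() further down mirrors this split when reconstructing the kobject name from a device-tree node.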
@@ -1267,16 +1422,51 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) | |||
1267 | } | 1422 | } |
1268 | 1423 | ||
1269 | return viodev; | 1424 | return viodev; |
1425 | |||
1426 | out: /* Use this exit point for any return prior to device_register */ | ||
1427 | kfree(viodev); | ||
1428 | |||
1429 | return NULL; | ||
1270 | } | 1430 | } |
1271 | EXPORT_SYMBOL(vio_register_device_node); | 1431 | EXPORT_SYMBOL(vio_register_device_node); |
1272 | 1432 | ||
1433 | /* | ||
1434 | * vio_bus_scan_register_devices - Scan OF and register each child device | ||
1435 | * @root_name - OF node name for the root of the subtree to search. | ||
1436 | * This must be non-NULL | ||
1437 | * | ||
1438 | * Starting from the root node provided, register the device node for | ||
1439 | * each child beneath the root. | ||
1440 | */ | ||
1441 | static void vio_bus_scan_register_devices(char *root_name) | ||
1442 | { | ||
1443 | struct device_node *node_root, *node_child; | ||
1444 | |||
1445 | if (!root_name) | ||
1446 | return; | ||
1447 | |||
1448 | node_root = of_find_node_by_name(NULL, root_name); | ||
1449 | if (node_root) { | ||
1450 | |||
1451 | /* | ||
1452 | * Create struct vio_devices for each virtual device in | ||
1453 | * the device tree. Drivers will associate with them later. | ||
1454 | */ | ||
1455 | node_child = of_get_next_child(node_root, NULL); | ||
1456 | while (node_child) { | ||
1457 | vio_register_device_node(node_child); | ||
1458 | node_child = of_get_next_child(node_root, node_child); | ||
1459 | } | ||
1460 | of_node_put(node_root); | ||
1461 | } | ||
1462 | } | ||
1463 | |||
1273 | /** | 1464 | /** |
1274 | * vio_bus_init: - Initialize the virtual IO bus | 1465 | * vio_bus_init: - Initialize the virtual IO bus |
1275 | */ | 1466 | */ |
1276 | static int __init vio_bus_init(void) | 1467 | static int __init vio_bus_init(void) |
1277 | { | 1468 | { |
1278 | int err; | 1469 | int err; |
1279 | struct device_node *node_vroot; | ||
1280 | 1470 | ||
1281 | if (firmware_has_feature(FW_FEATURE_CMO)) | 1471 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1282 | vio_cmo_sysfs_init(); | 1472 | vio_cmo_sysfs_init(); |
@@ -1301,19 +1491,8 @@ static int __init vio_bus_init(void) | |||
1301 | if (firmware_has_feature(FW_FEATURE_CMO)) | 1491 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1302 | vio_cmo_bus_init(); | 1492 | vio_cmo_bus_init(); |
1303 | 1493 | ||
1304 | node_vroot = of_find_node_by_name(NULL, "vdevice"); | 1494 | vio_bus_scan_register_devices("vdevice"); |
1305 | if (node_vroot) { | 1495 | vio_bus_scan_register_devices("ibm,platform-facilities"); |
1306 | struct device_node *of_node; | ||
1307 | |||
1308 | /* | ||
1309 | * Create struct vio_devices for each virtual device in | ||
1310 | * the device tree. Drivers will associate with them later. | ||
1311 | */ | ||
1312 | for (of_node = node_vroot->child; of_node != NULL; | ||
1313 | of_node = of_node->sibling) | ||
1314 | vio_register_device_node(of_node); | ||
1315 | of_node_put(node_vroot); | ||
1316 | } | ||
1317 | 1496 | ||
1318 | return 0; | 1497 | return 0; |
1319 | } | 1498 | } |
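The new scan helper also replaces the open-coded ->child/->sibling walk with of_get_next_child(), which takes and drops node references across iterations. The equivalent kernel idiom, for reference (a sketch, not part of this patch):

    struct device_node *child;

    for_each_child_of_node(node_root, child)  /* refcount-safe iteration */
            vio_register_device_node(child);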
@@ -1436,12 +1615,28 @@ struct vio_dev *vio_find_node(struct device_node *vnode) | |||
1436 | { | 1615 | { |
1437 | const uint32_t *unit_address; | 1616 | const uint32_t *unit_address; |
1438 | char kobj_name[20]; | 1617 | char kobj_name[20]; |
1618 | struct device_node *vnode_parent; | ||
1619 | const char *dev_type; | ||
1620 | |||
1621 | vnode_parent = of_get_parent(vnode); | ||
1622 | if (!vnode_parent) | ||
1623 | return NULL; | ||
1624 | |||
1625 | dev_type = of_get_property(vnode_parent, "device_type", NULL); | ||
1626 | of_node_put(vnode_parent); | ||
1627 | if (!dev_type) | ||
1628 | return NULL; | ||
1439 | 1629 | ||
1440 | /* construct the kobject name from the device node */ | 1630 | /* construct the kobject name from the device node */ |
1441 | unit_address = of_get_property(vnode, "reg", NULL); | 1631 | if (!strcmp(dev_type, "vdevice")) { |
1442 | if (!unit_address) | 1632 | unit_address = of_get_property(vnode, "reg", NULL); |
1633 | if (!unit_address) | ||
1634 | return NULL; | ||
1635 | snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); | ||
1636 | } else if (!strcmp(dev_type, "ibm,platform-facilities")) | ||
1637 | snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name); | ||
1638 | else | ||
1443 | return NULL; | 1639 | return NULL; |
1444 | snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); | ||
1445 | 1640 | ||
1446 | return vio_find_name(kobj_name); | 1641 | return vio_find_name(kobj_name); |
1447 | } | 1642 | } |
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 7b612a76c701..50e7dbc7356c 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c | |||
@@ -29,15 +29,18 @@ | |||
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | 30 | ||
31 | #include "44x_tlb.h" | 31 | #include "44x_tlb.h" |
32 | #include "booke.h" | ||
32 | 33 | ||
33 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 34 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
34 | { | 35 | { |
36 | kvmppc_booke_vcpu_load(vcpu, cpu); | ||
35 | kvmppc_44x_tlb_load(vcpu); | 37 | kvmppc_44x_tlb_load(vcpu); |
36 | } | 38 | } |
37 | 39 | ||
38 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 40 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
39 | { | 41 | { |
40 | kvmppc_44x_tlb_put(vcpu); | 42 | kvmppc_44x_tlb_put(vcpu); |
43 | kvmppc_booke_vcpu_put(vcpu); | ||
41 | } | 44 | } |
42 | 45 | ||
43 | int kvmppc_core_check_processor_compat(void) | 46 | int kvmppc_core_check_processor_compat(void) |
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
160 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | 163 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); |
161 | } | 164 | } |
162 | 165 | ||
166 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
167 | { | ||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
172 | { | ||
173 | } | ||
174 | |||
163 | static int __init kvmppc_44x_init(void) | 175 | static int __init kvmppc_44x_init(void) |
164 | { | 176 | { |
165 | int r; | 177 | int r; |
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 549bb2c9a47a..c8c61578fdfc 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c | |||
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
37 | unsigned int inst, int *advance) | 37 | unsigned int inst, int *advance) |
38 | { | 38 | { |
39 | int emulated = EMULATE_DONE; | 39 | int emulated = EMULATE_DONE; |
40 | int dcrn; | 40 | int dcrn = get_dcrn(inst); |
41 | int ra; | 41 | int ra = get_ra(inst); |
42 | int rb; | 42 | int rb = get_rb(inst); |
43 | int rc; | 43 | int rc = get_rc(inst); |
44 | int rs; | 44 | int rs = get_rs(inst); |
45 | int rt; | 45 | int rt = get_rt(inst); |
46 | int ws; | 46 | int ws = get_ws(inst); |
47 | 47 | ||
48 | switch (get_op(inst)) { | 48 | switch (get_op(inst)) { |
49 | case 31: | 49 | case 31: |
50 | switch (get_xop(inst)) { | 50 | switch (get_xop(inst)) { |
51 | 51 | ||
52 | case XOP_MFDCR: | 52 | case XOP_MFDCR: |
53 | dcrn = get_dcrn(inst); | ||
54 | rt = get_rt(inst); | ||
55 | |||
56 | /* The guest may access CPR0 registers to determine the timebase | 53 | /* The guest may access CPR0 registers to determine the timebase |
57 | * frequency, and it must know the real host frequency because it | 54 | * frequency, and it must know the real host frequency because it |
58 | * can directly access the timebase registers. | 55 | * can directly access the timebase registers. |
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
88 | break; | 85 | break; |
89 | 86 | ||
90 | case XOP_MTDCR: | 87 | case XOP_MTDCR: |
91 | dcrn = get_dcrn(inst); | ||
92 | rs = get_rs(inst); | ||
93 | |||
94 | /* emulate some access in kernel */ | 88 | /* emulate some access in kernel */ |
95 | switch (dcrn) { | 89 | switch (dcrn) { |
96 | case DCRN_CPR0_CONFIG_ADDR: | 90 | case DCRN_CPR0_CONFIG_ADDR: |
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
108 | break; | 102 | break; |
109 | 103 | ||
110 | case XOP_TLBWE: | 104 | case XOP_TLBWE: |
111 | ra = get_ra(inst); | ||
112 | rs = get_rs(inst); | ||
113 | ws = get_ws(inst); | ||
114 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); | 105 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); |
115 | break; | 106 | break; |
116 | 107 | ||
117 | case XOP_TLBSX: | 108 | case XOP_TLBSX: |
118 | rt = get_rt(inst); | ||
119 | ra = get_ra(inst); | ||
120 | rb = get_rb(inst); | ||
121 | rc = get_rc(inst); | ||
122 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); | 109 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); |
123 | break; | 110 | break; |
124 | 111 | ||
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
141 | return emulated; | 128 | return emulated; |
142 | } | 129 | } |
143 | 130 | ||
144 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 131 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
145 | { | 132 | { |
146 | int emulated = EMULATE_DONE; | 133 | int emulated = EMULATE_DONE; |
147 | 134 | ||
148 | switch (sprn) { | 135 | switch (sprn) { |
149 | case SPRN_PID: | 136 | case SPRN_PID: |
150 | kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break; | 137 | kvmppc_set_pid(vcpu, spr_val); break; |
151 | case SPRN_MMUCR: | 138 | case SPRN_MMUCR: |
152 | vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break; | 139 | vcpu->arch.mmucr = spr_val; break; |
153 | case SPRN_CCR0: | 140 | case SPRN_CCR0: |
154 | vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break; | 141 | vcpu->arch.ccr0 = spr_val; break; |
155 | case SPRN_CCR1: | 142 | case SPRN_CCR1: |
156 | vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break; | 143 | vcpu->arch.ccr1 = spr_val; break; |
157 | default: | 144 | default: |
158 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 145 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); |
159 | } | 146 | } |
160 | 147 | ||
161 | return emulated; | 148 | return emulated; |
162 | } | 149 | } |
163 | 150 | ||
164 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 151 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
165 | { | 152 | { |
166 | int emulated = EMULATE_DONE; | 153 | int emulated = EMULATE_DONE; |
167 | 154 | ||
168 | switch (sprn) { | 155 | switch (sprn) { |
169 | case SPRN_PID: | 156 | case SPRN_PID: |
170 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break; | 157 | *spr_val = vcpu->arch.pid; break; |
171 | case SPRN_MMUCR: | 158 | case SPRN_MMUCR: |
172 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break; | 159 | *spr_val = vcpu->arch.mmucr; break; |
173 | case SPRN_CCR0: | 160 | case SPRN_CCR0: |
174 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break; | 161 | *spr_val = vcpu->arch.ccr0; break; |
175 | case SPRN_CCR1: | 162 | case SPRN_CCR1: |
176 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break; | 163 | *spr_val = vcpu->arch.ccr1; break; |
177 | default: | 164 | default: |
178 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 165 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); |
179 | } | 166 | } |
180 | 167 | ||
181 | return emulated; | 168 | return emulated; |
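The mtspr/mfspr emulation hooks now take the SPR value (or a pointer to receive it) instead of a GPR index, so the GPR access happens exactly once in the generic dispatcher. A sketch of the new calling contract (the dispatcher lives in emulate.c, outside this hunk):

    ulong spr_val;

    /* mfspr: core fills spr_val, dispatcher writes the GPR */
    emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);
    if (emulated == EMULATE_DONE)
            kvmppc_set_gpr(vcpu, rt, spr_val);

    /* mtspr: dispatcher reads the GPR, core consumes the value */
    emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
                                         kvmppc_get_gpr(vcpu, rs));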
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 8f64709ae331..f4dacb9c57fa 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR | |||
90 | depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV | 90 | depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV |
91 | select KVM_BOOK3S_PR | 91 | select KVM_BOOK3S_PR |
92 | 92 | ||
93 | config KVM_BOOKE_HV | ||
94 | bool | ||
95 | |||
93 | config KVM_440 | 96 | config KVM_440 |
94 | bool "KVM support for PowerPC 440 processors" | 97 | bool "KVM support for PowerPC 440 processors" |
95 | depends on EXPERIMENTAL && 44x | 98 | depends on EXPERIMENTAL && 44x |
@@ -106,7 +109,7 @@ config KVM_440 | |||
106 | 109 | ||
107 | config KVM_EXIT_TIMING | 110 | config KVM_EXIT_TIMING |
108 | bool "Detailed exit timing" | 111 | bool "Detailed exit timing" |
109 | depends on KVM_440 || KVM_E500 | 112 | depends on KVM_440 || KVM_E500V2 || KVM_E500MC |
110 | ---help--- | 113 | ---help--- |
111 | Calculate elapsed time for every exit/enter cycle. A per-vcpu | 114 | Calculate elapsed time for every exit/enter cycle. A per-vcpu |
112 | report is available in debugfs kvm/vm#_vcpu#_timing. | 115 | report is available in debugfs kvm/vm#_vcpu#_timing. |
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING | |||
115 | 118 | ||
116 | If unsure, say N. | 119 | If unsure, say N. |
117 | 120 | ||
118 | config KVM_E500 | 121 | config KVM_E500V2 |
119 | bool "KVM support for PowerPC E500 processors" | 122 | bool "KVM support for PowerPC E500v2 processors" |
120 | depends on EXPERIMENTAL && E500 | 123 | depends on EXPERIMENTAL && E500 && !PPC_E500MC |
121 | select KVM | 124 | select KVM |
122 | select KVM_MMIO | 125 | select KVM_MMIO |
123 | ---help--- | 126 | ---help--- |
124 | Support running unmodified E500 guest kernels in virtual machines on | 127 | Support running unmodified E500 guest kernels in virtual machines on |
125 | E500 host processors. | 128 | E500v2 host processors. |
129 | |||
130 | This module provides access to the hardware capabilities through | ||
131 | a character device node named /dev/kvm. | ||
132 | |||
133 | If unsure, say N. | ||
134 | |||
135 | config KVM_E500MC | ||
136 | bool "KVM support for PowerPC E500MC/E5500 processors" | ||
137 | depends on EXPERIMENTAL && PPC_E500MC | ||
138 | select KVM | ||
139 | select KVM_MMIO | ||
140 | select KVM_BOOKE_HV | ||
141 | ---help--- | ||
142 | Support running unmodified E500MC/E5500 (32-bit) guest kernels in | ||
143 | virtual machines on E500MC/E5500 host processors. | ||
126 | 144 | ||
127 | This module provides access to the hardware capabilities through | 145 | This module provides access to the hardware capabilities through |
128 | a character device node named /dev/kvm. | 146 | a character device node named /dev/kvm. |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 3688aeecc4b2..c2a08636e6d4 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -36,7 +36,17 @@ kvm-e500-objs := \ | |||
36 | e500.o \ | 36 | e500.o \ |
37 | e500_tlb.o \ | 37 | e500_tlb.o \ |
38 | e500_emulate.o | 38 | e500_emulate.o |
39 | kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs) | 39 | kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs) |
40 | |||
41 | kvm-e500mc-objs := \ | ||
42 | $(common-objs-y) \ | ||
43 | booke.o \ | ||
44 | booke_emulate.o \ | ||
45 | bookehv_interrupts.o \ | ||
46 | e500mc.o \ | ||
47 | e500_tlb.o \ | ||
48 | e500_emulate.o | ||
49 | kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs) | ||
40 | 50 | ||
41 | kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ | 51 | kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ |
42 | ../../../virt/kvm/coalesced_mmio.o \ | 52 | ../../../virt/kvm/coalesced_mmio.o \ |
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ | |||
44 | book3s_paired_singles.o \ | 54 | book3s_paired_singles.o \ |
45 | book3s_pr.o \ | 55 | book3s_pr.o \ |
46 | book3s_pr_papr.o \ | 56 | book3s_pr_papr.o \ |
57 | book3s_64_vio_hv.o \ | ||
47 | book3s_emulate.o \ | 58 | book3s_emulate.o \ |
48 | book3s_interrupts.o \ | 59 | book3s_interrupts.o \ |
49 | book3s_mmu_hpte.o \ | 60 | book3s_mmu_hpte.o \ |
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \ | |||
68 | powerpc.o \ | 79 | powerpc.o \ |
69 | emulate.o \ | 80 | emulate.o \ |
70 | book3s.o \ | 81 | book3s.o \ |
82 | book3s_64_vio.o \ | ||
71 | $(kvm-book3s_64-objs-y) | 83 | $(kvm-book3s_64-objs-y) |
72 | 84 | ||
73 | kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs) | 85 | kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs) |
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs) | |||
88 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) | 100 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) |
89 | 101 | ||
90 | obj-$(CONFIG_KVM_440) += kvm.o | 102 | obj-$(CONFIG_KVM_440) += kvm.o |
91 | obj-$(CONFIG_KVM_E500) += kvm.o | 103 | obj-$(CONFIG_KVM_E500V2) += kvm.o |
104 | obj-$(CONFIG_KVM_E500MC) += kvm.o | ||
92 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o | 105 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o |
93 | obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o | 106 | obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o |
94 | 107 | ||
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 7d54f4ed6d96..3f2a8360c857 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) | |||
258 | return true; | 258 | return true; |
259 | } | 259 | } |
260 | 260 | ||
261 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | 261 | int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
262 | { | 262 | { |
263 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 263 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
264 | unsigned long old_pending = vcpu->arch.pending_exceptions; | 264 | unsigned long old_pending = vcpu->arch.pending_exceptions; |
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | |||
283 | 283 | ||
284 | /* Tell the guest about our interrupt status */ | 284 | /* Tell the guest about our interrupt status */ |
285 | kvmppc_update_int_pending(vcpu, *pending, old_pending); | 285 | kvmppc_update_int_pending(vcpu, *pending, old_pending); |
286 | |||
287 | return 0; | ||
286 | } | 288 | } |
287 | 289 | ||
288 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) | 290 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
289 | { | 291 | { |
290 | ulong mp_pa = vcpu->arch.magic_page_pa; | 292 | ulong mp_pa = vcpu->arch.magic_page_pa; |
291 | 293 | ||
294 | if (!(vcpu->arch.shared->msr & MSR_SF)) | ||
295 | mp_pa = (uint32_t)mp_pa; | ||
296 | |||
292 | /* Magic page override */ | 297 | /* Magic page override */ |
293 | if (unlikely(mp_pa) && | 298 | if (unlikely(mp_pa) && |
294 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == | 299 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == |
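The new masking matters because a 32-bit guest (MSR_SF clear) works with 32-bit effective addresses, so the magic page address it registered must be truncated before the gfn comparison. A worked example (the -4096 placement is the conventional magic-page address, used here for illustration):

    ulong mp_pa = 0xfffffffffffff000UL;  /* -4096 as stored */
    mp_pa = (uint32_t)mp_pa;             /* 0xfffff000: the page a 32-bit
                                            guest actually mapped */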
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 6f87f39a1ac2..10fc8ec9d2a8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
194 | backwards_map = !backwards_map; | 194 | backwards_map = !backwards_map; |
195 | 195 | ||
196 | /* Uh-oh ... out of mappings. Let's flush! */ | 196 | /* Uh-oh ... out of mappings. Let's flush! */ |
197 | if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) { | 197 | if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) { |
198 | vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; | 198 | vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first; |
199 | memset(vcpu_book3s->sid_map, 0, | 199 | memset(vcpu_book3s->sid_map, 0, |
200 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); | 200 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); |
201 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 201 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
202 | kvmppc_mmu_flush_segments(vcpu); | 202 | kvmppc_mmu_flush_segments(vcpu); |
203 | } | 203 | } |
204 | map->host_vsid = vcpu_book3s->vsid_next++; | 204 | map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M); |
205 | 205 | ||
206 | map->guest_vsid = gvsid; | 206 | map->guest_vsid = gvsid; |
207 | map->valid = true; | 207 | map->valid = true; |
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | |||
319 | return -1; | 319 | return -1; |
320 | vcpu3s->context_id[0] = err; | 320 | vcpu3s->context_id[0] = err; |
321 | 321 | ||
322 | vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; | 322 | vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) |
323 | vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; | 323 | << USER_ESID_BITS) - 1; |
324 | vcpu3s->vsid_next = vcpu3s->vsid_first; | 324 | vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; |
325 | vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; | ||
325 | 326 | ||
326 | kvmppc_mmu_hpte_init(vcpu); | 327 | kvmppc_mmu_hpte_init(vcpu); |
327 | 328 | ||
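The rename reflects a real change: the sid map now hands out proto-VSIDs and scrambles them only when installing a host SLB entry, matching how the regular MMU context code produces host VSIDs. For 256M segments the scramble is, per this era's mmu-hash64.h (a sketch of the definition, not part of this hunk):

    /* vsid_scramble(proto, 256M):
     *   vsid = (proto * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M
     * a multiplicative hash that spreads consecutive proto-VSIDs
     * across the hash table */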
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index ddc485a529f2..80a577517584 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -36,13 +36,11 @@ | |||
36 | 36 | ||
37 | /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ | 37 | /* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ |
38 | #define MAX_LPID_970 63 | 38 | #define MAX_LPID_970 63 |
39 | #define NR_LPIDS (LPID_RSVD + 1) | ||
40 | unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)]; | ||
41 | 39 | ||
42 | long kvmppc_alloc_hpt(struct kvm *kvm) | 40 | long kvmppc_alloc_hpt(struct kvm *kvm) |
43 | { | 41 | { |
44 | unsigned long hpt; | 42 | unsigned long hpt; |
45 | unsigned long lpid; | 43 | long lpid; |
46 | struct revmap_entry *rev; | 44 | struct revmap_entry *rev; |
47 | struct kvmppc_linear_info *li; | 45 | struct kvmppc_linear_info *li; |
48 | 46 | ||
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm) | |||
72 | } | 70 | } |
73 | kvm->arch.revmap = rev; | 71 | kvm->arch.revmap = rev; |
74 | 72 | ||
75 | /* Allocate the guest's logical partition ID */ | 73 | lpid = kvmppc_alloc_lpid(); |
76 | do { | 74 | if (lpid < 0) |
77 | lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS); | 75 | goto out_freeboth; |
78 | if (lpid >= NR_LPIDS) { | ||
79 | pr_err("kvm_alloc_hpt: No LPIDs free\n"); | ||
80 | goto out_freeboth; | ||
81 | } | ||
82 | } while (test_and_set_bit(lpid, lpid_inuse)); | ||
83 | 76 | ||
84 | kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); | 77 | kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); |
85 | kvm->arch.lpid = lpid; | 78 | kvm->arch.lpid = lpid; |
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm) | |||
96 | 89 | ||
97 | void kvmppc_free_hpt(struct kvm *kvm) | 90 | void kvmppc_free_hpt(struct kvm *kvm) |
98 | { | 91 | { |
99 | clear_bit(kvm->arch.lpid, lpid_inuse); | 92 | kvmppc_free_lpid(kvm->arch.lpid); |
100 | vfree(kvm->arch.revmap); | 93 | vfree(kvm->arch.revmap); |
101 | if (kvm->arch.hpt_li) | 94 | if (kvm->arch.hpt_li) |
102 | kvm_release_hpt(kvm->arch.hpt_li); | 95 | kvm_release_hpt(kvm->arch.hpt_li); |
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void) | |||
171 | if (!cpu_has_feature(CPU_FTR_HVMODE)) | 164 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
172 | return -EINVAL; | 165 | return -EINVAL; |
173 | 166 | ||
174 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | 167 | /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */ |
175 | |||
176 | if (cpu_has_feature(CPU_FTR_ARCH_206)) { | 168 | if (cpu_has_feature(CPU_FTR_ARCH_206)) { |
177 | host_lpid = mfspr(SPRN_LPID); /* POWER7 */ | 169 | host_lpid = mfspr(SPRN_LPID); /* POWER7 */ |
178 | rsvd_lpid = LPID_RSVD; | 170 | rsvd_lpid = LPID_RSVD; |
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void) | |||
181 | rsvd_lpid = MAX_LPID_970; | 173 | rsvd_lpid = MAX_LPID_970; |
182 | } | 174 | } |
183 | 175 | ||
184 | set_bit(host_lpid, lpid_inuse); | 176 | kvmppc_init_lpid(rsvd_lpid + 1); |
177 | |||
178 | kvmppc_claim_lpid(host_lpid); | ||
185 | /* rsvd_lpid is reserved for use in partition switching */ | 179 | /* rsvd_lpid is reserved for use in partition switching */ |
186 | set_bit(rsvd_lpid, lpid_inuse); | 180 | kvmppc_claim_lpid(rsvd_lpid); |
187 | 181 | ||
188 | return 0; | 182 | return 0; |
189 | } | 183 | } |
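Replacing the private lpid_inuse bitmap with a shared allocator lets Book3S HV and the new Book E HV (e500mc) code draw LPIDs from one pool. A sketch of the allocator contract as used above (the implementation lands in powerpc.c elsewhere in this series; exact prototypes may differ):

    void kvmppc_init_lpid(unsigned long nr_lpids); /* size the pool */
    void kvmppc_claim_lpid(long lpid);             /* reserve host/rsvd ids */
    long kvmppc_alloc_lpid(void);                  /* returns < 0 when full */
    void kvmppc_free_lpid(long lpid);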
@@ -258,6 +252,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, | |||
258 | !(memslot->userspace_addr & (s - 1))) { | 252 | !(memslot->userspace_addr & (s - 1))) { |
259 | start &= ~(s - 1); | 253 | start &= ~(s - 1); |
260 | pgsize = s; | 254 | pgsize = s; |
255 | get_page(hpage); | ||
256 | put_page(page); | ||
261 | page = hpage; | 257 | page = hpage; |
262 | } | 258 | } |
263 | } | 259 | } |
@@ -281,11 +277,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, | |||
281 | err = 0; | 277 | err = 0; |
282 | 278 | ||
283 | out: | 279 | out: |
284 | if (got) { | 280 | if (got) |
285 | if (PageHuge(page)) | ||
286 | page = compound_head(page); | ||
287 | put_page(page); | 281 | put_page(page); |
288 | } | ||
289 | return err; | 282 | return err; |
290 | 283 | ||
291 | up_err: | 284 | up_err: |
@@ -453,7 +446,7 @@ static int instruction_is_store(unsigned int instr) | |||
453 | } | 446 | } |
454 | 447 | ||
455 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | 448 | static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, |
456 | unsigned long gpa, int is_store) | 449 | unsigned long gpa, gva_t ea, int is_store) |
457 | { | 450 | { |
458 | int ret; | 451 | int ret; |
459 | u32 last_inst; | 452 | u32 last_inst; |
@@ -500,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
500 | */ | 493 | */ |
501 | 494 | ||
502 | vcpu->arch.paddr_accessed = gpa; | 495 | vcpu->arch.paddr_accessed = gpa; |
496 | vcpu->arch.vaddr_accessed = ea; | ||
503 | return kvmppc_emulate_mmio(run, vcpu); | 497 | return kvmppc_emulate_mmio(run, vcpu); |
504 | } | 498 | } |
505 | 499 | ||
@@ -553,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
553 | /* No memslot means it's an emulated MMIO region */ | 547 | /* No memslot means it's an emulated MMIO region */ |
554 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { | 548 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { |
555 | unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); | 549 | unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); |
556 | return kvmppc_hv_emulate_mmio(run, vcpu, gpa, | 550 | return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, |
557 | dsisr & DSISR_ISSTORE); | 551 | dsisr & DSISR_ISSTORE); |
558 | } | 552 | } |
559 | 553 | ||
@@ -678,8 +672,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
678 | SetPageDirty(page); | 672 | SetPageDirty(page); |
679 | 673 | ||
680 | out_put: | 674 | out_put: |
681 | if (page) | 675 | if (page) { |
682 | put_page(page); | 676 | /* |
677 | * We drop pages[0] here, not page because page might | ||
678 | * have been set to the head page of a compound, but | ||
679 | * we have to drop the reference on the correct tail | ||
680 | * page to match the get inside gup() | ||
681 | */ | ||
682 | put_page(pages[0]); | ||
683 | } | ||
683 | return ret; | 684 | return ret; |
684 | 685 | ||
685 | out_unlock: | 686 | out_unlock: |
@@ -979,6 +980,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, | |||
979 | pa = *physp; | 980 | pa = *physp; |
980 | } | 981 | } |
981 | page = pfn_to_page(pa >> PAGE_SHIFT); | 982 | page = pfn_to_page(pa >> PAGE_SHIFT); |
983 | get_page(page); | ||
982 | } else { | 984 | } else { |
983 | hva = gfn_to_hva_memslot(memslot, gfn); | 985 | hva = gfn_to_hva_memslot(memslot, gfn); |
984 | npages = get_user_pages_fast(hva, 1, 1, pages); | 986 | npages = get_user_pages_fast(hva, 1, 1, pages); |
@@ -991,8 +993,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, | |||
991 | page = compound_head(page); | 993 | page = compound_head(page); |
992 | psize <<= compound_order(page); | 994 | psize <<= compound_order(page); |
993 | } | 995 | } |
994 | if (!kvm->arch.using_mmu_notifiers) | ||
995 | get_page(page); | ||
996 | offset = gpa & (psize - 1); | 996 | offset = gpa & (psize - 1); |
997 | if (nb_ret) | 997 | if (nb_ret) |
998 | *nb_ret = psize - offset; | 998 | *nb_ret = psize - offset; |
@@ -1003,7 +1003,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) | |||
1003 | { | 1003 | { |
1004 | struct page *page = virt_to_page(va); | 1004 | struct page *page = virt_to_page(va); |
1005 | 1005 | ||
1006 | page = compound_head(page); | ||
1007 | put_page(page); | 1006 | put_page(page); |
1008 | } | 1007 | } |
1009 | 1008 | ||
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S index f2e6e48ea463..56b983e7b738 100644 --- a/arch/powerpc/kvm/book3s_64_slb.S +++ b/arch/powerpc/kvm/book3s_64_slb.S | |||
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num: | |||
90 | or r10, r10, r12 | 90 | or r10, r10, r12 |
91 | slbie r10 | 91 | slbie r10 |
92 | 92 | ||
93 | isync | ||
94 | |||
95 | /* Fill SLB with our shadow */ | 93 | /* Fill SLB with our shadow */ |
96 | 94 | ||
97 | lbz r12, SVCPU_SLB_MAX(r3) | 95 | lbz r12, SVCPU_SLB_MAX(r3) |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c new file mode 100644 index 000000000000..72ffc899c082 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
@@ -0,0 +1,150 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> | ||
16 | * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com> | ||
17 | */ | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/string.h> | ||
21 | #include <linux/kvm.h> | ||
22 | #include <linux/kvm_host.h> | ||
23 | #include <linux/highmem.h> | ||
24 | #include <linux/gfp.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/hugetlb.h> | ||
27 | #include <linux/list.h> | ||
28 | #include <linux/anon_inodes.h> | ||
29 | |||
30 | #include <asm/tlbflush.h> | ||
31 | #include <asm/kvm_ppc.h> | ||
32 | #include <asm/kvm_book3s.h> | ||
33 | #include <asm/mmu-hash64.h> | ||
34 | #include <asm/hvcall.h> | ||
35 | #include <asm/synch.h> | ||
36 | #include <asm/ppc-opcode.h> | ||
37 | #include <asm/kvm_host.h> | ||
38 | #include <asm/udbg.h> | ||
39 | |||
40 | #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) | ||
41 | |||
42 | static long kvmppc_stt_npages(unsigned long window_size) | ||
43 | { | ||
44 | return ALIGN((window_size >> SPAPR_TCE_SHIFT) | ||
45 | * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; | ||
46 | } | ||
47 | |||
48 | static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt) | ||
49 | { | ||
50 | struct kvm *kvm = stt->kvm; | ||
51 | int i; | ||
52 | |||
53 | mutex_lock(&kvm->lock); | ||
54 | list_del(&stt->list); | ||
55 | for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) | ||
56 | __free_page(stt->pages[i]); | ||
57 | kfree(stt); | ||
58 | mutex_unlock(&kvm->lock); | ||
59 | |||
60 | kvm_put_kvm(kvm); | ||
61 | } | ||
62 | |||
63 | static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
64 | { | ||
65 | struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; | ||
66 | struct page *page; | ||
67 | |||
68 | if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size)) | ||
69 | return VM_FAULT_SIGBUS; | ||
70 | |||
71 | page = stt->pages[vmf->pgoff]; | ||
72 | get_page(page); | ||
73 | vmf->page = page; | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static const struct vm_operations_struct kvm_spapr_tce_vm_ops = { | ||
78 | .fault = kvm_spapr_tce_fault, | ||
79 | }; | ||
80 | |||
81 | static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) | ||
82 | { | ||
83 | vma->vm_ops = &kvm_spapr_tce_vm_ops; | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) | ||
88 | { | ||
89 | struct kvmppc_spapr_tce_table *stt = filp->private_data; | ||
90 | |||
91 | release_spapr_tce_table(stt); | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static struct file_operations kvm_spapr_tce_fops = { | ||
96 | .mmap = kvm_spapr_tce_mmap, | ||
97 | .release = kvm_spapr_tce_release, | ||
98 | }; | ||
99 | |||
100 | long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | ||
101 | struct kvm_create_spapr_tce *args) | ||
102 | { | ||
103 | struct kvmppc_spapr_tce_table *stt = NULL; | ||
104 | long npages; | ||
105 | int ret = -ENOMEM; | ||
106 | int i; | ||
107 | |||
108 | /* Check this LIOBN hasn't been previously allocated */ | ||
109 | list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { | ||
110 | if (stt->liobn == args->liobn) | ||
111 | return -EBUSY; | ||
112 | } | ||
113 | |||
114 | npages = kvmppc_stt_npages(args->window_size); | ||
115 | |||
116 | stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *), | ||
117 | GFP_KERNEL); | ||
118 | if (!stt) | ||
119 | goto fail; | ||
120 | |||
121 | stt->liobn = args->liobn; | ||
122 | stt->window_size = args->window_size; | ||
123 | stt->kvm = kvm; | ||
124 | |||
125 | for (i = 0; i < npages; i++) { | ||
126 | stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
127 | if (!stt->pages[i]) | ||
128 | goto fail; | ||
129 | } | ||
130 | |||
131 | kvm_get_kvm(kvm); | ||
132 | |||
133 | mutex_lock(&kvm->lock); | ||
134 | list_add(&stt->list, &kvm->arch.spapr_tce_tables); | ||
135 | |||
136 | mutex_unlock(&kvm->lock); | ||
137 | |||
138 | return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, | ||
139 | stt, O_RDWR); | ||
140 | |||
141 | fail: | ||
142 | if (stt) { | ||
143 | for (i = 0; i < npages; i++) | ||
144 | if (stt->pages[i]) | ||
145 | __free_page(stt->pages[i]); | ||
146 | |||
147 | kfree(stt); | ||
148 | } | ||
149 | return ret; | ||
150 | } | ||
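From userspace, this new file backs the KVM_CREATE_SPAPR_TCE ioctl: it returns a file descriptor whose pages hold the guest's TCE table, which the VMM can mmap to observe DMA mappings directly. A usage sketch (error handling omitted; the window size is illustrative):

    struct kvm_create_spapr_tce args = {
            .liobn       = liobn,
            .window_size = 1U << 30,        /* 1G DMA window */
    };
    int fd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE, &args);

    /* one u64 TCE per 4K page of the window (SPAPR_TCE_SHIFT == 12) */
    size_t len = ((size_t)args.window_size >> 12) * sizeof(uint64_t);
    uint64_t *tces = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);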
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index ea0f8c537c28..30c2f3b134c6 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c | |||
@@ -38,6 +38,9 @@ | |||
38 | 38 | ||
39 | #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) | 39 | #define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) |
40 | 40 | ||
41 | /* WARNING: This will be called in real-mode on HV KVM and virtual | ||
42 | * mode on PR KVM | ||
43 | */ | ||
41 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | 44 | long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, |
42 | unsigned long ioba, unsigned long tce) | 45 | unsigned long ioba, unsigned long tce) |
43 | { | 46 | { |
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index 135663a3e4fc..b9a989dc76cc 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
87 | unsigned int inst, int *advance) | 87 | unsigned int inst, int *advance) |
88 | { | 88 | { |
89 | int emulated = EMULATE_DONE; | 89 | int emulated = EMULATE_DONE; |
90 | int rt = get_rt(inst); | ||
91 | int rs = get_rs(inst); | ||
92 | int ra = get_ra(inst); | ||
93 | int rb = get_rb(inst); | ||
90 | 94 | ||
91 | switch (get_op(inst)) { | 95 | switch (get_op(inst)) { |
92 | case 19: | 96 | case 19: |
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
106 | case 31: | 110 | case 31: |
107 | switch (get_xop(inst)) { | 111 | switch (get_xop(inst)) { |
108 | case OP_31_XOP_MFMSR: | 112 | case OP_31_XOP_MFMSR: |
109 | kvmppc_set_gpr(vcpu, get_rt(inst), | 113 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
110 | vcpu->arch.shared->msr); | ||
111 | break; | 114 | break; |
112 | case OP_31_XOP_MTMSRD: | 115 | case OP_31_XOP_MTMSRD: |
113 | { | 116 | { |
114 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); | 117 | ulong rs_val = kvmppc_get_gpr(vcpu, rs); |
115 | if (inst & 0x10000) { | 118 | if (inst & 0x10000) { |
116 | vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE); | 119 | ulong new_msr = vcpu->arch.shared->msr; |
117 | vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE); | 120 | new_msr &= ~(MSR_RI | MSR_EE); |
121 | new_msr |= rs_val & (MSR_RI | MSR_EE); | ||
122 | vcpu->arch.shared->msr = new_msr; | ||
118 | } else | 123 | } else |
119 | kvmppc_set_msr(vcpu, rs); | 124 | kvmppc_set_msr(vcpu, rs_val); |
120 | break; | 125 | break; |
121 | } | 126 | } |
122 | case OP_31_XOP_MTMSR: | 127 | case OP_31_XOP_MTMSR: |
123 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); | 128 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
124 | break; | 129 | break; |
125 | case OP_31_XOP_MFSR: | 130 | case OP_31_XOP_MFSR: |
126 | { | 131 | { |
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
130 | if (vcpu->arch.mmu.mfsrin) { | 135 | if (vcpu->arch.mmu.mfsrin) { |
131 | u32 sr; | 136 | u32 sr; |
132 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); | 137 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); |
133 | kvmppc_set_gpr(vcpu, get_rt(inst), sr); | 138 | kvmppc_set_gpr(vcpu, rt, sr); |
134 | } | 139 | } |
135 | break; | 140 | break; |
136 | } | 141 | } |
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
138 | { | 143 | { |
139 | int srnum; | 144 | int srnum; |
140 | 145 | ||
141 | srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf; | 146 | srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf; |
142 | if (vcpu->arch.mmu.mfsrin) { | 147 | if (vcpu->arch.mmu.mfsrin) { |
143 | u32 sr; | 148 | u32 sr; |
144 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); | 149 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); |
145 | kvmppc_set_gpr(vcpu, get_rt(inst), sr); | 150 | kvmppc_set_gpr(vcpu, rt, sr); |
146 | } | 151 | } |
147 | break; | 152 | break; |
148 | } | 153 | } |
149 | case OP_31_XOP_MTSR: | 154 | case OP_31_XOP_MTSR: |
150 | vcpu->arch.mmu.mtsrin(vcpu, | 155 | vcpu->arch.mmu.mtsrin(vcpu, |
151 | (inst >> 16) & 0xf, | 156 | (inst >> 16) & 0xf, |
152 | kvmppc_get_gpr(vcpu, get_rs(inst))); | 157 | kvmppc_get_gpr(vcpu, rs)); |
153 | break; | 158 | break; |
154 | case OP_31_XOP_MTSRIN: | 159 | case OP_31_XOP_MTSRIN: |
155 | vcpu->arch.mmu.mtsrin(vcpu, | 160 | vcpu->arch.mmu.mtsrin(vcpu, |
156 | (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, | 161 | (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf, |
157 | kvmppc_get_gpr(vcpu, get_rs(inst))); | 162 | kvmppc_get_gpr(vcpu, rs)); |
158 | break; | 163 | break; |
159 | case OP_31_XOP_TLBIE: | 164 | case OP_31_XOP_TLBIE: |
160 | case OP_31_XOP_TLBIEL: | 165 | case OP_31_XOP_TLBIEL: |
161 | { | 166 | { |
162 | bool large = (inst & 0x00200000) ? true : false; | 167 | bool large = (inst & 0x00200000) ? true : false; |
163 | ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst)); | 168 | ulong addr = kvmppc_get_gpr(vcpu, rb); |
164 | vcpu->arch.mmu.tlbie(vcpu, addr, large); | 169 | vcpu->arch.mmu.tlbie(vcpu, addr, large); |
165 | break; | 170 | break; |
166 | } | 171 | } |
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
171 | return EMULATE_FAIL; | 176 | return EMULATE_FAIL; |
172 | 177 | ||
173 | vcpu->arch.mmu.slbmte(vcpu, | 178 | vcpu->arch.mmu.slbmte(vcpu, |
174 | kvmppc_get_gpr(vcpu, get_rs(inst)), | 179 | kvmppc_get_gpr(vcpu, rs), |
175 | kvmppc_get_gpr(vcpu, get_rb(inst))); | 180 | kvmppc_get_gpr(vcpu, rb)); |
176 | break; | 181 | break; |
177 | case OP_31_XOP_SLBIE: | 182 | case OP_31_XOP_SLBIE: |
178 | if (!vcpu->arch.mmu.slbie) | 183 | if (!vcpu->arch.mmu.slbie) |
179 | return EMULATE_FAIL; | 184 | return EMULATE_FAIL; |
180 | 185 | ||
181 | vcpu->arch.mmu.slbie(vcpu, | 186 | vcpu->arch.mmu.slbie(vcpu, |
182 | kvmppc_get_gpr(vcpu, get_rb(inst))); | 187 | kvmppc_get_gpr(vcpu, rb)); |
183 | break; | 188 | break; |
184 | case OP_31_XOP_SLBIA: | 189 | case OP_31_XOP_SLBIA: |
185 | if (!vcpu->arch.mmu.slbia) | 190 | if (!vcpu->arch.mmu.slbia) |
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
191 | if (!vcpu->arch.mmu.slbmfee) { | 196 | if (!vcpu->arch.mmu.slbmfee) { |
192 | emulated = EMULATE_FAIL; | 197 | emulated = EMULATE_FAIL; |
193 | } else { | 198 | } else { |
194 | ulong t, rb; | 199 | ulong t, rb_val; |
195 | 200 | ||
196 | rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | 201 | rb_val = kvmppc_get_gpr(vcpu, rb); |
197 | t = vcpu->arch.mmu.slbmfee(vcpu, rb); | 202 | t = vcpu->arch.mmu.slbmfee(vcpu, rb_val); |
198 | kvmppc_set_gpr(vcpu, get_rt(inst), t); | 203 | kvmppc_set_gpr(vcpu, rt, t); |
199 | } | 204 | } |
200 | break; | 205 | break; |
201 | case OP_31_XOP_SLBMFEV: | 206 | case OP_31_XOP_SLBMFEV: |
202 | if (!vcpu->arch.mmu.slbmfev) { | 207 | if (!vcpu->arch.mmu.slbmfev) { |
203 | emulated = EMULATE_FAIL; | 208 | emulated = EMULATE_FAIL; |
204 | } else { | 209 | } else { |
205 | ulong t, rb; | 210 | ulong t, rb_val; |
206 | 211 | ||
207 | rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | 212 | rb_val = kvmppc_get_gpr(vcpu, rb); |
208 | t = vcpu->arch.mmu.slbmfev(vcpu, rb); | 213 | t = vcpu->arch.mmu.slbmfev(vcpu, rb_val); |
209 | kvmppc_set_gpr(vcpu, get_rt(inst), t); | 214 | kvmppc_set_gpr(vcpu, rt, t); |
210 | } | 215 | } |
211 | break; | 216 | break; |
212 | case OP_31_XOP_DCBA: | 217 | case OP_31_XOP_DCBA: |
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
214 | break; | 219 | break; |
215 | case OP_31_XOP_DCBZ: | 220 | case OP_31_XOP_DCBZ: |
216 | { | 221 | { |
217 | ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | 222 | ulong rb_val = kvmppc_get_gpr(vcpu, rb); |
218 | ulong ra = 0; | 223 | ulong ra_val = 0; |
219 | ulong addr, vaddr; | 224 | ulong addr, vaddr; |
220 | u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; | 225 | u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; |
221 | u32 dsisr; | 226 | u32 dsisr; |
222 | int r; | 227 | int r; |
223 | 228 | ||
224 | if (get_ra(inst)) | 229 | if (ra) |
225 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); | 230 | ra_val = kvmppc_get_gpr(vcpu, ra); |
226 | 231 | ||
227 | addr = (ra + rb) & ~31ULL; | 232 | addr = (ra_val + rb_val) & ~31ULL; |
228 | if (!(vcpu->arch.shared->msr & MSR_SF)) | 233 | if (!(vcpu->arch.shared->msr & MSR_SF)) |
229 | addr &= 0xffffffff; | 234 | addr &= 0xffffffff; |
230 | vaddr = addr; | 235 | vaddr = addr; |
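
[Editor's note] In the dcbz case, renaming to ra_val/rb_val keeps register numbers (ra, rb) visibly distinct from register contents. The effective address is rounded down to the 32-byte cache block and truncated to 32 bits when MSR_SF is clear; a condensed sketch:

/* ra_val/rb_val are register contents, not register numbers. */
static unsigned long dcbz_ea(unsigned long ra_val, unsigned long rb_val,
			     int msr_sf)
{
	unsigned long ea = (ra_val + rb_val) & ~31UL;	/* 32-byte block */

	if (!msr_sf)
		ea &= 0xffffffff;	/* 32-bit mode truncation */
	return ea;
}
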
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) | |||
313 | return bat; | 318 | return bat; |
314 | } | 319 | } |
315 | 320 | ||
316 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 321 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
317 | { | 322 | { |
318 | int emulated = EMULATE_DONE; | 323 | int emulated = EMULATE_DONE; |
319 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
320 | 324 | ||
321 | switch (sprn) { | 325 | switch (sprn) { |
322 | case SPRN_SDR1: | 326 | case SPRN_SDR1: |
@@ -428,7 +432,7 @@ unprivileged: | |||
428 | return emulated; | 432 | return emulated; |
429 | } | 433 | } |
430 | 434 | ||
431 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 435 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
432 | { | 436 | { |
433 | int emulated = EMULATE_DONE; | 437 | int emulated = EMULATE_DONE; |
434 | 438 | ||
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
441 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); | 445 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); |
442 | 446 | ||
443 | if (sprn % 2) | 447 | if (sprn % 2) |
444 | kvmppc_set_gpr(vcpu, rt, bat->raw >> 32); | 448 | *spr_val = bat->raw >> 32; |
445 | else | 449 | else |
446 | kvmppc_set_gpr(vcpu, rt, bat->raw); | 450 | *spr_val = bat->raw; |
447 | 451 | ||
448 | break; | 452 | break; |
449 | } | 453 | } |
450 | case SPRN_SDR1: | 454 | case SPRN_SDR1: |
451 | if (!spr_allowed(vcpu, PRIV_HYPER)) | 455 | if (!spr_allowed(vcpu, PRIV_HYPER)) |
452 | goto unprivileged; | 456 | goto unprivileged; |
453 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); | 457 | *spr_val = to_book3s(vcpu)->sdr1; |
454 | break; | 458 | break; |
455 | case SPRN_DSISR: | 459 | case SPRN_DSISR: |
456 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr); | 460 | *spr_val = vcpu->arch.shared->dsisr; |
457 | break; | 461 | break; |
458 | case SPRN_DAR: | 462 | case SPRN_DAR: |
459 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); | 463 | *spr_val = vcpu->arch.shared->dar; |
460 | break; | 464 | break; |
461 | case SPRN_HIOR: | 465 | case SPRN_HIOR: |
462 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); | 466 | *spr_val = to_book3s(vcpu)->hior; |
463 | break; | 467 | break; |
464 | case SPRN_HID0: | 468 | case SPRN_HID0: |
465 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]); | 469 | *spr_val = to_book3s(vcpu)->hid[0]; |
466 | break; | 470 | break; |
467 | case SPRN_HID1: | 471 | case SPRN_HID1: |
468 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); | 472 | *spr_val = to_book3s(vcpu)->hid[1]; |
469 | break; | 473 | break; |
470 | case SPRN_HID2: | 474 | case SPRN_HID2: |
471 | case SPRN_HID2_GEKKO: | 475 | case SPRN_HID2_GEKKO: |
472 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); | 476 | *spr_val = to_book3s(vcpu)->hid[2]; |
473 | break; | 477 | break; |
474 | case SPRN_HID4: | 478 | case SPRN_HID4: |
475 | case SPRN_HID4_GEKKO: | 479 | case SPRN_HID4_GEKKO: |
476 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); | 480 | *spr_val = to_book3s(vcpu)->hid[4]; |
477 | break; | 481 | break; |
478 | case SPRN_HID5: | 482 | case SPRN_HID5: |
479 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); | 483 | *spr_val = to_book3s(vcpu)->hid[5]; |
480 | break; | 484 | break; |
481 | case SPRN_CFAR: | 485 | case SPRN_CFAR: |
482 | case SPRN_PURR: | 486 | case SPRN_PURR: |
483 | kvmppc_set_gpr(vcpu, rt, 0); | 487 | *spr_val = 0; |
484 | break; | 488 | break; |
485 | case SPRN_GQR0: | 489 | case SPRN_GQR0: |
486 | case SPRN_GQR1: | 490 | case SPRN_GQR1: |
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
490 | case SPRN_GQR5: | 494 | case SPRN_GQR5: |
491 | case SPRN_GQR6: | 495 | case SPRN_GQR6: |
492 | case SPRN_GQR7: | 496 | case SPRN_GQR7: |
493 | kvmppc_set_gpr(vcpu, rt, | 497 | *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]; |
494 | to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]); | ||
495 | break; | 498 | break; |
496 | case SPRN_THRM1: | 499 | case SPRN_THRM1: |
497 | case SPRN_THRM2: | 500 | case SPRN_THRM2: |
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
506 | case SPRN_PMC3_GEKKO: | 509 | case SPRN_PMC3_GEKKO: |
507 | case SPRN_PMC4_GEKKO: | 510 | case SPRN_PMC4_GEKKO: |
508 | case SPRN_WPAR_GEKKO: | 511 | case SPRN_WPAR_GEKKO: |
509 | kvmppc_set_gpr(vcpu, rt, 0); | 512 | *spr_val = 0; |
510 | break; | 513 | break; |
511 | default: | 514 | default: |
512 | unprivileged: | 515 | unprivileged: |
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst) | |||
565 | ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) | 568 | ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) |
566 | { | 569 | { |
567 | ulong dar = 0; | 570 | ulong dar = 0; |
568 | ulong ra; | 571 | ulong ra = get_ra(inst); |
572 | ulong rb = get_rb(inst); | ||
569 | 573 | ||
570 | switch (get_op(inst)) { | 574 | switch (get_op(inst)) { |
571 | case OP_LFS: | 575 | case OP_LFS: |
572 | case OP_LFD: | 576 | case OP_LFD: |
573 | case OP_STFD: | 577 | case OP_STFD: |
574 | case OP_STFS: | 578 | case OP_STFS: |
575 | ra = get_ra(inst); | ||
576 | if (ra) | 579 | if (ra) |
577 | dar = kvmppc_get_gpr(vcpu, ra); | 580 | dar = kvmppc_get_gpr(vcpu, ra); |
578 | dar += (s32)((s16)inst); | 581 | dar += (s32)((s16)inst); |
579 | break; | 582 | break; |
580 | case 31: | 583 | case 31: |
581 | ra = get_ra(inst); | ||
582 | if (ra) | 584 | if (ra) |
583 | dar = kvmppc_get_gpr(vcpu, ra); | 585 | dar = kvmppc_get_gpr(vcpu, ra); |
584 | dar += kvmppc_get_gpr(vcpu, get_rb(inst)); | 586 | dar += kvmppc_get_gpr(vcpu, rb); |
585 | break; | 587 | break; |
586 | default: | 588 | default: |
587 | printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); | 589 | printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); |
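
[Editor's note] kvmppc_alignment_dar() now decodes ra/rb once for both instruction forms it handles. The two effective-address computations it distinguishes, sketched with illustrative helpers (not kernel API):

/* D-form (lfs/lfd/stfs/stfd): EA = (ra ? GPR[ra] : 0) + sext(d16) */
static unsigned long dform_ea(unsigned long ra_val, u32 inst)
{
	return ra_val + (s32)(s16)inst;	/* low 16 bits = displacement */
}

/* X-form (opcode 31): EA = (ra ? GPR[ra] : 0) + GPR[rb] */
static unsigned long xform_ea(unsigned long ra_val, unsigned long rb_val)
{
	return ra_val + rb_val;
}
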
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 01294a5099dd..c6af1d623839 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -60,12 +60,20 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu); | |||
60 | 60 | ||
61 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 61 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
62 | { | 62 | { |
63 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||
64 | |||
63 | local_paca->kvm_hstate.kvm_vcpu = vcpu; | 65 | local_paca->kvm_hstate.kvm_vcpu = vcpu; |
64 | local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore; | 66 | local_paca->kvm_hstate.kvm_vcore = vc; |
67 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | ||
68 | vc->stolen_tb += mftb() - vc->preempt_tb; | ||
65 | } | 69 | } |
66 | 70 | ||
67 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 71 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
68 | { | 72 | { |
73 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | ||
74 | |||
75 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | ||
76 | vc->preempt_tb = mftb(); | ||
69 | } | 77 | } |
70 | 78 | ||
71 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | 79 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) |
@@ -134,6 +142,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) | |||
134 | vpa->yield_count = 1; | 142 | vpa->yield_count = 1; |
135 | } | 143 | } |
136 | 144 | ||
145 | /* Length for a per-processor buffer is passed in at offset 4 in the buffer */ | ||
146 | struct reg_vpa { | ||
147 | u32 dummy; | ||
148 | union { | ||
149 | u16 hword; | ||
150 | u32 word; | ||
151 | } length; | ||
152 | }; | ||
153 | |||
154 | static int vpa_is_registered(struct kvmppc_vpa *vpap) | ||
155 | { | ||
156 | if (vpap->update_pending) | ||
157 | return vpap->next_gpa != 0; | ||
158 | return vpap->pinned_addr != NULL; | ||
159 | } | ||
160 | |||
137 | static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | 161 | static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, |
138 | unsigned long flags, | 162 | unsigned long flags, |
139 | unsigned long vcpuid, unsigned long vpa) | 163 | unsigned long vcpuid, unsigned long vpa) |
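
[Editor's note] The rewritten do_h_register_vpa() below replaces the magic "flags >>= 63 - 18; flags &= 7" shift with named H_VPA_* constants. Their likely definitions, inferred from the old numeric cases 1..3 and 5..7 (treat these values as an assumption; the real ones live in asm/hvcall.h):

/* Assumed values, matching the old cases. */
#define H_VPA_FUNC_SHIFT	(63 - 18)	/* PAPR bits 18:20 */
#define H_VPA_FUNC_MASK		7UL
#define H_VPA_REG_VPA		1UL
#define H_VPA_REG_DTL		2UL
#define H_VPA_REG_SLB		3UL
#define H_VPA_DEREG_VPA		5UL
#define H_VPA_DEREG_DTL		6UL
#define H_VPA_DEREG_SLB		7UL

static unsigned long h_vpa_subfunc(unsigned long flags)
{
	return (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
}
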
@@ -142,88 +166,182 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, | |||
142 | unsigned long len, nb; | 166 | unsigned long len, nb; |
143 | void *va; | 167 | void *va; |
144 | struct kvm_vcpu *tvcpu; | 168 | struct kvm_vcpu *tvcpu; |
145 | int err = H_PARAMETER; | 169 | int err; |
170 | int subfunc; | ||
171 | struct kvmppc_vpa *vpap; | ||
146 | 172 | ||
147 | tvcpu = kvmppc_find_vcpu(kvm, vcpuid); | 173 | tvcpu = kvmppc_find_vcpu(kvm, vcpuid); |
148 | if (!tvcpu) | 174 | if (!tvcpu) |
149 | return H_PARAMETER; | 175 | return H_PARAMETER; |
150 | 176 | ||
151 | flags >>= 63 - 18; | 177 | subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK; |
152 | flags &= 7; | 178 | if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL || |
153 | if (flags == 0 || flags == 4) | 179 | subfunc == H_VPA_REG_SLB) { |
154 | return H_PARAMETER; | 180 | /* Registering new area - address must be cache-line aligned */ |
155 | if (flags < 4) { | 181 | if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) |
156 | if (vpa & 0x7f) | ||
157 | return H_PARAMETER; | 182 | return H_PARAMETER; |
158 | if (flags >= 2 && !tvcpu->arch.vpa) | 183 | |
159 | return H_RESOURCE; | 184 | /* convert logical addr to kernel addr and read length */ |
160 | /* registering new area; convert logical addr to real */ | ||
161 | va = kvmppc_pin_guest_page(kvm, vpa, &nb); | 185 | va = kvmppc_pin_guest_page(kvm, vpa, &nb); |
162 | if (va == NULL) | 186 | if (va == NULL) |
163 | return H_PARAMETER; | 187 | return H_PARAMETER; |
164 | if (flags <= 1) | 188 | if (subfunc == H_VPA_REG_VPA) |
165 | len = *(unsigned short *)(va + 4); | 189 | len = ((struct reg_vpa *)va)->length.hword; |
166 | else | 190 | else |
167 | len = *(unsigned int *)(va + 4); | 191 | len = ((struct reg_vpa *)va)->length.word; |
168 | if (len > nb) | 192 | kvmppc_unpin_guest_page(kvm, va); |
169 | goto out_unpin; | 193 | |
170 | switch (flags) { | 194 | /* Check length */ |
171 | case 1: /* register VPA */ | 195 | if (len > nb || len < sizeof(struct reg_vpa)) |
172 | if (len < 640) | 196 | return H_PARAMETER; |
173 | goto out_unpin; | 197 | } else { |
174 | if (tvcpu->arch.vpa) | 198 | vpa = 0; |
175 | kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa); | 199 | len = 0; |
176 | tvcpu->arch.vpa = va; | 200 | } |
177 | init_vpa(vcpu, va); | 201 | |
178 | break; | 202 | err = H_PARAMETER; |
179 | case 2: /* register DTL */ | 203 | vpap = NULL; |
180 | if (len < 48) | 204 | spin_lock(&tvcpu->arch.vpa_update_lock); |
181 | goto out_unpin; | 205 | |
182 | len -= len % 48; | 206 | switch (subfunc) { |
183 | if (tvcpu->arch.dtl) | 207 | case H_VPA_REG_VPA: /* register VPA */ |
184 | kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl); | 208 | if (len < sizeof(struct lppaca)) |
185 | tvcpu->arch.dtl = va; | ||
186 | tvcpu->arch.dtl_end = va + len; | ||
187 | break; | 209 | break; |
188 | case 3: /* register SLB shadow buffer */ | 210 | vpap = &tvcpu->arch.vpa; |
189 | if (len < 16) | 211 | err = 0; |
190 | goto out_unpin; | 212 | break; |
191 | if (tvcpu->arch.slb_shadow) | 213 | |
192 | kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow); | 214 | case H_VPA_REG_DTL: /* register DTL */ |
193 | tvcpu->arch.slb_shadow = va; | 215 | if (len < sizeof(struct dtl_entry)) |
194 | break; | 216 | break; |
195 | } | 217 | len -= len % sizeof(struct dtl_entry); |
196 | } else { | 218 | |
197 | switch (flags) { | 219 | /* Check that they have previously registered a VPA */ |
198 | case 5: /* unregister VPA */ | 220 | err = H_RESOURCE; |
199 | if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl) | 221 | if (!vpa_is_registered(&tvcpu->arch.vpa)) |
200 | return H_RESOURCE; | ||
201 | if (!tvcpu->arch.vpa) | ||
202 | break; | ||
203 | kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa); | ||
204 | tvcpu->arch.vpa = NULL; | ||
205 | break; | 222 | break; |
206 | case 6: /* unregister DTL */ | 223 | |
207 | if (!tvcpu->arch.dtl) | 224 | vpap = &tvcpu->arch.dtl; |
208 | break; | 225 | err = 0; |
209 | kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl); | 226 | break; |
210 | tvcpu->arch.dtl = NULL; | 227 | |
228 | case H_VPA_REG_SLB: /* register SLB shadow buffer */ | ||
229 | /* Check that they have previously registered a VPA */ | ||
230 | err = H_RESOURCE; | ||
231 | if (!vpa_is_registered(&tvcpu->arch.vpa)) | ||
211 | break; | 232 | break; |
212 | case 7: /* unregister SLB shadow buffer */ | 233 | |
213 | if (!tvcpu->arch.slb_shadow) | 234 | vpap = &tvcpu->arch.slb_shadow; |
214 | break; | 235 | err = 0; |
215 | kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow); | 236 | break; |
216 | tvcpu->arch.slb_shadow = NULL; | 237 | |
238 | case H_VPA_DEREG_VPA: /* deregister VPA */ | ||
239 | /* Check they don't still have a DTL or SLB buf registered */ | ||
240 | err = H_RESOURCE; | ||
241 | if (vpa_is_registered(&tvcpu->arch.dtl) || | ||
242 | vpa_is_registered(&tvcpu->arch.slb_shadow)) | ||
217 | break; | 243 | break; |
218 | } | 244 | |
245 | vpap = &tvcpu->arch.vpa; | ||
246 | err = 0; | ||
247 | break; | ||
248 | |||
249 | case H_VPA_DEREG_DTL: /* deregister DTL */ | ||
250 | vpap = &tvcpu->arch.dtl; | ||
251 | err = 0; | ||
252 | break; | ||
253 | |||
254 | case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */ | ||
255 | vpap = &tvcpu->arch.slb_shadow; | ||
256 | err = 0; | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | if (vpap) { | ||
261 | vpap->next_gpa = vpa; | ||
262 | vpap->len = len; | ||
263 | vpap->update_pending = 1; | ||
219 | } | 264 | } |
220 | return H_SUCCESS; | ||
221 | 265 | ||
222 | out_unpin: | 266 | spin_unlock(&tvcpu->arch.vpa_update_lock); |
223 | kvmppc_unpin_guest_page(kvm, va); | 267 | |
224 | return err; | 268 | return err; |
225 | } | 269 | } |
226 | 270 | ||
271 | static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap) | ||
272 | { | ||
273 | void *va; | ||
274 | unsigned long nb; | ||
275 | |||
276 | vpap->update_pending = 0; | ||
277 | va = NULL; | ||
278 | if (vpap->next_gpa) { | ||
279 | va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb); | ||
280 | if (nb < vpap->len) { | ||
281 | /* | ||
282 | * If it's now too short, it must be that userspace | ||
283 | * has changed the mappings underlying guest memory, | ||
284 | * so unregister the region. | ||
285 | */ | ||
286 | kvmppc_unpin_guest_page(kvm, va); | ||
287 | va = NULL; | ||
288 | } | ||
289 | } | ||
290 | if (vpap->pinned_addr) | ||
291 | kvmppc_unpin_guest_page(kvm, vpap->pinned_addr); | ||
292 | vpap->pinned_addr = va; | ||
293 | if (va) | ||
294 | vpap->pinned_end = va + vpap->len; | ||
295 | } | ||
296 | |||
297 | static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) | ||
298 | { | ||
299 | struct kvm *kvm = vcpu->kvm; | ||
300 | |||
301 | spin_lock(&vcpu->arch.vpa_update_lock); | ||
302 | if (vcpu->arch.vpa.update_pending) { | ||
303 | kvmppc_update_vpa(kvm, &vcpu->arch.vpa); | ||
304 | init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); | ||
305 | } | ||
306 | if (vcpu->arch.dtl.update_pending) { | ||
307 | kvmppc_update_vpa(kvm, &vcpu->arch.dtl); | ||
308 | vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; | ||
309 | vcpu->arch.dtl_index = 0; | ||
310 | } | ||
311 | if (vcpu->arch.slb_shadow.update_pending) | ||
312 | kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow); | ||
313 | spin_unlock(&vcpu->arch.vpa_update_lock); | ||
314 | } | ||
315 | |||
316 | static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | ||
317 | struct kvmppc_vcore *vc) | ||
318 | { | ||
319 | struct dtl_entry *dt; | ||
320 | struct lppaca *vpa; | ||
321 | unsigned long old_stolen; | ||
322 | |||
323 | dt = vcpu->arch.dtl_ptr; | ||
324 | vpa = vcpu->arch.vpa.pinned_addr; | ||
325 | old_stolen = vcpu->arch.stolen_logged; | ||
326 | vcpu->arch.stolen_logged = vc->stolen_tb; | ||
327 | if (!dt || !vpa) | ||
328 | return; | ||
329 | memset(dt, 0, sizeof(struct dtl_entry)); | ||
330 | dt->dispatch_reason = 7; | ||
331 | dt->processor_id = vc->pcpu + vcpu->arch.ptid; | ||
332 | dt->timebase = mftb(); | ||
333 | dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen; | ||
334 | dt->srr0 = kvmppc_get_pc(vcpu); | ||
335 | dt->srr1 = vcpu->arch.shregs.msr; | ||
336 | ++dt; | ||
337 | if (dt == vcpu->arch.dtl.pinned_end) | ||
338 | dt = vcpu->arch.dtl.pinned_addr; | ||
339 | vcpu->arch.dtl_ptr = dt; | ||
340 | /* order writing *dt vs. writing vpa->dtl_idx */ | ||
341 | smp_wmb(); | ||
342 | vpa->dtl_idx = ++vcpu->arch.dtl_index; | ||
343 | } | ||
344 | |||
227 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | 345 | int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) |
228 | { | 346 | { |
229 | unsigned long req = kvmppc_get_gpr(vcpu, 3); | 347 | unsigned long req = kvmppc_get_gpr(vcpu, 3); |
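
[Editor's note] kvmppc_create_dtl_entry() above fills a slot in the guest-supplied dispatch trace log, wraps at the pinned end, and only then publishes the new index; the smp_wmb() ensures the guest never observes a half-written entry. The pattern in isolation (simplified types, illustrative names):

#include <asm/barrier.h>

struct log_entry { u64 timebase; u64 srr0; };	/* stand-in for dtl_entry */

struct log_ring {
	struct log_entry *base, *end, *next;	/* pinned guest buffer */
	u32 *published;				/* index the guest polls */
	u32 idx;
};

static void log_push(struct log_ring *r, const struct log_entry *e)
{
	*r->next = *e;
	if (++r->next == r->end)
		r->next = r->base;	/* wrap the circular buffer */
	smp_wmb();			/* entry contents before index */
	*r->published = ++r->idx;
}
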
@@ -468,6 +586,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
468 | /* default to host PVR, since we can't spoof it */ | 586 | /* default to host PVR, since we can't spoof it */ |
469 | vcpu->arch.pvr = mfspr(SPRN_PVR); | 587 | vcpu->arch.pvr = mfspr(SPRN_PVR); |
470 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); | 588 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); |
589 | spin_lock_init(&vcpu->arch.vpa_update_lock); | ||
471 | 590 | ||
472 | kvmppc_mmu_book3s_hv_init(vcpu); | 591 | kvmppc_mmu_book3s_hv_init(vcpu); |
473 | 592 | ||
@@ -486,6 +605,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
486 | INIT_LIST_HEAD(&vcore->runnable_threads); | 605 | INIT_LIST_HEAD(&vcore->runnable_threads); |
487 | spin_lock_init(&vcore->lock); | 606 | spin_lock_init(&vcore->lock); |
488 | init_waitqueue_head(&vcore->wq); | 607 | init_waitqueue_head(&vcore->wq); |
608 | vcore->preempt_tb = mftb(); | ||
489 | } | 609 | } |
490 | kvm->arch.vcores[core] = vcore; | 610 | kvm->arch.vcores[core] = vcore; |
491 | } | 611 | } |
@@ -498,6 +618,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
498 | ++vcore->num_threads; | 618 | ++vcore->num_threads; |
499 | spin_unlock(&vcore->lock); | 619 | spin_unlock(&vcore->lock); |
500 | vcpu->arch.vcore = vcore; | 620 | vcpu->arch.vcore = vcore; |
621 | vcpu->arch.stolen_logged = vcore->stolen_tb; | ||
501 | 622 | ||
502 | vcpu->arch.cpu_type = KVM_CPU_3S_64; | 623 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
503 | kvmppc_sanity_check(vcpu); | 624 | kvmppc_sanity_check(vcpu); |
@@ -512,12 +633,14 @@ out: | |||
512 | 633 | ||
513 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | 634 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) |
514 | { | 635 | { |
515 | if (vcpu->arch.dtl) | 636 | spin_lock(&vcpu->arch.vpa_update_lock); |
516 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl); | 637 | if (vcpu->arch.dtl.pinned_addr) |
517 | if (vcpu->arch.slb_shadow) | 638 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr); |
518 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow); | 639 | if (vcpu->arch.slb_shadow.pinned_addr) |
519 | if (vcpu->arch.vpa) | 640 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr); |
520 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa); | 641 | if (vcpu->arch.vpa.pinned_addr) |
642 | kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr); | ||
643 | spin_unlock(&vcpu->arch.vpa_update_lock); | ||
521 | kvm_vcpu_uninit(vcpu); | 644 | kvm_vcpu_uninit(vcpu); |
522 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 645 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
523 | } | 646 | } |
@@ -569,6 +692,45 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | |||
569 | list_del(&vcpu->arch.run_list); | 692 | list_del(&vcpu->arch.run_list); |
570 | } | 693 | } |
571 | 694 | ||
695 | static int kvmppc_grab_hwthread(int cpu) | ||
696 | { | ||
697 | struct paca_struct *tpaca; | ||
698 | long timeout = 1000; | ||
699 | |||
700 | tpaca = &paca[cpu]; | ||
701 | |||
702 | /* Ensure the thread won't go into the kernel if it wakes */ | ||
703 | tpaca->kvm_hstate.hwthread_req = 1; | ||
704 | |||
705 | /* | ||
706 | * If the thread is already executing in the kernel (e.g. handling | ||
707 | * a stray interrupt), wait for it to get back to nap mode. | ||
708 | * The smp_mb() is to ensure that our setting of hwthread_req | ||
709 | * is visible before we look at hwthread_state, so if this | ||
710 | * races with the code at system_reset_pSeries and the thread | ||
711 | * misses our setting of hwthread_req, we are sure to see its | ||
712 | * setting of hwthread_state, and vice versa. | ||
713 | */ | ||
714 | smp_mb(); | ||
715 | while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { | ||
716 | if (--timeout <= 0) { | ||
717 | pr_err("KVM: couldn't grab cpu %d\n", cpu); | ||
718 | return -EBUSY; | ||
719 | } | ||
720 | udelay(1); | ||
721 | } | ||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static void kvmppc_release_hwthread(int cpu) | ||
726 | { | ||
727 | struct paca_struct *tpaca; | ||
728 | |||
729 | tpaca = &paca[cpu]; | ||
730 | tpaca->kvm_hstate.hwthread_req = 0; | ||
731 | tpaca->kvm_hstate.kvm_vcpu = NULL; | ||
732 | } | ||
733 | |||
572 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu) | 734 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu) |
573 | { | 735 | { |
574 | int cpu; | 736 | int cpu; |
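
[Editor's note] kvmppc_grab_hwthread() above is a request/acknowledge handshake: set hwthread_req, then poll hwthread_state with a bounded spin, relying on smp_mb() pairing with a barrier on the napping thread's side. Distilled into a standalone sketch (names illustrative; 'req' and 'state' stand in for the paca fields):

#include <linux/delay.h>
#include <asm/barrier.h>

static int grab_thread(volatile u8 *req, volatile u8 *state, u8 busy)
{
	int timeout = 1000;

	*req = 1;		/* keep the thread out of the kernel */
	smp_mb();		/* publish req before sampling state */
	while (*state == busy) {
		if (--timeout <= 0)
			return -EBUSY;	/* thread never reached nap */
		udelay(1);
	}
	return 0;
}
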
@@ -588,8 +750,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) | |||
588 | smp_wmb(); | 750 | smp_wmb(); |
589 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) | 751 | #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) |
590 | if (vcpu->arch.ptid) { | 752 | if (vcpu->arch.ptid) { |
591 | tpaca->cpu_start = 0x80; | 753 | kvmppc_grab_hwthread(cpu); |
592 | wmb(); | ||
593 | xics_wake_cpu(cpu); | 754 | xics_wake_cpu(cpu); |
594 | ++vc->n_woken; | 755 | ++vc->n_woken; |
595 | } | 756 | } |
@@ -639,7 +800,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
639 | struct kvm_vcpu *vcpu, *vcpu0, *vnext; | 800 | struct kvm_vcpu *vcpu, *vcpu0, *vnext; |
640 | long ret; | 801 | long ret; |
641 | u64 now; | 802 | u64 now; |
642 | int ptid; | 803 | int ptid, i; |
643 | 804 | ||
644 | /* don't start if any threads have a signal pending */ | 805 | /* don't start if any threads have a signal pending */ |
645 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | 806 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) |
@@ -681,17 +842,29 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
681 | vc->nap_count = 0; | 842 | vc->nap_count = 0; |
682 | vc->entry_exit_count = 0; | 843 | vc->entry_exit_count = 0; |
683 | vc->vcore_state = VCORE_RUNNING; | 844 | vc->vcore_state = VCORE_RUNNING; |
845 | vc->stolen_tb += mftb() - vc->preempt_tb; | ||
684 | vc->in_guest = 0; | 846 | vc->in_guest = 0; |
685 | vc->pcpu = smp_processor_id(); | 847 | vc->pcpu = smp_processor_id(); |
686 | vc->napping_threads = 0; | 848 | vc->napping_threads = 0; |
687 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) | 849 | list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { |
688 | kvmppc_start_thread(vcpu); | 850 | kvmppc_start_thread(vcpu); |
851 | if (vcpu->arch.vpa.update_pending || | ||
852 | vcpu->arch.slb_shadow.update_pending || | ||
853 | vcpu->arch.dtl.update_pending) | ||
854 | kvmppc_update_vpas(vcpu); | ||
855 | kvmppc_create_dtl_entry(vcpu, vc); | ||
856 | } | ||
857 | /* Grab any remaining hw threads so they can't go into the kernel */ | ||
858 | for (i = ptid; i < threads_per_core; ++i) | ||
859 | kvmppc_grab_hwthread(vc->pcpu + i); | ||
689 | 860 | ||
690 | preempt_disable(); | 861 | preempt_disable(); |
691 | spin_unlock(&vc->lock); | 862 | spin_unlock(&vc->lock); |
692 | 863 | ||
693 | kvm_guest_enter(); | 864 | kvm_guest_enter(); |
694 | __kvmppc_vcore_entry(NULL, vcpu0); | 865 | __kvmppc_vcore_entry(NULL, vcpu0); |
866 | for (i = 0; i < threads_per_core; ++i) | ||
867 | kvmppc_release_hwthread(vc->pcpu + i); | ||
695 | 868 | ||
696 | spin_lock(&vc->lock); | 869 | spin_lock(&vc->lock); |
697 | /* disable sending of IPIs on virtual external irqs */ | 870 | /* disable sending of IPIs on virtual external irqs */ |
@@ -737,6 +910,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc) | |||
737 | spin_lock(&vc->lock); | 910 | spin_lock(&vc->lock); |
738 | out: | 911 | out: |
739 | vc->vcore_state = VCORE_INACTIVE; | 912 | vc->vcore_state = VCORE_INACTIVE; |
913 | vc->preempt_tb = mftb(); | ||
740 | list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, | 914 | list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, |
741 | arch.run_list) { | 915 | arch.run_list) { |
742 | if (vcpu->arch.ret != RESUME_GUEST) { | 916 | if (vcpu->arch.ret != RESUME_GUEST) { |
@@ -835,6 +1009,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
835 | spin_lock(&vc->lock); | 1009 | spin_lock(&vc->lock); |
836 | continue; | 1010 | continue; |
837 | } | 1011 | } |
1012 | vc->runner = vcpu; | ||
838 | n_ceded = 0; | 1013 | n_ceded = 0; |
839 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) | 1014 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) |
840 | n_ceded += v->arch.ceded; | 1015 | n_ceded += v->arch.ceded; |
@@ -854,6 +1029,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
854 | wake_up(&v->arch.cpu_run); | 1029 | wake_up(&v->arch.cpu_run); |
855 | } | 1030 | } |
856 | } | 1031 | } |
1032 | vc->runner = NULL; | ||
857 | } | 1033 | } |
858 | 1034 | ||
859 | if (signal_pending(current)) { | 1035 | if (signal_pending(current)) { |
@@ -917,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
917 | return r; | 1093 | return r; |
918 | } | 1094 | } |
919 | 1095 | ||
920 | static long kvmppc_stt_npages(unsigned long window_size) | ||
921 | { | ||
922 | return ALIGN((window_size >> SPAPR_TCE_SHIFT) | ||
923 | * sizeof(u64), PAGE_SIZE) / PAGE_SIZE; | ||
924 | } | ||
925 | |||
926 | static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt) | ||
927 | { | ||
928 | struct kvm *kvm = stt->kvm; | ||
929 | int i; | ||
930 | |||
931 | mutex_lock(&kvm->lock); | ||
932 | list_del(&stt->list); | ||
933 | for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++) | ||
934 | __free_page(stt->pages[i]); | ||
935 | kfree(stt); | ||
936 | mutex_unlock(&kvm->lock); | ||
937 | |||
938 | kvm_put_kvm(kvm); | ||
939 | } | ||
940 | |||
941 | static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
942 | { | ||
943 | struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data; | ||
944 | struct page *page; | ||
945 | |||
946 | if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size)) | ||
947 | return VM_FAULT_SIGBUS; | ||
948 | |||
949 | page = stt->pages[vmf->pgoff]; | ||
950 | get_page(page); | ||
951 | vmf->page = page; | ||
952 | return 0; | ||
953 | } | ||
954 | |||
955 | static const struct vm_operations_struct kvm_spapr_tce_vm_ops = { | ||
956 | .fault = kvm_spapr_tce_fault, | ||
957 | }; | ||
958 | |||
959 | static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma) | ||
960 | { | ||
961 | vma->vm_ops = &kvm_spapr_tce_vm_ops; | ||
962 | return 0; | ||
963 | } | ||
964 | |||
965 | static int kvm_spapr_tce_release(struct inode *inode, struct file *filp) | ||
966 | { | ||
967 | struct kvmppc_spapr_tce_table *stt = filp->private_data; | ||
968 | |||
969 | release_spapr_tce_table(stt); | ||
970 | return 0; | ||
971 | } | ||
972 | |||
973 | static struct file_operations kvm_spapr_tce_fops = { | ||
974 | .mmap = kvm_spapr_tce_mmap, | ||
975 | .release = kvm_spapr_tce_release, | ||
976 | }; | ||
977 | |||
978 | long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, | ||
979 | struct kvm_create_spapr_tce *args) | ||
980 | { | ||
981 | struct kvmppc_spapr_tce_table *stt = NULL; | ||
982 | long npages; | ||
983 | int ret = -ENOMEM; | ||
984 | int i; | ||
985 | |||
986 | /* Check this LIOBN hasn't been previously allocated */ | ||
987 | list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) { | ||
988 | if (stt->liobn == args->liobn) | ||
989 | return -EBUSY; | ||
990 | } | ||
991 | |||
992 | npages = kvmppc_stt_npages(args->window_size); | ||
993 | |||
994 | stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *), | ||
995 | GFP_KERNEL); | ||
996 | if (!stt) | ||
997 | goto fail; | ||
998 | |||
999 | stt->liobn = args->liobn; | ||
1000 | stt->window_size = args->window_size; | ||
1001 | stt->kvm = kvm; | ||
1002 | |||
1003 | for (i = 0; i < npages; i++) { | ||
1004 | stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
1005 | if (!stt->pages[i]) | ||
1006 | goto fail; | ||
1007 | } | ||
1008 | |||
1009 | kvm_get_kvm(kvm); | ||
1010 | |||
1011 | mutex_lock(&kvm->lock); | ||
1012 | list_add(&stt->list, &kvm->arch.spapr_tce_tables); | ||
1013 | |||
1014 | mutex_unlock(&kvm->lock); | ||
1015 | |||
1016 | return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, | ||
1017 | stt, O_RDWR); | ||
1018 | |||
1019 | fail: | ||
1020 | if (stt) { | ||
1021 | for (i = 0; i < npages; i++) | ||
1022 | if (stt->pages[i]) | ||
1023 | __free_page(stt->pages[i]); | ||
1024 | |||
1025 | kfree(stt); | ||
1026 | } | ||
1027 | return ret; | ||
1028 | } | ||
1029 | 1096 | ||
1030 | /* Work out RMLS (real mode limit selector) field value for a given RMA size. | 1097 | /* Work out RMLS (real mode limit selector) field value for a given RMA size. |
1031 | Assumes POWER7 or PPC970. */ | 1098 | Assumes POWER7 or PPC970. */ |
@@ -1108,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) | |||
1108 | return fd; | 1175 | return fd; |
1109 | } | 1176 | } |
1110 | 1177 | ||
1178 | static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, | ||
1179 | int linux_psize) | ||
1180 | { | ||
1181 | struct mmu_psize_def *def = &mmu_psize_defs[linux_psize]; | ||
1182 | |||
1183 | if (!def->shift) | ||
1184 | return; | ||
1185 | (*sps)->page_shift = def->shift; | ||
1186 | (*sps)->slb_enc = def->sllp; | ||
1187 | (*sps)->enc[0].page_shift = def->shift; | ||
1188 | (*sps)->enc[0].pte_enc = def->penc; | ||
1189 | (*sps)++; | ||
1190 | } | ||
1191 | |||
1192 | int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | ||
1193 | { | ||
1194 | struct kvm_ppc_one_seg_page_size *sps; | ||
1195 | |||
1196 | info->flags = KVM_PPC_PAGE_SIZES_REAL; | ||
1197 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | ||
1198 | info->flags |= KVM_PPC_1T_SEGMENTS; | ||
1199 | info->slb_size = mmu_slb_size; | ||
1200 | |||
1201 | /* We only support these sizes for now, and no multi-size segments */ | ||
1202 | sps = &info->sps[0]; | ||
1203 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K); | ||
1204 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K); | ||
1205 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M); | ||
1206 | |||
1207 | return 0; | ||
1208 | } | ||
1209 | |||
1111 | /* | 1210 | /* |
1112 | * Get (and clear) the dirty memory log for a memory slot. | 1211 | * Get (and clear) the dirty memory log for a memory slot. |
1113 | */ | 1212 | */ |
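
[Editor's note] kvm_vm_ioctl_get_smmu_info() above advertises one sps[] entry per supported base page size. A sketch of how a consumer might walk the result, assuming the unfilled tail is zeroed by the ioctl path and that KVM_PPC_PAGE_SIZES_MAX_SZ bounds the array (both are assumptions here):

#include <stdio.h>
#include <linux/kvm.h>

/* Assumes a zero page_shift terminates the table. */
static void print_seg_sizes(const struct kvm_ppc_smmu_info *info)
{
	int i;

	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		const struct kvm_ppc_one_seg_page_size *sps = &info->sps[i];

		if (!sps->page_shift)
			break;
		printf("segment base 2^%u, slb_enc 0x%x, pte_enc 0x%x\n",
		       sps->page_shift, sps->slb_enc, sps->enc[0].pte_enc);
	}
}
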
@@ -1192,8 +1291,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id) | |||
1192 | continue; | 1291 | continue; |
1193 | pfn = physp[j] >> PAGE_SHIFT; | 1292 | pfn = physp[j] >> PAGE_SHIFT; |
1194 | page = pfn_to_page(pfn); | 1293 | page = pfn_to_page(pfn); |
1195 | if (PageHuge(page)) | ||
1196 | page = compound_head(page); | ||
1197 | SetPageDirty(page); | 1294 | SetPageDirty(page); |
1198 | put_page(page); | 1295 | put_page(page); |
1199 | } | 1296 | } |
@@ -1406,12 +1503,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1406 | return EMULATE_FAIL; | 1503 | return EMULATE_FAIL; |
1407 | } | 1504 | } |
1408 | 1505 | ||
1409 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 1506 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
1410 | { | 1507 | { |
1411 | return EMULATE_FAIL; | 1508 | return EMULATE_FAIL; |
1412 | } | 1509 | } |
1413 | 1510 | ||
1414 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 1511 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
1415 | { | 1512 | { |
1416 | return EMULATE_FAIL; | 1513 | return EMULATE_FAIL; |
1417 | } | 1514 | } |
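
[Editor's note] The mtspr/mfspr hook changes in this file (and in book3s_emulate.c above) push GPR access up into the generic emulator, so backends deal only in SPR values. A hedged sketch of the shape the generic caller takes after this change (the actual wrappers live in emulate.c):

static int emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);	/* read GPR once, generically */

	return kvmppc_core_emulate_mtspr(vcpu, sprn, spr_val);
}

static int emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	ulong spr_val;
	int ret = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);

	if (ret == EMULATE_DONE)
		kvmppc_set_gpr(vcpu, rt, spr_val);	/* GPR write stays generic */
	return ret;
}
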
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index d3fb4df02c41..84035a528c80 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S | |||
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
68 | rotldi r10,r10,16 | 68 | rotldi r10,r10,16 |
69 | mtmsrd r10,1 | 69 | mtmsrd r10,1 |
70 | 70 | ||
71 | /* Save host PMU registers and load guest PMU registers */ | 71 | /* Save host PMU registers */ |
72 | /* R4 is live here (vcpu pointer) but not r3 or r5 */ | 72 | /* R4 is live here (vcpu pointer) but not r3 or r5 */ |
73 | li r3, 1 | 73 | li r3, 1 |
74 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | 74 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
75 | mfspr r7, SPRN_MMCR0 /* save MMCR0 */ | 75 | mfspr r7, SPRN_MMCR0 /* save MMCR0 */ |
76 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ | 76 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ |
77 | mfspr r6, SPRN_MMCRA | ||
78 | BEGIN_FTR_SECTION | ||
79 | /* On P7, clear MMCRA in order to disable SDAR updates */ | ||
80 | li r5, 0 | ||
81 | mtspr SPRN_MMCRA, r5 | ||
82 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
77 | isync | 83 | isync |
78 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ | 84 | ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ |
79 | lbz r5, LPPACA_PMCINUSE(r3) | 85 | lbz r5, LPPACA_PMCINUSE(r3) |
80 | cmpwi r5, 0 | 86 | cmpwi r5, 0 |
81 | beq 31f /* skip if not */ | 87 | beq 31f /* skip if not */ |
82 | mfspr r5, SPRN_MMCR1 | 88 | mfspr r5, SPRN_MMCR1 |
83 | mfspr r6, SPRN_MMCRA | ||
84 | std r7, HSTATE_MMCR(r13) | 89 | std r7, HSTATE_MMCR(r13) |
85 | std r5, HSTATE_MMCR + 8(r13) | 90 | std r5, HSTATE_MMCR + 8(r13) |
86 | std r6, HSTATE_MMCR + 16(r13) | 91 | std r6, HSTATE_MMCR + 16(r13) |
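
[Editor's note] The interrupts.S change saves MMCRA unconditionally and, on POWER7, clears it before the guest PMU context is loaded so SDAR stops being updated. The same logic rendered in C for readability (the shipped code is assembly; this is a paraphrase, not a drop-in):

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/synch.h>

static unsigned long freeze_host_pmu(void)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);	/* save before clearing */

	mtspr(SPRN_MMCR0, MMCR0_FC);	/* freeze counters, no PMU interrupts */
	if (cpu_has_feature(CPU_FTR_ARCH_206))
		mtspr(SPRN_MMCRA, 0);	/* P7: disable SDAR updates */
	isync();
	return mmcra;	/* caller stores it alongside MMCR0/MMCR1 */
}
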
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index def880aea63a..cec4daddbf31 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) | |||
463 | /* insert R and C bits from PTE */ | 463 | /* insert R and C bits from PTE */ |
464 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); | 464 | rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); |
465 | args[j] |= rcbits << (56 - 5); | 465 | args[j] |= rcbits << (56 - 5); |
466 | hp[0] = 0; | ||
466 | continue; | 467 | continue; |
467 | } | 468 | } |
468 | 469 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index b70bf22a3ff3..a84aafce2a12 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/hvcall.h> | 26 | #include <asm/hvcall.h> |
27 | #include <asm/asm-offsets.h> | 27 | #include <asm/asm-offsets.h> |
28 | #include <asm/exception-64s.h> | 28 | #include <asm/exception-64s.h> |
29 | #include <asm/kvm_book3s_asm.h> | ||
29 | 30 | ||
30 | /***************************************************************************** | 31 | /***************************************************************************** |
31 | * * | 32 | * * |
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline) | |||
82 | 83 | ||
83 | #define XICS_XIRR 4 | 84 | #define XICS_XIRR 4 |
84 | #define XICS_QIRR 0xc | 85 | #define XICS_QIRR 0xc |
86 | #define XICS_IPI 2 /* interrupt source # for IPIs */ | ||
85 | 87 | ||
86 | /* | 88 | /* |
87 | * We come in here when wakened from nap mode on a secondary hw thread. | 89 | * We come in here when wakened from nap mode on a secondary hw thread. |
@@ -94,26 +96,54 @@ kvm_start_guest: | |||
94 | subi r1,r1,STACK_FRAME_OVERHEAD | 96 | subi r1,r1,STACK_FRAME_OVERHEAD |
95 | ld r2,PACATOC(r13) | 97 | ld r2,PACATOC(r13) |
96 | 98 | ||
97 | /* were we napping due to cede? */ | 99 | li r0,KVM_HWTHREAD_IN_KVM |
98 | lbz r0,HSTATE_NAPPING(r13) | 100 | stb r0,HSTATE_HWTHREAD_STATE(r13) |
99 | cmpwi r0,0 | ||
100 | bne kvm_end_cede | ||
101 | 101 | ||
102 | /* get vcpu pointer */ | 102 | /* NV GPR values from power7_idle() will no longer be valid */ |
103 | ld r4, HSTATE_KVM_VCPU(r13) | 103 | li r0,1 |
104 | stb r0,PACA_NAPSTATELOST(r13) | ||
104 | 105 | ||
105 | /* We got here with an IPI; clear it */ | 106 | /* get vcpu pointer, NULL if we have no vcpu to run */ |
106 | ld r5, HSTATE_XICS_PHYS(r13) | 107 | ld r4,HSTATE_KVM_VCPU(r13) |
107 | li r0, 0xff | 108 | cmpdi cr1,r4,0 |
108 | li r6, XICS_QIRR | 109 | |
109 | li r7, XICS_XIRR | 110 | /* Check the wake reason in SRR1 to see why we got here */ |
110 | lwzcix r8, r5, r7 /* ack the interrupt */ | 111 | mfspr r3,SPRN_SRR1 |
112 | rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ | ||
113 | cmpwi r3,4 /* was it an external interrupt? */ | ||
114 | bne 27f | ||
115 | |||
116 | /* | ||
117 | * External interrupt - for now assume it is an IPI, since we | ||
118 | * should never get any other interrupts sent to offline threads. | ||
119 | * Only do this for secondary threads. | ||
120 | */ | ||
121 | beq cr1,25f | ||
122 | lwz r3,VCPU_PTID(r4) | ||
123 | cmpwi r3,0 | ||
124 | beq 27f | ||
125 | 25: ld r5,HSTATE_XICS_PHYS(r13) | ||
126 | li r0,0xff | ||
127 | li r6,XICS_QIRR | ||
128 | li r7,XICS_XIRR | ||
129 | lwzcix r8,r5,r7 /* get and ack the interrupt */ | ||
111 | sync | 130 | sync |
112 | stbcix r0, r5, r6 /* clear it */ | 131 | clrldi. r9,r8,40 /* get interrupt source ID. */ |
113 | stwcix r8, r5, r7 /* EOI it */ | 132 | beq 27f /* none there? */ |
133 | cmpwi r9,XICS_IPI | ||
134 | bne 26f | ||
135 | stbcix r0,r5,r6 /* clear IPI */ | ||
136 | 26: stwcix r8,r5,r7 /* EOI the interrupt */ | ||
114 | 137 | ||
115 | /* NV GPR values from power7_idle() will no longer be valid */ | 138 | 27: /* XXX should handle hypervisor maintenance interrupts etc. here */ |
116 | stb r0, PACA_NAPSTATELOST(r13) | 139 | |
140 | /* if we have no vcpu to run, go back to sleep */ | ||
141 | beq cr1,kvm_no_guest | ||
142 | |||
143 | /* were we napping due to cede? */ | ||
144 | lbz r0,HSTATE_NAPPING(r13) | ||
145 | cmpwi r0,0 | ||
146 | bne kvm_end_cede | ||
117 | 147 | ||
118 | .global kvmppc_hv_entry | 148 | .global kvmppc_hv_entry |
119 | kvmppc_hv_entry: | 149 | kvmppc_hv_entry: |
@@ -129,24 +159,15 @@ kvmppc_hv_entry: | |||
129 | mflr r0 | 159 | mflr r0 |
130 | std r0, HSTATE_VMHANDLER(r13) | 160 | std r0, HSTATE_VMHANDLER(r13) |
131 | 161 | ||
132 | ld r14, VCPU_GPR(r14)(r4) | 162 | /* Set partition DABR */ |
133 | ld r15, VCPU_GPR(r15)(r4) | 163 | /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ |
134 | ld r16, VCPU_GPR(r16)(r4) | 164 | li r5,3 |
135 | ld r17, VCPU_GPR(r17)(r4) | 165 | ld r6,VCPU_DABR(r4) |
136 | ld r18, VCPU_GPR(r18)(r4) | 166 | mtspr SPRN_DABRX,r5 |
137 | ld r19, VCPU_GPR(r19)(r4) | 167 | mtspr SPRN_DABR,r6 |
138 | ld r20, VCPU_GPR(r20)(r4) | 168 | BEGIN_FTR_SECTION |
139 | ld r21, VCPU_GPR(r21)(r4) | 169 | isync |
140 | ld r22, VCPU_GPR(r22)(r4) | 170 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
141 | ld r23, VCPU_GPR(r23)(r4) | ||
142 | ld r24, VCPU_GPR(r24)(r4) | ||
143 | ld r25, VCPU_GPR(r25)(r4) | ||
144 | ld r26, VCPU_GPR(r26)(r4) | ||
145 | ld r27, VCPU_GPR(r27)(r4) | ||
146 | ld r28, VCPU_GPR(r28)(r4) | ||
147 | ld r29, VCPU_GPR(r29)(r4) | ||
148 | ld r30, VCPU_GPR(r30)(r4) | ||
149 | ld r31, VCPU_GPR(r31)(r4) | ||
150 | 171 | ||
151 | /* Load guest PMU registers */ | 172 | /* Load guest PMU registers */ |
152 | /* R4 is live here (vcpu pointer) */ | 173 | /* R4 is live here (vcpu pointer) */ |
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
185 | /* Load up FP, VMX and VSX registers */ | 206 | /* Load up FP, VMX and VSX registers */ |
186 | bl kvmppc_load_fp | 207 | bl kvmppc_load_fp |
187 | 208 | ||
209 | ld r14, VCPU_GPR(r14)(r4) | ||
210 | ld r15, VCPU_GPR(r15)(r4) | ||
211 | ld r16, VCPU_GPR(r16)(r4) | ||
212 | ld r17, VCPU_GPR(r17)(r4) | ||
213 | ld r18, VCPU_GPR(r18)(r4) | ||
214 | ld r19, VCPU_GPR(r19)(r4) | ||
215 | ld r20, VCPU_GPR(r20)(r4) | ||
216 | ld r21, VCPU_GPR(r21)(r4) | ||
217 | ld r22, VCPU_GPR(r22)(r4) | ||
218 | ld r23, VCPU_GPR(r23)(r4) | ||
219 | ld r24, VCPU_GPR(r24)(r4) | ||
220 | ld r25, VCPU_GPR(r25)(r4) | ||
221 | ld r26, VCPU_GPR(r26)(r4) | ||
222 | ld r27, VCPU_GPR(r27)(r4) | ||
223 | ld r28, VCPU_GPR(r28)(r4) | ||
224 | ld r29, VCPU_GPR(r29)(r4) | ||
225 | ld r30, VCPU_GPR(r30)(r4) | ||
226 | ld r31, VCPU_GPR(r31)(r4) | ||
227 | |||
188 | BEGIN_FTR_SECTION | 228 | BEGIN_FTR_SECTION |
189 | /* Switch DSCR to guest value */ | 229 | /* Switch DSCR to guest value */ |
190 | ld r5, VCPU_DSCR(r4) | 230 | ld r5, VCPU_DSCR(r4) |
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
226 | mtspr SPRN_DAR, r5 | 266 | mtspr SPRN_DAR, r5 |
227 | mtspr SPRN_DSISR, r6 | 267 | mtspr SPRN_DSISR, r6 |
228 | 268 | ||
229 | /* Set partition DABR */ | ||
230 | li r5,3 | ||
231 | ld r6,VCPU_DABR(r4) | ||
232 | mtspr SPRN_DABRX,r5 | ||
233 | mtspr SPRN_DABR,r6 | ||
234 | |||
235 | BEGIN_FTR_SECTION | 269 | BEGIN_FTR_SECTION |
236 | /* Restore AMR and UAMOR, set AMOR to all 1s */ | 270 | /* Restore AMR and UAMOR, set AMOR to all 1s */ |
237 | ld r5,VCPU_AMR(r4) | 271 | ld r5,VCPU_AMR(r4) |
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION | |||
925 | mtspr SPRN_AMR,r6 | 959 | mtspr SPRN_AMR,r6 |
926 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 960 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
927 | 961 | ||
928 | /* Restore host DABR and DABRX */ | ||
929 | ld r5,HSTATE_DABR(r13) | ||
930 | li r6,7 | ||
931 | mtspr SPRN_DABR,r5 | ||
932 | mtspr SPRN_DABRX,r6 | ||
933 | |||
934 | /* Switch DSCR back to host value */ | 962 | /* Switch DSCR back to host value */ |
935 | BEGIN_FTR_SECTION | 963 | BEGIN_FTR_SECTION |
936 | mfspr r8, SPRN_DSCR | 964 | mfspr r8, SPRN_DSCR |
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
969 | std r5, VCPU_SPRG2(r9) | 997 | std r5, VCPU_SPRG2(r9) |
970 | std r6, VCPU_SPRG3(r9) | 998 | std r6, VCPU_SPRG3(r9) |
971 | 999 | ||
1000 | /* save FP state */ | ||
1001 | mr r3, r9 | ||
1002 | bl .kvmppc_save_fp | ||
1003 | |||
972 | /* Increment yield count if they have a VPA */ | 1004 | /* Increment yield count if they have a VPA */ |
973 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ | 1005 | ld r8, VCPU_VPA(r9) /* do they have a VPA? */ |
974 | cmpdi r8, 0 | 1006 | cmpdi r8, 0 |
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
983 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ | 1015 | sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ |
984 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ | 1016 | mfspr r4, SPRN_MMCR0 /* save MMCR0 */ |
985 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ | 1017 | mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ |
1018 | mfspr r6, SPRN_MMCRA | ||
1019 | BEGIN_FTR_SECTION | ||
1020 | /* On P7, clear MMCRA in order to disable SDAR updates */ | ||
1021 | li r7, 0 | ||
1022 | mtspr SPRN_MMCRA, r7 | ||
1023 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
986 | isync | 1024 | isync |
987 | beq 21f /* if no VPA, save PMU stuff anyway */ | 1025 | beq 21f /* if no VPA, save PMU stuff anyway */ |
988 | lbz r7, LPPACA_PMCINUSE(r8) | 1026 | lbz r7, LPPACA_PMCINUSE(r8) |
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
991 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ | 1029 | std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ |
992 | b 22f | 1030 | b 22f |
993 | 21: mfspr r5, SPRN_MMCR1 | 1031 | 21: mfspr r5, SPRN_MMCR1 |
994 | mfspr r6, SPRN_MMCRA | ||
995 | std r4, VCPU_MMCR(r9) | 1032 | std r4, VCPU_MMCR(r9) |
996 | std r5, VCPU_MMCR + 8(r9) | 1033 | std r5, VCPU_MMCR + 8(r9) |
997 | std r6, VCPU_MMCR + 16(r9) | 1034 | std r6, VCPU_MMCR + 16(r9) |
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION | |||
1016 | stw r11, VCPU_PMC + 28(r9) | 1053 | stw r11, VCPU_PMC + 28(r9) |
1017 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | 1054 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) |
1018 | 22: | 1055 | 22: |
1019 | /* save FP state */ | ||
1020 | mr r3, r9 | ||
1021 | bl .kvmppc_save_fp | ||
1022 | 1056 | ||
1023 | /* Secondary threads go off to take a nap on POWER7 */ | 1057 | /* Secondary threads go off to take a nap on POWER7 */ |
1024 | BEGIN_FTR_SECTION | 1058 | BEGIN_FTR_SECTION |
1025 | lwz r0,VCPU_PTID(r3) | 1059 | lwz r0,VCPU_PTID(r9) |
1026 | cmpwi r0,0 | 1060 | cmpwi r0,0 |
1027 | bne secondary_nap | 1061 | bne secondary_nap |
1028 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | 1062 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) |
1029 | 1063 | ||
1064 | /* Restore host DABR and DABRX */ | ||
1065 | ld r5,HSTATE_DABR(r13) | ||
1066 | li r6,7 | ||
1067 | mtspr SPRN_DABR,r5 | ||
1068 | mtspr SPRN_DABRX,r6 | ||
1069 | |||
1030 | /* | 1070 | /* |
1031 | * Reload DEC. HDEC interrupts were disabled when | 1071 | * Reload DEC. HDEC interrupts were disabled when |
1032 | * we reloaded the host's LPCR value. | 1072 | * we reloaded the host's LPCR value. |
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt: | |||
1363 | 1403 | ||
1364 | _GLOBAL(kvmppc_h_set_dabr) | 1404 | _GLOBAL(kvmppc_h_set_dabr) |
1365 | std r4,VCPU_DABR(r3) | 1405 | std r4,VCPU_DABR(r3) |
1366 | mtspr SPRN_DABR,r4 | 1406 | /* Work around P7 bug where DABR can get corrupted on mtspr */ |
1407 | 1: mtspr SPRN_DABR,r4 | ||
1408 | mfspr r5, SPRN_DABR | ||
1409 | cmpd r4, r5 | ||
1410 | bne 1b | ||
1411 | isync | ||
1367 | li r3,0 | 1412 | li r3,0 |
1368 | blr | 1413 | blr |
1369 | 1414 | ||
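
[Editor's note] The kvmppc_h_set_dabr change works around a POWER7 erratum by rewriting DABR until the readback matches. In C terms (illustrative paraphrase of the assembly loop above):

#include <asm/reg.h>
#include <asm/synch.h>

static void set_dabr_p7_workaround(unsigned long dabr)
{
	do {
		mtspr(SPRN_DABR, dabr);
	} while (mfspr(SPRN_DABR) != dabr);	/* retry until it sticks */
	isync();
}
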
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
1445 | * Take a nap until a decrementer or external interrupt occurs, | 1490 | * Take a nap until a decrementer or external interrupt occurs, |
1446 | * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR | 1491 | * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR |
1447 | */ | 1492 | */ |
1448 | li r0,0x80 | 1493 | li r0,1 |
1449 | stb r0,PACAPROCSTART(r13) | 1494 | stb r0,HSTATE_HWTHREAD_REQ(r13) |
1450 | mfspr r5,SPRN_LPCR | 1495 | mfspr r5,SPRN_LPCR |
1451 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 | 1496 | ori r5,r5,LPCR_PECE0 | LPCR_PECE1 |
1452 | mtspr SPRN_LPCR,r5 | 1497 | mtspr SPRN_LPCR,r5 |
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
1463 | kvm_end_cede: | 1508 | kvm_end_cede: |
1464 | /* Woken by external or decrementer interrupt */ | 1509 | /* Woken by external or decrementer interrupt */ |
1465 | ld r1, HSTATE_HOST_R1(r13) | 1510 | ld r1, HSTATE_HOST_R1(r13) |
1466 | ld r2, PACATOC(r13) | ||
1467 | 1511 | ||
1468 | /* If we're a secondary thread and we got here by an IPI, ack it */ | ||
1469 | ld r4,HSTATE_KVM_VCPU(r13) | ||
1470 | lwz r3,VCPU_PTID(r4) | ||
1471 | cmpwi r3,0 | ||
1472 | beq 27f | ||
1473 | mfspr r3,SPRN_SRR1 | ||
1474 | rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ | ||
1475 | cmpwi r3,4 /* was it an external interrupt? */ | ||
1476 | bne 27f | ||
1477 | ld r5, HSTATE_XICS_PHYS(r13) | ||
1478 | li r0,0xff | ||
1479 | li r6,XICS_QIRR | ||
1480 | li r7,XICS_XIRR | ||
1481 | lwzcix r8,r5,r7 /* ack the interrupt */ | ||
1482 | sync | ||
1483 | stbcix r0,r5,r6 /* clear it */ | ||
1484 | stwcix r8,r5,r7 /* EOI it */ | ||
1485 | 27: | ||
1486 | /* load up FP state */ | 1512 | /* load up FP state */ |
1487 | bl kvmppc_load_fp | 1513 | bl kvmppc_load_fp |
1488 | 1514 | ||
@@ -1580,12 +1606,17 @@ secondary_nap: | |||
1580 | stwcx. r3, 0, r4 | 1606 | stwcx. r3, 0, r4 |
1581 | bne 51b | 1607 | bne 51b |
1582 | 1608 | ||
1609 | kvm_no_guest: | ||
1610 | li r0, KVM_HWTHREAD_IN_NAP | ||
1611 | stb r0, HSTATE_HWTHREAD_STATE(r13) | ||
1612 | li r0, 0 | ||
1613 | std r0, HSTATE_KVM_VCPU(r13) | ||
1614 | |||
1583 | li r3, LPCR_PECE0 | 1615 | li r3, LPCR_PECE0 |
1584 | mfspr r4, SPRN_LPCR | 1616 | mfspr r4, SPRN_LPCR |
1585 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 | 1617 | rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 |
1586 | mtspr SPRN_LPCR, r4 | 1618 | mtspr SPRN_LPCR, r4 |
1587 | isync | 1619 | isync |
1588 | li r0, 0 | ||
1589 | std r0, HSTATE_SCRATCH0(r13) | 1620 | std r0, HSTATE_SCRATCH0(r13) |
1590 | ptesync | 1621 | ptesync |
1591 | ld r0, HSTATE_SCRATCH0(r13) | 1622 | ld r0, HSTATE_SCRATCH0(r13) |
@@ -1599,8 +1630,8 @@ secondary_nap: | |||
1599 | * r3 = vcpu pointer | 1630 | * r3 = vcpu pointer |
1600 | */ | 1631 | */ |
1601 | _GLOBAL(kvmppc_save_fp) | 1632 | _GLOBAL(kvmppc_save_fp) |
1602 | mfmsr r9 | 1633 | mfmsr r5 |
1603 | ori r8,r9,MSR_FP | 1634 | ori r8,r5,MSR_FP |
1604 | #ifdef CONFIG_ALTIVEC | 1635 | #ifdef CONFIG_ALTIVEC |
1605 | BEGIN_FTR_SECTION | 1636 | BEGIN_FTR_SECTION |
1606 | oris r8,r8,MSR_VEC@h | 1637 | oris r8,r8,MSR_VEC@h |
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
1649 | #endif | 1680 | #endif |
1650 | mfspr r6,SPRN_VRSAVE | 1681 | mfspr r6,SPRN_VRSAVE |
1651 | stw r6,VCPU_VRSAVE(r3) | 1682 | stw r6,VCPU_VRSAVE(r3) |
1652 | mtmsrd r9 | 1683 | mtmsrd r5 |
1653 | isync | 1684 | isync |
1654 | blr | 1685 | blr |
1655 | 1686 | ||
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 7759053d391b..a1baec340f7e 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
120 | if (msr & MSR_POW) { | 120 | if (msr & MSR_POW) { |
121 | if (!vcpu->arch.pending_exceptions) { | 121 | if (!vcpu->arch.pending_exceptions) { |
122 | kvm_vcpu_block(vcpu); | 122 | kvm_vcpu_block(vcpu); |
123 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
123 | vcpu->stat.halt_wakeup++; | 124 | vcpu->stat.halt_wakeup++; |
124 | 125 | ||
125 | /* Unset POW bit after we woke up */ | 126 | /* Unset POW bit after we woke up */ |
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | |||
144 | } | 145 | } |
145 | } | 146 | } |
146 | 147 | ||
148 | /* | ||
149 | * When switching from 32 to 64-bit, we may have a stale 32-bit | ||
150 | * magic page around, we need to flush it. Typically 32-bit magic | ||
151 | * page will be instanciated when calling into RTAS. Note: We | ||
152 | * assume that such transition only happens while in kernel mode, | ||
153 | * ie, we never transition from user 32-bit to kernel 64-bit with | ||
154 | * a 32-bit magic page around. | ||
155 | */ | ||
156 | if (vcpu->arch.magic_page_pa && | ||
157 | !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { | ||
158 | /* going from RTAS to normal kernel code */ | ||
159 | kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, | ||
160 | ~0xFFFUL); | ||
161 | } | ||
162 | |||
147 | /* Preload FPU if it's enabled */ | 163 | /* Preload FPU if it's enabled */ |
148 | if (vcpu->arch.shared->msr & MSR_FP) | 164 | if (vcpu->arch.shared->msr & MSR_FP) |
149 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 165 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
251 | { | 267 | { |
252 | ulong mp_pa = vcpu->arch.magic_page_pa; | 268 | ulong mp_pa = vcpu->arch.magic_page_pa; |
253 | 269 | ||
270 | if (!(vcpu->arch.shared->msr & MSR_SF)) | ||
271 | mp_pa = (uint32_t)mp_pa; | ||
272 | |||
254 | if (unlikely(mp_pa) && | 273 | if (unlikely(mp_pa) && |
255 | unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { | 274 | unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { |
256 | return 1; | 275 | return 1; |
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
351 | /* MMIO */ | 370 | /* MMIO */ |
352 | vcpu->stat.mmio_exits++; | 371 | vcpu->stat.mmio_exits++; |
353 | vcpu->arch.paddr_accessed = pte.raddr; | 372 | vcpu->arch.paddr_accessed = pte.raddr; |
373 | vcpu->arch.vaddr_accessed = pte.eaddr; | ||
354 | r = kvmppc_emulate_mmio(run, vcpu); | 374 | r = kvmppc_emulate_mmio(run, vcpu); |
355 | if ( r == RESUME_HOST_NV ) | 375 | if ( r == RESUME_HOST_NV ) |
356 | r = RESUME_HOST; | 376 | r = RESUME_HOST; |
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
528 | run->exit_reason = KVM_EXIT_UNKNOWN; | 548 | run->exit_reason = KVM_EXIT_UNKNOWN; |
529 | run->ready_for_interrupt_injection = 1; | 549 | run->ready_for_interrupt_injection = 1; |
530 | 550 | ||
551 | /* We get here with MSR.EE=0, so enable it to be a nice citizen */ | ||
552 | __hard_irq_enable(); | ||
553 | |||
531 | trace_kvm_book3s_exit(exit_nr, vcpu); | 554 | trace_kvm_book3s_exit(exit_nr, vcpu); |
532 | preempt_enable(); | 555 | preempt_enable(); |
533 | kvm_resched(vcpu); | 556 | kvm_resched(vcpu); |
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
617 | break; | 640 | break; |
618 | /* We're good on these - the host merely wanted to get our attention */ | 641 | /* We're good on these - the host merely wanted to get our attention */ |
619 | case BOOK3S_INTERRUPT_DECREMENTER: | 642 | case BOOK3S_INTERRUPT_DECREMENTER: |
643 | case BOOK3S_INTERRUPT_HV_DECREMENTER: | ||
620 | vcpu->stat.dec_exits++; | 644 | vcpu->stat.dec_exits++; |
621 | r = RESUME_GUEST; | 645 | r = RESUME_GUEST; |
622 | break; | 646 | break; |
623 | case BOOK3S_INTERRUPT_EXTERNAL: | 647 | case BOOK3S_INTERRUPT_EXTERNAL: |
648 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: | ||
649 | case BOOK3S_INTERRUPT_EXTERNAL_HV: | ||
624 | vcpu->stat.ext_intr_exits++; | 650 | vcpu->stat.ext_intr_exits++; |
625 | r = RESUME_GUEST; | 651 | r = RESUME_GUEST; |
626 | break; | 652 | break; |
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
628 | r = RESUME_GUEST; | 654 | r = RESUME_GUEST; |
629 | break; | 655 | break; |
630 | case BOOK3S_INTERRUPT_PROGRAM: | 656 | case BOOK3S_INTERRUPT_PROGRAM: |
657 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: | ||
631 | { | 658 | { |
632 | enum emulation_result er; | 659 | enum emulation_result er; |
633 | struct kvmppc_book3s_shadow_vcpu *svcpu; | 660 | struct kvmppc_book3s_shadow_vcpu *svcpu; |
@@ -1131,6 +1158,31 @@ out: | |||
1131 | return r; | 1158 | return r; |
1132 | } | 1159 | } |
1133 | 1160 | ||
1161 | #ifdef CONFIG_PPC64 | ||
1162 | int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info) | ||
1163 | { | ||
1164 | /* No flags */ | ||
1165 | info->flags = 0; | ||
1166 | |||
1167 | /* SLB is always 64 entries */ | ||
1168 | info->slb_size = 64; | ||
1169 | |||
1170 | /* Standard 4k base page size segment */ | ||
1171 | info->sps[0].page_shift = 12; | ||
1172 | info->sps[0].slb_enc = 0; | ||
1173 | info->sps[0].enc[0].page_shift = 12; | ||
1174 | info->sps[0].enc[0].pte_enc = 0; | ||
1175 | |||
1176 | /* Standard 16M large page size segment */ | ||
1177 | info->sps[1].page_shift = 24; | ||
1178 | info->sps[1].slb_enc = SLB_VSID_L; | ||
1179 | info->sps[1].enc[0].page_shift = 24; | ||
1180 | info->sps[1].enc[0].pte_enc = 0; | ||
1181 | |||
1182 | return 0; | ||
1183 | } | ||
1184 | #endif /* CONFIG_PPC64 */ | ||
1185 | |||
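This structure is what the KVM_PPC_GET_SMMU_INFO vm ioctl hands back to userspace (the ioctl path zeroes the struct first, so unused sps[] slots have page_shift == 0). A minimal sketch of how a userspace client might walk it; the vm_fd handle and error handling here are illustrative, not part of this patch:

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static void dump_smmu_info(int vm_fd)
	{
		struct kvm_ppc_smmu_info info;
		int i;

		if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
			return;

		printf("slb_size=%u\n", info.slb_size);
		for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
			if (!info.sps[i].page_shift)
				continue;	/* unused slot */
			printf("segment base page shift %u, slb_enc 0x%x\n",
			       info.sps[i].page_shift, info.sps[i].slb_enc);
		}
	}

For the PR implementation above, this reports exactly two segment page sizes: the standard 4k base page and the 16M large page.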
1134 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, | 1186 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
1135 | struct kvm_userspace_memory_region *mem) | 1187 | struct kvm_userspace_memory_region *mem) |
1136 | { | 1188 | { |
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, | |||
1144 | 1196 | ||
1145 | int kvmppc_core_init_vm(struct kvm *kvm) | 1197 | int kvmppc_core_init_vm(struct kvm *kvm) |
1146 | { | 1198 | { |
1199 | #ifdef CONFIG_PPC64 | ||
1200 | INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); | ||
1201 | #endif | ||
1202 | |||
1147 | return 0; | 1203 | return 0; |
1148 | } | 1204 | } |
1149 | 1205 | ||
1150 | void kvmppc_core_destroy_vm(struct kvm *kvm) | 1206 | void kvmppc_core_destroy_vm(struct kvm *kvm) |
1151 | { | 1207 | { |
1208 | #ifdef CONFIG_PPC64 | ||
1209 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | ||
1210 | #endif | ||
1152 | } | 1211 | } |
1153 | 1212 | ||
1154 | static int kvmppc_book3s_init(void) | 1213 | static int kvmppc_book3s_init(void) |
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index b9589324797b..3ff9013d6e79 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c | |||
@@ -15,6 +15,8 @@ | |||
15 | * published by the Free Software Foundation. | 15 | * published by the Free Software Foundation. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/anon_inodes.h> | ||
19 | |||
18 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
19 | #include <asm/kvm_ppc.h> | 21 | #include <asm/kvm_ppc.h> |
20 | #include <asm/kvm_book3s.h> | 22 | #include <asm/kvm_book3s.h> |
@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) | |||
98 | return EMULATE_DONE; | 100 | return EMULATE_DONE; |
99 | } | 101 | } |
100 | 102 | ||
103 | /* Request defs for kvmppc_h_pr_bulk_remove() */ | ||
104 | #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL | ||
105 | #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL | ||
106 | #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL | ||
107 | #define H_BULK_REMOVE_END 0xc000000000000000ULL | ||
108 | #define H_BULK_REMOVE_CODE 0x3000000000000000ULL | ||
109 | #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL | ||
110 | #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL | ||
111 | #define H_BULK_REMOVE_PARM 0x2000000000000000ULL | ||
112 | #define H_BULK_REMOVE_HW 0x3000000000000000ULL | ||
113 | #define H_BULK_REMOVE_RC 0x0c00000000000000ULL | ||
114 | #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL | ||
115 | #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL | ||
116 | #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL | ||
117 | #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL | ||
118 | #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL | ||
119 | #define H_BULK_REMOVE_MAX_BATCH 4 | ||
120 | |||
121 | static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) | ||
122 | { | ||
123 | int i; | ||
124 | int paramnr = 4; | ||
125 | int ret = H_SUCCESS; | ||
126 | |||
127 | for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { | ||
128 | unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); | ||
129 | unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); | ||
130 | unsigned long pteg, rb, flags; | ||
131 | unsigned long pte[2]; | ||
132 | unsigned long v = 0; | ||
133 | |||
134 | if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { | ||
135 | break; /* Exit success */ | ||
136 | } else if ((tsh & H_BULK_REMOVE_TYPE) != | ||
137 | H_BULK_REMOVE_REQUEST) { | ||
138 | ret = H_PARAMETER; | ||
139 | break; /* Exit fail */ | ||
140 | } | ||
141 | |||
142 | tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; | ||
143 | tsh |= H_BULK_REMOVE_RESPONSE; | ||
144 | |||
145 | if ((tsh & H_BULK_REMOVE_ANDCOND) && | ||
146 | (tsh & H_BULK_REMOVE_AVPN)) { | ||
147 | tsh |= H_BULK_REMOVE_PARM; | ||
148 | kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); | ||
149 | ret = H_PARAMETER; | ||
150 | break; /* Exit fail */ | ||
151 | } | ||
152 | |||
153 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); | ||
154 | copy_from_user(pte, (void __user *)pteg, sizeof(pte)); | ||
155 | |||
156 | /* tsl = AVPN */ | ||
157 | flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; | ||
158 | |||
159 | if ((pte[0] & HPTE_V_VALID) == 0 || | ||
160 | ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) || | ||
161 | ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) { | ||
162 | tsh |= H_BULK_REMOVE_NOT_FOUND; | ||
163 | } else { | ||
164 | /* Splat the pteg in (userland) hpt */ | ||
165 | copy_to_user((void __user *)pteg, &v, sizeof(v)); | ||
166 | |||
167 | rb = compute_tlbie_rb(pte[0], pte[1], | ||
168 | tsh & H_BULK_REMOVE_PTEX); | ||
169 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); | ||
170 | tsh |= H_BULK_REMOVE_SUCCESS; | ||
171 | tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43; | ||
172 | } | ||
173 | kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); | ||
174 | } | ||
175 | kvmppc_set_gpr(vcpu, 3, ret); | ||
176 | |||
177 | return EMULATE_DONE; | ||
178 | } | ||
179 | |||
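For orientation, the loop above is driven entirely by the field layout encoded in the H_BULK_REMOVE_* masks. A minimal sketch (not part of the patch) of classifying one translation-specifier high word the same way the loop does:

	/* Illustrative only: classify one H_BULK_REMOVE "tsh" control word. */
	static long check_tsh(unsigned long tsh)
	{
		switch (tsh & H_BULK_REMOVE_TYPE) {
		case H_BULK_REMOVE_END:
			return 0;		/* unused entry ends the batch */
		case H_BULK_REMOVE_REQUEST:
			break;			/* a real request, keep going */
		default:
			return H_PARAMETER;	/* RESPONSE or garbage */
		}
		/* ANDCOND and AVPN matching are mutually exclusive */
		if ((tsh & H_BULK_REMOVE_ANDCOND) && (tsh & H_BULK_REMOVE_AVPN))
			return H_PARAMETER;
		return 1;		/* PTE index: tsh & H_BULK_REMOVE_PTEX */
	}

On success the handler writes the per-entry status back into the same GPR slot, with H_BULK_REMOVE_RESPONSE set and the reference/change bits from the removed PTE folded in.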
101 | static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | 180 | static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) |
102 | { | 181 | { |
103 | unsigned long flags = kvmppc_get_gpr(vcpu, 4); | 182 | unsigned long flags = kvmppc_get_gpr(vcpu, 4); |
@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) | |||
134 | return EMULATE_DONE; | 213 | return EMULATE_DONE; |
135 | } | 214 | } |
136 | 215 | ||
216 | static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu) | ||
217 | { | ||
218 | unsigned long liobn = kvmppc_get_gpr(vcpu, 4); | ||
219 | unsigned long ioba = kvmppc_get_gpr(vcpu, 5); | ||
220 | unsigned long tce = kvmppc_get_gpr(vcpu, 6); | ||
221 | long rc; | ||
222 | |||
223 | rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce); | ||
224 | if (rc == H_TOO_HARD) | ||
225 | return EMULATE_FAIL; | ||
226 | kvmppc_set_gpr(vcpu, 3, rc); | ||
227 | return EMULATE_DONE; | ||
228 | } | ||
229 | |||
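Each of these PAPR wrappers has the same shape: arguments arrive in GPRs r4 and up, a common handler does the work, H_TOO_HARD punts the hypercall to the host/userspace path, and any other status goes back to the guest in r3. A hypothetical template, with the handler name invented for illustration:

	static int kvmppc_h_pr_example(struct kvm_vcpu *vcpu)
	{
		unsigned long arg = kvmppc_get_gpr(vcpu, 4);	/* first hcall arg */
		long rc;

		rc = kvmppc_h_example(vcpu, arg);	/* invented helper */
		if (rc == H_TOO_HARD)
			return EMULATE_FAIL;	/* let the host complete the hcall */
		kvmppc_set_gpr(vcpu, 3, rc);	/* status back to the guest in r3 */
		return EMULATE_DONE;
	}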
137 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | 230 | int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) |
138 | { | 231 | { |
139 | switch (cmd) { | 232 | switch (cmd) { |
@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | |||
144 | case H_PROTECT: | 237 | case H_PROTECT: |
145 | return kvmppc_h_pr_protect(vcpu); | 238 | return kvmppc_h_pr_protect(vcpu); |
146 | case H_BULK_REMOVE: | 239 | case H_BULK_REMOVE: |
147 | /* We just flush all PTEs, so user space can | 240 | return kvmppc_h_pr_bulk_remove(vcpu); |
148 | handle the HPT modifications */ | 241 | case H_PUT_TCE: |
149 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 242 | return kvmppc_h_pr_put_tce(vcpu); |
150 | break; | ||
151 | case H_CEDE: | 243 | case H_CEDE: |
152 | kvm_vcpu_block(vcpu); | 244 | kvm_vcpu_block(vcpu); |
245 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
153 | vcpu->stat.halt_wakeup++; | 246 | vcpu->stat.halt_wakeup++; |
154 | return EMULATE_DONE; | 247 | return EMULATE_DONE; |
155 | } | 248 | } |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 0676ae249b9f..798491a268b3 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -128,24 +128,25 @@ no_dcbz32_on: | |||
128 | /* First clear RI in our current MSR value */ | 128 | /* First clear RI in our current MSR value */ |
129 | li r0, MSR_RI | 129 | li r0, MSR_RI |
130 | andc r6, r6, r0 | 130 | andc r6, r6, r0 |
131 | MTMSR_EERI(r6) | ||
132 | mtsrr0 r9 | ||
133 | mtsrr1 r4 | ||
134 | 131 | ||
135 | PPC_LL r0, SVCPU_R0(r3) | 132 | PPC_LL r0, SVCPU_R0(r3) |
136 | PPC_LL r1, SVCPU_R1(r3) | 133 | PPC_LL r1, SVCPU_R1(r3) |
137 | PPC_LL r2, SVCPU_R2(r3) | 134 | PPC_LL r2, SVCPU_R2(r3) |
138 | PPC_LL r4, SVCPU_R4(r3) | ||
139 | PPC_LL r5, SVCPU_R5(r3) | 135 | PPC_LL r5, SVCPU_R5(r3) |
140 | PPC_LL r6, SVCPU_R6(r3) | ||
141 | PPC_LL r7, SVCPU_R7(r3) | 136 | PPC_LL r7, SVCPU_R7(r3) |
142 | PPC_LL r8, SVCPU_R8(r3) | 137 | PPC_LL r8, SVCPU_R8(r3) |
143 | PPC_LL r9, SVCPU_R9(r3) | ||
144 | PPC_LL r10, SVCPU_R10(r3) | 138 | PPC_LL r10, SVCPU_R10(r3) |
145 | PPC_LL r11, SVCPU_R11(r3) | 139 | PPC_LL r11, SVCPU_R11(r3) |
146 | PPC_LL r12, SVCPU_R12(r3) | 140 | PPC_LL r12, SVCPU_R12(r3) |
147 | PPC_LL r13, SVCPU_R13(r3) | 141 | PPC_LL r13, SVCPU_R13(r3) |
148 | 142 | ||
143 | MTMSR_EERI(r6) | ||
144 | mtsrr0 r9 | ||
145 | mtsrr1 r4 | ||
146 | |||
147 | PPC_LL r4, SVCPU_R4(r3) | ||
148 | PPC_LL r6, SVCPU_R6(r3) | ||
149 | PPC_LL r9, SVCPU_R9(r3) | ||
149 | PPC_LL r3, (SVCPU_R3)(r3) | 150 | PPC_LL r3, (SVCPU_R3)(r3) |
150 | 151 | ||
151 | RFI | 152 | RFI |
@@ -197,7 +198,8 @@ kvmppc_interrupt: | |||
197 | /* Save guest PC and MSR */ | 198 | /* Save guest PC and MSR */ |
198 | #ifdef CONFIG_PPC64 | 199 | #ifdef CONFIG_PPC64 |
199 | BEGIN_FTR_SECTION | 200 | BEGIN_FTR_SECTION |
200 | andi. r0,r12,0x2 | 201 | andi. r0, r12, 0x2 |
202 | cmpwi cr1, r0, 0 | ||
201 | beq 1f | 203 | beq 1f |
202 | mfspr r3,SPRN_HSRR0 | 204 | mfspr r3,SPRN_HSRR0 |
203 | mfspr r4,SPRN_HSRR1 | 205 | mfspr r4,SPRN_HSRR1 |
@@ -250,6 +252,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | |||
250 | beq ld_last_prev_inst | 252 | beq ld_last_prev_inst |
251 | cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT | 253 | cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT |
252 | beq- ld_last_inst | 254 | beq- ld_last_inst |
255 | #ifdef CONFIG_PPC64 | ||
256 | BEGIN_FTR_SECTION | ||
257 | cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST | ||
258 | beq- ld_last_inst | ||
259 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | ||
260 | #endif | ||
253 | 261 | ||
254 | b no_ld_last_inst | 262 | b no_ld_last_inst |
255 | 263 | ||
@@ -316,23 +324,17 @@ no_dcbz32_off: | |||
316 | * Having set up SRR0/1 with the address where we want | 324 | * Having set up SRR0/1 with the address where we want |
317 | * to continue with relocation on (potentially in module | 325 | * to continue with relocation on (potentially in module |
318 | * space), we either just go straight there with rfi[d], | 326 | * space), we either just go straight there with rfi[d], |
319 | * or we jump to an interrupt handler with bctr if there | 327 | * or we jump to an interrupt handler if there is an |
320 | * is an interrupt to be handled first. In the latter | 328 | * interrupt to be handled first. In the latter case, |
321 | * case, the rfi[d] at the end of the interrupt handler | 329 | * the rfi[d] at the end of the interrupt handler will |
322 | * will get us back to where we want to continue. | 330 | * get us back to where we want to continue. |
323 | */ | 331 | */ |
324 | 332 | ||
325 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | ||
326 | beq 1f | ||
327 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER | ||
328 | beq 1f | ||
329 | cmpwi r12, BOOK3S_INTERRUPT_PERFMON | ||
330 | 1: mtctr r12 | ||
331 | |||
332 | /* Register usage at this point: | 333 | /* Register usage at this point: |
333 | * | 334 | * |
334 | * R1 = host R1 | 335 | * R1 = host R1 |
335 | * R2 = host R2 | 336 | * R2 = host R2 |
337 | * R10 = raw exit handler id | ||
336 | * R12 = exit handler id | 338 | * R12 = exit handler id |
337 | * R13 = shadow vcpu (32-bit) or PACA (64-bit) | 339 | * R13 = shadow vcpu (32-bit) or PACA (64-bit) |
338 | * SVCPU.* = guest * | 340 | * SVCPU.* = guest * |
@@ -342,12 +344,25 @@ no_dcbz32_off: | |||
342 | PPC_LL r6, HSTATE_HOST_MSR(r13) | 344 | PPC_LL r6, HSTATE_HOST_MSR(r13) |
343 | PPC_LL r8, HSTATE_VMHANDLER(r13) | 345 | PPC_LL r8, HSTATE_VMHANDLER(r13) |
344 | 346 | ||
345 | /* Restore host msr -> SRR1 */ | 347 | #ifdef CONFIG_PPC64 |
348 | BEGIN_FTR_SECTION | ||
349 | beq cr1, 1f | ||
350 | mtspr SPRN_HSRR1, r6 | ||
351 | mtspr SPRN_HSRR0, r8 | ||
352 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | ||
353 | #endif | ||
354 | 1: /* Restore host msr -> SRR1 */ | ||
346 | mtsrr1 r6 | 355 | mtsrr1 r6 |
347 | /* Load highmem handler address */ | 356 | /* Load highmem handler address */ |
348 | mtsrr0 r8 | 357 | mtsrr0 r8 |
349 | 358 | ||
350 | /* RFI into the highmem handler, or jump to interrupt handler */ | 359 | /* RFI into the highmem handler, or jump to interrupt handler */ |
351 | beqctr | 360 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL |
361 | beqa BOOK3S_INTERRUPT_EXTERNAL | ||
362 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER | ||
363 | beqa BOOK3S_INTERRUPT_DECREMENTER | ||
364 | cmpwi r12, BOOK3S_INTERRUPT_PERFMON | ||
365 | beqa BOOK3S_INTERRUPT_PERFMON | ||
366 | |||
352 | RFI | 367 | RFI |
353 | kvmppc_handler_trampoline_exit_end: | 368 | kvmppc_handler_trampoline_exit_end: |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index ee9e1ee9c858..72f13f4a06e0 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -17,6 +17,8 @@ | |||
17 | * | 17 | * |
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | 18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> |
19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | 19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> |
20 | * Scott Wood <scottwood@freescale.com> | ||
21 | * Varun Sethi <varun.sethi@freescale.com> | ||
20 | */ | 22 | */ |
21 | 23 | ||
22 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
@@ -30,9 +32,12 @@ | |||
30 | #include <asm/cputable.h> | 32 | #include <asm/cputable.h> |
31 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
32 | #include <asm/kvm_ppc.h> | 34 | #include <asm/kvm_ppc.h> |
33 | #include "timing.h" | ||
34 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
36 | #include <asm/dbell.h> | ||
37 | #include <asm/hw_irq.h> | ||
38 | #include <asm/irq.h> | ||
35 | 39 | ||
40 | #include "timing.h" | ||
36 | #include "booke.h" | 41 | #include "booke.h" |
37 | 42 | ||
38 | unsigned long kvmppc_booke_handlers; | 43 | unsigned long kvmppc_booke_handlers; |
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
55 | { "dec", VCPU_STAT(dec_exits) }, | 60 | { "dec", VCPU_STAT(dec_exits) }, |
56 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | 61 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, |
57 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 62 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
63 | { "doorbell", VCPU_STAT(dbell_exits) }, | ||
64 | { "guest doorbell", VCPU_STAT(gdbell_exits) }, | ||
58 | { NULL } | 65 | { NULL } |
59 | }; | 66 | }; |
60 | 67 | ||
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | |||
121 | { | 128 | { |
122 | u32 old_msr = vcpu->arch.shared->msr; | 129 | u32 old_msr = vcpu->arch.shared->msr; |
123 | 130 | ||
131 | #ifdef CONFIG_KVM_BOOKE_HV | ||
132 | new_msr |= MSR_GS; | ||
133 | #endif | ||
134 | |||
124 | vcpu->arch.shared->msr = new_msr; | 135 | vcpu->arch.shared->msr = new_msr; |
125 | 136 | ||
126 | kvmppc_mmu_msr_notify(vcpu, old_msr); | 137 | kvmppc_mmu_msr_notify(vcpu, old_msr); |
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | |||
195 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); | 206 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); |
196 | } | 207 | } |
197 | 208 | ||
209 | static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
210 | { | ||
211 | #ifdef CONFIG_KVM_BOOKE_HV | ||
212 | mtspr(SPRN_GSRR0, srr0); | ||
213 | mtspr(SPRN_GSRR1, srr1); | ||
214 | #else | ||
215 | vcpu->arch.shared->srr0 = srr0; | ||
216 | vcpu->arch.shared->srr1 = srr1; | ||
217 | #endif | ||
218 | } | ||
219 | |||
220 | static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
221 | { | ||
222 | vcpu->arch.csrr0 = srr0; | ||
223 | vcpu->arch.csrr1 = srr1; | ||
224 | } | ||
225 | |||
226 | static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
227 | { | ||
228 | if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) { | ||
229 | vcpu->arch.dsrr0 = srr0; | ||
230 | vcpu->arch.dsrr1 = srr1; | ||
231 | } else { | ||
232 | set_guest_csrr(vcpu, srr0, srr1); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | ||
237 | { | ||
238 | vcpu->arch.mcsrr0 = srr0; | ||
239 | vcpu->arch.mcsrr1 = srr1; | ||
240 | } | ||
241 | |||
242 | static unsigned long get_guest_dear(struct kvm_vcpu *vcpu) | ||
243 | { | ||
244 | #ifdef CONFIG_KVM_BOOKE_HV | ||
245 | return mfspr(SPRN_GDEAR); | ||
246 | #else | ||
247 | return vcpu->arch.shared->dar; | ||
248 | #endif | ||
249 | } | ||
250 | |||
251 | static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear) | ||
252 | { | ||
253 | #ifdef CONFIG_KVM_BOOKE_HV | ||
254 | mtspr(SPRN_GDEAR, dear); | ||
255 | #else | ||
256 | vcpu->arch.shared->dar = dear; | ||
257 | #endif | ||
258 | } | ||
259 | |||
260 | static unsigned long get_guest_esr(struct kvm_vcpu *vcpu) | ||
261 | { | ||
262 | #ifdef CONFIG_KVM_BOOKE_HV | ||
263 | return mfspr(SPRN_GESR); | ||
264 | #else | ||
265 | return vcpu->arch.shared->esr; | ||
266 | #endif | ||
267 | } | ||
268 | |||
269 | static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) | ||
270 | { | ||
271 | #ifdef CONFIG_KVM_BOOKE_HV | ||
272 | mtspr(SPRN_GESR, esr); | ||
273 | #else | ||
274 | vcpu->arch.shared->esr = esr; | ||
275 | #endif | ||
276 | } | ||
277 | |||
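The point of these paired accessors is that the delivery code below can stay agnostic about where guest state lives: with BOOKE_HV the guest-visible SRR/DEAR/ESR are backed by real guest SPRs (GSRR0/1, GDEAR, GESR), while in PR mode they live in the shared page. A caller simply writes, for example:

	/* deliver the queued fault state, whatever the backing store */
	set_guest_dear(vcpu, vcpu->arch.queued_dear);
	set_guest_esr(vcpu, vcpu->arch.queued_esr);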
198 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 278 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
199 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | 279 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, |
200 | unsigned int priority) | 280 | unsigned int priority) |
201 | { | 281 | { |
202 | int allowed = 0; | 282 | int allowed = 0; |
203 | ulong uninitialized_var(msr_mask); | 283 | ulong msr_mask = 0; |
204 | bool update_esr = false, update_dear = false; | 284 | bool update_esr = false, update_dear = false; |
205 | ulong crit_raw = vcpu->arch.shared->critical; | 285 | ulong crit_raw = vcpu->arch.shared->critical; |
206 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | 286 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); |
207 | bool crit; | 287 | bool crit; |
208 | bool keep_irq = false; | 288 | bool keep_irq = false; |
289 | enum int_class int_class; | ||
209 | 290 | ||
210 | /* Truncate crit indicators in 32 bit mode */ | 291 | /* Truncate crit indicators in 32 bit mode */ |
211 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | 292 | if (!(vcpu->arch.shared->msr & MSR_SF)) { |
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
241 | case BOOKE_IRQPRIO_AP_UNAVAIL: | 322 | case BOOKE_IRQPRIO_AP_UNAVAIL: |
242 | case BOOKE_IRQPRIO_ALIGNMENT: | 323 | case BOOKE_IRQPRIO_ALIGNMENT: |
243 | allowed = 1; | 324 | allowed = 1; |
244 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 325 | msr_mask = MSR_CE | MSR_ME | MSR_DE; |
326 | int_class = INT_CLASS_NONCRIT; | ||
245 | break; | 327 | break; |
246 | case BOOKE_IRQPRIO_CRITICAL: | 328 | case BOOKE_IRQPRIO_CRITICAL: |
247 | case BOOKE_IRQPRIO_WATCHDOG: | 329 | case BOOKE_IRQPRIO_DBELL_CRIT: |
248 | allowed = vcpu->arch.shared->msr & MSR_CE; | 330 | allowed = vcpu->arch.shared->msr & MSR_CE; |
331 | allowed = allowed && !crit; | ||
249 | msr_mask = MSR_ME; | 332 | msr_mask = MSR_ME; |
333 | int_class = INT_CLASS_CRIT; | ||
250 | break; | 334 | break; |
251 | case BOOKE_IRQPRIO_MACHINE_CHECK: | 335 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
252 | allowed = vcpu->arch.shared->msr & MSR_ME; | 336 | allowed = vcpu->arch.shared->msr & MSR_ME; |
253 | msr_mask = 0; | 337 | allowed = allowed && !crit; |
338 | int_class = INT_CLASS_MC; | ||
254 | break; | 339 | break; |
255 | case BOOKE_IRQPRIO_DECREMENTER: | 340 | case BOOKE_IRQPRIO_DECREMENTER: |
256 | case BOOKE_IRQPRIO_FIT: | 341 | case BOOKE_IRQPRIO_FIT: |
257 | keep_irq = true; | 342 | keep_irq = true; |
258 | /* fall through */ | 343 | /* fall through */ |
259 | case BOOKE_IRQPRIO_EXTERNAL: | 344 | case BOOKE_IRQPRIO_EXTERNAL: |
345 | case BOOKE_IRQPRIO_DBELL: | ||
260 | allowed = vcpu->arch.shared->msr & MSR_EE; | 346 | allowed = vcpu->arch.shared->msr & MSR_EE; |
261 | allowed = allowed && !crit; | 347 | allowed = allowed && !crit; |
262 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 348 | msr_mask = MSR_CE | MSR_ME | MSR_DE; |
349 | int_class = INT_CLASS_NONCRIT; | ||
263 | break; | 350 | break; |
264 | case BOOKE_IRQPRIO_DEBUG: | 351 | case BOOKE_IRQPRIO_DEBUG: |
265 | allowed = vcpu->arch.shared->msr & MSR_DE; | 352 | allowed = vcpu->arch.shared->msr & MSR_DE; |
353 | allowed = allowed && !crit; | ||
266 | msr_mask = MSR_ME; | 354 | msr_mask = MSR_ME; |
355 | int_class = INT_CLASS_CRIT; | ||
267 | break; | 356 | break; |
268 | } | 357 | } |
269 | 358 | ||
270 | if (allowed) { | 359 | if (allowed) { |
271 | vcpu->arch.shared->srr0 = vcpu->arch.pc; | 360 | switch (int_class) { |
272 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; | 361 | case INT_CLASS_NONCRIT: |
362 | set_guest_srr(vcpu, vcpu->arch.pc, | ||
363 | vcpu->arch.shared->msr); | ||
364 | break; | ||
365 | case INT_CLASS_CRIT: | ||
366 | set_guest_csrr(vcpu, vcpu->arch.pc, | ||
367 | vcpu->arch.shared->msr); | ||
368 | break; | ||
369 | case INT_CLASS_DBG: | ||
370 | set_guest_dsrr(vcpu, vcpu->arch.pc, | ||
371 | vcpu->arch.shared->msr); | ||
372 | break; | ||
373 | case INT_CLASS_MC: | ||
374 | set_guest_mcsrr(vcpu, vcpu->arch.pc, | ||
375 | vcpu->arch.shared->msr); | ||
376 | break; | ||
377 | } | ||
378 | |||
273 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 379 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
274 | if (update_esr == true) | 380 | if (update_esr == true) |
275 | vcpu->arch.shared->esr = vcpu->arch.queued_esr; | 381 | set_guest_esr(vcpu, vcpu->arch.queued_esr); |
276 | if (update_dear == true) | 382 | if (update_dear == true) |
277 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; | 383 | set_guest_dear(vcpu, vcpu->arch.queued_dear); |
278 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); | 384 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); |
279 | 385 | ||
280 | if (!keep_irq) | 386 | if (!keep_irq) |
281 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 387 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
282 | } | 388 | } |
283 | 389 | ||
390 | #ifdef CONFIG_KVM_BOOKE_HV | ||
391 | /* | ||
392 | * If an interrupt is pending but masked, raise a guest doorbell | ||
393 | * so that we are notified when the guest enables the relevant | ||
394 | * MSR bit. | ||
395 | */ | ||
396 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) | ||
397 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT); | ||
398 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) | ||
399 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT); | ||
400 | if (vcpu->arch.pending_exceptions & (1 << BOOKE_IRQPRIO_MACHINE_CHECK)) | ||
401 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC); | ||
402 | #endif | ||
403 | |||
284 | return allowed; | 404 | return allowed; |
285 | } | 405 | } |
286 | 406 | ||
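A compact restatement of the class-to-register mapping established above:

	/*
	 * INT_CLASS_NONCRIT -> SRR0/SRR1   (GSRR0/GSRR1 under BOOKE_HV)
	 * INT_CLASS_CRIT    -> CSRR0/CSRR1
	 * INT_CLASS_DBG     -> DSRR0/DSRR1 (CSRR fallback without
	 *                      CPU_FTR_DEBUG_LVL_EXC)
	 * INT_CLASS_MC      -> MCSRR0/MCSRR1
	 */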
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) | |||
305 | } | 425 | } |
306 | 426 | ||
307 | priority = __ffs(*pending); | 427 | priority = __ffs(*pending); |
308 | while (priority <= BOOKE_IRQPRIO_MAX) { | 428 | while (priority < BOOKE_IRQPRIO_MAX) { |
309 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) | 429 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) |
310 | break; | 430 | break; |
311 | 431 | ||
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) | |||
319 | } | 439 | } |
320 | 440 | ||
321 | /* Check pending exceptions and deliver one, if possible. */ | 441 | /* Check pending exceptions and deliver one, if possible. */ |
322 | void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | 442 | int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
323 | { | 443 | { |
444 | int r = 0; | ||
324 | WARN_ON_ONCE(!irqs_disabled()); | 445 | WARN_ON_ONCE(!irqs_disabled()); |
325 | 446 | ||
326 | kvmppc_core_check_exceptions(vcpu); | 447 | kvmppc_core_check_exceptions(vcpu); |
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) | |||
328 | if (vcpu->arch.shared->msr & MSR_WE) { | 449 | if (vcpu->arch.shared->msr & MSR_WE) { |
329 | local_irq_enable(); | 450 | local_irq_enable(); |
330 | kvm_vcpu_block(vcpu); | 451 | kvm_vcpu_block(vcpu); |
452 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
331 | local_irq_disable(); | 453 | local_irq_disable(); |
332 | 454 | ||
333 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | 455 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); |
334 | kvmppc_core_check_exceptions(vcpu); | 456 | r = 1; |
335 | }; | 457 | }; |
458 | |||
459 | return r; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Common checks before entering the guest world. Call with interrupts | ||
464 | * disabled. | ||
465 | * | ||
466 | * returns !0 if a signal is pending | ||
467 | */ | ||
468 | static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) | ||
469 | { | ||
470 | int r = 0; | ||
471 | |||
472 | WARN_ON_ONCE(!irqs_disabled()); | ||
473 | while (true) { | ||
474 | if (need_resched()) { | ||
475 | local_irq_enable(); | ||
476 | cond_resched(); | ||
477 | local_irq_disable(); | ||
478 | continue; | ||
479 | } | ||
480 | |||
481 | if (signal_pending(current)) { | ||
482 | r = 1; | ||
483 | break; | ||
484 | } | ||
485 | |||
486 | if (kvmppc_core_prepare_to_enter(vcpu)) { | ||
487 | /* interrupts got enabled in between, so we | ||
488 | * are back at square one */ | ||
489 | continue; | ||
490 | } | ||
491 | |||
492 | break; | ||
493 | } | ||
494 | |||
495 | return r; | ||
336 | } | 496 | } |
337 | 497 | ||
338 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 498 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
339 | { | 499 | { |
340 | int ret; | 500 | int ret; |
501 | #ifdef CONFIG_PPC_FPU | ||
502 | unsigned int fpscr; | ||
503 | int fpexc_mode; | ||
504 | u64 fpr[32]; | ||
505 | #endif | ||
341 | 506 | ||
342 | if (!vcpu->arch.sane) { | 507 | if (!vcpu->arch.sane) { |
343 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 508 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
345 | } | 510 | } |
346 | 511 | ||
347 | local_irq_disable(); | 512 | local_irq_disable(); |
348 | 513 | if (kvmppc_prepare_to_enter(vcpu)) { | |
349 | kvmppc_core_prepare_to_enter(vcpu); | ||
350 | |||
351 | if (signal_pending(current)) { | ||
352 | kvm_run->exit_reason = KVM_EXIT_INTR; | 514 | kvm_run->exit_reason = KVM_EXIT_INTR; |
353 | ret = -EINTR; | 515 | ret = -EINTR; |
354 | goto out; | 516 | goto out; |
355 | } | 517 | } |
356 | 518 | ||
357 | kvm_guest_enter(); | 519 | kvm_guest_enter(); |
520 | |||
521 | #ifdef CONFIG_PPC_FPU | ||
522 | /* Save userspace FPU state in stack */ | ||
523 | enable_kernel_fp(); | ||
524 | memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); | ||
525 | fpscr = current->thread.fpscr.val; | ||
526 | fpexc_mode = current->thread.fpexc_mode; | ||
527 | |||
528 | /* Restore guest FPU state to thread */ | ||
529 | memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr)); | ||
530 | current->thread.fpscr.val = vcpu->arch.fpscr; | ||
531 | |||
532 | /* | ||
533 | * Since we can't trap on MSR_FP in GS-mode, we consider the guest | ||
534 | * as always using the FPU. Kernel usage of FP (via | ||
535 | * enable_kernel_fp()) in this thread must not occur while | ||
536 | * vcpu->fpu_active is set. | ||
537 | */ | ||
538 | vcpu->fpu_active = 1; | ||
539 | |||
540 | kvmppc_load_guest_fp(vcpu); | ||
541 | #endif | ||
542 | |||
358 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | 543 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
544 | |||
545 | #ifdef CONFIG_PPC_FPU | ||
546 | kvmppc_save_guest_fp(vcpu); | ||
547 | |||
548 | vcpu->fpu_active = 0; | ||
549 | |||
550 | /* Save guest FPU state from thread */ | ||
551 | memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr)); | ||
552 | vcpu->arch.fpscr = current->thread.fpscr.val; | ||
553 | |||
554 | /* Restore userspace FPU state from stack */ | ||
555 | memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); | ||
556 | current->thread.fpscr.val = fpscr; | ||
557 | current->thread.fpexc_mode = fpexc_mode; | ||
558 | #endif | ||
559 | |||
359 | kvm_guest_exit(); | 560 | kvm_guest_exit(); |
360 | 561 | ||
361 | out: | 562 | out: |
@@ -363,6 +564,84 @@ out: | |||
363 | return ret; | 564 | return ret; |
364 | } | 565 | } |
365 | 566 | ||
567 | static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | ||
568 | { | ||
569 | enum emulation_result er; | ||
570 | |||
571 | er = kvmppc_emulate_instruction(run, vcpu); | ||
572 | switch (er) { | ||
573 | case EMULATE_DONE: | ||
574 | /* don't overwrite subtypes, just account kvm_stats */ | ||
575 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
576 | /* Future optimization: only reload non-volatiles if | ||
577 | * they were actually modified by emulation. */ | ||
578 | return RESUME_GUEST_NV; | ||
579 | |||
580 | case EMULATE_DO_DCR: | ||
581 | run->exit_reason = KVM_EXIT_DCR; | ||
582 | return RESUME_HOST; | ||
583 | |||
584 | case EMULATE_FAIL: | ||
585 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
586 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
587 | /* For debugging, encode the failing instruction and | ||
588 | * report it to userspace. */ | ||
589 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
590 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | ||
591 | kvmppc_core_queue_program(vcpu, ESR_PIL); | ||
592 | return RESUME_HOST; | ||
593 | |||
594 | default: | ||
595 | BUG(); | ||
596 | } | ||
597 | } | ||
598 | |||
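The debug encoding used on EMULATE_FAIL packs an all-ones marker into the top 32 bits and the failing instruction into the bottom 32. A worked example; the instruction word is arbitrary:

	/* ~0ULL << 32           == 0xffffffff00000000 */
	run->hw.hardware_exit_reason = ~0ULL << 32;
	/* 0x7c0004ac is e.g. a "sync" instruction */
	run->hw.hardware_exit_reason |= 0x7c0004ac;
	/* => 0xffffffff7c0004ac reported to userspace */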
599 | static void kvmppc_fill_pt_regs(struct pt_regs *regs) | ||
600 | { | ||
601 | ulong r1, ip, msr, lr; | ||
602 | |||
603 | asm("mr %0, 1" : "=r"(r1)); | ||
604 | asm("mflr %0" : "=r"(lr)); | ||
605 | asm("mfmsr %0" : "=r"(msr)); | ||
606 | asm("bl 1f; 1: mflr %0" : "=r"(ip)); | ||
607 | |||
608 | memset(regs, 0, sizeof(*regs)); | ||
609 | regs->gpr[1] = r1; | ||
610 | regs->nip = ip; | ||
611 | regs->msr = msr; | ||
612 | regs->link = lr; | ||
613 | } | ||
614 | |||
615 | static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, | ||
616 | unsigned int exit_nr) | ||
617 | { | ||
618 | struct pt_regs regs; | ||
619 | |||
620 | switch (exit_nr) { | ||
621 | case BOOKE_INTERRUPT_EXTERNAL: | ||
622 | kvmppc_fill_pt_regs(®s); | ||
623 | do_IRQ(®s); | ||
624 | break; | ||
625 | case BOOKE_INTERRUPT_DECREMENTER: | ||
626 | kvmppc_fill_pt_regs(®s); | ||
627 | timer_interrupt(®s); | ||
628 | break; | ||
629 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64) | ||
630 | case BOOKE_INTERRUPT_DOORBELL: | ||
631 | kvmppc_fill_pt_regs(®s); | ||
632 | doorbell_exception(®s); | ||
633 | break; | ||
634 | #endif | ||
635 | case BOOKE_INTERRUPT_MACHINE_CHECK: | ||
636 | /* FIXME */ | ||
637 | break; | ||
638 | case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: | ||
639 | kvmppc_fill_pt_regs(®s); | ||
640 | performance_monitor_exception(®s); | ||
641 | break; | ||
642 | } | ||
643 | } | ||
644 | |||
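kvmppc_fill_pt_regs() builds just enough of a pt_regs frame (stack pointer, a return ip captured via a local branch-and-link, MSR, LR) for the host handlers to see plausible context, since these interrupts were taken in guest context and must be replayed by hand. Extending the dispatcher for another host-owned exit would follow the same pattern; the names below are invented for illustration:

	case BOOKE_INTERRUPT_NEW_SOURCE:	/* hypothetical exit type */
		kvmppc_fill_pt_regs(&regs);
		new_source_exception(&regs);	/* hypothetical host handler */
		break;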
366 | /** | 645 | /** |
367 | * kvmppc_handle_exit | 646 | * kvmppc_handle_exit |
368 | * | 647 | * |
@@ -371,12 +650,14 @@ out: | |||
371 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | 650 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, |
372 | unsigned int exit_nr) | 651 | unsigned int exit_nr) |
373 | { | 652 | { |
374 | enum emulation_result er; | ||
375 | int r = RESUME_HOST; | 653 | int r = RESUME_HOST; |
376 | 654 | ||
377 | /* update before a new last_exit_type is rewritten */ | 655 | /* update before a new last_exit_type is rewritten */ |
378 | kvmppc_update_timing_stats(vcpu); | 656 | kvmppc_update_timing_stats(vcpu); |
379 | 657 | ||
658 | /* restart interrupts if they were meant for the host */ | ||
659 | kvmppc_restart_interrupt(vcpu, exit_nr); | ||
660 | |||
380 | local_irq_enable(); | 661 | local_irq_enable(); |
381 | 662 | ||
382 | run->exit_reason = KVM_EXIT_UNKNOWN; | 663 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
386 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 667 | case BOOKE_INTERRUPT_MACHINE_CHECK: |
387 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); | 668 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); |
388 | kvmppc_dump_vcpu(vcpu); | 669 | kvmppc_dump_vcpu(vcpu); |
670 | /* For debugging, send invalid exit reason to user space */ | ||
671 | run->hw.hardware_exit_reason = ~1ULL << 32; | ||
672 | run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR); | ||
389 | r = RESUME_HOST; | 673 | r = RESUME_HOST; |
390 | break; | 674 | break; |
391 | 675 | ||
392 | case BOOKE_INTERRUPT_EXTERNAL: | 676 | case BOOKE_INTERRUPT_EXTERNAL: |
393 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); | 677 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); |
394 | if (need_resched()) | ||
395 | cond_resched(); | ||
396 | r = RESUME_GUEST; | 678 | r = RESUME_GUEST; |
397 | break; | 679 | break; |
398 | 680 | ||
399 | case BOOKE_INTERRUPT_DECREMENTER: | 681 | case BOOKE_INTERRUPT_DECREMENTER: |
400 | /* Since we switched IVPR back to the host's value, the host | ||
401 | * handled this interrupt the moment we enabled interrupts. | ||
402 | * Now we just offer it a chance to reschedule the guest. */ | ||
403 | kvmppc_account_exit(vcpu, DEC_EXITS); | 682 | kvmppc_account_exit(vcpu, DEC_EXITS); |
404 | if (need_resched()) | ||
405 | cond_resched(); | ||
406 | r = RESUME_GUEST; | 683 | r = RESUME_GUEST; |
407 | break; | 684 | break; |
408 | 685 | ||
686 | case BOOKE_INTERRUPT_DOORBELL: | ||
687 | kvmppc_account_exit(vcpu, DBELL_EXITS); | ||
688 | r = RESUME_GUEST; | ||
689 | break; | ||
690 | |||
691 | case BOOKE_INTERRUPT_GUEST_DBELL_CRIT: | ||
692 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | ||
693 | |||
694 | /* | ||
695 | * We are here because there is a pending guest interrupt | ||
696 | * which could not be delivered as MSR_CE or MSR_ME was not | ||
697 | * set. Once we break from here we will retry delivery. | ||
698 | */ | ||
699 | r = RESUME_GUEST; | ||
700 | break; | ||
701 | |||
702 | case BOOKE_INTERRUPT_GUEST_DBELL: | ||
703 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | ||
704 | |||
705 | /* | ||
706 | * We are here because there is a pending guest interrupt | ||
707 | * which could not be delivered as MSR_EE was not set. Once | ||
708 | * we break from here we will retry delivery. | ||
709 | */ | ||
710 | r = RESUME_GUEST; | ||
711 | break; | ||
712 | |||
713 | case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: | ||
714 | r = RESUME_GUEST; | ||
715 | break; | ||
716 | |||
717 | case BOOKE_INTERRUPT_HV_PRIV: | ||
718 | r = emulation_exit(run, vcpu); | ||
719 | break; | ||
720 | |||
409 | case BOOKE_INTERRUPT_PROGRAM: | 721 | case BOOKE_INTERRUPT_PROGRAM: |
410 | if (vcpu->arch.shared->msr & MSR_PR) { | 722 | if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { |
411 | /* Program traps generated by user-level software must be handled | 723 | /* |
412 | * by the guest kernel. */ | 724 | * Program traps generated by user-level software must |
725 | * be handled by the guest kernel. | ||
726 | * | ||
727 | * In GS mode, hypervisor privileged instructions trap | ||
728 | * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are | ||
729 | * actual program interrupts, handled by the guest. | ||
730 | */ | ||
413 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); | 731 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
414 | r = RESUME_GUEST; | 732 | r = RESUME_GUEST; |
415 | kvmppc_account_exit(vcpu, USR_PR_INST); | 733 | kvmppc_account_exit(vcpu, USR_PR_INST); |
416 | break; | 734 | break; |
417 | } | 735 | } |
418 | 736 | ||
419 | er = kvmppc_emulate_instruction(run, vcpu); | 737 | r = emulation_exit(run, vcpu); |
420 | switch (er) { | ||
421 | case EMULATE_DONE: | ||
422 | /* don't overwrite subtypes, just account kvm_stats */ | ||
423 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
424 | /* Future optimization: only reload non-volatiles if | ||
425 | * they were actually modified by emulation. */ | ||
426 | r = RESUME_GUEST_NV; | ||
427 | break; | ||
428 | case EMULATE_DO_DCR: | ||
429 | run->exit_reason = KVM_EXIT_DCR; | ||
430 | r = RESUME_HOST; | ||
431 | break; | ||
432 | case EMULATE_FAIL: | ||
433 | /* XXX Deliver Program interrupt to guest. */ | ||
434 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
435 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
436 | /* For debugging, encode the failing instruction and | ||
437 | * report it to userspace. */ | ||
438 | run->hw.hardware_exit_reason = ~0ULL << 32; | ||
439 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | ||
440 | r = RESUME_HOST; | ||
441 | break; | ||
442 | default: | ||
443 | BUG(); | ||
444 | } | ||
445 | break; | 738 | break; |
446 | 739 | ||
447 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 740 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
506 | r = RESUME_GUEST; | 799 | r = RESUME_GUEST; |
507 | break; | 800 | break; |
508 | 801 | ||
802 | #ifdef CONFIG_KVM_BOOKE_HV | ||
803 | case BOOKE_INTERRUPT_HV_SYSCALL: | ||
804 | if (!(vcpu->arch.shared->msr & MSR_PR)) { | ||
805 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | ||
806 | } else { | ||
807 | /* | ||
808 | * hcall from guest userspace -- send privileged | ||
809 | * instruction program check. | ||
810 | */ | ||
811 | kvmppc_core_queue_program(vcpu, ESR_PPR); | ||
812 | } | ||
813 | |||
814 | r = RESUME_GUEST; | ||
815 | break; | ||
816 | #else | ||
509 | case BOOKE_INTERRUPT_SYSCALL: | 817 | case BOOKE_INTERRUPT_SYSCALL: |
510 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 818 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
511 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | 819 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
519 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); | 827 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
520 | r = RESUME_GUEST; | 828 | r = RESUME_GUEST; |
521 | break; | 829 | break; |
830 | #endif | ||
522 | 831 | ||
523 | case BOOKE_INTERRUPT_DTLB_MISS: { | 832 | case BOOKE_INTERRUPT_DTLB_MISS: { |
524 | unsigned long eaddr = vcpu->arch.fault_dear; | 833 | unsigned long eaddr = vcpu->arch.fault_dear; |
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
526 | gpa_t gpaddr; | 835 | gpa_t gpaddr; |
527 | gfn_t gfn; | 836 | gfn_t gfn; |
528 | 837 | ||
529 | #ifdef CONFIG_KVM_E500 | 838 | #ifdef CONFIG_KVM_E500V2 |
530 | if (!(vcpu->arch.shared->msr & MSR_PR) && | 839 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
531 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { | 840 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { |
532 | kvmppc_map_magic(vcpu); | 841 | kvmppc_map_magic(vcpu); |
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
567 | /* Guest has mapped and accessed a page which is not | 876 | /* Guest has mapped and accessed a page which is not |
568 | * actually RAM. */ | 877 | * actually RAM. */ |
569 | vcpu->arch.paddr_accessed = gpaddr; | 878 | vcpu->arch.paddr_accessed = gpaddr; |
879 | vcpu->arch.vaddr_accessed = eaddr; | ||
570 | r = kvmppc_emulate_mmio(run, vcpu); | 880 | r = kvmppc_emulate_mmio(run, vcpu); |
571 | kvmppc_account_exit(vcpu, MMIO_EXITS); | 881 | kvmppc_account_exit(vcpu, MMIO_EXITS); |
572 | } | 882 | } |
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
634 | BUG(); | 944 | BUG(); |
635 | } | 945 | } |
636 | 946 | ||
637 | local_irq_disable(); | 947 | /* |
638 | 948 | * To avoid clobbering exit_reason, only check for signals if we | |
639 | kvmppc_core_prepare_to_enter(vcpu); | 949 | * aren't already exiting to userspace for some other reason. |
640 | 950 | */ | |
641 | if (!(r & RESUME_HOST)) { | 951 | if (!(r & RESUME_HOST)) { |
642 | /* To avoid clobbering exit_reason, only check for signals if | 952 | local_irq_disable(); |
643 | * we aren't already exiting to userspace for some other | 953 | if (kvmppc_prepare_to_enter(vcpu)) { |
644 | * reason. */ | ||
645 | if (signal_pending(current)) { | ||
646 | run->exit_reason = KVM_EXIT_INTR; | 954 | run->exit_reason = KVM_EXIT_INTR; |
647 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 955 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
648 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); | 956 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); |
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
659 | int r; | 967 | int r; |
660 | 968 | ||
661 | vcpu->arch.pc = 0; | 969 | vcpu->arch.pc = 0; |
662 | vcpu->arch.shared->msr = 0; | ||
663 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | ||
664 | vcpu->arch.shared->pir = vcpu->vcpu_id; | 970 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
665 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 971 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
972 | kvmppc_set_msr(vcpu, 0); | ||
666 | 973 | ||
974 | #ifndef CONFIG_KVM_BOOKE_HV | ||
975 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | ||
667 | vcpu->arch.shadow_pid = 1; | 976 | vcpu->arch.shadow_pid = 1; |
977 | vcpu->arch.shared->msr = 0; | ||
978 | #endif | ||
668 | 979 | ||
669 | /* Eye-catching numbers so we know if the guest takes an interrupt | 980 | /* Eye-catching numbers so we know if the guest takes an interrupt |
670 | * before it's programmed its own IVPR/IVORs. */ | 981 | * before it's programmed its own IVPR/IVORs. */ |
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu, | |||
745 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | 1056 | sregs->u.e.csrr0 = vcpu->arch.csrr0; |
746 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | 1057 | sregs->u.e.csrr1 = vcpu->arch.csrr1; |
747 | sregs->u.e.mcsr = vcpu->arch.mcsr; | 1058 | sregs->u.e.mcsr = vcpu->arch.mcsr; |
748 | sregs->u.e.esr = vcpu->arch.shared->esr; | 1059 | sregs->u.e.esr = get_guest_esr(vcpu); |
749 | sregs->u.e.dear = vcpu->arch.shared->dar; | 1060 | sregs->u.e.dear = get_guest_dear(vcpu); |
750 | sregs->u.e.tsr = vcpu->arch.tsr; | 1061 | sregs->u.e.tsr = vcpu->arch.tsr; |
751 | sregs->u.e.tcr = vcpu->arch.tcr; | 1062 | sregs->u.e.tcr = vcpu->arch.tcr; |
752 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | 1063 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); |
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu, | |||
763 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | 1074 | vcpu->arch.csrr0 = sregs->u.e.csrr0; |
764 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | 1075 | vcpu->arch.csrr1 = sregs->u.e.csrr1; |
765 | vcpu->arch.mcsr = sregs->u.e.mcsr; | 1076 | vcpu->arch.mcsr = sregs->u.e.mcsr; |
766 | vcpu->arch.shared->esr = sregs->u.e.esr; | 1077 | set_guest_esr(vcpu, sregs->u.e.esr); |
767 | vcpu->arch.shared->dar = sregs->u.e.dear; | 1078 | set_guest_dear(vcpu, sregs->u.e.dear); |
768 | vcpu->arch.vrsave = sregs->u.e.vrsave; | 1079 | vcpu->arch.vrsave = sregs->u.e.vrsave; |
769 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); | 1080 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); |
770 | 1081 | ||
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm, | |||
932 | { | 1243 | { |
933 | } | 1244 | } |
934 | 1245 | ||
935 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
936 | { | ||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
941 | { | ||
942 | } | ||
943 | |||
944 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) | 1246 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) |
945 | { | 1247 | { |
946 | vcpu->arch.tcr = new_tcr; | 1248 | vcpu->arch.tcr = new_tcr; |
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data) | |||
968 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); | 1270 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); |
969 | } | 1271 | } |
970 | 1272 | ||
1273 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1274 | { | ||
1275 | current->thread.kvm_vcpu = vcpu; | ||
1276 | } | ||
1277 | |||
1278 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) | ||
1279 | { | ||
1280 | current->thread.kvm_vcpu = NULL; | ||
1281 | } | ||
1282 | |||
971 | int __init kvmppc_booke_init(void) | 1283 | int __init kvmppc_booke_init(void) |
972 | { | 1284 | { |
1285 | #ifndef CONFIG_KVM_BOOKE_HV | ||
973 | unsigned long ivor[16]; | 1286 | unsigned long ivor[16]; |
974 | unsigned long max_ivor = 0; | 1287 | unsigned long max_ivor = 0; |
975 | int i; | 1288 | int i; |
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void) | |||
1012 | } | 1325 | } |
1013 | flush_icache_range(kvmppc_booke_handlers, | 1326 | flush_icache_range(kvmppc_booke_handlers, |
1014 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | 1327 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); |
1015 | 1328 | #endif /* !BOOKE_HV */ | |
1016 | return 0; | 1329 | return 0; |
1017 | } | 1330 | } |
1018 | 1331 | ||
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 2fe202705a3f..ba61974c1e20 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | #include <asm/kvm_ppc.h> | 25 | #include <asm/kvm_ppc.h> |
26 | #include <asm/switch_to.h> | ||
26 | #include "timing.h" | 27 | #include "timing.h" |
27 | 28 | ||
28 | /* interrupt priority ordering */ | 29 |
@@ -48,7 +49,20 @@ | |||
48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 | 49 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 |
49 | /* Internal pseudo-irqprio for level triggered externals */ | 50 | /* Internal pseudo-irqprio for level triggered externals */ |
50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 | 51 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 |
51 | #define BOOKE_IRQPRIO_MAX 20 | 52 | #define BOOKE_IRQPRIO_DBELL 21 |
53 | #define BOOKE_IRQPRIO_DBELL_CRIT 22 | ||
54 | #define BOOKE_IRQPRIO_MAX 23 | ||
55 | |||
56 | #define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \ | ||
57 | (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \ | ||
58 | (1 << BOOKE_IRQPRIO_DBELL) | \ | ||
59 | (1 << BOOKE_IRQPRIO_DECREMENTER) | \ | ||
60 | (1 << BOOKE_IRQPRIO_FIT) | \ | ||
61 | (1 << BOOKE_IRQPRIO_EXTERNAL)) | ||
62 | |||
63 | #define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \ | ||
64 | (1 << BOOKE_IRQPRIO_WATCHDOG) | \ | ||
65 | (1 << BOOKE_IRQPRIO_CRITICAL)) | ||
52 | 66 | ||
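These masks group the interrupt priorities by the MSR bit that gates their delivery, so the pending-but-masked doorbell request in booke.c falls straight out of them:

	/* EE-gated source pending while the guest still masks it? */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);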
53 | extern unsigned long kvmppc_booke_handlers; | 67 | extern unsigned long kvmppc_booke_handlers; |
54 | 68 | ||
@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits); | |||
61 | 75 | ||
62 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 76 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
63 | unsigned int inst, int *advance); | 77 | unsigned int inst, int *advance); |
64 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); | 78 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); |
65 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); | 79 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); |
66 | 80 | ||
67 | /* low-level asm code to transfer guest state */ | 81 | /* low-level asm code to transfer guest state */ |
68 | void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); | 82 | void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); |
@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu); | |||
71 | /* high-level function, manages flags, host state */ | 85 | /* high-level function, manages flags, host state */ |
72 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); | 86 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); |
73 | 87 | ||
88 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | ||
89 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu); | ||
90 | |||
91 | enum int_class { | ||
92 | INT_CLASS_NONCRIT, | ||
93 | INT_CLASS_CRIT, | ||
94 | INT_CLASS_MC, | ||
95 | INT_CLASS_DBG, | ||
96 | }; | ||
97 | |||
98 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); | ||
99 | |||
100 | /* | ||
101 | * Load up guest vcpu FP state if it's needed. | ||
102 | * It also sets MSR_FP in the thread so that the host knows | ||
103 | * we're holding the FPU, and the host can then help save the | ||
104 | * guest vcpu FP state if other threads need the FPU. | ||
105 | * This simulates an FP unavailable fault. | ||
106 | * | ||
107 | * Must be called with preemption disabled. | ||
108 | */ | ||
109 | static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) | ||
110 | { | ||
111 | #ifdef CONFIG_PPC_FPU | ||
112 | if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { | ||
113 | load_up_fpu(); | ||
114 | current->thread.regs->msr |= MSR_FP; | ||
115 | } | ||
116 | #endif | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Save guest vcpu FP state into thread. | ||
121 | * Must be called with preemption disabled. | ||
122 | */ | ||
123 | static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) | ||
124 | { | ||
125 | #ifdef CONFIG_PPC_FPU | ||
126 | if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP)) | ||
127 | giveup_fpu(current); | ||
128 | #endif | ||
129 | } | ||
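Their intended pairing, as used around __kvmppc_vcpu_run() in booke.c (with preemption already disabled at that point):

	vcpu->fpu_active = 1;
	kvmppc_load_guest_fp(vcpu);
	/* ... guest runs, possibly using the FPU ... */
	kvmppc_save_guest_fp(vcpu);
	vcpu->fpu_active = 0;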
74 | #endif /* __KVM_BOOKE_H__ */ | 130 | #endif /* __KVM_BOOKE_H__ */ |
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index 3e652da36534..6c76397f2af4 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
40 | unsigned int inst, int *advance) | 40 | unsigned int inst, int *advance) |
41 | { | 41 | { |
42 | int emulated = EMULATE_DONE; | 42 | int emulated = EMULATE_DONE; |
43 | int rs; | 43 | int rs = get_rs(inst); |
44 | int rt; | 44 | int rt = get_rt(inst); |
45 | 45 | ||
46 | switch (get_op(inst)) { | 46 | switch (get_op(inst)) { |
47 | case 19: | 47 | case 19: |
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | switch (get_xop(inst)) { | 62 | switch (get_xop(inst)) { |
63 | 63 | ||
64 | case OP_31_XOP_MFMSR: | 64 | case OP_31_XOP_MFMSR: |
65 | rt = get_rt(inst); | ||
66 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); | 65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
67 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
68 | break; | 67 | break; |
69 | 68 | ||
70 | case OP_31_XOP_MTMSR: | 69 | case OP_31_XOP_MTMSR: |
71 | rs = get_rs(inst); | ||
72 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | 70 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); |
73 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); | 71 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
74 | break; | 72 | break; |
75 | 73 | ||
76 | case OP_31_XOP_WRTEE: | 74 | case OP_31_XOP_WRTEE: |
77 | rs = get_rs(inst); | ||
78 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | 75 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
79 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); | 76 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
80 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 77 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
@@ -99,22 +96,32 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
99 | return emulated; | 96 | return emulated; |
100 | } | 97 | } |
101 | 98 | ||
102 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 99 | /* |
100 | * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode). | ||
101 | * Their backing store is in real registers, and these functions | ||
102 | * will return the wrong result if called for them in another context | ||
103 | * (such as debugging). | ||
104 | */ | ||
105 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) | ||
103 | { | 106 | { |
104 | int emulated = EMULATE_DONE; | 107 | int emulated = EMULATE_DONE; |
105 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
106 | 108 | ||
107 | switch (sprn) { | 109 | switch (sprn) { |
108 | case SPRN_DEAR: | 110 | case SPRN_DEAR: |
109 | vcpu->arch.shared->dar = spr_val; break; | 111 | vcpu->arch.shared->dar = spr_val; |
112 | break; | ||
110 | case SPRN_ESR: | 113 | case SPRN_ESR: |
111 | vcpu->arch.shared->esr = spr_val; break; | 114 | vcpu->arch.shared->esr = spr_val; |
115 | break; | ||
112 | case SPRN_DBCR0: | 116 | case SPRN_DBCR0: |
113 | vcpu->arch.dbcr0 = spr_val; break; | 117 | vcpu->arch.dbcr0 = spr_val; |
118 | break; | ||
114 | case SPRN_DBCR1: | 119 | case SPRN_DBCR1: |
115 | vcpu->arch.dbcr1 = spr_val; break; | 120 | vcpu->arch.dbcr1 = spr_val; |
121 | break; | ||
116 | case SPRN_DBSR: | 122 | case SPRN_DBSR: |
117 | vcpu->arch.dbsr &= ~spr_val; break; | 123 | vcpu->arch.dbsr &= ~spr_val; |
124 | break; | ||
118 | case SPRN_TSR: | 125 | case SPRN_TSR: |
119 | kvmppc_clr_tsr_bits(vcpu, spr_val); | 126 | kvmppc_clr_tsr_bits(vcpu, spr_val); |
120 | break; | 127 | break; |
@@ -122,20 +129,29 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
122 | kvmppc_set_tcr(vcpu, spr_val); | 129 | kvmppc_set_tcr(vcpu, spr_val); |
123 | break; | 130 | break; |
124 | 131 | ||
125 | /* Note: SPRG4-7 are user-readable. These values are | 132 | /* |
126 | * loaded into the real SPRGs when resuming the | 133 | * Note: SPRG4-7 are user-readable. |
127 | * guest. */ | 134 | * These values are loaded into the real SPRGs when resuming the |
135 | * guest (PR-mode only). | ||
136 | */ | ||
128 | case SPRN_SPRG4: | 137 | case SPRN_SPRG4: |
129 | vcpu->arch.shared->sprg4 = spr_val; break; | 138 | vcpu->arch.shared->sprg4 = spr_val; |
139 | break; | ||
130 | case SPRN_SPRG5: | 140 | case SPRN_SPRG5: |
131 | vcpu->arch.shared->sprg5 = spr_val; break; | 141 | vcpu->arch.shared->sprg5 = spr_val; |
142 | break; | ||
132 | case SPRN_SPRG6: | 143 | case SPRN_SPRG6: |
133 | vcpu->arch.shared->sprg6 = spr_val; break; | 144 | vcpu->arch.shared->sprg6 = spr_val; |
145 | break; | ||
134 | case SPRN_SPRG7: | 146 | case SPRN_SPRG7: |
135 | vcpu->arch.shared->sprg7 = spr_val; break; | 147 | vcpu->arch.shared->sprg7 = spr_val; |
148 | break; | ||
136 | 149 | ||
137 | case SPRN_IVPR: | 150 | case SPRN_IVPR: |
138 | vcpu->arch.ivpr = spr_val; | 151 | vcpu->arch.ivpr = spr_val; |
152 | #ifdef CONFIG_KVM_BOOKE_HV | ||
153 | mtspr(SPRN_GIVPR, spr_val); | ||
154 | #endif | ||
139 | break; | 155 | break; |
140 | case SPRN_IVOR0: | 156 | case SPRN_IVOR0: |
141 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; | 157 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; |
@@ -145,6 +161,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
145 | break; | 161 | break; |
146 | case SPRN_IVOR2: | 162 | case SPRN_IVOR2: |
147 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; | 163 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; |
164 | #ifdef CONFIG_KVM_BOOKE_HV | ||
165 | mtspr(SPRN_GIVOR2, spr_val); | ||
166 | #endif | ||
148 | break; | 167 | break; |
149 | case SPRN_IVOR3: | 168 | case SPRN_IVOR3: |
150 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; | 169 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; |
@@ -163,6 +182,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
163 | break; | 182 | break; |
164 | case SPRN_IVOR8: | 183 | case SPRN_IVOR8: |
165 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; | 184 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; |
185 | #ifdef CONFIG_KVM_BOOKE_HV | ||
186 | mtspr(SPRN_GIVOR8, spr_val); | ||
187 | #endif | ||
166 | break; | 188 | break; |
167 | case SPRN_IVOR9: | 189 | case SPRN_IVOR9: |
168 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; | 190 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; |
@@ -193,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
193 | return emulated; | 215 | return emulated; |
194 | } | 216 | } |
195 | 217 | ||
196 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 218 | int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
197 | { | 219 | { |
198 | int emulated = EMULATE_DONE; | 220 | int emulated = EMULATE_DONE; |
199 | 221 | ||
200 | switch (sprn) { | 222 | switch (sprn) { |
201 | case SPRN_IVPR: | 223 | case SPRN_IVPR: |
202 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; | 224 | *spr_val = vcpu->arch.ivpr; |
225 | break; | ||
203 | case SPRN_DEAR: | 226 | case SPRN_DEAR: |
204 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; | 227 | *spr_val = vcpu->arch.shared->dar; |
228 | break; | ||
205 | case SPRN_ESR: | 229 | case SPRN_ESR: |
206 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; | 230 | *spr_val = vcpu->arch.shared->esr; |
231 | break; | ||
207 | case SPRN_DBCR0: | 232 | case SPRN_DBCR0: |
208 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; | 233 | *spr_val = vcpu->arch.dbcr0; |
234 | break; | ||
209 | case SPRN_DBCR1: | 235 | case SPRN_DBCR1: |
210 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; | 236 | *spr_val = vcpu->arch.dbcr1; |
237 | break; | ||
211 | case SPRN_DBSR: | 238 | case SPRN_DBSR: |
212 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; | 239 | *spr_val = vcpu->arch.dbsr; |
240 | break; | ||
213 | case SPRN_TSR: | 241 | case SPRN_TSR: |
214 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break; | 242 | *spr_val = vcpu->arch.tsr; |
243 | break; | ||
215 | case SPRN_TCR: | 244 | case SPRN_TCR: |
216 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break; | 245 | *spr_val = vcpu->arch.tcr; |
246 | break; | ||
217 | 247 | ||
218 | case SPRN_IVOR0: | 248 | case SPRN_IVOR0: |
219 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); | 249 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; |
220 | break; | 250 | break; |
221 | case SPRN_IVOR1: | 251 | case SPRN_IVOR1: |
222 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); | 252 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; |
223 | break; | 253 | break; |
224 | case SPRN_IVOR2: | 254 | case SPRN_IVOR2: |
225 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); | 255 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; |
226 | break; | 256 | break; |
227 | case SPRN_IVOR3: | 257 | case SPRN_IVOR3: |
228 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); | 258 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; |
229 | break; | 259 | break; |
230 | case SPRN_IVOR4: | 260 | case SPRN_IVOR4: |
231 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); | 261 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; |
232 | break; | 262 | break; |
233 | case SPRN_IVOR5: | 263 | case SPRN_IVOR5: |
234 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); | 264 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; |
235 | break; | 265 | break; |
236 | case SPRN_IVOR6: | 266 | case SPRN_IVOR6: |
237 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); | 267 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; |
238 | break; | 268 | break; |
239 | case SPRN_IVOR7: | 269 | case SPRN_IVOR7: |
240 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); | 270 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; |
241 | break; | 271 | break; |
242 | case SPRN_IVOR8: | 272 | case SPRN_IVOR8: |
243 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); | 273 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; |
244 | break; | 274 | break; |
245 | case SPRN_IVOR9: | 275 | case SPRN_IVOR9: |
246 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); | 276 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; |
247 | break; | 277 | break; |
248 | case SPRN_IVOR10: | 278 | case SPRN_IVOR10: |
249 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); | 279 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; |
250 | break; | 280 | break; |
251 | case SPRN_IVOR11: | 281 | case SPRN_IVOR11: |
252 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); | 282 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; |
253 | break; | 283 | break; |
254 | case SPRN_IVOR12: | 284 | case SPRN_IVOR12: |
255 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); | 285 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; |
256 | break; | 286 | break; |
257 | case SPRN_IVOR13: | 287 | case SPRN_IVOR13: |
258 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); | 288 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; |
259 | break; | 289 | break; |
260 | case SPRN_IVOR14: | 290 | case SPRN_IVOR14: |
261 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); | 291 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; |
262 | break; | 292 | break; |
263 | case SPRN_IVOR15: | 293 | case SPRN_IVOR15: |
264 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); | 294 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; |
265 | break; | 295 | break; |
266 | 296 | ||
267 | default: | 297 | default: |
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index c8c4b878795a..8feec2ff3928 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -419,13 +419,13 @@ lightweight_exit: | |||
419 | * written directly to the shared area, so we | 419 | * written directly to the shared area, so we |
420 | * need to reload them here with the guest's values. | 420 | * need to reload them here with the guest's values. |
421 | */ | 421 | */ |
422 | lwz r3, VCPU_SHARED_SPRG4(r5) | 422 | PPC_LD(r3, VCPU_SHARED_SPRG4, r5) |
423 | mtspr SPRN_SPRG4W, r3 | 423 | mtspr SPRN_SPRG4W, r3 |
424 | lwz r3, VCPU_SHARED_SPRG5(r5) | 424 | PPC_LD(r3, VCPU_SHARED_SPRG5, r5) |
425 | mtspr SPRN_SPRG5W, r3 | 425 | mtspr SPRN_SPRG5W, r3 |
426 | lwz r3, VCPU_SHARED_SPRG6(r5) | 426 | PPC_LD(r3, VCPU_SHARED_SPRG6, r5) |
427 | mtspr SPRN_SPRG6W, r3 | 427 | mtspr SPRN_SPRG6W, r3 |
428 | lwz r3, VCPU_SHARED_SPRG7(r5) | 428 | PPC_LD(r3, VCPU_SHARED_SPRG7, r5) |
429 | mtspr SPRN_SPRG7W, r3 | 429 | mtspr SPRN_SPRG7W, r3 |
430 | 430 | ||
431 | #ifdef CONFIG_KVM_EXIT_TIMING | 431 | #ifdef CONFIG_KVM_EXIT_TIMING |
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S new file mode 100644 index 000000000000..6048a00515d7 --- /dev/null +++ b/arch/powerpc/kvm/bookehv_interrupts.S | |||
@@ -0,0 +1,597 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. | ||
16 | * | ||
17 | * Author: Varun Sethi <varun.sethi@freescale.com> | ||
18 | * Author: Scott Wood <scottwood@freescale.com> | ||
19 | * | ||
20 | * This file is derived from arch/powerpc/kvm/booke_interrupts.S | ||
21 | */ | ||
22 | |||
23 | #include <asm/ppc_asm.h> | ||
24 | #include <asm/kvm_asm.h> | ||
25 | #include <asm/reg.h> | ||
26 | #include <asm/mmu-44x.h> | ||
27 | #include <asm/page.h> | ||
28 | #include <asm/asm-compat.h> | ||
29 | #include <asm/asm-offsets.h> | ||
30 | #include <asm/bitsperlong.h> | ||
31 | #include <asm/thread_info.h> | ||
32 | |||
33 | #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ | ||
34 | |||
35 | #define GET_VCPU(vcpu, thread) \ | ||
36 | PPC_LL vcpu, THREAD_KVM_VCPU(thread) | ||
37 | |||
38 | #define LONGBYTES (BITS_PER_LONG / 8) | ||
39 | |||
40 | #define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES)) | ||
41 | #define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES)) | ||
42 | |||
43 | /* The host stack layout: */ | ||
44 | #define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */ | ||
45 | #define HOST_CALLEE_LR (1 * LONGBYTES) | ||
46 | #define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */ | ||
47 | /* | ||
48 | * r2 is special: it holds 'current', and is made nonvolatile in the | ||
49 | * kernel with the -ffixed-r2 gcc option. | ||
50 | */ | ||
51 | #define HOST_R2 (3 * LONGBYTES) | ||
52 | #define HOST_CR (4 * LONGBYTES) | ||
53 | #define HOST_NV_GPRS (5 * LONGBYTES) | ||
54 | #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES)) | ||
55 | #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES) | ||
56 | #define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */ | ||
57 | #define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */ | ||
58 | |||
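Read as a C struct, the frame laid out by the defines above looks roughly like the sketch below. This is illustrative only; the offsets above are authoritative, and the struct and field names are hypothetical:

	struct host_stack_frame {		/* one slot per define above */
		unsigned long r1;		/* HOST_R1: back chain, written by stwu */
		unsigned long callee_lr;	/* HOST_CALLEE_LR */
		struct kvm_run *run;		/* HOST_RUN */
		unsigned long r2;		/* HOST_R2: 'current' */
		unsigned long cr;		/* HOST_CR */
		unsigned long nv_gprs[18];	/* HOST_NV_GPR(14)..HOST_NV_GPR(31) */
	};	/* rounded up to a 16-byte multiple by HOST_STACK_SIZE */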
59 | #define NEED_EMU 0x00000001 /* emulation -- save nv regs */ | ||
60 | #define NEED_DEAR 0x00000002 /* save faulting DEAR */ | ||
61 | #define NEED_ESR 0x00000004 /* save faulting ESR */ | ||
62 | |||
63 | /* | ||
64 | * On entry: | ||
65 | * r4 = vcpu, r5 = srr0, r6 = srr1 | ||
66 | * saved in vcpu: cr, ctr, r3-r13 | ||
67 | */ | ||
68 | .macro kvm_handler_common intno, srr0, flags | ||
69 | /* Restore host stack pointer */ | ||
70 | PPC_STL r1, VCPU_GPR(r1)(r4) | ||
71 | PPC_STL r2, VCPU_GPR(r2)(r4) | ||
72 | PPC_LL r1, VCPU_HOST_STACK(r4) | ||
73 | PPC_LL r2, HOST_R2(r1) | ||
74 | |||
75 | mfspr r10, SPRN_PID | ||
76 | lwz r8, VCPU_HOST_PID(r4) | ||
77 | PPC_LL r11, VCPU_SHARED(r4) | ||
78 | PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */ | ||
79 | li r14, \intno | ||
80 | |||
81 | stw r10, VCPU_GUEST_PID(r4) | ||
82 | mtspr SPRN_PID, r8 | ||
83 | |||
84 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
85 | /* save exit time */ | ||
86 | 1: mfspr r7, SPRN_TBRU | ||
87 | mfspr r8, SPRN_TBRL | ||
88 | mfspr r9, SPRN_TBRU | ||
89 | cmpw r9, r7 | ||
90 | stw r8, VCPU_TIMING_EXIT_TBL(r4) | ||
91 | bne- 1b | ||
92 | stw r9, VCPU_TIMING_EXIT_TBU(r4) | ||
93 | #endif | ||
94 | |||
95 | oris r8, r6, MSR_CE@h | ||
96 | PPC_STD(r6, VCPU_SHARED_MSR, r11) | ||
97 | ori r8, r8, MSR_ME | MSR_RI | ||
98 | PPC_STL r5, VCPU_PC(r4) | ||
99 | |||
100 | /* | ||
101 | * Make sure CE/ME/RI are set (if appropriate for exception type) | ||
102 | * whether or not the guest had them set. Since mfmsr/mtmsr are | ||
103 | * somewhat expensive, skip in the common case where the guest | ||
104 | * had all these bits set (and thus they're still set if | ||
105 | * appropriate for the exception type). | ||
106 | */ | ||
107 | cmpw r6, r8 | ||
108 | beq 1f | ||
109 | mfmsr r7 | ||
110 | .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0 | ||
111 | oris r7, r7, MSR_CE@h | ||
112 | .endif | ||
113 | .if \srr0 != SPRN_MCSRR0 | ||
114 | ori r7, r7, MSR_ME | MSR_RI | ||
115 | .endif | ||
116 | mtmsr r7 | ||
117 | 1: | ||
118 | |||
119 | .if \flags & NEED_EMU | ||
120 | /* | ||
121 | * This assumes you have external PID support. | ||
122 | * To support a bookehv CPU without external PID, you'll | ||
123 | * need to look up the TLB entry and create a temporary mapping. | ||
124 | * | ||
125 | * FIXME: we don't currently handle the case where lwepx faults. PR-mode | ||
126 | * booke doesn't handle it either. Since Linux doesn't use | ||
127 | * broadcast tlbivax anymore, the only way this should happen is | ||
128 | * if the guest maps its memory execute-but-not-read, or if we | ||
129 | * somehow take a TLB miss in the middle of this entry code and | ||
130 | * evict the relevant entry. On e500mc, all kernel lowmem is | ||
131 | * bolted into TLB1 large page mappings, and we don't use | ||
132 | * broadcast invalidates, so we should not take a TLB miss here. | ||
133 | * | ||
134 | * Later we'll need to deal with faults here. Disallowing guest | ||
135 | * mappings that are execute-but-not-read could be an option on | ||
136 | * e500mc, but not on chips with an LRAT if it is used. | ||
137 | */ | ||
138 | |||
139 | mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */ | ||
140 | PPC_STL r15, VCPU_GPR(r15)(r4) | ||
141 | PPC_STL r16, VCPU_GPR(r16)(r4) | ||
142 | PPC_STL r17, VCPU_GPR(r17)(r4) | ||
143 | PPC_STL r18, VCPU_GPR(r18)(r4) | ||
144 | PPC_STL r19, VCPU_GPR(r19)(r4) | ||
145 | mr r8, r3 | ||
146 | PPC_STL r20, VCPU_GPR(r20)(r4) | ||
147 | rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS | ||
148 | PPC_STL r21, VCPU_GPR(r21)(r4) | ||
149 | rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR | ||
150 | PPC_STL r22, VCPU_GPR(r22)(r4) | ||
151 | rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID | ||
152 | PPC_STL r23, VCPU_GPR(r23)(r4) | ||
153 | PPC_STL r24, VCPU_GPR(r24)(r4) | ||
154 | PPC_STL r25, VCPU_GPR(r25)(r4) | ||
155 | PPC_STL r26, VCPU_GPR(r26)(r4) | ||
156 | PPC_STL r27, VCPU_GPR(r27)(r4) | ||
157 | PPC_STL r28, VCPU_GPR(r28)(r4) | ||
158 | PPC_STL r29, VCPU_GPR(r29)(r4) | ||
159 | PPC_STL r30, VCPU_GPR(r30)(r4) | ||
160 | PPC_STL r31, VCPU_GPR(r31)(r4) | ||
161 | mtspr SPRN_EPLC, r8 | ||
162 | |||
163 | /* disable preemption, so we are sure we hit the fixup handler */ | ||
164 | #ifdef CONFIG_PPC64 | ||
165 | clrrdi r8,r1,THREAD_SHIFT | ||
166 | #else | ||
167 | rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */ | ||
168 | #endif | ||
169 | li r7, 1 | ||
170 | stw r7, TI_PREEMPT(r8) | ||
171 | |||
172 | isync | ||
173 | |||
174 | /* | ||
175 | * In case the read goes wrong, we catch it and write an invalid value | ||
176 | * in LAST_INST instead. | ||
177 | */ | ||
178 | 1: lwepx r9, 0, r5 | ||
179 | 2: | ||
180 | .section .fixup, "ax" | ||
181 | 3: li r9, KVM_INST_FETCH_FAILED | ||
182 | b 2b | ||
183 | .previous | ||
184 | .section __ex_table,"a" | ||
185 | PPC_LONG_ALIGN | ||
186 | PPC_LONG 1b,3b | ||
187 | .previous | ||
188 | |||
189 | mtspr SPRN_EPLC, r3 | ||
190 | li r7, 0 | ||
191 | stw r7, TI_PREEMPT(r8) | ||
192 | stw r9, VCPU_LAST_INST(r4) | ||
193 | .endif | ||
194 | |||
195 | .if \flags & NEED_ESR | ||
196 | mfspr r8, SPRN_ESR | ||
197 | PPC_STL r8, VCPU_FAULT_ESR(r4) | ||
198 | .endif | ||
199 | |||
200 | .if \flags & NEED_DEAR | ||
201 | mfspr r9, SPRN_DEAR | ||
202 | PPC_STL r9, VCPU_FAULT_DEAR(r4) | ||
203 | .endif | ||
204 | |||
205 | b kvmppc_resume_host | ||
206 | .endm | ||
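One detail of kvm_handler_common worth spelling out: the CONFIG_KVM_EXIT_TIMING block reads the 64-bit timebase with the standard 32-bit-safe sequence -- sample TBU, then TBL, then TBU again, and retry if the upper half ticked over in between. The same loop in C (a sketch using the kernel's mfspr() accessor; hi and lo are hypothetical locals):

	u32 hi, lo;

	do {
		hi = mfspr(SPRN_TBRU);		/* upper 32 bits */
		lo = mfspr(SPRN_TBRL);		/* lower 32 bits */
	} while (mfspr(SPRN_TBRU) != hi);	/* TBU moved: TBL wrapped, retry */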
207 | |||
208 | /* | ||
209 | * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h | ||
210 | */ | ||
211 | .macro kvm_handler intno srr0, srr1, flags | ||
212 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | ||
213 | GET_VCPU(r11, r10) | ||
214 | PPC_STL r3, VCPU_GPR(r3)(r11) | ||
215 | mfspr r3, SPRN_SPRG_RSCRATCH0 | ||
216 | PPC_STL r4, VCPU_GPR(r4)(r11) | ||
217 | PPC_LL r4, THREAD_NORMSAVE(0)(r10) | ||
218 | PPC_STL r5, VCPU_GPR(r5)(r11) | ||
219 | stw r13, VCPU_CR(r11) | ||
220 | mfspr r5, \srr0 | ||
221 | PPC_STL r3, VCPU_GPR(r10)(r11) | ||
222 | PPC_LL r3, THREAD_NORMSAVE(2)(r10) | ||
223 | PPC_STL r6, VCPU_GPR(r6)(r11) | ||
224 | PPC_STL r4, VCPU_GPR(r11)(r11) | ||
225 | mfspr r6, \srr1 | ||
226 | PPC_STL r7, VCPU_GPR(r7)(r11) | ||
227 | PPC_STL r8, VCPU_GPR(r8)(r11) | ||
228 | PPC_STL r9, VCPU_GPR(r9)(r11) | ||
229 | PPC_STL r3, VCPU_GPR(r13)(r11) | ||
230 | mfctr r7 | ||
231 | PPC_STL r12, VCPU_GPR(r12)(r11) | ||
232 | PPC_STL r7, VCPU_CTR(r11) | ||
233 | mr r4, r11 | ||
234 | kvm_handler_common \intno, \srr0, \flags | ||
235 | .endm | ||
236 | |||
237 | .macro kvm_lvl_handler intno scratch srr0, srr1, flags | ||
238 | _GLOBAL(kvmppc_handler_\intno\()_\srr1) | ||
239 | mfspr r10, SPRN_SPRG_THREAD | ||
240 | GET_VCPU(r11, r10) | ||
241 | PPC_STL r3, VCPU_GPR(r3)(r11) | ||
242 | mfspr r3, \scratch | ||
243 | PPC_STL r4, VCPU_GPR(r4)(r11) | ||
244 | PPC_LL r4, GPR9(r8) | ||
245 | PPC_STL r5, VCPU_GPR(r5)(r11) | ||
246 | stw r9, VCPU_CR(r11) | ||
247 | mfspr r5, \srr0 | ||
248 | PPC_STL r3, VCPU_GPR(r8)(r11) | ||
249 | PPC_LL r3, GPR10(r8) | ||
250 | PPC_STL r6, VCPU_GPR(r6)(r11) | ||
251 | PPC_STL r4, VCPU_GPR(r9)(r11) | ||
252 | mfspr r6, \srr1 | ||
253 | PPC_LL r4, GPR11(r8) | ||
254 | PPC_STL r7, VCPU_GPR(r7)(r11) | ||
255 | PPC_STL r3, VCPU_GPR(r10)(r11) | ||
256 | mfctr r7 | ||
257 | PPC_STL r12, VCPU_GPR(r12)(r11) | ||
258 | PPC_STL r13, VCPU_GPR(r13)(r11) | ||
259 | PPC_STL r4, VCPU_GPR(r11)(r11) | ||
260 | PPC_STL r7, VCPU_CTR(r11) | ||
261 | mr r4, r11 | ||
262 | kvm_handler_common \intno, \srr0, \flags | ||
263 | .endm | ||
264 | |||
265 | kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \ | ||
266 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
267 | kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \ | ||
268 | SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0 | ||
269 | kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \ | ||
270 | SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR) | ||
271 | kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR | ||
272 | kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0 | ||
273 | kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \ | ||
274 | SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR) | ||
275 | kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR | ||
276 | kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
277 | kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 | ||
278 | kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
279 | kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0 | ||
280 | kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0 | ||
281 | kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \ | ||
282 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
283 | kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \ | ||
284 | SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR) | ||
285 | kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0 | ||
286 | kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0 | ||
287 | kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0 | ||
288 | kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0 | ||
289 | kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0 | ||
290 | kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0 | ||
291 | kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \ | ||
292 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
293 | kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU | ||
294 | kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0 | ||
295 | kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0 | ||
296 | kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \ | ||
297 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
298 | kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ | ||
299 | SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0 | ||
300 | kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \ | ||
301 | SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0 | ||
302 | |||
303 | |||
304 | /* Registers: | ||
305 | * SPRG_SCRATCH0: guest r10 | ||
306 | * r4: vcpu pointer | ||
307 | * r11: vcpu->arch.shared | ||
308 | * r14: KVM exit number | ||
309 | */ | ||
310 | _GLOBAL(kvmppc_resume_host) | ||
311 | /* Save remaining volatile guest register state to vcpu. */ | ||
312 | mfspr r3, SPRN_VRSAVE | ||
313 | PPC_STL r0, VCPU_GPR(r0)(r4) | ||
314 | mflr r5 | ||
315 | mfspr r6, SPRN_SPRG4 | ||
316 | PPC_STL r5, VCPU_LR(r4) | ||
317 | mfspr r7, SPRN_SPRG5 | ||
318 | stw r3, VCPU_VRSAVE(r4) | ||
319 | PPC_STD(r6, VCPU_SHARED_SPRG4, r11) | ||
320 | mfspr r8, SPRN_SPRG6 | ||
321 | PPC_STD(r7, VCPU_SHARED_SPRG5, r11) | ||
322 | mfspr r9, SPRN_SPRG7 | ||
323 | PPC_STD(r8, VCPU_SHARED_SPRG6, r11) | ||
324 | mfxer r3 | ||
325 | PPC_STD(r9, VCPU_SHARED_SPRG7, r11) | ||
326 | |||
327 | /* save guest MAS registers and restore host mas4 & mas6 */ | ||
328 | mfspr r5, SPRN_MAS0 | ||
329 | PPC_STL r3, VCPU_XER(r4) | ||
330 | mfspr r6, SPRN_MAS1 | ||
331 | stw r5, VCPU_SHARED_MAS0(r11) | ||
332 | mfspr r7, SPRN_MAS2 | ||
333 | stw r6, VCPU_SHARED_MAS1(r11) | ||
334 | PPC_STD(r7, VCPU_SHARED_MAS2, r11) | ||
335 | mfspr r5, SPRN_MAS3 | ||
336 | mfspr r6, SPRN_MAS4 | ||
337 | stw r5, VCPU_SHARED_MAS7_3+4(r11) | ||
338 | mfspr r7, SPRN_MAS6 | ||
339 | stw r6, VCPU_SHARED_MAS4(r11) | ||
340 | mfspr r5, SPRN_MAS7 | ||
341 | lwz r6, VCPU_HOST_MAS4(r4) | ||
342 | stw r7, VCPU_SHARED_MAS6(r11) | ||
343 | lwz r8, VCPU_HOST_MAS6(r4) | ||
344 | mtspr SPRN_MAS4, r6 | ||
345 | stw r5, VCPU_SHARED_MAS7_3+0(r11) | ||
346 | mtspr SPRN_MAS6, r8 | ||
347 | /* Enable MAS register updates via exception */ | ||
348 | mfspr r3, SPRN_EPCR | ||
349 | rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH | ||
350 | mtspr SPRN_EPCR, r3 | ||
351 | isync | ||
352 | |||
353 | /* Switch to kernel stack and jump to handler. */ | ||
354 | PPC_LL r3, HOST_RUN(r1) | ||
355 | mr r5, r14 /* intno */ | ||
356 | mr r14, r4 /* Save vcpu pointer. */ | ||
357 | bl kvmppc_handle_exit | ||
358 | |||
359 | /* Restore vcpu pointer and the nonvolatiles we used. */ | ||
360 | mr r4, r14 | ||
361 | PPC_LL r14, VCPU_GPR(r14)(r4) | ||
362 | |||
363 | andi. r5, r3, RESUME_FLAG_NV | ||
364 | beq skip_nv_load | ||
365 | PPC_LL r15, VCPU_GPR(r15)(r4) | ||
366 | PPC_LL r16, VCPU_GPR(r16)(r4) | ||
367 | PPC_LL r17, VCPU_GPR(r17)(r4) | ||
368 | PPC_LL r18, VCPU_GPR(r18)(r4) | ||
369 | PPC_LL r19, VCPU_GPR(r19)(r4) | ||
370 | PPC_LL r20, VCPU_GPR(r20)(r4) | ||
371 | PPC_LL r21, VCPU_GPR(r21)(r4) | ||
372 | PPC_LL r22, VCPU_GPR(r22)(r4) | ||
373 | PPC_LL r23, VCPU_GPR(r23)(r4) | ||
374 | PPC_LL r24, VCPU_GPR(r24)(r4) | ||
375 | PPC_LL r25, VCPU_GPR(r25)(r4) | ||
376 | PPC_LL r26, VCPU_GPR(r26)(r4) | ||
377 | PPC_LL r27, VCPU_GPR(r27)(r4) | ||
378 | PPC_LL r28, VCPU_GPR(r28)(r4) | ||
379 | PPC_LL r29, VCPU_GPR(r29)(r4) | ||
380 | PPC_LL r30, VCPU_GPR(r30)(r4) | ||
381 | PPC_LL r31, VCPU_GPR(r31)(r4) | ||
382 | skip_nv_load: | ||
383 | /* Should we return to the guest? */ | ||
384 | andi. r5, r3, RESUME_FLAG_HOST | ||
385 | beq lightweight_exit | ||
386 | |||
387 | srawi r3, r3, 2 /* Shift -ERR back down. */ | ||
388 | |||
389 | heavyweight_exit: | ||
390 | /* Not returning to guest. */ | ||
391 | PPC_LL r5, HOST_STACK_LR(r1) | ||
392 | lwz r6, HOST_CR(r1) | ||
393 | |||
394 | /* | ||
395 | * We already saved guest volatile register state; now save the | ||
396 | * non-volatiles. | ||
397 | */ | ||
398 | |||
399 | PPC_STL r15, VCPU_GPR(r15)(r4) | ||
400 | PPC_STL r16, VCPU_GPR(r16)(r4) | ||
401 | PPC_STL r17, VCPU_GPR(r17)(r4) | ||
402 | PPC_STL r18, VCPU_GPR(r18)(r4) | ||
403 | PPC_STL r19, VCPU_GPR(r19)(r4) | ||
404 | PPC_STL r20, VCPU_GPR(r20)(r4) | ||
405 | PPC_STL r21, VCPU_GPR(r21)(r4) | ||
406 | PPC_STL r22, VCPU_GPR(r22)(r4) | ||
407 | PPC_STL r23, VCPU_GPR(r23)(r4) | ||
408 | PPC_STL r24, VCPU_GPR(r24)(r4) | ||
409 | PPC_STL r25, VCPU_GPR(r25)(r4) | ||
410 | PPC_STL r26, VCPU_GPR(r26)(r4) | ||
411 | PPC_STL r27, VCPU_GPR(r27)(r4) | ||
412 | PPC_STL r28, VCPU_GPR(r28)(r4) | ||
413 | PPC_STL r29, VCPU_GPR(r29)(r4) | ||
414 | PPC_STL r30, VCPU_GPR(r30)(r4) | ||
415 | PPC_STL r31, VCPU_GPR(r31)(r4) | ||
416 | |||
417 | /* Load host non-volatile register state from host stack. */ | ||
418 | PPC_LL r14, HOST_NV_GPR(r14)(r1) | ||
419 | PPC_LL r15, HOST_NV_GPR(r15)(r1) | ||
420 | PPC_LL r16, HOST_NV_GPR(r16)(r1) | ||
421 | PPC_LL r17, HOST_NV_GPR(r17)(r1) | ||
422 | PPC_LL r18, HOST_NV_GPR(r18)(r1) | ||
423 | PPC_LL r19, HOST_NV_GPR(r19)(r1) | ||
424 | PPC_LL r20, HOST_NV_GPR(r20)(r1) | ||
425 | PPC_LL r21, HOST_NV_GPR(r21)(r1) | ||
426 | PPC_LL r22, HOST_NV_GPR(r22)(r1) | ||
427 | PPC_LL r23, HOST_NV_GPR(r23)(r1) | ||
428 | PPC_LL r24, HOST_NV_GPR(r24)(r1) | ||
429 | PPC_LL r25, HOST_NV_GPR(r25)(r1) | ||
430 | PPC_LL r26, HOST_NV_GPR(r26)(r1) | ||
431 | PPC_LL r27, HOST_NV_GPR(r27)(r1) | ||
432 | PPC_LL r28, HOST_NV_GPR(r28)(r1) | ||
433 | PPC_LL r29, HOST_NV_GPR(r29)(r1) | ||
434 | PPC_LL r30, HOST_NV_GPR(r30)(r1) | ||
435 | PPC_LL r31, HOST_NV_GPR(r31)(r1) | ||
436 | |||
437 | /* Return to kvm_vcpu_run(). */ | ||
438 | mtlr r5 | ||
439 | mtcr r6 | ||
440 | addi r1, r1, HOST_STACK_SIZE | ||
441 | /* r3 still contains the return code from kvmppc_handle_exit(). */ | ||
442 | blr | ||
443 | |||
444 | /* Registers: | ||
445 | * r3: kvm_run pointer | ||
446 | * r4: vcpu pointer | ||
447 | */ | ||
448 | _GLOBAL(__kvmppc_vcpu_run) | ||
449 | stwu r1, -HOST_STACK_SIZE(r1) | ||
450 | PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */ | ||
451 | |||
452 | /* Save host state to stack. */ | ||
453 | PPC_STL r3, HOST_RUN(r1) | ||
454 | mflr r3 | ||
455 | mfcr r5 | ||
456 | PPC_STL r3, HOST_STACK_LR(r1) | ||
457 | |||
458 | stw r5, HOST_CR(r1) | ||
459 | |||
460 | /* Save host non-volatile register state to stack. */ | ||
461 | PPC_STL r14, HOST_NV_GPR(r14)(r1) | ||
462 | PPC_STL r15, HOST_NV_GPR(r15)(r1) | ||
463 | PPC_STL r16, HOST_NV_GPR(r16)(r1) | ||
464 | PPC_STL r17, HOST_NV_GPR(r17)(r1) | ||
465 | PPC_STL r18, HOST_NV_GPR(r18)(r1) | ||
466 | PPC_STL r19, HOST_NV_GPR(r19)(r1) | ||
467 | PPC_STL r20, HOST_NV_GPR(r20)(r1) | ||
468 | PPC_STL r21, HOST_NV_GPR(r21)(r1) | ||
469 | PPC_STL r22, HOST_NV_GPR(r22)(r1) | ||
470 | PPC_STL r23, HOST_NV_GPR(r23)(r1) | ||
471 | PPC_STL r24, HOST_NV_GPR(r24)(r1) | ||
472 | PPC_STL r25, HOST_NV_GPR(r25)(r1) | ||
473 | PPC_STL r26, HOST_NV_GPR(r26)(r1) | ||
474 | PPC_STL r27, HOST_NV_GPR(r27)(r1) | ||
475 | PPC_STL r28, HOST_NV_GPR(r28)(r1) | ||
476 | PPC_STL r29, HOST_NV_GPR(r29)(r1) | ||
477 | PPC_STL r30, HOST_NV_GPR(r30)(r1) | ||
478 | PPC_STL r31, HOST_NV_GPR(r31)(r1) | ||
479 | |||
480 | /* Load guest non-volatiles. */ | ||
481 | PPC_LL r14, VCPU_GPR(r14)(r4) | ||
482 | PPC_LL r15, VCPU_GPR(r15)(r4) | ||
483 | PPC_LL r16, VCPU_GPR(r16)(r4) | ||
484 | PPC_LL r17, VCPU_GPR(r17)(r4) | ||
485 | PPC_LL r18, VCPU_GPR(r18)(r4) | ||
486 | PPC_LL r19, VCPU_GPR(r19)(r4) | ||
487 | PPC_LL r20, VCPU_GPR(r20)(r4) | ||
488 | PPC_LL r21, VCPU_GPR(r21)(r4) | ||
489 | PPC_LL r22, VCPU_GPR(r22)(r4) | ||
490 | PPC_LL r23, VCPU_GPR(r23)(r4) | ||
491 | PPC_LL r24, VCPU_GPR(r24)(r4) | ||
492 | PPC_LL r25, VCPU_GPR(r25)(r4) | ||
493 | PPC_LL r26, VCPU_GPR(r26)(r4) | ||
494 | PPC_LL r27, VCPU_GPR(r27)(r4) | ||
495 | PPC_LL r28, VCPU_GPR(r28)(r4) | ||
496 | PPC_LL r29, VCPU_GPR(r29)(r4) | ||
497 | PPC_LL r30, VCPU_GPR(r30)(r4) | ||
498 | PPC_LL r31, VCPU_GPR(r31)(r4) | ||
499 | |||
500 | |||
501 | lightweight_exit: | ||
502 | PPC_STL r2, HOST_R2(r1) | ||
503 | |||
504 | mfspr r3, SPRN_PID | ||
505 | stw r3, VCPU_HOST_PID(r4) | ||
506 | lwz r3, VCPU_GUEST_PID(r4) | ||
507 | mtspr SPRN_PID, r3 | ||
508 | |||
509 | PPC_LL r11, VCPU_SHARED(r4) | ||
510 | /* Disable MAS register updates via exception */ | ||
511 | mfspr r3, SPRN_EPCR | ||
512 | oris r3, r3, SPRN_EPCR_DMIUH@h | ||
513 | mtspr SPRN_EPCR, r3 | ||
514 | isync | ||
515 | /* Save host mas4 and mas6 and load guest MAS registers */ | ||
516 | mfspr r3, SPRN_MAS4 | ||
517 | stw r3, VCPU_HOST_MAS4(r4) | ||
518 | mfspr r3, SPRN_MAS6 | ||
519 | stw r3, VCPU_HOST_MAS6(r4) | ||
520 | lwz r3, VCPU_SHARED_MAS0(r11) | ||
521 | lwz r5, VCPU_SHARED_MAS1(r11) | ||
522 | PPC_LD(r6, VCPU_SHARED_MAS2, r11) | ||
523 | lwz r7, VCPU_SHARED_MAS7_3+4(r11) | ||
524 | lwz r8, VCPU_SHARED_MAS4(r11) | ||
525 | mtspr SPRN_MAS0, r3 | ||
526 | mtspr SPRN_MAS1, r5 | ||
527 | mtspr SPRN_MAS2, r6 | ||
528 | mtspr SPRN_MAS3, r7 | ||
529 | mtspr SPRN_MAS4, r8 | ||
530 | lwz r3, VCPU_SHARED_MAS6(r11) | ||
531 | lwz r5, VCPU_SHARED_MAS7_3+0(r11) | ||
532 | mtspr SPRN_MAS6, r3 | ||
533 | mtspr SPRN_MAS7, r5 | ||
534 | |||
535 | /* | ||
536 | * Host interrupt handlers may have clobbered these guest-readable | ||
537 | * SPRGs, so we need to reload them here with the guest's values. | ||
538 | */ | ||
539 | lwz r3, VCPU_VRSAVE(r4) | ||
540 | PPC_LD(r5, VCPU_SHARED_SPRG4, r11) | ||
541 | mtspr SPRN_VRSAVE, r3 | ||
542 | PPC_LD(r6, VCPU_SHARED_SPRG5, r11) | ||
543 | mtspr SPRN_SPRG4W, r5 | ||
544 | PPC_LD(r7, VCPU_SHARED_SPRG6, r11) | ||
545 | mtspr SPRN_SPRG5W, r6 | ||
546 | PPC_LD(r8, VCPU_SHARED_SPRG7, r11) | ||
547 | mtspr SPRN_SPRG6W, r7 | ||
548 | mtspr SPRN_SPRG7W, r8 | ||
549 | |||
550 | /* Load some guest volatiles. */ | ||
551 | PPC_LL r3, VCPU_LR(r4) | ||
552 | PPC_LL r5, VCPU_XER(r4) | ||
553 | PPC_LL r6, VCPU_CTR(r4) | ||
554 | lwz r7, VCPU_CR(r4) | ||
555 | PPC_LL r8, VCPU_PC(r4) | ||
556 | PPC_LD(r9, VCPU_SHARED_MSR, r11) | ||
557 | PPC_LL r0, VCPU_GPR(r0)(r4) | ||
558 | PPC_LL r1, VCPU_GPR(r1)(r4) | ||
559 | PPC_LL r2, VCPU_GPR(r2)(r4) | ||
560 | PPC_LL r10, VCPU_GPR(r10)(r4) | ||
561 | PPC_LL r11, VCPU_GPR(r11)(r4) | ||
562 | PPC_LL r12, VCPU_GPR(r12)(r4) | ||
563 | PPC_LL r13, VCPU_GPR(r13)(r4) | ||
564 | mtlr r3 | ||
565 | mtxer r5 | ||
566 | mtctr r6 | ||
567 | mtsrr0 r8 | ||
568 | mtsrr1 r9 | ||
569 | |||
570 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
571 | /* save enter time */ | ||
572 | 1: | ||
573 | mfspr r6, SPRN_TBRU | ||
574 | mfspr r9, SPRN_TBRL | ||
575 | mfspr r8, SPRN_TBRU | ||
576 | cmpw r8, r6 | ||
577 | stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4) | ||
578 | bne 1b | ||
579 | stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | ||
580 | #endif | ||
581 | |||
582 | /* | ||
583 | * Don't execute any instruction that can change CR after | ||
584 | * the mtcr below. | ||
585 | */ | ||
586 | mtcr r7 | ||
587 | |||
588 | /* Finish loading guest volatiles and jump to guest. */ | ||
589 | PPC_LL r5, VCPU_GPR(r5)(r4) | ||
590 | PPC_LL r6, VCPU_GPR(r6)(r4) | ||
591 | PPC_LL r7, VCPU_GPR(r7)(r4) | ||
592 | PPC_LL r8, VCPU_GPR(r8)(r4) | ||
593 | PPC_LL r9, VCPU_GPR(r9)(r4) | ||
594 | |||
595 | PPC_LL r3, VCPU_GPR(r3)(r4) | ||
596 | PPC_LL r4, VCPU_GPR(r4)(r4) | ||
597 | rfi | ||
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index ddcd896fa2ff..b479ed77c515 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -20,11 +20,282 @@ | |||
20 | #include <asm/reg.h> | 20 | #include <asm/reg.h> |
21 | #include <asm/cputable.h> | 21 | #include <asm/cputable.h> |
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | #include <asm/kvm_e500.h> | ||
24 | #include <asm/kvm_ppc.h> | 23 | #include <asm/kvm_ppc.h> |
25 | 24 | ||
25 | #include "../mm/mmu_decl.h" | ||
26 | #include "booke.h" | 26 | #include "booke.h" |
27 | #include "e500_tlb.h" | 27 | #include "e500.h" |
28 | |||
29 | struct id { | ||
30 | unsigned long val; | ||
31 | struct id **pentry; | ||
32 | }; | ||
33 | |||
34 | #define NUM_TIDS 256 | ||
35 | |||
36 | /* | ||
37 | * This table provides mappings from: | ||
38 | * (guestAS,guestTID,guestPR) --> ID of physical cpu | ||
39 | * guestAS [0..1] | ||
40 | * guestTID [0..255] | ||
41 | * guestPR [0..1] | ||
42 | * ID [1..255] | ||
43 | * Each vcpu keeps one vcpu_id_table. | ||
44 | */ | ||
45 | struct vcpu_id_table { | ||
46 | struct id id[2][NUM_TIDS][2]; | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * This table provides the reverse mapping of vcpu_id_table: | ||
51 | * ID --> address of vcpu_id_table item. | ||
52 | * Each physical core has one pcpu_id_table. | ||
53 | */ | ||
54 | struct pcpu_id_table { | ||
55 | struct id *entry[NUM_TIDS]; | ||
56 | }; | ||
57 | |||
58 | static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); | ||
59 | |||
60 | /* This variable keeps the last used shadow ID on the local core. | ||
61 | * The valid range of a shadow ID is [1..255]. */ | ||
62 | static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); | ||
63 | |||
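The two tables form a handshake: a guest (AS, TID, PR) triple owns shadow ID sid only while its struct id records sid *and* the per-CPU reverse table points back at that same struct id, so either side can revoke a mapping just by overwriting its half. In code, the validity test looks like this (a sketch of the invariant; it is essentially what local_sid_lookup() below implements):

	struct id *e = &idt->id[as][tid][pr];
	int valid = e->val != 0 &&
		    __get_cpu_var(pcpu_sids).entry[e->val] == e &&
		    e->pentry == &__get_cpu_var(pcpu_sids).entry[e->val];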
64 | /* | ||
65 | * Allocate a free shadow id and set up a valid sid mapping in the given entry. | ||
66 | * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries match. | ||
67 | * | ||
68 | * The caller must have preemption disabled, and keep it that way until | ||
69 | * it has finished with the returned shadow id (either written into the | ||
70 | * TLB or arch.shadow_pid, or discarded). | ||
71 | */ | ||
72 | static inline int local_sid_setup_one(struct id *entry) | ||
73 | { | ||
74 | unsigned long sid; | ||
75 | int ret = -1; | ||
76 | |||
77 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | ||
78 | if (sid < NUM_TIDS) { | ||
79 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | ||
80 | entry->val = sid; | ||
81 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | ||
82 | ret = sid; | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * If sid == NUM_TIDS, we've run out of sids. We return -1, and | ||
87 | * the caller will invalidate everything and start over. | ||
88 | * | ||
89 | * sid > NUM_TIDS indicates a race, which disabling preemption | ||
90 | * is meant to prevent. | ||
91 | */ | ||
92 | WARN_ON(sid > NUM_TIDS); | ||
93 | |||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * Check if the given entry contains a valid shadow id mapping. | ||
99 | * An ID mapping is considered valid only if | ||
100 | * both the vcpu and the pcpu know this mapping. | ||
101 | * | ||
102 | * The caller must have preemption disabled, and keep it that way until | ||
103 | * it has finished with the returned shadow id (either written into the | ||
104 | * TLB or arch.shadow_pid, or discarded). | ||
105 | */ | ||
106 | static inline int local_sid_lookup(struct id *entry) | ||
107 | { | ||
108 | if (entry && entry->val != 0 && | ||
109 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | ||
110 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | ||
111 | return entry->val; | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | ||
116 | static inline void local_sid_destroy_all(void) | ||
117 | { | ||
118 | __get_cpu_var(pcpu_last_used_sid) = 0; | ||
119 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | ||
120 | } | ||
121 | |||
122 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
123 | { | ||
124 | vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); | ||
125 | return vcpu_e500->idt; | ||
126 | } | ||
127 | |||
128 | static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
129 | { | ||
130 | kfree(vcpu_e500->idt); | ||
131 | vcpu_e500->idt = NULL; | ||
132 | } | ||
133 | |||
134 | /* Map guest PID to shadow. | ||
135 | * We use PID to hold the shadow of the current guest's non-zero PID, | ||
136 | * and PID1 to hold the shadow of the guest's zero PID, so that | ||
137 | * a guest tlbe with TID=0 can be accessed at any time. */ | ||
138 | static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
139 | { | ||
140 | preempt_disable(); | ||
141 | vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, | ||
142 | get_cur_as(&vcpu_e500->vcpu), | ||
143 | get_cur_pid(&vcpu_e500->vcpu), | ||
144 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
145 | vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, | ||
146 | get_cur_as(&vcpu_e500->vcpu), 0, | ||
147 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
148 | preempt_enable(); | ||
149 | } | ||
150 | |||
151 | /* Invalidate all mappings on vcpu */ | ||
152 | static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
153 | { | ||
154 | memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); | ||
155 | |||
156 | /* Update shadow pid when mappings are changed */ | ||
157 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
158 | } | ||
159 | |||
160 | /* Invalidate one ID mapping on vcpu */ | ||
161 | static inline void kvmppc_e500_id_table_reset_one( | ||
162 | struct kvmppc_vcpu_e500 *vcpu_e500, | ||
163 | int as, int pid, int pr) | ||
164 | { | ||
165 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
166 | |||
167 | BUG_ON(as >= 2); | ||
168 | BUG_ON(pid >= NUM_TIDS); | ||
169 | BUG_ON(pr >= 2); | ||
170 | |||
171 | idt->id[as][pid][pr].val = 0; | ||
172 | idt->id[as][pid][pr].pentry = NULL; | ||
173 | |||
174 | /* Update shadow pid when mappings are changed */ | ||
175 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Map guest (vcpu,AS,ID,PR) to physical core shadow id. | ||
180 | * This function first looks up whether a valid mapping exists; | ||
181 | * if not, it creates a new one. | ||
182 | * | ||
183 | * The caller must have preemption disabled, and keep it that way until | ||
184 | * it has finished with the returned shadow id (either written into the | ||
185 | * TLB or arch.shadow_pid, or discarded). | ||
186 | */ | ||
187 | unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
188 | unsigned int as, unsigned int gid, | ||
189 | unsigned int pr, int avoid_recursion) | ||
190 | { | ||
191 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
192 | int sid; | ||
193 | |||
194 | BUG_ON(as >= 2); | ||
195 | BUG_ON(gid >= NUM_TIDS); | ||
196 | BUG_ON(pr >= 2); | ||
197 | |||
198 | sid = local_sid_lookup(&idt->id[as][gid][pr]); | ||
199 | |||
200 | while (sid <= 0) { | ||
201 | /* No mapping yet */ | ||
202 | sid = local_sid_setup_one(&idt->id[as][gid][pr]); | ||
203 | if (sid <= 0) { | ||
204 | _tlbil_all(); | ||
205 | local_sid_destroy_all(); | ||
206 | } | ||
207 | |||
208 | /* Update shadow pid when mappings are changed */ | ||
209 | if (!avoid_recursion) | ||
210 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
211 | } | ||
212 | |||
213 | return sid; | ||
214 | } | ||
215 | |||
216 | unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, | ||
217 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
218 | { | ||
219 | return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe), | ||
220 | get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0); | ||
221 | } | ||
222 | |||
223 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
224 | { | ||
225 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
226 | |||
227 | if (vcpu->arch.pid != pid) { | ||
228 | vcpu_e500->pid[0] = vcpu->arch.pid = pid; | ||
229 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | /* gtlbe must not be mapped by more than one host tlbe */ | ||
234 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
235 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
236 | { | ||
237 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
238 | unsigned int pr, tid, ts, pid; | ||
239 | u32 val, eaddr; | ||
240 | unsigned long flags; | ||
241 | |||
242 | ts = get_tlb_ts(gtlbe); | ||
243 | tid = get_tlb_tid(gtlbe); | ||
244 | |||
245 | preempt_disable(); | ||
246 | |||
247 | /* One guest ID may be mapped to two shadow IDs */ | ||
248 | for (pr = 0; pr < 2; pr++) { | ||
249 | /* | ||
250 | * The shadow PID can have a valid mapping on at most one | ||
251 | * host CPU. In the common case, it will be valid on this | ||
252 | * CPU, in which case we do a local invalidation of the | ||
253 | * specific address. | ||
254 | * | ||
255 | * If the shadow PID is not valid on the current host CPU, | ||
256 | * we invalidate the entire shadow PID. | ||
257 | */ | ||
258 | pid = local_sid_lookup(&idt->id[ts][tid][pr]); | ||
259 | if (pid <= 0) { | ||
260 | kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); | ||
261 | continue; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * The guest is invalidating a 4K entry which is in a PID | ||
266 | * that has a valid shadow mapping on this host CPU. We | ||
267 | * search the host TLB to invalidate its shadow TLB entry, | ||
268 | * similar to __tlbil_va except that we need to look in AS1. | ||
269 | */ | ||
270 | val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; | ||
271 | eaddr = get_tlb_eaddr(gtlbe); | ||
272 | |||
273 | local_irq_save(flags); | ||
274 | |||
275 | mtspr(SPRN_MAS6, val); | ||
276 | asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); | ||
277 | val = mfspr(SPRN_MAS1); | ||
278 | if (val & MAS1_VALID) { | ||
279 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | ||
280 | asm volatile("tlbwe"); | ||
281 | } | ||
282 | |||
283 | local_irq_restore(flags); | ||
284 | } | ||
285 | |||
286 | preempt_enable(); | ||
287 | } | ||
288 | |||
289 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
290 | { | ||
291 | kvmppc_e500_id_table_reset_all(vcpu_e500); | ||
292 | } | ||
293 | |||
294 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
295 | { | ||
296 | /* Recalc shadow pid since MSR changes */ | ||
297 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | ||
298 | } | ||
28 | 299 | ||
29 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | 300 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) |
30 | { | 301 | { |
@@ -36,17 +307,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | |||
36 | 307 | ||
37 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 308 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
38 | { | 309 | { |
39 | kvmppc_e500_tlb_load(vcpu, cpu); | 310 | kvmppc_booke_vcpu_load(vcpu, cpu); |
311 | |||
312 | /* Shadow PID may be expired on local core */ | ||
313 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | ||
40 | } | 314 | } |
41 | 315 | ||
42 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 316 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
43 | { | 317 | { |
44 | kvmppc_e500_tlb_put(vcpu); | ||
45 | |||
46 | #ifdef CONFIG_SPE | 318 | #ifdef CONFIG_SPE |
47 | if (vcpu->arch.shadow_msr & MSR_SPE) | 319 | if (vcpu->arch.shadow_msr & MSR_SPE) |
48 | kvmppc_vcpu_disable_spe(vcpu); | 320 | kvmppc_vcpu_disable_spe(vcpu); |
49 | #endif | 321 | #endif |
322 | |||
323 | kvmppc_booke_vcpu_put(vcpu); | ||
50 | } | 324 | } |
51 | 325 | ||
52 | int kvmppc_core_check_processor_compat(void) | 326 | int kvmppc_core_check_processor_compat(void) |
@@ -61,6 +335,23 @@ int kvmppc_core_check_processor_compat(void) | |||
61 | return r; | 335 | return r; |
62 | } | 336 | } |
63 | 337 | ||
338 | static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
339 | { | ||
340 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
341 | |||
342 | /* Insert large initial mapping for guest. */ | ||
343 | tlbe = get_entry(vcpu_e500, 1, 0); | ||
344 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); | ||
345 | tlbe->mas2 = 0; | ||
346 | tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; | ||
347 | |||
348 | /* 4K map for serial output. Used by kernel wrapper. */ | ||
349 | tlbe = get_entry(vcpu_e500, 1, 1); | ||
350 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); | ||
351 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | ||
352 | tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | ||
353 | } | ||
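For reference, the masking in the serial mapping above is plain arithmetic: 0xe0004500 & 0xFFFFF000 = 0xe0004000, i.e. the 4K page containing the (board-specific) UART address, used identically for the effective (mas2) and real (mas7_3) halves of the identity mapping.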
354 | |||
64 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | 355 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) |
65 | { | 356 | { |
66 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 357 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -76,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
76 | return 0; | 367 | return 0; |
77 | } | 368 | } |
78 | 369 | ||
79 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
80 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
81 | struct kvm_translation *tr) | ||
82 | { | ||
83 | int index; | ||
84 | gva_t eaddr; | ||
85 | u8 pid; | ||
86 | u8 as; | ||
87 | |||
88 | eaddr = tr->linear_address; | ||
89 | pid = (tr->linear_address >> 32) & 0xff; | ||
90 | as = (tr->linear_address >> 40) & 0x1; | ||
91 | |||
92 | index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); | ||
93 | if (index < 0) { | ||
94 | tr->valid = 0; | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
99 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
100 | tr->valid = 1; | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 370 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
106 | { | 371 | { |
107 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 372 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -115,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
115 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; | 380 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; |
116 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; | 381 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; |
117 | 382 | ||
118 | sregs->u.e.mas0 = vcpu->arch.shared->mas0; | ||
119 | sregs->u.e.mas1 = vcpu->arch.shared->mas1; | ||
120 | sregs->u.e.mas2 = vcpu->arch.shared->mas2; | ||
121 | sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; | ||
122 | sregs->u.e.mas4 = vcpu->arch.shared->mas4; | ||
123 | sregs->u.e.mas6 = vcpu->arch.shared->mas6; | ||
124 | |||
125 | sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); | ||
126 | sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; | ||
127 | sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg; | ||
128 | sregs->u.e.tlbcfg[2] = 0; | ||
129 | sregs->u.e.tlbcfg[3] = 0; | ||
130 | |||
131 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 383 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
132 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; | 384 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; |
133 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; | 385 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; |
@@ -135,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
135 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | 387 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; |
136 | 388 | ||
137 | kvmppc_get_sregs_ivor(vcpu, sregs); | 389 | kvmppc_get_sregs_ivor(vcpu, sregs); |
390 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); | ||
138 | } | 391 | } |
139 | 392 | ||
140 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 393 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
141 | { | 394 | { |
142 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 395 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
396 | int ret; | ||
143 | 397 | ||
144 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { | 398 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { |
145 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; | 399 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; |
@@ -147,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
147 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; | 401 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; |
148 | } | 402 | } |
149 | 403 | ||
150 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | 404 | ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); |
151 | vcpu->arch.shared->mas0 = sregs->u.e.mas0; | 405 | if (ret < 0) |
152 | vcpu->arch.shared->mas1 = sregs->u.e.mas1; | 406 | return ret; |
153 | vcpu->arch.shared->mas2 = sregs->u.e.mas2; | ||
154 | vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; | ||
155 | vcpu->arch.shared->mas4 = sregs->u.e.mas4; | ||
156 | vcpu->arch.shared->mas6 = sregs->u.e.mas6; | ||
157 | } | ||
158 | 407 | ||
159 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | 408 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) |
160 | return 0; | 409 | return 0; |
@@ -193,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
193 | if (err) | 442 | if (err) |
194 | goto free_vcpu; | 443 | goto free_vcpu; |
195 | 444 | ||
445 | if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) | ||
446 | goto uninit_vcpu; | ||
447 | |||
196 | err = kvmppc_e500_tlb_init(vcpu_e500); | 448 | err = kvmppc_e500_tlb_init(vcpu_e500); |
197 | if (err) | 449 | if (err) |
198 | goto uninit_vcpu; | 450 | goto uninit_id; |
199 | 451 | ||
200 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | 452 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
201 | if (!vcpu->arch.shared) | 453 | if (!vcpu->arch.shared) |
@@ -205,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
205 | 457 | ||
206 | uninit_tlb: | 458 | uninit_tlb: |
207 | kvmppc_e500_tlb_uninit(vcpu_e500); | 459 | kvmppc_e500_tlb_uninit(vcpu_e500); |
460 | uninit_id: | ||
461 | kvmppc_e500_id_table_free(vcpu_e500); | ||
208 | uninit_vcpu: | 462 | uninit_vcpu: |
209 | kvm_vcpu_uninit(vcpu); | 463 | kvm_vcpu_uninit(vcpu); |
210 | free_vcpu: | 464 | free_vcpu: |
@@ -218,11 +472,21 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
218 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 472 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
219 | 473 | ||
220 | free_page((unsigned long)vcpu->arch.shared); | 474 | free_page((unsigned long)vcpu->arch.shared); |
221 | kvm_vcpu_uninit(vcpu); | ||
222 | kvmppc_e500_tlb_uninit(vcpu_e500); | 475 | kvmppc_e500_tlb_uninit(vcpu_e500); |
476 | kvmppc_e500_id_table_free(vcpu_e500); | ||
477 | kvm_vcpu_uninit(vcpu); | ||
223 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 478 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
224 | } | 479 | } |
225 | 480 | ||
481 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
482 | { | ||
483 | return 0; | ||
484 | } | ||
485 | |||
486 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
487 | { | ||
488 | } | ||
489 | |||
226 | static int __init kvmppc_e500_init(void) | 490 | static int __init kvmppc_e500_init(void) |
227 | { | 491 | { |
228 | int r, i; | 492 | int r, i; |
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h new file mode 100644 index 000000000000..aa8b81428bf4 --- /dev/null +++ b/arch/powerpc/kvm/e500.h | |||
@@ -0,0 +1,306 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Yu Liu <yu.liu@freescale.com> | ||
5 | * Scott Wood <scottwood@freescale.com> | ||
6 | * Ashish Kalra <ashish.kalra@freescale.com> | ||
7 | * Varun Sethi <varun.sethi@freescale.com> | ||
8 | * | ||
9 | * Description: | ||
10 | * This file is based on arch/powerpc/kvm/44x_tlb.h and | ||
11 | * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>, | ||
12 | * Copyright IBM Corp. 2007-2008 | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License, version 2, as | ||
16 | * published by the Free Software Foundation. | ||
17 | */ | ||
18 | |||
19 | #ifndef KVM_E500_H | ||
20 | #define KVM_E500_H | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | #include <asm/mmu-book3e.h> | ||
24 | #include <asm/tlb.h> | ||
25 | |||
26 | #define E500_PID_NUM 3 | ||
27 | #define E500_TLB_NUM 2 | ||
28 | |||
29 | #define E500_TLB_VALID 1 | ||
30 | #define E500_TLB_DIRTY 2 | ||
31 | #define E500_TLB_BITMAP 4 | ||
32 | |||
33 | struct tlbe_ref { | ||
34 | pfn_t pfn; | ||
35 | unsigned int flags; /* E500_TLB_* */ | ||
36 | }; | ||
37 | |||
38 | struct tlbe_priv { | ||
39 | struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ | ||
40 | }; | ||
41 | |||
42 | #ifdef CONFIG_KVM_E500V2 | ||
43 | struct vcpu_id_table; | ||
44 | #endif | ||
45 | |||
46 | struct kvmppc_e500_tlb_params { | ||
47 | int entries, ways, sets; | ||
48 | }; | ||
49 | |||
50 | struct kvmppc_vcpu_e500 { | ||
51 | struct kvm_vcpu vcpu; | ||
52 | |||
53 | /* Unmodified copy of the guest's TLB -- shared with host userspace. */ | ||
54 | struct kvm_book3e_206_tlb_entry *gtlb_arch; | ||
55 | |||
56 | /* Starting entry number in gtlb_arch[] */ | ||
57 | int gtlb_offset[E500_TLB_NUM]; | ||
58 | |||
59 | /* KVM internal information associated with each guest TLB entry */ | ||
60 | struct tlbe_priv *gtlb_priv[E500_TLB_NUM]; | ||
61 | |||
62 | struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM]; | ||
63 | |||
64 | unsigned int gtlb_nv[E500_TLB_NUM]; | ||
65 | |||
66 | /* | ||
67 | * information associated with each host TLB entry -- | ||
68 | * TLB1 only for now. If/when guest TLB1 entries can be | ||
69 | * mapped with host TLB0, this will be used for that too. | ||
70 | * | ||
71 | * We don't want to use this for guest TLB0 because then we'd | ||
72 | * have the overhead of doing the translation again even if | ||
73 | * the entry is still in the guest TLB (e.g. we swapped out | ||
74 | * and back, and our host TLB entries got evicted). | ||
75 | */ | ||
76 | struct tlbe_ref *tlb_refs[E500_TLB_NUM]; | ||
77 | unsigned int host_tlb1_nv; | ||
78 | |||
79 | u32 svr; | ||
80 | u32 l1csr0; | ||
81 | u32 l1csr1; | ||
82 | u32 hid0; | ||
83 | u32 hid1; | ||
84 | u64 mcar; | ||
85 | |||
86 | struct page **shared_tlb_pages; | ||
87 | int num_shared_tlb_pages; | ||
88 | |||
89 | u64 *g2h_tlb1_map; | ||
90 | unsigned int *h2g_tlb1_rmap; | ||
91 | |||
92 | /* Minimum and maximum address mapped by TLB1 */ | ||
93 | unsigned long tlb1_min_eaddr; | ||
94 | unsigned long tlb1_max_eaddr; | ||
95 | |||
96 | #ifdef CONFIG_KVM_E500V2 | ||
97 | u32 pid[E500_PID_NUM]; | ||
98 | |||
99 | /* vcpu id table */ | ||
100 | struct vcpu_id_table *idt; | ||
101 | #endif | ||
102 | }; | ||
103 | |||
104 | static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) | ||
105 | { | ||
106 | return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu); | ||
107 | } | ||
108 | |||
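to_e500() is the usual container_of() pattern: given the embedded struct kvm_vcpu, it recovers the enclosing e500-specific vcpu. A usage sketch (hypothetical function, shown only to illustrate the conversion):

	static void example(struct kvm_vcpu *vcpu)
	{
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

		/* e500-specific state is now reachable: */
		vcpu_e500->gtlb_nv[0] = 0;
	}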
109 | |||
110 | /* This geometry is the legacy default -- can be overridden by userspace */ | ||
111 | #define KVM_E500_TLB0_WAY_SIZE 128 | ||
112 | #define KVM_E500_TLB0_WAY_NUM 2 | ||
113 | |||
114 | #define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) | ||
115 | #define KVM_E500_TLB1_SIZE 16 | ||
116 | |||
117 | #define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF)) | ||
118 | #define tlbsel_of(index) ((index) >> 16) | ||
119 | #define esel_of(index) ((index) & 0xFFFF) | ||
120 | |||
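Aside (illustrative, not part of the patch): index_of() packs the TLB selector into bits 16 and up and the entry selector into the low 16 bits, so a packed index round-trips cleanly when compiled with the macros above in scope:

    /* Sketch: round-tripping the packed (tlbsel, esel) index. */
    #include <assert.h>

    int main(void)
    {
            int idx = index_of(1, 5);        /* == 0x10005 */
            assert(tlbsel_of(idx) == 1);
            assert(esel_of(idx) == 5);
            return 0;
    }
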
121 | #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) | ||
122 | #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) | ||
123 | #define MAS2_ATTRIB_MASK \ | ||
124 | (MAS2_X0 | MAS2_X1) | ||
125 | #define MAS3_ATTRIB_MASK \ | ||
126 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | ||
127 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | ||
128 | |||
129 | int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
130 | ulong value); | ||
131 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu); | ||
132 | int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu); | ||
133 | int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb); | ||
134 | int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb); | ||
135 | int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb); | ||
136 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
137 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
138 | |||
139 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
140 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
141 | |||
142 | |||
143 | #ifdef CONFIG_KVM_E500V2 | ||
144 | unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
145 | unsigned int as, unsigned int gid, | ||
146 | unsigned int pr, int avoid_recursion); | ||
147 | #endif | ||
148 | |||
149 | /* TLB helper functions */ | ||
150 | static inline unsigned int | ||
151 | get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
152 | { | ||
153 | return (tlbe->mas1 >> 7) & 0x1f; | ||
154 | } | ||
155 | |||
156 | static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
157 | { | ||
158 | return tlbe->mas2 & 0xfffff000; | ||
159 | } | ||
160 | |||
161 | static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
162 | { | ||
163 | unsigned int pgsize = get_tlb_size(tlbe); | ||
164 | return 1ULL << 10 << pgsize; | ||
165 | } | ||
166 | |||
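For reference, get_tlb_bytes() composes to bytes = 1 KiB << TSIZE. A standalone sketch with two worked values (assuming the usual Book3E power-of-two TSIZE encodings):

    /* Sketch of the TSIZE -> bytes mapping used by get_tlb_bytes(). */
    static unsigned long long tsize_to_bytes(unsigned int tsize)
    {
            return 1ULL << 10 << tsize;   /* 1 KiB scaled by 2^TSIZE */
    }
    /* tsize_to_bytes(2) == 4096 (4 KiB); tsize_to_bytes(18) == 1 << 28 (256 MiB) */
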
167 | static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
168 | { | ||
169 | u64 bytes = get_tlb_bytes(tlbe); | ||
170 | return get_tlb_eaddr(tlbe) + bytes - 1; | ||
171 | } | ||
172 | |||
173 | static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
174 | { | ||
175 | return tlbe->mas7_3 & ~0xfffULL; | ||
176 | } | ||
177 | |||
178 | static inline unsigned int | ||
179 | get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
180 | { | ||
181 | return (tlbe->mas1 >> 16) & 0xff; | ||
182 | } | ||
183 | |||
184 | static inline unsigned int | ||
185 | get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
186 | { | ||
187 | return (tlbe->mas1 >> 12) & 0x1; | ||
188 | } | ||
189 | |||
190 | static inline unsigned int | ||
191 | get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
192 | { | ||
193 | return (tlbe->mas1 >> 31) & 0x1; | ||
194 | } | ||
195 | |||
196 | static inline unsigned int | ||
197 | get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
198 | { | ||
199 | return (tlbe->mas1 >> 30) & 0x1; | ||
200 | } | ||
201 | |||
202 | static inline unsigned int | ||
203 | get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
204 | { | ||
205 | return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; | ||
206 | } | ||
207 | |||
208 | static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) | ||
209 | { | ||
210 | return vcpu->arch.pid & 0xff; | ||
211 | } | ||
212 | |||
213 | static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu) | ||
214 | { | ||
215 | return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS)); | ||
216 | } | ||
217 | |||
218 | static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu) | ||
219 | { | ||
220 | return !!(vcpu->arch.shared->msr & MSR_PR); | ||
221 | } | ||
222 | |||
223 | static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu) | ||
224 | { | ||
225 | return (vcpu->arch.shared->mas6 >> 16) & 0xff; | ||
226 | } | ||
227 | |||
228 | static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu) | ||
229 | { | ||
230 | return vcpu->arch.shared->mas6 & 0x1; | ||
231 | } | ||
232 | |||
233 | static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu) | ||
234 | { | ||
235 | /* | ||
236 | * The manual says tlbsel is 2 bits wide. | ||
237 | * Since we only have two TLBs, only the lower bit is used. | ||
238 | */ | ||
239 | return (vcpu->arch.shared->mas0 >> 28) & 0x1; | ||
240 | } | ||
241 | |||
242 | static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu) | ||
243 | { | ||
244 | return vcpu->arch.shared->mas0 & 0xfff; | ||
245 | } | ||
246 | |||
247 | static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu) | ||
248 | { | ||
249 | return (vcpu->arch.shared->mas0 >> 16) & 0xfff; | ||
250 | } | ||
251 | |||
252 | static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
253 | const struct kvm_book3e_206_tlb_entry *tlbe) | ||
254 | { | ||
255 | gpa_t gpa; | ||
256 | |||
257 | if (!get_tlb_v(tlbe)) | ||
258 | return 0; | ||
259 | |||
260 | #ifndef CONFIG_KVM_BOOKE_HV | ||
261 | /* Does it match current guest AS? */ | ||
262 | /* XXX what about IS != DS? */ | ||
263 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) | ||
264 | return 0; | ||
265 | #endif | ||
266 | |||
267 | gpa = get_tlb_raddr(tlbe); | ||
268 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
269 | /* Mapping is not for RAM. */ | ||
270 | return 0; | ||
271 | |||
272 | return 1; | ||
273 | } | ||
274 | |||
275 | static inline struct kvm_book3e_206_tlb_entry *get_entry( | ||
276 | struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) | ||
277 | { | ||
278 | int offset = vcpu_e500->gtlb_offset[tlbsel]; | ||
279 | return &vcpu_e500->gtlb_arch[offset + entry]; | ||
280 | } | ||
281 | |||
282 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
283 | struct kvm_book3e_206_tlb_entry *gtlbe); | ||
284 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
285 | |||
286 | #ifdef CONFIG_KVM_BOOKE_HV | ||
287 | #define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe) | ||
288 | #define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu) | ||
289 | #define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS) | ||
290 | #else | ||
291 | unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, | ||
292 | struct kvm_book3e_206_tlb_entry *gtlbe); | ||
293 | |||
294 | static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu) | ||
295 | { | ||
296 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
297 | unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf; | ||
298 | |||
299 | return vcpu_e500->pid[tidseld]; | ||
300 | } | ||
301 | |||
302 | /* Force TS=1 for all guest mappings. */ | ||
303 | #define get_tlb_sts(gtlbe) (MAS1_TS) | ||
304 | #endif /* !BOOKE_HV */ | ||
305 | |||
306 | #endif /* KVM_E500_H */ | ||
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 6d0b2bd54fb0..8b99e076dc81 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -14,27 +14,96 @@ | |||
14 | 14 | ||
15 | #include <asm/kvm_ppc.h> | 15 | #include <asm/kvm_ppc.h> |
16 | #include <asm/disassemble.h> | 16 | #include <asm/disassemble.h> |
17 | #include <asm/kvm_e500.h> | 17 | #include <asm/dbell.h> |
18 | 18 | ||
19 | #include "booke.h" | 19 | #include "booke.h" |
20 | #include "e500_tlb.h" | 20 | #include "e500.h" |
21 | 21 | ||
22 | #define XOP_MSGSND 206 | ||
23 | #define XOP_MSGCLR 238 | ||
22 | #define XOP_TLBIVAX 786 | 24 | #define XOP_TLBIVAX 786 |
23 | #define XOP_TLBSX 914 | 25 | #define XOP_TLBSX 914 |
24 | #define XOP_TLBRE 946 | 26 | #define XOP_TLBRE 946 |
25 | #define XOP_TLBWE 978 | 27 | #define XOP_TLBWE 978 |
28 | #define XOP_TLBILX 18 | ||
29 | |||
30 | #ifdef CONFIG_KVM_E500MC | ||
31 | static int dbell2prio(ulong param) | ||
32 | { | ||
33 | int msg = param & PPC_DBELL_TYPE_MASK; | ||
34 | int prio = -1; | ||
35 | |||
36 | switch (msg) { | ||
37 | case PPC_DBELL_TYPE(PPC_DBELL): | ||
38 | prio = BOOKE_IRQPRIO_DBELL; | ||
39 | break; | ||
40 | case PPC_DBELL_TYPE(PPC_DBELL_CRIT): | ||
41 | prio = BOOKE_IRQPRIO_DBELL_CRIT; | ||
42 | break; | ||
43 | default: | ||
44 | break; | ||
45 | } | ||
46 | |||
47 | return prio; | ||
48 | } | ||
49 | |||
50 | static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb) | ||
51 | { | ||
52 | ulong param = vcpu->arch.gpr[rb]; | ||
53 | int prio = dbell2prio(param); | ||
54 | |||
55 | if (prio < 0) | ||
56 | return EMULATE_FAIL; | ||
57 | |||
58 | clear_bit(prio, &vcpu->arch.pending_exceptions); | ||
59 | return EMULATE_DONE; | ||
60 | } | ||
61 | |||
62 | static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb) | ||
63 | { | ||
64 | ulong param = vcpu->arch.gpr[rb]; | ||
65 | int prio = dbell2prio(param); /* classify the doorbell payload, not the GPR index */ | ||
66 | int pir = param & PPC_DBELL_PIR_MASK; | ||
67 | int i; | ||
68 | struct kvm_vcpu *cvcpu; | ||
69 | |||
70 | if (prio < 0) | ||
71 | return EMULATE_FAIL; | ||
72 | |||
73 | kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) { | ||
74 | int cpir = cvcpu->arch.shared->pir; | ||
75 | if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) { | ||
76 | set_bit(prio, &cvcpu->arch.pending_exceptions); | ||
77 | kvm_vcpu_kick(cvcpu); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | return EMULATE_DONE; | ||
82 | } | ||
83 | #endif | ||
26 | 84 | ||
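A hedged sketch of the targeting rule kvmppc_e500_emul_msgsnd() applies above -- deliver on the broadcast bit, or when the payload's PIR field matches a vcpu's PIR. The mask values here are illustrative stand-ins; the real encodings live in asm/dbell.h:

    #include <stdbool.h>

    #define DBELL_BRDCAST_EX   (1u << 27)   /* stand-in for PPC_DBELL_MSG_BRDCAST */
    #define DBELL_PIR_MASK_EX  0x3fffu      /* stand-in for PPC_DBELL_PIR_MASK    */

    static bool dbell_targets_cpu(unsigned long param, unsigned int cpu_pir)
    {
            return (param & DBELL_BRDCAST_EX) ||
                   ((param & DBELL_PIR_MASK_EX) == cpu_pir);
    }
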
27 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 85 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
28 | unsigned int inst, int *advance) | 86 | unsigned int inst, int *advance) |
29 | { | 87 | { |
30 | int emulated = EMULATE_DONE; | 88 | int emulated = EMULATE_DONE; |
31 | int ra; | 89 | int ra = get_ra(inst); |
32 | int rb; | 90 | int rb = get_rb(inst); |
91 | int rt = get_rt(inst); | ||
33 | 92 | ||
34 | switch (get_op(inst)) { | 93 | switch (get_op(inst)) { |
35 | case 31: | 94 | case 31: |
36 | switch (get_xop(inst)) { | 95 | switch (get_xop(inst)) { |
37 | 96 | ||
97 | #ifdef CONFIG_KVM_E500MC | ||
98 | case XOP_MSGSND: | ||
99 | emulated = kvmppc_e500_emul_msgsnd(vcpu, rb); | ||
100 | break; | ||
101 | |||
102 | case XOP_MSGCLR: | ||
103 | emulated = kvmppc_e500_emul_msgclr(vcpu, rb); | ||
104 | break; | ||
105 | #endif | ||
106 | |||
38 | case XOP_TLBRE: | 107 | case XOP_TLBRE: |
39 | emulated = kvmppc_e500_emul_tlbre(vcpu); | 108 | emulated = kvmppc_e500_emul_tlbre(vcpu); |
40 | break; | 109 | break; |
@@ -44,13 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
44 | break; | 113 | break; |
45 | 114 | ||
46 | case XOP_TLBSX: | 115 | case XOP_TLBSX: |
47 | rb = get_rb(inst); | ||
48 | emulated = kvmppc_e500_emul_tlbsx(vcpu, rb); | 116 |
49 | break; | 117 | break; |
50 | 118 | ||
119 | case XOP_TLBILX: | ||
120 | emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb); | ||
121 | break; | ||
122 | |||
51 | case XOP_TLBIVAX: | 123 | case XOP_TLBIVAX: |
52 | ra = get_ra(inst); | ||
53 | rb = get_rb(inst); | ||
54 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); | 124 | emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); |
55 | break; | 125 | break; |
56 | 126 | ||
@@ -70,52 +140,63 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
70 | return emulated; | 140 | return emulated; |
71 | } | 141 | } |
72 | 142 | ||
73 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 143 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) |
74 | { | 144 | { |
75 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 145 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
76 | int emulated = EMULATE_DONE; | 146 | int emulated = EMULATE_DONE; |
77 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
78 | 147 | ||
79 | switch (sprn) { | 148 | switch (sprn) { |
149 | #ifndef CONFIG_KVM_BOOKE_HV | ||
80 | case SPRN_PID: | 150 | case SPRN_PID: |
81 | kvmppc_set_pid(vcpu, spr_val); | 151 | kvmppc_set_pid(vcpu, spr_val); |
82 | break; | 152 | break; |
83 | case SPRN_PID1: | 153 | case SPRN_PID1: |
84 | if (spr_val != 0) | 154 | if (spr_val != 0) |
85 | return EMULATE_FAIL; | 155 | return EMULATE_FAIL; |
86 | vcpu_e500->pid[1] = spr_val; break; | 156 | vcpu_e500->pid[1] = spr_val; |
157 | break; | ||
87 | case SPRN_PID2: | 158 | case SPRN_PID2: |
88 | if (spr_val != 0) | 159 | if (spr_val != 0) |
89 | return EMULATE_FAIL; | 160 | return EMULATE_FAIL; |
90 | vcpu_e500->pid[2] = spr_val; break; | 161 | vcpu_e500->pid[2] = spr_val; |
162 | break; | ||
91 | case SPRN_MAS0: | 163 | case SPRN_MAS0: |
92 | vcpu->arch.shared->mas0 = spr_val; break; | 164 | vcpu->arch.shared->mas0 = spr_val; |
165 | break; | ||
93 | case SPRN_MAS1: | 166 | case SPRN_MAS1: |
94 | vcpu->arch.shared->mas1 = spr_val; break; | 167 | vcpu->arch.shared->mas1 = spr_val; |
168 | break; | ||
95 | case SPRN_MAS2: | 169 | case SPRN_MAS2: |
96 | vcpu->arch.shared->mas2 = spr_val; break; | 170 | vcpu->arch.shared->mas2 = spr_val; |
171 | break; | ||
97 | case SPRN_MAS3: | 172 | case SPRN_MAS3: |
98 | vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; | 173 | vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; |
99 | vcpu->arch.shared->mas7_3 |= spr_val; | 174 | vcpu->arch.shared->mas7_3 |= spr_val; |
100 | break; | 175 | break; |
101 | case SPRN_MAS4: | 176 | case SPRN_MAS4: |
102 | vcpu->arch.shared->mas4 = spr_val; break; | 177 | vcpu->arch.shared->mas4 = spr_val; |
178 | break; | ||
103 | case SPRN_MAS6: | 179 | case SPRN_MAS6: |
104 | vcpu->arch.shared->mas6 = spr_val; break; | 180 | vcpu->arch.shared->mas6 = spr_val; |
181 | break; | ||
105 | case SPRN_MAS7: | 182 | case SPRN_MAS7: |
106 | vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; | 183 | vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; |
107 | vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; | 184 | vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; |
108 | break; | 185 | break; |
186 | #endif | ||
109 | case SPRN_L1CSR0: | 187 | case SPRN_L1CSR0: |
110 | vcpu_e500->l1csr0 = spr_val; | 188 | vcpu_e500->l1csr0 = spr_val; |
111 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); | 189 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); |
112 | break; | 190 | break; |
113 | case SPRN_L1CSR1: | 191 | case SPRN_L1CSR1: |
114 | vcpu_e500->l1csr1 = spr_val; break; | 192 | vcpu_e500->l1csr1 = spr_val; |
193 | break; | ||
115 | case SPRN_HID0: | 194 | case SPRN_HID0: |
116 | vcpu_e500->hid0 = spr_val; break; | 195 | vcpu_e500->hid0 = spr_val; |
196 | break; | ||
117 | case SPRN_HID1: | 197 | case SPRN_HID1: |
118 | vcpu_e500->hid1 = spr_val; break; | 198 | vcpu_e500->hid1 = spr_val; |
199 | break; | ||
119 | 200 | ||
120 | case SPRN_MMUCSR0: | 201 | case SPRN_MMUCSR0: |
121 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, | 202 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, |
@@ -135,81 +216,112 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
135 | case SPRN_IVOR35: | 216 | case SPRN_IVOR35: |
136 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; | 217 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; |
137 | break; | 218 | break; |
138 | 219 | #ifdef CONFIG_KVM_BOOKE_HV | |
220 | case SPRN_IVOR36: | ||
221 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val; | ||
222 | break; | ||
223 | case SPRN_IVOR37: | ||
224 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val; | ||
225 | break; | ||
226 | #endif | ||
139 | default: | 227 | default: |
140 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 228 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); |
141 | } | 229 | } |
142 | 230 | ||
143 | return emulated; | 231 | return emulated; |
144 | } | 232 | } |
145 | 233 | ||
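The SPRN_MAS3/SPRN_MAS7 cases above both edit halves of the single 64-bit mas7_3 field in the shared area; a standalone sketch of that splice:

    #include <stdint.h>

    static void set_mas3(uint64_t *mas7_3, uint32_t val)
    {
            *mas7_3 &= ~(uint64_t)0xffffffff;    /* replace the low word (MAS3) */
            *mas7_3 |= val;
    }

    static void set_mas7(uint64_t *mas7_3, uint32_t val)
    {
            *mas7_3 &= (uint64_t)0xffffffff;     /* replace the high word (MAS7) */
            *mas7_3 |= (uint64_t)val << 32;
    }
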
146 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | 234 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) |
147 | { | 235 | { |
148 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 236 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
149 | int emulated = EMULATE_DONE; | 237 | int emulated = EMULATE_DONE; |
150 | unsigned long val; | ||
151 | 238 | ||
152 | switch (sprn) { | 239 | switch (sprn) { |
240 | #ifndef CONFIG_KVM_BOOKE_HV | ||
153 | case SPRN_PID: | 241 | case SPRN_PID: |
154 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; | 242 | *spr_val = vcpu_e500->pid[0]; |
243 | break; | ||
155 | case SPRN_PID1: | 244 | case SPRN_PID1: |
156 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; | 245 | *spr_val = vcpu_e500->pid[1]; |
246 | break; | ||
157 | case SPRN_PID2: | 247 | case SPRN_PID2: |
158 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; | 248 | *spr_val = vcpu_e500->pid[2]; |
249 | break; | ||
159 | case SPRN_MAS0: | 250 | case SPRN_MAS0: |
160 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; | 251 | *spr_val = vcpu->arch.shared->mas0; |
252 | break; | ||
161 | case SPRN_MAS1: | 253 | case SPRN_MAS1: |
162 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; | 254 | *spr_val = vcpu->arch.shared->mas1; |
255 | break; | ||
163 | case SPRN_MAS2: | 256 | case SPRN_MAS2: |
164 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; | 257 | *spr_val = vcpu->arch.shared->mas2; |
258 | break; | ||
165 | case SPRN_MAS3: | 259 | case SPRN_MAS3: |
166 | val = (u32)vcpu->arch.shared->mas7_3; | 260 | *spr_val = (u32)vcpu->arch.shared->mas7_3; |
167 | kvmppc_set_gpr(vcpu, rt, val); | ||
168 | break; | 261 | break; |
169 | case SPRN_MAS4: | 262 | case SPRN_MAS4: |
170 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; | 263 | *spr_val = vcpu->arch.shared->mas4; |
264 | break; | ||
171 | case SPRN_MAS6: | 265 | case SPRN_MAS6: |
172 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; | 266 | *spr_val = vcpu->arch.shared->mas6; |
267 | break; | ||
173 | case SPRN_MAS7: | 268 | case SPRN_MAS7: |
174 | val = vcpu->arch.shared->mas7_3 >> 32; | 269 | *spr_val = vcpu->arch.shared->mas7_3 >> 32; |
175 | kvmppc_set_gpr(vcpu, rt, val); | ||
176 | break; | 270 | break; |
271 | #endif | ||
177 | case SPRN_TLB0CFG: | 272 | case SPRN_TLB0CFG: |
178 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; | 273 | *spr_val = vcpu->arch.tlbcfg[0]; |
274 | break; | ||
179 | case SPRN_TLB1CFG: | 275 | case SPRN_TLB1CFG: |
180 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; | 276 | *spr_val = vcpu->arch.tlbcfg[1]; |
277 | break; | ||
181 | case SPRN_L1CSR0: | 278 | case SPRN_L1CSR0: |
182 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; | 279 | *spr_val = vcpu_e500->l1csr0; |
280 | break; | ||
183 | case SPRN_L1CSR1: | 281 | case SPRN_L1CSR1: |
184 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; | 282 | *spr_val = vcpu_e500->l1csr1; |
283 | break; | ||
185 | case SPRN_HID0: | 284 | case SPRN_HID0: |
186 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; | 285 | *spr_val = vcpu_e500->hid0; |
286 | break; | ||
187 | case SPRN_HID1: | 287 | case SPRN_HID1: |
188 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; | 288 | *spr_val = vcpu_e500->hid1; |
289 | break; | ||
189 | case SPRN_SVR: | 290 | case SPRN_SVR: |
190 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; | 291 | *spr_val = vcpu_e500->svr; |
292 | break; | ||
191 | 293 | ||
192 | case SPRN_MMUCSR0: | 294 | case SPRN_MMUCSR0: |
193 | kvmppc_set_gpr(vcpu, rt, 0); break; | 295 | *spr_val = 0; |
296 | break; | ||
194 | 297 | ||
195 | case SPRN_MMUCFG: | 298 | case SPRN_MMUCFG: |
196 | kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; | 299 | *spr_val = vcpu->arch.mmucfg; |
300 | break; | ||
197 | 301 | ||
198 | /* extra exceptions */ | 302 | /* extra exceptions */ |
199 | case SPRN_IVOR32: | 303 | case SPRN_IVOR32: |
200 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); | 304 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
201 | break; | 305 | break; |
202 | case SPRN_IVOR33: | 306 | case SPRN_IVOR33: |
203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); | 307 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; |
204 | break; | 308 | break; |
205 | case SPRN_IVOR34: | 309 | case SPRN_IVOR34: |
206 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); | 310 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; |
207 | break; | 311 | break; |
208 | case SPRN_IVOR35: | 312 | case SPRN_IVOR35: |
209 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); | 313 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; |
314 | break; | ||
315 | #ifdef CONFIG_KVM_BOOKE_HV | ||
316 | case SPRN_IVOR36: | ||
317 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; | ||
318 | break; | ||
319 | case SPRN_IVOR37: | ||
320 | *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; | ||
210 | break; | 321 | break; |
322 | #endif | ||
211 | default: | 323 | default: |
212 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 324 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); |
213 | } | 325 | } |
214 | 326 | ||
215 | return emulated; | 327 | return emulated; |
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 6e53e4164de1..c510fc961302 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -2,6 +2,9 @@ | |||
2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: Yu Liu, yu.liu@freescale.com | 4 | * Author: Yu Liu, yu.liu@freescale.com |
5 | * Scott Wood, scottwood@freescale.com | ||
6 | * Ashish Kalra, ashish.kalra@freescale.com | ||
7 | * Varun Sethi, varun.sethi@freescale.com | ||
5 | * | 8 | * |
6 | * Description: | 9 | * Description: |
7 | * This file is based on arch/powerpc/kvm/44x_tlb.c, | 10 | * This file is based on arch/powerpc/kvm/44x_tlb.c, |
@@ -26,210 +29,15 @@ | |||
26 | #include <linux/vmalloc.h> | 29 | #include <linux/vmalloc.h> |
27 | #include <linux/hugetlb.h> | 30 | #include <linux/hugetlb.h> |
28 | #include <asm/kvm_ppc.h> | 31 | #include <asm/kvm_ppc.h> |
29 | #include <asm/kvm_e500.h> | ||
30 | 32 | ||
31 | #include "../mm/mmu_decl.h" | 33 | #include "e500.h" |
32 | #include "e500_tlb.h" | ||
33 | #include "trace.h" | 34 | #include "trace.h" |
34 | #include "timing.h" | 35 | #include "timing.h" |
35 | 36 | ||
36 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) | 37 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) |
37 | 38 | ||
38 | struct id { | ||
39 | unsigned long val; | ||
40 | struct id **pentry; | ||
41 | }; | ||
42 | |||
43 | #define NUM_TIDS 256 | ||
44 | |||
45 | /* | ||
46 | * This table provides mappings from: | ||
47 | * (guestAS,guestTID,guestPR) --> ID of physical cpu | ||
48 | * guestAS [0..1] | ||
49 | * guestTID [0..255] | ||
50 | * guestPR [0..1] | ||
51 | * ID [1..255] | ||
52 | * Each vcpu keeps one vcpu_id_table. | ||
53 | */ | ||
54 | struct vcpu_id_table { | ||
55 | struct id id[2][NUM_TIDS][2]; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * This table provides the reverse mapping of vcpu_id_table: | ||
60 | * ID --> address of vcpu_id_table item. | ||
61 | * Each physical core has one pcpu_id_table. | ||
62 | */ | ||
63 | struct pcpu_id_table { | ||
64 | struct id *entry[NUM_TIDS]; | ||
65 | }; | ||
66 | |||
67 | static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); | ||
68 | |||
69 | /* This variable keeps the last used shadow ID on the local core. | ||
70 | * The valid range of a shadow ID is [1..255]. */ | ||
71 | static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); | ||
72 | |||
73 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; | 39 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; |
74 | 40 | ||
75 | static struct kvm_book3e_206_tlb_entry *get_entry( | ||
76 | struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) | ||
77 | { | ||
78 | int offset = vcpu_e500->gtlb_offset[tlbsel]; | ||
79 | return &vcpu_e500->gtlb_arch[offset + entry]; | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Allocate a free shadow id and set up a valid sid mapping in the given entry. | ||
84 | * A mapping is only valid when vcpu_id_table and pcpu_id_table match. | ||
85 | * | ||
86 | * The caller must have preemption disabled, and keep it that way until | ||
87 | * it has finished with the returned shadow id (either written into the | ||
88 | * TLB or arch.shadow_pid, or discarded). | ||
89 | */ | ||
90 | static inline int local_sid_setup_one(struct id *entry) | ||
91 | { | ||
92 | unsigned long sid; | ||
93 | int ret = -1; | ||
94 | |||
95 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | ||
96 | if (sid < NUM_TIDS) { | ||
97 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | ||
98 | entry->val = sid; | ||
99 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | ||
100 | ret = sid; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * If sid == NUM_TIDS, we've run out of sids. We return -1, and | ||
105 | * the caller will invalidate everything and start over. | ||
106 | * | ||
107 | * sid > NUM_TIDS indicates a race, which we disable preemption to | ||
108 | * avoid. | ||
109 | */ | ||
110 | WARN_ON(sid > NUM_TIDS); | ||
111 | |||
112 | return ret; | ||
113 | } | ||
114 | |||
115 | /* | ||
116 | * Check whether the given entry contains a valid shadow id mapping. | ||
117 | * An ID mapping is considered valid only if | ||
118 | * both vcpu and pcpu know this mapping. | ||
119 | * | ||
120 | * The caller must have preemption disabled, and keep it that way until | ||
121 | * it has finished with the returned shadow id (either written into the | ||
122 | * TLB or arch.shadow_pid, or discarded). | ||
123 | */ | ||
124 | static inline int local_sid_lookup(struct id *entry) | ||
125 | { | ||
126 | if (entry && entry->val != 0 && | ||
127 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | ||
128 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | ||
129 | return entry->val; | ||
130 | return -1; | ||
131 | } | ||
132 | |||
133 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | ||
134 | static inline void local_sid_destroy_all(void) | ||
135 | { | ||
136 | __get_cpu_var(pcpu_last_used_sid) = 0; | ||
137 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | ||
138 | } | ||
139 | |||
140 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
141 | { | ||
142 | vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); | ||
143 | return vcpu_e500->idt; | ||
144 | } | ||
145 | |||
146 | static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
147 | { | ||
148 | kfree(vcpu_e500->idt); | ||
149 | } | ||
150 | |||
151 | /* Invalidate all mappings on vcpu */ | ||
152 | static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
153 | { | ||
154 | memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); | ||
155 | |||
156 | /* Update shadow pid when mappings are changed */ | ||
157 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
158 | } | ||
159 | |||
160 | /* Invalidate one ID mapping on vcpu */ | ||
161 | static inline void kvmppc_e500_id_table_reset_one( | ||
162 | struct kvmppc_vcpu_e500 *vcpu_e500, | ||
163 | int as, int pid, int pr) | ||
164 | { | ||
165 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
166 | |||
167 | BUG_ON(as >= 2); | ||
168 | BUG_ON(pid >= NUM_TIDS); | ||
169 | BUG_ON(pr >= 2); | ||
170 | |||
171 | idt->id[as][pid][pr].val = 0; | ||
172 | idt->id[as][pid][pr].pentry = NULL; | ||
173 | |||
174 | /* Update shadow pid when mappings are changed */ | ||
175 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Map guest (vcpu,AS,ID,PR) to physical core shadow id. | ||
180 | * This function first looks up whether a valid mapping exists | ||
181 | * and, if not, creates a new one. | ||
182 | * | ||
183 | * The caller must have preemption disabled, and keep it that way until | ||
184 | * it has finished with the returned shadow id (either written into the | ||
185 | * TLB or arch.shadow_pid, or discarded). | ||
186 | */ | ||
187 | static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
188 | unsigned int as, unsigned int gid, | ||
189 | unsigned int pr, int avoid_recursion) | ||
190 | { | ||
191 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
192 | int sid; | ||
193 | |||
194 | BUG_ON(as >= 2); | ||
195 | BUG_ON(gid >= NUM_TIDS); | ||
196 | BUG_ON(pr >= 2); | ||
197 | |||
198 | sid = local_sid_lookup(&idt->id[as][gid][pr]); | ||
199 | |||
200 | while (sid <= 0) { | ||
201 | /* No mapping yet */ | ||
202 | sid = local_sid_setup_one(&idt->id[as][gid][pr]); | ||
203 | if (sid <= 0) { | ||
204 | _tlbil_all(); | ||
205 | local_sid_destroy_all(); | ||
206 | } | ||
207 | |||
208 | /* Update shadow pid when mappings are changed */ | ||
209 | if (!avoid_recursion) | ||
210 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
211 | } | ||
212 | |||
213 | return sid; | ||
214 | } | ||
215 | |||
216 | /* Map guest pid to shadow. | ||
217 | * We use PID to shadow the guest's current non-zero PID, | ||
218 | * and PID1 to shadow the guest's zero PID, | ||
219 | * so that a guest tlbe with TID=0 can be accessed at any time. */ | ||
220 | void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
221 | { | ||
222 | preempt_disable(); | ||
223 | vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, | ||
224 | get_cur_as(&vcpu_e500->vcpu), | ||
225 | get_cur_pid(&vcpu_e500->vcpu), | ||
226 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
227 | vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, | ||
228 | get_cur_as(&vcpu_e500->vcpu), 0, | ||
229 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
230 | preempt_enable(); | ||
231 | } | ||
232 | |||
233 | static inline unsigned int gtlb0_get_next_victim( | 41 | static inline unsigned int gtlb0_get_next_victim( |
234 | struct kvmppc_vcpu_e500 *vcpu_e500) | 42 | struct kvmppc_vcpu_e500 *vcpu_e500) |
235 | { | 43 | { |
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) | |||
258 | /* Mask off reserved bits. */ | 66 | /* Mask off reserved bits. */ |
259 | mas3 &= MAS3_ATTRIB_MASK; | 67 | mas3 &= MAS3_ATTRIB_MASK; |
260 | 68 | ||
69 | #ifndef CONFIG_KVM_BOOKE_HV | ||
261 | if (!usermode) { | 70 | if (!usermode) { |
262 | /* Guest is in supervisor mode, | 71 | /* Guest is in supervisor mode, |
263 | * so we need to translate guest | 72 | * so we need to translate guest |
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) | |||
265 | mas3 &= ~E500_TLB_USER_PERM_MASK; | 74 | mas3 &= ~E500_TLB_USER_PERM_MASK; |
266 | mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; | 75 | mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; |
267 | } | 76 | } |
268 | 77 | mas3 |= E500_TLB_SUPER_PERM_MASK; | |
269 | return mas3 | E500_TLB_SUPER_PERM_MASK; | 78 | #endif |
79 | return mas3; | ||
270 | } | 80 | } |
271 | 81 | ||
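The << 1 in e500_shadow_mas3_attrib() works because each MAS3 user permission bit sits one position above its supervisor counterpart. A sketch assuming the usual Book3E values (SR=0x01, UR=0x02, SW=0x04, UW=0x08, SX=0x10, UX=0x20):

    /* Sketch: grant user permissions matching the supervisor ones. */
    static unsigned int super_to_user(unsigned int mas3)
    {
            mas3 &= ~0x2a;               /* drop UR|UW|UX (assumed values)  */
            mas3 |= (mas3 & 0x15) << 1;  /* copy SR|SW|SX up into UR|UW|UX  */
            return mas3;
    }
    /* super_to_user(SR|SW) yields SR|SW|UR|UW. */
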
272 | static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) | 82 | static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) |
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe, | |||
292 | mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); | 102 | mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); |
293 | mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); | 103 | mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); |
294 | mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); | 104 | mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); |
105 | #ifdef CONFIG_KVM_BOOKE_HV | ||
106 | mtspr(SPRN_MAS8, stlbe->mas8); | ||
107 | #endif | ||
295 | asm volatile("isync; tlbwe" : : : "memory"); | 108 | asm volatile("isync; tlbwe" : : : "memory"); |
109 | |||
110 | #ifdef CONFIG_KVM_BOOKE_HV | ||
111 | /* Must clear mas8 for other host tlbwe's */ | ||
112 | mtspr(SPRN_MAS8, 0); | ||
113 | isync(); | ||
114 | #endif | ||
296 | local_irq_restore(flags); | 115 | local_irq_restore(flags); |
297 | 116 | ||
298 | trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, | 117 | trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, |
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
337 | } | 156 | } |
338 | } | 157 | } |
339 | 158 | ||
159 | #ifdef CONFIG_KVM_E500V2 | ||
340 | void kvmppc_map_magic(struct kvm_vcpu *vcpu) | 160 | void kvmppc_map_magic(struct kvm_vcpu *vcpu) |
341 | { | 161 | { |
342 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 162 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu) | |||
361 | __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); | 181 | __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); |
362 | preempt_enable(); | 182 | preempt_enable(); |
363 | } | 183 | } |
364 | 184 | #endif | |
365 | void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) | ||
366 | { | ||
367 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
368 | |||
369 | /* Shadow PID may be expired on local core */ | ||
370 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
371 | } | ||
372 | |||
373 | void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) | ||
374 | { | ||
375 | } | ||
376 | 185 | ||
377 | static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, | 186 | static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, |
378 | int tlbsel, int esel) | 187 | int tlbsel, int esel) |
379 | { | 188 | { |
380 | struct kvm_book3e_206_tlb_entry *gtlbe = | 189 | struct kvm_book3e_206_tlb_entry *gtlbe = |
381 | get_entry(vcpu_e500, tlbsel, esel); | 190 | get_entry(vcpu_e500, tlbsel, esel); |
382 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
383 | unsigned int pr, tid, ts, pid; | ||
384 | u32 val, eaddr; | ||
385 | unsigned long flags; | ||
386 | |||
387 | ts = get_tlb_ts(gtlbe); | ||
388 | tid = get_tlb_tid(gtlbe); | ||
389 | |||
390 | preempt_disable(); | ||
391 | |||
392 | /* One guest ID may be mapped to two shadow IDs */ | ||
393 | for (pr = 0; pr < 2; pr++) { | ||
394 | /* | ||
395 | * The shadow PID can have a valid mapping on at most one | ||
396 | * host CPU. In the common case, it will be valid on this | ||
397 | * CPU, in which case (for TLB0) we do a local invalidation | ||
398 | * of the specific address. | ||
399 | * | ||
400 | * If the shadow PID is not valid on the current host CPU, or | ||
401 | * if we're invalidating a TLB1 entry, we invalidate the | ||
402 | * entire shadow PID. | ||
403 | */ | ||
404 | if (tlbsel == 1 || | ||
405 | (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) { | ||
406 | kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); | ||
407 | continue; | ||
408 | } | ||
409 | 191 | ||
410 | /* | 192 | if (tlbsel == 1 && |
411 | * The guest is invalidating a TLB0 entry which is in a PID | 193 | vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) { |
412 | * that has a valid shadow mapping on this host CPU. We | 194 | u64 tmp = vcpu_e500->g2h_tlb1_map[esel]; |
413 | * search host TLB0 to invalidate it's shadow TLB entry, | 195 | int hw_tlb_indx; |
414 | * similar to __tlbil_va except that we need to look in AS1. | 196 | unsigned long flags; |
415 | */ | ||
416 | val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; | ||
417 | eaddr = get_tlb_eaddr(gtlbe); | ||
418 | 197 | ||
419 | local_irq_save(flags); | 198 | local_irq_save(flags); |
420 | 199 | while (tmp) { | |
421 | mtspr(SPRN_MAS6, val); | 200 | hw_tlb_indx = __ilog2_u64(tmp & -tmp); |
422 | asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); | 201 | mtspr(SPRN_MAS0, |
423 | val = mfspr(SPRN_MAS1); | 202 | MAS0_TLBSEL(1) | |
424 | if (val & MAS1_VALID) { | 203 | MAS0_ESEL(to_htlb1_esel(hw_tlb_indx))); |
425 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | 204 | mtspr(SPRN_MAS1, 0); |
426 | asm volatile("tlbwe"); | 205 | asm volatile("tlbwe"); |
206 | vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0; | ||
207 | tmp &= tmp - 1; | ||
427 | } | 208 | } |
428 | 209 | mb(); | |
210 | vcpu_e500->g2h_tlb1_map[esel] = 0; | ||
211 | vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP; | ||
429 | local_irq_restore(flags); | 212 | local_irq_restore(flags); |
213 | |||
214 | return; | ||
430 | } | 215 | } |
431 | 216 | ||
432 | preempt_enable(); | 217 | /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ |
218 | kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); | ||
433 | } | 219 | } |
434 | 220 | ||
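The while (tmp) loop in inval_gtlbe_on_host() above is the standard lowest-set-bit walk: tmp & -tmp isolates the lowest set bit, __ilog2_u64() turns it into an index, and tmp &= tmp - 1 clears it. The same idiom in freestanding C, with a GCC builtin standing in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    static void walk_set_bits(uint64_t tmp)
    {
            while (tmp) {
                    int idx = __builtin_ctzll(tmp);  /* ~ __ilog2_u64(tmp & -tmp) */
                    printf("host TLB1 slot %d\n", idx);
                    tmp &= tmp - 1;                  /* clear the lowest set bit */
            }
    }
    /* walk_set_bits(0x29) visits slots 0, 3 and 5. */
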
435 | static int tlb0_set_base(gva_t addr, int sets, int ways) | 221 | static int tlb0_set_base(gva_t addr, int sets, int ways) |
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
475 | set_base = gtlb0_set_base(vcpu_e500, eaddr); | 261 | set_base = gtlb0_set_base(vcpu_e500, eaddr); |
476 | size = vcpu_e500->gtlb_params[0].ways; | 262 | size = vcpu_e500->gtlb_params[0].ways; |
477 | } else { | 263 | } else { |
264 | if (eaddr < vcpu_e500->tlb1_min_eaddr || | ||
265 | eaddr > vcpu_e500->tlb1_max_eaddr) | ||
266 | return -1; | ||
478 | set_base = 0; | 267 | set_base = 0; |
479 | } | 268 | } |
480 | 269 | ||
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) | |||
530 | } | 319 | } |
531 | } | 320 | } |
532 | 321 | ||
322 | static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
323 | { | ||
324 | if (vcpu_e500->g2h_tlb1_map) | ||
325 | memset(vcpu_e500->g2h_tlb1_map, 0, | ||
326 | sizeof(u64) * vcpu_e500->gtlb_params[1].entries); | ||
327 | if (vcpu_e500->h2g_tlb1_rmap) | ||
328 | memset(vcpu_e500->h2g_tlb1_rmap, 0, | ||
329 | sizeof(unsigned int) * host_tlb_params[1].entries); | ||
330 | } | ||
331 | |||
533 | static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) | 332 | static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) |
534 | { | 333 | { |
535 | int tlbsel = 0; | 334 | int tlbsel = 0; |
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
547 | int stlbsel = 1; | 346 | int stlbsel = 1; |
548 | int i; | 347 | int i; |
549 | 348 | ||
550 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 349 | kvmppc_e500_tlbil_all(vcpu_e500); |
551 | 350 | ||
552 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { | 351 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { |
553 | struct tlbe_ref *ref = | 352 | struct tlbe_ref *ref = |
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
562 | unsigned int eaddr, int as) | 361 | unsigned int eaddr, int as) |
563 | { | 362 | { |
564 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 363 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
565 | unsigned int victim, pidsel, tsized; | 364 | unsigned int victim, tsized; |
566 | int tlbsel; | 365 | int tlbsel; |
567 | 366 | ||
568 | /* since we only have two TLBs, only lower bit is used. */ | 367 | /* since we only have two TLBs, only lower bit is used. */ |
569 | tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; | 368 | tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; |
570 | victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; | 369 | victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; |
571 | pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf; | ||
572 | tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; | 370 | tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; |
573 | 371 | ||
574 | vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | 372 | vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) |
575 | | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); | 373 | | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); |
576 | vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) | 374 | vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) |
577 | | MAS1_TID(vcpu_e500->pid[pidsel]) | 375 | | MAS1_TID(get_tlbmiss_tid(vcpu)) |
578 | | MAS1_TSIZE(tsized); | 376 | | MAS1_TSIZE(tsized); |
579 | vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) | 377 | vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) |
580 | | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); | 378 | | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); |
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
586 | 384 | ||
587 | /* TID must be supplied by the caller */ | 385 | /* TID must be supplied by the caller */ |
588 | static inline void kvmppc_e500_setup_stlbe( | 386 | static inline void kvmppc_e500_setup_stlbe( |
589 | struct kvmppc_vcpu_e500 *vcpu_e500, | 387 | struct kvm_vcpu *vcpu, |
590 | struct kvm_book3e_206_tlb_entry *gtlbe, | 388 | struct kvm_book3e_206_tlb_entry *gtlbe, |
591 | int tsize, struct tlbe_ref *ref, u64 gvaddr, | 389 | int tsize, struct tlbe_ref *ref, u64 gvaddr, |
592 | struct kvm_book3e_206_tlb_entry *stlbe) | 390 | struct kvm_book3e_206_tlb_entry *stlbe) |
593 | { | 391 | { |
594 | pfn_t pfn = ref->pfn; | 392 | pfn_t pfn = ref->pfn; |
393 | u32 pr = vcpu->arch.shared->msr & MSR_PR; | ||
595 | 394 | ||
596 | BUG_ON(!(ref->flags & E500_TLB_VALID)); | 395 | BUG_ON(!(ref->flags & E500_TLB_VALID)); |
597 | 396 | ||
598 | /* Force TS=1 IPROT=0 for all guest mappings. */ | 397 | /* Force IPROT=0 for all guest mappings. */ |
599 | stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID; | 398 | stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; |
600 | stlbe->mas2 = (gvaddr & MAS2_EPN) | 399 | stlbe->mas2 = (gvaddr & MAS2_EPN) | |
601 | | e500_shadow_mas2_attrib(gtlbe->mas2, | 400 | e500_shadow_mas2_attrib(gtlbe->mas2, pr); |
602 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); | 401 | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | |
603 | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 402 | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); |
604 | | e500_shadow_mas3_attrib(gtlbe->mas7_3, | 403 | |
605 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); | 404 | #ifdef CONFIG_KVM_BOOKE_HV |
405 | stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid; | ||
406 | #endif | ||
606 | } | 407 | } |
607 | 408 | ||
608 | static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | 409 | static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, |
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
736 | kvmppc_e500_ref_release(ref); | 537 | kvmppc_e500_ref_release(ref); |
737 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); | 538 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); |
738 | 539 | ||
739 | kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe); | 540 | kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, |
541 | ref, gvaddr, stlbe); | ||
740 | } | 542 | } |
741 | 543 | ||
742 | /* XXX only map the one-one case, for now use TLB0 */ | 544 | /* XXX only map the one-one case, for now use TLB0 */ |
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
760 | /* XXX for both one-one and one-to-many, for now use TLB1 */ | 562
761 | static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | 563 | static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, |
762 | u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, | 564 | u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, |
763 | struct kvm_book3e_206_tlb_entry *stlbe) | 565 | struct kvm_book3e_206_tlb_entry *stlbe, int esel) |
764 | { | 566 | { |
765 | struct tlbe_ref *ref; | 567 | struct tlbe_ref *ref; |
766 | unsigned int victim; | 568 | unsigned int victim; |
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
773 | ref = &vcpu_e500->tlb_refs[1][victim]; | 575 | ref = &vcpu_e500->tlb_refs[1][victim]; |
774 | kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); | 576 | kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); |
775 | 577 | ||
578 | vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim; | ||
579 | vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP; | ||
580 | if (vcpu_e500->h2g_tlb1_rmap[victim]) { | ||
581 | unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim]; | ||
582 | vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim); | ||
583 | } | ||
584 | vcpu_e500->h2g_tlb1_rmap[victim] = esel; | ||
585 | |||
776 | return victim; | 586 | return victim; |
777 | } | 587 | } |
778 | 588 | ||
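A condensed sketch of the bookkeeping added above: g2h_tlb1_map holds, per guest TLB1 entry, a bitmap of host slots backing it, and h2g_tlb1_rmap points each host slot back at its owner, so recycling a slot must clear the previous owner's bit (sizes are arbitrary for illustration):

    #include <stdint.h>

    #define GUEST_ENTRIES 16
    #define HOST_SLOTS    64

    static uint64_t     g2h[GUEST_ENTRIES];  /* guest entry -> host slot bitmap */
    static unsigned int h2g[HOST_SLOTS];     /* host slot  -> guest entry       */

    static void claim_host_slot(int esel, int victim)
    {
            g2h[esel] |= 1ULL << victim;
            if (h2g[victim])                        /* slot backed another entry */
                    g2h[h2g[victim]] &= ~(1ULL << victim);
            h2g[victim] = esel;
    }
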
779 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | 589 | static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500) |
590 | { | ||
591 | int size = vcpu_e500->gtlb_params[1].entries; | ||
592 | unsigned int offset; | ||
593 | gva_t eaddr; | ||
594 | int i; | ||
595 | |||
596 | vcpu_e500->tlb1_min_eaddr = ~0UL; | ||
597 | vcpu_e500->tlb1_max_eaddr = 0; | ||
598 | offset = vcpu_e500->gtlb_offset[1]; | ||
599 | |||
600 | for (i = 0; i < size; i++) { | ||
601 | struct kvm_book3e_206_tlb_entry *tlbe = | ||
602 | &vcpu_e500->gtlb_arch[offset + i]; | ||
603 | |||
604 | if (!get_tlb_v(tlbe)) | ||
605 | continue; | ||
606 | |||
607 | eaddr = get_tlb_eaddr(tlbe); | ||
608 | vcpu_e500->tlb1_min_eaddr = | ||
609 | min(vcpu_e500->tlb1_min_eaddr, eaddr); | ||
610 | |||
611 | eaddr = get_tlb_end(tlbe); | ||
612 | vcpu_e500->tlb1_max_eaddr = | ||
613 | max(vcpu_e500->tlb1_max_eaddr, eaddr); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
618 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
780 | { | 619 | { |
620 | unsigned long start, end, size; | ||
621 | |||
622 | size = get_tlb_bytes(gtlbe); | ||
623 | start = get_tlb_eaddr(gtlbe) & ~(size - 1); | ||
624 | end = start + size - 1; | ||
625 | |||
626 | return vcpu_e500->tlb1_min_eaddr == start || | ||
627 | vcpu_e500->tlb1_max_eaddr == end; | ||
628 | } | ||
629 | |||
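The masking above relies on TLB mapping sizes being powers of two, so size - 1 is an all-ones mask below the alignment boundary; e.g. for a 1 MiB mapping:

    /* Sketch: rounding an eaddr to a power-of-two mapping boundary. */
    unsigned long size  = 0x100000UL;                  /* 1 MiB         */
    unsigned long start = 0xc0123456UL & ~(size - 1);  /* == 0xc0100000 */
    unsigned long end   = start + size - 1;            /* == 0xc01fffff */
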
630 | /* This function is supposed to be called when adding a new valid TLB entry */ | ||
631 | static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu, | ||
632 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
633 | { | ||
634 | unsigned long start, end, size; | ||
781 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 635 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
782 | 636 | ||
783 | /* Recalc shadow pid since MSR changes */ | 637 | if (!get_tlb_v(gtlbe)) |
784 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | 638 | return; |
639 | |||
640 | size = get_tlb_bytes(gtlbe); | ||
641 | start = get_tlb_eaddr(gtlbe) & ~(size - 1); | ||
642 | end = start + size - 1; | ||
643 | |||
644 | vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start); | ||
645 | vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end); | ||
785 | } | 646 | } |
786 | 647 | ||
787 | static inline int kvmppc_e500_gtlbe_invalidate( | 648 | static inline int kvmppc_e500_gtlbe_invalidate( |
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate( | |||
794 | if (unlikely(get_tlb_iprot(gtlbe))) | 655 | if (unlikely(get_tlb_iprot(gtlbe))) |
795 | return -1; | 656 | return -1; |
796 | 657 | ||
658 | if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) | ||
659 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
660 | |||
797 | gtlbe->mas1 = 0; | 661 | gtlbe->mas1 = 0; |
798 | 662 | ||
799 | return 0; | 663 | return 0; |
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) | |||
811 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); | 675 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); |
812 | 676 | ||
813 | /* Invalidate all vcpu id mappings */ | 677 | /* Invalidate all vcpu id mappings */ |
814 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 678 | kvmppc_e500_tlbil_all(vcpu_e500); |
815 | 679 | ||
816 | return EMULATE_DONE; | 680 | return EMULATE_DONE; |
817 | } | 681 | } |
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) | |||
844 | } | 708 | } |
845 | 709 | ||
846 | /* Invalidate all vcpu id mappings */ | 710 | /* Invalidate all vcpu id mappings */ |
847 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 711 | kvmppc_e500_tlbil_all(vcpu_e500); |
712 | |||
713 | return EMULATE_DONE; | ||
714 | } | ||
715 | |||
716 | static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, | ||
717 | int pid, int rt) | ||
718 | { | ||
719 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
720 | int tid, esel; | ||
721 | |||
722 | /* invalidate all entries (rt == 0), or all entries matching pid */ | ||
723 | for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) { | ||
724 | tlbe = get_entry(vcpu_e500, tlbsel, esel); | ||
725 | tid = get_tlb_tid(tlbe); | ||
726 | if (rt == 0 || tid == pid) { | ||
727 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | ||
728 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); | ||
729 | } | ||
730 | } | ||
731 | } | ||
732 | |||
733 | static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid, | ||
734 | int ra, int rb) | ||
735 | { | ||
736 | int tlbsel, esel; | ||
737 | gva_t ea; | ||
738 | |||
739 | ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb); | ||
740 | if (ra) | ||
741 | ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra); | ||
742 | |||
743 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
744 | esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1); | ||
745 | if (esel >= 0) { | ||
746 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | ||
747 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); | ||
748 | break; | ||
749 | } | ||
750 | } | ||
751 | } | ||
752 | |||
753 | int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb) | ||
754 | { | ||
755 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
756 | int pid = get_cur_spid(vcpu); | ||
757 | |||
758 | if (rt == 0 || rt == 1) { | ||
759 | tlbilx_all(vcpu_e500, 0, pid, rt); | ||
760 | tlbilx_all(vcpu_e500, 1, pid, rt); | ||
761 | } else if (rt == 3) { | ||
762 | tlbilx_one(vcpu_e500, pid, ra, rb); | ||
763 | } | ||
848 | 764 | ||
849 | return EMULATE_DONE; | 765 | return EMULATE_DONE; |
850 | } | 766 | } |
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
929 | int stid; | 845 | int stid; |
930 | 846 | ||
931 | preempt_disable(); | 847 | preempt_disable(); |
932 | stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), | 848 | stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); |
933 | get_tlb_tid(gtlbe), | ||
934 | get_cur_pr(&vcpu_e500->vcpu), 0); | ||
935 | 849 | ||
936 | stlbe->mas1 |= MAS1_TID(stid); | 850 | stlbe->mas1 |= MAS1_TID(stid); |
937 | write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); | 851 | write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); |
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
941 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | 855 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) |
942 | { | 856 | { |
943 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 857 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
944 | struct kvm_book3e_206_tlb_entry *gtlbe; | 858 | struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; |
945 | int tlbsel, esel; | 859 | int tlbsel, esel, stlbsel, sesel; |
860 | int recal = 0; | ||
946 | 861 | ||
947 | tlbsel = get_tlb_tlbsel(vcpu); | 862 | tlbsel = get_tlb_tlbsel(vcpu); |
948 | esel = get_tlb_esel(vcpu, tlbsel); | 863 | esel = get_tlb_esel(vcpu, tlbsel); |
949 | 864 | ||
950 | gtlbe = get_entry(vcpu_e500, tlbsel, esel); | 865 | gtlbe = get_entry(vcpu_e500, tlbsel, esel); |
951 | 866 | ||
952 | if (get_tlb_v(gtlbe)) | 867 | if (get_tlb_v(gtlbe)) { |
953 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); | 868 | inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); |
869 | if ((tlbsel == 1) && | ||
870 | kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe)) | ||
871 | recal = 1; | ||
872 | } | ||
954 | 873 | ||
955 | gtlbe->mas1 = vcpu->arch.shared->mas1; | 874 | gtlbe->mas1 = vcpu->arch.shared->mas1; |
956 | gtlbe->mas2 = vcpu->arch.shared->mas2; | 875 | gtlbe->mas2 = vcpu->arch.shared->mas2; |
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
959 | trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, | 878 | trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, |
960 | gtlbe->mas2, gtlbe->mas7_3); | 879 | gtlbe->mas2, gtlbe->mas7_3); |
961 | 880 | ||
881 | if (tlbsel == 1) { | ||
882 | /* | ||
883 | * If a valid tlb1 entry is overwritten, recalculate the | ||
884 | * min/max TLB1 map address range; otherwise there is no | ||
885 | * need to look in the tlb1 array. | ||
886 | */ | ||
887 | if (recal) | ||
888 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
889 | else | ||
890 | kvmppc_set_tlb1map_range(vcpu, gtlbe); | ||
891 | } | ||
892 | |||
962 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | 893 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ |
963 | if (tlbe_is_host_safe(vcpu, gtlbe)) { | 894 | if (tlbe_is_host_safe(vcpu, gtlbe)) { |
964 | struct kvm_book3e_206_tlb_entry stlbe; | ||
965 | int stlbsel, sesel; | ||
966 | u64 eaddr; | 895 | u64 eaddr; |
967 | u64 raddr; | 896 | u64 raddr; |
968 | 897 | ||
@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
989 | * are mapped on the fly. */ | 918 | * are mapped on the fly. */ |
990 | stlbsel = 1; | 919 | stlbsel = 1; |
991 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, | 920 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, |
992 | raddr >> PAGE_SHIFT, gtlbe, &stlbe); | 921 | raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel); |
993 | break; | 922 | break; |
994 | 923 | ||
995 | default: | 924 | default: |
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
1003 | return EMULATE_DONE; | 932 | return EMULATE_DONE; |
1004 | } | 933 | } |
1005 | 934 | ||
935 | static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | ||
936 | gva_t eaddr, unsigned int pid, int as) | ||
937 | { | ||
938 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
939 | int esel, tlbsel; | ||
940 | |||
941 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
942 | esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); | ||
943 | if (esel >= 0) | ||
944 | return index_of(tlbsel, esel); | ||
945 | } | ||
946 | |||
947 | return -1; | ||
948 | } | ||
949 | |||
950 | /* 'linear_address' is actually an encoding of AS|PID|EADDR. */ | ||
951 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
952 | struct kvm_translation *tr) | ||
953 | { | ||
954 | int index; | ||
955 | gva_t eaddr; | ||
956 | u8 pid; | ||
957 | u8 as; | ||
958 | |||
959 | eaddr = tr->linear_address; | ||
960 | pid = (tr->linear_address >> 32) & 0xff; | ||
961 | as = (tr->linear_address >> 40) & 0x1; | ||
962 | |||
963 | index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); | ||
964 | if (index < 0) { | ||
965 | tr->valid = 0; | ||
966 | return 0; | ||
967 | } | ||
968 | |||
969 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
970 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
971 | tr->valid = 1; | ||
972 | |||
973 | return 0; | ||
974 | } | ||
975 | |||
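For a caller of KVM_TRANSLATE, that means composing the low 32 bits from the effective address, the PID at bit 32 and the AS bit at bit 40 -- a hypothetical helper, not part of the patch:

    #include <stdint.h>

    static uint64_t e500_linear_address(uint32_t eaddr, uint8_t pid, uint8_t as)
    {
            return (uint64_t)eaddr |
                   ((uint64_t)pid << 32) |
                   ((uint64_t)(as & 1) << 40);
    }
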
976 | |||
1006 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 977 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
1007 | { | 978 | { |
1008 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); | 979 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1066 | sesel = 0; /* unused */ | 1037 | sesel = 0; /* unused */ |
1067 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; | 1038 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; |
1068 | 1039 | ||
1069 | kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K, | 1040 | kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, |
1070 | &priv->ref, eaddr, &stlbe); | 1041 | &priv->ref, eaddr, &stlbe); |
1071 | break; | 1042 | break; |
1072 | 1043 | ||
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1075 | 1046 | ||
1076 | stlbsel = 1; | 1047 | stlbsel = 1; |
1077 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, | 1048 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, |
1078 | gtlbe, &stlbe); | 1049 | gtlbe, &stlbe, esel); |
1079 | break; | 1050 | break; |
1080 | } | 1051 | } |
1081 | 1052 | ||
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1087 | write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); | 1058 | write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); |
1088 | } | 1059 | } |
1089 | 1060 | ||
1090 | int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | ||
1091 | gva_t eaddr, unsigned int pid, int as) | ||
1092 | { | ||
1093 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
1094 | int esel, tlbsel; | ||
1095 | |||
1096 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
1097 | esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); | ||
1098 | if (esel >= 0) | ||
1099 | return index_of(tlbsel, esel); | ||
1100 | } | ||
1101 | |||
1102 | return -1; | ||
1103 | } | ||
1104 | |||
1105 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
1106 | { | ||
1107 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
1108 | |||
1109 | if (vcpu->arch.pid != pid) { | ||
1110 | vcpu_e500->pid[0] = vcpu->arch.pid = pid; | ||
1111 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
1112 | } | ||
1113 | } | ||
1114 | |||
1115 | void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
1116 | { | ||
1117 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
1118 | |||
1119 | /* Insert large initial mapping for guest. */ | ||
1120 | tlbe = get_entry(vcpu_e500, 1, 0); | ||
1121 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); | ||
1122 | tlbe->mas2 = 0; | ||
1123 | tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; | ||
1124 | |||
1125 | /* 4K map for serial output. Used by kernel wrapper. */ | ||
1126 | tlbe = get_entry(vcpu_e500, 1, 1); | ||
1127 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); | ||
1128 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | ||
1129 | tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | ||
1130 | } | ||
1131 | |||
1132 | static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) | 1061 | static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) |
1133 | { | 1062 | { |
1134 | int i; | 1063 | int i; |
1135 | 1064 | ||
1065 | clear_tlb1_bitmap(vcpu_e500); | ||
1066 | kfree(vcpu_e500->g2h_tlb1_map); | ||
1067 | |||
1136 | clear_tlb_refs(vcpu_e500); | 1068 | clear_tlb_refs(vcpu_e500); |
1137 | kfree(vcpu_e500->gtlb_priv[0]); | 1069 | kfree(vcpu_e500->gtlb_priv[0]); |
1138 | kfree(vcpu_e500->gtlb_priv[1]); | 1070 | kfree(vcpu_e500->gtlb_priv[1]); |
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1155 | vcpu_e500->gtlb_arch = NULL; | 1087 | vcpu_e500->gtlb_arch = NULL; |
1156 | } | 1088 | } |
1157 | 1089 | ||
1090 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
1091 | { | ||
1092 | sregs->u.e.mas0 = vcpu->arch.shared->mas0; | ||
1093 | sregs->u.e.mas1 = vcpu->arch.shared->mas1; | ||
1094 | sregs->u.e.mas2 = vcpu->arch.shared->mas2; | ||
1095 | sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; | ||
1096 | sregs->u.e.mas4 = vcpu->arch.shared->mas4; | ||
1097 | sregs->u.e.mas6 = vcpu->arch.shared->mas6; | ||
1098 | |||
1099 | sregs->u.e.mmucfg = vcpu->arch.mmucfg; | ||
1100 | sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; | ||
1101 | sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; | ||
1102 | sregs->u.e.tlbcfg[2] = 0; | ||
1103 | sregs->u.e.tlbcfg[3] = 0; | ||
1104 | } | ||
1105 | |||
1106 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
1107 | { | ||
1108 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | ||
1109 | vcpu->arch.shared->mas0 = sregs->u.e.mas0; | ||
1110 | vcpu->arch.shared->mas1 = sregs->u.e.mas1; | ||
1111 | vcpu->arch.shared->mas2 = sregs->u.e.mas2; | ||
1112 | vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; | ||
1113 | vcpu->arch.shared->mas4 = sregs->u.e.mas4; | ||
1114 | vcpu->arch.shared->mas6 = sregs->u.e.mas6; | ||
1115 | } | ||
1116 | |||
1117 | return 0; | ||
1118 | } | ||
1119 | |||
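As a usage sketch of the new accessors (hypothetical userspace snippet; vcpu_fd is assumed to be an open vcpu file descriptor): kvmppc_set_sregs_e500_tlb() only consumes the MAS values when the ARCH206 MMU feature bit is set, so a caller flags exactly what it is pushing:

	struct kvm_sregs sregs;

	memset(&sregs, 0, sizeof(sregs));
	sregs.u.e.features = KVM_SREGS_E_ARCH206_MMU;
	sregs.u.e.mas4 = 0;	/* illustrative value */
	sregs.u.e.mas6 = 0;	/* illustrative value */
	if (ioctl(vcpu_fd, KVM_SET_SREGS, &sregs) < 0)
		perror("KVM_SET_SREGS");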
1158 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | 1120 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, |
1159 | struct kvm_config_tlb *cfg) | 1121 | struct kvm_config_tlb *cfg) |
1160 | { | 1122 | { |
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1163 | char *virt; | 1125 | char *virt; |
1164 | struct page **pages; | 1126 | struct page **pages; |
1165 | struct tlbe_priv *privs[2] = {}; | 1127 | struct tlbe_priv *privs[2] = {}; |
1128 | u64 *g2h_bitmap = NULL; | ||
1166 | size_t array_len; | 1129 | size_t array_len; |
1167 | u32 sets; | 1130 | u32 sets; |
1168 | int num_pages, ret, i; | 1131 | int num_pages, ret, i; |
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1224 | if (!privs[0] || !privs[1]) | 1187 | if (!privs[0] || !privs[1]) |
1225 | goto err_put_page; | 1188 | goto err_put_page; |
1226 | 1189 | ||
1190 | g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1], | ||
1191 | GFP_KERNEL); | ||
1192 | if (!g2h_bitmap) | ||
1193 | goto err_put_page; | ||
1194 | |||
1227 | free_gtlb(vcpu_e500); | 1195 | free_gtlb(vcpu_e500); |
1228 | 1196 | ||
1229 | vcpu_e500->gtlb_priv[0] = privs[0]; | 1197 | vcpu_e500->gtlb_priv[0] = privs[0]; |
1230 | vcpu_e500->gtlb_priv[1] = privs[1]; | 1198 | vcpu_e500->gtlb_priv[1] = privs[1]; |
1199 | vcpu_e500->g2h_tlb1_map = g2h_bitmap; | ||
1231 | 1200 | ||
1232 | vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) | 1201 | vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) |
1233 | (virt + (cfg->array & (PAGE_SIZE - 1))); | 1202 | (virt + (cfg->array & (PAGE_SIZE - 1))); |
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1238 | vcpu_e500->gtlb_offset[0] = 0; | 1207 | vcpu_e500->gtlb_offset[0] = 0; |
1239 | vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; | 1208 | vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; |
1240 | 1209 | ||
1241 | vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1210 | vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; |
1211 | |||
1212 | vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | ||
1242 | if (params.tlb_sizes[0] <= 2048) | 1213 | if (params.tlb_sizes[0] <= 2048) |
1243 | vcpu_e500->tlb0cfg |= params.tlb_sizes[0]; | 1214 | vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0]; |
1244 | vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; | 1215 | vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; |
1245 | 1216 | ||
1246 | vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1217 | vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1247 | vcpu_e500->tlb1cfg |= params.tlb_sizes[1]; | 1218 | vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1]; |
1248 | vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; | 1219 | vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; |
1249 | 1220 | ||
1250 | vcpu_e500->shared_tlb_pages = pages; | 1221 | vcpu_e500->shared_tlb_pages = pages; |
1251 | vcpu_e500->num_shared_tlb_pages = num_pages; | 1222 | vcpu_e500->num_shared_tlb_pages = num_pages; |
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1256 | vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; | 1227 | vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; |
1257 | vcpu_e500->gtlb_params[1].sets = 1; | 1228 | vcpu_e500->gtlb_params[1].sets = 1; |
1258 | 1229 | ||
1230 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
1259 | return 0; | 1231 | return 0; |
1260 | 1232 | ||
1261 | err_put_page: | 1233 | err_put_page: |
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, | |||
1274 | struct kvm_dirty_tlb *dirty) | 1246 | struct kvm_dirty_tlb *dirty) |
1275 | { | 1247 | { |
1276 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 1248 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
1277 | 1249 | kvmppc_recalc_tlb1map_range(vcpu_e500); | |
1278 | clear_tlb_refs(vcpu_e500); | 1250 | clear_tlb_refs(vcpu_e500); |
1279 | return 0; | 1251 | return 0; |
1280 | } | 1252 | } |
1281 | 1253 | ||
1282 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | 1254 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) |
1283 | { | 1255 | { |
1256 | struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; | ||
1284 | int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); | 1257 | int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); |
1285 | int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; | 1258 | int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; |
1286 | 1259 | ||
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1357 | if (!vcpu_e500->gtlb_priv[1]) | 1330 | if (!vcpu_e500->gtlb_priv[1]) |
1358 | goto err; | 1331 | goto err; |
1359 | 1332 | ||
1360 | if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) | 1333 | vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) * |
1334 | vcpu_e500->gtlb_params[1].entries, | ||
1335 | GFP_KERNEL); | ||
1336 | if (!vcpu_e500->g2h_tlb1_map) | ||
1337 | goto err; | ||
1338 | |||
1339 | vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) * | ||
1340 | host_tlb_params[1].entries, | ||
1341 | GFP_KERNEL); | ||
1342 | if (!vcpu_e500->h2g_tlb1_rmap) | ||
1361 | goto err; | 1343 | goto err; |
1362 | 1344 | ||
1363 | /* Init TLB configuration register */ | 1345 | /* Init TLB configuration register */ |
1364 | vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & | 1346 | vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & |
1365 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1347 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1366 | vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries; | 1348 | vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries; |
1367 | vcpu_e500->tlb0cfg |= | 1349 | vcpu->arch.tlbcfg[0] |= |
1368 | vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; | 1350 | vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; |
1369 | 1351 | ||
1370 | vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & | 1352 | vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & |
1371 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1353 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1372 | vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries; | 1354 | vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries; |
1373 | vcpu_e500->tlb0cfg |= | 1355 | vcpu->arch.tlbcfg[1] |= |
1374 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; | 1356 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; |
1375 | 1357 | ||
1358 | kvmppc_recalc_tlb1map_range(vcpu_e500); | ||
1376 | return 0; | 1359 | return 0; |
1377 | 1360 | ||
1378 | err: | 1361 | err: |
@@ -1385,8 +1368,7 @@ err: | |||
1385 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) | 1368 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) |
1386 | { | 1369 | { |
1387 | free_gtlb(vcpu_e500); | 1370 | free_gtlb(vcpu_e500); |
1388 | kvmppc_e500_id_table_free(vcpu_e500); | 1371 | kfree(vcpu_e500->h2g_tlb1_rmap); |
1389 | |||
1390 | kfree(vcpu_e500->tlb_refs[0]); | 1372 | kfree(vcpu_e500->tlb_refs[0]); |
1391 | kfree(vcpu_e500->tlb_refs[1]); | 1373 | kfree(vcpu_e500->tlb_refs[1]); |
1392 | } | 1374 | } |
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h deleted file mode 100644 index 5c6d2d7bf058..000000000000 --- a/arch/powerpc/kvm/e500_tlb.h +++ /dev/null | |||
@@ -1,174 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Yu Liu, yu.liu@freescale.com | ||
5 | * | ||
6 | * Description: | ||
7 | * This file is based on arch/powerpc/kvm/44x_tlb.h, | ||
8 | * by Hollis Blanchard <hollisb@us.ibm.com>. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __KVM_E500_TLB_H__ | ||
16 | #define __KVM_E500_TLB_H__ | ||
17 | |||
18 | #include <linux/kvm_host.h> | ||
19 | #include <asm/mmu-book3e.h> | ||
20 | #include <asm/tlb.h> | ||
21 | #include <asm/kvm_e500.h> | ||
22 | |||
23 | /* This geometry is the legacy default -- can be overridden by userspace */ | ||
24 | #define KVM_E500_TLB0_WAY_SIZE 128 | ||
25 | #define KVM_E500_TLB0_WAY_NUM 2 | ||
26 | |||
27 | #define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) | ||
28 | #define KVM_E500_TLB1_SIZE 16 | ||
29 | |||
30 | #define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF)) | ||
31 | #define tlbsel_of(index) ((index) >> 16) | ||
32 | #define esel_of(index) ((index) & 0xFFFF) | ||
33 | |||
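A quick illustration of the packing these macros implement (values chosen arbitrarily):

	/* index_of(1, 5) == 0x10005: TLB1, entry 5.  The inverse holds:
	 * tlbsel_of(0x10005) == 1 and esel_of(0x10005) == 5, so a single
	 * int can carry "which TLB" and "which entry" together. */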
34 | #define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW) | ||
35 | #define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW) | ||
36 | #define MAS2_ATTRIB_MASK \ | ||
37 | (MAS2_X0 | MAS2_X1) | ||
38 | #define MAS3_ATTRIB_MASK \ | ||
39 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | ||
40 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | ||
41 | |||
42 | extern void kvmppc_dump_tlbs(struct kvm_vcpu *); | ||
43 | extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong); | ||
44 | extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *); | ||
45 | extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *); | ||
46 | extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int); | ||
47 | extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int); | ||
48 | extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int); | ||
49 | extern void kvmppc_e500_tlb_put(struct kvm_vcpu *); | ||
50 | extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int); | ||
51 | extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *); | ||
52 | extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *); | ||
53 | extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); | ||
54 | extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *); | ||
55 | |||
56 | /* TLB helper functions */ | ||
57 | static inline unsigned int | ||
58 | get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
59 | { | ||
60 | return (tlbe->mas1 >> 7) & 0x1f; | ||
61 | } | ||
62 | |||
63 | static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
64 | { | ||
65 | return tlbe->mas2 & 0xfffff000; | ||
66 | } | ||
67 | |||
68 | static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
69 | { | ||
70 | unsigned int pgsize = get_tlb_size(tlbe); | ||
71 | return 1ULL << 10 << pgsize; | ||
72 | } | ||
73 | |||
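A worked instance of the size computation above, assuming the standard Book3E TSIZE encodings (4K == 2, 256M == 18):

	/* get_tlb_bytes() returns 2^(10 + tsize) bytes:
	 *   tsize = BOOK3E_PAGESZ_4K   (2)  -> 1ULL << 10 << 2  == 4096
	 *   tsize = BOOK3E_PAGESZ_256M (18) -> 1ULL << 10 << 18 == 0x10000000 */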
74 | static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
75 | { | ||
76 | u64 bytes = get_tlb_bytes(tlbe); | ||
77 | return get_tlb_eaddr(tlbe) + bytes - 1; | ||
78 | } | ||
79 | |||
80 | static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
81 | { | ||
82 | return tlbe->mas7_3 & ~0xfffULL; | ||
83 | } | ||
84 | |||
85 | static inline unsigned int | ||
86 | get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
87 | { | ||
88 | return (tlbe->mas1 >> 16) & 0xff; | ||
89 | } | ||
90 | |||
91 | static inline unsigned int | ||
92 | get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
93 | { | ||
94 | return (tlbe->mas1 >> 12) & 0x1; | ||
95 | } | ||
96 | |||
97 | static inline unsigned int | ||
98 | get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
99 | { | ||
100 | return (tlbe->mas1 >> 31) & 0x1; | ||
101 | } | ||
102 | |||
103 | static inline unsigned int | ||
104 | get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
105 | { | ||
106 | return (tlbe->mas1 >> 30) & 0x1; | ||
107 | } | ||
108 | |||
109 | static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) | ||
110 | { | ||
111 | return vcpu->arch.pid & 0xff; | ||
112 | } | ||
113 | |||
114 | static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu) | ||
115 | { | ||
116 | return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS)); | ||
117 | } | ||
118 | |||
119 | static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu) | ||
120 | { | ||
121 | return !!(vcpu->arch.shared->msr & MSR_PR); | ||
122 | } | ||
123 | |||
124 | static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu) | ||
125 | { | ||
126 | return (vcpu->arch.shared->mas6 >> 16) & 0xff; | ||
127 | } | ||
128 | |||
129 | static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu) | ||
130 | { | ||
131 | return vcpu->arch.shared->mas6 & 0x1; | ||
132 | } | ||
133 | |||
134 | static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu) | ||
135 | { | ||
136 | /* | ||
137 | * The manual says that tlbsel is 2 bits wide. | ||
138 | * Since we only have two TLBs, only the lower bit is used. | ||
139 | */ | ||
140 | return (vcpu->arch.shared->mas0 >> 28) & 0x1; | ||
141 | } | ||
142 | |||
143 | static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu) | ||
144 | { | ||
145 | return vcpu->arch.shared->mas0 & 0xfff; | ||
146 | } | ||
147 | |||
148 | static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu) | ||
149 | { | ||
150 | return (vcpu->arch.shared->mas0 >> 16) & 0xfff; | ||
151 | } | ||
152 | |||
153 | static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
154 | const struct kvm_book3e_206_tlb_entry *tlbe) | ||
155 | { | ||
156 | gpa_t gpa; | ||
157 | |||
158 | if (!get_tlb_v(tlbe)) | ||
159 | return 0; | ||
160 | |||
161 | /* Does it match current guest AS? */ | ||
162 | /* XXX what about IS != DS? */ | ||
163 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) | ||
164 | return 0; | ||
165 | |||
166 | gpa = get_tlb_raddr(tlbe); | ||
167 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
168 | /* Mapping is not for RAM. */ | ||
169 | return 0; | ||
170 | |||
171 | return 1; | ||
172 | } | ||
173 | |||
174 | #endif /* __KVM_E500_TLB_H__ */ | ||
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c new file mode 100644 index 000000000000..fe6c1de6b701 --- /dev/null +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved. | ||
3 | * | ||
4 | * Author: Varun Sethi, <varun.sethi@freescale.com> | ||
5 | * | ||
6 | * Description: | ||
7 | * This file is derived from arch/powerpc/kvm/e500.c, | ||
8 | * by Yu Liu <yu.liu@freescale.com>. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License, version 2, as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kvm_host.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/export.h> | ||
19 | |||
20 | #include <asm/reg.h> | ||
21 | #include <asm/cputable.h> | ||
22 | #include <asm/tlbflush.h> | ||
23 | #include <asm/kvm_ppc.h> | ||
24 | #include <asm/dbell.h> | ||
25 | |||
26 | #include "booke.h" | ||
27 | #include "e500.h" | ||
28 | |||
29 | void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type) | ||
30 | { | ||
31 | enum ppc_dbell dbell_type; | ||
32 | unsigned long tag; | ||
33 | |||
34 | switch (type) { | ||
35 | case INT_CLASS_NONCRIT: | ||
36 | dbell_type = PPC_G_DBELL; | ||
37 | break; | ||
38 | case INT_CLASS_CRIT: | ||
39 | dbell_type = PPC_G_DBELL_CRIT; | ||
40 | break; | ||
41 | case INT_CLASS_MC: | ||
42 | dbell_type = PPC_G_DBELL_MC; | ||
43 | break; | ||
44 | default: | ||
45 | WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | |||
50 | tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id; | ||
51 | mb(); | ||
52 | ppc_msgsnd(dbell_type, 0, tag); | ||
53 | } | ||
54 | |||
55 | /* gtlbe must not be mapped by more than one host tlb entry */ | ||
56 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
57 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
58 | { | ||
59 | unsigned int tid, ts; | ||
60 | u32 val, eaddr, lpid; | ||
61 | unsigned long flags; | ||
62 | |||
63 | ts = get_tlb_ts(gtlbe); | ||
64 | tid = get_tlb_tid(gtlbe); | ||
65 | lpid = vcpu_e500->vcpu.kvm->arch.lpid; | ||
66 | |||
67 | /* We search the host TLB to invalidate its shadow TLB entry */ | ||
68 | val = (tid << 16) | ts; | ||
69 | eaddr = get_tlb_eaddr(gtlbe); | ||
70 | |||
71 | local_irq_save(flags); | ||
72 | |||
73 | mtspr(SPRN_MAS6, val); | ||
74 | mtspr(SPRN_MAS5, MAS5_SGS | lpid); | ||
75 | |||
76 | asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); | ||
77 | val = mfspr(SPRN_MAS1); | ||
78 | if (val & MAS1_VALID) { | ||
79 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | ||
80 | asm volatile("tlbwe"); | ||
81 | } | ||
82 | mtspr(SPRN_MAS5, 0); | ||
83 | /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */ | ||
84 | mtspr(SPRN_MAS8, 0); | ||
85 | isync(); | ||
86 | |||
87 | local_irq_restore(flags); | ||
88 | } | ||
89 | |||
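The MAS6 search value built above packs the PID into bits 16-23 and the AS bit into bit 0 — the same layout the get_cur_spid()/get_cur_sas() helpers read back. For example:

	/* tid = 0x2a, ts = 1  ->  val = (0x2a << 16) | 1 == 0x002a0001.
	 * tlbsx then searches for eaddr under that PID/AS, with
	 * MAS5[SGS] | LPID restricting the match to this guest. */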
90 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
91 | { | ||
92 | unsigned long flags; | ||
93 | |||
94 | local_irq_save(flags); | ||
95 | mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid); | ||
96 | asm volatile("tlbilxlpid"); | ||
97 | mtspr(SPRN_MAS5, 0); | ||
98 | local_irq_restore(flags); | ||
99 | } | ||
100 | |||
101 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
102 | { | ||
103 | vcpu->arch.pid = pid; | ||
104 | } | ||
105 | |||
106 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
107 | { | ||
108 | } | ||
109 | |||
110 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
111 | { | ||
112 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
113 | |||
114 | kvmppc_booke_vcpu_load(vcpu, cpu); | ||
115 | |||
116 | mtspr(SPRN_LPID, vcpu->kvm->arch.lpid); | ||
117 | mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr); | ||
118 | mtspr(SPRN_GPIR, vcpu->vcpu_id); | ||
119 | mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp); | ||
120 | mtspr(SPRN_EPLC, vcpu->arch.eplc); | ||
121 | mtspr(SPRN_EPSC, vcpu->arch.epsc); | ||
122 | |||
123 | mtspr(SPRN_GIVPR, vcpu->arch.ivpr); | ||
124 | mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); | ||
125 | mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); | ||
126 | mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0); | ||
127 | mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1); | ||
128 | mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2); | ||
129 | mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3); | ||
130 | |||
131 | mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0); | ||
132 | mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1); | ||
133 | |||
134 | mtspr(SPRN_GEPR, vcpu->arch.epr); | ||
135 | mtspr(SPRN_GDEAR, vcpu->arch.shared->dar); | ||
136 | mtspr(SPRN_GESR, vcpu->arch.shared->esr); | ||
137 | |||
138 | if (vcpu->arch.oldpir != mfspr(SPRN_PIR)) | ||
139 | kvmppc_e500_tlbil_all(vcpu_e500); | ||
140 | |||
141 | kvmppc_load_guest_fp(vcpu); | ||
142 | } | ||
143 | |||
144 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | vcpu->arch.eplc = mfspr(SPRN_EPLC); | ||
147 | vcpu->arch.epsc = mfspr(SPRN_EPSC); | ||
148 | |||
149 | vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0); | ||
150 | vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1); | ||
151 | vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2); | ||
152 | vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3); | ||
153 | |||
154 | vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0); | ||
155 | vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1); | ||
156 | |||
157 | vcpu->arch.epr = mfspr(SPRN_GEPR); | ||
158 | vcpu->arch.shared->dar = mfspr(SPRN_GDEAR); | ||
159 | vcpu->arch.shared->esr = mfspr(SPRN_GESR); | ||
160 | |||
161 | vcpu->arch.oldpir = mfspr(SPRN_PIR); | ||
162 | |||
163 | kvmppc_booke_vcpu_put(vcpu); | ||
164 | } | ||
165 | |||
166 | int kvmppc_core_check_processor_compat(void) | ||
167 | { | ||
168 | int r; | ||
169 | |||
170 | if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0) | ||
171 | r = 0; | ||
172 | else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) | ||
173 | r = 0; | ||
174 | else | ||
175 | r = -ENOTSUPP; | ||
176 | |||
177 | return r; | ||
178 | } | ||
179 | |||
180 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | ||
181 | { | ||
182 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
183 | |||
184 | vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | | ||
185 | SPRN_EPCR_DUVD; | ||
186 | vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP; | ||
187 | vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT); | ||
188 | vcpu->arch.epsc = vcpu->arch.eplc; | ||
189 | |||
190 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
191 | vcpu_e500->svr = mfspr(SPRN_SVR); | ||
192 | |||
193 | vcpu->arch.cpu_type = KVM_CPU_E500MC; | ||
194 | |||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
199 | { | ||
200 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
201 | |||
202 | sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM | | ||
203 | KVM_SREGS_E_PC; | ||
204 | sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL; | ||
205 | |||
206 | sregs->u.e.impl.fsl.features = 0; | ||
207 | sregs->u.e.impl.fsl.svr = vcpu_e500->svr; | ||
208 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; | ||
209 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; | ||
210 | |||
211 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); | ||
212 | |||
213 | sregs->u.e.ivor_high[3] = | ||
214 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | ||
215 | sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL]; | ||
216 | sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT]; | ||
217 | |||
218 | kvmppc_get_sregs_ivor(vcpu, sregs); | ||
219 | } | ||
220 | |||
221 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
222 | { | ||
223 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
224 | int ret; | ||
225 | |||
226 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { | ||
227 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; | ||
228 | vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0; | ||
229 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; | ||
230 | } | ||
231 | |||
232 | ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); | ||
233 | if (ret < 0) | ||
234 | return ret; | ||
235 | |||
236 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | ||
237 | return 0; | ||
238 | |||
239 | if (sregs->u.e.features & KVM_SREGS_E_PM) { | ||
240 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = | ||
241 | sregs->u.e.ivor_high[3]; | ||
242 | } | ||
243 | |||
244 | if (sregs->u.e.features & KVM_SREGS_E_PC) { | ||
245 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = | ||
246 | sregs->u.e.ivor_high[4]; | ||
247 | vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = | ||
248 | sregs->u.e.ivor_high[5]; | ||
249 | } | ||
250 | |||
251 | return kvmppc_set_sregs_ivor(vcpu, sregs); | ||
252 | } | ||
253 | |||
254 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
255 | { | ||
256 | struct kvmppc_vcpu_e500 *vcpu_e500; | ||
257 | struct kvm_vcpu *vcpu; | ||
258 | int err; | ||
259 | |||
260 | vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
261 | if (!vcpu_e500) { | ||
262 | err = -ENOMEM; | ||
263 | goto out; | ||
264 | } | ||
265 | vcpu = &vcpu_e500->vcpu; | ||
266 | |||
267 | /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */ | ||
268 | vcpu->arch.oldpir = 0xffffffff; | ||
269 | |||
270 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
271 | if (err) | ||
272 | goto free_vcpu; | ||
273 | |||
274 | err = kvmppc_e500_tlb_init(vcpu_e500); | ||
275 | if (err) | ||
276 | goto uninit_vcpu; | ||
277 | |||
278 | vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
279 | if (!vcpu->arch.shared) | ||
280 | goto uninit_tlb; | ||
281 | |||
282 | return vcpu; | ||
283 | |||
284 | uninit_tlb: | ||
285 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
286 | uninit_vcpu: | ||
287 | kvm_vcpu_uninit(vcpu); | ||
288 | |||
289 | free_vcpu: | ||
290 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | ||
291 | out: | ||
292 | return ERR_PTR(err); | ||
293 | } | ||
294 | |||
295 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
298 | |||
299 | free_page((unsigned long)vcpu->arch.shared); | ||
300 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
301 | kvm_vcpu_uninit(vcpu); | ||
302 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | ||
303 | } | ||
304 | |||
305 | int kvmppc_core_init_vm(struct kvm *kvm) | ||
306 | { | ||
307 | int lpid; | ||
308 | |||
309 | lpid = kvmppc_alloc_lpid(); | ||
310 | if (lpid < 0) | ||
311 | return lpid; | ||
312 | |||
313 | kvm->arch.lpid = lpid; | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | void kvmppc_core_destroy_vm(struct kvm *kvm) | ||
318 | { | ||
319 | kvmppc_free_lpid(kvm->arch.lpid); | ||
320 | } | ||
321 | |||
322 | static int __init kvmppc_e500mc_init(void) | ||
323 | { | ||
324 | int r; | ||
325 | |||
326 | r = kvmppc_booke_init(); | ||
327 | if (r) | ||
328 | return r; | ||
329 | |||
330 | kvmppc_init_lpid(64); | ||
331 | kvmppc_claim_lpid(0); /* host */ | ||
332 | |||
333 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE); | ||
334 | } | ||
335 | |||
336 | static void __exit kvmppc_e500mc_exit(void) | ||
337 | { | ||
338 | kvmppc_booke_exit(); | ||
339 | } | ||
340 | |||
341 | module_init(kvmppc_e500mc_init); | ||
342 | module_exit(kvmppc_e500mc_exit); | ||
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 968f40101883..f90e86dea7a2 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/string.h> | 24 | #include <linux/string.h> |
25 | #include <linux/kvm_host.h> | 25 | #include <linux/kvm_host.h> |
26 | #include <linux/clockchips.h> | ||
26 | 27 | ||
27 | #include <asm/reg.h> | 28 | #include <asm/reg.h> |
28 | #include <asm/time.h> | 29 | #include <asm/time.h> |
@@ -35,7 +36,9 @@ | |||
35 | #define OP_TRAP 3 | 36 | #define OP_TRAP 3 |
36 | #define OP_TRAP_64 2 | 37 | #define OP_TRAP_64 2 |
37 | 38 | ||
39 | #define OP_31_XOP_TRAP 4 | ||
38 | #define OP_31_XOP_LWZX 23 | 40 | #define OP_31_XOP_LWZX 23 |
41 | #define OP_31_XOP_TRAP_64 68 | ||
39 | #define OP_31_XOP_LBZX 87 | 42 | #define OP_31_XOP_LBZX 87 |
40 | #define OP_31_XOP_STWX 151 | 43 | #define OP_31_XOP_STWX 151 |
41 | #define OP_31_XOP_STBX 215 | 44 | #define OP_31_XOP_STBX 215 |
@@ -102,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
102 | */ | 105 | */ |
103 | 106 | ||
104 | dec_time = vcpu->arch.dec; | 107 | dec_time = vcpu->arch.dec; |
105 | dec_time *= 1000; | 108 | /* |
106 | do_div(dec_time, tb_ticks_per_usec); | 109 | * Guest timebase ticks at the same frequency as host decrementer. |
110 | * So use the host decrementer calculations for decrementer emulation. | ||
111 | */ | ||
112 | dec_time = dec_time << decrementer_clockevent.shift; | ||
113 | do_div(dec_time, decrementer_clockevent.mult); | ||
107 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); | 114 | dec_nsec = do_div(dec_time, NSEC_PER_SEC); |
108 | hrtimer_start(&vcpu->arch.dec_timer, | 115 | hrtimer_start(&vcpu->arch.dec_timer, |
109 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); | 116 | ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); |
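The shift/mult arithmetic above inverts the normal clockevent scaling: clockevents compute ticks = (ns * mult) >> shift, so nanoseconds are recovered as ns = (ticks << shift) / mult. A self-contained sketch of the same conversion (the function name is ours):

	/* Convert decrementer ticks back to nanoseconds by inverting
	 * the clockevent's ns->ticks scaling. */
	static u64 dec_ticks_to_ns(u64 ticks, u32 mult, u32 shift)
	{
		u64 ns = ticks << shift;

		do_div(ns, mult);	/* ns = (ticks << shift) / mult */
		return ns;
	}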
@@ -141,14 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) | |||
141 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 148 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
142 | { | 149 | { |
143 | u32 inst = kvmppc_get_last_inst(vcpu); | 150 | u32 inst = kvmppc_get_last_inst(vcpu); |
144 | u32 ea; | 151 | int ra = get_ra(inst); |
145 | int ra; | 152 | int rs = get_rs(inst); |
146 | int rb; | 153 | int rt = get_rt(inst); |
147 | int rs; | 154 | int sprn = get_sprn(inst); |
148 | int rt; | ||
149 | int sprn; | ||
150 | enum emulation_result emulated = EMULATE_DONE; | 155 | enum emulation_result emulated = EMULATE_DONE; |
151 | int advance = 1; | 156 | int advance = 1; |
157 | ulong spr_val = 0; | ||
152 | 158 | ||
153 | /* this default type might be overwritten by subcategories */ | 159 | /* this default type might be overwritten by subcategories */ |
154 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 160 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
@@ -170,173 +176,143 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
170 | case 31: | 176 | case 31: |
171 | switch (get_xop(inst)) { | 177 | switch (get_xop(inst)) { |
172 | 178 | ||
179 | case OP_31_XOP_TRAP: | ||
180 | #ifdef CONFIG_64BIT | ||
181 | case OP_31_XOP_TRAP_64: | ||
182 | #endif | ||
183 | #ifdef CONFIG_PPC_BOOK3S | ||
184 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | ||
185 | #else | ||
186 | kvmppc_core_queue_program(vcpu, | ||
187 | vcpu->arch.shared->esr | ESR_PTR); | ||
188 | #endif | ||
189 | advance = 0; | ||
190 | break; | ||
173 | case OP_31_XOP_LWZX: | 191 | case OP_31_XOP_LWZX: |
174 | rt = get_rt(inst); | ||
175 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 192 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
176 | break; | 193 | break; |
177 | 194 | ||
178 | case OP_31_XOP_LBZX: | 195 | case OP_31_XOP_LBZX: |
179 | rt = get_rt(inst); | ||
180 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 196 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
181 | break; | 197 | break; |
182 | 198 | ||
183 | case OP_31_XOP_LBZUX: | 199 | case OP_31_XOP_LBZUX: |
184 | rt = get_rt(inst); | ||
185 | ra = get_ra(inst); | ||
186 | rb = get_rb(inst); | ||
187 | |||
188 | ea = kvmppc_get_gpr(vcpu, rb); | ||
189 | if (ra) | ||
190 | ea += kvmppc_get_gpr(vcpu, ra); | ||
191 | |||
192 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 200 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
193 | kvmppc_set_gpr(vcpu, ra, ea); | 201 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
194 | break; | 202 | break; |
195 | 203 | ||
196 | case OP_31_XOP_STWX: | 204 | case OP_31_XOP_STWX: |
197 | rs = get_rs(inst); | ||
198 | emulated = kvmppc_handle_store(run, vcpu, | 205 | emulated = kvmppc_handle_store(run, vcpu, |
199 | kvmppc_get_gpr(vcpu, rs), | 206 | kvmppc_get_gpr(vcpu, rs), |
200 | 4, 1); | 207 | 4, 1); |
201 | break; | 208 | break; |
202 | 209 | ||
203 | case OP_31_XOP_STBX: | 210 | case OP_31_XOP_STBX: |
204 | rs = get_rs(inst); | ||
205 | emulated = kvmppc_handle_store(run, vcpu, | 211 | emulated = kvmppc_handle_store(run, vcpu, |
206 | kvmppc_get_gpr(vcpu, rs), | 212 | kvmppc_get_gpr(vcpu, rs), |
207 | 1, 1); | 213 | 1, 1); |
208 | break; | 214 | break; |
209 | 215 | ||
210 | case OP_31_XOP_STBUX: | 216 | case OP_31_XOP_STBUX: |
211 | rs = get_rs(inst); | ||
212 | ra = get_ra(inst); | ||
213 | rb = get_rb(inst); | ||
214 | |||
215 | ea = kvmppc_get_gpr(vcpu, rb); | ||
216 | if (ra) | ||
217 | ea += kvmppc_get_gpr(vcpu, ra); | ||
218 | |||
219 | emulated = kvmppc_handle_store(run, vcpu, | 217 | emulated = kvmppc_handle_store(run, vcpu, |
220 | kvmppc_get_gpr(vcpu, rs), | 218 | kvmppc_get_gpr(vcpu, rs), |
221 | 1, 1); | 219 | 1, 1); |
222 | kvmppc_set_gpr(vcpu, rs, ea); | 220 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
223 | break; | 221 | break; |
224 | 222 | ||
225 | case OP_31_XOP_LHAX: | 223 | case OP_31_XOP_LHAX: |
226 | rt = get_rt(inst); | ||
227 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 224 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
228 | break; | 225 | break; |
229 | 226 | ||
230 | case OP_31_XOP_LHZX: | 227 | case OP_31_XOP_LHZX: |
231 | rt = get_rt(inst); | ||
232 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 228 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
233 | break; | 229 | break; |
234 | 230 | ||
235 | case OP_31_XOP_LHZUX: | 231 | case OP_31_XOP_LHZUX: |
236 | rt = get_rt(inst); | ||
237 | ra = get_ra(inst); | ||
238 | rb = get_rb(inst); | ||
239 | |||
240 | ea = kvmppc_get_gpr(vcpu, rb); | ||
241 | if (ra) | ||
242 | ea += kvmppc_get_gpr(vcpu, ra); | ||
243 | |||
244 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 232 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
245 | kvmppc_set_gpr(vcpu, ra, ea); | 233 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
246 | break; | 234 | break; |
247 | 235 | ||
248 | case OP_31_XOP_MFSPR: | 236 | case OP_31_XOP_MFSPR: |
249 | sprn = get_sprn(inst); | ||
250 | rt = get_rt(inst); | ||
251 | |||
252 | switch (sprn) { | 237 | switch (sprn) { |
253 | case SPRN_SRR0: | 238 | case SPRN_SRR0: |
254 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); | 239 | spr_val = vcpu->arch.shared->srr0; |
255 | break; | 240 | break; |
256 | case SPRN_SRR1: | 241 | case SPRN_SRR1: |
257 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); | 242 | spr_val = vcpu->arch.shared->srr1; |
258 | break; | 243 | break; |
259 | case SPRN_PVR: | 244 | case SPRN_PVR: |
260 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; | 245 | spr_val = vcpu->arch.pvr; |
246 | break; | ||
261 | case SPRN_PIR: | 247 | case SPRN_PIR: |
262 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; | 248 | spr_val = vcpu->vcpu_id; |
249 | break; | ||
263 | case SPRN_MSSSR0: | 250 | case SPRN_MSSSR0: |
264 | kvmppc_set_gpr(vcpu, rt, 0); break; | 251 | spr_val = 0; |
252 | break; | ||
265 | 253 | ||
266 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 254 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
267 | * the guest can always access the real TB anyway. | 255 | * the guest can always access the real TB anyway. |
268 | * In fact, we probably will never see these traps. */ | 256 | * In fact, we probably will never see these traps. */ |
269 | case SPRN_TBWL: | 257 | case SPRN_TBWL: |
270 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; | 258 | spr_val = get_tb() >> 32; |
259 | break; | ||
271 | case SPRN_TBWU: | 260 | case SPRN_TBWU: |
272 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; | 261 | spr_val = get_tb(); |
262 | break; | ||
273 | 263 | ||
274 | case SPRN_SPRG0: | 264 | case SPRN_SPRG0: |
275 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); | 265 | spr_val = vcpu->arch.shared->sprg0; |
276 | break; | 266 | break; |
277 | case SPRN_SPRG1: | 267 | case SPRN_SPRG1: |
278 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); | 268 | spr_val = vcpu->arch.shared->sprg1; |
279 | break; | 269 | break; |
280 | case SPRN_SPRG2: | 270 | case SPRN_SPRG2: |
281 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); | 271 | spr_val = vcpu->arch.shared->sprg2; |
282 | break; | 272 | break; |
283 | case SPRN_SPRG3: | 273 | case SPRN_SPRG3: |
284 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); | 274 | spr_val = vcpu->arch.shared->sprg3; |
285 | break; | 275 | break; |
286 | /* Note: SPRG4-7 are user-readable, so we don't get | 276 | /* Note: SPRG4-7 are user-readable, so we don't get |
287 | * a trap. */ | 277 | * a trap. */ |
288 | 278 | ||
289 | case SPRN_DEC: | 279 | case SPRN_DEC: |
290 | { | 280 | spr_val = kvmppc_get_dec(vcpu, get_tb()); |
291 | kvmppc_set_gpr(vcpu, rt, | ||
292 | kvmppc_get_dec(vcpu, get_tb())); | ||
293 | break; | 281 | break; |
294 | } | ||
295 | default: | 282 | default: |
296 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); | 283 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, |
297 | if (emulated == EMULATE_FAIL) { | 284 | &spr_val); |
298 | printk("mfspr: unknown spr %x\n", sprn); | 285 | if (unlikely(emulated == EMULATE_FAIL)) { |
299 | kvmppc_set_gpr(vcpu, rt, 0); | 286 | printk(KERN_INFO "mfspr: unknown spr " |
287 | "0x%x\n", sprn); | ||
300 | } | 288 | } |
301 | break; | 289 | break; |
302 | } | 290 | } |
291 | kvmppc_set_gpr(vcpu, rt, spr_val); | ||
303 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | 292 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); |
304 | break; | 293 | break; |
305 | 294 | ||
306 | case OP_31_XOP_STHX: | 295 | case OP_31_XOP_STHX: |
307 | rs = get_rs(inst); | ||
308 | ra = get_ra(inst); | ||
309 | rb = get_rb(inst); | ||
310 | |||
311 | emulated = kvmppc_handle_store(run, vcpu, | 296 | emulated = kvmppc_handle_store(run, vcpu, |
312 | kvmppc_get_gpr(vcpu, rs), | 297 | kvmppc_get_gpr(vcpu, rs), |
313 | 2, 1); | 298 | 2, 1); |
314 | break; | 299 | break; |
315 | 300 | ||
316 | case OP_31_XOP_STHUX: | 301 | case OP_31_XOP_STHUX: |
317 | rs = get_rs(inst); | ||
318 | ra = get_ra(inst); | ||
319 | rb = get_rb(inst); | ||
320 | |||
321 | ea = kvmppc_get_gpr(vcpu, rb); | ||
322 | if (ra) | ||
323 | ea += kvmppc_get_gpr(vcpu, ra); | ||
324 | |||
325 | emulated = kvmppc_handle_store(run, vcpu, | 302 | emulated = kvmppc_handle_store(run, vcpu, |
326 | kvmppc_get_gpr(vcpu, rs), | 303 | kvmppc_get_gpr(vcpu, rs), |
327 | 2, 1); | 304 | 2, 1); |
328 | kvmppc_set_gpr(vcpu, ra, ea); | 305 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
329 | break; | 306 | break; |
330 | 307 | ||
331 | case OP_31_XOP_MTSPR: | 308 | case OP_31_XOP_MTSPR: |
332 | sprn = get_sprn(inst); | 309 | spr_val = kvmppc_get_gpr(vcpu, rs); |
333 | rs = get_rs(inst); | ||
334 | switch (sprn) { | 310 | switch (sprn) { |
335 | case SPRN_SRR0: | 311 | case SPRN_SRR0: |
336 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); | 312 | vcpu->arch.shared->srr0 = spr_val; |
337 | break; | 313 | break; |
338 | case SPRN_SRR1: | 314 | case SPRN_SRR1: |
339 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); | 315 | vcpu->arch.shared->srr1 = spr_val; |
340 | break; | 316 | break; |
341 | 317 | ||
342 | /* XXX We need to context-switch the timebase for | 318 | /* XXX We need to context-switch the timebase for |
@@ -347,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
347 | case SPRN_MSSSR0: break; | 323 | case SPRN_MSSSR0: break; |
348 | 324 | ||
349 | case SPRN_DEC: | 325 | case SPRN_DEC: |
350 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); | 326 | vcpu->arch.dec = spr_val; |
351 | kvmppc_emulate_dec(vcpu); | 327 | kvmppc_emulate_dec(vcpu); |
352 | break; | 328 | break; |
353 | 329 | ||
354 | case SPRN_SPRG0: | 330 | case SPRN_SPRG0: |
355 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); | 331 | vcpu->arch.shared->sprg0 = spr_val; |
356 | break; | 332 | break; |
357 | case SPRN_SPRG1: | 333 | case SPRN_SPRG1: |
358 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); | 334 | vcpu->arch.shared->sprg1 = spr_val; |
359 | break; | 335 | break; |
360 | case SPRN_SPRG2: | 336 | case SPRN_SPRG2: |
361 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); | 337 | vcpu->arch.shared->sprg2 = spr_val; |
362 | break; | 338 | break; |
363 | case SPRN_SPRG3: | 339 | case SPRN_SPRG3: |
364 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); | 340 | vcpu->arch.shared->sprg3 = spr_val; |
365 | break; | 341 | break; |
366 | 342 | ||
367 | default: | 343 | default: |
368 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 344 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, |
345 | spr_val); | ||
369 | if (emulated == EMULATE_FAIL) | 346 | if (emulated == EMULATE_FAIL) |
370 | printk("mtspr: unknown spr %x\n", sprn); | 347 | printk(KERN_INFO "mtspr: unknown spr " |
348 | "0x%x\n", sprn); | ||
371 | break; | 349 | break; |
372 | } | 350 | } |
373 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | 351 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); |
@@ -382,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
382 | break; | 360 | break; |
383 | 361 | ||
384 | case OP_31_XOP_LWBRX: | 362 | case OP_31_XOP_LWBRX: |
385 | rt = get_rt(inst); | ||
386 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | 363 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); |
387 | break; | 364 | break; |
388 | 365 | ||
@@ -390,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
390 | break; | 367 | break; |
391 | 368 | ||
392 | case OP_31_XOP_STWBRX: | 369 | case OP_31_XOP_STWBRX: |
393 | rs = get_rs(inst); | ||
394 | ra = get_ra(inst); | ||
395 | rb = get_rb(inst); | ||
396 | |||
397 | emulated = kvmppc_handle_store(run, vcpu, | 370 | emulated = kvmppc_handle_store(run, vcpu, |
398 | kvmppc_get_gpr(vcpu, rs), | 371 | kvmppc_get_gpr(vcpu, rs), |
399 | 4, 0); | 372 | 4, 0); |
400 | break; | 373 | break; |
401 | 374 | ||
402 | case OP_31_XOP_LHBRX: | 375 | case OP_31_XOP_LHBRX: |
403 | rt = get_rt(inst); | ||
404 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 376 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
405 | break; | 377 | break; |
406 | 378 | ||
407 | case OP_31_XOP_STHBRX: | 379 | case OP_31_XOP_STHBRX: |
408 | rs = get_rs(inst); | ||
409 | ra = get_ra(inst); | ||
410 | rb = get_rb(inst); | ||
411 | |||
412 | emulated = kvmppc_handle_store(run, vcpu, | 380 | emulated = kvmppc_handle_store(run, vcpu, |
413 | kvmppc_get_gpr(vcpu, rs), | 381 | kvmppc_get_gpr(vcpu, rs), |
414 | 2, 0); | 382 | 2, 0); |
@@ -421,99 +389,78 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
421 | break; | 389 | break; |
422 | 390 | ||
423 | case OP_LWZ: | 391 | case OP_LWZ: |
424 | rt = get_rt(inst); | ||
425 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 392 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
426 | break; | 393 | break; |
427 | 394 | ||
428 | case OP_LWZU: | 395 | case OP_LWZU: |
429 | ra = get_ra(inst); | ||
430 | rt = get_rt(inst); | ||
431 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 396 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
432 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 397 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
433 | break; | 398 | break; |
434 | 399 | ||
435 | case OP_LBZ: | 400 | case OP_LBZ: |
436 | rt = get_rt(inst); | ||
437 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 401 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
438 | break; | 402 | break; |
439 | 403 | ||
440 | case OP_LBZU: | 404 | case OP_LBZU: |
441 | ra = get_ra(inst); | ||
442 | rt = get_rt(inst); | ||
443 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 405 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
444 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 406 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
445 | break; | 407 | break; |
446 | 408 | ||
447 | case OP_STW: | 409 | case OP_STW: |
448 | rs = get_rs(inst); | ||
449 | emulated = kvmppc_handle_store(run, vcpu, | 410 | emulated = kvmppc_handle_store(run, vcpu, |
450 | kvmppc_get_gpr(vcpu, rs), | 411 | kvmppc_get_gpr(vcpu, rs), |
451 | 4, 1); | 412 | 4, 1); |
452 | break; | 413 | break; |
453 | 414 | ||
454 | case OP_STWU: | 415 | case OP_STWU: |
455 | ra = get_ra(inst); | ||
456 | rs = get_rs(inst); | ||
457 | emulated = kvmppc_handle_store(run, vcpu, | 416 | emulated = kvmppc_handle_store(run, vcpu, |
458 | kvmppc_get_gpr(vcpu, rs), | 417 | kvmppc_get_gpr(vcpu, rs), |
459 | 4, 1); | 418 | 4, 1); |
460 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 419 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
461 | break; | 420 | break; |
462 | 421 | ||
463 | case OP_STB: | 422 | case OP_STB: |
464 | rs = get_rs(inst); | ||
465 | emulated = kvmppc_handle_store(run, vcpu, | 423 | emulated = kvmppc_handle_store(run, vcpu, |
466 | kvmppc_get_gpr(vcpu, rs), | 424 | kvmppc_get_gpr(vcpu, rs), |
467 | 1, 1); | 425 | 1, 1); |
468 | break; | 426 | break; |
469 | 427 | ||
470 | case OP_STBU: | 428 | case OP_STBU: |
471 | ra = get_ra(inst); | ||
472 | rs = get_rs(inst); | ||
473 | emulated = kvmppc_handle_store(run, vcpu, | 429 | emulated = kvmppc_handle_store(run, vcpu, |
474 | kvmppc_get_gpr(vcpu, rs), | 430 | kvmppc_get_gpr(vcpu, rs), |
475 | 1, 1); | 431 | 1, 1); |
476 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 432 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
477 | break; | 433 | break; |
478 | 434 | ||
479 | case OP_LHZ: | 435 | case OP_LHZ: |
480 | rt = get_rt(inst); | ||
481 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 436 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
482 | break; | 437 | break; |
483 | 438 | ||
484 | case OP_LHZU: | 439 | case OP_LHZU: |
485 | ra = get_ra(inst); | ||
486 | rt = get_rt(inst); | ||
487 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 440 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
488 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 441 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
489 | break; | 442 | break; |
490 | 443 | ||
491 | case OP_LHA: | 444 | case OP_LHA: |
492 | rt = get_rt(inst); | ||
493 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 445 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
494 | break; | 446 | break; |
495 | 447 | ||
496 | case OP_LHAU: | 448 | case OP_LHAU: |
497 | ra = get_ra(inst); | ||
498 | rt = get_rt(inst); | ||
499 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); | 449 | emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); |
500 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 450 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
501 | break; | 451 | break; |
502 | 452 | ||
503 | case OP_STH: | 453 | case OP_STH: |
504 | rs = get_rs(inst); | ||
505 | emulated = kvmppc_handle_store(run, vcpu, | 454 | emulated = kvmppc_handle_store(run, vcpu, |
506 | kvmppc_get_gpr(vcpu, rs), | 455 | kvmppc_get_gpr(vcpu, rs), |
507 | 2, 1); | 456 | 2, 1); |
508 | break; | 457 | break; |
509 | 458 | ||
510 | case OP_STHU: | 459 | case OP_STHU: |
511 | ra = get_ra(inst); | ||
512 | rs = get_rs(inst); | ||
513 | emulated = kvmppc_handle_store(run, vcpu, | 460 | emulated = kvmppc_handle_store(run, vcpu, |
514 | kvmppc_get_gpr(vcpu, rs), | 461 | kvmppc_get_gpr(vcpu, rs), |
515 | 2, 1); | 462 | 2, 1); |
516 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); | 463 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); |
517 | break; | 464 | break; |
518 | 465 | ||
519 | default: | 466 | default: |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 00d7e345b3fe..1493c8de947b 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | |||
43 | v->requests; | 43 | v->requests; |
44 | } | 44 | } |
45 | 45 | ||
46 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | ||
47 | { | ||
48 | return 1; | ||
49 | } | ||
50 | |||
46 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | 51 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) |
47 | { | 52 | { |
48 | int nr = kvmppc_get_gpr(vcpu, 11); | 53 | int nr = kvmppc_get_gpr(vcpu, 11); |
@@ -74,7 +79,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | |||
74 | } | 79 | } |
75 | case HC_VENDOR_KVM | KVM_HC_FEATURES: | 80 | case HC_VENDOR_KVM | KVM_HC_FEATURES: |
76 | r = HC_EV_SUCCESS; | 81 | r = HC_EV_SUCCESS; |
77 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500) | 82 | #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) |
78 | /* XXX Missing magic page on 44x */ | 83 | /* XXX Missing magic page on 44x */ |
79 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); | 84 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); |
80 | #endif | 85 | #endif |
@@ -109,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu) | |||
109 | goto out; | 114 | goto out; |
110 | #endif | 115 | #endif |
111 | 116 | ||
117 | #ifdef CONFIG_KVM_BOOKE_HV | ||
118 | if (!cpu_has_feature(CPU_FTR_EMB_HV)) | ||
119 | goto out; | ||
120 | #endif | ||
121 | |||
112 | r = true; | 122 | r = true; |
113 | 123 | ||
114 | out: | 124 | out: |
@@ -225,7 +235,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
225 | case KVM_CAP_PPC_PAIRED_SINGLES: | 235 | case KVM_CAP_PPC_PAIRED_SINGLES: |
226 | case KVM_CAP_PPC_OSI: | 236 | case KVM_CAP_PPC_OSI: |
227 | case KVM_CAP_PPC_GET_PVINFO: | 237 | case KVM_CAP_PPC_GET_PVINFO: |
228 | #ifdef CONFIG_KVM_E500 | 238 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
229 | case KVM_CAP_SW_TLB: | 239 | case KVM_CAP_SW_TLB: |
230 | #endif | 240 | #endif |
231 | r = 1; | 241 | r = 1; |
@@ -234,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
234 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 244 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
235 | break; | 245 | break; |
236 | #endif | 246 | #endif |
237 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 247 | #ifdef CONFIG_PPC_BOOK3S_64 |
238 | case KVM_CAP_SPAPR_TCE: | 248 | case KVM_CAP_SPAPR_TCE: |
239 | r = 1; | 249 | r = 1; |
240 | break; | 250 | break; |
251 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
252 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
241 | case KVM_CAP_PPC_SMT: | 253 | case KVM_CAP_PPC_SMT: |
242 | r = threads_per_core; | 254 | r = threads_per_core; |
243 | break; | 255 | break; |
@@ -267,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
267 | case KVM_CAP_MAX_VCPUS: | 279 | case KVM_CAP_MAX_VCPUS: |
268 | r = KVM_MAX_VCPUS; | 280 | r = KVM_MAX_VCPUS; |
269 | break; | 281 | break; |
282 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
283 | case KVM_CAP_PPC_GET_SMMU_INFO: | ||
284 | r = 1; | ||
285 | break; | ||
286 | #endif | ||
270 | default: | 287 | default: |
271 | r = 0; | 288 | r = 0; |
272 | break; | 289 | break; |
@@ -588,21 +605,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
588 | return r; | 605 | return r; |
589 | } | 606 | } |
590 | 607 | ||
591 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | ||
592 | { | ||
593 | int me; | ||
594 | int cpu = vcpu->cpu; | ||
595 | |||
596 | me = get_cpu(); | ||
597 | if (waitqueue_active(vcpu->arch.wqp)) { | ||
598 | wake_up_interruptible(vcpu->arch.wqp); | ||
599 | vcpu->stat.halt_wakeup++; | ||
600 | } else if (cpu != me && cpu != -1) { | ||
601 | smp_send_reschedule(vcpu->cpu); | ||
602 | } | ||
603 | put_cpu(); | ||
604 | } | ||
605 | |||
606 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | 608 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
607 | { | 609 | { |
608 | if (irq->irq == KVM_INTERRUPT_UNSET) { | 610 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
@@ -611,6 +613,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | |||
611 | } | 613 | } |
612 | 614 | ||
613 | kvmppc_core_queue_external(vcpu, irq); | 615 | kvmppc_core_queue_external(vcpu, irq); |
616 | |||
614 | kvm_vcpu_kick(vcpu); | 617 | kvm_vcpu_kick(vcpu); |
615 | 618 | ||
616 | return 0; | 619 | return 0; |
@@ -633,7 +636,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | |||
633 | r = 0; | 636 | r = 0; |
634 | vcpu->arch.papr_enabled = true; | 637 | vcpu->arch.papr_enabled = true; |
635 | break; | 638 | break; |
636 | #ifdef CONFIG_KVM_E500 | 639 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
637 | case KVM_CAP_SW_TLB: { | 640 | case KVM_CAP_SW_TLB: { |
638 | struct kvm_config_tlb cfg; | 641 | struct kvm_config_tlb cfg; |
639 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; | 642 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; |
@@ -710,7 +713,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
710 | break; | 713 | break; |
711 | } | 714 | } |
712 | 715 | ||
713 | #ifdef CONFIG_KVM_E500 | 716 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
714 | case KVM_DIRTY_TLB: { | 717 | case KVM_DIRTY_TLB: { |
715 | struct kvm_dirty_tlb dirty; | 718 | struct kvm_dirty_tlb dirty; |
716 | r = -EFAULT; | 719 | r = -EFAULT; |
@@ -720,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
720 | break; | 723 | break; |
721 | } | 724 | } |
722 | #endif | 725 | #endif |
723 | |||
724 | default: | 726 | default: |
725 | r = -EINVAL; | 727 | r = -EINVAL; |
726 | } | 728 | } |
@@ -777,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
777 | 779 | ||
778 | break; | 780 | break; |
779 | } | 781 | } |
780 | #ifdef CONFIG_KVM_BOOK3S_64_HV | 782 | #ifdef CONFIG_PPC_BOOK3S_64 |
781 | case KVM_CREATE_SPAPR_TCE: { | 783 | case KVM_CREATE_SPAPR_TCE: { |
782 | struct kvm_create_spapr_tce create_tce; | 784 | struct kvm_create_spapr_tce create_tce; |
783 | struct kvm *kvm = filp->private_data; | 785 | struct kvm *kvm = filp->private_data; |
@@ -788,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
788 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); | 790 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); |
789 | goto out; | 791 | goto out; |
790 | } | 792 | } |
793 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
791 | 794 | ||
795 | #ifdef CONFIG_KVM_BOOK3S_64_HV | ||
792 | case KVM_ALLOCATE_RMA: { | 796 | case KVM_ALLOCATE_RMA: { |
793 | struct kvm *kvm = filp->private_data; | 797 | struct kvm *kvm = filp->private_data; |
794 | struct kvm_allocate_rma rma; | 798 | struct kvm_allocate_rma rma; |
@@ -800,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
800 | } | 804 | } |
801 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ | 805 | #endif /* CONFIG_KVM_BOOK3S_64_HV */ |
802 | 806 | ||
807 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
808 | case KVM_PPC_GET_SMMU_INFO: { | ||
809 | struct kvm *kvm = filp->private_data; | ||
810 | struct kvm_ppc_smmu_info info; | ||
811 | |||
812 | memset(&info, 0, sizeof(info)); | ||
813 | r = kvm_vm_ioctl_get_smmu_info(kvm, &info); | ||
814 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) | ||
815 | r = -EFAULT; | ||
816 | break; | ||
817 | } | ||
818 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
803 | default: | 819 | default: |
804 | r = -ENOTTY; | 820 | r = -ENOTTY; |
805 | } | 821 | } |
@@ -808,6 +824,40 @@ out: | |||
808 | return r; | 824 | return r; |
809 | } | 825 | } |
810 | 826 | ||
827 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; | ||
828 | static unsigned long nr_lpids; | ||
829 | |||
830 | long kvmppc_alloc_lpid(void) | ||
831 | { | ||
832 | long lpid; | ||
833 | |||
834 | do { | ||
835 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); | ||
836 | if (lpid >= nr_lpids) { | ||
837 | pr_err("%s: No LPIDs free\n", __func__); | ||
838 | return -ENOMEM; | ||
839 | } | ||
840 | } while (test_and_set_bit(lpid, lpid_inuse)); | ||
841 | |||
842 | return lpid; | ||
843 | } | ||
844 | |||
845 | void kvmppc_claim_lpid(long lpid) | ||
846 | { | ||
847 | set_bit(lpid, lpid_inuse); | ||
848 | } | ||
849 | |||
850 | void kvmppc_free_lpid(long lpid) | ||
851 | { | ||
852 | clear_bit(lpid, lpid_inuse); | ||
853 | } | ||
854 | |||
855 | void kvmppc_init_lpid(unsigned long nr_lpids_param) | ||
856 | { | ||
857 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); | ||
858 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | ||
859 | } | ||
860 | |||
811 | int kvm_arch_init(void *opaque) | 861 | int kvm_arch_init(void *opaque) |
812 | { | 862 | { |
813 | return 0; | 863 | return 0; |
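
The kvmppc_alloc_lpid() routine added above follows the stock bitmap-allocator pattern: scan for a clear bit, then claim it atomically with test_and_set_bit(), looping in case another CPU claimed the same bit between the scan and the claim. A minimal user-space sketch of the same claim-and-retry idea, using C11 atomics as a stand-in for the kernel bitops (names here are illustrative, and a 64-bit unsigned long is assumed):

/* Sketch of the claim-retry bitmap allocator pattern used by
 * kvmppc_alloc_lpid(); user-space stand-in, not the kernel helpers. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_IDS 64                      /* one 64-bit word, for brevity */
static _Atomic unsigned long inuse;    /* bit n set => id n taken */

static long alloc_id(void)
{
	for (;;) {
		unsigned long snap = atomic_load(&inuse);
		int id = __builtin_ffsl(~snap) - 1;    /* first zero bit */

		if (id < 0 || id >= NR_IDS)
			return -1;                     /* no IDs free */
		/* atomically set the bit; retry if someone beat us to it */
		if (!(atomic_fetch_or(&inuse, 1UL << id) & (1UL << id)))
			return id;
	}
}

static void free_id(long id)
{
	atomic_fetch_and(&inuse, ~(1UL << id));
}

int main(void)
{
	long a = alloc_id(), b = alloc_id();

	printf("allocated %ld and %ld\n", a, b);       /* 0 and 1 */
	free_id(a);
	printf("reallocated %ld\n", alloc_id());       /* 0 again */
	return 0;
}
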
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h index 8167d42a776f..bf191e72b2d8 100644 --- a/arch/powerpc/kvm/timing.h +++ b/arch/powerpc/kvm/timing.h | |||
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | |||
93 | case SIGNAL_EXITS: | 93 | case SIGNAL_EXITS: |
94 | vcpu->stat.signal_exits++; | 94 | vcpu->stat.signal_exits++; |
95 | break; | 95 | break; |
96 | case DBELL_EXITS: | ||
97 | vcpu->stat.dbell_exits++; | ||
98 | break; | ||
99 | case GDBELL_EXITS: | ||
100 | vcpu->stat.gdbell_exits++; | ||
101 | break; | ||
96 | } | 102 | } |
97 | } | 103 | } |
98 | 104 | ||
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index 773d38f90aaa..d73a59014900 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
@@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base) | |||
30 | dcbt 0,r4 | 30 | dcbt 0,r4 |
31 | beq .Lcopy_page_4K | 31 | beq .Lcopy_page_4K |
32 | andi. r6,r6,7 | 32 | andi. r6,r6,7 |
33 | PPC_MTOCRF 0x01,r5 | 33 | PPC_MTOCRF(0x01,r5) |
34 | blt cr1,.Lshort_copy | 34 | blt cr1,.Lshort_copy |
35 | /* Below we want to nop out the bne if we're on a CPU that has the | 35 | /* Below we want to nop out the bne if we're on a CPU that has the |
36 | * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit | 36 | * CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit |
@@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
186 | blr | 186 | blr |
187 | 187 | ||
188 | .Ldst_unaligned: | 188 | .Ldst_unaligned: |
189 | PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */ | 189 | PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */ |
190 | subf r5,r6,r5 | 190 | subf r5,r6,r5 |
191 | li r7,0 | 191 | li r7,0 |
192 | cmpldi cr1,r5,16 | 192 | cmpldi cr1,r5,16 |
@@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
201 | 2: bf cr7*4+1,3f | 201 | 2: bf cr7*4+1,3f |
202 | 37: lwzx r0,r7,r4 | 202 | 37: lwzx r0,r7,r4 |
203 | 83: stwx r0,r7,r3 | 203 | 83: stwx r0,r7,r3 |
204 | 3: PPC_MTOCRF 0x01,r5 | 204 | 3: PPC_MTOCRF(0x01,r5) |
205 | add r4,r6,r4 | 205 | add r4,r6,r4 |
206 | add r3,r6,r3 | 206 | add r3,r6,r3 |
207 | b .Ldst_aligned | 207 | b .Ldst_aligned |
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index 11ce045e21fd..f4fcb0bc6563 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S | |||
@@ -19,7 +19,7 @@ _GLOBAL(memset) | |||
19 | rlwimi r4,r4,16,0,15 | 19 | rlwimi r4,r4,16,0,15 |
20 | cmplw cr1,r5,r0 /* do we get that far? */ | 20 | cmplw cr1,r5,r0 /* do we get that far? */ |
21 | rldimi r4,r4,32,0 | 21 | rldimi r4,r4,32,0 |
22 | PPC_MTOCRF 1,r0 | 22 | PPC_MTOCRF(1,r0) |
23 | mr r6,r3 | 23 | mr r6,r3 |
24 | blt cr1,8f | 24 | blt cr1,8f |
25 | beq+ 3f /* if already 8-byte aligned */ | 25 | beq+ 3f /* if already 8-byte aligned */ |
@@ -49,7 +49,7 @@ _GLOBAL(memset) | |||
49 | bdnz 4b | 49 | bdnz 4b |
50 | 5: srwi. r0,r5,3 | 50 | 5: srwi. r0,r5,3 |
51 | clrlwi r5,r5,29 | 51 | clrlwi r5,r5,29 |
52 | PPC_MTOCRF 1,r0 | 52 | PPC_MTOCRF(1,r0) |
53 | beq 8f | 53 | beq 8f |
54 | bf 29,6f | 54 | bf 29,6f |
55 | std r4,0(r6) | 55 | std r4,0(r6) |
@@ -65,7 +65,7 @@ _GLOBAL(memset) | |||
65 | std r4,0(r6) | 65 | std r4,0(r6) |
66 | addi r6,r6,8 | 66 | addi r6,r6,8 |
67 | 8: cmpwi r5,0 | 67 | 8: cmpwi r5,0 |
68 | PPC_MTOCRF 1,r5 | 68 | PPC_MTOCRF(1,r5) |
69 | beqlr+ | 69 | beqlr+ |
70 | bf 29,9f | 70 | bf 29,9f |
71 | stw r4,0(r6) | 71 | stw r4,0(r6) |
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S index e178922b2c21..82fea3963e15 100644 --- a/arch/powerpc/lib/memcpy_64.S +++ b/arch/powerpc/lib/memcpy_64.S | |||
@@ -12,7 +12,7 @@ | |||
12 | .align 7 | 12 | .align 7 |
13 | _GLOBAL(memcpy) | 13 | _GLOBAL(memcpy) |
14 | std r3,48(r1) /* save destination pointer for return value */ | 14 | std r3,48(r1) /* save destination pointer for return value */ |
15 | PPC_MTOCRF 0x01,r5 | 15 | PPC_MTOCRF(0x01,r5) |
16 | cmpldi cr1,r5,16 | 16 | cmpldi cr1,r5,16 |
17 | neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry | 17 | neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry |
18 | andi. r6,r6,7 | 18 | andi. r6,r6,7 |
@@ -154,7 +154,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
154 | blr | 154 | blr |
155 | 155 | ||
156 | .Ldst_unaligned: | 156 | .Ldst_unaligned: |
157 | PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7 | 157 | PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7 |
158 | subf r5,r6,r5 | 158 | subf r5,r6,r5 |
159 | li r7,0 | 159 | li r7,0 |
160 | cmpldi cr1,r5,16 | 160 | cmpldi cr1,r5,16 |
@@ -169,7 +169,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
169 | 2: bf cr7*4+1,3f | 169 | 2: bf cr7*4+1,3f |
170 | lwzx r0,r7,r4 | 170 | lwzx r0,r7,r4 |
171 | stwx r0,r7,r3 | 171 | stwx r0,r7,r3 |
172 | 3: PPC_MTOCRF 0x01,r5 | 172 | 3: PPC_MTOCRF(0x01,r5) |
173 | add r4,r6,r4 | 173 | add r4,r6,r4 |
174 | add r3,r6,r3 | 174 | add r3,r6,r3 |
175 | b .Ldst_aligned | 175 | b .Ldst_aligned |
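
The PPC_MTOCRF conversions in copyuser_64.S, mem_64.S and memcpy_64.S only change the invocation syntax from a space-separated operand list to a parenthesized one, the form a C-preprocessor function-like macro requires; presumably this lets one macro definition expand cleanly in both assembly and C contexts. The underlying hazard is the usual macro-expansion precedence leak, sketched here in C for illustration:

/* Hedged illustration of why macro arguments want parentheses. */
#include <stdio.h>

#define DOUBLE_BAD(x)  x * 2        /* expansion leaks operator precedence */
#define DOUBLE_GOOD(x) ((x) * 2)    /* argument and result parenthesized */

int main(void)
{
	printf("bad:  %d\n", DOUBLE_BAD(1 + 2));   /* 1 + 2 * 2 == 5 */
	printf("good: %d\n", DOUBLE_GOOD(1 + 2));  /* (1 + 2) * 2 == 6 */
	return 0;
}
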
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index fb05b123218f..1a6de0a7d8eb 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -271,7 +271,8 @@ int alloc_bootmem_huge_page(struct hstate *hstate) | |||
271 | 271 | ||
272 | unsigned long gpage_npages[MMU_PAGE_COUNT]; | 272 | unsigned long gpage_npages[MMU_PAGE_COUNT]; |
273 | 273 | ||
274 | static int __init do_gpage_early_setup(char *param, char *val) | 274 | static int __init do_gpage_early_setup(char *param, char *val, |
275 | const char *unused) | ||
275 | { | 276 | { |
276 | static phys_addr_t size; | 277 | static phys_addr_t size; |
277 | unsigned long npages; | 278 | unsigned long npages; |
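
The do_gpage_early_setup() change tracks a kernel-wide update to the early parameter callback prototype, which gained a third argument this handler does not need. A hedged stand-alone sketch of the pattern, with illustrative names rather than the real parse_args() machinery:

/* Sketch: when a callback typedef grows a parameter, every handler
 * must follow suit even if it ignores the new argument. */
#include <stdio.h>

typedef int (*param_handler)(char *param, char *val, const char *doing);

static int handle_gpages(char *param, char *val, const char *unused)
{
	(void)unused;                   /* new argument, deliberately ignored */
	printf("param=%s val=%s\n", param, val ? val : "(none)");
	return 0;
}

static int run_handler(param_handler fn, char *param, char *val)
{
	return fn(param, val, "early options");   /* caller supplies context */
}

int main(void)
{
	char p[] = "hugepagesz", v[] = "16G";

	return run_handler(handle_gpages, p, v);
}
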
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 02aee03e713c..8f84bcba18da 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1299 | if (record) { | 1299 | if (record) { |
1300 | struct perf_sample_data data; | 1300 | struct perf_sample_data data; |
1301 | 1301 | ||
1302 | perf_sample_data_init(&data, ~0ULL); | 1302 | perf_sample_data_init(&data, ~0ULL, event->hw.last_period); |
1303 | data.period = event->hw.last_period; | ||
1304 | 1303 | ||
1305 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1304 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
1306 | perf_get_data_addr(regs, &data.addr); | 1305 | perf_get_data_addr(regs, &data.addr); |
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c index 0a6d2a9d569c..106c53354675 100644 --- a/arch/powerpc/perf/core-fsl-emb.c +++ b/arch/powerpc/perf/core-fsl-emb.c | |||
@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
613 | if (record) { | 613 | if (record) { |
614 | struct perf_sample_data data; | 614 | struct perf_sample_data data; |
615 | 615 | ||
616 | perf_sample_data_init(&data, 0); | 616 | perf_sample_data_init(&data, 0, event->hw.last_period); |
617 | data.period = event->hw.last_period; | ||
618 | 617 | ||
619 | if (perf_event_overflow(event, &data, regs)) | 618 | if (perf_event_overflow(event, &data, regs)) |
620 | fsl_emb_pmu_stop(event, 0); | 619 | fsl_emb_pmu_stop(event, 0); |
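
Both perf back-ends move the sample period out of a separate assignment and into the perf_sample_data_init() call itself, so a freshly initialized sample can never be observed with a stale period. A hedged sketch of the shape of that API change, using an illustrative struct rather than the real perf_sample_data:

#include <stdio.h>

/* Illustrative stand-in for perf_sample_data; the real struct is larger. */
struct sample_data {
	unsigned long long addr;
	unsigned long long period;
};

/* One initializer that sets every field removes the risk of a caller
 * forgetting to assign period after the init call. */
static void sample_data_init(struct sample_data *d, unsigned long long addr,
			     unsigned long long period)
{
	d->addr = addr;
	d->period = period;
}

int main(void)
{
	struct sample_data data;

	sample_data_init(&data, ~0ULL, 4096);   /* mirrors the new call shape */
	printf("addr=%llx period=%llu\n", data.addr, data.period);
	return 0;
}
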
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 2e4e64abfab4..8abf6fb8f410 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig | |||
@@ -23,6 +23,8 @@ config BLUESTONE | |||
23 | default n | 23 | default n |
24 | select PPC44x_SIMPLE | 24 | select PPC44x_SIMPLE |
25 | select APM821xx | 25 | select APM821xx |
26 | select PCI_MSI | ||
27 | select PPC4xx_MSI | ||
26 | select PPC4xx_PCI_EXPRESS | 28 | select PPC4xx_PCI_EXPRESS |
27 | select IBM_EMAC_RGMII | 29 | select IBM_EMAC_RGMII |
28 | help | 30 | help |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 425db18580a2..61c9550819a2 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -78,6 +78,36 @@ config PPC_BOOK3E_64 | |||
78 | 78 | ||
79 | endchoice | 79 | endchoice |
80 | 80 | ||
81 | choice | ||
82 | prompt "CPU selection" | ||
83 | depends on PPC64 | ||
84 | default GENERIC_CPU | ||
85 | help | ||
86 | This will create a kernel which is optimised for a particular CPU. | ||
87 | The resulting kernel may not run on other CPUs, so use this with care. | ||
88 | |||
89 | If unsure, select Generic. | ||
90 | |||
91 | config GENERIC_CPU | ||
92 | bool "Generic" | ||
93 | |||
94 | config CELL_CPU | ||
95 | bool "Cell Broadband Engine" | ||
96 | |||
97 | config POWER4_CPU | ||
98 | bool "POWER4" | ||
99 | |||
100 | config POWER5_CPU | ||
101 | bool "POWER5" | ||
102 | |||
103 | config POWER6_CPU | ||
104 | bool "POWER6" | ||
105 | |||
106 | config POWER7_CPU | ||
107 | bool "POWER7" | ||
108 | |||
109 | endchoice | ||
110 | |||
81 | config PPC_BOOK3S | 111 | config PPC_BOOK3S |
82 | def_bool y | 112 | def_bool y |
83 | depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 | 113 | depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 |
@@ -86,15 +116,6 @@ config PPC_BOOK3E | |||
86 | def_bool y | 116 | def_bool y |
87 | depends on PPC_BOOK3E_64 | 117 | depends on PPC_BOOK3E_64 |
88 | 118 | ||
89 | config POWER4_ONLY | ||
90 | bool "Optimize for POWER4" | ||
91 | depends on PPC64 && PPC_BOOK3S | ||
92 | default n | ||
93 | ---help--- | ||
94 | Cause the compiler to optimize for POWER4/POWER5/PPC970 processors. | ||
95 | The resulting binary will not work on POWER3 or RS64 processors | ||
96 | when compiled with binutils 2.15 or later. | ||
97 | |||
98 | config 6xx | 119 | config 6xx |
99 | def_bool y | 120 | def_bool y |
100 | depends on PPC32 && PPC_BOOK3S | 121 | depends on PPC32 && PPC_BOOK3S |
@@ -258,7 +279,7 @@ config PPC_ICSWX_PID | |||
258 | default y | 279 | default y |
259 | ---help--- | 280 | ---help--- |
260 | The PID register in server is used explicitly for ICSWX. In | 281 | The PID register in server is used explicitly for ICSWX. In |
261 | embedded systems PID managment is done by the system. | 282 | embedded systems PID management is done by the system. |
262 | 283 | ||
263 | config PPC_ICSWX_USE_SIGILL | 284 | config PPC_ICSWX_USE_SIGILL |
264 | bool "Should a bad CT cause a SIGILL?" | 285 | bool "Should a bad CT cause a SIGILL?" |
@@ -266,7 +287,7 @@ config PPC_ICSWX_USE_SIGILL | |||
266 | default n | 287 | default n |
267 | ---help--- | 288 | ---help--- |
268 | Should a bad CT used for "non-record form ICSWX" cause an | 289 | Should a bad CT used for "non-record form ICSWX" cause an |
269 | illegal intruction signal or should it be silent as | 290 | illegal instruction signal or should it be silent as |
270 | architected. | 291 | architected. |
271 | 292 | ||
272 | If in doubt, say N here. | 293 | If in doubt, say N here. |
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 03685a329d7d..fc536f2971c0 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -1503,6 +1503,7 @@ static int __init pmac_i2c_create_platform_devices(void) | |||
1503 | if (bus->platform_dev == NULL) | 1503 | if (bus->platform_dev == NULL) |
1504 | return -ENOMEM; | 1504 | return -ENOMEM; |
1505 | bus->platform_dev->dev.platform_data = bus; | 1505 | bus->platform_dev->dev.platform_data = bus; |
1506 | bus->platform_dev->dev.of_node = bus->busnode; | ||
1506 | platform_device_add(bus->platform_dev); | 1507 | platform_device_add(bus->platform_dev); |
1507 | } | 1508 | } |
1508 | 1509 | ||
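
Setting bus->platform_dev->dev.of_node ties the created platform device back to its device-tree node, so OF-aware consumers can match on it and read its properties. A hedged sketch of the consumer side, using the standard of_property_read_u32() helper and a hypothetical probe function:

#include <linux/platform_device.h>
#include <linux/of.h>

/* Hypothetical consumer; this only works because the bus code above
 * attached the device-tree node to the platform device. */
static int pmac_i2c_consumer_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	u32 freq;

	if (!np)
		return -ENODEV;         /* no DT node was attached */

	/* assume a default when the property is absent */
	if (of_property_read_u32(np, "clock-frequency", &freq))
		freq = 100000;

	dev_info(&pdev->dev, "bus clock %u Hz\n", freq);
	return 0;
}
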
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig index 476d9d9b2405..46b7f0232523 100644 --- a/arch/powerpc/platforms/ps3/Kconfig +++ b/arch/powerpc/platforms/ps3/Kconfig | |||
@@ -7,7 +7,6 @@ config PPC_PS3 | |||
7 | select USB_OHCI_BIG_ENDIAN_MMIO | 7 | select USB_OHCI_BIG_ENDIAN_MMIO |
8 | select USB_ARCH_HAS_EHCI | 8 | select USB_ARCH_HAS_EHCI |
9 | select USB_EHCI_BIG_ENDIAN_MMIO | 9 | select USB_EHCI_BIG_ENDIAN_MMIO |
10 | select MEMORY_HOTPLUG | ||
11 | select PPC_PCI_CHOICE | 10 | select PPC_PCI_CHOICE |
12 | help | 11 | help |
13 | This option enables support for the Sony PS3 game console | 12 | This option enables support for the Sony PS3 game console |
@@ -74,7 +73,7 @@ config PS3_PS3AV | |||
74 | help | 73 | help |
75 | Include support for the PS3 AV Settings driver. | 74 | Include support for the PS3 AV Settings driver. |
76 | 75 | ||
77 | This support is required for graphics and sound. In | 76 | This support is required for PS3 graphics and sound. In |
78 | general, all users will say Y or M. | 77 | general, all users will say Y or M. |
79 | 78 | ||
80 | config PS3_SYS_MANAGER | 79 | config PS3_SYS_MANAGER |
@@ -85,9 +84,22 @@ config PS3_SYS_MANAGER | |||
85 | help | 84 | help |
86 | Include support for the PS3 System Manager. | 85 | Include support for the PS3 System Manager. |
87 | 86 | ||
88 | This support is required for system control. In | 87 | This support is required for PS3 system control. In |
89 | general, all users will say Y or M. | 88 | general, all users will say Y or M. |
90 | 89 | ||
90 | config PS3_REPOSITORY_WRITE | ||
91 | bool "PS3 Repository write support" if PS3_ADVANCED | ||
92 | depends on PPC_PS3 | ||
93 | default n | ||
94 | help | ||
95 | Enables support for writing to the PS3 System Repository. | ||
96 | |||
97 | This support is intended for bootloaders that need to store data | ||
98 | in the repository for later boot stages. | ||
99 | |||
100 | If in doubt, say N here and reduce the size of the kernel by a | ||
101 | small amount. | ||
102 | |||
91 | config PS3_STORAGE | 103 | config PS3_STORAGE |
92 | depends on PPC_PS3 | 104 | depends on PPC_PS3 |
93 | tristate | 105 | tristate |
@@ -122,7 +134,7 @@ config PS3_FLASH | |||
122 | 134 | ||
123 | This support is required to access the PS3 FLASH ROM, which | 135 | This support is required to access the PS3 FLASH ROM, which |
124 | contains the boot loader and some boot options. | 136 | contains the boot loader and some boot options. |
125 | In general, all users will say Y or M. | 137 | In general, PS3 OtherOS users will say Y or M. |
126 | 138 | ||
127 | As this driver needs a fixed buffer of 256 KiB of memory, it can | 139 | As this driver needs a fixed buffer of 256 KiB of memory, it can |
128 | be disabled on the kernel command line using "ps3flash=off", to | 140 | be disabled on the kernel command line using "ps3flash=off", to |
@@ -156,7 +168,7 @@ config PS3GELIC_UDBG | |||
156 | via the Ethernet port (UDP port number 18194). | 168 | via the Ethernet port (UDP port number 18194). |
157 | 169 | ||
158 | This driver uses a trivial implementation and is independent | 170 | This driver uses a trivial implementation and is independent |
159 | from the main network driver. | 171 | from the main PS3 gelic network driver. |
160 | 172 | ||
161 | If in doubt, say N here. | 173 | If in doubt, say N here. |
162 | 174 | ||
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index de2aea421707..0c9f643d9e2a 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c | |||
@@ -20,7 +20,6 @@ | |||
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/export.h> | 22 | #include <linux/export.h> |
23 | #include <linux/memory_hotplug.h> | ||
24 | #include <linux/memblock.h> | 23 | #include <linux/memblock.h> |
25 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
26 | 25 | ||
@@ -79,12 +78,14 @@ enum { | |||
79 | * @base: base address | 78 | * @base: base address |
80 | * @size: size in bytes | 79 | * @size: size in bytes |
81 | * @offset: difference between base and rm.size | 80 | * @offset: difference between base and rm.size |
81 | * @destroy: flag if region should be destroyed upon shutdown | ||
82 | */ | 82 | */ |
83 | 83 | ||
84 | struct mem_region { | 84 | struct mem_region { |
85 | u64 base; | 85 | u64 base; |
86 | u64 size; | 86 | u64 size; |
87 | unsigned long offset; | 87 | unsigned long offset; |
88 | int destroy; | ||
88 | }; | 89 | }; |
89 | 90 | ||
90 | /** | 91 | /** |
@@ -96,7 +97,7 @@ struct mem_region { | |||
96 | * The HV virtual address space (vas) allows for hotplug memory regions. | 97 | * The HV virtual address space (vas) allows for hotplug memory regions. |
97 | * Memory regions can be created and destroyed in the vas at runtime. | 98 | * Memory regions can be created and destroyed in the vas at runtime. |
98 | * @rm: real mode (bootmem) region | 99 | * @rm: real mode (bootmem) region |
99 | * @r1: hotplug memory region(s) | 100 | * @r1: highmem region(s) |
100 | * | 101 | * |
101 | * ps3 addresses | 102 | * ps3 addresses |
102 | * virt_addr: a cpu 'translated' effective address | 103 | * virt_addr: a cpu 'translated' effective address |
@@ -222,10 +223,6 @@ void ps3_mm_vas_destroy(void) | |||
222 | } | 223 | } |
223 | } | 224 | } |
224 | 225 | ||
225 | /*============================================================================*/ | ||
226 | /* memory hotplug routines */ | ||
227 | /*============================================================================*/ | ||
228 | |||
229 | /** | 226 | /** |
230 | * ps3_mm_region_create - create a memory region in the vas | 227 | * ps3_mm_region_create - create a memory region in the vas |
231 | * @r: pointer to a struct mem_region to accept initialized values | 228 | * @r: pointer to a struct mem_region to accept initialized values |
@@ -262,6 +259,7 @@ static int ps3_mm_region_create(struct mem_region *r, unsigned long size) | |||
262 | goto zero_region; | 259 | goto zero_region; |
263 | } | 260 | } |
264 | 261 | ||
262 | r->destroy = 1; | ||
265 | r->offset = r->base - map.rm.size; | 263 | r->offset = r->base - map.rm.size; |
266 | return result; | 264 | return result; |
267 | 265 | ||
@@ -279,7 +277,14 @@ static void ps3_mm_region_destroy(struct mem_region *r) | |||
279 | { | 277 | { |
280 | int result; | 278 | int result; |
281 | 279 | ||
280 | if (!r->destroy) { | ||
281 | pr_info("%s:%d: Not destroying high region: %llxh %llxh\n", | ||
282 | __func__, __LINE__, r->base, r->size); | ||
283 | return; | ||
284 | } | ||
285 | |||
282 | DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); | 286 | DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base); |
287 | |||
283 | if (r->base) { | 288 | if (r->base) { |
284 | result = lv1_release_memory(r->base); | 289 | result = lv1_release_memory(r->base); |
285 | BUG_ON(result); | 290 | BUG_ON(result); |
@@ -288,50 +293,36 @@ static void ps3_mm_region_destroy(struct mem_region *r) | |||
288 | } | 293 | } |
289 | } | 294 | } |
290 | 295 | ||
291 | /** | 296 | static int ps3_mm_get_repository_highmem(struct mem_region *r) |
292 | * ps3_mm_add_memory - hot add memory | ||
293 | */ | ||
294 | |||
295 | static int __init ps3_mm_add_memory(void) | ||
296 | { | 297 | { |
297 | int result; | 298 | int result; |
298 | unsigned long start_addr; | ||
299 | unsigned long start_pfn; | ||
300 | unsigned long nr_pages; | ||
301 | |||
302 | if (!firmware_has_feature(FW_FEATURE_PS3_LV1)) | ||
303 | return -ENODEV; | ||
304 | 299 | ||
305 | BUG_ON(!mem_init_done); | 300 | /* Assume a single highmem region. */ |
306 | 301 | ||
307 | start_addr = map.rm.size; | 302 | result = ps3_repository_read_highmem_info(0, &r->base, &r->size); |
308 | start_pfn = start_addr >> PAGE_SHIFT; | ||
309 | nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
310 | 303 | ||
311 | DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n", | 304 | if (result) |
312 | __func__, __LINE__, start_addr, start_pfn, nr_pages); | 305 | goto zero_region; |
313 | |||
314 | result = add_memory(0, start_addr, map.r1.size); | ||
315 | 306 | ||
316 | if (result) { | 307 | if (!r->base || !r->size) { |
317 | pr_err("%s:%d: add_memory failed: (%d)\n", | 308 | result = -1; |
318 | __func__, __LINE__, result); | 309 | goto zero_region; |
319 | return result; | ||
320 | } | 310 | } |
321 | 311 | ||
322 | memblock_add(start_addr, map.r1.size); | 312 | r->offset = r->base - map.rm.size; |
323 | 313 | ||
324 | result = online_pages(start_pfn, nr_pages); | 314 | DBG("%s:%d: Found high region in repository: %llxh %llxh\n", |
315 | __func__, __LINE__, r->base, r->size); | ||
325 | 316 | ||
326 | if (result) | 317 | return 0; |
327 | pr_err("%s:%d: online_pages failed: (%d)\n", | ||
328 | __func__, __LINE__, result); | ||
329 | 318 | ||
319 | zero_region: | ||
320 | DBG("%s:%d: No high region in repository.\n", __func__, __LINE__); | ||
321 | |||
322 | r->size = r->base = r->offset = 0; | ||
330 | return result; | 323 | return result; |
331 | } | 324 | } |
332 | 325 | ||
333 | device_initcall(ps3_mm_add_memory); | ||
334 | |||
335 | /*============================================================================*/ | 326 | /*============================================================================*/ |
336 | /* dma routines */ | 327 | /* dma routines */ |
337 | /*============================================================================*/ | 328 | /*============================================================================*/ |
@@ -1217,13 +1208,23 @@ void __init ps3_mm_init(void) | |||
1217 | BUG_ON(map.rm.base); | 1208 | BUG_ON(map.rm.base); |
1218 | BUG_ON(!map.rm.size); | 1209 | BUG_ON(!map.rm.size); |
1219 | 1210 | ||
1211 | /* Check if we got the highmem region from an earlier boot step */ | ||
1220 | 1212 | ||
1221 | /* arrange to do this in ps3_mm_add_memory */ | 1213 | if (ps3_mm_get_repository_highmem(&map.r1)) |
1222 | ps3_mm_region_create(&map.r1, map.total - map.rm.size); | 1214 | ps3_mm_region_create(&map.r1, map.total - map.rm.size); |
1223 | 1215 | ||
1224 | /* correct map.total for the real total amount of memory we use */ | 1216 | /* correct map.total for the real total amount of memory we use */ |
1225 | map.total = map.rm.size + map.r1.size; | 1217 | map.total = map.rm.size + map.r1.size; |
1226 | 1218 | ||
1219 | if (!map.r1.size) { | ||
1220 | DBG("%s:%d: No highmem region found\n", __func__, __LINE__); | ||
1221 | } else { | ||
1222 | DBG("%s:%d: Adding highmem region: %llxh %llxh\n", | ||
1223 | __func__, __LINE__, map.rm.size, | ||
1224 | map.total - map.rm.size); | ||
1225 | memblock_add(map.rm.size, map.total - map.rm.size); | ||
1226 | } | ||
1227 | |||
1227 | DBG(" <- %s:%d\n", __func__, __LINE__); | 1228 | DBG(" <- %s:%d\n", __func__, __LINE__); |
1228 | } | 1229 | } |
1229 | 1230 | ||
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h index 1a633ed0fe98..d71329a8e325 100644 --- a/arch/powerpc/platforms/ps3/platform.h +++ b/arch/powerpc/platforms/ps3/platform.h | |||
@@ -188,6 +188,22 @@ int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size); | |||
188 | int ps3_repository_read_region_total(u64 *region_total); | 188 | int ps3_repository_read_region_total(u64 *region_total); |
189 | int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, | 189 | int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, |
190 | u64 *region_total); | 190 | u64 *region_total); |
191 | int ps3_repository_read_highmem_region_count(unsigned int *region_count); | ||
192 | int ps3_repository_read_highmem_base(unsigned int region_index, | ||
193 | u64 *highmem_base); | ||
194 | int ps3_repository_read_highmem_size(unsigned int region_index, | ||
195 | u64 *highmem_size); | ||
196 | int ps3_repository_read_highmem_info(unsigned int region_index, | ||
197 | u64 *highmem_base, u64 *highmem_size); | ||
198 | |||
199 | int ps3_repository_write_highmem_region_count(unsigned int region_count); | ||
200 | int ps3_repository_write_highmem_base(unsigned int region_index, | ||
201 | u64 highmem_base); | ||
202 | int ps3_repository_write_highmem_size(unsigned int region_index, | ||
203 | u64 highmem_size); | ||
204 | int ps3_repository_write_highmem_info(unsigned int region_index, | ||
205 | u64 highmem_base, u64 highmem_size); | ||
206 | int ps3_repository_delete_highmem_info(unsigned int region_index); | ||
191 | 207 | ||
192 | /* repository pme info */ | 208 | /* repository pme info */ |
193 | 209 | ||
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c index 7bdfea336f5e..9b47ba7a5de7 100644 --- a/arch/powerpc/platforms/ps3/repository.c +++ b/arch/powerpc/platforms/ps3/repository.c | |||
@@ -779,6 +779,72 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total) | |||
779 | } | 779 | } |
780 | 780 | ||
781 | /** | 781 | /** |
782 | * ps3_repository_read_highmem_region_count - Read the number of highmem regions | ||
783 | * | ||
784 | * Bootloaders must arrange the repository nodes such that regions are indexed | ||
785 | * with a region_index from 0 to region_count-1. | ||
786 | */ | ||
787 | |||
788 | int ps3_repository_read_highmem_region_count(unsigned int *region_count) | ||
789 | { | ||
790 | int result; | ||
791 | u64 v1 = 0; | ||
792 | |||
793 | result = read_node(PS3_LPAR_ID_CURRENT, | ||
794 | make_first_field("highmem", 0), | ||
795 | make_field("region", 0), | ||
796 | make_field("count", 0), | ||
797 | 0, | ||
798 | &v1, NULL); | ||
799 | *region_count = v1; | ||
800 | return result; | ||
801 | } | ||
802 | |||
803 | |||
804 | int ps3_repository_read_highmem_base(unsigned int region_index, | ||
805 | u64 *highmem_base) | ||
806 | { | ||
807 | return read_node(PS3_LPAR_ID_CURRENT, | ||
808 | make_first_field("highmem", 0), | ||
809 | make_field("region", region_index), | ||
810 | make_field("base", 0), | ||
811 | 0, | ||
812 | highmem_base, NULL); | ||
813 | } | ||
814 | |||
815 | int ps3_repository_read_highmem_size(unsigned int region_index, | ||
816 | u64 *highmem_size) | ||
817 | { | ||
818 | return read_node(PS3_LPAR_ID_CURRENT, | ||
819 | make_first_field("highmem", 0), | ||
820 | make_field("region", region_index), | ||
821 | make_field("size", 0), | ||
822 | 0, | ||
823 | highmem_size, NULL); | ||
824 | } | ||
825 | |||
826 | /** | ||
827 | * ps3_repository_read_highmem_info - Read high memory region info | ||
828 | * @region_index: Region index, {0,..,region_count-1}. | ||
829 | * @highmem_base: High memory base address. | ||
830 | * @highmem_size: High memory size. | ||
831 | * | ||
832 | * Bootloaders that preallocate highmem regions must place the | ||
833 | * region info into the repository at these well known nodes. | ||
834 | */ | ||
835 | |||
836 | int ps3_repository_read_highmem_info(unsigned int region_index, | ||
837 | u64 *highmem_base, u64 *highmem_size) | ||
838 | { | ||
839 | int result; | ||
840 | |||
841 | *highmem_base = 0; | ||
842 | result = ps3_repository_read_highmem_base(region_index, highmem_base); | ||
843 | return result ? result | ||
844 | : ps3_repository_read_highmem_size(region_index, highmem_size); | ||
845 | } | ||
846 | |||
847 | /** | ||
782 | * ps3_repository_read_num_spu_reserved - Number of physical spus reserved. | 848 | * ps3_repository_read_num_spu_reserved - Number of physical spus reserved. |
783 | * @num_spu: Number of physical spus. | 849 | * @num_spu: Number of physical spus. |
784 | */ | 850 | */ |
@@ -1002,6 +1068,138 @@ int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar, | |||
1002 | lpar, rights); | 1068 | lpar, rights); |
1003 | } | 1069 | } |
1004 | 1070 | ||
1071 | #if defined(CONFIG_PS3_REPOSITORY_WRITE) | ||
1072 | |||
1073 | static int create_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2) | ||
1074 | { | ||
1075 | int result; | ||
1076 | |||
1077 | dump_node(0, n1, n2, n3, n4, v1, v2); | ||
1078 | |||
1079 | result = lv1_create_repository_node(n1, n2, n3, n4, v1, v2); | ||
1080 | |||
1081 | if (result) { | ||
1082 | pr_devel("%s:%d: lv1_create_repository_node failed: %s\n", | ||
1083 | __func__, __LINE__, ps3_result(result)); | ||
1084 | return -ENOENT; | ||
1085 | } | ||
1086 | |||
1087 | return 0; | ||
1088 | } | ||
1089 | |||
1090 | static int delete_node(u64 n1, u64 n2, u64 n3, u64 n4) | ||
1091 | { | ||
1092 | int result; | ||
1093 | |||
1094 | dump_node(0, n1, n2, n3, n4, 0, 0); | ||
1095 | |||
1096 | result = lv1_delete_repository_node(n1, n2, n3, n4); | ||
1097 | |||
1098 | if (result) { | ||
1099 | pr_devel("%s:%d: lv1_delete_repository_node failed: %s\n", | ||
1100 | __func__, __LINE__, ps3_result(result)); | ||
1101 | return -ENOENT; | ||
1102 | } | ||
1103 | |||
1104 | return 0; | ||
1105 | } | ||
1106 | |||
1107 | static int write_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2) | ||
1108 | { | ||
1109 | int result; | ||
1110 | |||
1111 | result = create_node(n1, n2, n3, n4, v1, v2); | ||
1112 | |||
1113 | if (!result) | ||
1114 | return 0; | ||
1115 | |||
1116 | result = lv1_write_repository_node(n1, n2, n3, n4, v1, v2); | ||
1117 | |||
1118 | if (result) { | ||
1119 | pr_devel("%s:%d: lv1_write_repository_node failed: %s\n", | ||
1120 | __func__, __LINE__, ps3_result(result)); | ||
1121 | return -ENOENT; | ||
1122 | } | ||
1123 | |||
1124 | return 0; | ||
1125 | } | ||
1126 | |||
1127 | int ps3_repository_write_highmem_region_count(unsigned int region_count) | ||
1128 | { | ||
1129 | int result; | ||
1130 | u64 v1 = (u64)region_count; | ||
1131 | |||
1132 | result = write_node( | ||
1133 | make_first_field("highmem", 0), | ||
1134 | make_field("region", 0), | ||
1135 | make_field("count", 0), | ||
1136 | 0, | ||
1137 | v1, 0); | ||
1138 | return result; | ||
1139 | } | ||
1140 | |||
1141 | int ps3_repository_write_highmem_base(unsigned int region_index, | ||
1142 | u64 highmem_base) | ||
1143 | { | ||
1144 | return write_node( | ||
1145 | make_first_field("highmem", 0), | ||
1146 | make_field("region", region_index), | ||
1147 | make_field("base", 0), | ||
1148 | 0, | ||
1149 | highmem_base, 0); | ||
1150 | } | ||
1151 | |||
1152 | int ps3_repository_write_highmem_size(unsigned int region_index, | ||
1153 | u64 highmem_size) | ||
1154 | { | ||
1155 | return write_node( | ||
1156 | make_first_field("highmem", 0), | ||
1157 | make_field("region", region_index), | ||
1158 | make_field("size", 0), | ||
1159 | 0, | ||
1160 | highmem_size, 0); | ||
1161 | } | ||
1162 | |||
1163 | int ps3_repository_write_highmem_info(unsigned int region_index, | ||
1164 | u64 highmem_base, u64 highmem_size) | ||
1165 | { | ||
1166 | int result; | ||
1167 | |||
1168 | result = ps3_repository_write_highmem_base(region_index, highmem_base); | ||
1169 | return result ? result | ||
1170 | : ps3_repository_write_highmem_size(region_index, highmem_size); | ||
1171 | } | ||
1172 | |||
1173 | static int ps3_repository_delete_highmem_base(unsigned int region_index) | ||
1174 | { | ||
1175 | return delete_node( | ||
1176 | make_first_field("highmem", 0), | ||
1177 | make_field("region", region_index), | ||
1178 | make_field("base", 0), | ||
1179 | 0); | ||
1180 | } | ||
1181 | |||
1182 | static int ps3_repository_delete_highmem_size(unsigned int region_index) | ||
1183 | { | ||
1184 | return delete_node( | ||
1185 | make_first_field("highmem", 0), | ||
1186 | make_field("region", region_index), | ||
1187 | make_field("size", 0), | ||
1188 | 0); | ||
1189 | } | ||
1190 | |||
1191 | int ps3_repository_delete_highmem_info(unsigned int region_index) | ||
1192 | { | ||
1193 | int result; | ||
1194 | |||
1195 | result = ps3_repository_delete_highmem_base(region_index); | ||
1196 | result += ps3_repository_delete_highmem_size(region_index); | ||
1197 | |||
1198 | return result ? -1 : 0; | ||
1199 | } | ||
1200 | |||
1201 | #endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */ | ||
1202 | |||
1005 | #if defined(DEBUG) | 1203 | #if defined(DEBUG) |
1006 | 1204 | ||
1007 | int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo) | 1205 | int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo) |
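
Taken together, the read helpers above and the CONFIG_PS3_REPOSITORY_WRITE block form a hand-off: a boot stage that preallocated a highmem region records it in the repository, and the kernel's ps3_mm_get_repository_highmem() later picks it up. A minimal sketch of both sides of that hand-off, using only the functions declared in platform.h (error handling trimmed):

/* Boot-stage side (needs CONFIG_PS3_REPOSITORY_WRITE): publish one
 * preallocated highmem region at index 0. */
static int publish_highmem(u64 base, u64 size)
{
	int result = ps3_repository_write_highmem_info(0, base, size);

	if (!result)
		result = ps3_repository_write_highmem_region_count(1);
	return result;
}

/* Kernel side: read it back, mirroring ps3_mm_get_repository_highmem(). */
static int fetch_highmem(u64 *base, u64 *size)
{
	return ps3_repository_read_highmem_info(0, base, size);
}
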
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 178a5f300bc9..837cf49357ed 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -67,7 +67,7 @@ config IO_EVENT_IRQ | |||
67 | 67 | ||
68 | This option will only enable the IO event platform code. You | 68 | This option will only enable the IO event platform code. You |
69 | will still need to enable or compile the actual drivers | 69 | will still need to enable or compile the actual drivers |
70 | that use this infrastruture to handle IO event interrupts. | 70 | that use this infrastructure to handle IO event interrupts. |
71 | 71 | ||
72 | Say Y if you are unsure. | 72 | Say Y if you are unsure. |
73 | 73 | ||
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index a75e37dc41aa..ecd394cf34e6 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -489,7 +489,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |||
489 | * a stack trace will help the device-driver authors figure | 489 | * a stack trace will help the device-driver authors figure |
490 | * out what happened. So print that out. | 490 | * out what happened. So print that out. |
491 | */ | 491 | */ |
492 | dump_stack(); | 492 | WARN(1, "EEH: failure detected\n"); |
493 | return 1; | 493 | return 1; |
494 | 494 | ||
495 | dn_unlock: | 495 | dn_unlock: |
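
Swapping the bare dump_stack() for WARN(1, "EEH: failure detected\n") keeps the backtrace but adds a greppable message. WARN() also evaluates to its condition, so it can drive control flow directly; a small sketch of that idiom (standard kernel macro, hypothetical caller):

#include <linux/bug.h>

/* WARN(cond, fmt, ...) prints the message plus a backtrace when cond
 * is true and returns the truth of cond, so a check can log and bail
 * out in one expression. */
static int check_state(int ret)
{
	if (WARN(ret < 0, "unexpected state %d\n", ret))
		return ret;
	return 0;
}
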
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index 342797fc0f9c..13e8cc43adf7 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h | |||
@@ -22,12 +22,12 @@ static inline long poll_pending(void) | |||
22 | 22 | ||
23 | static inline u8 get_cede_latency_hint(void) | 23 | static inline u8 get_cede_latency_hint(void) |
24 | { | 24 | { |
25 | return get_lppaca()->gpr5_dword.fields.cede_latency_hint; | 25 | return get_lppaca()->cede_latency_hint; |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void set_cede_latency_hint(u8 latency_hint) | 28 | static inline void set_cede_latency_hint(u8 latency_hint) |
29 | { | 29 | { |
30 | get_lppaca()->gpr5_dword.fields.cede_latency_hint = latency_hint; | 30 | get_lppaca()->cede_latency_hint = latency_hint; |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline long cede_processor(void) | 33 | static inline long cede_processor(void) |
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 168651acdd83..7b3bf76ef834 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c | |||
@@ -103,11 +103,13 @@ int pSeries_reconfig_notifier_register(struct notifier_block *nb) | |||
103 | { | 103 | { |
104 | return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb); | 104 | return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb); |
105 | } | 105 | } |
106 | EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_register); | ||
106 | 107 | ||
107 | void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) | 108 | void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) |
108 | { | 109 | { |
109 | blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); | 110 | blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb); |
110 | } | 111 | } |
112 | EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_unregister); | ||
111 | 113 | ||
112 | int pSeries_reconfig_notify(unsigned long action, void *p) | 114 | int pSeries_reconfig_notify(unsigned long action, void *p) |
113 | { | 115 | { |
@@ -426,6 +428,7 @@ static int do_remove_property(char *buf, size_t bufsize) | |||
426 | static int do_update_property(char *buf, size_t bufsize) | 428 | static int do_update_property(char *buf, size_t bufsize) |
427 | { | 429 | { |
428 | struct device_node *np; | 430 | struct device_node *np; |
431 | struct pSeries_reconfig_prop_update upd_value; | ||
429 | unsigned char *value; | 432 | unsigned char *value; |
430 | char *name, *end, *next_prop; | 433 | char *name, *end, *next_prop; |
431 | int rc, length; | 434 | int rc, length; |
@@ -454,6 +457,10 @@ static int do_update_property(char *buf, size_t bufsize) | |||
454 | return -ENODEV; | 457 | return -ENODEV; |
455 | } | 458 | } |
456 | 459 | ||
460 | upd_value.node = np; | ||
461 | upd_value.property = newprop; | ||
462 | pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value); | ||
463 | |||
457 | rc = prom_update_property(np, newprop, oldprop); | 464 | rc = prom_update_property(np, newprop, oldprop); |
458 | if (rc) | 465 | if (rc) |
459 | return rc; | 466 | return rc; |
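
Exporting the register/unregister helpers lets modules subscribe to device-tree reconfiguration, including the new PSERIES_UPDATE_PROPERTY event that do_update_property() now raises before updating the property. A hedged sketch of a module-side subscriber (hypothetical module; the notifier_block API is the standard kernel one, and the header name is assumed):

#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/pSeries_reconfig.h>   /* assumed header for the pseries API */

static int my_reconfig_event(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	if (action == PSERIES_UPDATE_PROPERTY)
		pr_info("device-tree property updated\n");
	return NOTIFY_OK;
}

static struct notifier_block my_reconfig_nb = {
	.notifier_call = my_reconfig_event,
};

static int __init my_init(void)
{
	return pSeries_reconfig_notifier_register(&my_reconfig_nb);
}

static void __exit my_exit(void)
{
	pSeries_reconfig_notifier_unregister(&my_reconfig_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
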
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c index 1c2d7af17bbe..82c6702dcbab 100644 --- a/arch/powerpc/sysdev/ppc4xx_msi.c +++ b/arch/powerpc/sysdev/ppc4xx_msi.c | |||
@@ -28,10 +28,11 @@ | |||
28 | #include <linux/of_platform.h> | 28 | #include <linux/of_platform.h> |
29 | #include <linux/interrupt.h> | 29 | #include <linux/interrupt.h> |
30 | #include <linux/export.h> | 30 | #include <linux/export.h> |
31 | #include <linux/kernel.h> | ||
31 | #include <asm/prom.h> | 32 | #include <asm/prom.h> |
32 | #include <asm/hw_irq.h> | 33 | #include <asm/hw_irq.h> |
33 | #include <asm/ppc-pci.h> | 34 | #include <asm/ppc-pci.h> |
34 | #include <boot/dcr.h> | 35 | #include <asm/dcr.h> |
35 | #include <asm/dcr-regs.h> | 36 | #include <asm/dcr-regs.h> |
36 | #include <asm/msi_bitmap.h> | 37 | #include <asm/msi_bitmap.h> |
37 | 38 | ||
@@ -43,13 +44,14 @@ | |||
43 | #define PEIH_FLUSH0 0x30 | 44 | #define PEIH_FLUSH0 0x30 |
44 | #define PEIH_FLUSH1 0x38 | 45 | #define PEIH_FLUSH1 0x38 |
45 | #define PEIH_CNTRST 0x48 | 46 | #define PEIH_CNTRST 0x48 |
46 | #define NR_MSI_IRQS 4 | 47 | |
48 | static int msi_irqs; | ||
47 | 49 | ||
48 | struct ppc4xx_msi { | 50 | struct ppc4xx_msi { |
49 | u32 msi_addr_lo; | 51 | u32 msi_addr_lo; |
50 | u32 msi_addr_hi; | 52 | u32 msi_addr_hi; |
51 | void __iomem *msi_regs; | 53 | void __iomem *msi_regs; |
52 | int msi_virqs[NR_MSI_IRQS]; | 54 | int *msi_virqs; |
53 | struct msi_bitmap bitmap; | 55 | struct msi_bitmap bitmap; |
54 | struct device_node *msi_dev; | 56 | struct device_node *msi_dev; |
55 | }; | 57 | }; |
@@ -61,7 +63,7 @@ static int ppc4xx_msi_init_allocator(struct platform_device *dev, | |||
61 | { | 63 | { |
62 | int err; | 64 | int err; |
63 | 65 | ||
64 | err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, | 66 | err = msi_bitmap_alloc(&msi_data->bitmap, msi_irqs, |
65 | dev->dev.of_node); | 67 | dev->dev.of_node); |
66 | if (err) | 68 | if (err) |
67 | return err; | 69 | return err; |
@@ -83,6 +85,11 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
83 | struct msi_desc *entry; | 85 | struct msi_desc *entry; |
84 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; | 86 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; |
85 | 87 | ||
88 | msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), | ||
89 | GFP_KERNEL); | ||
90 | if (!msi_data->msi_virqs) | ||
91 | return -ENOMEM; | ||
92 | |||
86 | list_for_each_entry(entry, &dev->msi_list, list) { | 93 | list_for_each_entry(entry, &dev->msi_list, list) { |
87 | int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); | 94 | int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); |
88 | if (int_no >= 0) | 95 | if (int_no >= 0) |
@@ -150,12 +157,11 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, | |||
150 | if (!sdr_addr) | 157 | if (!sdr_addr) |
151 | return -1; | 158 | return -1; |
152 | 159 | ||
153 | SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */ | 160 | mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */ |
154 | SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */ | 161 | mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */ |
155 | |||
156 | 162 | ||
157 | msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi"); | 163 | msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi"); |
158 | if (msi->msi_dev) | 164 | if (!msi->msi_dev) |
159 | return -ENODEV; | 165 | return -ENODEV; |
160 | 166 | ||
161 | msi->msi_regs = of_iomap(msi->msi_dev, 0); | 167 | msi->msi_regs = of_iomap(msi->msi_dev, 0); |
@@ -167,9 +173,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, | |||
167 | (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs)); | 173 | (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs)); |
168 | 174 | ||
169 | msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL); | 175 | msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL); |
170 | msi->msi_addr_hi = 0x0; | 176 | if (!msi_virt) |
171 | msi->msi_addr_lo = (u32) msi_phys; | 177 | return -ENOMEM; |
172 | dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo); | 178 | msi->msi_addr_hi = upper_32_bits(msi_phys); |
179 | msi->msi_addr_lo = lower_32_bits(msi_phys); | ||
180 | dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n", | ||
181 | msi->msi_addr_hi, msi->msi_addr_lo); | ||
173 | 182 | ||
174 | /* Program the Interrupt handler Termination addr registers */ | 183 | /* Program the Interrupt handler Termination addr registers */ |
175 | out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi); | 184 | out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi); |
@@ -185,6 +194,8 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, | |||
185 | out_be32(msi->msi_regs + PEIH_MSIED, *msi_data); | 194 | out_be32(msi->msi_regs + PEIH_MSIED, *msi_data); |
186 | out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask); | 195 | out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask); |
187 | 196 | ||
197 | dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys); | ||
198 | |||
188 | return 0; | 199 | return 0; |
189 | } | 200 | } |
190 | 201 | ||
@@ -194,7 +205,7 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev) | |||
194 | int i; | 205 | int i; |
195 | int virq; | 206 | int virq; |
196 | 207 | ||
197 | for (i = 0; i < NR_MSI_IRQS; i++) { | 208 | for (i = 0; i < msi_irqs; i++) { |
198 | virq = msi->msi_virqs[i]; | 209 | virq = msi->msi_virqs[i]; |
199 | if (virq != NO_IRQ) | 210 | if (virq != NO_IRQ) |
200 | irq_dispose_mapping(virq); | 211 | irq_dispose_mapping(virq); |
@@ -215,8 +226,6 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev) | |||
215 | struct resource res; | 226 | struct resource res; |
216 | int err = 0; | 227 | int err = 0; |
217 | 228 | ||
218 | msi = &ppc4xx_msi;/*keep the msi data for further use*/ | ||
219 | |||
220 | dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n"); | 229 | dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n"); |
221 | 230 | ||
222 | msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL); | 231 | msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL); |
@@ -234,6 +243,10 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev) | |||
234 | goto error_out; | 243 | goto error_out; |
235 | } | 244 | } |
236 | 245 | ||
246 | msi_irqs = of_irq_count(dev->dev.of_node); | ||
247 | if (!msi_irqs) | ||
248 | return -ENODEV; | ||
249 | |||
237 | if (ppc4xx_setup_pcieh_hw(dev, res, msi)) | 250 | if (ppc4xx_setup_pcieh_hw(dev, res, msi)) |
238 | goto error_out; | 251 | goto error_out; |
239 | 252 | ||
@@ -242,6 +255,7 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev) | |||
242 | dev_err(&dev->dev, "Error allocating MSI bitmap\n"); | 255 | dev_err(&dev->dev, "Error allocating MSI bitmap\n"); |
243 | goto error_out; | 256 | goto error_out; |
244 | } | 257 | } |
258 | ppc4xx_msi = *msi; | ||
245 | 259 | ||
246 | ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs; | 260 | ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs; |
247 | ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs; | 261 | ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs; |
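
The net effect of the ppc4xx_msi changes is that the driver no longer assumes NR_MSI_IRQS == 4: it counts the interrupts actually described in the device tree via of_irq_count() and sizes the virq table to match. A user-space stand-in for that sizing pattern (the DT lookup is faked here):

/* Sketch of the NR_MSI_IRQS -> of_irq_count() change: size the virq
 * table from the discovered interrupt count instead of a constant. */
#include <stdio.h>
#include <stdlib.h>

static int fake_of_irq_count(void) { return 3; }  /* stand-in for DT lookup */

int main(void)
{
	int msi_irqs = fake_of_irq_count();
	int *msi_virqs;
	int i;

	if (!msi_irqs)
		return 1;                          /* mirrors -ENODEV */

	msi_virqs = malloc(msi_irqs * sizeof(*msi_virqs));
	if (!msi_virqs)
		return 1;                          /* mirrors -ENOMEM */

	for (i = 0; i < msi_irqs; i++)
		msi_virqs[i] = 100 + i;            /* pretend irq mappings */

	for (i = 0; i < msi_irqs; i++)
		printf("virq[%d] = %d\n", i, msi_virqs[i]);

	free(msi_virqs);
	return 0;
}
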