author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-16 12:55:35 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-16 12:55:35 -0400
commit		10dc3747661bea9215417b659449bb7b8ed3df2c
tree		d943974b4941203a7db2fabe4896852cf0f16bc4 /arch/arm/kvm
parent		047486d8e7c2a7e8d75b068b69cb67b47364f5d4
parent		f958ee745f70b60d0e41927cab2c073104bc70c2
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
 "One of the largest releases for KVM...  Hardly any generic changes,
  but lots of architecture-specific updates.

  ARM:
   - VHE support so that we can run the kernel at EL2 on ARMv8.1 systems
   - PMU support for guests
   - 32bit world switch rewritten in C
   - various optimizations to the vgic save/restore code.

  PPC:
   - enabled KVM-VFIO integration ("VFIO device")
   - optimizations to speed up IPIs between vcpus
   - in-kernel handling of IOMMU hypercalls
   - support for dynamic DMA windows (DDW).

  s390:
   - provide the floating point registers via sync regs
   - separated instruction vs. data accesses
   - dirty log improvements for huge guests
   - bugfixes and documentation improvements.

  x86:
   - Hyper-V VMBus hypercall userspace exit
   - alternative implementation of lowest-priority interrupts using
     vector hashing (for better VT-d posted interrupt support)
   - fixed guest debugging with nested virtualizations
   - improved interrupt tracking in the in-kernel IOAPIC
   - generic infrastructure for tracking writes to guest memory -
     currently its only use is to speedup the legacy shadow paging
     (pre-EPT) case, but in the future it will be used for virtual GPUs
     as well
   - much cleanup (LAPIC, kvmclock, MMU, PIT), including ubsan fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (217 commits)
  KVM: x86: remove eager_fpu field of struct kvm_vcpu_arch
  KVM: x86: disable MPX if host did not enable MPX XSAVE features
  arm64: KVM: vgic-v3: Only wipe LRs on vcpu exit
  arm64: KVM: vgic-v3: Reset LRs at boot time
  arm64: KVM: vgic-v3: Do not save an LR known to be empty
  arm64: KVM: vgic-v3: Save maintenance interrupt state only if required
  arm64: KVM: vgic-v3: Avoid accessing ICH registers
  KVM: arm/arm64: vgic-v2: Make GICD_SGIR quicker to hit
  KVM: arm/arm64: vgic-v2: Only wipe LRs on vcpu exit
  KVM: arm/arm64: vgic-v2: Reset LRs at boot time
  KVM: arm/arm64: vgic-v2: Do not save an LR known to be empty
  KVM: arm/arm64: vgic-v2: Move GICH_ELRSR saving to its own function
  KVM: arm/arm64: vgic-v2: Save maintenance interrupt state only if required
  KVM: arm/arm64: vgic-v2: Avoid accessing GICH registers
  KVM: s390: allocate only one DMA page per VM
  KVM: s390: enable STFLE interpretation only if enabled for the guest
  KVM: s390: wake up when the VCPU cpu timer expires
  KVM: s390: step the VCPU timer while in enabled wait
  KVM: s390: protect VCPU cpu timer with a seqcount
  KVM: s390: step VCPU cpu timer during kvm_run ioctl
  ...
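One userspace-visible piece of the ARM work in this pull is wired up in the arch/arm/kvm/arm.c hunks below: vcpus now accept the generic device-attribute ioctls (KVM_HAS_DEVICE_ATTR, KVM_GET_DEVICE_ATTR, KVM_SET_DEVICE_ATTR). A minimal, hypothetical userspace sketch of the calling convention; EXAMPLE_GROUP and EXAMPLE_ATTR are placeholders, not constants defined by this merge:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Placeholders: substitute the real group/attr constants exported by the
 * target kernel's uapi headers. */
#define EXAMPLE_GROUP	0
#define EXAMPLE_ATTR	0

static int set_vcpu_attr(int vcpu_fd, void *payload)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = EXAMPLE_GROUP;
	attr.attr  = EXAMPLE_ATTR;
	attr.addr  = (__u64)(unsigned long)payload;

	/* Probe first: an error from KVM_HAS_DEVICE_ATTR means this
	 * kernel does not know the group/attr pair. */
	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr) < 0)
		return -1;

	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
}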
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/Makefile	1
-rw-r--r--	arch/arm/kvm/arm.c	244
-rw-r--r--	arch/arm/kvm/coproc.c	126
-rw-r--r--	arch/arm/kvm/coproc.h	24
-rw-r--r--	arch/arm/kvm/emulate.c	34
-rw-r--r--	arch/arm/kvm/guest.c	5
-rw-r--r--	arch/arm/kvm/handle_exit.c	7
-rw-r--r--	arch/arm/kvm/hyp/Makefile	17
-rw-r--r--	arch/arm/kvm/hyp/banked-sr.c	77
-rw-r--r--	arch/arm/kvm/hyp/cp15-sr.c	84
-rw-r--r--	arch/arm/kvm/hyp/entry.S	101
-rw-r--r--	arch/arm/kvm/hyp/hyp-entry.S	169
-rw-r--r--	arch/arm/kvm/hyp/s2-setup.c	33
-rw-r--r--	arch/arm/kvm/hyp/switch.c	232
-rw-r--r--	arch/arm/kvm/hyp/tlb.c	70
-rw-r--r--	arch/arm/kvm/hyp/vfp.S	68
-rw-r--r--	arch/arm/kvm/init.S	8
-rw-r--r--	arch/arm/kvm/interrupts.S	480
-rw-r--r--	arch/arm/kvm/interrupts_head.S	648
-rw-r--r--	arch/arm/kvm/mmu.c	23
-rw-r--r--	arch/arm/kvm/reset.c	2
21 files changed, 1162 insertions, 1291 deletions
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index c5eef02c52ba..eb1bf4309c13 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,6 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
 KVM := ../../../virt/kvm
 kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08e49c423c24..76552b51c7ae 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/kvm.h>
 #include <trace/events/kvm.h>
+#include <kvm/arm_pmu.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -265,6 +266,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_vgic_vcpu_destroy(vcpu);
+	kvm_pmu_vcpu_destroy(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
@@ -320,6 +322,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
+	kvm_timer_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
@@ -577,6 +580,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	 * non-preemptible context.
 	 */
 	preempt_disable();
+	kvm_pmu_flush_hwstate(vcpu);
 	kvm_timer_flush_hwstate(vcpu);
 	kvm_vgic_flush_hwstate(vcpu);
 
@@ -593,6 +597,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
 	    vcpu->arch.power_off || vcpu->arch.pause) {
 		local_irq_enable();
+		kvm_pmu_sync_hwstate(vcpu);
 		kvm_timer_sync_hwstate(vcpu);
 		kvm_vgic_sync_hwstate(vcpu);
 		preempt_enable();
@@ -642,10 +647,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
 
 	/*
-	 * We must sync the timer state before the vgic state so that
-	 * the vgic can properly sample the updated state of the
+	 * We must sync the PMU and timer state before the vgic state so
+	 * that the vgic can properly sample the updated state of the
 	 * interrupt line.
 	 */
+	kvm_pmu_sync_hwstate(vcpu);
 	kvm_timer_sync_hwstate(vcpu);
 
 	kvm_vgic_sync_hwstate(vcpu);
@@ -823,11 +829,54 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+				 struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	default:
+		ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
+		break;
+	}
+
+	return ret;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
+	struct kvm_device_attr attr;
 
 	switch (ioctl) {
 	case KVM_ARM_VCPU_INIT: {
@@ -870,6 +919,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 			return -E2BIG;
 		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
 	}
+	case KVM_SET_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_set_attr(vcpu, &attr);
+	}
+	case KVM_GET_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_get_attr(vcpu, &attr);
+	}
+	case KVM_HAS_DEVICE_ATTR: {
+		if (copy_from_user(&attr, argp, sizeof(attr)))
+			return -EFAULT;
+		return kvm_arm_vcpu_has_attr(vcpu, &attr);
+	}
 	default:
 		return -EINVAL;
 	}
@@ -967,6 +1031,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static void cpu_init_stage2(void *dummy)
+{
+	__cpu_init_stage2();
+}
+
 static void cpu_init_hyp_mode(void *dummy)
 {
 	phys_addr_t boot_pgd_ptr;
@@ -985,6 +1054,7 @@ static void cpu_init_hyp_mode(void *dummy)
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
 	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+	__cpu_init_stage2();
 
 	kvm_arm_init_debug();
 }
@@ -1035,6 +1105,82 @@ static inline void hyp_cpu_pm_init(void)
 }
 #endif
 
+static void teardown_common_resources(void)
+{
+	free_percpu(kvm_host_cpu_state);
+}
+
+static int init_common_resources(void)
+{
+	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+	if (!kvm_host_cpu_state) {
+		kvm_err("Cannot allocate host CPU state\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int init_subsystems(void)
+{
+	int err;
+
+	/*
+	 * Init HYP view of VGIC
+	 */
+	err = kvm_vgic_hyp_init();
+	switch (err) {
+	case 0:
+		vgic_present = true;
+		break;
+	case -ENODEV:
+	case -ENXIO:
+		vgic_present = false;
+		break;
+	default:
+		return err;
+	}
+
+	/*
+	 * Init HYP architected timer support
+	 */
+	err = kvm_timer_hyp_init();
+	if (err)
+		return err;
+
+	kvm_perf_init();
+	kvm_coproc_table_init();
+
+	return 0;
+}
+
+static void teardown_hyp_mode(void)
+{
+	int cpu;
+
+	if (is_kernel_in_hyp_mode())
+		return;
+
+	free_hyp_pgds();
+	for_each_possible_cpu(cpu)
+		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+}
+
+static int init_vhe_mode(void)
+{
+	/*
+	 * Execute the init code on each CPU.
+	 */
+	on_each_cpu(cpu_init_stage2, NULL, 1);
+
+	/* set size of VMID supported by CPU */
+	kvm_vmid_bits = kvm_get_vmid_bits();
+	kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
+	kvm_info("VHE mode initialized successfully\n");
+	return 0;
+}
+
 /**
  * Inits Hyp-mode on all online CPUs
  */
@@ -1065,7 +1211,7 @@ static int init_hyp_mode(void)
 		stack_page = __get_free_page(GFP_KERNEL);
 		if (!stack_page) {
 			err = -ENOMEM;
-			goto out_free_stack_pages;
+			goto out_err;
 		}
 
 		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
@@ -1074,16 +1220,16 @@ static int init_hyp_mode(void)
 	/*
 	 * Map the Hyp-code called directly from the host
 	 */
-	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+	err = create_hyp_mappings(__hyp_text_start, __hyp_text_end);
 	if (err) {
 		kvm_err("Cannot map world-switch code\n");
-		goto out_free_mappings;
+		goto out_err;
 	}
 
 	err = create_hyp_mappings(__start_rodata, __end_rodata);
 	if (err) {
 		kvm_err("Cannot map rodata section\n");
-		goto out_free_mappings;
+		goto out_err;
 	}
 
 	/*
@@ -1095,20 +1241,10 @@ static int init_hyp_mode(void)
 
 		if (err) {
 			kvm_err("Cannot map hyp stack\n");
-			goto out_free_mappings;
+			goto out_err;
 		}
 	}
 
-	/*
-	 * Map the host CPU structures
-	 */
-	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
-	if (!kvm_host_cpu_state) {
-		err = -ENOMEM;
-		kvm_err("Cannot allocate host CPU state\n");
-		goto out_free_mappings;
-	}
-
 	for_each_possible_cpu(cpu) {
 		kvm_cpu_context_t *cpu_ctxt;
 
@@ -1117,7 +1253,7 @@ static int init_hyp_mode(void)
 
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
-			goto out_free_context;
+			goto out_err;
 		}
 	}
 
@@ -1126,34 +1262,22 @@ static int init_hyp_mode(void)
 	 */
 	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
 
-	/*
-	 * Init HYP view of VGIC
-	 */
-	err = kvm_vgic_hyp_init();
-	switch (err) {
-	case 0:
-		vgic_present = true;
-		break;
-	case -ENODEV:
-	case -ENXIO:
-		vgic_present = false;
-		break;
-	default:
-		goto out_free_context;
-	}
-
-	/*
-	 * Init HYP architected timer support
-	 */
-	err = kvm_timer_hyp_init();
-	if (err)
-		goto out_free_context;
-
 #ifndef CONFIG_HOTPLUG_CPU
 	free_boot_hyp_pgd();
 #endif
 
-	kvm_perf_init();
+	cpu_notifier_register_begin();
+
+	err = __register_cpu_notifier(&hyp_init_cpu_nb);
+
+	cpu_notifier_register_done();
+
+	if (err) {
+		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+		goto out_err;
+	}
+
+	hyp_cpu_pm_init();
 
 	/* set size of VMID supported by CPU */
 	kvm_vmid_bits = kvm_get_vmid_bits();
@@ -1162,14 +1286,9 @@ static int init_hyp_mode(void)
 	kvm_info("Hyp mode initialized successfully\n");
 
 	return 0;
-out_free_context:
-	free_percpu(kvm_host_cpu_state);
-out_free_mappings:
-	free_hyp_pgds();
-out_free_stack_pages:
-	for_each_possible_cpu(cpu)
-		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+
 out_err:
+	teardown_hyp_mode();
 	kvm_err("error initializing Hyp mode: %d\n", err);
 	return err;
 }
@@ -1213,26 +1332,27 @@ int kvm_arch_init(void *opaque)
 	}
 	}
 
-	cpu_notifier_register_begin();
-
-	err = init_hyp_mode();
+	err = init_common_resources();
 	if (err)
-		goto out_err;
+		return err;
 
-	err = __register_cpu_notifier(&hyp_init_cpu_nb);
-	if (err) {
-		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+	if (is_kernel_in_hyp_mode())
+		err = init_vhe_mode();
+	else
+		err = init_hyp_mode();
+	if (err)
 		goto out_err;
-	}
-
-	cpu_notifier_register_done();
 
-	hyp_cpu_pm_init();
+	err = init_subsystems();
+	if (err)
+		goto out_hyp;
 
-	kvm_coproc_table_init();
 	return 0;
+
+out_hyp:
+	teardown_hyp_mode();
 out_err:
-	cpu_notifier_register_done();
+	teardown_common_resources();
 	return err;
 }
 
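Taken together, the arm.c hunks above replace the old chain of out_free_* labels with paired init/teardown helpers, and add a VHE path that skips Hyp-mode setup when the kernel already runs at EL2. A condensed sketch of the resulting kvm_arch_init() flow (simplified from the hunks above; not the literal kernel source):

	err = init_common_resources();	/* per-CPU host context */
	if (err)
		return err;

	if (is_kernel_in_hyp_mode())
		err = init_vhe_mode();	/* ARMv8.1 VHE: only stage-2 setup needed */
	else
		err = init_hyp_mode();	/* map hyp text/stacks, install vectors */
	if (err)
		goto out_err;

	err = init_subsystems();	/* vgic, timer, perf, coproc tables */
	if (err)
		goto out_hyp;		/* teardown_hyp_mode() is a no-op under VHE */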
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index f3d88dc388bc..1bb2b79c01ff 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,6 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
+
+#include <linux/bsearch.h>
 #include <linux/mm.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
@@ -54,8 +56,8 @@ static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
 			      const struct coproc_reg *r,
 			      u64 val)
 {
-	vcpu->arch.cp15[r->reg] = val & 0xffffffff;
-	vcpu->arch.cp15[r->reg + 1] = val >> 32;
+	vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
+	vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
 }
 
 static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
@@ -63,9 +65,9 @@ static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
 {
 	u64 val;
 
-	val = vcpu->arch.cp15[r->reg + 1];
+	val = vcpu_cp15(vcpu, r->reg + 1);
 	val = val << 32;
-	val = val | vcpu->arch.cp15[r->reg];
+	val = val | vcpu_cp15(vcpu, r->reg);
 	return val;
 }
 
@@ -104,7 +106,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	 * vcpu_id, but we read the 'U' bit from the underlying
 	 * hardware directly.
 	 */
-	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+	vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
 				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
 				     (vcpu->vcpu_id & 3));
 }
@@ -117,7 +119,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu,
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
 	return true;
 }
 
@@ -139,7 +141,7 @@ static bool access_l2ctlr(struct kvm_vcpu *vcpu,
 	if (p->is_write)
 		return ignore_write(vcpu, p);
 
-	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+	*vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
 	return true;
 }
 
@@ -156,7 +158,7 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	ncores = min(ncores, 3U);
 	l2ctlr |= (ncores & 3) << 24;
 
-	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+	vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
 }
 
 static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
@@ -171,7 +173,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 	else
 		actlr &= ~(1U << 6);
 
-	vcpu->arch.cp15[c1_ACTLR] = actlr;
+	vcpu_cp15(vcpu, c1_ACTLR) = actlr;
 }
 
 /*
@@ -218,9 +220,9 @@ bool access_vm_reg(struct kvm_vcpu *vcpu,
 
 	BUG_ON(!p->is_write);
 
-	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
+	vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
 	if (p->is_64bit)
-		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
+		vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
 
 	kvm_toggle_cache(vcpu, was_enabled);
 	return true;
@@ -381,17 +383,26 @@ static const struct coproc_reg cp15_regs[] = {
 	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
+static int check_reg_table(const struct coproc_reg *table, unsigned int n)
+{
+	unsigned int i;
+
+	for (i = 1; i < n; i++) {
+		if (cmp_reg(&table[i-1], &table[i]) >= 0) {
+			kvm_err("reg table %p out of order (%d)\n", table, i - 1);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /* Target specific emulation tables */
 static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 
 void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 {
-	unsigned int i;
-
-	for (i = 1; i < table->num; i++)
-		BUG_ON(cmp_reg(&table->table[i-1],
-			       &table->table[i]) >= 0);
-
+	BUG_ON(check_reg_table(table->table, table->num));
 	target_tables[table->target] = table;
 }
 
@@ -405,29 +416,32 @@ static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
 	return table->table;
 }
 
+#define reg_to_match_value(x)						\
+	({								\
+		unsigned long val;					\
+		val = (x)->CRn << 11;					\
+		val |= (x)->CRm << 7;					\
+		val |= (x)->Op1 << 4;					\
+		val |= (x)->Op2 << 1;					\
+		val |= !(x)->is_64bit;					\
+		val;							\
+	 })
+
+static int match_reg(const void *key, const void *elt)
+{
+	const unsigned long pval = (unsigned long)key;
+	const struct coproc_reg *r = elt;
+
+	return pval - reg_to_match_value(r);
+}
+
 static const struct coproc_reg *find_reg(const struct coproc_params *params,
 					 const struct coproc_reg table[],
 					 unsigned int num)
 {
-	unsigned int i;
-
-	for (i = 0; i < num; i++) {
-		const struct coproc_reg *r = &table[i];
-
-		if (params->is_64bit != r->is_64)
-			continue;
-		if (params->CRn != r->CRn)
-			continue;
-		if (params->CRm != r->CRm)
-			continue;
-		if (params->Op1 != r->Op1)
-			continue;
-		if (params->Op2 != r->Op2)
-			continue;
+	unsigned long pval = reg_to_match_value(params);
 
-		return r;
-	}
-	return NULL;
+	return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
 }
 
 static int emulate_cp15(struct kvm_vcpu *vcpu,
@@ -645,6 +659,9 @@ static struct coproc_reg invariant_cp15[] = {
 	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
 	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
 
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
 	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
@@ -660,9 +677,6 @@ static struct coproc_reg invariant_cp15[] = {
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
 	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
-
-	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
-	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 };
 
 /*
@@ -901,7 +915,7 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
 	if (vfpid < num_fp_regs()) {
 		if (KVM_REG_SIZE(id) != 8)
 			return -ENOENT;
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
 				   id);
 	}
 
@@ -911,13 +925,13 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
 
 	switch (vfpid) {
 	case KVM_REG_ARM_VFP_FPEXC:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
 	case KVM_REG_ARM_VFP_FPSCR:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
 	case KVM_REG_ARM_VFP_FPINST:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
 	case KVM_REG_ARM_VFP_FPINST2:
-		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
+		return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
 	case KVM_REG_ARM_VFP_MVFR0:
 		val = fmrx(MVFR0);
 		return reg_to_user(uaddr, &val, id);
@@ -945,7 +959,7 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
 	if (vfpid < num_fp_regs()) {
 		if (KVM_REG_SIZE(id) != 8)
 			return -ENOENT;
-		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
 				     uaddr, id);
 	}
 
@@ -955,13 +969,13 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
 
 	switch (vfpid) {
 	case KVM_REG_ARM_VFP_FPEXC:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
 	case KVM_REG_ARM_VFP_FPSCR:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
 	case KVM_REG_ARM_VFP_FPINST:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
 	case KVM_REG_ARM_VFP_FPINST2:
-		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
+		return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
 	/* These are invariant. */
 	case KVM_REG_ARM_VFP_MVFR0:
 		if (reg_from_user(&val, uaddr, id))
@@ -1030,7 +1044,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		val = vcpu_cp15_reg64_get(vcpu, r);
 		ret = reg_to_user(uaddr, &val, reg->id);
 	} else if (KVM_REG_SIZE(reg->id) == 4) {
-		ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+		ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
 	}
 
 	return ret;
@@ -1060,7 +1074,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		if (!ret)
 			vcpu_cp15_reg64_set(vcpu, r, val);
 	} else if (KVM_REG_SIZE(reg->id) == 4) {
-		ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+		ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
 	}
 
 	return ret;
@@ -1096,7 +1110,7 @@ static int write_demux_regids(u64 __user *uindices)
 static u64 cp15_to_index(const struct coproc_reg *reg)
 {
 	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
-	if (reg->is_64) {
+	if (reg->is_64bit) {
 		val |= KVM_REG_SIZE_U64;
 		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
 		/*
@@ -1210,8 +1224,8 @@ void kvm_coproc_table_init(void)
 	unsigned int i;
 
 	/* Make sure tables are unique and in order. */
-	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
-		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+	BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+	BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
 
 	/* We abuse the reset function to overwrite the table itself. */
 	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
@@ -1248,7 +1262,7 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 	const struct coproc_reg *table;
 
 	/* Catch someone adding a register without putting in reset entry. */
-	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+	memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
 
 	/* Generic chip reset first (so target could override). */
 	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
@@ -1257,6 +1271,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
 	reset_coproc_regs(vcpu, table, num);
 
 	for (num = 1; num < NR_CP15_REGS; num++)
-		if (vcpu->arch.cp15[num] == 0x42424242)
-			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
+		if (vcpu_cp15(vcpu, num) == 0x42424242)
+			panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
 }
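The find_reg() rewrite above packs CRn/CRm/Op1/Op2/is_64bit into one integer key whose numeric order matches cmp_reg(), so a sorted table can be probed with the C library's bsearch() instead of a linear scan. A standalone sketch of the same technique (illustrative only; toy register names, not the kernel's tables):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct reg {
	unsigned long CRn, CRm, Op1, Op2;
	bool is_64bit;
	const char *name;
};

/* Same packing as reg_to_match_value() in the hunk above; because the
 * field order mirrors cmp_reg(), numeric key order equals table order. */
static unsigned long key_of(const struct reg *r)
{
	return (r->CRn << 11) | (r->CRm << 7) | (r->Op1 << 4) |
	       (r->Op2 << 1) | !r->is_64bit;
}

/* bsearch() comparator; the key is smuggled in as a fake pointer,
 * exactly the trick match_reg() uses. */
static int match_reg(const void *key, const void *elt)
{
	return (int)((unsigned long)key - key_of(elt));
}

int main(void)
{
	/* Must be sorted by key_of(); the kernel enforces this at init
	 * time with check_reg_table(). */
	static const struct reg table[] = {
		{ 0, 0, 0, 1, false, "CTR"   },
		{ 1, 0, 0, 0, false, "SCTLR" },
		{ 2, 0, 0, 2, false, "TTBCR" },
	};
	struct reg probe = { 1, 0, 0, 0, false, NULL };
	const struct reg *r = bsearch((void *)key_of(&probe), table,
				      sizeof(table) / sizeof(table[0]),
				      sizeof(table[0]), match_reg);

	printf("%s\n", r ? r->name : "not found");
	return 0;
}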
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 88d24a3a9778..eef1759c2b65 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -37,7 +37,7 @@ struct coproc_reg {
 	unsigned long Op1;
 	unsigned long Op2;
 
-	bool is_64;
+	bool is_64bit;
 
 	/* Trapped access from guest, if non-NULL. */
 	bool (*access)(struct kvm_vcpu *,
@@ -47,7 +47,7 @@ struct coproc_reg {
 	/* Initialization for vcpu. */
 	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
 
-	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+	/* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
 	unsigned long reg;
 
 	/* Value (usually reset value) */
@@ -104,25 +104,25 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu,
 				 const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
-	vcpu->arch.cp15[r->reg] = 0xdecafbad;
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
 }
 
 static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
-	vcpu->arch.cp15[r->reg] = r->val;
+	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+	vcpu_cp15(vcpu, r->reg) = r->val;
 }
 
 static inline void reset_unknown64(struct kvm_vcpu *vcpu,
 				   const struct coproc_reg *r)
 {
 	BUG_ON(!r->reg);
-	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
 
-	vcpu->arch.cp15[r->reg] = 0xdecafbad;
-	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+	vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
+	vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
 }
 
 static inline int cmp_reg(const struct coproc_reg *i1,
@@ -141,7 +141,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 		return i1->Op1 - i2->Op1;
 	if (i1->Op2 != i2->Op2)
 		return i1->Op2 - i2->Op2;
-	return i2->is_64 - i1->is_64;
+	return i2->is_64bit - i1->is_64bit;
 }
 
 
@@ -150,8 +150,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define CRm64(_x)	.CRn = _x, .CRm = 0
 #define Op1(_x)	.Op1 = _x
 #define Op2(_x)	.Op2 = _x
-#define is64		.is_64 = true
-#define is32		.is_64 = false
+#define is64		.is_64bit = true
+#define is32		.is_64bit = false
 
 bool access_vm_reg(struct kvm_vcpu *vcpu,
 		   const struct coproc_params *p,
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index dc99159857b4..a494def3f195 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -112,7 +112,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
  */
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
 {
-	unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 
 	switch (mode) {
@@ -147,15 +147,15 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
 	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
 	switch (mode) {
 	case SVC_MODE:
-		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
 	case ABT_MODE:
-		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
 	case UND_MODE:
-		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
 	case IRQ_MODE:
-		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
 	case FIQ_MODE:
-		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
 	default:
 		BUG();
 	}
@@ -266,8 +266,8 @@ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 
 static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 {
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
-	u32 vbar = vcpu->arch.cp15[c12_VBAR];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+	u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
 
 	if (sctlr & SCTLR_V)
 		return 0xffff0000;
@@ -282,7 +282,7 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
 
 	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
 
@@ -357,22 +357,22 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 
 	if (is_pabt) {
 		/* Set IFAR and IFSR */
-		vcpu->arch.cp15[c6_IFAR] = addr;
-		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		vcpu_cp15(vcpu, c6_IFAR) = addr;
+		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
 		/* Always give debug fault for now - should give guest a clue */
 		if (is_lpae)
-			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+			vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
 		else
-			vcpu->arch.cp15[c5_IFSR] = 2;
+			vcpu_cp15(vcpu, c5_IFSR) = 2;
 	} else { /* !iabt */
 		/* Set DFAR and DFSR */
-		vcpu->arch.cp15[c6_DFAR] = addr;
-		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+		vcpu_cp15(vcpu, c6_DFAR) = addr;
+		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
 		/* Always give debug fault for now - should give guest a clue */
 		if (is_lpae)
-			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+			vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
 		else
-			vcpu->arch.cp15[c5_DFSR] = 2;
+			vcpu_cp15(vcpu, c5_DFSR) = 2;
 	}
 
 }
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 99361f11354a..9093ed0f8b2a 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -25,7 +25,6 @@
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 
@@ -55,7 +54,7 @@ static u64 core_reg_offset_from_id(u64 id)
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
-	struct kvm_regs *regs = &vcpu->arch.regs;
+	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
 	u64 off;
 
 	if (KVM_REG_SIZE(reg->id) != 4)
@@ -72,7 +71,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
 	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
-	struct kvm_regs *regs = &vcpu->arch.regs;
+	struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
 	u64 off, val;
 
 	if (KVM_REG_SIZE(reg->id) != 4)
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3ede90d8b20b..3f1ef0dbc899 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -147,13 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
-	case ARM_EXCEPTION_UNDEFINED:
-		kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
-			kvm_vcpu_get_hyp_pc(vcpu));
-		BUG();
-		panic("KVM: Hypervisor undefined exception!\n");
-	case ARM_EXCEPTION_DATA_ABORT:
-	case ARM_EXCEPTION_PREF_ABORT:
 	case ARM_EXCEPTION_HVC:
 		/*
 		 * See ARM ARM B1.14.1: "Hyp traps on instructions
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
new file mode 100644
index 000000000000..8dfa5f7f9290
--- /dev/null
+++ b/arch/arm/kvm/hyp/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
+obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
new file mode 100644
index 000000000000..111bda8cdebd
--- /dev/null
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -0,0 +1,77 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+__asm__(".arch_extension virt");
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
+	ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
+	ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
+	ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
+	ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
+	ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
+	ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
+	ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
+	ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
+	ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
+	ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
+	ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
+	ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
+	ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
+	ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
+	ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
+}
+
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
+	write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
+	write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
+	write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
+	write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
+}
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
new file mode 100644
index 000000000000..c4782812714c
--- /dev/null
+++ b/arch/arm/kvm/hyp/cp15-sr.c
@@ -0,0 +1,84 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
+{
+	return (u64 *)(ctxt->cp15 + idx);
+}
+
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+	ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR);
+	ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
+	ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
+	ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
+	*cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
+	*cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
+	ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
+	ctxt->cp15[c3_DACR] = read_sysreg(DACR);
+	ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
+	ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
+	ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
+	ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
+	ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
+	ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
+	*cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
+	ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
+	ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
+	ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
+	ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
+	ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
+	ctxt->cp15[c13_CID] = read_sysreg(CID);
+	ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
+	ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
+	ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
+	ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+	write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
+	write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
+	write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
+	write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
+	write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
+	write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
+	write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
+	write_sysreg(ctxt->cp15[c3_DACR], DACR);
+	write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
+	write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
+	write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
+	write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
+	write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
+	write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
+	write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
+	write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
+	write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
+	write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
+	write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
+	write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
+	write_sysreg(ctxt->cp15[c13_CID], CID);
+	write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
+	write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
+	write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
+	write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
+}
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
new file mode 100644
index 000000000000..21c238871c9e
--- /dev/null
+++ b/arch/arm/kvm/hyp/entry.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_arm.h>
+
+	.arch_extension virt
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+#define USR_REGS_OFFSET		(CPU_CTXT_GP_REGS + GP_REGS_USR)
+
+/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
+ENTRY(__guest_enter)
+	@ Save host registers
+	add	r1, r1, #(USR_REGS_OFFSET + S_R4)
+	stm	r1!, {r4-r12}
+	str	lr, [r1, #4]	@ Skip SP_usr (already saved)
+
+	@ Restore guest registers
+	add	r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+	ldr	lr, [r0, #S_LR]
+	ldm	r0, {r0-r12}
+
+	clrex
+	eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+	/*
+	 * return convention:
+	 * guest r0, r1, r2 saved on the stack
+	 * r0: vcpu pointer
+	 * r1: exception code
+	 */
+
+	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
+	stm	r2!, {r3-r12}
+	str	lr, [r2, #4]
+	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+	pop	{r3, r4, r5}		@ r0, r1, r2
+	stm	r2, {r3-r5}
+
+	ldr	r0, [r0, #VCPU_HOST_CTXT]
+	add	r0, r0, #(USR_REGS_OFFSET + S_R4)
+	ldm	r0!, {r4-r12}
+	ldr	lr, [r0, #4]
+
+	mov	r0, r1
+	bx	lr
+ENDPROC(__guest_exit)
+
+/*
+ * If VFPv3 support is not available, then we will not switch the VFP
+ * registers; however cp10 and cp11 accesses will still trap and fallback
+ * to the regular coprocessor emulation code, which currently will
+ * inject an undefined exception to the guest.
+ */
+#ifdef CONFIG_VFPv3
+ENTRY(__vfp_guest_restore)
+	push	{r3, r4, lr}
+
+	@ NEON/VFP used. Turn on VFP access.
+	mrc	p15, 4, r1, c1, c1, 2	@ HCPTR
+	bic	r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+	mcr	p15, 4, r1, c1, c1, 2	@ HCPTR
+	isb
+
+	@ Switch VFP/NEON hardware state to the guest's
+	mov	r4, r0
+	ldr	r0, [r0, #VCPU_HOST_CTXT]
+	add	r0, r0, #CPU_CTXT_VFP
+	bl	__vfp_save_state
+	add	r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
+	bl	__vfp_restore_state
+
+	pop	{r3, r4, lr}
+	pop	{r0, r1, r2}
+	clrex
+	eret
+ENDPROC(__vfp_guest_restore)
+#endif
+
+	.popsection
+
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
new file mode 100644
index 000000000000..78091383a5d9
--- /dev/null
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -0,0 +1,169 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/linkage.h>
20#include <asm/kvm_arm.h>
21#include <asm/kvm_asm.h>
22
23 .arch_extension virt
24
25 .text
26 .pushsection .hyp.text, "ax"
27
28.macro load_vcpu reg
29 mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR
30.endm
31
32/********************************************************************
33 * Hypervisor exception vector and handlers
34 *
35 *
36 * The KVM/ARM Hypervisor ABI is defined as follows:
37 *
38 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
39 * instruction is issued since all traps are disabled when running the host
40 * kernel as per the Hyp-mode initialization at boot time.
41 *
42 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
43 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
44 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
45 * instructions are called from within Hyp-mode.
46 *
47 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
48 * Switching to Hyp mode is done through a simple HVC #0 instruction. The
49 * exception vector code will check that the HVC comes from VMID==0.
50 * - r0 contains a pointer to a HYP function
51 * - r1, r2, and r3 contain arguments to the above function.
52 * - The HYP function will be called with its arguments in r0, r1 and r2.
53 * On HYP function return, we return directly to SVC.
54 *
55 * Note that the above is used to execute code in Hyp-mode from a host-kernel
56 * point of view, and is a different concept from performing a world-switch and
57 * executing guest code SVC mode (with a VMID != 0).
58 */
59
60 .align 5
61__kvm_hyp_vector:
62 .global __kvm_hyp_vector
63
64 @ Hyp-mode exception vector
65 W(b) hyp_reset
66 W(b) hyp_undef
67 W(b) hyp_svc
68 W(b) hyp_pabt
69 W(b) hyp_dabt
70 W(b) hyp_hvc
71 W(b) hyp_irq
72 W(b) hyp_fiq
73
74.macro invalid_vector label, cause
75 .align
76\label: mov r0, #\cause
77 b __hyp_panic
78.endm
79
80 invalid_vector hyp_reset ARM_EXCEPTION_RESET
81 invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED
82 invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE
83 invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT
84 invalid_vector hyp_dabt ARM_EXCEPTION_DATA_ABORT
85 invalid_vector hyp_fiq ARM_EXCEPTION_FIQ
86
87ENTRY(__hyp_do_panic)
88 mrs lr, cpsr
89 bic lr, lr, #MODE_MASK
90 orr lr, lr, #SVC_MODE
91THUMB( orr lr, lr, #PSR_T_BIT )
92 msr spsr_cxsf, lr
93 ldr lr, =panic
94 msr ELR_hyp, lr
95 ldr lr, =kvm_call_hyp
96 clrex
97 eret
98ENDPROC(__hyp_do_panic)
99
100hyp_hvc:
101 /*
102 * Getting here is either because of a trap from a guest,
103 * or from executing HVC from the host kernel, which means
104 * "do something in Hyp mode".
105 */
106 push {r0, r1, r2}
107
108 @ Check syndrome register
109 mrc p15, 4, r1, c5, c2, 0 @ HSR
110 lsr r0, r1, #HSR_EC_SHIFT
111 cmp r0, #HSR_EC_HVC
112 bne guest_trap @ Not HVC instr.
113
114 /*
115 * Let's check if the HVC came from VMID 0 and allow simple
116 * switch to Hyp mode
117 */
118 mrrc p15, 6, r0, r2, c2
119 lsr r2, r2, #16
120 and r2, r2, #0xff
121 cmp r2, #0
122 bne guest_trap @ Guest called HVC
123
124 /*
125 * Getting here means host called HVC, we shift parameters and branch
126 * to Hyp function.
127 */
128 pop {r0, r1, r2}
129
130 /* Check for __hyp_get_vectors */
131 cmp r0, #-1
132 mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
133 beq 1f
134
135 push {lr}
136
137 mov lr, r0
138 mov r0, r1
139 mov r1, r2
140 mov r2, r3
141
142THUMB( orr lr, #1)
143 blx lr @ Call the HYP function
144
145 pop {lr}
1461: eret
147
148guest_trap:
149 load_vcpu r0 @ Load VCPU pointer to r0
150
151#ifdef CONFIG_VFPv3
152 @ Check for a VFP access
153 lsr r1, r1, #HSR_EC_SHIFT
154 cmp r1, #HSR_EC_CP_0_13
155 beq __vfp_guest_restore
156#endif
157
158 mov r1, #ARM_EXCEPTION_HVC
159 b __guest_exit
160
161hyp_irq:
162 push {r0, r1, r2}
163 mov r1, #ARM_EXCEPTION_IRQ
164 load_vcpu r0 @ Load VCPU pointer to r0
165 b __guest_exit
166
167 .ltorg
168
169 .popsection
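
From the host, the ABI documented above is exercised through kvm_call_hyp(): the function pointer travels in r0, up to three arguments in r1-r3, and hyp_hvc shifts them down into r0-r2 before the blx. Illustrative calls, sketching the convention with hyp symbols defined elsewhere in this patch:

	/* Zero-argument hyp function */
	kvm_call_hyp(__kvm_flush_vm_context);

	/* One argument: r1 at the call site becomes r0 at HYP */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);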
diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c
new file mode 100644
index 000000000000..7be39af2ed6c
--- /dev/null
+++ b/arch/arm/kvm/hyp/s2-setup.c
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2016 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/types.h>
19#include <asm/kvm_arm.h>
20#include <asm/kvm_asm.h>
21#include <asm/kvm_hyp.h>
22
23void __hyp_text __init_stage2_translation(void)
24{
25 u64 val;
26
27 val = read_sysreg(VTCR) & ~VTCR_MASK;
28
29 val |= read_sysreg(HTCR) & VTCR_HTCR_SH;
30 val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;
31
32 write_sysreg(val, VTCR);
33}
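
This C routine replaces the VTCR setup removed from init.S later in this diff: shareability attributes are inherited from HTCR, while SL0/T0SZ/S configure the stage-2 walk geometry. As a worked check (illustrative arithmetic only, not new code):

	/*
	 * With KVM's 40-bit IPA space, T0SZ = 32 - 40 = -8. The size
	 * offset is negative, which is why KVM_VTCR_S (the sign bit)
	 * accompanies KVM_VTCR_T0SZ, and why KVM_VTCR_SL0 starts the
	 * stage-2 walk at the first level with concatenated tables.
	 */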
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
new file mode 100644
index 000000000000..b13caa90cd44
--- /dev/null
+++ b/arch/arm/kvm/hyp/switch.c
@@ -0,0 +1,232 @@
1/*
2 * Copyright (C) 2015 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <asm/kvm_asm.h>
19#include <asm/kvm_hyp.h>
20
21__asm__(".arch_extension virt");
22
23/*
24 * Activate the traps, saving the host's fpexc register before
25 * overwriting it. We'll restore it on VM exit.
26 */
27static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
28{
29 u32 val;
30
31 /*
32 * We are about to set HCPTR.TCP10/11 to trap all floating point
33 * register accesses to HYP, however, the ARM ARM clearly states that
34 * traps are only taken to HYP if the operation would not otherwise
35 * trap to SVC. Therefore, always make sure that for 32-bit guests,
36 * we set FPEXC.EN to prevent traps to SVC when setting the TCP bits.
37 */
38 val = read_sysreg(VFP_FPEXC);
39 *fpexc_host = val;
40 if (!(val & FPEXC_EN)) {
41 write_sysreg(val | FPEXC_EN, VFP_FPEXC);
42 isb();
43 }
44
45 write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
46 /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
47 write_sysreg(HSTR_T(15), HSTR);
48 write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
49 val = read_sysreg(HDCR);
50 write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
51}
52
53static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
54{
55 u32 val;
56
57 write_sysreg(0, HCR);
58 write_sysreg(0, HSTR);
59 val = read_sysreg(HDCR);
60 write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
61 write_sysreg(0, HCPTR);
62}
63
64static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
65{
66 struct kvm *kvm = kern_hyp_va(vcpu->kvm);
67 write_sysreg(kvm->arch.vttbr, VTTBR);
68 write_sysreg(vcpu->arch.midr, VPIDR);
69}
70
71static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
72{
73 write_sysreg(0, VTTBR);
74 write_sysreg(read_sysreg(MIDR), VPIDR);
75}
76
77static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
78{
79 __vgic_v2_save_state(vcpu);
80}
81
82static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
83{
84 __vgic_v2_restore_state(vcpu);
85}
86
87static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
88{
89 u32 hsr = read_sysreg(HSR);
90 u8 ec = hsr >> HSR_EC_SHIFT;
91 u32 hpfar, far;
92
93 vcpu->arch.fault.hsr = hsr;
94
95 if (ec == HSR_EC_IABT)
96 far = read_sysreg(HIFAR);
97 else if (ec == HSR_EC_DABT)
98 far = read_sysreg(HDFAR);
99 else
100 return true;
101
102 /*
103 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
104 *
105 * Abort on the stage 2 translation for a memory access from a
106 * Non-secure PL1 or PL0 mode:
107 *
108 * For any Access flag fault or Translation fault, and also for any
109 * Permission fault on the stage 2 translation of a memory access
110 * made as part of a translation table walk for a stage 1 translation,
111 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
112 * is UNKNOWN.
113 */
114 if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
115 u64 par, tmp;
116
117 par = read_sysreg(PAR);
118 write_sysreg(far, ATS1CPR);
119 isb();
120
121 tmp = read_sysreg(PAR);
122 write_sysreg(par, PAR);
123
124 if (unlikely(tmp & 1))
125 return false; /* Translation failed, back to guest */
126
127 hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
128 } else {
129 hpfar = read_sysreg(HPFAR);
130 }
131
132 vcpu->arch.fault.hxfar = far;
133 vcpu->arch.fault.hpfar = hpfar;
134 return true;
135}
136
137static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
138{
139 struct kvm_cpu_context *host_ctxt;
140 struct kvm_cpu_context *guest_ctxt;
141 bool fp_enabled;
142 u64 exit_code;
143 u32 fpexc;
144
145 vcpu = kern_hyp_va(vcpu);
146 write_sysreg(vcpu, HTPIDR);
147
148 host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
149 guest_ctxt = &vcpu->arch.ctxt;
150
151 __sysreg_save_state(host_ctxt);
152 __banked_save_state(host_ctxt);
153
154 __activate_traps(vcpu, &fpexc);
155 __activate_vm(vcpu);
156
157 __vgic_restore_state(vcpu);
158 __timer_restore_state(vcpu);
159
160 __sysreg_restore_state(guest_ctxt);
161 __banked_restore_state(guest_ctxt);
162
163 /* Jump in the fire! */
164again:
165 exit_code = __guest_enter(vcpu, host_ctxt);
166 /* And we're baaack! */
167
168 if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
169 goto again;
170
171 fp_enabled = __vfp_enabled();
172
173 __banked_save_state(guest_ctxt);
174 __sysreg_save_state(guest_ctxt);
175 __timer_save_state(vcpu);
176 __vgic_save_state(vcpu);
177
178 __deactivate_traps(vcpu);
179 __deactivate_vm(vcpu);
180
181 __banked_restore_state(host_ctxt);
182 __sysreg_restore_state(host_ctxt);
183
184 if (fp_enabled) {
185 __vfp_save_state(&guest_ctxt->vfp);
186 __vfp_restore_state(&host_ctxt->vfp);
187 }
188
189 write_sysreg(fpexc, VFP_FPEXC);
190
191 return exit_code;
192}
193
194__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
195
196static const char * const __hyp_panic_string[] = {
197 [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
198 [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
199 [ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x",
200 [ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
201 [ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
202 [ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
203 [ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
204 [ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x",
205};
206
207void __hyp_text __noreturn __hyp_panic(int cause)
208{
209 u32 elr = read_special(ELR_hyp);
210 u32 val;
211
212 if (cause == ARM_EXCEPTION_DATA_ABORT)
213 val = read_sysreg(HDFAR);
214 else
215 val = read_special(SPSR);
216
217 if (read_sysreg(VTTBR)) {
218 struct kvm_vcpu *vcpu;
219 struct kvm_cpu_context *host_ctxt;
220
221 vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
222 host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
223 __deactivate_traps(vcpu);
224 __deactivate_vm(vcpu);
225 __sysreg_restore_state(host_ctxt);
226 }
227
228 /* Call panic for real */
229 __hyp_do_panic(__hyp_panic_string[cause], elr, val);
230
231 unreachable();
232}
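
The __kvm_vcpu_run alias above is the HYP entry point for the whole world switch; from the host it is reached through the Hyp-ABI, roughly as follows (sketch of the existing caller in arm.c):

	/* In kvm_arch_vcpu_ioctl_run(), per vcpu entry: */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);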
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
new file mode 100644
index 000000000000..a2636001e616
--- /dev/null
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -0,0 +1,70 @@
1/*
2 * Original code:
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
4 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 *
6 * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <asm/kvm_hyp.h>
22
23/**
24 * Flush per-VMID TLBs
25 *
26 * __kvm_tlb_flush_vmid(struct kvm *kvm);
27 *
28 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
29 * inside the inner-shareable domain (which is the case for all v7
30 * implementations). If we come across a non-IS SMP implementation, we'll
31 * have to use an IPI based mechanism. Until then, we stick to the simple
32 * hardware assisted version.
33 *
34 * As v7 does not support flushing per IPA, just nuke the whole TLB
35 * instead, ignoring the ipa value.
36 */
37static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
38{
39 dsb(ishst);
40
41 /* Switch to requested VMID */
42 kvm = kern_hyp_va(kvm);
43 write_sysreg(kvm->arch.vttbr, VTTBR);
44 isb();
45
46 write_sysreg(0, TLBIALLIS);
47 dsb(ish);
48 isb();
49
50 write_sysreg(0, VTTBR);
51}
52
53__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
54
55static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
56{
57 __tlb_flush_vmid(kvm);
58}
59
60__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
61 phys_addr_t ipa);
62
63static void __hyp_text __tlb_flush_vm_context(void)
64{
65 write_sysreg(0, TLBIALLNSNHIS);
66 write_sysreg(0, ICIALLUIS);
67 dsb(ish);
68}
69
70__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
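
As with the other hyp functions, the aliases above are invoked through kvm_call_hyp(); the stage-2 MMU code wraps them along these lines (sketch of the existing wrapper in mmu.c):

	static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
	{
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
	}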
diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S
new file mode 100644
index 000000000000..7c297e87eb8b
--- /dev/null
+++ b/arch/arm/kvm/hyp/vfp.S
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/linkage.h>
19#include <asm/vfpmacros.h>
20
21 .text
22 .pushsection .hyp.text, "ax"
23
24/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
25ENTRY(__vfp_save_state)
26 push {r4, r5}
27 VFPFMRX r1, FPEXC
28
 29 @ Make sure VFP is really enabled so we can touch the registers.
30 orr r5, r1, #FPEXC_EN
31 tst r5, #FPEXC_EX @ Check for VFP Subarchitecture
32 bic r5, r5, #FPEXC_EX @ FPEXC_EX disable
33 VFPFMXR FPEXC, r5
34 isb
35
36 VFPFMRX r2, FPSCR
37 beq 1f
38
 39 @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
40 @ we only need to save them if FPEXC_EX is set.
41 VFPFMRX r3, FPINST
42 tst r5, #FPEXC_FP2V
43 VFPFMRX r4, FPINST2, ne @ vmrsne
441:
45 VFPFSTMIA r0, r5 @ Save VFP registers
46 stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2
47 pop {r4, r5}
48 bx lr
49ENDPROC(__vfp_save_state)
50
51/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
52 * Assume FPEXC_EN is on and FPEXC_EX is off */
53ENTRY(__vfp_restore_state)
54 VFPFLDMIA r0, r1 @ Load VFP registers
55 ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2
56
57 VFPFMXR FPSCR, r1
58 tst r0, #FPEXC_EX @ Check for VFP Subarchitecture
59 beq 1f
60 VFPFMXR FPINST, r2
61 tst r0, #FPEXC_FP2V
62 VFPFMXR FPINST2, r3, ne
631:
64 VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN)
65 bx lr
66ENDPROC(__vfp_restore_state)
67
68 .popsection
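
The VFPFSTMIA/stm pairing above fixes the memory layout these routines expect: the register file first, then FPEXC, FPSCR, FPINST and FPINST2 as four consecutive words. A simplified sketch of struct vfp_hard_struct (the authoritative definition, including config-dependent fields omitted here, lives in asm/fpstate.h):

	struct vfp_hard_struct {	/* simplified sketch */
		__u64 fpregs[32];	/* saved/loaded by VFPFSTMIA/VFPFLDMIA */
		__u32 fpexc;		/* the stm/ldm block starts here */
		__u32 fpscr;
		__u32 fpinst;
		__u32 fpinst2;
	};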
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72d16ff..1f9ae17476f9 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -84,14 +84,6 @@ __do_hyp_init:
84 orr r0, r0, r1 84 orr r0, r0, r1
85 mcr p15, 4, r0, c2, c0, 2 @ HTCR 85 mcr p15, 4, r0, c2, c0, 2 @ HTCR
86 86
87 mrc p15, 4, r1, c2, c1, 2 @ VTCR
88 ldr r2, =VTCR_MASK
89 bic r1, r1, r2
90 bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
91 orr r1, r0, r1
92 orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
93 mcr p15, 4, r1, c2, c1, 2 @ VTCR
94
95 @ Use the same memory attributes for hyp. accesses as the kernel 87 @ Use the same memory attributes for hyp. accesses as the kernel
 96 @ (copy MAIRx to HMAIRx). 88 @ (copy MAIRx to HMAIRx).
97 mrc p15, 0, r0, c10, c2, 0 89 mrc p15, 0, r0, c10, c2, 0
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 900ef6dd8f72..b1bd316f14c0 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -17,211 +17,14 @@
17 */ 17 */
18 18
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <linux/const.h>
21#include <asm/unified.h>
22#include <asm/page.h>
23#include <asm/ptrace.h>
24#include <asm/asm-offsets.h>
25#include <asm/kvm_asm.h>
26#include <asm/kvm_arm.h>
27#include <asm/vfpmacros.h>
28#include "interrupts_head.S"
29 20
30 .text 21 .text
31 22
32__kvm_hyp_code_start:
33 .globl __kvm_hyp_code_start
34
35/********************************************************************
36 * Flush per-VMID TLBs
37 *
38 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
39 *
40 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
41 * inside the inner-shareable domain (which is the case for all v7
42 * implementations). If we come across a non-IS SMP implementation, we'll
43 * have to use an IPI based mechanism. Until then, we stick to the simple
44 * hardware assisted version.
45 *
46 * As v7 does not support flushing per IPA, just nuke the whole TLB
47 * instead, ignoring the ipa value.
48 */
49ENTRY(__kvm_tlb_flush_vmid_ipa)
50 push {r2, r3}
51
52 dsb ishst
53 add r0, r0, #KVM_VTTBR
54 ldrd r2, r3, [r0]
55 mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
56 isb
57 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
58 dsb ish
59 isb
60 mov r2, #0
61 mov r3, #0
62 mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
63 isb @ Not necessary if followed by eret
64
65 pop {r2, r3}
66 bx lr
67ENDPROC(__kvm_tlb_flush_vmid_ipa)
68
69/**
70 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
71 *
72 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
73 * parameter
74 */
75
76ENTRY(__kvm_tlb_flush_vmid)
77 b __kvm_tlb_flush_vmid_ipa
78ENDPROC(__kvm_tlb_flush_vmid)
79
80/********************************************************************
81 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
82 * domain, for all VMIDs
83 *
84 * void __kvm_flush_vm_context(void);
85 */
86ENTRY(__kvm_flush_vm_context)
87 mov r0, #0 @ rn parameter for c15 flushes is SBZ
88
89 /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
90 mcr p15, 4, r0, c8, c3, 4
91 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
92 mcr p15, 0, r0, c7, c1, 0
93 dsb ish
94 isb @ Not necessary if followed by eret
95
96 bx lr
97ENDPROC(__kvm_flush_vm_context)
98
99
100/********************************************************************
101 * Hypervisor world-switch code
102 *
103 *
104 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
105 */
106ENTRY(__kvm_vcpu_run)
107 @ Save the vcpu pointer
108 mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
109
110 save_host_regs
111
112 restore_vgic_state
113 restore_timer_state
114
115 @ Store hardware CP15 state and load guest state
116 read_cp15_state store_to_vcpu = 0
117 write_cp15_state read_from_vcpu = 1
118
119 @ If the host kernel has not been configured with VFPv3 support,
120 @ then it is safer if we deny guests from using it as well.
121#ifdef CONFIG_VFPv3
122 @ Set FPEXC_EN so the guest doesn't trap floating point instructions
123 VFPFMRX r2, FPEXC @ VMRS
124 push {r2}
125 orr r2, r2, #FPEXC_EN
126 VFPFMXR FPEXC, r2 @ VMSR
127#endif
128
129 @ Configure Hyp-role
130 configure_hyp_role vmentry
131
132 @ Trap coprocessor CRx accesses
133 set_hstr vmentry
134 set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
135 set_hdcr vmentry
136
137 @ Write configured ID register into MIDR alias
138 ldr r1, [vcpu, #VCPU_MIDR]
139 mcr p15, 4, r1, c0, c0, 0
140
141 @ Write guest view of MPIDR into VMPIDR
142 ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
143 mcr p15, 4, r1, c0, c0, 5
144
145 @ Set up guest memory translation
146 ldr r1, [vcpu, #VCPU_KVM]
147 add r1, r1, #KVM_VTTBR
148 ldrd r2, r3, [r1]
149 mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
150
151 @ We're all done, just restore the GPRs and go to the guest
152 restore_guest_regs
153 clrex @ Clear exclusive monitor
154 eret
155
156__kvm_vcpu_return:
157 /*
158 * return convention:
159 * guest r0, r1, r2 saved on the stack
160 * r0: vcpu pointer
161 * r1: exception code
162 */
163 save_guest_regs
164
165 @ Set VMID == 0
166 mov r2, #0
167 mov r3, #0
168 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
169
170 @ Don't trap coprocessor accesses for host kernel
171 set_hstr vmexit
172 set_hdcr vmexit
173 set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
174
175#ifdef CONFIG_VFPv3
176 @ Switch VFP/NEON hardware state to the host's
177 add r7, vcpu, #VCPU_VFP_GUEST
178 store_vfp_state r7
179 add r7, vcpu, #VCPU_VFP_HOST
180 ldr r7, [r7]
181 restore_vfp_state r7
182
183after_vfp_restore:
184 @ Restore FPEXC_EN which we clobbered on entry
185 pop {r2}
186 VFPFMXR FPEXC, r2
187#else
188after_vfp_restore:
189#endif
190
191 @ Reset Hyp-role
192 configure_hyp_role vmexit
193
194 @ Let host read hardware MIDR
195 mrc p15, 0, r2, c0, c0, 0
196 mcr p15, 4, r2, c0, c0, 0
197
198 @ Back to hardware MPIDR
199 mrc p15, 0, r2, c0, c0, 5
200 mcr p15, 4, r2, c0, c0, 5
201
202 @ Store guest CP15 state and restore host state
203 read_cp15_state store_to_vcpu = 1
204 write_cp15_state read_from_vcpu = 0
205
206 save_timer_state
207 save_vgic_state
208
209 restore_host_regs
210 clrex @ Clear exclusive monitor
211#ifndef CONFIG_CPU_ENDIAN_BE8
212 mov r0, r1 @ Return the return code
213 mov r1, #0 @ Clear upper bits in return value
214#else
215 @ r1 already has return code
216 mov r0, #0 @ Clear upper bits in return value
217#endif /* CONFIG_CPU_ENDIAN_BE8 */
218 bx lr @ return to IOCTL
219
220/******************************************************************** 23/********************************************************************
221 * Call function in Hyp mode 24 * Call function in Hyp mode
222 * 25 *
223 * 26 *
224 * u64 kvm_call_hyp(void *hypfn, ...); 27 * unsigned long kvm_call_hyp(void *hypfn, ...);
225 * 28 *
226 * This is not really a variadic function in the classic C-way and care must 29 * This is not really a variadic function in the classic C-way and care must
227 * be taken when calling this to ensure parameters are passed in registers 30 * be taken when calling this to ensure parameters are passed in registers
@@ -232,7 +35,7 @@ after_vfp_restore:
232 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the 35 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
233 * function pointer can be passed). The function being called must be mapped 36 * function pointer can be passed). The function being called must be mapped
234 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are 37 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
235 * passed in r0 and r1. 38 * passed in r0 (strictly 32bit).
236 * 39 *
237 * A function pointer with a value of 0xffffffff has a special meaning, 40 * A function pointer with a value of 0xffffffff has a special meaning,
238 * and is used to implement __hyp_get_vectors in the same way as in 41 * and is used to implement __hyp_get_vectors in the same way as in
@@ -246,281 +49,4 @@ after_vfp_restore:
246ENTRY(kvm_call_hyp) 49ENTRY(kvm_call_hyp)
247 hvc #0 50 hvc #0
248 bx lr 51 bx lr
249 52ENDPROC(kvm_call_hyp)
250/********************************************************************
251 * Hypervisor exception vector and handlers
252 *
253 *
254 * The KVM/ARM Hypervisor ABI is defined as follows:
255 *
256 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
257 * instruction is issued since all traps are disabled when running the host
258 * kernel as per the Hyp-mode initialization at boot time.
259 *
260 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
261 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
262 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
263 * instructions are called from within Hyp-mode.
264 *
265 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
266 * Switching to Hyp mode is done through a simple HVC #0 instruction. The
267 * exception vector code will check that the HVC comes from VMID==0 and if
268 * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
269 * - r0 contains a pointer to a HYP function
270 * - r1, r2, and r3 contain arguments to the above function.
271 * - The HYP function will be called with its arguments in r0, r1 and r2.
272 * On HYP function return, we return directly to SVC.
273 *
274 * Note that the above is used to execute code in Hyp-mode from a host-kernel
275 * point of view, and is a different concept from performing a world-switch and
276 * executing guest code SVC mode (with a VMID != 0).
277 */
278
279/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
280.macro bad_exception exception_code, panic_str
281 push {r0-r2}
282 mrrc p15, 6, r0, r1, c2 @ Read VTTBR
283 lsr r1, r1, #16
284 ands r1, r1, #0xff
285 beq 99f
286
287 load_vcpu @ Load VCPU pointer
288 .if \exception_code == ARM_EXCEPTION_DATA_ABORT
289 mrc p15, 4, r2, c5, c2, 0 @ HSR
290 mrc p15, 4, r1, c6, c0, 0 @ HDFAR
291 str r2, [vcpu, #VCPU_HSR]
292 str r1, [vcpu, #VCPU_HxFAR]
293 .endif
294 .if \exception_code == ARM_EXCEPTION_PREF_ABORT
295 mrc p15, 4, r2, c5, c2, 0 @ HSR
296 mrc p15, 4, r1, c6, c0, 2 @ HIFAR
297 str r2, [vcpu, #VCPU_HSR]
298 str r1, [vcpu, #VCPU_HxFAR]
299 .endif
300 mov r1, #\exception_code
301 b __kvm_vcpu_return
302
 303 @ We were in the host already. Let's craft a panicking return to SVC.
30499: mrs r2, cpsr
305 bic r2, r2, #MODE_MASK
306 orr r2, r2, #SVC_MODE
307THUMB( orr r2, r2, #PSR_T_BIT )
308 msr spsr_cxsf, r2
309 mrs r1, ELR_hyp
310 ldr r2, =panic
311 msr ELR_hyp, r2
312 ldr r0, =\panic_str
313 clrex @ Clear exclusive monitor
314 eret
315.endm
316
317 .text
318
319 .align 5
320__kvm_hyp_vector:
321 .globl __kvm_hyp_vector
322
323 @ Hyp-mode exception vector
324 W(b) hyp_reset
325 W(b) hyp_undef
326 W(b) hyp_svc
327 W(b) hyp_pabt
328 W(b) hyp_dabt
329 W(b) hyp_hvc
330 W(b) hyp_irq
331 W(b) hyp_fiq
332
333 .align
334hyp_reset:
335 b hyp_reset
336
337 .align
338hyp_undef:
339 bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
340
341 .align
342hyp_svc:
343 bad_exception ARM_EXCEPTION_HVC, svc_die_str
344
345 .align
346hyp_pabt:
347 bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
348
349 .align
350hyp_dabt:
351 bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
352
353 .align
354hyp_hvc:
355 /*
 356 * Getting here is either because of a trap from a guest or from calling
357 * HVC from the host kernel, which means "switch to Hyp mode".
358 */
359 push {r0, r1, r2}
360
361 @ Check syndrome register
362 mrc p15, 4, r1, c5, c2, 0 @ HSR
363 lsr r0, r1, #HSR_EC_SHIFT
364 cmp r0, #HSR_EC_HVC
365 bne guest_trap @ Not HVC instr.
366
367 /*
368 * Let's check if the HVC came from VMID 0 and allow simple
369 * switch to Hyp mode
370 */
371 mrrc p15, 6, r0, r2, c2
372 lsr r2, r2, #16
373 and r2, r2, #0xff
374 cmp r2, #0
375 bne guest_trap @ Guest called HVC
376
377 /*
378 * Getting here means host called HVC, we shift parameters and branch
379 * to Hyp function.
380 */
381 pop {r0, r1, r2}
382
383 /* Check for __hyp_get_vectors */
384 cmp r0, #-1
385 mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
386 beq 1f
387
388 push {lr}
389 mrs lr, SPSR
390 push {lr}
391
392 mov lr, r0
393 mov r0, r1
394 mov r1, r2
395 mov r2, r3
396
397THUMB( orr lr, #1)
398 blx lr @ Call the HYP function
399
400 pop {lr}
401 msr SPSR_csxf, lr
402 pop {lr}
4031: eret
404
405guest_trap:
406 load_vcpu @ Load VCPU pointer to r0
407 str r1, [vcpu, #VCPU_HSR]
408
409 @ Check if we need the fault information
410 lsr r1, r1, #HSR_EC_SHIFT
411#ifdef CONFIG_VFPv3
412 cmp r1, #HSR_EC_CP_0_13
413 beq switch_to_guest_vfp
414#endif
415 cmp r1, #HSR_EC_IABT
416 mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
417 beq 2f
418 cmp r1, #HSR_EC_DABT
419 bne 1f
420 mrc p15, 4, r2, c6, c0, 0 @ HDFAR
421
4222: str r2, [vcpu, #VCPU_HxFAR]
423
424 /*
425 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
426 *
427 * Abort on the stage 2 translation for a memory access from a
428 * Non-secure PL1 or PL0 mode:
429 *
430 * For any Access flag fault or Translation fault, and also for any
431 * Permission fault on the stage 2 translation of a memory access
432 * made as part of a translation table walk for a stage 1 translation,
433 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
434 * is UNKNOWN.
435 */
436
437 /* Check for permission fault, and S1PTW */
438 mrc p15, 4, r1, c5, c2, 0 @ HSR
439 and r0, r1, #HSR_FSC_TYPE
440 cmp r0, #FSC_PERM
441 tsteq r1, #(1 << 7) @ S1PTW
442 mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
443 bne 3f
444
445 /* Preserve PAR */
446 mrrc p15, 0, r0, r1, c7 @ PAR
447 push {r0, r1}
448
449 /* Resolve IPA using the xFAR */
450 mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
451 isb
452 mrrc p15, 0, r0, r1, c7 @ PAR
453 tst r0, #1
454 bne 4f @ Failed translation
455 ubfx r2, r0, #12, #20
456 lsl r2, r2, #4
457 orr r2, r2, r1, lsl #24
458
459 /* Restore PAR */
460 pop {r0, r1}
461 mcrr p15, 0, r0, r1, c7 @ PAR
462
4633: load_vcpu @ Load VCPU pointer to r0
464 str r2, [r0, #VCPU_HPFAR]
465
4661: mov r1, #ARM_EXCEPTION_HVC
467 b __kvm_vcpu_return
468
4694: pop {r0, r1} @ Failed translation, return to guest
470 mcrr p15, 0, r0, r1, c7 @ PAR
471 clrex
472 pop {r0, r1, r2}
473 eret
474
475/*
476 * If VFPv3 support is not available, then we will not switch the VFP
477 * registers; however cp10 and cp11 accesses will still trap and fallback
478 * to the regular coprocessor emulation code, which currently will
479 * inject an undefined exception to the guest.
480 */
481#ifdef CONFIG_VFPv3
482switch_to_guest_vfp:
483 push {r3-r7}
484
485 @ NEON/VFP used. Turn on VFP access.
486 set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
487
488 @ Switch VFP/NEON hardware state to the guest's
489 add r7, r0, #VCPU_VFP_HOST
490 ldr r7, [r7]
491 store_vfp_state r7
492 add r7, r0, #VCPU_VFP_GUEST
493 restore_vfp_state r7
494
495 pop {r3-r7}
496 pop {r0-r2}
497 clrex
498 eret
499#endif
500
501 .align
502hyp_irq:
503 push {r0, r1, r2}
504 mov r1, #ARM_EXCEPTION_IRQ
505 load_vcpu @ Load VCPU pointer to r0
506 b __kvm_vcpu_return
507
508 .align
509hyp_fiq:
510 b hyp_fiq
511
512 .ltorg
513
514__kvm_hyp_code_end:
515 .globl __kvm_hyp_code_end
516
517 .section ".rodata"
518
519und_die_str:
520 .ascii "unexpected undefined exception in Hyp mode at: %#08x\n"
521pabt_die_str:
522 .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n"
523dabt_die_str:
524 .ascii "unexpected data abort in Hyp mode at: %#08x\n"
525svc_die_str:
526 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
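
The 0xffffffff convention kept in the kvm_call_hyp comment above is handled by the "Check for __hyp_get_vectors" test in hyp_hvc: instead of branching, the handler returns the current HVBAR. From C this looks roughly like (sketch only):

	/* Query the installed HYP vector base through the ABI */
	unsigned long vectors = kvm_call_hyp((void *)0xffffffffUL);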
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
deleted file mode 100644
index 51a59504bef4..000000000000
--- a/arch/arm/kvm/interrupts_head.S
+++ /dev/null
@@ -1,648 +0,0 @@
1#include <linux/irqchip/arm-gic.h>
2#include <asm/assembler.h>
3
4#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
5#define VCPU_USR_SP (VCPU_USR_REG(13))
6#define VCPU_USR_LR (VCPU_USR_REG(14))
7#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
8
9/*
10 * Many of these macros need to access the VCPU structure, which is always
11 * held in r0. These macros should never clobber r1, as it is used to hold the
12 * exception code on the return path (except of course the macro that switches
13 * all the registers before the final jump to the VM).
14 */
15vcpu .req r0 @ vcpu pointer always in r0
16
17/* Clobbers {r2-r6} */
18.macro store_vfp_state vfp_base
19 @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
20 VFPFMRX r2, FPEXC
21 @ Make sure VFP is enabled so we can touch the registers.
22 orr r6, r2, #FPEXC_EN
23 VFPFMXR FPEXC, r6
24
25 VFPFMRX r3, FPSCR
26 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
27 beq 1f
 28 @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
29 @ we only need to save them if FPEXC_EX is set.
30 VFPFMRX r4, FPINST
31 tst r2, #FPEXC_FP2V
32 VFPFMRX r5, FPINST2, ne @ vmrsne
33 bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
34 VFPFMXR FPEXC, r6
351:
36 VFPFSTMIA \vfp_base, r6 @ Save VFP registers
37 stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
38.endm
39
40/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
41.macro restore_vfp_state vfp_base
42 VFPFLDMIA \vfp_base, r6 @ Load VFP registers
43 ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
44
45 VFPFMXR FPSCR, r3
46 tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
47 beq 1f
48 VFPFMXR FPINST, r4
49 tst r2, #FPEXC_FP2V
50 VFPFMXR FPINST2, r5, ne
511:
52 VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
53.endm
54
 55/* These are simply for the macros to work - values don't have meaning */
56.equ usr, 0
57.equ svc, 1
58.equ abt, 2
59.equ und, 3
60.equ irq, 4
61.equ fiq, 5
62
63.macro push_host_regs_mode mode
64 mrs r2, SP_\mode
65 mrs r3, LR_\mode
66 mrs r4, SPSR_\mode
67 push {r2, r3, r4}
68.endm
69
70/*
71 * Store all host persistent registers on the stack.
72 * Clobbers all registers, in all modes, except r0 and r1.
73 */
74.macro save_host_regs
75 /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
76 mrs r2, ELR_hyp
77 push {r2}
78
79 /* usr regs */
80 push {r4-r12} @ r0-r3 are always clobbered
81 mrs r2, SP_usr
82 mov r3, lr
83 push {r2, r3}
84
85 push_host_regs_mode svc
86 push_host_regs_mode abt
87 push_host_regs_mode und
88 push_host_regs_mode irq
89
90 /* fiq regs */
91 mrs r2, r8_fiq
92 mrs r3, r9_fiq
93 mrs r4, r10_fiq
94 mrs r5, r11_fiq
95 mrs r6, r12_fiq
96 mrs r7, SP_fiq
97 mrs r8, LR_fiq
98 mrs r9, SPSR_fiq
99 push {r2-r9}
100.endm
101
102.macro pop_host_regs_mode mode
103 pop {r2, r3, r4}
104 msr SP_\mode, r2
105 msr LR_\mode, r3
106 msr SPSR_\mode, r4
107.endm
108
109/*
110 * Restore all host registers from the stack.
111 * Clobbers all registers, in all modes, except r0 and r1.
112 */
113.macro restore_host_regs
114 pop {r2-r9}
115 msr r8_fiq, r2
116 msr r9_fiq, r3
117 msr r10_fiq, r4
118 msr r11_fiq, r5
119 msr r12_fiq, r6
120 msr SP_fiq, r7
121 msr LR_fiq, r8
122 msr SPSR_fiq, r9
123
124 pop_host_regs_mode irq
125 pop_host_regs_mode und
126 pop_host_regs_mode abt
127 pop_host_regs_mode svc
128
129 pop {r2, r3}
130 msr SP_usr, r2
131 mov lr, r3
132 pop {r4-r12}
133
134 pop {r2}
135 msr ELR_hyp, r2
136.endm
137
138/*
139 * Restore SP, LR and SPSR for a given mode. offset is the offset of
140 * this mode's registers from the VCPU base.
141 *
142 * Assumes vcpu pointer in vcpu reg
143 *
144 * Clobbers r1, r2, r3, r4.
145 */
146.macro restore_guest_regs_mode mode, offset
147 add r1, vcpu, \offset
148 ldm r1, {r2, r3, r4}
149 msr SP_\mode, r2
150 msr LR_\mode, r3
151 msr SPSR_\mode, r4
152.endm
153
154/*
155 * Restore all guest registers from the vcpu struct.
156 *
157 * Assumes vcpu pointer in vcpu reg
158 *
159 * Clobbers *all* registers.
160 */
161.macro restore_guest_regs
162 restore_guest_regs_mode svc, #VCPU_SVC_REGS
163 restore_guest_regs_mode abt, #VCPU_ABT_REGS
164 restore_guest_regs_mode und, #VCPU_UND_REGS
165 restore_guest_regs_mode irq, #VCPU_IRQ_REGS
166
167 add r1, vcpu, #VCPU_FIQ_REGS
168 ldm r1, {r2-r9}
169 msr r8_fiq, r2
170 msr r9_fiq, r3
171 msr r10_fiq, r4
172 msr r11_fiq, r5
173 msr r12_fiq, r6
174 msr SP_fiq, r7
175 msr LR_fiq, r8
176 msr SPSR_fiq, r9
177
178 @ Load return state
179 ldr r2, [vcpu, #VCPU_PC]
180 ldr r3, [vcpu, #VCPU_CPSR]
181 msr ELR_hyp, r2
182 msr SPSR_cxsf, r3
183
184 @ Load user registers
185 ldr r2, [vcpu, #VCPU_USR_SP]
186 ldr r3, [vcpu, #VCPU_USR_LR]
187 msr SP_usr, r2
188 mov lr, r3
189 add vcpu, vcpu, #(VCPU_USR_REGS)
190 ldm vcpu, {r0-r12}
191.endm
192
193/*
194 * Save SP, LR and SPSR for a given mode. offset is the offset of
195 * this mode's registers from the VCPU base.
196 *
197 * Assumes vcpu pointer in vcpu reg
198 *
199 * Clobbers r2, r3, r4, r5.
200 */
201.macro save_guest_regs_mode mode, offset
202 add r2, vcpu, \offset
203 mrs r3, SP_\mode
204 mrs r4, LR_\mode
205 mrs r5, SPSR_\mode
206 stm r2, {r3, r4, r5}
207.endm
208
209/*
210 * Save all guest registers to the vcpu struct
211 * Expects guest's r0, r1, r2 on the stack.
212 *
213 * Assumes vcpu pointer in vcpu reg
214 *
215 * Clobbers r2, r3, r4, r5.
216 */
217.macro save_guest_regs
218 @ Store usr registers
219 add r2, vcpu, #VCPU_USR_REG(3)
220 stm r2, {r3-r12}
221 add r2, vcpu, #VCPU_USR_REG(0)
222 pop {r3, r4, r5} @ r0, r1, r2
223 stm r2, {r3, r4, r5}
224 mrs r2, SP_usr
225 mov r3, lr
226 str r2, [vcpu, #VCPU_USR_SP]
227 str r3, [vcpu, #VCPU_USR_LR]
228
229 @ Store return state
230 mrs r2, ELR_hyp
231 mrs r3, spsr
232 str r2, [vcpu, #VCPU_PC]
233 str r3, [vcpu, #VCPU_CPSR]
234
235 @ Store other guest registers
236 save_guest_regs_mode svc, #VCPU_SVC_REGS
237 save_guest_regs_mode abt, #VCPU_ABT_REGS
238 save_guest_regs_mode und, #VCPU_UND_REGS
239 save_guest_regs_mode irq, #VCPU_IRQ_REGS
240.endm
241
242/* Reads cp15 registers from hardware and stores them in memory
243 * @store_to_vcpu: If 0, registers are written in-order to the stack,
 244 * otherwise to the VCPU struct pointed to by vcpu
245 *
246 * Assumes vcpu pointer in vcpu reg
247 *
248 * Clobbers r2 - r12
249 */
250.macro read_cp15_state store_to_vcpu
251 mrc p15, 0, r2, c1, c0, 0 @ SCTLR
252 mrc p15, 0, r3, c1, c0, 2 @ CPACR
253 mrc p15, 0, r4, c2, c0, 2 @ TTBCR
254 mrc p15, 0, r5, c3, c0, 0 @ DACR
255 mrrc p15, 0, r6, r7, c2 @ TTBR 0
256 mrrc p15, 1, r8, r9, c2 @ TTBR 1
257 mrc p15, 0, r10, c10, c2, 0 @ PRRR
258 mrc p15, 0, r11, c10, c2, 1 @ NMRR
259 mrc p15, 2, r12, c0, c0, 0 @ CSSELR
260
261 .if \store_to_vcpu == 0
262 push {r2-r12} @ Push CP15 registers
263 .else
264 str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
265 str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
266 str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
267 str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
268 add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
269 strd r6, r7, [r2]
270 add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
271 strd r8, r9, [r2]
272 str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
273 str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
274 str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
275 .endif
276
277 mrc p15, 0, r2, c13, c0, 1 @ CID
278 mrc p15, 0, r3, c13, c0, 2 @ TID_URW
279 mrc p15, 0, r4, c13, c0, 3 @ TID_URO
280 mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
281 mrc p15, 0, r6, c5, c0, 0 @ DFSR
282 mrc p15, 0, r7, c5, c0, 1 @ IFSR
283 mrc p15, 0, r8, c5, c1, 0 @ ADFSR
284 mrc p15, 0, r9, c5, c1, 1 @ AIFSR
285 mrc p15, 0, r10, c6, c0, 0 @ DFAR
286 mrc p15, 0, r11, c6, c0, 2 @ IFAR
287 mrc p15, 0, r12, c12, c0, 0 @ VBAR
288
289 .if \store_to_vcpu == 0
290 push {r2-r12} @ Push CP15 registers
291 .else
292 str r2, [vcpu, #CP15_OFFSET(c13_CID)]
293 str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
294 str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
295 str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
296 str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
297 str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
298 str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
299 str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
300 str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
301 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
302 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
303 .endif
304
305 mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
306 mrrc p15, 0, r4, r5, c7 @ PAR
307 mrc p15, 0, r6, c10, c3, 0 @ AMAIR0
308 mrc p15, 0, r7, c10, c3, 1 @ AMAIR1
309
310 .if \store_to_vcpu == 0
311 push {r2,r4-r7}
312 .else
313 str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
314 add r12, vcpu, #CP15_OFFSET(c7_PAR)
315 strd r4, r5, [r12]
316 str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
317 str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
318 .endif
319.endm
320
321/*
322 * Reads cp15 registers from memory and writes them to hardware
323 * @read_from_vcpu: If 0, registers are read in-order from the stack,
 324 * otherwise from the VCPU struct pointed to by vcpu
325 *
326 * Assumes vcpu pointer in vcpu reg
327 */
328.macro write_cp15_state read_from_vcpu
329 .if \read_from_vcpu == 0
330 pop {r2,r4-r7}
331 .else
332 ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
333 add r12, vcpu, #CP15_OFFSET(c7_PAR)
334 ldrd r4, r5, [r12]
335 ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
336 ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
337 .endif
338
339 mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
340 mcrr p15, 0, r4, r5, c7 @ PAR
341 mcr p15, 0, r6, c10, c3, 0 @ AMAIR0
342 mcr p15, 0, r7, c10, c3, 1 @ AMAIR1
343
344 .if \read_from_vcpu == 0
345 pop {r2-r12}
346 .else
347 ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
348 ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
349 ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
350 ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
351 ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
352 ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
353 ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
354 ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
355 ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
356 ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
357 ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
358 .endif
359
360 mcr p15, 0, r2, c13, c0, 1 @ CID
361 mcr p15, 0, r3, c13, c0, 2 @ TID_URW
362 mcr p15, 0, r4, c13, c0, 3 @ TID_URO
363 mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
364 mcr p15, 0, r6, c5, c0, 0 @ DFSR
365 mcr p15, 0, r7, c5, c0, 1 @ IFSR
366 mcr p15, 0, r8, c5, c1, 0 @ ADFSR
367 mcr p15, 0, r9, c5, c1, 1 @ AIFSR
368 mcr p15, 0, r10, c6, c0, 0 @ DFAR
369 mcr p15, 0, r11, c6, c0, 2 @ IFAR
370 mcr p15, 0, r12, c12, c0, 0 @ VBAR
371
372 .if \read_from_vcpu == 0
373 pop {r2-r12}
374 .else
375 ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
376 ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
377 ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
378 ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
379 add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
380 ldrd r6, r7, [r12]
381 add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
382 ldrd r8, r9, [r12]
383 ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
384 ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
385 ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
386 .endif
387
388 mcr p15, 0, r2, c1, c0, 0 @ SCTLR
389 mcr p15, 0, r3, c1, c0, 2 @ CPACR
390 mcr p15, 0, r4, c2, c0, 2 @ TTBCR
391 mcr p15, 0, r5, c3, c0, 0 @ DACR
392 mcrr p15, 0, r6, r7, c2 @ TTBR 0
393 mcrr p15, 1, r8, r9, c2 @ TTBR 1
394 mcr p15, 0, r10, c10, c2, 0 @ PRRR
395 mcr p15, 0, r11, c10, c2, 1 @ NMRR
396 mcr p15, 2, r12, c0, c0, 0 @ CSSELR
397.endm
398
399/*
400 * Save the VGIC CPU state into memory
401 *
402 * Assumes vcpu pointer in vcpu reg
403 */
404.macro save_vgic_state
405 /* Get VGIC VCTRL base into r2 */
406 ldr r2, [vcpu, #VCPU_KVM]
407 ldr r2, [r2, #KVM_VGIC_VCTRL]
408 cmp r2, #0
409 beq 2f
410
411 /* Compute the address of struct vgic_cpu */
412 add r11, vcpu, #VCPU_VGIC_CPU
413
414 /* Save all interesting registers */
415 ldr r4, [r2, #GICH_VMCR]
416 ldr r5, [r2, #GICH_MISR]
417 ldr r6, [r2, #GICH_EISR0]
418 ldr r7, [r2, #GICH_EISR1]
419 ldr r8, [r2, #GICH_ELRSR0]
420 ldr r9, [r2, #GICH_ELRSR1]
421 ldr r10, [r2, #GICH_APR]
422ARM_BE8(rev r4, r4 )
423ARM_BE8(rev r5, r5 )
424ARM_BE8(rev r6, r6 )
425ARM_BE8(rev r7, r7 )
426ARM_BE8(rev r8, r8 )
427ARM_BE8(rev r9, r9 )
428ARM_BE8(rev r10, r10 )
429
430 str r4, [r11, #VGIC_V2_CPU_VMCR]
431 str r5, [r11, #VGIC_V2_CPU_MISR]
432#ifdef CONFIG_CPU_ENDIAN_BE8
433 str r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
434 str r7, [r11, #VGIC_V2_CPU_EISR]
435 str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
436 str r9, [r11, #VGIC_V2_CPU_ELRSR]
437#else
438 str r6, [r11, #VGIC_V2_CPU_EISR]
439 str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
440 str r8, [r11, #VGIC_V2_CPU_ELRSR]
441 str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
442#endif
443 str r10, [r11, #VGIC_V2_CPU_APR]
444
445 /* Clear GICH_HCR */
446 mov r5, #0
447 str r5, [r2, #GICH_HCR]
448
449 /* Save list registers */
450 add r2, r2, #GICH_LR0
451 add r3, r11, #VGIC_V2_CPU_LR
452 ldr r4, [r11, #VGIC_CPU_NR_LR]
4531: ldr r6, [r2], #4
454ARM_BE8(rev r6, r6 )
455 str r6, [r3], #4
456 subs r4, r4, #1
457 bne 1b
4582:
459.endm
460
461/*
462 * Restore the VGIC CPU state from memory
463 *
464 * Assumes vcpu pointer in vcpu reg
465 */
466.macro restore_vgic_state
467 /* Get VGIC VCTRL base into r2 */
468 ldr r2, [vcpu, #VCPU_KVM]
469 ldr r2, [r2, #KVM_VGIC_VCTRL]
470 cmp r2, #0
471 beq 2f
472
473 /* Compute the address of struct vgic_cpu */
474 add r11, vcpu, #VCPU_VGIC_CPU
475
476 /* We only restore a minimal set of registers */
477 ldr r3, [r11, #VGIC_V2_CPU_HCR]
478 ldr r4, [r11, #VGIC_V2_CPU_VMCR]
479 ldr r8, [r11, #VGIC_V2_CPU_APR]
480ARM_BE8(rev r3, r3 )
481ARM_BE8(rev r4, r4 )
482ARM_BE8(rev r8, r8 )
483
484 str r3, [r2, #GICH_HCR]
485 str r4, [r2, #GICH_VMCR]
486 str r8, [r2, #GICH_APR]
487
488 /* Restore list registers */
489 add r2, r2, #GICH_LR0
490 add r3, r11, #VGIC_V2_CPU_LR
491 ldr r4, [r11, #VGIC_CPU_NR_LR]
4921: ldr r6, [r3], #4
493ARM_BE8(rev r6, r6 )
494 str r6, [r2], #4
495 subs r4, r4, #1
496 bne 1b
4972:
498.endm
499
500#define CNTHCTL_PL1PCTEN (1 << 0)
501#define CNTHCTL_PL1PCEN (1 << 1)
502
503/*
504 * Save the timer state onto the VCPU and allow physical timer/counter access
505 * for the host.
506 *
507 * Assumes vcpu pointer in vcpu reg
508 * Clobbers r2-r5
509 */
510.macro save_timer_state
511 ldr r4, [vcpu, #VCPU_KVM]
512 ldr r2, [r4, #KVM_TIMER_ENABLED]
513 cmp r2, #0
514 beq 1f
515
516 mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
517 str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
518
519 isb
520
521 mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
522 ldr r4, =VCPU_TIMER_CNTV_CVAL
523 add r5, vcpu, r4
524 strd r2, r3, [r5]
525
526 @ Ensure host CNTVCT == CNTPCT
527 mov r2, #0
528 mcrr p15, 4, r2, r2, c14 @ CNTVOFF
529
5301:
531 mov r2, #0 @ Clear ENABLE
532 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
533
534 @ Allow physical timer/counter access for the host
535 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
536 orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
537 mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
538.endm
539
540/*
541 * Load the timer state from the VCPU and deny physical timer/counter access
542 * for the host.
543 *
544 * Assumes vcpu pointer in vcpu reg
545 * Clobbers r2-r5
546 */
547.macro restore_timer_state
548 @ Disallow physical timer access for the guest
549 @ Physical counter access is allowed
550 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
551 orr r2, r2, #CNTHCTL_PL1PCTEN
552 bic r2, r2, #CNTHCTL_PL1PCEN
553 mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
554
555 ldr r4, [vcpu, #VCPU_KVM]
556 ldr r2, [r4, #KVM_TIMER_ENABLED]
557 cmp r2, #0
558 beq 1f
559
560 ldr r2, [r4, #KVM_TIMER_CNTVOFF]
561 ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
562 mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF
563
564 ldr r4, =VCPU_TIMER_CNTV_CVAL
565 add r5, vcpu, r4
566 ldrd r2, r3, [r5]
567 mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
568 isb
569
570 ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
571 and r2, r2, #3
572 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
5731:
574.endm
575
576.equ vmentry, 0
577.equ vmexit, 1
578
579/* Configures the HSTR (Hyp System Trap Register) on entry/return
580 * (hardware reset value is 0) */
581.macro set_hstr operation
582 mrc p15, 4, r2, c1, c1, 3
583 ldr r3, =HSTR_T(15)
584 .if \operation == vmentry
585 orr r2, r2, r3 @ Trap CR{15}
586 .else
587 bic r2, r2, r3 @ Don't trap any CRx accesses
588 .endif
589 mcr p15, 4, r2, c1, c1, 3
590.endm
591
592/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
593 * (hardware reset value is 0). Keep previous value in r2.
 594 * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
595 * VFP wasn't already enabled (always executed on vmtrap).
596 * If a label is specified with vmexit, it is branched to if VFP wasn't
597 * enabled.
598 */
599.macro set_hcptr operation, mask, label = none
600 mrc p15, 4, r2, c1, c1, 2
601 ldr r3, =\mask
602 .if \operation == vmentry
603 orr r3, r2, r3 @ Trap coproc-accesses defined in mask
604 .else
605 bic r3, r2, r3 @ Don't trap defined coproc-accesses
606 .endif
607 mcr p15, 4, r3, c1, c1, 2
608 .if \operation != vmentry
609 .if \operation == vmexit
610 tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
611 beq 1f
612 .endif
613 isb
614 .if \label != none
615 b \label
616 .endif
6171:
618 .endif
619.endm
620
621/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
622 * (hardware reset value is 0) */
623.macro set_hdcr operation
624 mrc p15, 4, r2, c1, c1, 1
625 ldr r3, =(HDCR_TPM|HDCR_TPMCR)
626 .if \operation == vmentry
627 orr r2, r2, r3 @ Trap some perfmon accesses
628 .else
629 bic r2, r2, r3 @ Don't trap any perfmon accesses
630 .endif
631 mcr p15, 4, r2, c1, c1, 1
632.endm
633
634/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
635.macro configure_hyp_role operation
636 .if \operation == vmentry
637 ldr r2, [vcpu, #VCPU_HCR]
638 ldr r3, [vcpu, #VCPU_IRQ_LINES]
639 orr r2, r2, r3
640 .else
641 mov r2, #0
642 .endif
643 mcr p15, 4, r2, c1, c1, 0 @ HCR
644.endm
645
646.macro load_vcpu
647 mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
648.endm
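
The save_timer_state/restore_timer_state macros deleted above gate the guest's access to the physical timer and counter through CNTHCTL; their replacement in this series is C code in the hyp/ directory, along these lines (a sketch assuming the read_sysreg/write_sysreg accessors used by the new files):

	static void __hyp_text timer_restore_traps_sketch(void)
	{
		u32 val;

		/* Guest may read the physical counter, not the physical timer */
		val = read_sysreg(CNTHCTL);
		val |= CNTHCTL_PL1PCTEN;
		val &= ~CNTHCTL_PL1PCEN;
		write_sysreg(val, CNTHCTL);
	}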
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index aba61fd3697a..58dbd5c439df 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -28,6 +28,7 @@
28#include <asm/kvm_mmio.h> 28#include <asm/kvm_mmio.h>
29#include <asm/kvm_asm.h> 29#include <asm/kvm_asm.h>
30#include <asm/kvm_emulate.h> 30#include <asm/kvm_emulate.h>
31#include <asm/virt.h>
31 32
32#include "trace.h" 33#include "trace.h"
33 34
@@ -598,6 +599,9 @@ int create_hyp_mappings(void *from, void *to)
598 unsigned long start = KERN_TO_HYP((unsigned long)from); 599 unsigned long start = KERN_TO_HYP((unsigned long)from);
599 unsigned long end = KERN_TO_HYP((unsigned long)to); 600 unsigned long end = KERN_TO_HYP((unsigned long)to);
600 601
602 if (is_kernel_in_hyp_mode())
603 return 0;
604
601 start = start & PAGE_MASK; 605 start = start & PAGE_MASK;
602 end = PAGE_ALIGN(end); 606 end = PAGE_ALIGN(end);
603 607
@@ -630,6 +634,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
630 unsigned long start = KERN_TO_HYP((unsigned long)from); 634 unsigned long start = KERN_TO_HYP((unsigned long)from);
631 unsigned long end = KERN_TO_HYP((unsigned long)to); 635 unsigned long end = KERN_TO_HYP((unsigned long)to);
632 636
637 if (is_kernel_in_hyp_mode())
638 return 0;
639
633 /* Check for a valid kernel IO mapping */ 640 /* Check for a valid kernel IO mapping */
634 if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) 641 if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
635 return -EINVAL; 642 return -EINVAL;
@@ -1431,6 +1438,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1431 } 1438 }
1432 1439
1433 /* 1440 /*
1441 * Check for a cache maintenance operation. Since we
1442 * ended-up here, we know it is outside of any memory
1443 * slot. But we can't find out if that is for a device,
1444 * or if the guest is just being stupid. The only thing
1445 * we know for sure is that this range cannot be cached.
1446 *
1447 * So let's assume that the guest is just being
1448 * cautious, and skip the instruction.
1449 */
1450 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1451 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1452 ret = 1;
1453 goto out_unlock;
1454 }
1455
1456 /*
1434 * The IPA is reported as [MAX:12], so we need to 1457 * The IPA is reported as [MAX:12], so we need to
1435 * complement it with the bottom 12 bits from the 1458 * complement it with the bottom 12 bits from the
1436 * faulting VA. This is always 12 bits, irrespective 1459 * faulting VA. This is always 12 bits, irrespective
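
The IPA completion described by the trailing comment is performed by the existing line just past this hunk, roughly (shown as a sketch for context):

	/* HPFAR reports IPA[MAX:12]; take bits [11:0] from the faulting VA */
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);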
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index eeb85858d6bb..0048b5a62a50 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -71,7 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
71 } 71 }
72 72
73 /* Reset core registers */ 73 /* Reset core registers */
74 memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); 74 memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
75 75
76 /* Reset CP15 registers */ 76 /* Reset CP15 registers */
77 kvm_reset_coprocs(vcpu); 77 kvm_reset_coprocs(vcpu);
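
The reset.c hunk reflects the core registers' new home: they now live in the per-vcpu kvm_cpu_context shared with the hyp code, roughly (simplified sketch; the real definition is in asm/kvm_host.h):

	struct kvm_cpu_context {
		struct kvm_regs		gp_regs;
		struct vfp_hard_struct	vfp;
		u32			cp15[NR_CP15_REGS];
	};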