path: root/arch/arm/kvm/arm.c
Diffstat (limited to 'arch/arm/kvm/arm.c')
-rw-r--r--	arch/arm/kvm/arm.c	129
1 file changed, 75 insertions(+), 54 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index a0dfc2a53f91..37d216d814cd 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -16,6 +16,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
@@ -48,7 +49,7 @@ __asm__(".arch_extension virt");
 #endif
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_kernel_vfp_t __percpu *kvm_host_vfp_state;
+static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
 static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -206,7 +207,7 @@ int kvm_dev_ioctl_check_extension(long ext)
 		r = KVM_MAX_VCPUS;
 		break;
 	default:
-		r = 0;
+		r = kvm_arch_dev_ioctl_check_extension(ext);
 		break;
 	}
 	return r;
@@ -218,27 +219,18 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-			       struct kvm_userspace_memory_region *mem,
-			       struct kvm_memory_slot old,
-			       int user_alloc)
-{
-	return 0;
-}
-
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
 				   struct kvm_memory_slot *memslot,
-				   struct kvm_memory_slot old,
 				   struct kvm_userspace_memory_region *mem,
-				   bool user_alloc)
+				   enum kvm_mr_change change)
 {
 	return 0;
 }
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				   struct kvm_userspace_memory_region *mem,
-				   struct kvm_memory_slot old,
-				   bool user_alloc)
+				   const struct kvm_memory_slot *old,
+				   enum kvm_mr_change change)
 {
 }
 
@@ -326,7 +318,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	vcpu->cpu = cpu;
-	vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
+	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
 	/*
 	 * Check whether this vcpu requires the cache to be flushed on
@@ -639,7 +631,8 @@ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
 	return 0;
 }
 
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status)
 {
 	u32 irq = irq_level->irq;
 	unsigned int irq_type, vcpu_idx, irq_num;
@@ -794,30 +787,48 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
-static void cpu_init_hyp_mode(void *vector)
+static void cpu_init_hyp_mode(void *dummy)
 {
+	unsigned long long boot_pgd_ptr;
 	unsigned long long pgd_ptr;
 	unsigned long hyp_stack_ptr;
 	unsigned long stack_page;
 	unsigned long vector_ptr;
 
 	/* Switch from the HYP stub to our own HYP init vector */
-	__hyp_set_vectors((unsigned long)vector);
+	__hyp_set_vectors(kvm_get_idmap_vector());
 
+	boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr();
 	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
 	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;
 
-	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
+	__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+}
+
+static int hyp_init_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		cpu_init_hyp_mode(NULL);
+		break;
+	}
+
+	return NOTIFY_OK;
 }
 
+static struct notifier_block hyp_init_cpu_nb = {
+	.notifier_call = hyp_init_cpu_notify,
+};
+
 /**
  * Inits Hyp-mode on all online CPUs
  */
 static int init_hyp_mode(void)
 {
-	phys_addr_t init_phys_addr;
 	int cpu;
 	int err = 0;
 
@@ -850,24 +861,6 @@ static int init_hyp_mode(void)
 	}
 
 	/*
-	 * Execute the init code on each CPU.
-	 *
-	 * Note: The stack is not mapped yet, so don't do anything else than
-	 * initializing the hypervisor mode on each CPU using a local stack
-	 * space for temporary storage.
-	 */
-	init_phys_addr = virt_to_phys(__kvm_hyp_init);
-	for_each_online_cpu(cpu) {
-		smp_call_function_single(cpu, cpu_init_hyp_mode,
-					 (void *)(long)init_phys_addr, 1);
-	}
-
-	/*
-	 * Unmap the identity mapping
-	 */
-	kvm_clear_hyp_idmap();
-
-	/*
 	 * Map the Hyp-code called directly from the host
 	 */
 	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
@@ -890,33 +883,38 @@ static int init_hyp_mode(void)
 	}
 
 	/*
-	 * Map the host VFP structures
+	 * Map the host CPU structures
 	 */
-	kvm_host_vfp_state = alloc_percpu(kvm_kernel_vfp_t);
-	if (!kvm_host_vfp_state) {
+	kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+	if (!kvm_host_cpu_state) {
 		err = -ENOMEM;
-		kvm_err("Cannot allocate host VFP state\n");
+		kvm_err("Cannot allocate host CPU state\n");
 		goto out_free_mappings;
 	}
 
 	for_each_possible_cpu(cpu) {
-		kvm_kernel_vfp_t *vfp;
+		kvm_cpu_context_t *cpu_ctxt;
 
-		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
-		err = create_hyp_mappings(vfp, vfp + 1);
+		cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
 
 		if (err) {
-			kvm_err("Cannot map host VFP state: %d\n", err);
-			goto out_free_vfp;
+			kvm_err("Cannot map host CPU state: %d\n", err);
+			goto out_free_context;
 		}
 	}
 
 	/*
+	 * Execute the init code on each CPU.
+	 */
+	on_each_cpu(cpu_init_hyp_mode, NULL, 1);
+
+	/*
 	 * Init HYP view of VGIC
 	 */
 	err = kvm_vgic_hyp_init();
 	if (err)
-		goto out_free_vfp;
+		goto out_free_context;
 
 #ifdef CONFIG_KVM_ARM_VGIC
 	vgic_present = true;
@@ -929,12 +927,19 @@ static int init_hyp_mode(void)
 	if (err)
 		goto out_free_mappings;
 
+#ifndef CONFIG_HOTPLUG_CPU
+	free_boot_hyp_pgd();
+#endif
+
+	kvm_perf_init();
+
 	kvm_info("Hyp mode initialized successfully\n");
+
 	return 0;
-out_free_vfp:
-	free_percpu(kvm_host_vfp_state);
+out_free_context:
+	free_percpu(kvm_host_cpu_state);
 out_free_mappings:
-	free_hyp_pmds();
+	free_hyp_pgds();
 out_free_stack_pages:
 	for_each_possible_cpu(cpu)
 		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
@@ -943,27 +948,42 @@ out_err:
 	return err;
 }
 
+static void check_kvm_target_cpu(void *ret)
+{
+	*(int *)ret = kvm_target_cpu();
+}
+
 /**
  * Initialize Hyp-mode and memory mappings on all CPUs.
  */
 int kvm_arch_init(void *opaque)
 {
 	int err;
+	int ret, cpu;
 
 	if (!is_hyp_mode_available()) {
 		kvm_err("HYP mode not available\n");
 		return -ENODEV;
 	}
 
-	if (kvm_target_cpu() < 0) {
-		kvm_err("Target CPU not supported!\n");
-		return -ENODEV;
+	for_each_online_cpu(cpu) {
+		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
+		if (ret < 0) {
+			kvm_err("Error, CPU %d not supported!\n", cpu);
+			return -ENODEV;
+		}
 	}
 
 	err = init_hyp_mode();
 	if (err)
 		goto out_err;
 
+	err = register_cpu_notifier(&hyp_init_cpu_nb);
+	if (err) {
+		kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+		goto out_err;
+	}
+
 	kvm_coproc_table_init();
 	return 0;
 out_err:
@@ -973,6 +993,7 @@ out_err:
 /* NOP: Compiling as a module not supported */
 void kvm_arch_exit(void)
 {
+	kvm_perf_teardown();
 }
 
 static int arm_init(void)
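
The hotplug handling added by this patch follows the usual pattern with the (since-replaced, pre-cpuhp) CPU notifier API: run the per-CPU init once on every CPU that is already online via on_each_cpu(), and register a notifier_block so a CPU brought online later runs the same init from its CPU_STARTING callback. A minimal, hypothetical sketch of that pattern is below; my_cpu_init(), my_cpu_notify() and my_cpu_nb are placeholder names standing in for cpu_init_hyp_mode() and hyp_init_cpu_nb, not part of this patch.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>

static void my_cpu_init(void *dummy)
{
	/* Per-CPU setup; runs on the target CPU itself. */
}

static int my_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *cpu)
{
	/* CPU_STARTING runs on the incoming CPU, early in its bring-up. */
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		my_cpu_init(NULL);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_cpu_nb = {
	.notifier_call = my_cpu_notify,
};

static int __init my_init(void)
{
	/* Initialize the CPUs that are already online... */
	on_each_cpu(my_cpu_init, NULL, 1);

	/* ...and catch CPUs that come online afterwards. */
	return register_cpu_notifier(&my_cpu_nb);
}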