Diffstat (limited to 'arch/arm/kvm/arm.c')
-rw-r--r--   arch/arm/kvm/arm.c   172
1 file changed, 172 insertions, 0 deletions
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index d3506b4001aa..2c6b780e78a7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -34,11 +34,21 @@
 #include <asm/ptrace.h>
 #include <asm/mman.h>
 #include <asm/cputype.h>
+#include <asm/tlbflush.h>
+#include <asm/virt.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension virt");
 #endif
 
+static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
+static unsigned long hyp_default_vectors;
+
+
 int kvm_arch_hardware_enable(void *garbage)
 {
 	return 0;
@@ -331,9 +341,171 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	return -EINVAL;
 }
 
+static void cpu_init_hyp_mode(void *vector)
+{
+	unsigned long long pgd_ptr;
+	unsigned long pgd_low, pgd_high;
+	unsigned long hyp_stack_ptr;
+	unsigned long stack_page;
+	unsigned long vector_ptr;
+
+	/* Switch from the HYP stub to our own HYP init vector */
+	__hyp_set_vectors((unsigned long)vector);
+
+	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+	hyp_stack_ptr = stack_page + PAGE_SIZE;
+	vector_ptr = (unsigned long)__kvm_hyp_vector;
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are caller-save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casting pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
+/**
+ * Inits Hyp-mode on all online CPUs
+ */
+static int init_hyp_mode(void)
+{
+	phys_addr_t init_phys_addr;
+	int cpu;
+	int err = 0;
+
+	/*
+	 * Allocate Hyp PGD and setup Hyp identity mapping
+	 */
+	err = kvm_mmu_init();
+	if (err)
+		goto out_err;
+
+	/*
+	 * It is probably enough to obtain the default on one
+	 * CPU. It's unlikely to be different on the others.
+	 */
+	hyp_default_vectors = __hyp_get_vectors();
+
+	/*
+	 * Allocate stack pages for Hypervisor-mode
+	 */
+	for_each_possible_cpu(cpu) {
+		unsigned long stack_page;
+
+		stack_page = __get_free_page(GFP_KERNEL);
+		if (!stack_page) {
+			err = -ENOMEM;
+			goto out_free_stack_pages;
+		}
+
+		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+	}
+
+	/*
+	 * Execute the init code on each CPU.
+	 *
+	 * Note: The stack is not mapped yet, so don't do anything other than
+	 * initializing the hypervisor mode on each CPU using a local stack
+	 * space for temporary storage.
+	 */
+	init_phys_addr = virt_to_phys(__kvm_hyp_init);
+	for_each_online_cpu(cpu) {
+		smp_call_function_single(cpu, cpu_init_hyp_mode,
+					 (void *)(long)init_phys_addr, 1);
+	}
+
+	/*
+	 * Unmap the identity mapping
+	 */
+	kvm_clear_hyp_idmap();
+
+	/*
+	 * Map the Hyp-code called directly from the host
+	 */
+	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+	if (err) {
+		kvm_err("Cannot map world-switch code\n");
+		goto out_free_mappings;
+	}
+
+	/*
+	 * Map the Hyp stack pages
+	 */
+	for_each_possible_cpu(cpu) {
+		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
+
+		if (err) {
+			kvm_err("Cannot map hyp stack\n");
+			goto out_free_mappings;
+		}
+	}
+
+	/*
+	 * Map the host VFP structures
+	 */
+	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
+	if (!kvm_host_vfp_state) {
+		err = -ENOMEM;
+		kvm_err("Cannot allocate host VFP state\n");
+		goto out_free_mappings;
+	}
+
+	for_each_possible_cpu(cpu) {
+		struct vfp_hard_struct *vfp;
+
+		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
+		err = create_hyp_mappings(vfp, vfp + 1);
+
+		if (err) {
+			kvm_err("Cannot map host VFP state: %d\n", err);
+			goto out_free_vfp;
+		}
+	}
+
+	kvm_info("Hyp mode initialized successfully\n");
+	return 0;
+out_free_vfp:
+	free_percpu(kvm_host_vfp_state);
+out_free_mappings:
+	free_hyp_pmds();
+out_free_stack_pages:
+	for_each_possible_cpu(cpu)
+		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+out_err:
+	kvm_err("error initializing Hyp mode: %d\n", err);
+	return err;
+}
+
+/**
+ * Initialize Hyp-mode and memory mappings on all CPUs.
+ */
 int kvm_arch_init(void *opaque)
 {
+	int err;
+
+	if (!is_hyp_mode_available()) {
+		kvm_err("HYP mode not available\n");
+		return -ENODEV;
+	}
+
+	if (kvm_target_cpu() < 0) {
+		kvm_err("Target CPU not supported!\n");
+		return -ENODEV;
+	}
+
+	err = init_hyp_mode();
+	if (err)
+		goto out_err;
+
 	return 0;
+out_err:
+	return err;
 }
 
 /* NOP: Compiling as a module not supported */
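
Note (not part of the patch): cpu_init_hyp_mode() above splits the 64-bit HTTBR physical address returned by kvm_mmu_get_httbr() into two 32-bit halves so that all four scalar arguments to kvm_call_hyp() fit the AAPCS register arguments r0-r3. A minimal standalone C sketch of that split follows; it is plain userspace code, and the sample address is made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 40-bit LPAE physical address of the Hyp page tables. */
	uint64_t pgd_ptr = 0x00000010ba54c000ULL;

	/* Mirror the pgd_low/pgd_high computation from cpu_init_hyp_mode(). */
	uint32_t pgd_low  = (uint32_t)(pgd_ptr & ((1ULL << 32) - 1));
	uint32_t pgd_high = (uint32_t)(pgd_ptr >> 32);

	/* pgd_low travels as the first argument (r0), pgd_high as the second (r1). */
	printf("pgd_ptr  = 0x%016llx\n", (unsigned long long)pgd_ptr);
	printf("pgd_low  = 0x%08x\n", (unsigned)pgd_low);
	printf("pgd_high = 0x%08x\n", (unsigned)pgd_high);
	return 0;
}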