diff options
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/include/uapi/asm/kvm.h | 9 | ||||
-rw-r--r-- | arch/arm/kvm/coproc.c | 164 |
2 files changed, 170 insertions, 3 deletions
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 53f45f146875..71ae27ec0599 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h | |||
@@ -104,6 +104,15 @@ struct kvm_arch_memory_slot { | |||
104 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) | 104 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) |
105 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) | 105 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) |
106 | 106 | ||
/*
 * Some registers need more space to represent values.
 *
 * "Demuxed" register IDs pack two fields into the low 16 bits of the
 * KVM_GET/SET_ONE_REG id: bits 15:8 select which register class is
 * being demultiplexed (currently only CCSIDR), and bits 7:0 select the
 * instance within that class (for CCSIDR: the CSSELR value).
 */
#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
107 | /* KVM_IRQ_LINE irq field index values */ | 116 | /* KVM_IRQ_LINE irq field index values */ |
108 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 117 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
109 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | 118 | #define KVM_ARM_IRQ_TYPE_MASK 0xff |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 95a0f5e5c1fc..1827b643af15 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -35,6 +35,12 @@ | |||
35 | * Co-processor emulation | 35 | * Co-processor emulation |
36 | *****************************************************************************/ | 36 | *****************************************************************************/ |
37 | 37 | ||
/*
 * 3 bits per cache level, as per CLIDR, but non-existent caches always 0.
 * Read once at init time (see kvm_coproc_table_init) and then treated as
 * invariant for the life of the host.
 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12
38 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) | 44 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) |
39 | { | 45 | { |
40 | kvm_inject_undefined(vcpu); | 46 | kvm_inject_undefined(vcpu); |
@@ -548,11 +554,113 @@ static int set_invariant_cp15(u64 id, void __user *uaddr) | |||
548 | return 0; | 554 | return 0; |
549 | } | 555 | } |
550 | 556 | ||
/*
 * Check whether a CSSELR value selects a cache that actually exists on
 * this CPU, according to the sanitized CLIDR copy in cache_levels.
 *
 * @val: candidate CSSELR value; bit 0 selects Instruction (1) or
 *       Data/unified (0), the next 3 bits select the (0-based) level.
 *
 * Returns true iff the selected cache exists.
 */
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	/*
	 * Out-of-range selector: no such cache.  The original code
	 * returned -ENOENT here, but in a bool-returning function that
	 * converts to 'true', wrongly validating any out-of-range CSSELR.
	 */
	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
582 | |||
/*
 * Which cache CCSIDR represents depends on CSSELR value.
 *
 * Reads the Cache Size ID Register for the cache selected by @csselr
 * by writing CSSELR (cp15 c0, op1=2) and reading CCSIDR (cp15 c0,
 * op1=1) back.
 */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/*
	 * CSSELR is per-CPU state shared with any interrupt handler that
	 * might probe the caches: make sure no one else changes it
	 * between the write and the dependent CCSIDR read.
	 */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	/* isb: the CSSELR write must be visible before CCSIDR is read */
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
599 | |||
600 | static int demux_c15_get(u64 id, void __user *uaddr) | ||
601 | { | ||
602 | u32 val; | ||
603 | u32 __user *uval = uaddr; | ||
604 | |||
605 | /* Fail if we have unknown bits set. */ | ||
606 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
607 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
608 | return -ENOENT; | ||
609 | |||
610 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | ||
611 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | ||
612 | if (KVM_REG_SIZE(id) != 4) | ||
613 | return -ENOENT; | ||
614 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | ||
615 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | ||
616 | if (!is_valid_cache(val)) | ||
617 | return -ENOENT; | ||
618 | |||
619 | return put_user(get_ccsidr(val), uval); | ||
620 | default: | ||
621 | return -ENOENT; | ||
622 | } | ||
623 | } | ||
624 | |||
625 | static int demux_c15_set(u64 id, void __user *uaddr) | ||
626 | { | ||
627 | u32 val, newval; | ||
628 | u32 __user *uval = uaddr; | ||
629 | |||
630 | /* Fail if we have unknown bits set. */ | ||
631 | if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ||
632 | | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) | ||
633 | return -ENOENT; | ||
634 | |||
635 | switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { | ||
636 | case KVM_REG_ARM_DEMUX_ID_CCSIDR: | ||
637 | if (KVM_REG_SIZE(id) != 4) | ||
638 | return -ENOENT; | ||
639 | val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) | ||
640 | >> KVM_REG_ARM_DEMUX_VAL_SHIFT; | ||
641 | if (!is_valid_cache(val)) | ||
642 | return -ENOENT; | ||
643 | |||
644 | if (get_user(newval, uval)) | ||
645 | return -EFAULT; | ||
646 | |||
647 | /* This is also invariant: you can't change it. */ | ||
648 | if (newval != get_ccsidr(val)) | ||
649 | return -EINVAL; | ||
650 | return 0; | ||
651 | default: | ||
652 | return -ENOENT; | ||
653 | } | ||
654 | } | ||
655 | |||
551 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 656 | int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
552 | { | 657 | { |
553 | const struct coproc_reg *r; | 658 | const struct coproc_reg *r; |
554 | void __user *uaddr = (void __user *)(long)reg->addr; | 659 | void __user *uaddr = (void __user *)(long)reg->addr; |
555 | 660 | ||
661 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | ||
662 | return demux_c15_get(reg->id, uaddr); | ||
663 | |||
556 | r = index_to_coproc_reg(vcpu, reg->id); | 664 | r = index_to_coproc_reg(vcpu, reg->id); |
557 | if (!r) | 665 | if (!r) |
558 | return get_invariant_cp15(reg->id, uaddr); | 666 | return get_invariant_cp15(reg->id, uaddr); |
@@ -566,6 +674,9 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
566 | const struct coproc_reg *r; | 674 | const struct coproc_reg *r; |
567 | void __user *uaddr = (void __user *)(long)reg->addr; | 675 | void __user *uaddr = (void __user *)(long)reg->addr; |
568 | 676 | ||
677 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) | ||
678 | return demux_c15_set(reg->id, uaddr); | ||
679 | |||
569 | r = index_to_coproc_reg(vcpu, reg->id); | 680 | r = index_to_coproc_reg(vcpu, reg->id); |
570 | if (!r) | 681 | if (!r) |
571 | return set_invariant_cp15(reg->id, uaddr); | 682 | return set_invariant_cp15(reg->id, uaddr); |
@@ -574,6 +685,33 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
574 | return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); | 685 | return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); |
575 | } | 686 | } |
576 | 687 | ||
688 | static unsigned int num_demux_regs(void) | ||
689 | { | ||
690 | unsigned int i, count = 0; | ||
691 | |||
692 | for (i = 0; i < CSSELR_MAX; i++) | ||
693 | if (is_valid_cache(i)) | ||
694 | count++; | ||
695 | |||
696 | return count; | ||
697 | } | ||
698 | |||
699 | static int write_demux_regids(u64 __user *uindices) | ||
700 | { | ||
701 | u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; | ||
702 | unsigned int i; | ||
703 | |||
704 | val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; | ||
705 | for (i = 0; i < CSSELR_MAX; i++) { | ||
706 | if (!is_valid_cache(i)) | ||
707 | continue; | ||
708 | if (put_user(val | i, uindices)) | ||
709 | return -EFAULT; | ||
710 | uindices++; | ||
711 | } | ||
712 | return 0; | ||
713 | } | ||
714 | |||
577 | static u64 cp15_to_index(const struct coproc_reg *reg) | 715 | static u64 cp15_to_index(const struct coproc_reg *reg) |
578 | { | 716 | { |
579 | u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); | 717 | u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); |
@@ -649,6 +787,7 @@ static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) | |||
649 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) | 787 | unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) |
650 | { | 788 | { |
651 | return ARRAY_SIZE(invariant_cp15) | 789 | return ARRAY_SIZE(invariant_cp15) |
790 | + num_demux_regs() | ||
652 | + walk_cp15(vcpu, (u64 __user *)NULL); | 791 | + walk_cp15(vcpu, (u64 __user *)NULL); |
653 | } | 792 | } |
654 | 793 | ||
@@ -665,9 +804,11 @@ int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | |||
665 | } | 804 | } |
666 | 805 | ||
667 | err = walk_cp15(vcpu, uindices); | 806 | err = walk_cp15(vcpu, uindices); |
668 | if (err > 0) | 807 | if (err < 0) |
669 | err = 0; | 808 | return err; |
670 | return err; | 809 | uindices += err; |
810 | |||
811 | return write_demux_regids(uindices); | ||
671 | } | 812 | } |
672 | 813 | ||
673 | void kvm_coproc_table_init(void) | 814 | void kvm_coproc_table_init(void) |
@@ -681,6 +822,23 @@ void kvm_coproc_table_init(void) | |||
681 | /* We abuse the reset function to overwrite the table itself. */ | 822 | /* We abuse the reset function to overwrite the table itself. */ |
682 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) | 823 | for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) |
683 | invariant_cp15[i].reset(NULL, &invariant_cp15[i]); | 824 | invariant_cp15[i].reset(NULL, &invariant_cp15[i]); |
825 | |||
826 | /* | ||
827 | * CLIDR format is awkward, so clean it up. See ARM B4.1.20: | ||
828 | * | ||
829 | * If software reads the Cache Type fields from Ctype1 | ||
830 | * upwards, once it has seen a value of 0b000, no caches | ||
831 | * exist at further-out levels of the hierarchy. So, for | ||
832 | * example, if Ctype3 is the first Cache Type field with a | ||
833 | * value of 0b000, the values of Ctype4 to Ctype7 must be | ||
834 | * ignored. | ||
835 | */ | ||
836 | asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); | ||
837 | for (i = 0; i < 7; i++) | ||
838 | if (((cache_levels >> (i*3)) & 7) == 0) | ||
839 | break; | ||
840 | /* Clear all higher bits. */ | ||
841 | cache_levels &= (1 << (i*3))-1; | ||
684 | } | 842 | } |
685 | 843 | ||
686 | /** | 844 | /** |