author     Marc Zyngier <marc.zyngier@arm.com>    2013-12-28 05:26:48 -0500
committer  Marc Zyngier <marc.zyngier@arm.com>    2013-12-28 05:26:48 -0500
commit     60dd133a7b77b6adc2d18ff1a85db5a67434a071 (patch)
tree       eb547e3490340335770449ad81d13b9c9f0e5031
parent     989c6b34f6a9480e397b170cc62237e89bf4fdb9 (diff)
parent     fa20f5aea56f271f83e91b9cde00f043a5a14990 (diff)

Merge tag 'vgic-migrate-for-marc' of git://git.linaro.org/people/christoffer.dall/linux-kvm-arm into kvm-arm64/next

VGIC and timer migration pull
-rw-r--r--  Documentation/virtual/kvm/api.txt               |   7
-rw-r--r--  Documentation/virtual/kvm/devices/arm-vgic.txt  |  73
-rw-r--r--  arch/arm/include/asm/kvm_host.h                 |   3
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h                 |  28
-rw-r--r--  arch/arm/kvm/arm.c                              |  19
-rw-r--r--  arch/arm/kvm/guest.c                            |  92
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h               |  18
-rw-r--r--  include/kvm/arm_vgic.h                          |   2
-rw-r--r--  include/linux/irqchip/arm-gic.h                 |  12
-rw-r--r--  include/linux/kvm_host.h                        |   1
-rw-r--r--  include/uapi/linux/kvm.h                        |   1
-rw-r--r--  virt/kvm/arm/arch_timer.c                       |  34
-rw-r--r--  virt/kvm/arm/vgic.c                             | 584
-rw-r--r--  virt/kvm/kvm_main.c                             |   5

14 files changed, 840 insertions(+), 39 deletions(-)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index a30035dd4c26..867112f1968d 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2391,7 +2391,8 @@ struct kvm_reg_list {
2391 | This ioctl returns the guest registers that are supported for the | 2391 | This ioctl returns the guest registers that are supported for the |
2392 | KVM_GET_ONE_REG/KVM_SET_ONE_REG calls. | 2392 | KVM_GET_ONE_REG/KVM_SET_ONE_REG calls. |
2393 | 2393 | ||
2394 | 4.85 KVM_ARM_SET_DEVICE_ADDR | 2394 | |
2395 | 4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated) | ||
2395 | 2396 | ||
2396 | Capability: KVM_CAP_ARM_SET_DEVICE_ADDR | 2397 | Capability: KVM_CAP_ARM_SET_DEVICE_ADDR |
2397 | Architectures: arm, arm64 | 2398 | Architectures: arm, arm64 |
@@ -2429,6 +2430,10 @@ must be called after calling KVM_CREATE_IRQCHIP, but before calling
2429 | KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the | 2430 | KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the |
2430 | base addresses will return -EEXIST. | 2431 | base addresses will return -EEXIST. |
2431 | 2432 | ||
2433 | Note, this IOCTL is deprecated and the more flexible SET/GET_DEVICE_ATTR API | ||
2434 | should be used instead. | ||
2435 | |||
2436 | |||
2432 | 4.86 KVM_PPC_RTAS_DEFINE_TOKEN | 2437 | 4.86 KVM_PPC_RTAS_DEFINE_TOKEN |
2433 | 2438 | ||
2434 | Capability: KVM_CAP_PPC_RTAS | 2439 | Capability: KVM_CAP_PPC_RTAS |
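The api.txt hunk above deprecates KVM_ARM_SET_DEVICE_ADDR in favour of the device-control API. As a rough sketch only (not part of the patch), the replacement flow from user space looks like this; vm_fd is an assumed, already-created VM file descriptor and error handling is omitted:

/*
 * Sketch, not part of the patch: create a VGIC through the device API
 * and set its distributor base, instead of using the deprecated
 * KVM_ARM_SET_DEVICE_ADDR ioctl. vm_fd is an assumed VM fd.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vgic_and_set_dist(int vm_fd, uint64_t dist_base)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_ARM_VGIC_V2,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
		.addr  = (uint64_t)(unsigned long)&dist_base,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	/* cd.fd now refers to the new in-kernel VGIC device. */
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}

The fd returned in cd.fd is also the handle used for the register-access groups documented in the new arm-vgic.txt below.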
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt
new file mode 100644
index 000000000000..7f4e91b1316b
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/arm-vgic.txt
@@ -0,0 +1,73 @@
1 | ARM Virtual Generic Interrupt Controller (VGIC) | ||
2 | =============================================== | ||
3 | |||
4 | Device types supported: | ||
5 | KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0 | ||
6 | |||
7 | Only one VGIC instance may be instantiated through either this API or the | ||
8 | legacy KVM_CREATE_IRQCHIP api. The created VGIC will act as the VM interrupt | ||
9 | controller, requiring emulated user-space devices to inject interrupts to the | ||
10 | VGIC instead of directly to CPUs. | ||
11 | |||
12 | Groups: | ||
13 | KVM_DEV_ARM_VGIC_GRP_ADDR | ||
14 | Attributes: | ||
15 | KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit) | ||
16 | Base address in the guest physical address space of the GIC distributor | ||
17 | register mappings. | ||
18 | |||
19 | KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit) | ||
20 | Base address in the guest physical address space of the GIC virtual cpu | ||
21 | interface register mappings. | ||
22 | |||
23 | KVM_DEV_ARM_VGIC_GRP_DIST_REGS | ||
24 | Attributes: | ||
25 | The attr field of kvm_device_attr encodes two values: | ||
26 | bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 | | ||
27 | values: | reserved | cpu id | offset | | ||
28 | |||
29 | All distributor regs are (rw, 32-bit) | ||
30 | |||
31 | The offset is relative to the "Distributor base address" as defined in the | ||
32 | GICv2 specs. Getting or setting such a register has the same effect as | ||
33 | reading or writing the register on the actual hardware from the cpu | ||
34 | specified with cpu id field. Note that most distributor fields are not | ||
35 | banked, but return the same value regardless of the cpu id used to access | ||
36 | the register. | ||
37 | Limitations: | ||
38 | - Priorities are not implemented, and registers are RAZ/WI | ||
39 | Errors: | ||
40 | -ENODEV: Getting or setting this register is not yet supported | ||
41 | -EBUSY: One or more VCPUs are running | ||
42 | |||
43 | KVM_DEV_ARM_VGIC_GRP_CPU_REGS | ||
44 | Attributes: | ||
45 | The attr field of kvm_device_attr encodes two values: | ||
46 | bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 | | ||
47 | values: | reserved | cpu id | offset | | ||
48 | |||
49 | All CPU interface regs are (rw, 32-bit) | ||
50 | |||
51 | The offset specifies the offset from the "CPU interface base address" as | ||
52 | defined in the GICv2 specs. Getting or setting such a register has the | ||
53 | same effect as reading or writing the register on the actual hardware. | ||
54 | |||
55 | The Active Priorities Registers APRn are implementation defined, so we set a | ||
56 | fixed format for our implementation that fits with the model of a "GICv2 | ||
57 | implementation without the security extensions" which we present to the | ||
58 | guest. This interface always exposes four register APR[0-3] describing the | ||
59 | maximum possible 128 preemption levels. The semantics of the register | ||
60 | indicate if any interrupts in a given preemption level are in the active | ||
61 | state by setting the corresponding bit. | ||
62 | |||
63 | Thus, preemption level X has one or more active interrupts if and only if: | ||
64 | |||
65 | APRn[X mod 32] == 0b1, where n = X / 32 | ||
66 | |||
67 | Bits for undefined preemption levels are RAZ/WI. | ||
68 | |||
69 | Limitations: | ||
70 | - Priorities are not implemented, and registers are RAZ/WI | ||
71 | Errors: | ||
72 | -ENODEV: Getting or setting this register is not yet supported | ||
73 | -EBUSY: One or more VCPUs are running | ||
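To make the attr encoding above concrete, here is a hedged user-space sketch (not part of the patch, and it assumes kernel headers that already carry the KVM_DEV_ARM_VGIC_* constants from this series): it packs the cpu id and register offset as documented, reads one 32-bit distributor register through KVM_GET_DEVICE_ATTR, and spells out the APRn preemption-level test. vgic_fd is an assumed device fd returned by KVM_CREATE_DEVICE.

/*
 * Illustration only: read one 32-bit distributor register via the
 * cpu-id/offset encoding documented above. Error handling is omitted.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t vgic_attr(uint32_t cpuid, uint32_t offset)
{
	return ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
	       ((uint64_t)offset & KVM_DEV_ARM_VGIC_OFFSET_MASK);
}

static int vgic_read_dist_reg(int vgic_fd, uint32_t cpuid,
			      uint32_t offset, uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		.attr  = vgic_attr(cpuid, offset),
		.addr  = (uint64_t)(unsigned long)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}

/* Preemption level x is active iff APRn[x mod 32] is set, with n = x / 32. */
static int apr_level_active(const uint32_t apr[4], unsigned int x)
{
	return (apr[x / 32] >> (x % 32)) & 1;
}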
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8a6f6db14ee4..098f7dd6d564 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,4 +225,7 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
225 | int kvm_perf_init(void); | 225 | int kvm_perf_init(void); |
226 | int kvm_perf_teardown(void); | 226 | int kvm_perf_teardown(void); |
227 | 227 | ||
228 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid); | ||
229 | int kvm_arm_timer_set_reg(struct kvm_vcpu *, u64 regid, u64 value); | ||
230 | |||
228 | #endif /* __ARM_KVM_HOST_H__ */ | 231 | #endif /* __ARM_KVM_HOST_H__ */ |
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index c498b60c0505..ef0c8785ba16 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -119,6 +119,26 @@ struct kvm_arch_memory_slot {
119 | #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 | 119 | #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 |
120 | #define KVM_REG_ARM_32_CRN_SHIFT 11 | 120 | #define KVM_REG_ARM_32_CRN_SHIFT 11 |
121 | 121 | ||
122 | #define ARM_CP15_REG_SHIFT_MASK(x,n) \ | ||
123 | (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) | ||
124 | |||
125 | #define __ARM_CP15_REG(op1,crn,crm,op2) \ | ||
126 | (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ | ||
127 | ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ | ||
128 | ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ | ||
129 | ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ | ||
130 | ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) | ||
131 | |||
132 | #define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) | ||
133 | |||
134 | #define __ARM_CP15_REG64(op1,crm) \ | ||
135 | (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) | ||
136 | #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) | ||
137 | |||
138 | #define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) | ||
139 | #define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) | ||
140 | #define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) | ||
141 | |||
122 | /* Normal registers are mapped as coprocessor 16. */ | 142 | /* Normal registers are mapped as coprocessor 16. */ |
123 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) | 143 | #define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) |
124 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) | 144 | #define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) |
@@ -143,6 +163,14 @@ struct kvm_arch_memory_slot {
143 | #define KVM_REG_ARM_VFP_FPINST 0x1009 | 163 | #define KVM_REG_ARM_VFP_FPINST 0x1009 |
144 | #define KVM_REG_ARM_VFP_FPINST2 0x100A | 164 | #define KVM_REG_ARM_VFP_FPINST2 0x100A |
145 | 165 | ||
166 | /* Device Control API: ARM VGIC */ | ||
167 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 | ||
168 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 | ||
169 | #define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 | ||
170 | #define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 | ||
171 | #define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | ||
172 | #define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 | ||
173 | #define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) | ||
146 | 174 | ||
147 | /* KVM_IRQ_LINE irq field index values */ | 175 | /* KVM_IRQ_LINE irq field index values */ |
148 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 176 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2a700e00528d..b92ff6d3e34b 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -137,6 +137,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
137 | if (ret) | 137 | if (ret) |
138 | goto out_free_stage2_pgd; | 138 | goto out_free_stage2_pgd; |
139 | 139 | ||
140 | kvm_timer_init(kvm); | ||
141 | |||
140 | /* Mark the initial VMID generation invalid */ | 142 | /* Mark the initial VMID generation invalid */ |
141 | kvm->arch.vmid_gen = 0; | 143 | kvm->arch.vmid_gen = 0; |
142 | 144 | ||
@@ -188,6 +190,7 @@ int kvm_dev_ioctl_check_extension(long ext)
188 | case KVM_CAP_IRQCHIP: | 190 | case KVM_CAP_IRQCHIP: |
189 | r = vgic_present; | 191 | r = vgic_present; |
190 | break; | 192 | break; |
193 | case KVM_CAP_DEVICE_CTRL: | ||
191 | case KVM_CAP_USER_MEMORY: | 194 | case KVM_CAP_USER_MEMORY: |
192 | case KVM_CAP_SYNC_MMU: | 195 | case KVM_CAP_SYNC_MMU: |
193 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: | 196 | case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: |
@@ -339,6 +342,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
339 | 342 | ||
340 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 343 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
341 | { | 344 | { |
345 | /* | ||
346 | * The arch-generic KVM code expects the cpu field of a vcpu to be -1 | ||
347 | * if the vcpu is no longer assigned to a cpu. This is used for the | ||
348 | * optimized make_all_cpus_request path. | ||
349 | */ | ||
350 | vcpu->cpu = -1; | ||
351 | |||
342 | kvm_arm_set_running_vcpu(NULL); | 352 | kvm_arm_set_running_vcpu(NULL); |
343 | } | 353 | } |
344 | 354 | ||
@@ -462,6 +472,8 @@ static void update_vttbr(struct kvm *kvm)
462 | 472 | ||
463 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | 473 | static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) |
464 | { | 474 | { |
475 | int ret; | ||
476 | |||
465 | if (likely(vcpu->arch.has_run_once)) | 477 | if (likely(vcpu->arch.has_run_once)) |
466 | return 0; | 478 | return 0; |
467 | 479 | ||
@@ -471,9 +483,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
471 | * Initialize the VGIC before running a vcpu the first time on | 483 | * Initialize the VGIC before running a vcpu the first time on |
472 | * this VM. | 484 | * this VM. |
473 | */ | 485 | */ |
474 | if (irqchip_in_kernel(vcpu->kvm) && | 486 | if (unlikely(!vgic_initialized(vcpu->kvm))) { |
475 | unlikely(!vgic_initialized(vcpu->kvm))) { | 487 | ret = kvm_vgic_init(vcpu->kvm); |
476 | int ret = kvm_vgic_init(vcpu->kvm); | ||
477 | if (ret) | 488 | if (ret) |
478 | return ret; | 489 | return ret; |
479 | } | 490 | } |
@@ -772,7 +783,7 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
772 | case KVM_ARM_DEVICE_VGIC_V2: | 783 | case KVM_ARM_DEVICE_VGIC_V2: |
773 | if (!vgic_present) | 784 | if (!vgic_present) |
774 | return -ENXIO; | 785 | return -ENXIO; |
775 | return kvm_vgic_set_addr(kvm, type, dev_addr->addr); | 786 | return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); |
776 | default: | 787 | default: |
777 | return -ENODEV; | 788 | return -ENODEV; |
778 | } | 789 | } |
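Since the arm.c hunk above now advertises KVM_CAP_DEVICE_CTRL, user space can probe for the device-control path before falling back to the deprecated ioctl. A minimal sketch (not from the patch), assuming kvm_fd is the open /dev/kvm file descriptor:

/* Sketch: probe for the device-control API before using KVM_CREATE_DEVICE. */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int have_device_ctrl(int kvm_fd)
{
	/* KVM_CHECK_EXTENSION returns 0 when the capability is absent. */
	return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) > 0;
}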
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 20f8d97904af..2786eae10c0d 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -109,6 +109,83 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
109 | return -EINVAL; | 109 | return -EINVAL; |
110 | } | 110 | } |
111 | 111 | ||
112 | #ifndef CONFIG_KVM_ARM_TIMER | ||
113 | |||
114 | #define NUM_TIMER_REGS 0 | ||
115 | |||
116 | static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
117 | { | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | static bool is_timer_reg(u64 index) | ||
122 | { | ||
123 | return false; | ||
124 | } | ||
125 | |||
126 | int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) | ||
127 | { | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) | ||
132 | { | ||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | #else | ||
137 | |||
138 | #define NUM_TIMER_REGS 3 | ||
139 | |||
140 | static bool is_timer_reg(u64 index) | ||
141 | { | ||
142 | switch (index) { | ||
143 | case KVM_REG_ARM_TIMER_CTL: | ||
144 | case KVM_REG_ARM_TIMER_CNT: | ||
145 | case KVM_REG_ARM_TIMER_CVAL: | ||
146 | return true; | ||
147 | } | ||
148 | return false; | ||
149 | } | ||
150 | |||
151 | static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | ||
152 | { | ||
153 | if (put_user(KVM_REG_ARM_TIMER_CTL, uindices)) | ||
154 | return -EFAULT; | ||
155 | uindices++; | ||
156 | if (put_user(KVM_REG_ARM_TIMER_CNT, uindices)) | ||
157 | return -EFAULT; | ||
158 | uindices++; | ||
159 | if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices)) | ||
160 | return -EFAULT; | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | #endif | ||
166 | |||
167 | static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
168 | { | ||
169 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
170 | u64 val; | ||
171 | int ret; | ||
172 | |||
173 | ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)); | ||
174 | if (ret != 0) | ||
175 | return ret; | ||
176 | |||
177 | return kvm_arm_timer_set_reg(vcpu, reg->id, val); | ||
178 | } | ||
179 | |||
180 | static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
181 | { | ||
182 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
183 | u64 val; | ||
184 | |||
185 | val = kvm_arm_timer_get_reg(vcpu, reg->id); | ||
186 | return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); | ||
187 | } | ||
188 | |||
112 | static unsigned long num_core_regs(void) | 189 | static unsigned long num_core_regs(void) |
113 | { | 190 | { |
114 | return sizeof(struct kvm_regs) / sizeof(u32); | 191 | return sizeof(struct kvm_regs) / sizeof(u32); |
@@ -121,7 +198,8 @@ static unsigned long num_core_regs(void)
121 | */ | 198 | */ |
122 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) | 199 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) |
123 | { | 200 | { |
124 | return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); | 201 | return num_core_regs() + kvm_arm_num_coproc_regs(vcpu) |
202 | + NUM_TIMER_REGS; | ||
125 | } | 203 | } |
126 | 204 | ||
127 | /** | 205 | /** |
@@ -133,6 +211,7 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
133 | { | 211 | { |
134 | unsigned int i; | 212 | unsigned int i; |
135 | const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; | 213 | const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; |
214 | int ret; | ||
136 | 215 | ||
137 | for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { | 216 | for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { |
138 | if (put_user(core_reg | i, uindices)) | 217 | if (put_user(core_reg | i, uindices)) |
@@ -140,6 +219,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
140 | uindices++; | 219 | uindices++; |
141 | } | 220 | } |
142 | 221 | ||
222 | ret = copy_timer_indices(vcpu, uindices); | ||
223 | if (ret) | ||
224 | return ret; | ||
225 | uindices += NUM_TIMER_REGS; | ||
226 | |||
143 | return kvm_arm_copy_coproc_indices(vcpu, uindices); | 227 | return kvm_arm_copy_coproc_indices(vcpu, uindices); |
144 | } | 228 | } |
145 | 229 | ||
@@ -153,6 +237,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
153 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | 237 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) |
154 | return get_core_reg(vcpu, reg); | 238 | return get_core_reg(vcpu, reg); |
155 | 239 | ||
240 | if (is_timer_reg(reg->id)) | ||
241 | return get_timer_reg(vcpu, reg); | ||
242 | |||
156 | return kvm_arm_coproc_get_reg(vcpu, reg); | 243 | return kvm_arm_coproc_get_reg(vcpu, reg); |
157 | } | 244 | } |
158 | 245 | ||
@@ -166,6 +253,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
166 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | 253 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) |
167 | return set_core_reg(vcpu, reg); | 254 | return set_core_reg(vcpu, reg); |
168 | 255 | ||
256 | if (is_timer_reg(reg->id)) | ||
257 | return set_timer_reg(vcpu, reg); | ||
258 | |||
169 | return kvm_arm_coproc_set_reg(vcpu, reg); | 259 | return kvm_arm_coproc_set_reg(vcpu, reg); |
170 | } | 260 | } |
171 | 261 | ||
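copy_timer_indices() in the guest.c hunk above makes the three timer register ids show up in KVM_GET_REG_LIST. A sketch (not from the patch) of how user space might walk that list and pick them out; vcpu_fd is an assumed vcpu file descriptor, error handling is omitted, and the KVM_REG_ARM_TIMER_* constants are assumed to come from the headers added by this series:

/* Sketch only: count the timer registers reported by KVM_GET_REG_LIST. */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int count_timer_regs(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;
	uint64_t i;
	int timers = 0;

	/* The first call fails with E2BIG but reports the required count in .n */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(uint64_t));
	list->n = probe.n;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

	for (i = 0; i < list->n; i++) {
		switch (list->reg[i]) {
		case KVM_REG_ARM_TIMER_CTL:
		case KVM_REG_ARM_TIMER_CNT:
		case KVM_REG_ARM_TIMER_CVAL:
			timers++;
		}
	}

	free(list);
	return timers;
}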
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 5031f4263937..7c25ca8b02b3 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -129,6 +129,24 @@ struct kvm_arch_memory_slot {
129 | #define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 | 129 | #define KVM_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 |
130 | #define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0 | 130 | #define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0 |
131 | 131 | ||
132 | #define ARM64_SYS_REG_SHIFT_MASK(x,n) \ | ||
133 | (((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \ | ||
134 | KVM_REG_ARM64_SYSREG_ ## n ## _MASK) | ||
135 | |||
136 | #define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \ | ||
137 | (KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \ | ||
138 | ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \ | ||
139 | ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \ | ||
140 | ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \ | ||
141 | ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \ | ||
142 | ARM64_SYS_REG_SHIFT_MASK(op2, OP2)) | ||
143 | |||
144 | #define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) | ||
145 | |||
146 | #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) | ||
147 | #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) | ||
148 | #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) | ||
149 | |||
132 | /* KVM_IRQ_LINE irq field index values */ | 150 | /* KVM_IRQ_LINE irq field index values */ |
133 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 151 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
134 | #define KVM_ARM_IRQ_TYPE_MASK 0xff | 152 | #define KVM_ARM_IRQ_TYPE_MASK 0xff |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 7e2d15837b02..be85127bfed3 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -144,7 +144,7 @@ struct kvm_run;
144 | struct kvm_exit_mmio; | 144 | struct kvm_exit_mmio; |
145 | 145 | ||
146 | #ifdef CONFIG_KVM_ARM_VGIC | 146 | #ifdef CONFIG_KVM_ARM_VGIC |
147 | int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr); | 147 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); |
148 | int kvm_vgic_hyp_init(void); | 148 | int kvm_vgic_hyp_init(void); |
149 | int kvm_vgic_init(struct kvm *kvm); | 149 | int kvm_vgic_init(struct kvm *kvm); |
150 | int kvm_vgic_create(struct kvm *kvm); | 150 | int kvm_vgic_create(struct kvm *kvm); |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index cac496b1e279..0ceb389dba6c 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -17,6 +17,9 @@
17 | #define GIC_CPU_EOI 0x10 | 17 | #define GIC_CPU_EOI 0x10 |
18 | #define GIC_CPU_RUNNINGPRI 0x14 | 18 | #define GIC_CPU_RUNNINGPRI 0x14 |
19 | #define GIC_CPU_HIGHPRI 0x18 | 19 | #define GIC_CPU_HIGHPRI 0x18 |
20 | #define GIC_CPU_ALIAS_BINPOINT 0x1c | ||
21 | #define GIC_CPU_ACTIVEPRIO 0xd0 | ||
22 | #define GIC_CPU_IDENT 0xfc | ||
20 | 23 | ||
21 | #define GIC_DIST_CTRL 0x000 | 24 | #define GIC_DIST_CTRL 0x000 |
22 | #define GIC_DIST_CTR 0x004 | 25 | #define GIC_DIST_CTR 0x004 |
@@ -56,6 +59,15 @@
56 | #define GICH_LR_ACTIVE_BIT (1 << 29) | 59 | #define GICH_LR_ACTIVE_BIT (1 << 29) |
57 | #define GICH_LR_EOI (1 << 19) | 60 | #define GICH_LR_EOI (1 << 19) |
58 | 61 | ||
62 | #define GICH_VMCR_CTRL_SHIFT 0 | ||
63 | #define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT) | ||
64 | #define GICH_VMCR_PRIMASK_SHIFT 27 | ||
65 | #define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT) | ||
66 | #define GICH_VMCR_BINPOINT_SHIFT 21 | ||
67 | #define GICH_VMCR_BINPOINT_MASK (0x7 << GICH_VMCR_BINPOINT_SHIFT) | ||
68 | #define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18 | ||
69 | #define GICH_VMCR_ALIAS_BINPOINT_MASK (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT) | ||
70 | |||
59 | #define GICH_MISR_EOI (1 << 0) | 71 | #define GICH_MISR_EOI (1 << 0) |
60 | #define GICH_MISR_U (1 << 1) | 72 | #define GICH_MISR_U (1 << 1) |
61 | 73 | ||
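The new GICH_VMCR field definitions above are consumed by handle_cpu_mmio_misc() later in this series, which reduces every user-space GICC register access to a mask-and-shift on the shadow VMCR. A generic helper pair illustrating that pattern (illustration only, not part of the patch):

/* Illustration: the mask-and-shift pattern used on the shadow GICH_VMCR. */
#include <linux/types.h>
#include <linux/irqchip/arm-gic.h>

static inline u32 vmcr_field_get(u32 vmcr, u32 mask, u32 shift)
{
	return (vmcr & mask) >> shift;
}

static inline u32 vmcr_field_set(u32 vmcr, u32 mask, u32 shift, u32 field)
{
	return (vmcr & ~mask) | ((field << shift) & mask);
}

/* e.g. vmcr_field_get(vmcr, GICH_VMCR_BINPOINT_MASK, GICH_VMCR_BINPOINT_SHIFT) */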
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4ecf10775c4f..1f46f66f60ab 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1075,6 +1075,7 @@ struct kvm_device *kvm_device_from_filp(struct file *filp);
1075 | extern struct kvm_device_ops kvm_mpic_ops; | 1075 | extern struct kvm_device_ops kvm_mpic_ops; |
1076 | extern struct kvm_device_ops kvm_xics_ops; | 1076 | extern struct kvm_device_ops kvm_xics_ops; |
1077 | extern struct kvm_device_ops kvm_vfio_ops; | 1077 | extern struct kvm_device_ops kvm_vfio_ops; |
1078 | extern struct kvm_device_ops kvm_arm_vgic_v2_ops; | ||
1078 | 1079 | ||
1079 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT | 1080 | #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT |
1080 | 1081 | ||
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 902f12461873..b647c2917391 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -853,6 +853,7 @@ struct kvm_device_attr {
853 | #define KVM_DEV_VFIO_GROUP 1 | 853 | #define KVM_DEV_VFIO_GROUP 1 |
854 | #define KVM_DEV_VFIO_GROUP_ADD 1 | 854 | #define KVM_DEV_VFIO_GROUP_ADD 1 |
855 | #define KVM_DEV_VFIO_GROUP_DEL 2 | 855 | #define KVM_DEV_VFIO_GROUP_DEL 2 |
856 | #define KVM_DEV_TYPE_ARM_VGIC_V2 5 | ||
856 | 857 | ||
857 | /* | 858 | /* |
858 | * ioctls for VM fds | 859 | * ioctls for VM fds |
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index c2e1ef4604e8..5081e809821f 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -182,6 +182,40 @@ static void kvm_timer_init_interrupt(void *info)
182 | enable_percpu_irq(host_vtimer_irq, 0); | 182 | enable_percpu_irq(host_vtimer_irq, 0); |
183 | } | 183 | } |
184 | 184 | ||
185 | int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) | ||
186 | { | ||
187 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
188 | |||
189 | switch (regid) { | ||
190 | case KVM_REG_ARM_TIMER_CTL: | ||
191 | timer->cntv_ctl = value; | ||
192 | break; | ||
193 | case KVM_REG_ARM_TIMER_CNT: | ||
194 | vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value; | ||
195 | break; | ||
196 | case KVM_REG_ARM_TIMER_CVAL: | ||
197 | timer->cntv_cval = value; | ||
198 | break; | ||
199 | default: | ||
200 | return -1; | ||
201 | } | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) | ||
206 | { | ||
207 | struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; | ||
208 | |||
209 | switch (regid) { | ||
210 | case KVM_REG_ARM_TIMER_CTL: | ||
211 | return timer->cntv_ctl; | ||
212 | case KVM_REG_ARM_TIMER_CNT: | ||
213 | return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff; | ||
214 | case KVM_REG_ARM_TIMER_CVAL: | ||
215 | return timer->cntv_cval; | ||
216 | } | ||
217 | return (u64)-1; | ||
218 | } | ||
185 | 219 | ||
186 | static int kvm_timer_cpu_notify(struct notifier_block *self, | 220 | static int kvm_timer_cpu_notify(struct notifier_block *self, |
187 | unsigned long action, void *cpu) | 221 | unsigned long action, void *cpu) |
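kvm_arm_timer_set_reg() above turns a write of KVM_REG_ARM_TIMER_CNT into a recomputation of cntvoff, which is what lets migration preserve the guest's virtual counter. A hedged ONE_REG sketch of the save/restore pair (not from the patch; vcpu_fd is an assumed vcpu file descriptor):

/* Sketch: save/restore the guest virtual counter via ONE_REG. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int timer_cnt_get(int vcpu_fd, uint64_t *cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,
		.addr = (uint64_t)(unsigned long)cnt,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

static int timer_cnt_set(int vcpu_fd, uint64_t cnt)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_TIMER_CNT,
		.addr = (uint64_t)(unsigned long)&cnt,
	};

	/* Restoring CNT makes the kernel recompute cntvoff as shown above. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}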
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 685fc72fc751..be456ce264d0 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -71,6 +71,10 @@
71 | #define VGIC_ADDR_UNDEF (-1) | 71 | #define VGIC_ADDR_UNDEF (-1) |
72 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | 72 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) |
73 | 73 | ||
74 | #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ | ||
75 | #define IMPLEMENTER_ARM 0x43b | ||
76 | #define GICC_ARCH_VERSION_V2 0x2 | ||
77 | |||
74 | /* Physical address of vgic virtual cpu interface */ | 78 | /* Physical address of vgic virtual cpu interface */ |
75 | static phys_addr_t vgic_vcpu_base; | 79 | static phys_addr_t vgic_vcpu_base; |
76 | 80 | ||
@@ -312,7 +316,7 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
312 | u32 word_offset = offset & 3; | 316 | u32 word_offset = offset & 3; |
313 | 317 | ||
314 | switch (offset & ~3) { | 318 | switch (offset & ~3) { |
315 | case 0: /* CTLR */ | 319 | case 0: /* GICD_CTLR */ |
316 | reg = vcpu->kvm->arch.vgic.enabled; | 320 | reg = vcpu->kvm->arch.vgic.enabled; |
317 | vgic_reg_access(mmio, ®, word_offset, | 321 | vgic_reg_access(mmio, ®, word_offset, |
318 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); | 322 | ACCESS_READ_VALUE | ACCESS_WRITE_VALUE); |
@@ -323,15 +327,15 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
323 | } | 327 | } |
324 | break; | 328 | break; |
325 | 329 | ||
326 | case 4: /* TYPER */ | 330 | case 4: /* GICD_TYPER */ |
327 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; | 331 | reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5; |
328 | reg |= (VGIC_NR_IRQS >> 5) - 1; | 332 | reg |= (VGIC_NR_IRQS >> 5) - 1; |
329 | vgic_reg_access(mmio, ®, word_offset, | 333 | vgic_reg_access(mmio, ®, word_offset, |
330 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | 334 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); |
331 | break; | 335 | break; |
332 | 336 | ||
333 | case 8: /* IIDR */ | 337 | case 8: /* GICD_IIDR */ |
334 | reg = 0x4B00043B; | 338 | reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0); |
335 | vgic_reg_access(mmio, ®, word_offset, | 339 | vgic_reg_access(mmio, ®, word_offset, |
336 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); | 340 | ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED); |
337 | break; | 341 | break; |
@@ -589,6 +593,156 @@ static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
589 | return false; | 593 | return false; |
590 | } | 594 | } |
591 | 595 | ||
596 | #define LR_CPUID(lr) \ | ||
597 | (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT) | ||
598 | #define LR_IRQID(lr) \ | ||
599 | ((lr) & GICH_LR_VIRTUALID) | ||
600 | |||
601 | static void vgic_retire_lr(int lr_nr, int irq, struct vgic_cpu *vgic_cpu) | ||
602 | { | ||
603 | clear_bit(lr_nr, vgic_cpu->lr_used); | ||
604 | vgic_cpu->vgic_lr[lr_nr] &= ~GICH_LR_STATE; | ||
605 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; | ||
606 | } | ||
607 | |||
608 | /** | ||
609 | * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor | ||
610 | * @vgic_cpu: Pointer to the vgic_cpu struct holding the LRs | ||
611 | * | ||
612 | * Move any pending IRQs that have already been assigned to LRs back to the | ||
613 | * emulated distributor state so that the complete emulated state can be read | ||
614 | * from the main emulation structures without investigating the LRs. | ||
615 | * | ||
616 | * Note that IRQs in the active state in the LRs get their pending state moved | ||
617 | * to the distributor but the active state stays in the LRs, because we don't | ||
618 | * track the active state on the distributor side. | ||
619 | */ | ||
620 | static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | ||
621 | { | ||
622 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
623 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
624 | int vcpu_id = vcpu->vcpu_id; | ||
625 | int i, irq, source_cpu; | ||
626 | u32 *lr; | ||
627 | |||
628 | for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) { | ||
629 | lr = &vgic_cpu->vgic_lr[i]; | ||
630 | irq = LR_IRQID(*lr); | ||
631 | source_cpu = LR_CPUID(*lr); | ||
632 | |||
633 | /* | ||
634 | * There are three options for the state bits: | ||
635 | * | ||
636 | * 01: pending | ||
637 | * 10: active | ||
638 | * 11: pending and active | ||
639 | * | ||
640 | * If the LR holds only an active interrupt (not pending) then | ||
641 | * just leave it alone. | ||
642 | */ | ||
643 | if ((*lr & GICH_LR_STATE) == GICH_LR_ACTIVE_BIT) | ||
644 | continue; | ||
645 | |||
646 | /* | ||
647 | * Reestablish the pending state on the distributor and the | ||
648 | * CPU interface. It may have already been pending, but that | ||
649 | * is fine, then we are only setting a few bits that were | ||
650 | * already set. | ||
651 | */ | ||
652 | vgic_dist_irq_set(vcpu, irq); | ||
653 | if (irq < VGIC_NR_SGIS) | ||
654 | dist->irq_sgi_sources[vcpu_id][irq] |= 1 << source_cpu; | ||
655 | *lr &= ~GICH_LR_PENDING_BIT; | ||
656 | |||
657 | /* | ||
658 | * If there's no state left on the LR (it could still be | ||
659 | * active), then the LR does not hold any useful info and can | ||
660 | * be marked as free for other use. | ||
661 | */ | ||
662 | if (!(*lr & GICH_LR_STATE)) | ||
663 | vgic_retire_lr(i, irq, vgic_cpu); | ||
664 | |||
665 | /* Finally update the VGIC state. */ | ||
666 | vgic_update_state(vcpu->kvm); | ||
667 | } | ||
668 | } | ||
669 | |||
670 | /* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */ | ||
671 | static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | ||
672 | struct kvm_exit_mmio *mmio, | ||
673 | phys_addr_t offset) | ||
674 | { | ||
675 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
676 | int sgi; | ||
677 | int min_sgi = (offset & ~0x3) * 4; | ||
678 | int max_sgi = min_sgi + 3; | ||
679 | int vcpu_id = vcpu->vcpu_id; | ||
680 | u32 reg = 0; | ||
681 | |||
682 | /* Copy source SGIs from distributor side */ | ||
683 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | ||
684 | int shift = 8 * (sgi - min_sgi); | ||
685 | reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift; | ||
686 | } | ||
687 | |||
688 | mmio_data_write(mmio, ~0, reg); | ||
689 | return false; | ||
690 | } | ||
691 | |||
692 | static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu, | ||
693 | struct kvm_exit_mmio *mmio, | ||
694 | phys_addr_t offset, bool set) | ||
695 | { | ||
696 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
697 | int sgi; | ||
698 | int min_sgi = (offset & ~0x3) * 4; | ||
699 | int max_sgi = min_sgi + 3; | ||
700 | int vcpu_id = vcpu->vcpu_id; | ||
701 | u32 reg; | ||
702 | bool updated = false; | ||
703 | |||
704 | reg = mmio_data_read(mmio, ~0); | ||
705 | |||
706 | /* Clear pending SGIs on the distributor */ | ||
707 | for (sgi = min_sgi; sgi <= max_sgi; sgi++) { | ||
708 | u8 mask = reg >> (8 * (sgi - min_sgi)); | ||
709 | if (set) { | ||
710 | if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask) | ||
711 | updated = true; | ||
712 | dist->irq_sgi_sources[vcpu_id][sgi] |= mask; | ||
713 | } else { | ||
714 | if (dist->irq_sgi_sources[vcpu_id][sgi] & mask) | ||
715 | updated = true; | ||
716 | dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask; | ||
717 | } | ||
718 | } | ||
719 | |||
720 | if (updated) | ||
721 | vgic_update_state(vcpu->kvm); | ||
722 | |||
723 | return updated; | ||
724 | } | ||
725 | |||
726 | static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu, | ||
727 | struct kvm_exit_mmio *mmio, | ||
728 | phys_addr_t offset) | ||
729 | { | ||
730 | if (!mmio->is_write) | ||
731 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | ||
732 | else | ||
733 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true); | ||
734 | } | ||
735 | |||
736 | static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | ||
737 | struct kvm_exit_mmio *mmio, | ||
738 | phys_addr_t offset) | ||
739 | { | ||
740 | if (!mmio->is_write) | ||
741 | return read_set_clear_sgi_pend_reg(vcpu, mmio, offset); | ||
742 | else | ||
743 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); | ||
744 | } | ||
745 | |||
592 | /* | 746 | /* |
593 | * I would have liked to use the kvm_bus_io_*() API instead, but it | 747 | * I would have liked to use the kvm_bus_io_*() API instead, but it |
594 | * cannot cope with banked registers (only the VM pointer is passed | 748 | * cannot cope with banked registers (only the VM pointer is passed |
@@ -602,7 +756,7 @@ struct mmio_range {
602 | phys_addr_t offset); | 756 | phys_addr_t offset); |
603 | }; | 757 | }; |
604 | 758 | ||
605 | static const struct mmio_range vgic_ranges[] = { | 759 | static const struct mmio_range vgic_dist_ranges[] = { |
606 | { | 760 | { |
607 | .base = GIC_DIST_CTRL, | 761 | .base = GIC_DIST_CTRL, |
608 | .len = 12, | 762 | .len = 12, |
@@ -663,20 +817,29 @@ static const struct mmio_range vgic_ranges[] = {
663 | .len = 4, | 817 | .len = 4, |
664 | .handle_mmio = handle_mmio_sgi_reg, | 818 | .handle_mmio = handle_mmio_sgi_reg, |
665 | }, | 819 | }, |
820 | { | ||
821 | .base = GIC_DIST_SGI_PENDING_CLEAR, | ||
822 | .len = VGIC_NR_SGIS, | ||
823 | .handle_mmio = handle_mmio_sgi_clear, | ||
824 | }, | ||
825 | { | ||
826 | .base = GIC_DIST_SGI_PENDING_SET, | ||
827 | .len = VGIC_NR_SGIS, | ||
828 | .handle_mmio = handle_mmio_sgi_set, | ||
829 | }, | ||
666 | {} | 830 | {} |
667 | }; | 831 | }; |
668 | 832 | ||
669 | static const | 833 | static const |
670 | struct mmio_range *find_matching_range(const struct mmio_range *ranges, | 834 | struct mmio_range *find_matching_range(const struct mmio_range *ranges, |
671 | struct kvm_exit_mmio *mmio, | 835 | struct kvm_exit_mmio *mmio, |
672 | phys_addr_t base) | 836 | phys_addr_t offset) |
673 | { | 837 | { |
674 | const struct mmio_range *r = ranges; | 838 | const struct mmio_range *r = ranges; |
675 | phys_addr_t addr = mmio->phys_addr - base; | ||
676 | 839 | ||
677 | while (r->len) { | 840 | while (r->len) { |
678 | if (addr >= r->base && | 841 | if (offset >= r->base && |
679 | (addr + mmio->len) <= (r->base + r->len)) | 842 | (offset + mmio->len) <= (r->base + r->len)) |
680 | return r; | 843 | return r; |
681 | r++; | 844 | r++; |
682 | } | 845 | } |
@@ -713,7 +876,8 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
713 | return true; | 876 | return true; |
714 | } | 877 | } |
715 | 878 | ||
716 | range = find_matching_range(vgic_ranges, mmio, base); | 879 | offset = mmio->phys_addr - base; |
880 | range = find_matching_range(vgic_dist_ranges, mmio, offset); | ||
717 | if (unlikely(!range || !range->handle_mmio)) { | 881 | if (unlikely(!range || !range->handle_mmio)) { |
718 | pr_warn("Unhandled access %d %08llx %d\n", | 882 | pr_warn("Unhandled access %d %08llx %d\n", |
719 | mmio->is_write, mmio->phys_addr, mmio->len); | 883 | mmio->is_write, mmio->phys_addr, mmio->len); |
@@ -824,8 +988,6 @@ static void vgic_update_state(struct kvm *kvm)
824 | } | 988 | } |
825 | } | 989 | } |
826 | 990 | ||
827 | #define LR_CPUID(lr) \ | ||
828 | (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT) | ||
829 | #define MK_LR_PEND(src, irq) \ | 991 | #define MK_LR_PEND(src, irq) \ |
830 | (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq)) | 992 | (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq)) |
831 | 993 | ||
@@ -847,9 +1009,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
847 | int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; | 1009 | int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID; |
848 | 1010 | ||
849 | if (!vgic_irq_is_enabled(vcpu, irq)) { | 1011 | if (!vgic_irq_is_enabled(vcpu, irq)) { |
850 | vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY; | 1012 | vgic_retire_lr(lr, irq, vgic_cpu); |
851 | clear_bit(lr, vgic_cpu->lr_used); | ||
852 | vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE; | ||
853 | if (vgic_irq_is_active(vcpu, irq)) | 1013 | if (vgic_irq_is_active(vcpu, irq)) |
854 | vgic_irq_clear_active(vcpu, irq); | 1014 | vgic_irq_clear_active(vcpu, irq); |
855 | } | 1015 | } |
@@ -1243,15 +1403,19 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1243 | return IRQ_HANDLED; | 1403 | return IRQ_HANDLED; |
1244 | } | 1404 | } |
1245 | 1405 | ||
1406 | /** | ||
1407 | * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state | ||
1408 | * @vcpu: pointer to the vcpu struct | ||
1409 | * | ||
1410 | * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to | ||
1411 | * this vcpu and enable the VGIC for this VCPU | ||
1412 | */ | ||
1246 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | 1413 | int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) |
1247 | { | 1414 | { |
1248 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1415 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1249 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1416 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1250 | int i; | 1417 | int i; |
1251 | 1418 | ||
1252 | if (!irqchip_in_kernel(vcpu->kvm)) | ||
1253 | return 0; | ||
1254 | |||
1255 | if (vcpu->vcpu_id >= VGIC_MAX_CPUS) | 1419 | if (vcpu->vcpu_id >= VGIC_MAX_CPUS) |
1256 | return -EBUSY; | 1420 | return -EBUSY; |
1257 | 1421 | ||
@@ -1383,10 +1547,22 @@ out:
1383 | return ret; | 1547 | return ret; |
1384 | } | 1548 | } |
1385 | 1549 | ||
1550 | /** | ||
1551 | * kvm_vgic_init - Initialize global VGIC state before running any VCPUs | ||
1552 | * @kvm: pointer to the kvm struct | ||
1553 | * | ||
1554 | * Map the virtual CPU interface into the VM before running any VCPUs. We | ||
1555 | * can't do this at creation time, because user space must first set the | ||
1556 | * virtual CPU interface address in the guest physical address space. Also | ||
1557 | * initialize the ITARGETSRn regs to 0 on the emulated distributor. | ||
1558 | */ | ||
1386 | int kvm_vgic_init(struct kvm *kvm) | 1559 | int kvm_vgic_init(struct kvm *kvm) |
1387 | { | 1560 | { |
1388 | int ret = 0, i; | 1561 | int ret = 0, i; |
1389 | 1562 | ||
1563 | if (!irqchip_in_kernel(kvm)) | ||
1564 | return 0; | ||
1565 | |||
1390 | mutex_lock(&kvm->lock); | 1566 | mutex_lock(&kvm->lock); |
1391 | 1567 | ||
1392 | if (vgic_initialized(kvm)) | 1568 | if (vgic_initialized(kvm)) |
@@ -1409,7 +1585,6 @@ int kvm_vgic_init(struct kvm *kvm)
1409 | for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4) | 1585 | for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4) |
1410 | vgic_set_target_reg(kvm, 0, i); | 1586 | vgic_set_target_reg(kvm, 0, i); |
1411 | 1587 | ||
1412 | kvm_timer_init(kvm); | ||
1413 | kvm->arch.vgic.ready = true; | 1588 | kvm->arch.vgic.ready = true; |
1414 | out: | 1589 | out: |
1415 | mutex_unlock(&kvm->lock); | 1590 | mutex_unlock(&kvm->lock); |
@@ -1418,20 +1593,45 @@ out:
1418 | 1593 | ||
1419 | int kvm_vgic_create(struct kvm *kvm) | 1594 | int kvm_vgic_create(struct kvm *kvm) |
1420 | { | 1595 | { |
1421 | int ret = 0; | 1596 | int i, vcpu_lock_idx = -1, ret = 0; |
1597 | struct kvm_vcpu *vcpu; | ||
1422 | 1598 | ||
1423 | mutex_lock(&kvm->lock); | 1599 | mutex_lock(&kvm->lock); |
1424 | 1600 | ||
1425 | if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) { | 1601 | if (kvm->arch.vgic.vctrl_base) { |
1426 | ret = -EEXIST; | 1602 | ret = -EEXIST; |
1427 | goto out; | 1603 | goto out; |
1428 | } | 1604 | } |
1429 | 1605 | ||
1606 | /* | ||
1607 | * Any time a vcpu is run, vcpu_load is called which tries to grab the | ||
1608 | * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure | ||
1609 | * that no other VCPUs are run while we create the vgic. | ||
1610 | */ | ||
1611 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
1612 | if (!mutex_trylock(&vcpu->mutex)) | ||
1613 | goto out_unlock; | ||
1614 | vcpu_lock_idx = i; | ||
1615 | } | ||
1616 | |||
1617 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
1618 | if (vcpu->arch.has_run_once) { | ||
1619 | ret = -EBUSY; | ||
1620 | goto out_unlock; | ||
1621 | } | ||
1622 | } | ||
1623 | |||
1430 | spin_lock_init(&kvm->arch.vgic.lock); | 1624 | spin_lock_init(&kvm->arch.vgic.lock); |
1431 | kvm->arch.vgic.vctrl_base = vgic_vctrl_base; | 1625 | kvm->arch.vgic.vctrl_base = vgic_vctrl_base; |
1432 | kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; | 1626 | kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; |
1433 | kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; | 1627 | kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF; |
1434 | 1628 | ||
1629 | out_unlock: | ||
1630 | for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { | ||
1631 | vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx); | ||
1632 | mutex_unlock(&vcpu->mutex); | ||
1633 | } | ||
1634 | |||
1435 | out: | 1635 | out: |
1436 | mutex_unlock(&kvm->lock); | 1636 | mutex_unlock(&kvm->lock); |
1437 | return ret; | 1637 | return ret; |
@@ -1455,6 +1655,12 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
1455 | { | 1655 | { |
1456 | int ret; | 1656 | int ret; |
1457 | 1657 | ||
1658 | if (addr & ~KVM_PHYS_MASK) | ||
1659 | return -E2BIG; | ||
1660 | |||
1661 | if (addr & (SZ_4K - 1)) | ||
1662 | return -EINVAL; | ||
1663 | |||
1458 | if (!IS_VGIC_ADDR_UNDEF(*ioaddr)) | 1664 | if (!IS_VGIC_ADDR_UNDEF(*ioaddr)) |
1459 | return -EEXIST; | 1665 | return -EEXIST; |
1460 | if (addr + size < addr) | 1666 | if (addr + size < addr) |
@@ -1467,26 +1673,41 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
1467 | return ret; | 1673 | return ret; |
1468 | } | 1674 | } |
1469 | 1675 | ||
1470 | int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr) | 1676 | /** |
1677 | * kvm_vgic_addr - set or get vgic VM base addresses | ||
1678 | * @kvm: pointer to the vm struct | ||
1679 | * @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX | ||
1680 | * @addr: pointer to address value | ||
1681 | * @write: if true set the address in the VM address space, if false read the | ||
1682 | * address | ||
1683 | * | ||
1684 | * Set or get the vgic base addresses for the distributor and the virtual CPU | ||
1685 | * interface in the VM physical address space. These addresses are properties | ||
1686 | * of the emulated core/SoC and therefore user space initially knows this | ||
1687 | * information. | ||
1688 | */ | ||
1689 | int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) | ||
1471 | { | 1690 | { |
1472 | int r = 0; | 1691 | int r = 0; |
1473 | struct vgic_dist *vgic = &kvm->arch.vgic; | 1692 | struct vgic_dist *vgic = &kvm->arch.vgic; |
1474 | 1693 | ||
1475 | if (addr & ~KVM_PHYS_MASK) | ||
1476 | return -E2BIG; | ||
1477 | |||
1478 | if (addr & (SZ_4K - 1)) | ||
1479 | return -EINVAL; | ||
1480 | |||
1481 | mutex_lock(&kvm->lock); | 1694 | mutex_lock(&kvm->lock); |
1482 | switch (type) { | 1695 | switch (type) { |
1483 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | 1696 | case KVM_VGIC_V2_ADDR_TYPE_DIST: |
1484 | r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base, | 1697 | if (write) { |
1485 | addr, KVM_VGIC_V2_DIST_SIZE); | 1698 | r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base, |
1699 | *addr, KVM_VGIC_V2_DIST_SIZE); | ||
1700 | } else { | ||
1701 | *addr = vgic->vgic_dist_base; | ||
1702 | } | ||
1486 | break; | 1703 | break; |
1487 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | 1704 | case KVM_VGIC_V2_ADDR_TYPE_CPU: |
1488 | r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base, | 1705 | if (write) { |
1489 | addr, KVM_VGIC_V2_CPU_SIZE); | 1706 | r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base, |
1707 | *addr, KVM_VGIC_V2_CPU_SIZE); | ||
1708 | } else { | ||
1709 | *addr = vgic->vgic_cpu_base; | ||
1710 | } | ||
1490 | break; | 1711 | break; |
1491 | default: | 1712 | default: |
1492 | r = -ENODEV; | 1713 | r = -ENODEV; |
@@ -1495,3 +1716,302 @@ int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
1495 | mutex_unlock(&kvm->lock); | 1716 | mutex_unlock(&kvm->lock); |
1496 | return r; | 1717 | return r; |
1497 | } | 1718 | } |
1719 | |||
1720 | static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu, | ||
1721 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
1722 | { | ||
1723 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
1724 | u32 reg, mask = 0, shift = 0; | ||
1725 | bool updated = false; | ||
1726 | |||
1727 | switch (offset & ~0x3) { | ||
1728 | case GIC_CPU_CTRL: | ||
1729 | mask = GICH_VMCR_CTRL_MASK; | ||
1730 | shift = GICH_VMCR_CTRL_SHIFT; | ||
1731 | break; | ||
1732 | case GIC_CPU_PRIMASK: | ||
1733 | mask = GICH_VMCR_PRIMASK_MASK; | ||
1734 | shift = GICH_VMCR_PRIMASK_SHIFT; | ||
1735 | break; | ||
1736 | case GIC_CPU_BINPOINT: | ||
1737 | mask = GICH_VMCR_BINPOINT_MASK; | ||
1738 | shift = GICH_VMCR_BINPOINT_SHIFT; | ||
1739 | break; | ||
1740 | case GIC_CPU_ALIAS_BINPOINT: | ||
1741 | mask = GICH_VMCR_ALIAS_BINPOINT_MASK; | ||
1742 | shift = GICH_VMCR_ALIAS_BINPOINT_SHIFT; | ||
1743 | break; | ||
1744 | } | ||
1745 | |||
1746 | if (!mmio->is_write) { | ||
1747 | reg = (vgic_cpu->vgic_vmcr & mask) >> shift; | ||
1748 | mmio_data_write(mmio, ~0, reg); | ||
1749 | } else { | ||
1750 | reg = mmio_data_read(mmio, ~0); | ||
1751 | reg = (reg << shift) & mask; | ||
1752 | if (reg != (vgic_cpu->vgic_vmcr & mask)) | ||
1753 | updated = true; | ||
1754 | vgic_cpu->vgic_vmcr &= ~mask; | ||
1755 | vgic_cpu->vgic_vmcr |= reg; | ||
1756 | } | ||
1757 | return updated; | ||
1758 | } | ||
1759 | |||
1760 | static bool handle_mmio_abpr(struct kvm_vcpu *vcpu, | ||
1761 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | ||
1762 | { | ||
1763 | return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT); | ||
1764 | } | ||
1765 | |||
1766 | static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, | ||
1767 | struct kvm_exit_mmio *mmio, | ||
1768 | phys_addr_t offset) | ||
1769 | { | ||
1770 | u32 reg; | ||
1771 | |||
1772 | if (mmio->is_write) | ||
1773 | return false; | ||
1774 | |||
1775 | /* GICC_IIDR */ | ||
1776 | reg = (PRODUCT_ID_KVM << 20) | | ||
1777 | (GICC_ARCH_VERSION_V2 << 16) | | ||
1778 | (IMPLEMENTER_ARM << 0); | ||
1779 | mmio_data_write(mmio, ~0, reg); | ||
1780 | return false; | ||
1781 | } | ||
1782 | |||
1783 | /* | ||
1784 | * CPU Interface Register accesses - these are not accessed by the VM, but by | ||
1785 | * user space for saving and restoring VGIC state. | ||
1786 | */ | ||
1787 | static const struct mmio_range vgic_cpu_ranges[] = { | ||
1788 | { | ||
1789 | .base = GIC_CPU_CTRL, | ||
1790 | .len = 12, | ||
1791 | .handle_mmio = handle_cpu_mmio_misc, | ||
1792 | }, | ||
1793 | { | ||
1794 | .base = GIC_CPU_ALIAS_BINPOINT, | ||
1795 | .len = 4, | ||
1796 | .handle_mmio = handle_mmio_abpr, | ||
1797 | }, | ||
1798 | { | ||
1799 | .base = GIC_CPU_ACTIVEPRIO, | ||
1800 | .len = 16, | ||
1801 | .handle_mmio = handle_mmio_raz_wi, | ||
1802 | }, | ||
1803 | { | ||
1804 | .base = GIC_CPU_IDENT, | ||
1805 | .len = 4, | ||
1806 | .handle_mmio = handle_cpu_mmio_ident, | ||
1807 | }, | ||
1808 | }; | ||
1809 | |||
1810 | static int vgic_attr_regs_access(struct kvm_device *dev, | ||
1811 | struct kvm_device_attr *attr, | ||
1812 | u32 *reg, bool is_write) | ||
1813 | { | ||
1814 | const struct mmio_range *r = NULL, *ranges; | ||
1815 | phys_addr_t offset; | ||
1816 | int ret, cpuid, c; | ||
1817 | struct kvm_vcpu *vcpu, *tmp_vcpu; | ||
1818 | struct vgic_dist *vgic; | ||
1819 | struct kvm_exit_mmio mmio; | ||
1820 | |||
1821 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
1822 | cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> | ||
1823 | KVM_DEV_ARM_VGIC_CPUID_SHIFT; | ||
1824 | |||
1825 | mutex_lock(&dev->kvm->lock); | ||
1826 | |||
1827 | if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) { | ||
1828 | ret = -EINVAL; | ||
1829 | goto out; | ||
1830 | } | ||
1831 | |||
1832 | vcpu = kvm_get_vcpu(dev->kvm, cpuid); | ||
1833 | vgic = &dev->kvm->arch.vgic; | ||
1834 | |||
1835 | mmio.len = 4; | ||
1836 | mmio.is_write = is_write; | ||
1837 | if (is_write) | ||
1838 | mmio_data_write(&mmio, ~0, *reg); | ||
1839 | switch (attr->group) { | ||
1840 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
1841 | mmio.phys_addr = vgic->vgic_dist_base + offset; | ||
1842 | ranges = vgic_dist_ranges; | ||
1843 | break; | ||
1844 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | ||
1845 | mmio.phys_addr = vgic->vgic_cpu_base + offset; | ||
1846 | ranges = vgic_cpu_ranges; | ||
1847 | break; | ||
1848 | default: | ||
1849 | BUG(); | ||
1850 | } | ||
1851 | r = find_matching_range(ranges, &mmio, offset); | ||
1852 | |||
1853 | if (unlikely(!r || !r->handle_mmio)) { | ||
1854 | ret = -ENXIO; | ||
1855 | goto out; | ||
1856 | } | ||
1857 | |||
1858 | |||
1859 | spin_lock(&vgic->lock); | ||
1860 | |||
1861 | /* | ||
1862 | * Ensure that no other VCPU is running by checking the vcpu->cpu | ||
1863 | * field. If no other VPCUs are running we can safely access the VGIC | ||
1864 | * state, because even if another VPU is run after this point, that | ||
1865 | * VCPU will not touch the vgic state, because it will block on | ||
1866 | * getting the vgic->lock in kvm_vgic_sync_hwstate(). | ||
1867 | */ | ||
1868 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) { | ||
1869 | if (unlikely(tmp_vcpu->cpu != -1)) { | ||
1870 | ret = -EBUSY; | ||
1871 | goto out_vgic_unlock; | ||
1872 | } | ||
1873 | } | ||
1874 | |||
1875 | /* | ||
1876 | * Move all pending IRQs from the LRs on all VCPUs so the pending | ||
1877 | * state can be properly represented in the register state accessible | ||
1878 | * through this API. | ||
1879 | */ | ||
1880 | kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) | ||
1881 | vgic_unqueue_irqs(tmp_vcpu); | ||
1882 | |||
1883 | offset -= r->base; | ||
1884 | r->handle_mmio(vcpu, &mmio, offset); | ||
1885 | |||
1886 | if (!is_write) | ||
1887 | *reg = mmio_data_read(&mmio, ~0); | ||
1888 | |||
1889 | ret = 0; | ||
1890 | out_vgic_unlock: | ||
1891 | spin_unlock(&vgic->lock); | ||
1892 | out: | ||
1893 | mutex_unlock(&dev->kvm->lock); | ||
1894 | return ret; | ||
1895 | } | ||
1896 | |||
1897 | static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | ||
1898 | { | ||
1899 | int r; | ||
1900 | |||
1901 | switch (attr->group) { | ||
1902 | case KVM_DEV_ARM_VGIC_GRP_ADDR: { | ||
1903 | u64 __user *uaddr = (u64 __user *)(long)attr->addr; | ||
1904 | u64 addr; | ||
1905 | unsigned long type = (unsigned long)attr->attr; | ||
1906 | |||
1907 | if (copy_from_user(&addr, uaddr, sizeof(addr))) | ||
1908 | return -EFAULT; | ||
1909 | |||
1910 | r = kvm_vgic_addr(dev->kvm, type, &addr, true); | ||
1911 | return (r == -ENODEV) ? -ENXIO : r; | ||
1912 | } | ||
1913 | |||
1914 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
1915 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | ||
1916 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
1917 | u32 reg; | ||
1918 | |||
1919 | if (get_user(reg, uaddr)) | ||
1920 | return -EFAULT; | ||
1921 | |||
1922 | return vgic_attr_regs_access(dev, attr, ®, true); | ||
1923 | } | ||
1924 | |||
1925 | } | ||
1926 | |||
1927 | return -ENXIO; | ||
1928 | } | ||
1929 | |||
1930 | static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | ||
1931 | { | ||
1932 | int r = -ENXIO; | ||
1933 | |||
1934 | switch (attr->group) { | ||
1935 | case KVM_DEV_ARM_VGIC_GRP_ADDR: { | ||
1936 | u64 __user *uaddr = (u64 __user *)(long)attr->addr; | ||
1937 | u64 addr; | ||
1938 | unsigned long type = (unsigned long)attr->attr; | ||
1939 | |||
1940 | r = kvm_vgic_addr(dev->kvm, type, &addr, false); | ||
1941 | if (r) | ||
1942 | return (r == -ENODEV) ? -ENXIO : r; | ||
1943 | |||
1944 | if (copy_to_user(uaddr, &addr, sizeof(addr))) | ||
1945 | return -EFAULT; | ||
1946 | break; | ||
1947 | } | ||
1948 | |||
1949 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
1950 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: { | ||
1951 | u32 __user *uaddr = (u32 __user *)(long)attr->addr; | ||
1952 | u32 reg = 0; | ||
1953 | |||
1954 | r = vgic_attr_regs_access(dev, attr, ®, false); | ||
1955 | if (r) | ||
1956 | return r; | ||
1957 | r = put_user(reg, uaddr); | ||
1958 | break; | ||
1959 | } | ||
1960 | |||
1961 | } | ||
1962 | |||
1963 | return r; | ||
1964 | } | ||
1965 | |||
1966 | static int vgic_has_attr_regs(const struct mmio_range *ranges, | ||
1967 | phys_addr_t offset) | ||
1968 | { | ||
1969 | struct kvm_exit_mmio dev_attr_mmio; | ||
1970 | |||
1971 | dev_attr_mmio.len = 4; | ||
1972 | if (find_matching_range(ranges, &dev_attr_mmio, offset)) | ||
1973 | return 0; | ||
1974 | else | ||
1975 | return -ENXIO; | ||
1976 | } | ||
1977 | |||
1978 | static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | ||
1979 | { | ||
1980 | phys_addr_t offset; | ||
1981 | |||
1982 | switch (attr->group) { | ||
1983 | case KVM_DEV_ARM_VGIC_GRP_ADDR: | ||
1984 | switch (attr->attr) { | ||
1985 | case KVM_VGIC_V2_ADDR_TYPE_DIST: | ||
1986 | case KVM_VGIC_V2_ADDR_TYPE_CPU: | ||
1987 | return 0; | ||
1988 | } | ||
1989 | break; | ||
1990 | case KVM_DEV_ARM_VGIC_GRP_DIST_REGS: | ||
1991 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
1992 | return vgic_has_attr_regs(vgic_dist_ranges, offset); | ||
1993 | case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: | ||
1994 | offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; | ||
1995 | return vgic_has_attr_regs(vgic_cpu_ranges, offset); | ||
1996 | } | ||
1997 | return -ENXIO; | ||
1998 | } | ||
1999 | |||
2000 | static void vgic_destroy(struct kvm_device *dev) | ||
2001 | { | ||
2002 | kfree(dev); | ||
2003 | } | ||
2004 | |||
2005 | static int vgic_create(struct kvm_device *dev, u32 type) | ||
2006 | { | ||
2007 | return kvm_vgic_create(dev->kvm); | ||
2008 | } | ||
2009 | |||
2010 | struct kvm_device_ops kvm_arm_vgic_v2_ops = { | ||
2011 | .name = "kvm-arm-vgic", | ||
2012 | .create = vgic_create, | ||
2013 | .destroy = vgic_destroy, | ||
2014 | .set_attr = vgic_set_attr, | ||
2015 | .get_attr = vgic_get_attr, | ||
2016 | .has_attr = vgic_has_attr, | ||
2017 | }; | ||
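For reference, the ID register values that the vgic.c hunks above assemble from PRODUCT_ID_KVM, IMPLEMENTER_ARM and GICC_ARCH_VERSION_V2 work out as follows; the GICD_IIDR value matches the 0x4B00043B literal the patch replaces, while the GICC_IIDR value is arithmetic done here for illustration only, with the defines mirrored locally:

/* Illustration only: compile-time checks of the ID register encodings. */
#define PRODUCT_ID_KVM		0x4b	/* ASCII 'K', as in the patch */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

/* GICD_IIDR as built in handle_mmio_misc(): same as the old literal. */
_Static_assert(((PRODUCT_ID_KVM << 24) | IMPLEMENTER_ARM) == 0x4B00043B,
	       "GICD_IIDR encoding");

/* GICC_IIDR as built in handle_cpu_mmio_ident(). */
_Static_assert(((PRODUCT_ID_KVM << 20) | (GICC_ARCH_VERSION_V2 << 16) |
		IMPLEMENTER_ARM) == 0x04B2043B,
	       "GICC_IIDR encoding");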
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 03c97e7ae4ca..3efba97bdce2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2273,6 +2273,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
2273 | ops = &kvm_vfio_ops; | 2273 | ops = &kvm_vfio_ops; |
2274 | break; | 2274 | break; |
2275 | #endif | 2275 | #endif |
2276 | #ifdef CONFIG_KVM_ARM_VGIC | ||
2277 | case KVM_DEV_TYPE_ARM_VGIC_V2: | ||
2278 | ops = &kvm_arm_vgic_v2_ops; | ||
2279 | break; | ||
2280 | #endif | ||
2276 | default: | 2281 | default: |
2277 | return -ENODEV; | 2282 | return -ENODEV; |
2278 | } | 2283 | } |