diff options
author | Andre Przywara <andre.przywara@arm.com> | 2014-06-06 18:53:08 -0400 |
---|---|---|
committer | Christoffer Dall <christoffer.dall@linaro.org> | 2015-01-20 12:25:30 -0500 |
commit | 832158125d2ef30b364f21e1616495c40c286a4a (patch) | |
tree | feaf6e054ab71b474bbf33d293bf9ddb5a2a2c79 /virt/kvm | |
parent | b60da146c135ea6b6c25a0ae925edca038b64344 (diff) |
arm/arm64: KVM: add vgic.h header file
vgic.c is currently a mixture of generic vGIC emulation code and
functions specific to emulating a GICv2. To ease the addition of
GICv3 later, we create new header file vgic.h, which holds constants
and prototypes of commonly used functions.
Rename some identifiers to avoid namespace clutter.
I removed the long-standing comment about using the kvm_io_bus API
to tackle the GIC register ranges, as it wouldn't be a win for us
anymore.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
-------
As the diff isn't always obvious here (and to aid eventual rebases),
here is a list of high-level changes done to the code:
* moved definitions and prototypes from vgic.c to vgic.h:
- VGIC_ADDR_UNDEF
- ACCESS_{READ,WRITE}_*
- vgic_init()
- vgic_update_state()
- vgic_kick_vcpus()
- vgic_get_vmcr()
- vgic_set_vmcr()
- struct mmio_range {} (renamed to struct kvm_mmio_range)
* removed the static keyword and exported the prototypes in vgic.h:
- vgic_bitmap_get_reg()
- vgic_bitmap_set_irq_val()
- vgic_bitmap_get_shared_map()
- vgic_bytemap_get_reg()
- vgic_dist_irq_set_pending()
- vgic_dist_irq_clear_pending()
- vgic_cpu_irq_clear()
- vgic_reg_access()
- handle_mmio_raz_wi()
- vgic_handle_enable_reg()
- vgic_handle_set_pending_reg()
- vgic_handle_clear_pending_reg()
- vgic_handle_cfg_reg()
- vgic_unqueue_irqs()
- find_matching_range() (renamed to vgic_find_range)
- vgic_handle_mmio_range()
- vgic_update_state()
- vgic_get_vmcr()
- vgic_set_vmcr()
- vgic_queue_irq()
- vgic_kick_vcpus()
- vgic_init()
- vgic_v2_init_emulation()
- vgic_has_attr_regs()
- vgic_set_common_attr()
- vgic_get_common_attr()
- vgic_destroy()
- vgic_create()
* moved functions to vgic.h (static inline):
- mmio_data_read()
- mmio_data_write()
- is_in_range()
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r-- | virt/kvm/arm/vgic.c | 151 | ||||
-rw-r--r-- | virt/kvm/arm/vgic.h | 120 |
2 files changed, 170 insertions, 101 deletions
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index 06073fac03a9..ce6c998a3f8d 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c | |||
@@ -75,33 +75,16 @@ | |||
75 | * inactive as long as the external input line is held high. | 75 | * inactive as long as the external input line is held high. |
76 | */ | 76 | */ |
77 | 77 | ||
78 | #define VGIC_ADDR_UNDEF (-1) | 78 | #include "vgic.h" |
79 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | ||
80 | 79 | ||
81 | #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ | ||
82 | #define IMPLEMENTER_ARM 0x43b | ||
83 | #define GICC_ARCH_VERSION_V2 0x2 | 80 | #define GICC_ARCH_VERSION_V2 0x2 |
84 | 81 | ||
85 | #define ACCESS_READ_VALUE (1 << 0) | ||
86 | #define ACCESS_READ_RAZ (0 << 0) | ||
87 | #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) | ||
88 | #define ACCESS_WRITE_IGNORED (0 << 1) | ||
89 | #define ACCESS_WRITE_SETBIT (1 << 1) | ||
90 | #define ACCESS_WRITE_CLEARBIT (2 << 1) | ||
91 | #define ACCESS_WRITE_VALUE (3 << 1) | ||
92 | #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) | ||
93 | |||
94 | static int vgic_init(struct kvm *kvm); | ||
95 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); | 82 | static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu); |
96 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); | 83 | static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu); |
97 | static void vgic_update_state(struct kvm *kvm); | ||
98 | static void vgic_kick_vcpus(struct kvm *kvm); | ||
99 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi); | 84 | static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi); |
100 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); | 85 | static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg); |
101 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); | 86 | static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr); |
102 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); | 87 | static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc); |
103 | static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | ||
104 | static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | ||
105 | 88 | ||
106 | static const struct vgic_ops *vgic_ops; | 89 | static const struct vgic_ops *vgic_ops; |
107 | static const struct vgic_params *vgic; | 90 | static const struct vgic_params *vgic; |
@@ -175,8 +158,7 @@ static unsigned long *u64_to_bitmask(u64 *val) | |||
175 | return (unsigned long *)val; | 158 | return (unsigned long *)val; |
176 | } | 159 | } |
177 | 160 | ||
178 | static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, | 161 | u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset) |
179 | int cpuid, u32 offset) | ||
180 | { | 162 | { |
181 | offset >>= 2; | 163 | offset >>= 2; |
182 | if (!offset) | 164 | if (!offset) |
@@ -194,8 +176,8 @@ static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x, | |||
194 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared); | 176 | return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared); |
195 | } | 177 | } |
196 | 178 | ||
197 | static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | 179 | void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, |
198 | int irq, int val) | 180 | int irq, int val) |
199 | { | 181 | { |
200 | unsigned long *reg; | 182 | unsigned long *reg; |
201 | 183 | ||
@@ -217,7 +199,7 @@ static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid) | |||
217 | return x->private + cpuid; | 199 | return x->private + cpuid; |
218 | } | 200 | } |
219 | 201 | ||
220 | static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) | 202 | unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x) |
221 | { | 203 | { |
222 | return x->shared; | 204 | return x->shared; |
223 | } | 205 | } |
@@ -244,7 +226,7 @@ static void vgic_free_bytemap(struct vgic_bytemap *b) | |||
244 | b->shared = NULL; | 226 | b->shared = NULL; |
245 | } | 227 | } |
246 | 228 | ||
247 | static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) | 229 | u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset) |
248 | { | 230 | { |
249 | u32 *reg; | 231 | u32 *reg; |
250 | 232 | ||
@@ -341,14 +323,14 @@ static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq) | |||
341 | return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); | 323 | return vgic_bitmap_get_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq); |
342 | } | 324 | } |
343 | 325 | ||
344 | static void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) | 326 | void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq) |
345 | { | 327 | { |
346 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 328 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
347 | 329 | ||
348 | vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); | 330 | vgic_bitmap_set_irq_val(&dist->irq_pending, vcpu->vcpu_id, irq, 1); |
349 | } | 331 | } |
350 | 332 | ||
351 | static void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) | 333 | void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq) |
352 | { | 334 | { |
353 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 335 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
354 | 336 | ||
@@ -364,7 +346,7 @@ static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq) | |||
364 | vcpu->arch.vgic_cpu.pending_shared); | 346 | vcpu->arch.vgic_cpu.pending_shared); |
365 | } | 347 | } |
366 | 348 | ||
367 | static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) | 349 | void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq) |
368 | { | 350 | { |
369 | if (irq < VGIC_NR_PRIVATE_IRQS) | 351 | if (irq < VGIC_NR_PRIVATE_IRQS) |
370 | clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); | 352 | clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu); |
@@ -378,16 +360,6 @@ static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq) | |||
378 | return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); | 360 | return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq); |
379 | } | 361 | } |
380 | 362 | ||
381 | static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) | ||
382 | { | ||
383 | return le32_to_cpu(*((u32 *)mmio->data)) & mask; | ||
384 | } | ||
385 | |||
386 | static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) | ||
387 | { | ||
388 | *((u32 *)mmio->data) = cpu_to_le32(value) & mask; | ||
389 | } | ||
390 | |||
391 | /** | 363 | /** |
392 | * vgic_reg_access - access vgic register | 364 | * vgic_reg_access - access vgic register |
393 | * @mmio: pointer to the data describing the mmio access | 365 | * @mmio: pointer to the data describing the mmio access |
@@ -399,8 +371,8 @@ static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) | |||
399 | * modes defined for vgic register access | 371 | * modes defined for vgic register access |
400 | * (read,raz,write-ignored,setbit,clearbit,write) | 372 | * (read,raz,write-ignored,setbit,clearbit,write) |
401 | */ | 373 | */ |
402 | static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, | 374 | void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, |
403 | phys_addr_t offset, int mode) | 375 | phys_addr_t offset, int mode) |
404 | { | 376 | { |
405 | int word_offset = (offset & 3) * 8; | 377 | int word_offset = (offset & 3) * 8; |
406 | u32 mask = (1UL << (mmio->len * 8)) - 1; | 378 | u32 mask = (1UL << (mmio->len * 8)) - 1; |
@@ -484,16 +456,16 @@ static bool handle_mmio_misc(struct kvm_vcpu *vcpu, | |||
484 | return false; | 456 | return false; |
485 | } | 457 | } |
486 | 458 | ||
487 | static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, | 459 | bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, |
488 | struct kvm_exit_mmio *mmio, phys_addr_t offset) | 460 | phys_addr_t offset) |
489 | { | 461 | { |
490 | vgic_reg_access(mmio, NULL, offset, | 462 | vgic_reg_access(mmio, NULL, offset, |
491 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); | 463 | ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED); |
492 | return false; | 464 | return false; |
493 | } | 465 | } |
494 | 466 | ||
495 | static bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | 467 | bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, |
496 | phys_addr_t offset, int vcpu_id, int access) | 468 | phys_addr_t offset, int vcpu_id, int access) |
497 | { | 469 | { |
498 | u32 *reg; | 470 | u32 *reg; |
499 | int mode = ACCESS_READ_VALUE | access; | 471 | int mode = ACCESS_READ_VALUE | access; |
@@ -530,9 +502,9 @@ static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu, | |||
530 | vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); | 502 | vcpu->vcpu_id, ACCESS_WRITE_CLEARBIT); |
531 | } | 503 | } |
532 | 504 | ||
533 | static bool vgic_handle_set_pending_reg(struct kvm *kvm, | 505 | bool vgic_handle_set_pending_reg(struct kvm *kvm, |
534 | struct kvm_exit_mmio *mmio, | 506 | struct kvm_exit_mmio *mmio, |
535 | phys_addr_t offset, int vcpu_id) | 507 | phys_addr_t offset, int vcpu_id) |
536 | { | 508 | { |
537 | u32 *reg, orig; | 509 | u32 *reg, orig; |
538 | u32 level_mask; | 510 | u32 level_mask; |
@@ -567,9 +539,9 @@ static bool vgic_handle_set_pending_reg(struct kvm *kvm, | |||
567 | return false; | 539 | return false; |
568 | } | 540 | } |
569 | 541 | ||
570 | static bool vgic_handle_clear_pending_reg(struct kvm *kvm, | 542 | bool vgic_handle_clear_pending_reg(struct kvm *kvm, |
571 | struct kvm_exit_mmio *mmio, | 543 | struct kvm_exit_mmio *mmio, |
572 | phys_addr_t offset, int vcpu_id) | 544 | phys_addr_t offset, int vcpu_id) |
573 | { | 545 | { |
574 | u32 *level_active; | 546 | u32 *level_active; |
575 | u32 *reg, orig; | 547 | u32 *reg, orig; |
@@ -741,8 +713,8 @@ static u16 vgic_cfg_compress(u32 val) | |||
741 | * LSB is always 0. As such, we only keep the upper bit, and use the | 713 | * LSB is always 0. As such, we only keep the upper bit, and use the |
742 | * two above functions to compress/expand the bits | 714 | * two above functions to compress/expand the bits |
743 | */ | 715 | */ |
744 | static bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, | 716 | bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, |
745 | phys_addr_t offset) | 717 | phys_addr_t offset) |
746 | { | 718 | { |
747 | u32 val; | 719 | u32 val; |
748 | 720 | ||
@@ -818,7 +790,7 @@ static void vgic_v2_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source) | |||
818 | * to the distributor but the active state stays in the LRs, because we don't | 790 | * to the distributor but the active state stays in the LRs, because we don't |
819 | * track the active state on the distributor side. | 791 | * track the active state on the distributor side. |
820 | */ | 792 | */ |
821 | static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) | 793 | void vgic_unqueue_irqs(struct kvm_vcpu *vcpu) |
822 | { | 794 | { |
823 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 795 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
824 | int i; | 796 | int i; |
@@ -943,21 +915,7 @@ static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu, | |||
943 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); | 915 | return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false); |
944 | } | 916 | } |
945 | 917 | ||
946 | /* | 918 | static const struct kvm_mmio_range vgic_dist_ranges[] = { |
947 | * I would have liked to use the kvm_bus_io_*() API instead, but it | ||
948 | * cannot cope with banked registers (only the VM pointer is passed | ||
949 | * around, and we need the vcpu). One of these days, someone please | ||
950 | * fix it! | ||
951 | */ | ||
952 | struct mmio_range { | ||
953 | phys_addr_t base; | ||
954 | unsigned long len; | ||
955 | int bits_per_irq; | ||
956 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | ||
957 | phys_addr_t offset); | ||
958 | }; | ||
959 | |||
960 | static const struct mmio_range vgic_dist_ranges[] = { | ||
961 | { | 919 | { |
962 | .base = GIC_DIST_CTRL, | 920 | .base = GIC_DIST_CTRL, |
963 | .len = 12, | 921 | .len = 12, |
@@ -1042,12 +1000,12 @@ static const struct mmio_range vgic_dist_ranges[] = { | |||
1042 | {} | 1000 | {} |
1043 | }; | 1001 | }; |
1044 | 1002 | ||
1045 | static const | 1003 | const |
1046 | struct mmio_range *find_matching_range(const struct mmio_range *ranges, | 1004 | struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, |
1047 | struct kvm_exit_mmio *mmio, | 1005 | struct kvm_exit_mmio *mmio, |
1048 | phys_addr_t offset) | 1006 | phys_addr_t offset) |
1049 | { | 1007 | { |
1050 | const struct mmio_range *r = ranges; | 1008 | const struct kvm_mmio_range *r = ranges; |
1051 | 1009 | ||
1052 | while (r->len) { | 1010 | while (r->len) { |
1053 | if (offset >= r->base && | 1011 | if (offset >= r->base && |
@@ -1060,7 +1018,7 @@ struct mmio_range *find_matching_range(const struct mmio_range *ranges, | |||
1060 | } | 1018 | } |
1061 | 1019 | ||
1062 | static bool vgic_validate_access(const struct vgic_dist *dist, | 1020 | static bool vgic_validate_access(const struct vgic_dist *dist, |
1063 | const struct mmio_range *range, | 1021 | const struct kvm_mmio_range *range, |
1064 | unsigned long offset) | 1022 | unsigned long offset) |
1065 | { | 1023 | { |
1066 | int irq; | 1024 | int irq; |
@@ -1088,7 +1046,7 @@ static bool vgic_validate_access(const struct vgic_dist *dist, | |||
1088 | static bool call_range_handler(struct kvm_vcpu *vcpu, | 1046 | static bool call_range_handler(struct kvm_vcpu *vcpu, |
1089 | struct kvm_exit_mmio *mmio, | 1047 | struct kvm_exit_mmio *mmio, |
1090 | unsigned long offset, | 1048 | unsigned long offset, |
1091 | const struct mmio_range *range) | 1049 | const struct kvm_mmio_range *range) |
1092 | { | 1050 | { |
1093 | u32 *data32 = (void *)mmio->data; | 1051 | u32 *data32 = (void *)mmio->data; |
1094 | struct kvm_exit_mmio mmio32; | 1052 | struct kvm_exit_mmio mmio32; |
@@ -1132,18 +1090,18 @@ static bool call_range_handler(struct kvm_vcpu *vcpu, | |||
1132 | * | 1090 | * |
1133 | * returns true if the MMIO access could be performed | 1091 | * returns true if the MMIO access could be performed |
1134 | */ | 1092 | */ |
1135 | static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, | 1093 | bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, |
1136 | struct kvm_exit_mmio *mmio, | 1094 | struct kvm_exit_mmio *mmio, |
1137 | const struct mmio_range *ranges, | 1095 | const struct kvm_mmio_range *ranges, |
1138 | unsigned long mmio_base) | 1096 | unsigned long mmio_base) |
1139 | { | 1097 | { |
1140 | const struct mmio_range *range; | 1098 | const struct kvm_mmio_range *range; |
1141 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1099 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
1142 | bool updated_state; | 1100 | bool updated_state; |
1143 | unsigned long offset; | 1101 | unsigned long offset; |
1144 | 1102 | ||
1145 | offset = mmio->phys_addr - mmio_base; | 1103 | offset = mmio->phys_addr - mmio_base; |
1146 | range = find_matching_range(ranges, mmio, offset); | 1104 | range = vgic_find_range(ranges, mmio, offset); |
1147 | if (unlikely(!range || !range->handle_mmio)) { | 1105 | if (unlikely(!range || !range->handle_mmio)) { |
1148 | pr_warn("Unhandled access %d %08llx %d\n", | 1106 | pr_warn("Unhandled access %d %08llx %d\n", |
1149 | mmio->is_write, mmio->phys_addr, mmio->len); | 1107 | mmio->is_write, mmio->phys_addr, mmio->len); |
@@ -1169,12 +1127,6 @@ static bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
1169 | return true; | 1127 | return true; |
1170 | } | 1128 | } |
1171 | 1129 | ||
1172 | static inline bool is_in_range(phys_addr_t addr, unsigned long len, | ||
1173 | phys_addr_t baseaddr, unsigned long size) | ||
1174 | { | ||
1175 | return (addr >= baseaddr) && (addr + len <= baseaddr + size); | ||
1176 | } | ||
1177 | |||
1178 | static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, | 1130 | static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run, |
1179 | struct kvm_exit_mmio *mmio) | 1131 | struct kvm_exit_mmio *mmio) |
1180 | { | 1132 | { |
@@ -1301,7 +1253,7 @@ static int compute_pending_for_cpu(struct kvm_vcpu *vcpu) | |||
1301 | * Update the interrupt state and determine which CPUs have pending | 1253 | * Update the interrupt state and determine which CPUs have pending |
1302 | * interrupts. Must be called with distributor lock held. | 1254 | * interrupts. Must be called with distributor lock held. |
1303 | */ | 1255 | */ |
1304 | static void vgic_update_state(struct kvm *kvm) | 1256 | void vgic_update_state(struct kvm *kvm) |
1305 | { | 1257 | { |
1306 | struct vgic_dist *dist = &kvm->arch.vgic; | 1258 | struct vgic_dist *dist = &kvm->arch.vgic; |
1307 | struct kvm_vcpu *vcpu; | 1259 | struct kvm_vcpu *vcpu; |
@@ -1362,12 +1314,12 @@ static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu) | |||
1362 | vgic_ops->disable_underflow(vcpu); | 1314 | vgic_ops->disable_underflow(vcpu); |
1363 | } | 1315 | } |
1364 | 1316 | ||
1365 | static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) | 1317 | void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) |
1366 | { | 1318 | { |
1367 | vgic_ops->get_vmcr(vcpu, vmcr); | 1319 | vgic_ops->get_vmcr(vcpu, vmcr); |
1368 | } | 1320 | } |
1369 | 1321 | ||
1370 | static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) | 1322 | void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr) |
1371 | { | 1323 | { |
1372 | vgic_ops->set_vmcr(vcpu, vmcr); | 1324 | vgic_ops->set_vmcr(vcpu, vmcr); |
1373 | } | 1325 | } |
@@ -1417,7 +1369,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu) | |||
1417 | * Queue an interrupt to a CPU virtual interface. Return true on success, | 1369 | * Queue an interrupt to a CPU virtual interface. Return true on success, |
1418 | * or false if it wasn't possible to queue it. | 1370 | * or false if it wasn't possible to queue it. |
1419 | */ | 1371 | */ |
1420 | static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | 1372 | bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) |
1421 | { | 1373 | { |
1422 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 1374 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
1423 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | 1375 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; |
@@ -1703,7 +1655,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
1703 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); | 1655 | return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu); |
1704 | } | 1656 | } |
1705 | 1657 | ||
1706 | static void vgic_kick_vcpus(struct kvm *kvm) | 1658 | void vgic_kick_vcpus(struct kvm *kvm) |
1707 | { | 1659 | { |
1708 | struct kvm_vcpu *vcpu; | 1660 | struct kvm_vcpu *vcpu; |
1709 | int c; | 1661 | int c; |
@@ -1956,7 +1908,7 @@ static int vgic_v2_init_model(struct kvm *kvm) | |||
1956 | * Allocate and initialize the various data structures. Must be called | 1908 | * Allocate and initialize the various data structures. Must be called |
1957 | * with kvm->lock held! | 1909 | * with kvm->lock held! |
1958 | */ | 1910 | */ |
1959 | static int vgic_init(struct kvm *kvm) | 1911 | int vgic_init(struct kvm *kvm) |
1960 | { | 1912 | { |
1961 | struct vgic_dist *dist = &kvm->arch.vgic; | 1913 | struct vgic_dist *dist = &kvm->arch.vgic; |
1962 | struct kvm_vcpu *vcpu; | 1914 | struct kvm_vcpu *vcpu; |
@@ -2096,7 +2048,7 @@ out: | |||
2096 | return ret; | 2048 | return ret; |
2097 | } | 2049 | } |
2098 | 2050 | ||
2099 | static void vgic_v2_init_emulation(struct kvm *kvm) | 2051 | void vgic_v2_init_emulation(struct kvm *kvm) |
2100 | { | 2052 | { |
2101 | struct vgic_dist *dist = &kvm->arch.vgic; | 2053 | struct vgic_dist *dist = &kvm->arch.vgic; |
2102 | 2054 | ||
@@ -2326,7 +2278,7 @@ static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu, | |||
2326 | * CPU Interface Register accesses - these are not accessed by the VM, but by | 2278 | * CPU Interface Register accesses - these are not accessed by the VM, but by |
2327 | * user space for saving and restoring VGIC state. | 2279 | * user space for saving and restoring VGIC state. |
2328 | */ | 2280 | */ |
2329 | static const struct mmio_range vgic_cpu_ranges[] = { | 2281 | static const struct kvm_mmio_range vgic_cpu_ranges[] = { |
2330 | { | 2282 | { |
2331 | .base = GIC_CPU_CTRL, | 2283 | .base = GIC_CPU_CTRL, |
2332 | .len = 12, | 2284 | .len = 12, |
@@ -2353,7 +2305,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
2353 | struct kvm_device_attr *attr, | 2305 | struct kvm_device_attr *attr, |
2354 | u32 *reg, bool is_write) | 2306 | u32 *reg, bool is_write) |
2355 | { | 2307 | { |
2356 | const struct mmio_range *r = NULL, *ranges; | 2308 | const struct kvm_mmio_range *r = NULL, *ranges; |
2357 | phys_addr_t offset; | 2309 | phys_addr_t offset; |
2358 | int ret, cpuid, c; | 2310 | int ret, cpuid, c; |
2359 | struct kvm_vcpu *vcpu, *tmp_vcpu; | 2311 | struct kvm_vcpu *vcpu, *tmp_vcpu; |
@@ -2394,7 +2346,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev, | |||
2394 | default: | 2346 | default: |
2395 | BUG(); | 2347 | BUG(); |
2396 | } | 2348 | } |
2397 | r = find_matching_range(ranges, &mmio, offset); | 2349 | r = vgic_find_range(ranges, &mmio, offset); |
2398 | 2350 | ||
2399 | if (unlikely(!r || !r->handle_mmio)) { | 2351 | if (unlikely(!r || !r->handle_mmio)) { |
2400 | ret = -ENXIO; | 2352 | ret = -ENXIO; |
@@ -2440,8 +2392,7 @@ out: | |||
2440 | return ret; | 2392 | return ret; |
2441 | } | 2393 | } |
2442 | 2394 | ||
2443 | static int vgic_set_common_attr(struct kvm_device *dev, | 2395 | int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
2444 | struct kvm_device_attr *attr) | ||
2445 | { | 2396 | { |
2446 | int r; | 2397 | int r; |
2447 | 2398 | ||
@@ -2525,8 +2476,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
2525 | return -ENXIO; | 2476 | return -ENXIO; |
2526 | } | 2477 | } |
2527 | 2478 | ||
2528 | static int vgic_get_common_attr(struct kvm_device *dev, | 2479 | int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr) |
2529 | struct kvm_device_attr *attr) | ||
2530 | { | 2480 | { |
2531 | int r = -ENXIO; | 2481 | int r = -ENXIO; |
2532 | 2482 | ||
@@ -2581,13 +2531,12 @@ static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
2581 | return -ENXIO; | 2531 | return -ENXIO; |
2582 | } | 2532 | } |
2583 | 2533 | ||
2584 | static int vgic_has_attr_regs(const struct mmio_range *ranges, | 2534 | int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset) |
2585 | phys_addr_t offset) | ||
2586 | { | 2535 | { |
2587 | struct kvm_exit_mmio dev_attr_mmio; | 2536 | struct kvm_exit_mmio dev_attr_mmio; |
2588 | 2537 | ||
2589 | dev_attr_mmio.len = 4; | 2538 | dev_attr_mmio.len = 4; |
2590 | if (find_matching_range(ranges, &dev_attr_mmio, offset)) | 2539 | if (vgic_find_range(ranges, &dev_attr_mmio, offset)) |
2591 | return 0; | 2540 | return 0; |
2592 | else | 2541 | else |
2593 | return -ENXIO; | 2542 | return -ENXIO; |
@@ -2622,12 +2571,12 @@ static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) | |||
2622 | return -ENXIO; | 2571 | return -ENXIO; |
2623 | } | 2572 | } |
2624 | 2573 | ||
2625 | static void vgic_destroy(struct kvm_device *dev) | 2574 | void vgic_destroy(struct kvm_device *dev) |
2626 | { | 2575 | { |
2627 | kfree(dev); | 2576 | kfree(dev); |
2628 | } | 2577 | } |
2629 | 2578 | ||
2630 | static int vgic_create(struct kvm_device *dev, u32 type) | 2579 | int vgic_create(struct kvm_device *dev, u32 type) |
2631 | { | 2580 | { |
2632 | return kvm_vgic_create(dev->kvm, type); | 2581 | return kvm_vgic_create(dev->kvm, type); |
2633 | } | 2582 | } |
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h new file mode 100644 index 000000000000..e363b9341873 --- /dev/null +++ b/virt/kvm/arm/vgic.h | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012-2014 ARM Ltd. | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * Derived from virt/kvm/arm/vgic.c | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #ifndef __KVM_VGIC_H__ | ||
21 | #define __KVM_VGIC_H__ | ||
22 | |||
23 | #define VGIC_ADDR_UNDEF (-1) | ||
24 | #define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF) | ||
25 | |||
26 | #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ | ||
27 | #define IMPLEMENTER_ARM 0x43b | ||
28 | |||
29 | #define ACCESS_READ_VALUE (1 << 0) | ||
30 | #define ACCESS_READ_RAZ (0 << 0) | ||
31 | #define ACCESS_READ_MASK(x) ((x) & (1 << 0)) | ||
32 | #define ACCESS_WRITE_IGNORED (0 << 1) | ||
33 | #define ACCESS_WRITE_SETBIT (1 << 1) | ||
34 | #define ACCESS_WRITE_CLEARBIT (2 << 1) | ||
35 | #define ACCESS_WRITE_VALUE (3 << 1) | ||
36 | #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1)) | ||
37 | |||
38 | unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x); | ||
39 | |||
40 | void vgic_update_state(struct kvm *kvm); | ||
41 | int vgic_init_common_maps(struct kvm *kvm); | ||
42 | |||
43 | u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x, int cpuid, u32 offset); | ||
44 | u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset); | ||
45 | |||
46 | void vgic_dist_irq_set_pending(struct kvm_vcpu *vcpu, int irq); | ||
47 | void vgic_dist_irq_clear_pending(struct kvm_vcpu *vcpu, int irq); | ||
48 | void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq); | ||
49 | void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid, | ||
50 | int irq, int val); | ||
51 | |||
52 | void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | ||
53 | void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr); | ||
54 | |||
55 | bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq); | ||
56 | void vgic_unqueue_irqs(struct kvm_vcpu *vcpu); | ||
57 | |||
58 | void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg, | ||
59 | phys_addr_t offset, int mode); | ||
60 | bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | ||
61 | phys_addr_t offset); | ||
62 | |||
63 | static inline | ||
64 | u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask) | ||
65 | { | ||
66 | return le32_to_cpu(*((u32 *)mmio->data)) & mask; | ||
67 | } | ||
68 | |||
69 | static inline | ||
70 | void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value) | ||
71 | { | ||
72 | *((u32 *)mmio->data) = cpu_to_le32(value) & mask; | ||
73 | } | ||
74 | |||
75 | struct kvm_mmio_range { | ||
76 | phys_addr_t base; | ||
77 | unsigned long len; | ||
78 | int bits_per_irq; | ||
79 | bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio, | ||
80 | phys_addr_t offset); | ||
81 | }; | ||
82 | |||
83 | static inline bool is_in_range(phys_addr_t addr, unsigned long len, | ||
84 | phys_addr_t baseaddr, unsigned long size) | ||
85 | { | ||
86 | return (addr >= baseaddr) && (addr + len <= baseaddr + size); | ||
87 | } | ||
88 | |||
89 | const | ||
90 | struct kvm_mmio_range *vgic_find_range(const struct kvm_mmio_range *ranges, | ||
91 | struct kvm_exit_mmio *mmio, | ||
92 | phys_addr_t offset); | ||
93 | |||
94 | bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run, | ||
95 | struct kvm_exit_mmio *mmio, | ||
96 | const struct kvm_mmio_range *ranges, | ||
97 | unsigned long mmio_base); | ||
98 | |||
99 | bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | ||
100 | phys_addr_t offset, int vcpu_id, int access); | ||
101 | |||
102 | bool vgic_handle_set_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | ||
103 | phys_addr_t offset, int vcpu_id); | ||
104 | |||
105 | bool vgic_handle_clear_pending_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio, | ||
106 | phys_addr_t offset, int vcpu_id); | ||
107 | |||
108 | bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio, | ||
109 | phys_addr_t offset); | ||
110 | |||
111 | void vgic_kick_vcpus(struct kvm *kvm); | ||
112 | |||
113 | int vgic_has_attr_regs(const struct kvm_mmio_range *ranges, phys_addr_t offset); | ||
114 | int vgic_set_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); | ||
115 | int vgic_get_common_attr(struct kvm_device *dev, struct kvm_device_attr *attr); | ||
116 | |||
117 | int vgic_init(struct kvm *kvm); | ||
118 | void vgic_v2_init_emulation(struct kvm *kvm); | ||
119 | |||
120 | #endif | ||