Diffstat (limited to 'virt')
 virt/kvm/arm/vgic/vgic-its.c | 661 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 660 insertions(+), 1 deletion(-)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 6f43b3b1172b..1408c88d063e 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -33,6 +33,67 @@
33#include "vgic.h"
34#include "vgic-mmio.h"
35
36/*
37 * Creates a new (reference to a) struct vgic_irq for a given LPI.
38 * If this LPI is already mapped on another ITS, we increase its refcount
39 * and return a pointer to the existing structure.
40 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
41 * This function returns a pointer to the _unlocked_ structure.
42 */
43static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
44{
45 struct vgic_dist *dist = &kvm->arch.vgic;
46 struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
47
48 /* In this case there is no put, since we keep the reference. */
49 if (irq)
50 return irq;
51
52 irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
53 if (!irq)
54 return NULL;
55
56 INIT_LIST_HEAD(&irq->lpi_list);
57 INIT_LIST_HEAD(&irq->ap_list);
58 spin_lock_init(&irq->irq_lock);
59
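	/* LPIs are always edge-triggered, so configure the new IRQ as such. */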
60 irq->config = VGIC_CONFIG_EDGE;
61 kref_init(&irq->refcount);
62 irq->intid = intid;
63
64 spin_lock(&dist->lpi_list_lock);
65
66 /*
67 * There could be a race with another vgic_add_lpi(), so we need to
68 * check that we don't add a second list entry with the same LPI.
69 */
70 list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
71 if (oldirq->intid != intid)
72 continue;
73
74		/* Someone was faster with adding this LPI, let's use that. */
75 kfree(irq);
76 irq = oldirq;
77
78 /*
79 * This increases the refcount, the caller is expected to
80 * call vgic_put_irq() on the returned pointer once it's
81 * finished with the IRQ.
82 */
83 kref_get(&irq->refcount);
84
85 goto out_unlock;
86 }
87
88 list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
89 dist->lpi_list_count++;
90
91out_unlock:
92 spin_unlock(&dist->lpi_list_lock);
93
94 return irq;
95}
96
97struct its_device {
98	struct list_head dev_list;
99
@@ -63,15 +124,74 @@ struct its_itte {
124};
125
126/*
127 * Finds and returns a device in the device table for an ITS.
128 * Must be called with the its_lock mutex held.
129 */
130static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
131{
132 struct its_device *device;
133
134 list_for_each_entry(device, &its->device_list, dev_list)
135 if (device_id == device->device_id)
136 return device;
137
138 return NULL;
139}
140
141/*
142 * Finds and returns an interrupt translation table entry (ITTE) for a given
143 * Device ID/Event ID pair on an ITS.
144 * Must be called with the its_lock mutex held.
145 */
146static struct its_itte *find_itte(struct vgic_its *its, u32 device_id,
147 u32 event_id)
148{
149 struct its_device *device;
150 struct its_itte *itte;
151
152 device = find_its_device(its, device_id);
153 if (device == NULL)
154 return NULL;
155
156 list_for_each_entry(itte, &device->itt_head, itte_list)
157 if (itte->event_id == event_id)
158 return itte;
159
160 return NULL;
161}
162
163/* To be used as an iterator, this macro deliberately omits the enclosing parentheses */
164#define for_each_lpi_its(dev, itte, its) \
165 list_for_each_entry(dev, &(its)->device_list, dev_list) \
166 list_for_each_entry(itte, &(dev)->itt_head, itte_list)
167
168/*
169 * We only implement 48 bits of PA at the moment, although the ITS
170 * supports more. Let's be restrictive here.
171 */
172#define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16))
173#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
174#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
175#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
176
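/* LPIs use INTIDs from 8192 upwards; lower INTIDs belong to SGIs, PPIs and SPIs. */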
177#define GIC_LPI_OFFSET		8192
178
179/*
180 * Finds and returns a collection in the ITS collection table.
181 * Must be called with the its_lock mutex held.
182 */
183static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
184{
185 struct its_collection *collection;
186
187 list_for_each_entry(collection, &its->collection_list, coll_list) {
188 if (coll_id == collection->collection_id)
189 return collection;
190 }
191
192 return NULL;
193}
194
195#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
196#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
197
@@ -145,6 +265,51 @@ static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
265}
266
267/*
268 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
269 * is targeting) to the VGIC's view, which deals with target VCPUs.
270 * Needs to be called whenever either the collection for an LPI has
271 * changed or the collection itself got retargeted.
272 */
273static void update_affinity_itte(struct kvm *kvm, struct its_itte *itte)
274{
275 struct kvm_vcpu *vcpu;
276
277 if (!its_is_collection_mapped(itte->collection))
278 return;
279
280 vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
281
282 spin_lock(&itte->irq->irq_lock);
283 itte->irq->target_vcpu = vcpu;
284 spin_unlock(&itte->irq->irq_lock);
285}
286
287/*
288 * Updates the target VCPU for every LPI targeting this collection.
289 * Must be called with the its_lock mutex held.
290 */
291static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
292 struct its_collection *coll)
293{
294 struct its_device *device;
295 struct its_itte *itte;
296
297 for_each_lpi_its(device, itte, its) {
298 if (!itte->collection || coll != itte->collection)
299 continue;
300
301 update_affinity_itte(kvm, itte);
302 }
303}
304
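/*
 * GICR_PROPBASER.IDbits (bits [4:0]) encodes the number of supported
 * interrupt ID bits minus one; cap it at what this ITS emulation handles.
 */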
305static u32 max_lpis_propbaser(u64 propbaser)
306{
307 int nr_idbits = (propbaser & 0x1f) + 1;
308
309 return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
310}
311
312/*
313 * Scan the whole LPI pending table and sync the pending bit in there
314 * with our own data structures. This relies on the LPI being
315 * mapped before.
@@ -283,10 +448,504 @@ static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
448	kfree(itte);
449}
450
451static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
452{
453 return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
454}
455
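/*
 * ITS commands are 32 bytes long, laid out as four little-endian 64-bit
 * doublewords. The accessors below extract the architected fields, e.g.
 * MAPTI carries the device ID in DW0[63:32] and the event ID in DW1[31:0].
 */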
456#define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8)
457#define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32)
458#define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32)
459#define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32)
460#define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16)
461#define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32)
462#define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1)
463
464/*
465 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
466 * Must be called with the its_lock mutex held.
467 */
468static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
469 u64 *its_cmd)
470{
471 u32 device_id = its_cmd_get_deviceid(its_cmd);
472 u32 event_id = its_cmd_get_id(its_cmd);
473 struct its_itte *itte;
474
475
476 itte = find_itte(its, device_id, event_id);
477 if (itte && itte->collection) {
478 /*
479 * Though the spec talks about removing the pending state, we
480 * don't bother here since we clear the ITTE anyway and the
481 * pending state is a property of the ITTE struct.
482 */
483 its_free_itte(kvm, itte);
484 return 0;
485 }
486
487 return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
488}
489
490/*
491 * The MOVI command moves an ITTE to a different collection.
492 * Must be called with the its_lock mutex held.
493 */
494static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
495 u64 *its_cmd)
496{
497 u32 device_id = its_cmd_get_deviceid(its_cmd);
498 u32 event_id = its_cmd_get_id(its_cmd);
499 u32 coll_id = its_cmd_get_collection(its_cmd);
500 struct kvm_vcpu *vcpu;
501 struct its_itte *itte;
502 struct its_collection *collection;
503
504 itte = find_itte(its, device_id, event_id);
505 if (!itte)
506 return E_ITS_MOVI_UNMAPPED_INTERRUPT;
507
508 if (!its_is_collection_mapped(itte->collection))
509 return E_ITS_MOVI_UNMAPPED_COLLECTION;
510
511 collection = find_collection(its, coll_id);
512 if (!its_is_collection_mapped(collection))
513 return E_ITS_MOVI_UNMAPPED_COLLECTION;
514
515 itte->collection = collection;
516 vcpu = kvm_get_vcpu(kvm, collection->target_addr);
517
518 spin_lock(&itte->irq->irq_lock);
519 itte->irq->target_vcpu = vcpu;
520 spin_unlock(&itte->irq->irq_lock);
521
522 return 0;
523}
524
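/*
 * Adds a new collection to this ITS' collection list; the collection starts
 * out unmapped until a MAPC command assigns it a target redistributor.
 */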
525static void vgic_its_init_collection(struct vgic_its *its,
526 struct its_collection *collection,
527 u32 coll_id)
528{
529 collection->collection_id = coll_id;
530 collection->target_addr = COLLECTION_NOT_MAPPED;
531
532 list_add_tail(&collection->coll_list, &its->collection_list);
533}
534
535/*
536 * The MAPTI and MAPI commands map LPIs to ITTEs.
537 * Must be called with its_lock mutex held.
538 */
539static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
540 u64 *its_cmd, u8 subcmd)
541{
542 u32 device_id = its_cmd_get_deviceid(its_cmd);
543 u32 event_id = its_cmd_get_id(its_cmd);
544 u32 coll_id = its_cmd_get_collection(its_cmd);
545 struct its_itte *itte;
546 struct its_device *device;
547 struct its_collection *collection, *new_coll = NULL;
548 int lpi_nr;
549
550 device = find_its_device(its, device_id);
551 if (!device)
552 return E_ITS_MAPTI_UNMAPPED_DEVICE;
553
554 collection = find_collection(its, coll_id);
555 if (!collection) {
556 new_coll = kzalloc(sizeof(struct its_collection), GFP_KERNEL);
557 if (!new_coll)
558 return -ENOMEM;
559 }
560
561 if (subcmd == GITS_CMD_MAPTI)
562 lpi_nr = its_cmd_get_physical_id(its_cmd);
563 else
564 lpi_nr = event_id;
565 if (lpi_nr < GIC_LPI_OFFSET ||
566 lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser)) {
567 kfree(new_coll);
568 return E_ITS_MAPTI_PHYSICALID_OOR;
569 }
570
571 itte = find_itte(its, device_id, event_id);
572 if (!itte) {
573 itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
574 if (!itte) {
575 kfree(new_coll);
576 return -ENOMEM;
577 }
578
579 itte->event_id = event_id;
580 list_add_tail(&itte->itte_list, &device->itt_head);
581 }
582
583 if (!collection) {
584 collection = new_coll;
585 vgic_its_init_collection(its, collection, coll_id);
586 }
587
588 itte->collection = collection;
589 itte->lpi = lpi_nr;
590 itte->irq = vgic_add_lpi(kvm, lpi_nr);
591 update_affinity_itte(kvm, itte);
592
593 /*
594	 * We "cache" the configuration table entries in our struct vgic_irq's.
595 * However we only have those structs for mapped IRQs, so we read in
596 * the respective config data from memory here upon mapping the LPI.
597 */
598 update_lpi_config(kvm, itte->irq, NULL);
599
600 return 0;
601}
602
603/* Requires the its_lock to be held. */
604static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
605{
606 struct its_itte *itte, *temp;
607
608 /*
609 * The spec says that unmapping a device with still valid
610 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
611 * since we cannot leave the memory unreferenced.
612 */
613 list_for_each_entry_safe(itte, temp, &device->itt_head, itte_list)
614 its_free_itte(kvm, itte);
615
616 list_del(&device->dev_list);
617 kfree(device);
618}
619
620/*
621 * Check whether a device ID can be stored into the guest device tables.
622 * For a direct table this is pretty easy, but gets a bit nasty for
623 * indirect tables. We check whether the resulting guest physical address
624 * is actually valid (covered by a memslot and guest accessible).
625 * For this we have to read the respective first level entry.
626 */
627static bool vgic_its_check_device_id(struct kvm *kvm, struct vgic_its *its,
628 int device_id)
629{
630 u64 r = its->baser_device_table;
631 int nr_entries = GITS_BASER_NR_PAGES(r) * SZ_64K;
632 int index;
633 u64 indirect_ptr;
634 gfn_t gfn;
635
636
637 if (!(r & GITS_BASER_INDIRECT))
638 return device_id < (nr_entries / GITS_BASER_ENTRY_SIZE(r));
639
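	/*
	 * Two-level table: each 64-bit first level entry points to a
	 * second-level page (64KB in this implementation) holding the
	 * actual device table entries.
	 */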
640 /* calculate and check the index into the 1st level */
641 index = device_id / (SZ_64K / GITS_BASER_ENTRY_SIZE(r));
642 if (index >= (nr_entries / sizeof(u64)))
643 return false;
644
645 /* Each 1st level entry is represented by a 64-bit value. */
646	if (kvm_read_guest(kvm,
647 BASER_ADDRESS(r) + index * sizeof(indirect_ptr),
648 &indirect_ptr, sizeof(indirect_ptr)))
649 return false;
650
651 /* check the valid bit of the first level entry */
652 if (!(indirect_ptr & BIT_ULL(63)))
653 return false;
654
655 /*
656 * Mask the guest physical address and calculate the frame number.
657 * Any address beyond our supported 48 bits of PA will be caught
658 * by the actual check in the final step.
659 */
660 gfn = (indirect_ptr & GENMASK_ULL(51, 16)) >> PAGE_SHIFT;
661
662 return kvm_is_visible_gfn(kvm, gfn);
663}
664
665/*
666 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
667 * Must be called with the its_lock mutex held.
668 */
669static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
670 u64 *its_cmd)
671{
672 u32 device_id = its_cmd_get_deviceid(its_cmd);
673 bool valid = its_cmd_get_validbit(its_cmd);
674 struct its_device *device;
675
676 if (!vgic_its_check_device_id(kvm, its, device_id))
677 return E_ITS_MAPD_DEVICE_OOR;
678
679 device = find_its_device(its, device_id);
680
681 /*
682 * The spec says that calling MAPD on an already mapped device
683 * invalidates all cached data for this device. We implement this
684 * by removing the mapping and re-establishing it.
685 */
686 if (device)
687 vgic_its_unmap_device(kvm, device);
688
689 /*
690 * The spec does not say whether unmapping a not-mapped device
691 * is an error, so we are done in any case.
692 */
693 if (!valid)
694 return 0;
695
696 device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
697 if (!device)
698 return -ENOMEM;
699
700 device->device_id = device_id;
701 INIT_LIST_HEAD(&device->itt_head);
702
703 list_add_tail(&device->dev_list, &its->device_list);
704
705 return 0;
706}
707
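/*
 * Derives how many collection IDs the guest may use from the size of the
 * collection table BASER (number of 64K pages times entries per page).
 */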
708static int vgic_its_nr_collection_ids(struct vgic_its *its)
709{
710 u64 r = its->baser_coll_table;
711
712 return (GITS_BASER_NR_PAGES(r) * SZ_64K) / GITS_BASER_ENTRY_SIZE(r);
713}
714
715/*
716 * The MAPC command maps collection IDs to redistributors.
717 * Must be called with the its_lock mutex held.
718 */
719static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
720 u64 *its_cmd)
721{
722 u16 coll_id;
723 u32 target_addr;
724 struct its_collection *collection;
725 bool valid;
726
727 valid = its_cmd_get_validbit(its_cmd);
728 coll_id = its_cmd_get_collection(its_cmd);
729 target_addr = its_cmd_get_target_addr(its_cmd);
730
731 if (target_addr >= atomic_read(&kvm->online_vcpus))
732 return E_ITS_MAPC_PROCNUM_OOR;
733
734 if (coll_id >= vgic_its_nr_collection_ids(its))
735 return E_ITS_MAPC_COLLECTION_OOR;
736
737 collection = find_collection(its, coll_id);
738
739 if (!valid) {
740 struct its_device *device;
741 struct its_itte *itte;
742 /*
743 * Clearing the mapping for that collection ID removes the
744 * entry from the list. If there wasn't any before, we can
745 * go home early.
746 */
747 if (!collection)
748 return 0;
749
750 for_each_lpi_its(device, itte, its)
751 if (itte->collection &&
752 itte->collection->collection_id == coll_id)
753 itte->collection = NULL;
754
755 list_del(&collection->coll_list);
756 kfree(collection);
757 } else {
758 if (!collection) {
759 collection = kzalloc(sizeof(struct its_collection),
760 GFP_KERNEL);
761 if (!collection)
762 return -ENOMEM;
763
764 vgic_its_init_collection(its, collection, coll_id);
765 collection->target_addr = target_addr;
766 } else {
767 collection->target_addr = target_addr;
768 update_affinity_collection(kvm, its, collection);
769 }
770 }
771
772 return 0;
773}
774
775/*
776 * The CLEAR command removes the pending state for a particular LPI.
777 * Must be called with the its_lock mutex held.
778 */
779static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
780 u64 *its_cmd)
781{
782 u32 device_id = its_cmd_get_deviceid(its_cmd);
783 u32 event_id = its_cmd_get_id(its_cmd);
784 struct its_itte *itte;
785
786
787 itte = find_itte(its, device_id, event_id);
788 if (!itte)
789 return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
790
791 itte->irq->pending = false;
792
793 return 0;
794}
795
796/*
797 * The INV command syncs the configuration bits from the memory table.
798 * Must be called with the its_lock mutex held.
799 */
800static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
801 u64 *its_cmd)
802{
803 u32 device_id = its_cmd_get_deviceid(its_cmd);
804 u32 event_id = its_cmd_get_id(its_cmd);
805 struct its_itte *itte;
806
807
808 itte = find_itte(its, device_id, event_id);
809 if (!itte)
810 return E_ITS_INV_UNMAPPED_INTERRUPT;
811
812 return update_lpi_config(kvm, itte->irq, NULL);
813}
814
815/*
816 * The INVALL command requests flushing of all IRQ data in this collection.
817 * Find the VCPU mapped to that collection, then iterate over the VM's list
818 * of mapped LPIs and update the configuration for each IRQ which targets
819 * the specified vcpu. The configuration will be read from the in-memory
820 * configuration table.
821 * Must be called with the its_lock mutex held.
822 */
823static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
824 u64 *its_cmd)
825{
826 u32 coll_id = its_cmd_get_collection(its_cmd);
827 struct its_collection *collection;
828 struct kvm_vcpu *vcpu;
829 struct vgic_irq *irq;
830 u32 *intids;
831 int irq_count, i;
832
833 collection = find_collection(its, coll_id);
834 if (!its_is_collection_mapped(collection))
835 return E_ITS_INVALL_UNMAPPED_COLLECTION;
836
837 vcpu = kvm_get_vcpu(kvm, collection->target_addr);
838
839 irq_count = vgic_copy_lpi_list(kvm, &intids);
840 if (irq_count < 0)
841 return irq_count;
842
843 for (i = 0; i < irq_count; i++) {
844 irq = vgic_get_irq(kvm, NULL, intids[i]);
845 if (!irq)
846 continue;
847 update_lpi_config(kvm, irq, vcpu);
848 vgic_put_irq(kvm, irq);
849 }
850
851 kfree(intids);
852
853 return 0;
854}
855
856/*
857 * The MOVALL command moves the pending state of all IRQs targeting one
858 * redistributor to another. We don't hold the pending state in the VCPUs,
859 * but in the IRQs instead, so there is really not much to do for us here.
860 * However the spec says that no IRQ must target the old redistributor
861 * afterwards, so we make sure that no LPI is using the associated target_vcpu.
862 * This command affects all LPIs in the system that target that redistributor.
863 */
864static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
865 u64 *its_cmd)
866{
867 struct vgic_dist *dist = &kvm->arch.vgic;
868 u32 target1_addr = its_cmd_get_target_addr(its_cmd);
869 u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
870 struct kvm_vcpu *vcpu1, *vcpu2;
871 struct vgic_irq *irq;
872
873 if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
874 target2_addr >= atomic_read(&kvm->online_vcpus))
875 return E_ITS_MOVALL_PROCNUM_OOR;
876
877 if (target1_addr == target2_addr)
878 return 0;
879
880 vcpu1 = kvm_get_vcpu(kvm, target1_addr);
881 vcpu2 = kvm_get_vcpu(kvm, target2_addr);
882
883 spin_lock(&dist->lpi_list_lock);
884
885 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
886 spin_lock(&irq->irq_lock);
887
888 if (irq->target_vcpu == vcpu1)
889 irq->target_vcpu = vcpu2;
890
891 spin_unlock(&irq->irq_lock);
892 }
893
894 spin_unlock(&dist->lpi_list_lock);
895
896 return 0;
897}
898
899/*
900 * This function is called with the its_cmd lock held, but the ITS data
901 * structure lock dropped.
902 */
903static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
904				   u64 *its_cmd)
905{
289	return -ENODEV;
906	u8 cmd = its_cmd_get_command(its_cmd);
907 int ret = -ENODEV;
908
909 mutex_lock(&its->its_lock);
910 switch (cmd) {
911 case GITS_CMD_MAPD:
912 ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
913 break;
914 case GITS_CMD_MAPC:
915 ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
916 break;
917 case GITS_CMD_MAPI:
918 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd, cmd);
919 break;
920 case GITS_CMD_MAPTI:
921 ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd, cmd);
922 break;
923 case GITS_CMD_MOVI:
924 ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
925 break;
926 case GITS_CMD_DISCARD:
927 ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
928 break;
929 case GITS_CMD_CLEAR:
930 ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
931 break;
932 case GITS_CMD_MOVALL:
933 ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
934 break;
935 case GITS_CMD_INV:
936 ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
937 break;
938 case GITS_CMD_INVALL:
939 ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
940 break;
941 case GITS_CMD_SYNC:
942 /* we ignore this command: we are in sync all of the time */
943 ret = 0;
944 break;
945 }
946 mutex_unlock(&its->its_lock);
947
948 return ret;
949}
950
951static u64 vgic_sanitise_its_baser(u64 reg)