author    Marc Zyngier <marc.zyngier@arm.com>  2014-07-08 07:09:01 -0400
committer Christoffer Dall <christoffer.dall@linaro.org>  2014-09-18 21:48:52 -0400
commit    c1bfb577addd4867a82c4f235824a315d5afb94a (patch)
tree      4e2fba53b594691839d2c511862b01c83783e1c2 /virt/kvm
parent    71afaba4a2e98bb7bdeba5078370ab43d46e67a1 (diff)
arm/arm64: KVM: vgic: switch to dynamic allocation
So far, all the VGIC data structures are statically defined by the *maximum* number of vcpus and interrupts they support. This means we always have to oversize them to cater for the worst case.

Start by changing the data structures to be dynamically sizeable, and allocate them at runtime.

The sizes are still very static though.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
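The heart of the change is visible in vgic_init_bitmap() in the diff below: the per-vcpu ("private") longs and the shared longs live in a single allocation, with the shared view starting nr_cpus longs in. A minimal userspace sketch of that layout — the mock_* names are hypothetical and calloc stands in for kzalloc:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG        (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)     (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define VGIC_NR_PRIVATE_IRQS 32   /* 16 SGIs + 16 PPIs per vcpu */

/* Mirrors the patched struct vgic_bitmap: one allocation, two views. */
struct mock_vgic_bitmap {
	unsigned long *private;   /* one long of private IRQs per vcpu */
	unsigned long *shared;    /* bitmap of the remaining shared IRQs */
};

static int mock_init_bitmap(struct mock_vgic_bitmap *b, int nr_cpus, int nr_irqs)
{
	int nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);

	b->private = calloc(nr_longs, sizeof(unsigned long));
	if (!b->private)
		return -1;
	/* The shared view starts right after the per-vcpu longs. */
	b->shared = b->private + nr_cpus;
	return 0;
}

int main(void)
{
	struct mock_vgic_bitmap b;

	if (mock_init_bitmap(&b, 4, 256))   /* 4 vcpus, 256 interrupts */
		return 1;
	printf("shared part starts %td longs in\n", b.shared - b.private);
	free(b.private);
	return 0;
}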
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic.c | 243
1 file changed, 208 insertions(+), 35 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 9bdf181a00e2..08db8764496a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -95,6 +95,7 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
 static void vgic_kick_vcpus(struct kvm *kvm);
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi);
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
@@ -105,10 +106,8 @@ static const struct vgic_ops *vgic_ops;
 static const struct vgic_params *vgic;
 
 /*
- * struct vgic_bitmap contains unions that provide two views of
- * the same data. In one case it is an array of registers of
- * u32's, and in the other case it is a bitmap of unsigned
- * longs.
+ * struct vgic_bitmap contains a bitmap made of unsigned longs, but
+ * extracts u32s out of them.
  *
  * This does not work on 64-bit BE systems, because the bitmap access
  * will store two consecutive 32-bit words with the higher-addressed
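The comment above refers to the 64-bit big-endian layout problem that REG_OFFSET_SWIZZLE works around. A self-contained host-side experiment (hypothetical, not part of the patch) makes it visible:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long bitmap = 1UL << 3;   /* "register 0", bit 3, via bitmap ops */
	unsigned int regs[sizeof(bitmap) / sizeof(unsigned int)];

	memcpy(regs, &bitmap, sizeof(bitmap));

	/* On 64-bit little-endian this prints regs[0] = 0x8: the u32 view
	 * lines up with the bitmap view. On 64-bit big-endian it prints
	 * regs[1] = 0x8, which is why vgic_bitmap_get_reg() XORs the
	 * register index with REG_OFFSET_SWIZZLE = 1 on such systems. */
	for (unsigned int i = 0; i < sizeof(bitmap) / sizeof(unsigned int); i++)
		printf("regs[%u] = %#lx\n", i, (unsigned long)regs[i]);
	return 0;
}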
@@ -124,23 +123,45 @@ static const struct vgic_params *vgic;
 #define REG_OFFSET_SWIZZLE	0
 #endif
 
+static int vgic_init_bitmap(struct vgic_bitmap *b, int nr_cpus, int nr_irqs)
+{
+	int nr_longs;
+
+	nr_longs = nr_cpus + BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+
+	b->private = kzalloc(sizeof(unsigned long) * nr_longs, GFP_KERNEL);
+	if (!b->private)
+		return -ENOMEM;
+
+	b->shared = b->private + nr_cpus;
+
+	return 0;
+}
+
+static void vgic_free_bitmap(struct vgic_bitmap *b)
+{
+	kfree(b->private);
+	b->private = NULL;
+	b->shared = NULL;
+}
+
 static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
 				int cpuid, u32 offset)
 {
 	offset >>= 2;
 	if (!offset)
-		return x->percpu[cpuid].reg + (offset ^ REG_OFFSET_SWIZZLE);
+		return (u32 *)(x->private + cpuid) + REG_OFFSET_SWIZZLE;
 	else
-		return x->shared.reg + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
+		return (u32 *)(x->shared) + ((offset - 1) ^ REG_OFFSET_SWIZZLE);
 }
 
 static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
 				   int cpuid, int irq)
 {
 	if (irq < VGIC_NR_PRIVATE_IRQS)
-		return test_bit(irq, x->percpu[cpuid].reg_ul);
+		return test_bit(irq, x->private + cpuid);
 
-	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
+	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared);
 }
 
 static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
@@ -149,9 +170,9 @@ static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
 	unsigned long *reg;
 
 	if (irq < VGIC_NR_PRIVATE_IRQS) {
-		reg = x->percpu[cpuid].reg_ul;
+		reg = x->private + cpuid;
 	} else {
-		reg = x->shared.reg_ul;
+		reg = x->shared;
 		irq -= VGIC_NR_PRIVATE_IRQS;
 	}
 
@@ -163,24 +184,49 @@ static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
 
 static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
 {
-	if (unlikely(cpuid >= VGIC_MAX_CPUS))
-		return NULL;
-	return x->percpu[cpuid].reg_ul;
+	return x->private + cpuid;
 }
 
 static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
 {
-	return x->shared.reg_ul;
+	return x->shared;
+}
+
+static int vgic_init_bytemap(struct vgic_bytemap *x, int nr_cpus, int nr_irqs)
+{
+	int size;
+
+	size  = nr_cpus * VGIC_NR_PRIVATE_IRQS;
+	size += nr_irqs - VGIC_NR_PRIVATE_IRQS;
+
+	x->private = kzalloc(size, GFP_KERNEL);
+	if (!x->private)
+		return -ENOMEM;
+
+	x->shared = x->private + nr_cpus * VGIC_NR_PRIVATE_IRQS / sizeof(u32);
+	return 0;
+}
+
+static void vgic_free_bytemap(struct vgic_bytemap *b)
+{
+	kfree(b->private);
+	b->private = NULL;
+	b->shared = NULL;
 }
 
 static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
 {
-	offset >>= 2;
-	BUG_ON(offset > (VGIC_NR_IRQS / 4));
-	if (offset < 8)
-		return x->percpu[cpuid] + offset;
-	else
-		return x->shared + offset - 8;
+	u32 *reg;
+
+	if (offset < VGIC_NR_PRIVATE_IRQS) {
+		reg = x->private;
+		offset += cpuid * VGIC_NR_PRIVATE_IRQS;
+	} else {
+		reg = x->shared;
+		offset -= VGIC_NR_PRIVATE_IRQS;
+	}
+
+	return reg + (offset / sizeof(u32));
 }
 
 #define VGIC_CFG_LEVEL	0
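Note that vgic_bytemap_get_reg() now takes the raw byte offset (the old code shifted it down by two first) and folds the vcpu into the private index. A worked example of the arithmetic, assuming VGIC_NR_PRIVATE_IRQS = 32; reg_index() is a hypothetical helper, not part of the patch:

#include <stdio.h>

#define VGIC_NR_PRIVATE_IRQS 32

/* Compute the u32 index the patched vgic_bytemap_get_reg() would return
 * for a given vcpu and byte offset into the priority space. */
static unsigned int reg_index(int cpuid, unsigned int offset, int *shared)
{
	if (offset < VGIC_NR_PRIVATE_IRQS) {
		*shared = 0;
		return (cpuid * VGIC_NR_PRIVATE_IRQS + offset) / 4;
	}
	*shared = 1;
	return (offset - VGIC_NR_PRIVATE_IRQS) / 4;
}

int main(void)
{
	int shared;
	unsigned int idx;

	idx = reg_index(2, 8, &shared);   /* private priorities of vcpu 2 */
	printf("cpu 2, offset 8  -> %s u32 #%u\n", shared ? "shared" : "private", idx);

	idx = reg_index(0, 40, &shared);  /* SPI priorities, IRQs 40..43 */
	printf("cpu 0, offset 40 -> %s u32 #%u\n", shared ? "shared" : "private", idx);
	return 0;
}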
@@ -744,7 +790,7 @@ static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
 		 */
 		vgic_dist_irq_set_pending(vcpu, lr.irq);
 		if (lr.irq < VGIC_NR_SGIS)
-			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
+			*vgic_get_sgi_sources(dist, vcpu_id, lr.irq) |= 1 << lr.source;
 		lr.state &= ~LR_STATE_PENDING;
 		vgic_set_lr(vcpu, i, lr);
 
@@ -778,7 +824,7 @@ static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
 	/* Copy source SGIs from distributor side */
 	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
 		int shift = 8 * (sgi - min_sgi);
-		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
+		reg |= ((u32)*vgic_get_sgi_sources(dist, vcpu_id, sgi)) << shift;
 	}
 
 	mmio_data_write(mmio, ~0, reg);
@@ -802,14 +848,15 @@ static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
 	/* Clear pending SGIs on the distributor */
 	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
 		u8 mask = reg >> (8 * (sgi - min_sgi));
+		u8 *src = vgic_get_sgi_sources(dist, vcpu_id, sgi);
 		if (set) {
-			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
+			if ((*src & mask) != mask)
 				updated = true;
-			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
+			*src |= mask;
 		} else {
-			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
+			if (*src & mask)
 				updated = true;
-			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
+			*src &= ~mask;
 		}
 	}
 
@@ -993,6 +1040,11 @@ bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	return true;
 }
 
+static u8 *vgic_get_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
+{
+	return dist->irq_sgi_sources + vcpu_id * VGIC_NR_SGIS + sgi;
+}
+
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 {
 	struct kvm *kvm = vcpu->kvm;
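This helper replaces the old fixed u8[VGIC_MAX_CPUS][VGIC_NR_SGIS] array with manual 2-D indexing into a flat, runtime-sized buffer. A standalone sketch of the same scheme (mock names, calloc in place of kzalloc):

#include <stdio.h>
#include <stdlib.h>

#define NR_SGIS 16   /* VGIC_NR_SGIS in the kernel */

/* Row selected by vcpu_id, column by sgi, as in vgic_get_sgi_sources(). */
static unsigned char *get_sgi_sources(unsigned char *sgi_sources,
				      int vcpu_id, int sgi)
{
	return sgi_sources + vcpu_id * NR_SGIS + sgi;
}

int main(void)
{
	int nr_cpus = 4;
	unsigned char *sgi_sources = calloc(nr_cpus * NR_SGIS, 1);

	if (!sgi_sources)
		return 1;
	/* Record that vcpu 0 sent SGI 5 to vcpu 3. */
	*get_sgi_sources(sgi_sources, 3, 5) |= 1 << 0;
	printf("vcpu 3, SGI 5 sources: %#x\n", *get_sgi_sources(sgi_sources, 3, 5));
	free(sgi_sources);
	return 0;
}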
@@ -1026,7 +1078,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 		if (target_cpus & 1) {
 			/* Flag the SGI as pending */
 			vgic_dist_irq_set_pending(vcpu, sgi);
-			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
+			*vgic_get_sgi_sources(dist, c, sgi) |= 1 << vcpu_id;
 			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
 		}
 
@@ -1073,14 +1125,14 @@ static void vgic_update_state(struct kvm *kvm)
 	int c;
 
 	if (!dist->enabled) {
-		set_bit(0, &dist->irq_pending_on_cpu);
+		set_bit(0, dist->irq_pending_on_cpu);
 		return;
 	}
 
 	kvm_for_each_vcpu(c, vcpu, kvm) {
 		if (compute_pending_for_cpu(vcpu)) {
 			pr_debug("CPU%d has pending interrupts\n", c);
-			set_bit(c, &dist->irq_pending_on_cpu);
+			set_bit(c, dist->irq_pending_on_cpu);
 		}
 	}
 }
@@ -1237,14 +1289,14 @@ static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 	int vcpu_id = vcpu->vcpu_id;
 	int c;
 
-	sources = dist->irq_sgi_sources[vcpu_id][irq];
+	sources = *vgic_get_sgi_sources(dist, vcpu_id, irq);
 
 	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
 		if (vgic_queue_irq(vcpu, c, irq))
 			clear_bit(c, &sources);
 	}
 
-	dist->irq_sgi_sources[vcpu_id][irq] = sources;
+	*vgic_get_sgi_sources(dist, vcpu_id, irq) = sources;
 
 	/*
 	 * If the sources bitmap has been cleared it means that we
@@ -1332,7 +1384,7 @@ epilog:
 		 * us. Claim we don't have anything pending. We'll
 		 * adjust that if needed while exiting.
 		 */
-		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
+		clear_bit(vcpu_id, dist->irq_pending_on_cpu);
 	}
 }
 
@@ -1430,7 +1482,7 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	/* Check if we still have something up our sleeve... */
 	pending = find_first_zero_bit(elrsr_ptr, vgic->nr_lr);
 	if (level_pending || pending < vgic->nr_lr)
-		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
+		set_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
@@ -1464,7 +1516,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return 0;
 
-	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
+	return test_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
 }
 
 static void vgic_kick_vcpus(struct kvm *kvm)
@@ -1559,7 +1611,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 
 	if (level) {
 		vgic_cpu_irq_set(vcpu, irq_num);
-		set_bit(cpuid, &dist->irq_pending_on_cpu);
+		set_bit(cpuid, dist->irq_pending_on_cpu);
 	}
 
 out:
@@ -1603,6 +1655,32 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+	kfree(vgic_cpu->pending_shared);
+	kfree(vgic_cpu->vgic_irq_lr_map);
+	vgic_cpu->pending_shared = NULL;
+	vgic_cpu->vgic_irq_lr_map = NULL;
+}
+
+static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
+{
+	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+
+	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
+		kvm_vgic_vcpu_destroy(vcpu);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 /**
  * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
  * @vcpu: pointer to the vcpu struct
@@ -1642,6 +1720,97 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vgic_vcpu_destroy(vcpu);
+
+	vgic_free_bitmap(&dist->irq_enabled);
+	vgic_free_bitmap(&dist->irq_level);
+	vgic_free_bitmap(&dist->irq_pending);
+	vgic_free_bitmap(&dist->irq_soft_pend);
+	vgic_free_bitmap(&dist->irq_queued);
+	vgic_free_bitmap(&dist->irq_cfg);
+	vgic_free_bytemap(&dist->irq_priority);
+	if (dist->irq_spi_target) {
+		for (i = 0; i < dist->nr_cpus; i++)
+			vgic_free_bitmap(&dist->irq_spi_target[i]);
+	}
+	kfree(dist->irq_sgi_sources);
+	kfree(dist->irq_spi_cpu);
+	kfree(dist->irq_spi_target);
+	kfree(dist->irq_pending_on_cpu);
+	dist->irq_sgi_sources = NULL;
+	dist->irq_spi_cpu = NULL;
+	dist->irq_spi_target = NULL;
+	dist->irq_pending_on_cpu = NULL;
+}
+
+/*
+ * Allocate and initialize the various data structures. Must be called
+ * with kvm->lock held!
+ */
+static int vgic_init_maps(struct kvm *kvm)
+{
+	struct vgic_dist *dist = &kvm->arch.vgic;
+	struct kvm_vcpu *vcpu;
+	int nr_cpus, nr_irqs;
+	int ret, i;
+
+	nr_cpus = dist->nr_cpus = VGIC_MAX_CPUS;
+	nr_irqs = dist->nr_irqs = VGIC_NR_IRQS;
+
+	ret  = vgic_init_bitmap(&dist->irq_enabled, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_level, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_pending, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_soft_pend, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_queued, nr_cpus, nr_irqs);
+	ret |= vgic_init_bitmap(&dist->irq_cfg, nr_cpus, nr_irqs);
+	ret |= vgic_init_bytemap(&dist->irq_priority, nr_cpus, nr_irqs);
+
+	if (ret)
+		goto out;
+
+	dist->irq_sgi_sources = kzalloc(nr_cpus * VGIC_NR_SGIS, GFP_KERNEL);
+	dist->irq_spi_cpu = kzalloc(nr_irqs - VGIC_NR_PRIVATE_IRQS, GFP_KERNEL);
+	dist->irq_spi_target = kzalloc(sizeof(*dist->irq_spi_target) * nr_cpus,
+				       GFP_KERNEL);
+	dist->irq_pending_on_cpu = kzalloc(BITS_TO_LONGS(nr_cpus) * sizeof(long),
+					   GFP_KERNEL);
+	if (!dist->irq_sgi_sources ||
+	    !dist->irq_spi_cpu ||
+	    !dist->irq_spi_target ||
+	    !dist->irq_pending_on_cpu) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < nr_cpus; i++)
+		ret |= vgic_init_bitmap(&dist->irq_spi_target[i],
+					nr_cpus, nr_irqs);
+
+	if (ret)
+		goto out;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
+		if (ret) {
+			kvm_err("VGIC: Failed to allocate vcpu memory\n");
+			break;
+		}
+	}
+
+out:
+	if (ret)
+		kvm_vgic_destroy(kvm);
+
+	return ret;
+}
+
 /**
  * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct
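vgic_init_maps() above funnels every failure path through kvm_vgic_destroy(), which is safe against partially-initialized state because kfree(NULL) is a no-op and each pointer is reset to NULL after freeing. The same idiom in miniature (userspace sketch, hypothetical names):

#include <stdlib.h>

struct state { char *a; char *b; };

static void destroy(struct state *s)
{
	free(s->a);          /* free(NULL) is a no-op, like kfree(NULL) */
	free(s->b);
	s->a = s->b = NULL;  /* safe to call destroy() again later */
}

static int init(struct state *s)
{
	s->a = calloc(1, 64);
	s->b = calloc(1, 64);
	if (!s->a || !s->b) {
		destroy(s);  /* one teardown handles any partial failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct state s = { 0 };

	if (init(&s))
		return 1;
	destroy(&s);
	return 0;
}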
@@ -1722,6 +1891,10 @@ int kvm_vgic_create(struct kvm *kvm)
 	kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
 	kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
 
+	ret = vgic_init_maps(kvm);
+	if (ret)
+		kvm_err("Unable to allocate maps\n");
+
 out_unlock:
 	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
 		vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);