author	Olof Johansson <olof@lixom.net>	2013-02-12 18:20:19 -0500
committer	Olof Johansson <olof@lixom.net>	2013-02-12 18:20:19 -0500
commit	b221498e5d276b4ea5a50ba545b75f8cf2c2edf5 (patch)
tree	55461b2678d331ce7f06cca728d5668f9ab7a1e1
parent	0582b05366f39ea1024450f18cc801c7f42bbbbb (diff)
parent	967f84275ba74eac696f798ce1a780285170b5e7 (diff)
Merge branch 'kvm-arm/timer' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into next/virt
From Marc Zyngier, this branch enables virtual GIC and timer for KVM/ARM.

* 'kvm-arm/timer' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms:
  ARM: KVM: arch_timers: Wire the init code and config option
  ARM: KVM: arch_timers: Add timer world switch
  ARM: KVM: arch_timers: Add guest timer core support
  ARM: KVM: Add VGIC configuration option
  ARM: KVM: VGIC initialisation code
  ARM: KVM: VGIC control interface world switch
  ARM: KVM: VGIC interrupt injection
  ARM: KVM: vgic: retire queued, disabled interrupts
  ARM: KVM: VGIC virtual CPU interface management
  ARM: KVM: VGIC distributor handling
  ARM: KVM: VGIC accept vcpu and dist base addresses from user space
  ARM: KVM: Initial VGIC infrastructure code
  ARM: KVM: Keep track of currently running vcpus
  KVM: ARM: Introduce KVM_ARM_SET_DEVICE_ADDR ioctl
  ARM: gic: add __ASSEMBLY__ guard to C definitions
  ARM: gic: define GICH offsets for VGIC support
  ARM: gic: add missing distributor defintions
-rw-r--r--	Documentation/virtual/kvm/api.txt	38
-rw-r--r--	arch/arm/include/asm/kvm_arch_timer.h	85
-rw-r--r--	arch/arm/include/asm/kvm_asm.h	3
-rw-r--r--	arch/arm/include/asm/kvm_host.h	23
-rw-r--r--	arch/arm/include/asm/kvm_vgic.h	221
-rw-r--r--	arch/arm/include/uapi/asm/kvm.h	16
-rw-r--r--	arch/arm/kernel/asm-offsets.c	18
-rw-r--r--	arch/arm/kvm/Kconfig	16
-rw-r--r--	arch/arm/kvm/Makefile	2
-rw-r--r--	arch/arm/kvm/arch_timer.c	271
-rw-r--r--	arch/arm/kvm/arm.c	178
-rw-r--r--	arch/arm/kvm/coproc.c	4
-rw-r--r--	arch/arm/kvm/interrupts.S	6
-rw-r--r--	arch/arm/kvm/interrupts_head.S	164
-rw-r--r--	arch/arm/kvm/mmio.c	3
-rw-r--r--	arch/arm/kvm/vgic.c	1506
-rw-r--r--	include/linux/irqchip/arm-gic.h	33
-rw-r--r--	include/uapi/linux/kvm.h	8
18 files changed, 2581 insertions, 14 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c25439a58274..e0fa0ea2b187 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2210,6 +2210,44 @@ This ioctl returns the guest registers that are supported for the
 KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
 
 
+4.80 KVM_ARM_SET_DEVICE_ADDR
+
+Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
+Architectures: arm
+Type: vm ioctl
+Parameters: struct kvm_arm_device_address (in)
+Returns: 0 on success, -1 on error
+Errors:
+  ENODEV: The device id is unknown
+  ENXIO:  Device not supported on current system
+  EEXIST: Address already set
+  E2BIG:  Address outside guest physical address space
+  EBUSY:  Address overlaps with other device range
+
+struct kvm_arm_device_addr {
+	__u64 id;
+	__u64 addr;
+};
+
+Specify a device address in the guest's physical address space where guests
+can access emulated or directly exposed devices, which the host kernel needs
+to know about. The id field is an architecture specific identifier for a
+specific device.
+
+ARM divides the id field into two parts, a device id and an address type id
+specific to the individual device.
+
+  bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
+  field: | 0x00000000             |    device id    |  addr type id  |
+
+ARM currently only require this when using the in-kernel GIC support for the
+hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2 as the device id. When
+setting the base address for the guest's mapping of the VGIC virtual CPU
+and distributor interface, the ioctl must be called after calling
+KVM_CREATE_IRQCHIP, but before calling KVM_RUN on any of the VCPUs. Calling
+this ioctl twice for any of the base addresses will return -EEXIST.
+
+
 5. The kvm_run structure
 ------------------------
 
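
As a rough illustration of how a userspace VMM would drive the new ioctl, here is a minimal sketch (not part of the patch); vm_fd is assumed to come from KVM_CREATE_VM, and dist_base/cpu_base are placeholder addresses chosen by the VMM, not values mandated by the API:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Place one of the guest's VGIC regions via KVM_ARM_SET_DEVICE_ADDR. */
	static int set_vgic_addr(int vm_fd, __u64 addr_type, __u64 base)
	{
		struct kvm_arm_device_addr dev_addr = {
			/* device id in bits 31:16, address type id in bits 15:0 */
			.id = ((__u64)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
			      (addr_type << KVM_ARM_DEVICE_TYPE_SHIFT),
			.addr = base,
		};

		return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr); /* 0, or -1 with errno */
	}

	/* After KVM_CREATE_IRQCHIP and before any KVM_RUN:
	 *	set_vgic_addr(vm_fd, KVM_VGIC_V2_ADDR_TYPE_DIST, dist_base);
	 *	set_vgic_addr(vm_fd, KVM_VGIC_V2_ADDR_TYPE_CPU, cpu_base);
	 */
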
diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h
new file mode 100644
index 000000000000..68cb9e1dfb81
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arch_timer.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
+#define __ASM_ARM_KVM_ARCH_TIMER_H
+
+#include <linux/clocksource.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+struct arch_timer_kvm {
+#ifdef CONFIG_KVM_ARM_TIMER
+	/* Is the timer enabled */
+	bool			enabled;
+
+	/* Virtual offset */
+	cycle_t			cntvoff;
+#endif
+};
+
+struct arch_timer_cpu {
+#ifdef CONFIG_KVM_ARM_TIMER
+	/* Registers: control register, timer value */
+	u32				cntv_ctl;	/* Saved/restored */
+	cycle_t				cntv_cval;	/* Saved/restored */
+
+	/*
+	 * Anything that is not used directly from assembly code goes
+	 * here.
+	 */
+
+	/* Background timer used when the guest is not running */
+	struct hrtimer			timer;
+
+	/* Work queued with the above timer expires */
+	struct work_struct		expired;
+
+	/* Background timer active */
+	bool				armed;
+
+	/* Timer IRQ */
+	const struct kvm_irq_level	*irq;
+#endif
+};
+
+#ifdef CONFIG_KVM_ARM_TIMER
+int kvm_timer_hyp_init(void);
+int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_timer_hyp_init(void)
+{
+	return 0;
+};
+
+static inline int kvm_timer_init(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 5e06e8177784..e4956f4e23e1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -45,7 +45,8 @@
 #define c13_TID_URW	23	/* Thread ID, User R/W */
 #define c13_TID_URO	24	/* Thread ID, User R/O */
 #define c13_TID_PRIV	25	/* Thread ID, Privileged */
-#define NR_CP15_REGS	26	/* Number of regs (incl. invalid) */
+#define c14_CNTKCTL	26	/* Timer Control Register (PL1) */
+#define NR_CP15_REGS	27	/* Number of regs (incl. invalid) */
 
 #define ARM_EXCEPTION_RESET	  0
 #define ARM_EXCEPTION_UNDEFINED   1
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 98b4d1a72923..dfe98866a992 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -23,6 +23,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/fpstate.h>
+#include <asm/kvm_arch_timer.h>
 
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
 #define KVM_MEMORY_SLOTS 32
@@ -37,6 +38,8 @@
 #define KVM_NR_PAGE_SIZES	1
 #define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
+#include <asm/kvm_vgic.h>
+
 struct kvm_vcpu;
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int kvm_target_cpu(void);
@@ -47,6 +50,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64    vttbr;
 
+	/* Timer */
+	struct arch_timer_kvm	timer;
+
 	/*
 	 * Anything that is not used directly from assembly code goes
 	 * here.
@@ -58,6 +64,9 @@ struct kvm_arch {
 
 	/* Stage-2 page table */
 	pgd_t *pgd;
+
+	/* Interrupt controller */
+	struct vgic_dist	vgic;
 };
 
 #define KVM_NR_MEM_OBJS     40
@@ -92,6 +101,10 @@ struct kvm_vcpu_arch {
 	struct vfp_hard_struct vfp_guest;
 	struct vfp_hard_struct *vfp_host;
 
+	/* VGIC state */
+	struct vgic_cpu vgic_cpu;
+	struct arch_timer_cpu timer_cpu;
+
 	/*
 	 * Anything that is not used directly from assembly code goes
 	 * here.
@@ -158,4 +171,14 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
 	return 0;
 }
+
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+struct kvm_one_reg;
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
new file mode 100644
index 000000000000..ab97207d9cd3
--- /dev/null
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARM_KVM_VGIC_H
+#define __ASM_ARM_KVM_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/irqchip/arm-gic.h>
+
+#define VGIC_NR_IRQS		128
+#define VGIC_NR_SGIS		16
+#define VGIC_NR_PPIS		16
+#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_NR_SHARED_IRQS	(VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
+#define VGIC_MAX_CPUS		KVM_MAX_VCPUS
+#define VGIC_MAX_LRS		(1 << 6)
+
+/* Sanity checks... */
+#if (VGIC_MAX_CPUS > 8)
+#error	Invalid number of CPU interfaces
+#endif
+
+#if (VGIC_NR_IRQS & 31)
+#error "VGIC_NR_IRQS must be a multiple of 32"
+#endif
+
+#if (VGIC_NR_IRQS > 1024)
+#error "VGIC_NR_IRQS must be <= 1024"
+#endif
+
+/*
+ * The GIC distributor registers describing interrupts have two parts:
+ * - 32 per-CPU interrupts (SGI + PPI)
+ * - a bunch of shared interrupts (SPI)
+ */
+struct vgic_bitmap {
+	union {
+		u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
+		DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
+	} percpu[VGIC_MAX_CPUS];
+	union {
+		u32 reg[VGIC_NR_SHARED_IRQS / 32];
+		DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
+	} shared;
+};
+
+struct vgic_bytemap {
+	u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
+	u32 shared[VGIC_NR_SHARED_IRQS / 4];
+};
+
+struct vgic_dist {
+#ifdef CONFIG_KVM_ARM_VGIC
+	spinlock_t		lock;
+	bool			ready;
+
+	/* Virtual control interface mapping */
+	void __iomem		*vctrl_base;
+
+	/* Distributor and vcpu interface mapping in the guest */
+	phys_addr_t		vgic_dist_base;
+	phys_addr_t		vgic_cpu_base;
+
+	/* Distributor enabled */
+	u32			enabled;
+
+	/* Interrupt enabled (one bit per IRQ) */
+	struct vgic_bitmap	irq_enabled;
+
+	/* Interrupt 'pin' level */
+	struct vgic_bitmap	irq_state;
+
+	/* Level-triggered interrupt in progress */
+	struct vgic_bitmap	irq_active;
+
+	/* Interrupt priority. Not used yet. */
+	struct vgic_bytemap	irq_priority;
+
+	/* Level/edge triggered */
+	struct vgic_bitmap	irq_cfg;
+
+	/* Source CPU per SGI and target CPU */
+	u8			irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
+
+	/* Target CPU for each IRQ */
+	u8			irq_spi_cpu[VGIC_NR_SHARED_IRQS];
+	struct vgic_bitmap	irq_spi_target[VGIC_MAX_CPUS];
+
+	/* Bitmap indicating which CPU has something pending */
+	unsigned long		irq_pending_on_cpu;
+#endif
+};
+
+struct vgic_cpu {
+#ifdef CONFIG_KVM_ARM_VGIC
+	/* per IRQ to LR mapping */
+	u8		vgic_irq_lr_map[VGIC_NR_IRQS];
+
+	/* Pending interrupts on this VCPU */
+	DECLARE_BITMAP(	pending_percpu, VGIC_NR_PRIVATE_IRQS);
+	DECLARE_BITMAP(	pending_shared, VGIC_NR_SHARED_IRQS);
+
+	/* Bitmap of used/free list registers */
+	DECLARE_BITMAP(	lr_used, VGIC_MAX_LRS);
+
+	/* Number of list registers on this CPU */
+	int		nr_lr;
+
+	/* CPU vif control registers for world switch */
+	u32		vgic_hcr;
+	u32		vgic_vmcr;
+	u32		vgic_misr;	/* Saved only */
+	u32		vgic_eisr[2];	/* Saved only */
+	u32		vgic_elrsr[2];	/* Saved only */
+	u32		vgic_apr;
+	u32		vgic_lr[VGIC_MAX_LRS];
+#endif
+};
+
+#define LR_EMPTY	0xff
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_exit_mmio;
+
+#ifdef CONFIG_KVM_ARM_VGIC
+int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
+int kvm_vgic_hyp_init(void);
+int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+			bool level);
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		      struct kvm_exit_mmio *mmio);
+
+#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.vctrl_base))
+#define vgic_initialized(k)	((k)->arch.vgic.ready)
+
+#else
+static inline int kvm_vgic_hyp_init(void)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_init(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_create(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
+				      unsigned int irq_num, bool level)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+				    struct kvm_exit_mmio *mmio)
+{
+	return false;
+}
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline bool vgic_initialized(struct kvm *kvm)
+{
+	return true;
+}
+#endif
+
+#endif
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 3303ff5adbf3..023bfeb367bf 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -65,6 +65,22 @@ struct kvm_regs {
 #define KVM_ARM_TARGET_CORTEX_A15	0
 #define KVM_ARM_NUM_TARGETS		1
 
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT	0
+#define KVM_ARM_DEVICE_TYPE_MASK	(0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT		16
+#define KVM_ARM_DEVICE_ID_MASK		(0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2		0
+
+/* Supported VGIC address types  */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST	0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU	1
+
+#define KVM_VGIC_V2_DIST_SIZE		0x1000
+#define KVM_VGIC_V2_CPU_SIZE		0x2000
+
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 
 struct kvm_vcpu_init {
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c8b3272dfed1..5ce738b43508 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -169,6 +169,24 @@ int main(void)
   DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
   DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
   DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
+#ifdef CONFIG_KVM_ARM_VGIC
+  DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
+  DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr));
+  DEFINE(VGIC_CPU_VMCR,		offsetof(struct vgic_cpu, vgic_vmcr));
+  DEFINE(VGIC_CPU_MISR,		offsetof(struct vgic_cpu, vgic_misr));
+  DEFINE(VGIC_CPU_EISR,		offsetof(struct vgic_cpu, vgic_eisr));
+  DEFINE(VGIC_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_elrsr));
+  DEFINE(VGIC_CPU_APR,		offsetof(struct vgic_cpu, vgic_apr));
+  DEFINE(VGIC_CPU_LR,		offsetof(struct vgic_cpu, vgic_lr));
+  DEFINE(VGIC_CPU_NR_LR,	offsetof(struct vgic_cpu, nr_lr));
+#ifdef CONFIG_KVM_ARM_TIMER
+  DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+  DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
+  DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
+  DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
+#endif
+  DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
+#endif
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
 #endif
   return 0;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 05227cb57a7b..49dd64e579c2 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -51,6 +51,22 @@ config KVM_ARM_MAX_VCPUS
 	  large, so only choose a reasonable number that you expect to
 	  actually use.
 
+config KVM_ARM_VGIC
+	bool "KVM support for Virtual GIC"
+	depends on KVM_ARM_HOST && OF
+	select HAVE_KVM_IRQCHIP
+	default y
+	---help---
+	  Adds support for a hardware assisted, in-kernel GIC emulation.
+
+config KVM_ARM_TIMER
+	bool "KVM support for Architected Timers"
+	depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
+	select HAVE_KVM_IRQCHIP
+	default y
+	---help---
+	  Adds support for the Architected Timers in virtual machines
+
 source drivers/virtio/Kconfig
 
 endif # VIRTUALIZATION
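
For reference, enabling both of the new options on a kernel that already selects KVM_ARM_HOST, OF and ARM_ARCH_TIMER amounts to the following .config fragment (an illustration, not part of the patch):

	CONFIG_KVM_ARM_VGIC=y
	CONFIG_KVM_ARM_TIMER=y

Both options default to y, so a configuration that enables KVM_ARM_HOST will normally pick them up automatically.
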
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index ea27987bd07f..fc96ce6f2357 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -19,3 +19,5 @@ kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
+obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
+obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c
new file mode 100644
index 000000000000..6ac938d46297
--- /dev/null
+++ b/arch/arm/kvm/arch_timer.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_irq.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <asm/arch_timer.h>
+
+#include <asm/kvm_vgic.h>
+#include <asm/kvm_arch_timer.h>
+
+static struct timecounter *timecounter;
+static struct workqueue_struct *wqueue;
+static struct kvm_irq_level timer_irq = {
+	.level	= 1,
+};
+
+static cycle_t kvm_phys_timer_read(void)
+{
+	return timecounter->cc->read(timecounter->cc);
+}
+
+static bool timer_is_armed(struct arch_timer_cpu *timer)
+{
+	return timer->armed;
+}
+
+/* timer_arm: as in "arm the timer", not as in ARM the company */
+static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
+{
+	timer->armed = true;
+	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
+		      HRTIMER_MODE_ABS);
+}
+
+static void timer_disarm(struct arch_timer_cpu *timer)
+{
+	if (timer_is_armed(timer)) {
+		hrtimer_cancel(&timer->timer);
+		cancel_work_sync(&timer->expired);
+		timer->armed = false;
+	}
+}
+
+static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	timer->cntv_ctl |= 1 << 1; /* Mask the interrupt in the guest */
+	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+			    vcpu->arch.timer_cpu.irq->irq,
+			    vcpu->arch.timer_cpu.irq->level);
+}
+
+static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+{
+	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
+
+	/*
+	 * We disable the timer in the world switch and let it be
+	 * handled by kvm_timer_sync_hwstate(). Getting a timer
+	 * interrupt at this point is a sure sign of some major
+	 * breakage.
+	 */
+	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
+	return IRQ_HANDLED;
+}
+
+static void kvm_timer_inject_irq_work(struct work_struct *work)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+	vcpu->arch.timer_cpu.armed = false;
+	kvm_timer_inject_irq(vcpu);
+}
+
+static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
+{
+	struct arch_timer_cpu *timer;
+	timer = container_of(hrt, struct arch_timer_cpu, timer);
+	queue_work(wqueue, &timer->expired);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Disarm any pending soft timers, since the world-switch code will write the
+ * virtual timer state back to the physical CPU.
+ */
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	/*
+	 * We're about to run this vcpu again, so there is no need to
+	 * keep the background timer running, as we're about to
+	 * populate the CPU timer again.
+	 */
+	timer_disarm(timer);
+}
+
+/**
+ * kvm_timer_sync_hwstate - sync timer state from cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the virtual timer was armed and either schedule a corresponding
+ * soft timer or inject directly if already expired.
+ */
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	cycle_t cval, now;
+	u64 ns;
+
+	/* Check if the timer is enabled and unmasked first */
+	if ((timer->cntv_ctl & 3) != 1)
+		return;
+
+	cval = timer->cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	BUG_ON(timer_is_armed(timer));
+
+	if (cval <= now) {
+		/*
+		 * Timer has already expired while we were not
+		 * looking. Inject the interrupt and carry on.
+		 */
+		kvm_timer_inject_irq(vcpu);
+		return;
+	}
+
+	ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+	timer_arm(timer, ns);
+}
+
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
+	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	timer->timer.function = kvm_timer_expire;
+	timer->irq = &timer_irq;
+}
+
+static void kvm_timer_init_interrupt(void *info)
+{
+	enable_percpu_irq(timer_irq.irq, 0);
+}
+
+
+static int kvm_timer_cpu_notify(struct notifier_block *self,
+				unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		kvm_timer_init_interrupt(NULL);
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		disable_percpu_irq(timer_irq.irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_timer_cpu_nb = {
+	.notifier_call = kvm_timer_cpu_notify,
+};
+
+static const struct of_device_id arch_timer_of_match[] = {
+	{ .compatible	= "arm,armv7-timer",	},
+	{},
+};
+
+int kvm_timer_hyp_init(void)
+{
+	struct device_node *np;
+	unsigned int ppi;
+	int err;
+
+	timecounter = arch_timer_get_timecounter();
+	if (!timecounter)
+		return -ENODEV;
+
+	np = of_find_matching_node(NULL, arch_timer_of_match);
+	if (!np) {
+		kvm_err("kvm_arch_timer: can't find DT node\n");
+		return -ENODEV;
+	}
+
+	ppi = irq_of_parse_and_map(np, 2);
+	if (!ppi) {
+		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
+				 "kvm guest timer", kvm_get_running_vcpus());
+	if (err) {
+		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
+			ppi, err);
+		goto out;
+	}
+
+	timer_irq.irq = ppi;
+
+	err = register_cpu_notifier(&kvm_timer_cpu_nb);
+	if (err) {
+		kvm_err("Cannot register timer CPU notifier\n");
+		goto out_free;
+	}
+
+	wqueue = create_singlethread_workqueue("kvm_arch_timer");
+	if (!wqueue) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	kvm_info("%s IRQ%d\n", np->name, ppi);
+	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
+
+	goto out;
+out_free:
+	free_percpu_irq(ppi, kvm_get_running_vcpus());
+out:
+	of_node_put(np);
+	return err;
+}
+
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	timer_disarm(timer);
+}
+
+int kvm_timer_init(struct kvm *kvm)
+{
+	if (timecounter && wqueue) {
+		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+		kvm->arch.timer.enabled = 1;
+	}
+
+	return 0;
+}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d30e3afdaf9..9ada5549216d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -54,11 +54,40 @@ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
 static unsigned long hyp_default_vectors;
 
+/* Per-CPU variable containing the currently running vcpu. */
+static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
+
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u8 kvm_next_vmid;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
+static bool vgic_present;
+
+static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
+{
+	BUG_ON(preemptible());
+	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+}
+
+/**
+ * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
+ * Must be called from non-preemptible context
+ */
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
+{
+	BUG_ON(preemptible());
+	return __get_cpu_var(kvm_arm_running_vcpu);
+}
+
+/**
+ * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
+ */
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
+{
+	return &kvm_arm_running_vcpu;
+}
+
 int kvm_arch_hardware_enable(void *garbage)
 {
 	return 0;
@@ -157,6 +186,9 @@ int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
 	switch (ext) {
+	case KVM_CAP_IRQCHIP:
+		r = vgic_present;
+		break;
 	case KVM_CAP_USER_MEMORY:
 	case KVM_CAP_SYNC_MMU:
 	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
@@ -167,6 +199,8 @@ int kvm_dev_ioctl_check_extension(long ext)
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
 		break;
+	case KVM_CAP_ARM_SET_DEVICE_ADDR:
+		r = 1;
 	case KVM_CAP_NR_VCPUS:
 		r = num_online_cpus();
 		break;
@@ -255,6 +289,7 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	kvm_mmu_free_memory_caches(vcpu);
+	kvm_timer_vcpu_terminate(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
@@ -286,8 +321,19 @@ int __attribute_const__ kvm_target_cpu(void)
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
+	int ret;
+
 	/* Force users to call KVM_ARM_VCPU_INIT */
 	vcpu->arch.target = -1;
+
+	/* Set up VGIC */
+	ret = kvm_vgic_vcpu_init(vcpu);
+	if (ret)
+		return ret;
+
+	/* Set up the timer */
+	kvm_timer_vcpu_init(vcpu);
+
 	return 0;
 }
 
@@ -308,10 +354,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	 */
 	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
 		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
+
+	kvm_arm_set_running_vcpu(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	kvm_arm_set_running_vcpu(NULL);
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -342,7 +391,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
  */
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-	return !!v->arch.irq_lines;
+	return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
 }
 
 /* Just ensure a guest exit from a particular CPU */
@@ -597,6 +646,17 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	vcpu->arch.has_run_once = true;
 
 	/*
+	 * Initialize the VGIC before running a vcpu the first time on
+	 * this VM.
+	 */
+	if (irqchip_in_kernel(vcpu->kvm) &&
+	    unlikely(!vgic_initialized(vcpu->kvm))) {
+		int ret = kvm_vgic_init(vcpu->kvm);
+		if (ret)
+			return ret;
+	}
+
+	/*
 	 * Handle the "start in power-off" case by calling into the
 	 * PSCI code.
 	 */
@@ -661,6 +721,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		if (vcpu->arch.pause)
 			vcpu_pause(vcpu);
 
+		kvm_vgic_flush_hwstate(vcpu);
+		kvm_timer_flush_hwstate(vcpu);
+
 		local_irq_disable();
 
 		/*
@@ -673,6 +736,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
 			local_irq_enable();
+			kvm_timer_sync_hwstate(vcpu);
+			kvm_vgic_sync_hwstate(vcpu);
 			continue;
 		}
 
@@ -705,6 +770,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * Back from guest
 		 *************************************************************/
 
+		kvm_timer_sync_hwstate(vcpu);
+		kvm_vgic_sync_hwstate(vcpu);
+
 		ret = handle_exit(vcpu, run, ret);
 	}
 
@@ -760,20 +828,49 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
 
 	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
 
-	if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
-		return -EINVAL;
+	switch (irq_type) {
+	case KVM_ARM_IRQ_TYPE_CPU:
+		if (irqchip_in_kernel(kvm))
+			return -ENXIO;
 
 	if (vcpu_idx >= nrcpus)
 		return -EINVAL;
 
 	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
 	if (!vcpu)
		return -EINVAL;
 
 	if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
 		return -EINVAL;
+
+		return vcpu_interrupt_line(vcpu, irq_num, level);
+	case KVM_ARM_IRQ_TYPE_PPI:
+		if (!irqchip_in_kernel(kvm))
+			return -ENXIO;
 
-	return vcpu_interrupt_line(vcpu, irq_num, level);
+		if (vcpu_idx >= nrcpus)
+			return -EINVAL;
+
+		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+		if (!vcpu)
+			return -EINVAL;
+
+		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
+			return -EINVAL;
+
+		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
+	case KVM_ARM_IRQ_TYPE_SPI:
+		if (!irqchip_in_kernel(kvm))
+			return -ENXIO;
+
+		if (irq_num < VGIC_NR_PRIVATE_IRQS ||
+		    irq_num > KVM_ARM_IRQ_GIC_MAX)
+			return -EINVAL;
+
+		return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
+	}
+
+	return -EINVAL;
 }
 
 long kvm_arch_vcpu_ioctl(struct file *filp,
@@ -827,10 +924,49 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	return -EINVAL;
 }
 
+static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
+					struct kvm_arm_device_addr *dev_addr)
+{
+	unsigned long dev_id, type;
+
+	dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
+		KVM_ARM_DEVICE_ID_SHIFT;
+	type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
+		KVM_ARM_DEVICE_TYPE_SHIFT;
+
+	switch (dev_id) {
+	case KVM_ARM_DEVICE_VGIC_V2:
+		if (!vgic_present)
+			return -ENXIO;
+		return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
+	default:
+		return -ENODEV;
+	}
+}
+
 long kvm_arch_vm_ioctl(struct file *filp,
 		       unsigned int ioctl, unsigned long arg)
 {
-	return -EINVAL;
+	struct kvm *kvm = filp->private_data;
+	void __user *argp = (void __user *)arg;
+
+	switch (ioctl) {
+	case KVM_CREATE_IRQCHIP: {
+		if (vgic_present)
+			return kvm_vgic_create(kvm);
+		else
+			return -ENXIO;
+	}
+	case KVM_ARM_SET_DEVICE_ADDR: {
+		struct kvm_arm_device_addr dev_addr;
+
+		if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
+			return -EFAULT;
+		return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
+	}
+	default:
+		return -EINVAL;
+	}
 }
 
 static void cpu_init_hyp_mode(void *vector)
@@ -960,6 +1096,24 @@ static int init_hyp_mode(void)
 		}
 	}
 
+	/*
+	 * Init HYP view of VGIC
+	 */
+	err = kvm_vgic_hyp_init();
+	if (err)
+		goto out_free_vfp;
+
+#ifdef CONFIG_KVM_ARM_VGIC
+	vgic_present = true;
+#endif
+
+	/*
+	 * Init HYP architected timer support
+	 */
+	err = kvm_timer_hyp_init();
+	if (err)
+		goto out_free_mappings;
+
 	kvm_info("Hyp mode initialized successfully\n");
 	return 0;
 out_free_vfp:
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index d782638c7ec0..4ea9a982269c 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -222,6 +222,10 @@ static const struct coproc_reg cp15_regs[] = {
 			NULL, reset_unknown, c13_TID_URO },
 	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
 			NULL, reset_unknown, c13_TID_PRIV },
+
+	/* CNTKCTL: swapped by interrupt.S. */
+	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
+			NULL, reset_val, c14_CNTKCTL, 0x00000000 },
 };
 
 /* Target specific emulation tables */
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index c5400d2e97ca..8ca87ab0919d 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -94,6 +94,9 @@ ENTRY(__kvm_vcpu_run)
 
 	save_host_regs
 
+	restore_vgic_state
+	restore_timer_state
+
 	@ Store hardware CP15 state and load guest state
 	read_cp15_state store_to_vcpu = 0
 	write_cp15_state read_from_vcpu = 1
@@ -187,6 +190,9 @@ after_vfp_restore:
 	read_cp15_state store_to_vcpu = 1
 	write_cp15_state read_from_vcpu = 0
 
+	save_timer_state
+	save_vgic_state
+
 	restore_host_regs
 	clrex				@ Clear exclusive monitor
 	mov	r0, r1			@ Return the return code
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 6a95d341e9c5..3c8f2f0b4c5e 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -1,3 +1,5 @@
+#include <linux/irqchip/arm-gic.h>
+
 #define VCPU_USR_REG(_reg_nr)	(VCPU_USR_REGS + (_reg_nr * 4))
 #define VCPU_USR_SP		(VCPU_USR_REG(13))
 #define VCPU_USR_LR		(VCPU_USR_REG(14))
@@ -298,6 +300,14 @@ vcpu	.req	r0		@ vcpu pointer always in r0
 	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
 	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
 	.endif
+
+	mrc	p15, 0, r2, c14, c1, 0	@ CNTKCTL
+
+	.if \store_to_vcpu == 0
+	push	{r2}
+	.else
+	str	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+	.endif
 .endm
 
 /*
@@ -309,6 +319,14 @@ vcpu	.req	r0		@ vcpu pointer always in r0
  */
 .macro write_cp15_state read_from_vcpu
 	.if \read_from_vcpu == 0
+	pop	{r2}
+	.else
+	ldr	r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
+	.endif
+
+	mcr	p15, 0, r2, c14, c1, 0	@ CNTKCTL
+
+	.if \read_from_vcpu == 0
 	pop	{r2-r12}
 	.else
 	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
@@ -369,6 +387,49 @@ vcpu	.req	r0		@ vcpu pointer always in r0
 * Assumes vcpu pointer in vcpu reg
 */
 .macro save_vgic_state
+#ifdef CONFIG_KVM_ARM_VGIC
+	/* Get VGIC VCTRL base into r2 */
+	ldr	r2, [vcpu, #VCPU_KVM]
+	ldr	r2, [r2, #KVM_VGIC_VCTRL]
+	cmp	r2, #0
+	beq	2f
+
+	/* Compute the address of struct vgic_cpu */
+	add	r11, vcpu, #VCPU_VGIC_CPU
+
+	/* Save all interesting registers */
+	ldr	r3, [r2, #GICH_HCR]
+	ldr	r4, [r2, #GICH_VMCR]
+	ldr	r5, [r2, #GICH_MISR]
+	ldr	r6, [r2, #GICH_EISR0]
+	ldr	r7, [r2, #GICH_EISR1]
+	ldr	r8, [r2, #GICH_ELRSR0]
+	ldr	r9, [r2, #GICH_ELRSR1]
+	ldr	r10, [r2, #GICH_APR]
+
+	str	r3, [r11, #VGIC_CPU_HCR]
+	str	r4, [r11, #VGIC_CPU_VMCR]
+	str	r5, [r11, #VGIC_CPU_MISR]
+	str	r6, [r11, #VGIC_CPU_EISR]
+	str	r7, [r11, #(VGIC_CPU_EISR + 4)]
+	str	r8, [r11, #VGIC_CPU_ELRSR]
+	str	r9, [r11, #(VGIC_CPU_ELRSR + 4)]
+	str	r10, [r11, #VGIC_CPU_APR]
+
+	/* Clear GICH_HCR */
+	mov	r5, #0
+	str	r5, [r2, #GICH_HCR]
+
+	/* Save list registers */
+	add	r2, r2, #GICH_LR0
+	add	r3, r11, #VGIC_CPU_LR
+	ldr	r4, [r11, #VGIC_CPU_NR_LR]
+1:	ldr	r6, [r2], #4
+	str	r6, [r3], #4
+	subs	r4, r4, #1
+	bne	1b
+2:
+#endif
 .endm
 
 /*
@@ -377,6 +438,109 @@ vcpu	.req	r0		@ vcpu pointer always in r0
 * Assumes vcpu pointer in vcpu reg
 */
 .macro restore_vgic_state
+#ifdef CONFIG_KVM_ARM_VGIC
+	/* Get VGIC VCTRL base into r2 */
+	ldr	r2, [vcpu, #VCPU_KVM]
+	ldr	r2, [r2, #KVM_VGIC_VCTRL]
+	cmp	r2, #0
+	beq	2f
+
+	/* Compute the address of struct vgic_cpu */
+	add	r11, vcpu, #VCPU_VGIC_CPU
+
+	/* We only restore a minimal set of registers */
+	ldr	r3, [r11, #VGIC_CPU_HCR]
+	ldr	r4, [r11, #VGIC_CPU_VMCR]
+	ldr	r8, [r11, #VGIC_CPU_APR]
+
+	str	r3, [r2, #GICH_HCR]
+	str	r4, [r2, #GICH_VMCR]
+	str	r8, [r2, #GICH_APR]
+
+	/* Restore list registers */
+	add	r2, r2, #GICH_LR0
+	add	r3, r11, #VGIC_CPU_LR
+	ldr	r4, [r11, #VGIC_CPU_NR_LR]
+1:	ldr	r6, [r3], #4
+	str	r6, [r2], #4
+	subs	r4, r4, #1
+	bne	1b
+2:
+#endif
+.endm
+
+#define CNTHCTL_PL1PCTEN	(1 << 0)
+#define CNTHCTL_PL1PCEN		(1 << 1)
+
+/*
+ * Save the timer state onto the VCPU and allow physical timer/counter access
+ * for the host.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ * Clobbers r2-r5
+ */
+.macro save_timer_state
+#ifdef CONFIG_KVM_ARM_TIMER
+	ldr	r4, [vcpu, #VCPU_KVM]
+	ldr	r2, [r4, #KVM_TIMER_ENABLED]
+	cmp	r2, #0
+	beq	1f
+
+	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
+	bic	r2, #1			@ Clear ENABLE
+	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+	isb
+
+	mrrc	p15, 3, r2, r3, c14	@ CNTV_CVAL
+	ldr	r4, =VCPU_TIMER_CNTV_CVAL
+	add	r5, vcpu, r4
+	strd	r2, r3, [r5]
+
+1:
+#endif
+	@ Allow physical timer/counter access for the host
+	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
+	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
+	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
+.endm
+
+/*
+ * Load the timer state from the VCPU and deny physical timer/counter access
+ * for the host.
+ *
+ * Assumes vcpu pointer in vcpu reg
+ * Clobbers r2-r5
+ */
+.macro restore_timer_state
+	@ Disallow physical timer access for the guest
+	@ Physical counter access is allowed
+	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
+	orr	r2, r2, #CNTHCTL_PL1PCTEN
+	bic	r2, r2, #CNTHCTL_PL1PCEN
+	mcr	p15, 4, r2, c14, c1, 0	@ CNTHCTL
+
+#ifdef CONFIG_KVM_ARM_TIMER
+	ldr	r4, [vcpu, #VCPU_KVM]
+	ldr	r2, [r4, #KVM_TIMER_ENABLED]
+	cmp	r2, #0
+	beq	1f
+
+	ldr	r2, [r4, #KVM_TIMER_CNTVOFF]
+	ldr	r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
+	mcrr	p15, 4, r2, r3, c14	@ CNTVOFF
+
+	ldr	r4, =VCPU_TIMER_CNTV_CVAL
+	add	r5, vcpu, r4
+	ldrd	r2, r3, [r5]
+	mcrr	p15, 3, r2, r3, c14	@ CNTV_CVAL
+	isb
+
+	ldr	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
+	and	r2, r2, #3
+	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+1:
+#endif
 .endm
 
 .equ vmentry,	0
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 0144baf82904..98a870ff1a5c 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -148,6 +148,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	if (mmio.is_write)
 		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
 
+	if (vgic_handle_mmio(vcpu, run, &mmio))
+		return 1;
+
 	kvm_prepare_mmio(run, &mmio);
 	return 0;
 }
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
new file mode 100644
index 000000000000..c9a17316e9fe
--- /dev/null
+++ b/arch/arm/kvm/vgic.c
@@ -0,0 +1,1506 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <linux/irqchip/arm-gic.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_mmu.h>
+
+/*
+ * How the whole thing works (courtesy of Christoffer Dall):
+ *
+ * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
+ *   something is pending
+ * - VGIC pending interrupts are stored on the vgic.irq_state vgic
+ *   bitmap (this bitmap is updated by both user land ioctls and guest
+ *   mmio ops, and other in-kernel peripherals such as the
+ *   arch. timers) and indicate the 'wire' state.
+ * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
+ *   recalculated
+ * - To calculate the oracle, we need info for each cpu from
+ *   compute_pending_for_cpu, which considers:
+ *   - PPI: dist->irq_state & dist->irq_enable
+ *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
+ *   - irq_spi_target is a 'formatted' version of the GICD_ICFGR
+ *     registers, stored on each vcpu. We only keep one bit of
+ *     information per interrupt, making sure that only one vcpu can
+ *     accept the interrupt.
+ * - The same is true when injecting an interrupt, except that we only
+ *   consider a single interrupt at a time. The irq_spi_cpu array
+ *   contains the target CPU for each SPI.
+ *
+ * The handling of level interrupts adds some extra complexity. We
+ * need to track when the interrupt has been EOIed, so we can sample
+ * the 'line' again. This is achieved as such:
+ *
+ * - When a level interrupt is moved onto a vcpu, the corresponding
+ *   bit in irq_active is set. As long as this bit is set, the line
+ *   will be ignored for further interrupts. The interrupt is injected
+ *   into the vcpu with the GICH_LR_EOI bit set (generate a
+ *   maintenance interrupt on EOI).
+ * - When the interrupt is EOIed, the maintenance interrupt fires,
+ *   and clears the corresponding bit in irq_active. This allow the
+ *   interrupt line to be sampled again.
+ */
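
To make the oracle above concrete, here is an illustrative sketch (not part of the patch) of the per-vcpu check it describes, written in terms of the accessors defined further down in this file; the actual compute_pending_for_cpu is the authoritative version and works on whole bitmaps rather than one interrupt at a time:

	/* Sketch only: is any interrupt deliverable to this vcpu right now? */
	static bool vcpu_has_pending_sketch(struct kvm_vcpu *vcpu)
	{
		struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
		int irq;

		for (irq = 0; irq < VGIC_NR_IRQS; irq++) {
			/* 'wire' state and enable bit, per the PPI rule above */
			if (!vgic_dist_irq_is_pending(vcpu, irq) ||
			    !vgic_irq_is_enabled(vcpu, irq))
				continue;
			/* SPIs must additionally target this vcpu (SPI rule) */
			if (irq >= VGIC_NR_PRIVATE_IRQS &&
			    !vgic_bitmap_get_irq_val(&dist->irq_spi_target[vcpu->vcpu_id],
						     vcpu->vcpu_id, irq))
				continue;
			return true;
		}
		return false;
	}
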
70
71#define VGIC_ADDR_UNDEF (-1)
72#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
73
74/* Physical address of vgic virtual cpu interface */
75static phys_addr_t vgic_vcpu_base;
76
77/* Virtual control interface base address */
78static void __iomem *vgic_vctrl_base;
79
80static struct device_node *vgic_node;
81
82#define ACCESS_READ_VALUE (1 << 0)
83#define ACCESS_READ_RAZ (0 << 0)
84#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
85#define ACCESS_WRITE_IGNORED (0 << 1)
86#define ACCESS_WRITE_SETBIT (1 << 1)
87#define ACCESS_WRITE_CLEARBIT (2 << 1)
88#define ACCESS_WRITE_VALUE (3 << 1)
89#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
90
91static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
92static void vgic_update_state(struct kvm *kvm);
93static void vgic_kick_vcpus(struct kvm *kvm);
94static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
95static u32 vgic_nr_lr;
96
97static unsigned int vgic_maint_irq;
98
99static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
100 int cpuid, u32 offset)
101{
102 offset >>= 2;
103 if (!offset)
104 return x->percpu[cpuid].reg;
105 else
106 return x->shared.reg + offset - 1;
107}
108
109static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
110 int cpuid, int irq)
111{
112 if (irq < VGIC_NR_PRIVATE_IRQS)
113 return test_bit(irq, x->percpu[cpuid].reg_ul);
114
115 return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
116}
117
118static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
119 int irq, int val)
120{
121 unsigned long *reg;
122
123 if (irq < VGIC_NR_PRIVATE_IRQS) {
124 reg = x->percpu[cpuid].reg_ul;
125 } else {
126 reg = x->shared.reg_ul;
127 irq -= VGIC_NR_PRIVATE_IRQS;
128 }
129
130 if (val)
131 set_bit(irq, reg);
132 else
133 clear_bit(irq, reg);
134}
135
136static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
137{
138 if (unlikely(cpuid >= VGIC_MAX_CPUS))
139 return NULL;
140 return x->percpu[cpuid].reg_ul;
141}
142
143static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
144{
145 return x->shared.reg_ul;
146}
147
148static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
149{
150 offset >>= 2;
151 BUG_ON(offset > (VGIC_NR_IRQS / 4));
152 if (offset < 4)
153 return x->percpu[cpuid] + offset;
154 else
155 return x->shared + offset - 8;
156}
157
158#define VGIC_CFG_LEVEL 0
159#define VGIC_CFG_EDGE 1
160
161static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
162{
163 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
164 int irq_val;
165
166 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
167 return irq_val == VGIC_CFG_EDGE;
168}
169
170static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
171{
172 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
173
174 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
175}
176
177static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
178{
179 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
180
181 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
182}
183
184static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
185{
186 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
187
188 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
189}
190
191static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
192{
193 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
194
195 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
196}
197
198static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
199{
200 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
201
202 return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
203}
204
205static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
206{
207 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
208
209 vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
210}
211
212static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
213{
214 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
215
216 vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
217}
218
219static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
220{
221 if (irq < VGIC_NR_PRIVATE_IRQS)
222 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
223 else
224 set_bit(irq - VGIC_NR_PRIVATE_IRQS,
225 vcpu->arch.vgic_cpu.pending_shared);
226}
227
228static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
229{
230 if (irq < VGIC_NR_PRIVATE_IRQS)
231 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
232 else
233 clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
234 vcpu->arch.vgic_cpu.pending_shared);
235}
236
237static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
238{
239 return *((u32 *)mmio->data) & mask;
240}
241
242static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
243{
244 *((u32 *)mmio->data) = value & mask;
245}
246
247/**
248 * vgic_reg_access - access vgic register
249 * @mmio: pointer to the data describing the mmio access
250 * @reg: pointer to the virtual backing of vgic distributor data
251 * @offset: least significant 2 bits used for word offset
252 * @mode: ACCESS_ mode (see defines above)
253 *
254 * Helper to make vgic register access easier using one of the access
255 * modes defined for vgic register access
256 * (read,raz,write-ignored,setbit,clearbit,write)
257 */
258static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
259 phys_addr_t offset, int mode)
260{
261 int word_offset = (offset & 3) * 8;
262 u32 mask = (1UL << (mmio->len * 8)) - 1;
263 u32 regval;
264
265 /*
266 * Any alignment fault should have been delivered to the guest
267 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
268 */
269
270 if (reg) {
271 regval = *reg;
272 } else {
273 BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
274 regval = 0;
275 }
276
277 if (mmio->is_write) {
278 u32 data = mmio_data_read(mmio, mask) << word_offset;
279 switch (ACCESS_WRITE_MASK(mode)) {
280 case ACCESS_WRITE_IGNORED:
281 return;
282
283 case ACCESS_WRITE_SETBIT:
284 regval |= data;
285 break;
286
287 case ACCESS_WRITE_CLEARBIT:
288 regval &= ~data;
289 break;
290
291 case ACCESS_WRITE_VALUE:
292 regval = (regval & ~(mask << word_offset)) | data;
293 break;
294 }
295 *reg = regval;
296 } else {
297 switch (ACCESS_READ_MASK(mode)) {
298 case ACCESS_READ_RAZ:
299 regval = 0;
300 /* fall through */
301
302 case ACCESS_READ_VALUE:
303 mmio_data_write(mmio, mask, regval >> word_offset);
304 }
305 }
306}
307
308static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
309 struct kvm_exit_mmio *mmio, phys_addr_t offset)
310{
311 u32 reg;
312 u32 word_offset = offset & 3;
313
314 switch (offset & ~3) {
315 case 0: /* CTLR */
316 reg = vcpu->kvm->arch.vgic.enabled;
317 vgic_reg_access(mmio, &reg, word_offset,
318 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
319 if (mmio->is_write) {
320 vcpu->kvm->arch.vgic.enabled = reg & 1;
321 vgic_update_state(vcpu->kvm);
322 return true;
323 }
324 break;
325
326 case 4: /* TYPER */
327 reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
328 reg |= (VGIC_NR_IRQS >> 5) - 1;
329 vgic_reg_access(mmio, &reg, word_offset,
330 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
331 break;
332
333 case 8: /* IIDR */
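		/* Editor's note: JEP106 implementer 0x43b (ARM); product
		 * id 0x4b, presumably 'K' for KVM. */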
334 reg = 0x4B00043B;
335 vgic_reg_access(mmio, &reg, word_offset,
336 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
337 break;
338 }
339
340 return false;
341}
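
/*
 * Illustrative example (editor's note): with 4 online vcpus and assuming
 * VGIC_NR_IRQS == 256, GICD_TYPER above reads as (3 << 5) | 7 -- the CPU
 * count minus one in bits 7:5, and ITLinesNumber in bits 4:0 encoding
 * 32 * (N + 1) supported interrupt IDs.
 */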
342
343static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
344 struct kvm_exit_mmio *mmio, phys_addr_t offset)
345{
346 vgic_reg_access(mmio, NULL, offset,
347 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
348 return false;
349}
350
351static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
352 struct kvm_exit_mmio *mmio,
353 phys_addr_t offset)
354{
355 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
356 vcpu->vcpu_id, offset);
357 vgic_reg_access(mmio, reg, offset,
358 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
359 if (mmio->is_write) {
360 vgic_update_state(vcpu->kvm);
361 return true;
362 }
363
364 return false;
365}
366
367static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
368 struct kvm_exit_mmio *mmio,
369 phys_addr_t offset)
370{
371 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
372 vcpu->vcpu_id, offset);
373 vgic_reg_access(mmio, reg, offset,
374 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
375 if (mmio->is_write) {
376 if (offset < 4) /* Force SGI enabled */
377 *reg |= 0xffff;
378 vgic_retire_disabled_irqs(vcpu);
379 vgic_update_state(vcpu->kvm);
380 return true;
381 }
382
383 return false;
384}
385
386static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
387 struct kvm_exit_mmio *mmio,
388 phys_addr_t offset)
389{
390 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
391 vcpu->vcpu_id, offset);
392 vgic_reg_access(mmio, reg, offset,
393 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
394 if (mmio->is_write) {
395 vgic_update_state(vcpu->kvm);
396 return true;
397 }
398
399 return false;
400}
401
402static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
403 struct kvm_exit_mmio *mmio,
404 phys_addr_t offset)
405{
406 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
407 vcpu->vcpu_id, offset);
408 vgic_reg_access(mmio, reg, offset,
409 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
410 if (mmio->is_write) {
411 vgic_update_state(vcpu->kvm);
412 return true;
413 }
414
415 return false;
416}
417
418static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
419 struct kvm_exit_mmio *mmio,
420 phys_addr_t offset)
421{
422 u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
423 vcpu->vcpu_id, offset);
424 vgic_reg_access(mmio, reg, offset,
425 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
426 return false;
427}
428
429#define GICD_ITARGETSR_SIZE 32
430#define GICD_CPUTARGETS_BITS 8
431#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
432static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
433{
434 struct vgic_dist *dist = &kvm->arch.vgic;
435 struct kvm_vcpu *vcpu;
436 int i, c;
437 unsigned long *bmap;
438 u32 val = 0;
439
440 irq -= VGIC_NR_PRIVATE_IRQS;
441
442 kvm_for_each_vcpu(c, vcpu, kvm) {
443 bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
444 for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
445 if (test_bit(irq + i, bmap))
446 val |= 1 << (c + i * 8);
447 }
448
449 return val;
450}
451
452static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
453{
454 struct vgic_dist *dist = &kvm->arch.vgic;
455 struct kvm_vcpu *vcpu;
456 int i, c;
457 unsigned long *bmap;
458 u32 target;
459
460 irq -= VGIC_NR_PRIVATE_IRQS;
461
462 /*
463 * Pick the LSB in each byte. This ensures we target exactly
464 * one vcpu per IRQ. If the byte is null, assume we target
465 * CPU0.
466 */
467 for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
468 int shift = i * GICD_CPUTARGETS_BITS;
469 target = ffs((val >> shift) & 0xffU);
470 target = target ? (target - 1) : 0;
471 dist->irq_spi_cpu[irq + i] = target;
472 kvm_for_each_vcpu(c, vcpu, kvm) {
473 bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
474 if (c == target)
475 set_bit(irq + i, bmap);
476 else
477 clear_bit(irq + i, bmap);
478 }
479 }
480}
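
/*
 * Illustrative example (editor's note): writing 0x00000302 to an
 * ITARGETSR routes its first SPI to vcpu1 (lowest set bit of 0x02), its
 * second to vcpu0 (lowest set bit of 0x03), and the remaining two to
 * vcpu0 by default, since their target bytes are zero.
 */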
481
482static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
483 struct kvm_exit_mmio *mmio,
484 phys_addr_t offset)
485{
486 u32 reg;
487
488	/* We treat the banked interrupt targets as read-only */
489 if (offset < 32) {
490 u32 roreg = 1 << vcpu->vcpu_id;
491 roreg |= roreg << 8;
492 roreg |= roreg << 16;
493
494 vgic_reg_access(mmio, &roreg, offset,
495 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
496 return false;
497 }
498
499 reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
500 vgic_reg_access(mmio, &reg, offset,
501 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
502 if (mmio->is_write) {
503 vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
504 vgic_update_state(vcpu->kvm);
505 return true;
506 }
507
508 return false;
509}
510
511static u32 vgic_cfg_expand(u16 val)
512{
513 u32 res = 0;
514 int i;
515
516 /*
517 * Turn a 16bit value like abcd...mnop into a 32bit word
518 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
519 */
520 for (i = 0; i < 16; i++)
521 res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
522
523 return res;
524}
525
526static u16 vgic_cfg_compress(u32 val)
527{
528 u16 res = 0;
529 int i;
530
531 /*
532 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into 16bit value like
533 * abcd...mnop which is what we really care about.
534 */
535 for (i = 0; i < 16; i++)
536 res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
537
538 return res;
539}
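
/*
 * Worked example (editor's note): assuming VGIC_CFG_EDGE is 1, as the
 * masking above implies, vgic_cfg_expand(0x0003) returns 0x0000000a
 * (bits 0 and 1 move to bits 1 and 3), and vgic_cfg_compress(0x0000000a)
 * returns 0x0003, so the two functions are inverses over the stored bit.
 */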
540
541/*
542 * The distributor uses 2 bits per IRQ for the CFG register, but the
543 * LSB is always 0. As such, we only keep the upper bit, and use the
544 * two functions above to compress/expand the bits.
545 */
546static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
547 struct kvm_exit_mmio *mmio, phys_addr_t offset)
548{
549 u32 val;
550 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
551 vcpu->vcpu_id, offset >> 1);
552 if (offset & 2)
553 val = *reg >> 16;
554 else
555 val = *reg & 0xffff;
556
557 val = vgic_cfg_expand(val);
558 vgic_reg_access(mmio, &val, offset,
559 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
560 if (mmio->is_write) {
561 if (offset < 4) {
562 *reg = ~0U; /* Force PPIs/SGIs to 1 */
563 return false;
564 }
565
566 val = vgic_cfg_compress(val);
567 if (offset & 2) {
568 *reg &= 0xffff;
569 *reg |= val << 16;
570 } else {
571 *reg &= 0xffff << 16;
572 *reg |= val;
573 }
574 }
575
576 return false;
577}
578
579static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
580 struct kvm_exit_mmio *mmio, phys_addr_t offset)
581{
582 u32 reg;
583 vgic_reg_access(mmio, &reg, offset,
584 ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
585 if (mmio->is_write) {
586 vgic_dispatch_sgi(vcpu, reg);
587 vgic_update_state(vcpu->kvm);
588 return true;
589 }
590
591 return false;
592}
593
594/*
595 * I would have liked to use the kvm_bus_io_*() API instead, but it
596 * cannot cope with banked registers (only the VM pointer is passed
597 * around, and we need the vcpu). One of these days, someone please
598 * fix it!
599 */
600struct mmio_range {
601 phys_addr_t base;
602 unsigned long len;
603 bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
604 phys_addr_t offset);
605};
606
607static const struct mmio_range vgic_ranges[] = {
608 {
609 .base = GIC_DIST_CTRL,
610 .len = 12,
611 .handle_mmio = handle_mmio_misc,
612 },
613 {
614 .base = GIC_DIST_IGROUP,
615 .len = VGIC_NR_IRQS / 8,
616 .handle_mmio = handle_mmio_raz_wi,
617 },
618 {
619 .base = GIC_DIST_ENABLE_SET,
620 .len = VGIC_NR_IRQS / 8,
621 .handle_mmio = handle_mmio_set_enable_reg,
622 },
623 {
624 .base = GIC_DIST_ENABLE_CLEAR,
625 .len = VGIC_NR_IRQS / 8,
626 .handle_mmio = handle_mmio_clear_enable_reg,
627 },
628 {
629 .base = GIC_DIST_PENDING_SET,
630 .len = VGIC_NR_IRQS / 8,
631 .handle_mmio = handle_mmio_set_pending_reg,
632 },
633 {
634 .base = GIC_DIST_PENDING_CLEAR,
635 .len = VGIC_NR_IRQS / 8,
636 .handle_mmio = handle_mmio_clear_pending_reg,
637 },
638 {
639 .base = GIC_DIST_ACTIVE_SET,
640 .len = VGIC_NR_IRQS / 8,
641 .handle_mmio = handle_mmio_raz_wi,
642 },
643 {
644 .base = GIC_DIST_ACTIVE_CLEAR,
645 .len = VGIC_NR_IRQS / 8,
646 .handle_mmio = handle_mmio_raz_wi,
647 },
648 {
649 .base = GIC_DIST_PRI,
650 .len = VGIC_NR_IRQS,
651 .handle_mmio = handle_mmio_priority_reg,
652 },
653 {
654 .base = GIC_DIST_TARGET,
655 .len = VGIC_NR_IRQS,
656 .handle_mmio = handle_mmio_target_reg,
657 },
658 {
659 .base = GIC_DIST_CONFIG,
660 .len = VGIC_NR_IRQS / 4,
661 .handle_mmio = handle_mmio_cfg_reg,
662 },
663 {
664 .base = GIC_DIST_SOFTINT,
665 .len = 4,
666 .handle_mmio = handle_mmio_sgi_reg,
667 },
668 {}
669};
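
/*
 * Editor's note: .len is the size of each register block in bytes, and
 * the offset handed to a handler is relative to that block's .base (see
 * the offset computation in vgic_handle_mmio below).
 */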
670
671static const
672struct mmio_range *find_matching_range(const struct mmio_range *ranges,
673 struct kvm_exit_mmio *mmio,
674 phys_addr_t base)
675{
676 const struct mmio_range *r = ranges;
677 phys_addr_t addr = mmio->phys_addr - base;
678
679 while (r->len) {
680 if (addr >= r->base &&
681 (addr + mmio->len) <= (r->base + r->len))
682 return r;
683 r++;
684 }
685
686 return NULL;
687}
688
689/**
690 * vgic_handle_mmio - handle an in-kernel MMIO access
691 * @vcpu: pointer to the vcpu performing the access
692 * @run: pointer to the kvm_run structure
693 * @mmio: pointer to the data describing the access
694 *
695 * returns true if the MMIO access has been performed in kernel space,
696 * and false if it needs to be emulated in user space.
697 */
698bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
699 struct kvm_exit_mmio *mmio)
700{
701 const struct mmio_range *range;
702 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
703 unsigned long base = dist->vgic_dist_base;
704 bool updated_state;
705 unsigned long offset;
706
707 if (!irqchip_in_kernel(vcpu->kvm) ||
708 mmio->phys_addr < base ||
709 (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
710 return false;
711
712 /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
713 if (mmio->len > 4) {
714 kvm_inject_dabt(vcpu, mmio->phys_addr);
715 return true;
716 }
717
718 range = find_matching_range(vgic_ranges, mmio, base);
719 if (unlikely(!range || !range->handle_mmio)) {
720 pr_warn("Unhandled access %d %08llx %d\n",
721 mmio->is_write, mmio->phys_addr, mmio->len);
722 return false;
723 }
724
725 spin_lock(&vcpu->kvm->arch.vgic.lock);
726 offset = mmio->phys_addr - range->base - base;
727 updated_state = range->handle_mmio(vcpu, mmio, offset);
728 spin_unlock(&vcpu->kvm->arch.vgic.lock);
729 kvm_prepare_mmio(run, mmio);
730 kvm_handle_mmio_return(vcpu, run);
731
732 if (updated_state)
733 vgic_kick_vcpus(vcpu->kvm);
734
735 return true;
736}
737
738static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
739{
740 struct kvm *kvm = vcpu->kvm;
741 struct vgic_dist *dist = &kvm->arch.vgic;
742 int nrcpus = atomic_read(&kvm->online_vcpus);
743 u8 target_cpus;
744 int sgi, mode, c, vcpu_id;
745
746 vcpu_id = vcpu->vcpu_id;
747
748 sgi = reg & 0xf;
749 target_cpus = (reg >> 16) & 0xff;
750 mode = (reg >> 24) & 3;
751
752 switch (mode) {
753 case 0:
754 if (!target_cpus)
755			return;
756		break;
757 case 1:
758 target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
759 break;
760
761 case 2:
762 target_cpus = 1 << vcpu_id;
763 break;
764 }
765
766 kvm_for_each_vcpu(c, vcpu, kvm) {
767 if (target_cpus & 1) {
768 /* Flag the SGI as pending */
769 vgic_dist_irq_set(vcpu, sgi);
770 dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
771 kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
772 }
773
774 target_cpus >>= 1;
775 }
776}
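
/*
 * Illustrative example (editor's note): a guest write of 0x00020003 to
 * GICD_SGIR requests SGI3 with mode 0 and target list 0x02, i.e. the SGI
 * is made pending on vcpu1 only, with this vcpu recorded as the source.
 */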
777
778static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
779{
780 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
781 unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
782 unsigned long pending_private, pending_shared;
783 int vcpu_id;
784
785 vcpu_id = vcpu->vcpu_id;
786 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
787 pend_shared = vcpu->arch.vgic_cpu.pending_shared;
788
789 pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
790 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
791 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
792
793 pending = vgic_bitmap_get_shared_map(&dist->irq_state);
794 enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
795 bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
796 bitmap_and(pend_shared, pend_shared,
797 vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
798 VGIC_NR_SHARED_IRQS);
799
800 pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
801 pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
802 return (pending_private < VGIC_NR_PRIVATE_IRQS ||
803 pending_shared < VGIC_NR_SHARED_IRQS);
804}
805
806/*
807 * Update the interrupt state and determine which CPUs have pending
808 * interrupts. Must be called with distributor lock held.
809 */
810static void vgic_update_state(struct kvm *kvm)
811{
812 struct vgic_dist *dist = &kvm->arch.vgic;
813 struct kvm_vcpu *vcpu;
814 int c;
815
816 if (!dist->enabled) {
817 set_bit(0, &dist->irq_pending_on_cpu);
818 return;
819 }
820
821 kvm_for_each_vcpu(c, vcpu, kvm) {
822 if (compute_pending_for_cpu(vcpu)) {
823 pr_debug("CPU%d has pending interrupts\n", c);
824 set_bit(c, &dist->irq_pending_on_cpu);
825 }
826 }
827}
828
829#define LR_CPUID(lr) \
830 (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
831#define MK_LR_PEND(src, irq) \
832 (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
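
/*
 * Illustrative example (editor's note): MK_LR_PEND(3, 1) builds a list
 * register for SGI1 from vcpu3: the pending bit (28) set, source CPU 3
 * in the PHYSID field (bits 12:10), and virtual ID 1 in bits 9:0.
 */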
833
834/*
835 * An interrupt may have been disabled after being made pending on the
836 * CPU interface (the classic case is a timer running while we're
837 * rebooting the guest - the interrupt would kick as soon as the CPU
838 * interface gets enabled, with deadly consequences).
839 *
840 * The solution is to examine already active LRs, and check the
841 * interrupt is still enabled. If not, just retire it.
842 */
843static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
844{
845 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
846 int lr;
847
848 for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
849 int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
850
851 if (!vgic_irq_is_enabled(vcpu, irq)) {
852 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
853 clear_bit(lr, vgic_cpu->lr_used);
854 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
855 if (vgic_irq_is_active(vcpu, irq))
856 vgic_irq_clear_active(vcpu, irq);
857 }
858 }
859}
860
861/*
862 * Queue an interrupt to a CPU virtual interface. Return true on success,
863 * or false if it wasn't possible to queue it.
864 */
865static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
866{
867 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
868 int lr;
869
870 /* Sanitize the input... */
871 BUG_ON(sgi_source_id & ~7);
872 BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
873 BUG_ON(irq >= VGIC_NR_IRQS);
874
875 kvm_debug("Queue IRQ%d\n", irq);
876
877 lr = vgic_cpu->vgic_irq_lr_map[irq];
878
879 /* Do we have an active interrupt for the same CPUID? */
880 if (lr != LR_EMPTY &&
881 (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
882 kvm_debug("LR%d piggyback for IRQ%d %x\n",
883 lr, irq, vgic_cpu->vgic_lr[lr]);
884 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
885 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
886
887 goto out;
888 }
889
890 /* Try to use another LR for this interrupt */
891 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
892 vgic_cpu->nr_lr);
893 if (lr >= vgic_cpu->nr_lr)
894 return false;
895
896 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
897 vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
898 vgic_cpu->vgic_irq_lr_map[irq] = lr;
899 set_bit(lr, vgic_cpu->lr_used);
900
901out:
902 if (!vgic_irq_is_edge(vcpu, irq))
903 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
904
905 return true;
906}
907
908static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
909{
910 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
911 unsigned long sources;
912 int vcpu_id = vcpu->vcpu_id;
913 int c;
914
915 sources = dist->irq_sgi_sources[vcpu_id][irq];
916
917 for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
918 if (vgic_queue_irq(vcpu, c, irq))
919 clear_bit(c, &sources);
920 }
921
922 dist->irq_sgi_sources[vcpu_id][irq] = sources;
923
924 /*
925 * If the sources bitmap has been cleared it means that we
926 * could queue all the SGIs onto link registers (see the
927 * clear_bit above), and therefore we are done with them in
928 * our emulated gic and can get rid of them.
929 */
930 if (!sources) {
931 vgic_dist_irq_clear(vcpu, irq);
932 vgic_cpu_irq_clear(vcpu, irq);
933 return true;
934 }
935
936 return false;
937}
938
939static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
940{
941 if (vgic_irq_is_active(vcpu, irq))
942 return true; /* level interrupt, already queued */
943
944 if (vgic_queue_irq(vcpu, 0, irq)) {
945 if (vgic_irq_is_edge(vcpu, irq)) {
946 vgic_dist_irq_clear(vcpu, irq);
947 vgic_cpu_irq_clear(vcpu, irq);
948 } else {
949 vgic_irq_set_active(vcpu, irq);
950 }
951
952 return true;
953 }
954
955 return false;
956}
957
958/*
959 * Fill the list registers with pending interrupts before running the
960 * guest.
961 */
962static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
963{
964 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
965 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
966 int i, vcpu_id;
967 int overflow = 0;
968
969 vcpu_id = vcpu->vcpu_id;
970
971 /*
972 * We may not have any pending interrupt, or the interrupts
973 * may have been serviced from another vcpu. In all cases,
974 * move along.
975 */
976 if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
977 pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
978 goto epilog;
979 }
980
981 /* SGIs */
982 for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
983 if (!vgic_queue_sgi(vcpu, i))
984 overflow = 1;
985 }
986
987 /* PPIs */
988 for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
989 if (!vgic_queue_hwirq(vcpu, i))
990 overflow = 1;
991 }
992
993 /* SPIs */
994 for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
995 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
996 overflow = 1;
997 }
998
999epilog:
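	/*
	 * Editor's note: GICH_HCR_UIE requests a maintenance interrupt
	 * once the list registers are (almost) empty, so anything we
	 * failed to queue above gets another chance on a later entry.
	 */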
1000 if (overflow) {
1001 vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
1002 } else {
1003 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1004 /*
1005 * We're about to run this VCPU, and we've consumed
1006 * everything the distributor had in store for
1007 * us. Claim we don't have anything pending. We'll
1008 * adjust that if needed while exiting.
1009 */
1010 clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
1011 }
1012}
1013
1014static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1015{
1016 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1017 bool level_pending = false;
1018
1019 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
1020
1021 /*
1022 * We do not need to take the distributor lock here, since the only
1023 * action we perform is clearing the irq_active_bit for an EOIed
1024 * level interrupt. There is a potential race with
1025 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
1026 * check if the interrupt is already active. Two possibilities:
1027 *
1028 * - The queuing is occurring on the same vcpu: cannot happen,
1029 * as we're already in the context of this vcpu, and
1030 * executing the handler
1031 * - The interrupt has been migrated to another vcpu, and we
1032 * ignore this interrupt for this run. Big deal. It is still
1033 * pending though, and will get considered when this vcpu
1034 * exits.
1035 */
1036 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
1037 /*
1038 * Some level interrupts have been EOIed. Clear their
1039 * active bit.
1040 */
1041 int lr, irq;
1042
1043 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
1044 vgic_cpu->nr_lr) {
1045 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1046
1047 vgic_irq_clear_active(vcpu, irq);
1048 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
1049
1050 /* Any additional pending interrupt? */
1051 if (vgic_dist_irq_is_pending(vcpu, irq)) {
1052 vgic_cpu_irq_set(vcpu, irq);
1053 level_pending = true;
1054 } else {
1055 vgic_cpu_irq_clear(vcpu, irq);
1056 }
1057 }
1058 }
1059
1060 if (vgic_cpu->vgic_misr & GICH_MISR_U)
1061 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1062
1063 return level_pending;
1064}
1065
1066/*
1067 * Sync back the VGIC state after a guest run. We do not really touch
1068 * the distributor here (the irq_pending_on_cpu bit is safe to set),
1069 * so there is no need for taking its lock.
1070 */
1071static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1072{
1073 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1074 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1075 int lr, pending;
1076 bool level_pending;
1077
1078 level_pending = vgic_process_maintenance(vcpu);
1079
1080 /* Clear mappings for empty LRs */
1081 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
1082 vgic_cpu->nr_lr) {
1083 int irq;
1084
1085 if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
1086 continue;
1087
1088 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1089
1090 BUG_ON(irq >= VGIC_NR_IRQS);
1091 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
1092 }
1093
1094 /* Check if we still have something up our sleeve... */
1095 pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
1096 vgic_cpu->nr_lr);
1097 if (level_pending || pending < vgic_cpu->nr_lr)
1098 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1099}
1100
1101void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1102{
1103 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1104
1105 if (!irqchip_in_kernel(vcpu->kvm))
1106 return;
1107
1108 spin_lock(&dist->lock);
1109 __kvm_vgic_flush_hwstate(vcpu);
1110 spin_unlock(&dist->lock);
1111}
1112
1113void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1114{
1115 if (!irqchip_in_kernel(vcpu->kvm))
1116 return;
1117
1118 __kvm_vgic_sync_hwstate(vcpu);
1119}
1120
1121int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1122{
1123 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1124
1125 if (!irqchip_in_kernel(vcpu->kvm))
1126 return 0;
1127
1128 return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1129}
1130
1131static void vgic_kick_vcpus(struct kvm *kvm)
1132{
1133 struct kvm_vcpu *vcpu;
1134 int c;
1135
1136 /*
1137 * We've injected an interrupt, time to find out who deserves
1138 * a good kick...
1139 */
1140 kvm_for_each_vcpu(c, vcpu, kvm) {
1141 if (kvm_vgic_vcpu_pending_irq(vcpu))
1142 kvm_vcpu_kick(vcpu);
1143 }
1144}
1145
1146static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1147{
1148 int is_edge = vgic_irq_is_edge(vcpu, irq);
1149 int state = vgic_dist_irq_is_pending(vcpu, irq);
1150
1151 /*
1152 * Only inject an interrupt if:
1153 * - edge triggered and we have a rising edge
1154 * - level triggered and we change level
1155 */
1156 if (is_edge)
1157 return level > state;
1158 else
1159 return level != state;
1160}
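
/*
 * Illustrative example (editor's note): injecting level == 1 on an edge
 * IRQ that is already pending is ignored (no new rising edge), whereas a
 * level IRQ is re-evaluated on every change of the line state.
 */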
1161
1162static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
1163 unsigned int irq_num, bool level)
1164{
1165 struct vgic_dist *dist = &kvm->arch.vgic;
1166 struct kvm_vcpu *vcpu;
1167 int is_edge, is_level;
1168 int enabled;
1169 bool ret = true;
1170
1171 spin_lock(&dist->lock);
1172
1173 vcpu = kvm_get_vcpu(kvm, cpuid);
1174 is_edge = vgic_irq_is_edge(vcpu, irq_num);
1175 is_level = !is_edge;
1176
1177 if (!vgic_validate_injection(vcpu, irq_num, level)) {
1178 ret = false;
1179 goto out;
1180 }
1181
1182 if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1183 cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1184 vcpu = kvm_get_vcpu(kvm, cpuid);
1185 }
1186
1187 kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1188
1189 if (level)
1190 vgic_dist_irq_set(vcpu, irq_num);
1191 else
1192 vgic_dist_irq_clear(vcpu, irq_num);
1193
1194 enabled = vgic_irq_is_enabled(vcpu, irq_num);
1195
1196 if (!enabled) {
1197 ret = false;
1198 goto out;
1199 }
1200
1201 if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
1202 /*
1203 * Level interrupt in progress, will be picked up
1204 * when EOId.
1205 */
1206 ret = false;
1207 goto out;
1208 }
1209
1210 if (level) {
1211 vgic_cpu_irq_set(vcpu, irq_num);
1212 set_bit(cpuid, &dist->irq_pending_on_cpu);
1213 }
1214
1215out:
1216 spin_unlock(&dist->lock);
1217
1218 return ret;
1219}
1220
1221/**
1222 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1223 * @kvm: The VM structure pointer
1224 * @cpuid: The CPU for PPIs
1225 * @irq_num: The IRQ number that is assigned to the device
1226 * @level: Edge-triggered: true: to trigger the interrupt
1227 * false: to ignore the call
1228 *	       Level-sensitive: true:  activates an interrupt
1229 * false: deactivates an interrupt
1230 *
1231 * The GIC is not concerned with devices being active-LOW or active-HIGH for
1232 * level-sensitive interrupts. You can think of the level parameter as 1
1233 * being HIGH and 0 being LOW and all devices being active-HIGH.
1234 */
1235int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1236 bool level)
1237{
1238 if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
1239 vgic_kick_vcpus(kvm);
1240
1241 return 0;
1242}
1243
1244static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1245{
1246 /*
1247 * We cannot rely on the vgic maintenance interrupt to be
1248 * delivered synchronously. This means we can only use it to
1249 * exit the VM, and we perform the handling of EOIed
1250 * interrupts on the exit path (see vgic_process_maintenance).
1251 */
1252 return IRQ_HANDLED;
1253}
1254
1255int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
1256{
1257 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1258 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1259 int i;
1260
1261 if (!irqchip_in_kernel(vcpu->kvm))
1262 return 0;
1263
1264 if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
1265 return -EBUSY;
1266
1267 for (i = 0; i < VGIC_NR_IRQS; i++) {
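		/*
		 * Editor's note: the first VGIC_NR_PPIS interrupt IDs are
		 * numerically the SGIs (assuming both constants are 16, as
		 * elsewhere in this series), which must always be enabled.
		 */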
1268 if (i < VGIC_NR_PPIS)
1269 vgic_bitmap_set_irq_val(&dist->irq_enabled,
1270 vcpu->vcpu_id, i, 1);
1271 if (i < VGIC_NR_PRIVATE_IRQS)
1272 vgic_bitmap_set_irq_val(&dist->irq_cfg,
1273 vcpu->vcpu_id, i, VGIC_CFG_EDGE);
1274
1275 vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
1276 }
1277
1278 /*
1279 * By forcing VMCR to zero, the GIC will restore the binary
1280 * points to their reset values. Anything else resets to zero
1281 * anyway.
1282 */
1283 vgic_cpu->vgic_vmcr = 0;
1284
1285 vgic_cpu->nr_lr = vgic_nr_lr;
1286 vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
1287
1288 return 0;
1289}
1290
1291static void vgic_init_maintenance_interrupt(void *info)
1292{
1293 enable_percpu_irq(vgic_maint_irq, 0);
1294}
1295
1296static int vgic_cpu_notify(struct notifier_block *self,
1297 unsigned long action, void *cpu)
1298{
1299 switch (action) {
1300 case CPU_STARTING:
1301 case CPU_STARTING_FROZEN:
1302 vgic_init_maintenance_interrupt(NULL);
1303 break;
1304 case CPU_DYING:
1305 case CPU_DYING_FROZEN:
1306 disable_percpu_irq(vgic_maint_irq);
1307 break;
1308 }
1309
1310 return NOTIFY_OK;
1311}
1312
1313static struct notifier_block vgic_cpu_nb = {
1314 .notifier_call = vgic_cpu_notify,
1315};
1316
1317int kvm_vgic_hyp_init(void)
1318{
1319 int ret;
1320 struct resource vctrl_res;
1321 struct resource vcpu_res;
1322
1323 vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
1324 if (!vgic_node) {
1325 kvm_err("error: no compatible vgic node in DT\n");
1326 return -ENODEV;
1327 }
1328
1329 vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
1330 if (!vgic_maint_irq) {
1331 kvm_err("error getting vgic maintenance irq from DT\n");
1332 ret = -ENXIO;
1333 goto out;
1334 }
1335
1336 ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
1337 "vgic", kvm_get_running_vcpus());
1338 if (ret) {
1339 kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
1340 goto out;
1341 }
1342
1343 ret = register_cpu_notifier(&vgic_cpu_nb);
1344 if (ret) {
1345 kvm_err("Cannot register vgic CPU notifier\n");
1346 goto out_free_irq;
1347 }
1348
1349 ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
1350 if (ret) {
1351 kvm_err("Cannot obtain VCTRL resource\n");
1352 goto out_free_irq;
1353 }
1354
1355 vgic_vctrl_base = of_iomap(vgic_node, 2);
1356 if (!vgic_vctrl_base) {
1357 kvm_err("Cannot ioremap VCTRL\n");
1358 ret = -ENOMEM;
1359 goto out_free_irq;
1360 }
1361
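	/* GICH_VTR[5:0] holds the number of implemented LRs minus one. */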
1362 vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
1363 vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
1364
1365 ret = create_hyp_io_mappings(vgic_vctrl_base,
1366 vgic_vctrl_base + resource_size(&vctrl_res),
1367 vctrl_res.start);
1368 if (ret) {
1369 kvm_err("Cannot map VCTRL into hyp\n");
1370 goto out_unmap;
1371 }
1372
1373 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1374 vctrl_res.start, vgic_maint_irq);
1375 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1376
1377 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1378 kvm_err("Cannot obtain VCPU resource\n");
1379 ret = -ENXIO;
1380 goto out_unmap;
1381 }
1382 vgic_vcpu_base = vcpu_res.start;
1383
1384 goto out;
1385
1386out_unmap:
1387 iounmap(vgic_vctrl_base);
1388out_free_irq:
1389 free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
1390out:
1391 of_node_put(vgic_node);
1392 return ret;
1393}
1394
1395int kvm_vgic_init(struct kvm *kvm)
1396{
1397 int ret = 0, i;
1398
1399 mutex_lock(&kvm->lock);
1400
1401 if (vgic_initialized(kvm))
1402 goto out;
1403
1404 if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
1405 IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
1406 kvm_err("Need to set vgic cpu and dist addresses first\n");
1407 ret = -ENXIO;
1408 goto out;
1409 }
1410
1411 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
1412 vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
1413 if (ret) {
1414 kvm_err("Unable to remap VGIC CPU to VCPU\n");
1415 goto out;
1416 }
1417
1418 for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
1419 vgic_set_target_reg(kvm, 0, i);
1420
1421 kvm_timer_init(kvm);
1422 kvm->arch.vgic.ready = true;
1423out:
1424 mutex_unlock(&kvm->lock);
1425 return ret;
1426}
1427
1428int kvm_vgic_create(struct kvm *kvm)
1429{
1430 int ret = 0;
1431
1432 mutex_lock(&kvm->lock);
1433
1434 if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
1435 ret = -EEXIST;
1436 goto out;
1437 }
1438
1439 spin_lock_init(&kvm->arch.vgic.lock);
1440 kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
1441 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
1442 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
1443
1444out:
1445 mutex_unlock(&kvm->lock);
1446 return ret;
1447}
1448
1449static int vgic_ioaddr_overlap(struct kvm *kvm)
1450{
1451 phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
1452 phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
1453
1454 if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
1455 return 0;
1456 if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
1457 (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
1458 return -EBUSY;
1459 return 0;
1460}
1461
1462static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
1463 phys_addr_t addr, phys_addr_t size)
1464{
1465 int ret;
1466
1467 if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
1468 return -EEXIST;
1469 if (addr + size < addr)
1470 return -EINVAL;
1471
1472 ret = vgic_ioaddr_overlap(kvm);
1473 if (ret)
1474 return ret;
1475 *ioaddr = addr;
1476 return ret;
1477}
1478
1479int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
1480{
1481 int r = 0;
1482 struct vgic_dist *vgic = &kvm->arch.vgic;
1483
1484 if (addr & ~KVM_PHYS_MASK)
1485 return -E2BIG;
1486
1487 if (addr & ~PAGE_MASK)
1488 return -EINVAL;
1489
1490 mutex_lock(&kvm->lock);
1491 switch (type) {
1492 case KVM_VGIC_V2_ADDR_TYPE_DIST:
1493 r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
1494 addr, KVM_VGIC_V2_DIST_SIZE);
1495 break;
1496 case KVM_VGIC_V2_ADDR_TYPE_CPU:
1497 r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
1498 addr, KVM_VGIC_V2_CPU_SIZE);
1499 break;
1500 default:
1501 r = -ENODEV;
1502 }
1503
1504 mutex_unlock(&kvm->lock);
1505 return r;
1506}
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index a67ca55e6f4e..3fd8e4290a1c 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,16 +20,45 @@
 
 #define GIC_DIST_CTRL		0x000
 #define GIC_DIST_CTR		0x004
+#define GIC_DIST_IGROUP		0x080
 #define GIC_DIST_ENABLE_SET	0x100
 #define GIC_DIST_ENABLE_CLEAR	0x180
 #define GIC_DIST_PENDING_SET	0x200
 #define GIC_DIST_PENDING_CLEAR	0x280
-#define GIC_DIST_ACTIVE_BIT	0x300
+#define GIC_DIST_ACTIVE_SET	0x300
+#define GIC_DIST_ACTIVE_CLEAR	0x380
 #define GIC_DIST_PRI		0x400
 #define GIC_DIST_TARGET		0x800
 #define GIC_DIST_CONFIG		0xc00
 #define GIC_DIST_SOFTINT	0xf00
 
+#define GICH_HCR		0x0
+#define GICH_VTR		0x4
+#define GICH_VMCR		0x8
+#define GICH_MISR		0x10
+#define GICH_EISR0		0x20
+#define GICH_EISR1		0x24
+#define GICH_ELRSR0		0x30
+#define GICH_ELRSR1		0x34
+#define GICH_APR		0xf0
+#define GICH_LR0		0x100
+
+#define GICH_HCR_EN		(1 << 0)
+#define GICH_HCR_UIE		(1 << 1)
+
+#define GICH_LR_VIRTUALID	(0x3ff << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT	(10)
+#define GICH_LR_PHYSID_CPUID	(7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_STATE		(3 << 28)
+#define GICH_LR_PENDING_BIT	(1 << 28)
+#define GICH_LR_ACTIVE_BIT	(1 << 29)
+#define GICH_LR_EOI		(1 << 19)
+
+#define GICH_MISR_EOI		(1 << 0)
+#define GICH_MISR_U		(1 << 1)
+
+#ifndef __ASSEMBLY__
+
 struct device_node;
 
 extern struct irq_chip gic_arch_extn;
@@ -45,4 +74,6 @@ static inline void gic_init(unsigned int nr, int start,
 	gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 
+#endif /* __ASSEMBLY */
+
 #endif
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 7f2360a46fc2..c70577cf67bc 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -637,6 +637,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84
 #define KVM_CAP_ARM_PSCI 87
+#define KVM_CAP_ARM_SET_DEVICE_ADDR 88
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -784,6 +785,11 @@ struct kvm_msi {
 	__u8  pad[16];
 };
 
+struct kvm_arm_device_addr {
+	__u64 id;
+	__u64 addr;
+};
+
 /*
  * ioctls for VM fds
  */
@@ -869,6 +875,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_ALLOCATE_RMA	  _IOR(KVMIO,  0xa9, struct kvm_allocate_rma)
 /* Available with KVM_CAP_PPC_HTAB_FD */
 #define KVM_PPC_GET_HTAB_FD	  _IOW(KVMIO,  0xaa, struct kvm_get_htab_fd)
+/* Available with KVM_CAP_ARM_SET_DEVICE_ADDR */
+#define KVM_ARM_SET_DEVICE_ADDR	_IOW(KVMIO,  0xab, struct kvm_arm_device_addr)
 
 /*
  * ioctls for vcpu fds