-rw-r--r--  Documentation/devicetree/bindings/arm/arch_timer.txt |    7
-rw-r--r--  Documentation/virtual/kvm/api.txt                    |   38
-rw-r--r--  arch/arm/Kconfig                                     |    6
-rw-r--r--  arch/arm/Makefile                                    |    1
-rw-r--r--  arch/arm/include/asm/arch_timer.h                    |  109
-rw-r--r--  arch/arm/include/asm/kvm_arch_timer.h                |   85
-rw-r--r--  arch/arm/include/asm/kvm_asm.h                       |    3
-rw-r--r--  arch/arm/include/asm/kvm_host.h                      |   23
-rw-r--r--  arch/arm/include/asm/kvm_vgic.h                      |  221
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h                      |   16
-rw-r--r--  arch/arm/kernel/arch_timer.c                         |  505
-rw-r--r--  arch/arm/kernel/asm-offsets.c                        |   18
-rw-r--r--  arch/arm/kernel/smp.c                                |   13
-rw-r--r--  arch/arm/kvm/Kconfig                                 |   16
-rw-r--r--  arch/arm/kvm/Makefile                                |    2
-rw-r--r--  arch/arm/kvm/arch_timer.c                            |  271
-rw-r--r--  arch/arm/kvm/arm.c                                   |  178
-rw-r--r--  arch/arm/kvm/coproc.c                                |    4
-rw-r--r--  arch/arm/kvm/interrupts.S                            |    6
-rw-r--r--  arch/arm/kvm/interrupts_head.S                       |  164
-rw-r--r--  arch/arm/kvm/mmio.c                                  |    3
-rw-r--r--  arch/arm/kvm/vgic.c                                  | 1506
-rw-r--r--  arch/arm/mach-omap2/Kconfig                          |    2
-rw-r--r--  arch/arm/mach-virt/Kconfig                           |   10
-rw-r--r--  arch/arm/mach-virt/Makefile                          |    6
-rw-r--r--  arch/arm/mach-virt/platsmp.c                         |   58
-rw-r--r--  arch/arm/mach-virt/virt.c                            |   54
-rw-r--r--  arch/arm64/Kconfig                                   |    1
-rw-r--r--  arch/arm64/include/asm/arch_timer.h                  |  133
-rw-r--r--  arch/arm64/include/asm/arm_generic.h                 |  100
-rw-r--r--  arch/arm64/kernel/time.c                             |   29
-rw-r--r--  drivers/clocksource/Kconfig                          |    6
-rw-r--r--  drivers/clocksource/Makefile                         |    2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c                 |  391
-rw-r--r--  drivers/clocksource/arm_generic.c                    |  232
-rw-r--r--  include/clocksource/arm_arch_timer.h                 |   63
-rw-r--r--  include/clocksource/arm_generic.h                    |   21
-rw-r--r--  include/linux/irqchip/arm-gic.h                      |   33
-rw-r--r--  include/uapi/linux/kvm.h                             |    8
39 files changed, 3466 insertions, 878 deletions
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index 52478c83d0cc..20746e5abe6f 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -1,13 +1,14 @@
 * ARM architected timer
 
-ARM Cortex-A7 and Cortex-A15 have a per-core architected timer, which
-provides per-cpu timers.
+ARM cores may have a per-core architected timer, which provides per-cpu timers.
 
 The timer is attached to a GIC to deliver its per-processor interrupts.
 
 ** Timer node properties:
 
-- compatible : Should at least contain "arm,armv7-timer".
+- compatible : Should at least contain one of
+	"arm,armv7-timer"
+	"arm,armv8-timer"
 
 - interrupts : Interrupt list for secure, non-secure, virtual and
   hypervisor timers, in that order.
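
The four interrupts above are positional, so consumers index the list rather
than match by name. As a rough C sketch of a consumer walking that list,
modelled on the irq_of_parse_and_map() loop in arch/arm/kernel/arch_timer.c
further down (the enum and array names here are illustrative only):

	#include <linux/of.h>
	#include <linux/of_irq.h>

	/* Indices follow the binding order documented above. */
	enum { SECURE_PPI, NONSECURE_PPI, VIRT_PPI, HYP_PPI, MAX_PPI };

	static int timer_ppi[MAX_PPI];

	static void timer_parse_irqs(struct device_node *np)
	{
		int i;

		/* A missing entry maps to 0; callers must treat 0 as "absent". */
		for (i = SECURE_PPI; i < MAX_PPI; i++)
			timer_ppi[i] = irq_of_parse_and_map(np, i);
	}
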
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c25439a58274..e0fa0ea2b187 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2210,6 +2210,44 @@ This ioctl returns the guest registers that are supported for the
 KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
 
 
+4.80 KVM_ARM_SET_DEVICE_ADDR
+
+Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
+Architectures: arm
+Type: vm ioctl
+Parameters: struct kvm_arm_device_addr (in)
+Returns: 0 on success, -1 on error
+Errors:
+  ENODEV: The device id is unknown
+  ENXIO:  Device not supported on current system
+  EEXIST: Address already set
+  E2BIG:  Address outside guest physical address space
+  EBUSY:  Address overlaps with other device range
+
+struct kvm_arm_device_addr {
+	__u64 id;
+	__u64 addr;
+};
+
+Specify a device address in the guest's physical address space where guests
+can access emulated or directly exposed devices, which the host kernel needs
+to know about. The id field is an architecture specific identifier for a
+specific device.
+
+ARM divides the id field into two parts, a device id and an address type id
+specific to the individual device.
+
+  bits:  | 63        ...        32 | 31    ...    16 | 15    ...    0 |
+  field: | 0x00000000              | device id       | addr type id   |
+
+ARM currently only requires this when using the in-kernel GIC support for the
+hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2 as the device id. When
+setting the base address for the guest's mapping of the VGIC virtual CPU
+and distributor interface, the ioctl must be called after calling
+KVM_CREATE_IRQCHIP, but before calling KVM_RUN on any of the VCPUs. Calling
+this ioctl twice for any of the base addresses will return -EEXIST.
+
+
 5. The kvm_run structure
 ------------------------
 
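
For concreteness, a minimal userspace sketch of this call, using the
KVM_ARM_DEVICE_* and KVM_VGIC_V2_* macros that arch/arm/include/uapi/asm/kvm.h
gains later in this diff. The 0x8000000 base is an arbitrary example address
and the helper name is illustrative:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* vm_fd comes from KVM_CREATE_VM; call this after KVM_CREATE_IRQCHIP
	 * and before any VCPU has run. */
	static int set_vgic_dist_addr(int vm_fd)
	{
		struct kvm_arm_device_addr dev_addr = {
			.id = (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
			      (KVM_VGIC_V2_ADDR_TYPE_DIST << KVM_ARM_DEVICE_TYPE_SHIFT),
			.addr = 0x8000000,	/* example guest-physical base */
		};

		return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr);
	}

A second call for the same address type fails with -EEXIST, per the error
table above.
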
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2cb9c35b14e7..38ec1f8df5a8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@ config ARM
 	select ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CPU_PM if (SUSPEND || CPU_IDLE)
@@ -1112,6 +1113,8 @@ source "arch/arm/mach-versatile/Kconfig"
 source "arch/arm/mach-vexpress/Kconfig"
 source "arch/arm/plat-versatile/Kconfig"
 
+source "arch/arm/mach-virt/Kconfig"
+
 source "arch/arm/mach-vt8500/Kconfig"
 
 source "arch/arm/mach-w90x900/Kconfig"
@@ -1560,9 +1563,10 @@ config HAVE_ARM_SCU
 	help
 	  This option enables support for the ARM system coherency unit
 
-config ARM_ARCH_TIMER
+config HAVE_ARM_ARCH_TIMER
 	bool "Architected timer support"
 	depends on CPU_V7
+	select ARM_ARCH_TIMER
 	help
 	  This option enables support for the ARM architected timer
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1b7071681a5e..ee4605f400b0 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -194,6 +194,7 @@ machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
 machine-$(CONFIG_ARCH_SPEAR13XX)	+= spear13xx
 machine-$(CONFIG_ARCH_SPEAR3XX)		+= spear3xx
 machine-$(CONFIG_MACH_SPEAR600)		+= spear6xx
+machine-$(CONFIG_ARCH_VIRT)		+= virt
 machine-$(CONFIG_ARCH_ZYNQ)		+= zynq
 machine-$(CONFIG_ARCH_SUNXI)		+= sunxi
 
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index d40229d9a1c9..7ade91d8cc6f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -1,13 +1,115 @@
 #ifndef __ASMARM_ARCH_TIMER_H
 #define __ASMARM_ARCH_TIMER_H
 
+#include <asm/barrier.h>
 #include <asm/errno.h>
 #include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <clocksource/arm_arch_timer.h>
 
 #ifdef CONFIG_ARM_ARCH_TIMER
 int arch_timer_of_register(void);
 int arch_timer_sched_clock_init(void);
-struct timecounter *arch_timer_get_timecounter(void);
+
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code. At least it does so with a recent GCC (4.6.3).
+ */
+static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+{
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
+			break;
+		}
+	}
+
+	isb();
+}
+
+static inline u32 arch_timer_reg_read(const int access, const int reg)
+{
+	u32 val = 0;
+
+	if (access == ARCH_TIMER_PHYS_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
+			break;
+		}
+	}
+
+	if (access == ARCH_TIMER_VIRT_ACCESS) {
+		switch (reg) {
+		case ARCH_TIMER_REG_CTRL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
+			break;
+		case ARCH_TIMER_REG_TVAL:
+			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
+			break;
+		}
+	}
+
+	return val;
+}
+
+static inline u32 arch_timer_get_cntfrq(void)
+{
+	u32 val;
+	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
+	return val;
+}
+
+static inline u64 arch_counter_get_cntpct(void)
+{
+	u64 cval;
+
+	isb();
+	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
+	return cval;
+}
+
+static inline u64 arch_counter_get_cntvct(void)
+{
+	u64 cval;
+
+	isb();
+	asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
+	return cval;
+}
+
+static inline void __cpuinit arch_counter_set_user_access(void)
+{
+	u32 cntkctl;
+
+	asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+
+	/* disable user access to everything */
+	cntkctl &= ~((3 << 8) | (7 << 0));
+
+	asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
+}
 #else
 static inline int arch_timer_of_register(void)
 {
@@ -18,11 +120,6 @@ static inline int arch_timer_sched_clock_init(void)
 {
 	return -ENXIO;
 }
-
-static inline struct timecounter *arch_timer_get_timecounter(void)
-{
-	return NULL;
-}
 #endif
 
 #endif
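
For orientation, the new accessors compose exactly like the set_next_event()
helper this series removes from arch/arm/kernel/arch_timer.c below; a sketch,
assuming the ARCH_TIMER_* register indices and control bits now provided by
clocksource/arm_arch_timer.h:

	/* Program the virtual timer to fire in 'evt' ticks: write TVAL
	 * first, then enable and unmask via CTRL. */
	static inline void virt_timer_fire_in(u32 evt)
	{
		u32 ctrl = arch_timer_reg_read(ARCH_TIMER_VIRT_ACCESS,
					       ARCH_TIMER_REG_CTRL);

		ctrl |= ARCH_TIMER_CTRL_ENABLE;
		ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_TVAL, evt);
		arch_timer_reg_write(ARCH_TIMER_VIRT_ACCESS, ARCH_TIMER_REG_CTRL, ctrl);
	}
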
diff --git a/arch/arm/include/asm/kvm_arch_timer.h b/arch/arm/include/asm/kvm_arch_timer.h
new file mode 100644
index 000000000000..68cb9e1dfb81
--- /dev/null
+++ b/arch/arm/include/asm/kvm_arch_timer.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARM_KVM_ARCH_TIMER_H
+#define __ASM_ARM_KVM_ARCH_TIMER_H
+
+#include <linux/clocksource.h>
+#include <linux/hrtimer.h>
+#include <linux/workqueue.h>
+
+struct arch_timer_kvm {
+#ifdef CONFIG_KVM_ARM_TIMER
+	/* Is the timer enabled */
+	bool			enabled;
+
+	/* Virtual offset */
+	cycle_t			cntvoff;
+#endif
+};
+
+struct arch_timer_cpu {
+#ifdef CONFIG_KVM_ARM_TIMER
+	/* Registers: control register, timer value */
+	u32				cntv_ctl;	/* Saved/restored */
+	cycle_t				cntv_cval;	/* Saved/restored */
+
+	/*
+	 * Anything that is not used directly from assembly code goes
+	 * here.
+	 */
+
+	/* Background timer used when the guest is not running */
+	struct hrtimer			timer;
+
+	/* Work queued with the above timer expires */
+	struct work_struct		expired;
+
+	/* Background timer active */
+	bool				armed;
+
+	/* Timer IRQ */
+	const struct kvm_irq_level	*irq;
+#endif
+};
+
+#ifdef CONFIG_KVM_ARM_TIMER
+int kvm_timer_hyp_init(void);
+int kvm_timer_init(struct kvm *kvm);
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
+#else
+static inline int kvm_timer_hyp_init(void)
+{
+	return 0;
+};
+
+static inline int kvm_timer_init(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) {}
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 5e06e8177784..e4956f4e23e1 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -45,7 +45,8 @@
 #define c13_TID_URW	23	/* Thread ID, User R/W */
 #define c13_TID_URO	24	/* Thread ID, User R/O */
 #define c13_TID_PRIV	25	/* Thread ID, Privileged */
-#define NR_CP15_REGS	26	/* Number of regs (incl. invalid) */
+#define c14_CNTKCTL	26	/* Timer Control Register (PL1) */
+#define NR_CP15_REGS	27	/* Number of regs (incl. invalid) */
 
 #define ARM_EXCEPTION_RESET	0
 #define ARM_EXCEPTION_UNDEFINED	1
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 98b4d1a72923..dfe98866a992 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -23,6 +23,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/fpstate.h>
+#include <asm/kvm_arch_timer.h>
 
 #define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
 #define KVM_MEMORY_SLOTS 32
@@ -37,6 +38,8 @@
 #define KVM_NR_PAGE_SIZES	1
 #define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
+#include <asm/kvm_vgic.h>
+
 struct kvm_vcpu;
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int kvm_target_cpu(void);
@@ -47,6 +50,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64	vttbr;
 
+	/* Timer */
+	struct arch_timer_kvm	timer;
+
 	/*
 	 * Anything that is not used directly from assembly code goes
 	 * here.
@@ -58,6 +64,9 @@ struct kvm_arch {
 
 	/* Stage-2 page table */
 	pgd_t *pgd;
+
+	/* Interrupt controller */
+	struct vgic_dist	vgic;
 };
 
 #define KVM_NR_MEM_OBJS     40
@@ -92,6 +101,10 @@ struct kvm_vcpu_arch {
 	struct vfp_hard_struct vfp_guest;
 	struct vfp_hard_struct *vfp_host;
 
+	/* VGIC state */
+	struct vgic_cpu vgic_cpu;
+	struct arch_timer_cpu timer_cpu;
+
 	/*
 	 * Anything that is not used directly from assembly code goes
 	 * here.
@@ -158,4 +171,14 @@ static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 {
 	return 0;
 }
+
+struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
+struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+
+int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
+unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
+struct kvm_one_reg;
+int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
+
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/include/asm/kvm_vgic.h b/arch/arm/include/asm/kvm_vgic.h
new file mode 100644
index 000000000000..ab97207d9cd3
--- /dev/null
+++ b/arch/arm/include/asm/kvm_vgic.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef __ASM_ARM_KVM_VGIC_H
+#define __ASM_ARM_KVM_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/irqchip/arm-gic.h>
+
+#define VGIC_NR_IRQS		128
+#define VGIC_NR_SGIS		16
+#define VGIC_NR_PPIS		16
+#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_NR_SHARED_IRQS	(VGIC_NR_IRQS - VGIC_NR_PRIVATE_IRQS)
+#define VGIC_MAX_CPUS		KVM_MAX_VCPUS
+#define VGIC_MAX_LRS		(1 << 6)
+
+/* Sanity checks... */
+#if (VGIC_MAX_CPUS > 8)
+#error	Invalid number of CPU interfaces
+#endif
+
+#if (VGIC_NR_IRQS & 31)
+#error "VGIC_NR_IRQS must be a multiple of 32"
+#endif
+
+#if (VGIC_NR_IRQS > 1024)
+#error "VGIC_NR_IRQS must be <= 1024"
+#endif
+
+/*
+ * The GIC distributor registers describing interrupts have two parts:
+ * - 32 per-CPU interrupts (SGI + PPI)
+ * - a bunch of shared interrupts (SPI)
+ */
+struct vgic_bitmap {
+	union {
+		u32 reg[VGIC_NR_PRIVATE_IRQS / 32];
+		DECLARE_BITMAP(reg_ul, VGIC_NR_PRIVATE_IRQS);
+	} percpu[VGIC_MAX_CPUS];
+	union {
+		u32 reg[VGIC_NR_SHARED_IRQS / 32];
+		DECLARE_BITMAP(reg_ul, VGIC_NR_SHARED_IRQS);
+	} shared;
+};
+
+struct vgic_bytemap {
+	u32 percpu[VGIC_MAX_CPUS][VGIC_NR_PRIVATE_IRQS / 4];
+	u32 shared[VGIC_NR_SHARED_IRQS / 4];
+};
+
+struct vgic_dist {
+#ifdef CONFIG_KVM_ARM_VGIC
+	spinlock_t		lock;
+	bool			ready;
+
+	/* Virtual control interface mapping */
+	void __iomem		*vctrl_base;
+
+	/* Distributor and vcpu interface mapping in the guest */
+	phys_addr_t		vgic_dist_base;
+	phys_addr_t		vgic_cpu_base;
+
+	/* Distributor enabled */
+	u32			enabled;
+
+	/* Interrupt enabled (one bit per IRQ) */
+	struct vgic_bitmap	irq_enabled;
+
+	/* Interrupt 'pin' level */
+	struct vgic_bitmap	irq_state;
+
+	/* Level-triggered interrupt in progress */
+	struct vgic_bitmap	irq_active;
+
+	/* Interrupt priority. Not used yet. */
+	struct vgic_bytemap	irq_priority;
+
+	/* Level/edge triggered */
+	struct vgic_bitmap	irq_cfg;
+
+	/* Source CPU per SGI and target CPU */
+	u8			irq_sgi_sources[VGIC_MAX_CPUS][VGIC_NR_SGIS];
+
+	/* Target CPU for each IRQ */
+	u8			irq_spi_cpu[VGIC_NR_SHARED_IRQS];
+	struct vgic_bitmap	irq_spi_target[VGIC_MAX_CPUS];
+
+	/* Bitmap indicating which CPU has something pending */
+	unsigned long		irq_pending_on_cpu;
+#endif
+};
+
+struct vgic_cpu {
+#ifdef CONFIG_KVM_ARM_VGIC
+	/* per IRQ to LR mapping */
+	u8		vgic_irq_lr_map[VGIC_NR_IRQS];
+
+	/* Pending interrupts on this VCPU */
+	DECLARE_BITMAP(	pending_percpu, VGIC_NR_PRIVATE_IRQS);
+	DECLARE_BITMAP(	pending_shared, VGIC_NR_SHARED_IRQS);
+
+	/* Bitmap of used/free list registers */
+	DECLARE_BITMAP(	lr_used, VGIC_MAX_LRS);
+
+	/* Number of list registers on this CPU */
+	int		nr_lr;
+
+	/* CPU vif control registers for world switch */
+	u32		vgic_hcr;
+	u32		vgic_vmcr;
+	u32		vgic_misr;	/* Saved only */
+	u32		vgic_eisr[2];	/* Saved only */
+	u32		vgic_elrsr[2];	/* Saved only */
+	u32		vgic_apr;
+	u32		vgic_lr[VGIC_MAX_LRS];
+#endif
+};
+
+#define LR_EMPTY	0xff
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_exit_mmio;
+
+#ifdef CONFIG_KVM_ARM_VGIC
+int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr);
+int kvm_vgic_hyp_init(void);
+int kvm_vgic_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm);
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+			bool level);
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		      struct kvm_exit_mmio *mmio);
+
+#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.vctrl_base))
+#define vgic_initialized(k)	((k)->arch.vgic.ready)
+
+#else
+static inline int kvm_vgic_hyp_init(void)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_init(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_create(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}
+
+static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
+				      unsigned int irq_num, bool level)
+{
+	return 0;
+}
+
+static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
+				    struct kvm_exit_mmio *mmio)
+{
+	return false;
+}
+
+static inline int irqchip_in_kernel(struct kvm *kvm)
+{
+	return 0;
+}
+
+static inline bool vgic_initialized(struct kvm *kvm)
+{
+	return true;
+}
+#endif
+
+#endif
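
The private/shared split in vgic_bitmap means every lookup first classifies
the IRQ number, as the comment above describes; a sketch of the addressing
rule (the helper name is illustrative, not part of this patch):

	static u32 *vgic_bitmap_word(struct vgic_bitmap *x, int cpuid, int irq)
	{
		if (irq < VGIC_NR_PRIVATE_IRQS)
			/* SGIs and PPIs are banked per cpu */
			return &x->percpu[cpuid].reg[irq / 32];

		/* SPIs have a single copy shared by all cpus */
		irq -= VGIC_NR_PRIVATE_IRQS;
		return &x->shared.reg[irq / 32];
	}
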
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 3303ff5adbf3..023bfeb367bf 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -65,6 +65,22 @@ struct kvm_regs {
 #define KVM_ARM_TARGET_CORTEX_A15	0
 #define KVM_ARM_NUM_TARGETS		1
 
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT	0
+#define KVM_ARM_DEVICE_TYPE_MASK	(0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT		16
+#define KVM_ARM_DEVICE_ID_MASK		(0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2		0
+
+/* Supported VGIC address types */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST	0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU	1
+
+#define KVM_VGIC_V2_DIST_SIZE		0x1000
+#define KVM_VGIC_V2_CPU_SIZE		0x2000
+
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 
 struct kvm_vcpu_init {
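
A short sketch of how the two halves of the 64-bit id are recovered with the
masks above, mirroring what the KVM_ARM_SET_DEVICE_ADDR handler has to do
(helper names are illustrative):

	static unsigned int device_id(__u64 id)
	{
		return (id & KVM_ARM_DEVICE_ID_MASK) >> KVM_ARM_DEVICE_ID_SHIFT;
	}

	static unsigned int device_addr_type(__u64 id)
	{
		return (id & KVM_ARM_DEVICE_TYPE_MASK) >> KVM_ARM_DEVICE_TYPE_SHIFT;
	}
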
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index c8ef20747ee7..d957a51435d8 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -9,516 +9,53 @@
  * published by the Free Software Foundation.
  */
 #include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/smp.h>
-#include <linux/cpu.h>
-#include <linux/jiffies.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/errno.h>
 
-#include <asm/cputype.h>
 #include <asm/delay.h>
-#include <asm/localtimer.h>
-#include <asm/arch_timer.h>
-#include <asm/system_info.h>
 #include <asm/sched_clock.h>
 
-static unsigned long arch_timer_rate;
+#include <clocksource/arm_arch_timer.h>
 
-enum ppi_nr {
-	PHYS_SECURE_PPI,
-	PHYS_NONSECURE_PPI,
-	VIRT_PPI,
-	HYP_PPI,
-	MAX_TIMER_PPI
-};
-
-static int arch_timer_ppi[MAX_TIMER_PPI];
-
-static struct clock_event_device __percpu **arch_timer_evt;
-static struct delay_timer arch_delay_timer;
-
-static bool arch_timer_use_virtual = true;
-
-/*
- * Architected system timer support.
- */
-
-#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
-#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
-#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)
-
-#define ARCH_TIMER_REG_CTRL		0
-#define ARCH_TIMER_REG_FREQ		1
-#define ARCH_TIMER_REG_TVAL		2
-
-#define ARCH_TIMER_PHYS_ACCESS		0
-#define ARCH_TIMER_VIRT_ACCESS		1
-
-/*
- * These register accessors are marked inline so the compiler can
- * nicely work out which register we want, and chuck away the rest of
- * the code. At least it does so with a recent GCC (4.6.3).
- */
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
-{
-	if (access == ARCH_TIMER_PHYS_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
-			break;
-		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
-			break;
-		}
-	}
-
-	isb();
-}
-
-static inline u32 arch_timer_reg_read(const int access, const int reg)
-{
-	u32 val = 0;
-
-	if (access == ARCH_TIMER_PHYS_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
-			break;
-		case ARCH_TIMER_REG_FREQ:
-			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
-			break;
-		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
-		switch (reg) {
-		case ARCH_TIMER_REG_CTRL:
-			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
-			break;
-		case ARCH_TIMER_REG_TVAL:
-			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
-			break;
-		}
-	}
-
-	return val;
-}
-
-static inline cycle_t arch_timer_counter_read(const int access)
-{
-	cycle_t cval = 0;
-
-	if (access == ARCH_TIMER_PHYS_ACCESS)
-		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
-
-	if (access == ARCH_TIMER_VIRT_ACCESS)
-		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));
-
-	return cval;
-}
-
-static inline cycle_t arch_counter_get_cntpct(void)
-{
-	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
-}
-
-static inline cycle_t arch_counter_get_cntvct(void)
-{
-	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
-}
-
-static irqreturn_t inline timer_handler(const int access,
-					struct clock_event_device *evt)
-{
-	unsigned long ctrl;
-	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
-		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
-		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-		evt->event_handler(evt);
-		return IRQ_HANDLED;
-	}
-
-	return IRQ_NONE;
-}
-
-static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
-
-	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
-{
-	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
-
-	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
-}
-
-static inline void timer_set_mode(const int access, int mode)
-{
-	unsigned long ctrl;
-	switch (mode) {
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
-		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-		break;
-	default:
-		break;
-	}
-}
-
-static void arch_timer_set_mode_virt(enum clock_event_mode mode,
-				     struct clock_event_device *clk)
-{
-	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
-}
-
-static void arch_timer_set_mode_phys(enum clock_event_mode mode,
-				     struct clock_event_device *clk)
-{
-	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
-}
-
-static inline void set_next_event(const int access, unsigned long evt)
-{
-	unsigned long ctrl;
-	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
-	ctrl |= ARCH_TIMER_CTRL_ENABLE;
-	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
-	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
-}
-
-static int arch_timer_set_next_event_virt(unsigned long evt,
-					  struct clock_event_device *unused)
-{
-	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
-	return 0;
-}
-
-static int arch_timer_set_next_event_phys(unsigned long evt,
-					  struct clock_event_device *unused)
-{
-	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
-	return 0;
-}
-
-static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
-{
-	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
-	clk->name = "arch_sys_timer";
-	clk->rating = 450;
-	if (arch_timer_use_virtual) {
-		clk->irq = arch_timer_ppi[VIRT_PPI];
-		clk->set_mode = arch_timer_set_mode_virt;
-		clk->set_next_event = arch_timer_set_next_event_virt;
-	} else {
-		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
-		clk->set_mode = arch_timer_set_mode_phys;
-		clk->set_next_event = arch_timer_set_next_event_phys;
-	}
-
-	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
-
-	clockevents_config_and_register(clk, arch_timer_rate,
-					0xf, 0x7fffffff);
-
-	*__this_cpu_ptr(arch_timer_evt) = clk;
-
-	if (arch_timer_use_virtual)
-		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
-	else {
-		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
-	}
-
-	return 0;
-}
-
-/* Is the optional system timer available? */
-static int local_timer_is_architected(void)
-{
-	return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
-	       ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
-}
-
-static int arch_timer_available(void)
-{
-	unsigned long freq;
-
-	if (!local_timer_is_architected())
-		return -ENXIO;
-
-	if (arch_timer_rate == 0) {
-		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
-					   ARCH_TIMER_REG_FREQ);
-
-		/* Check the timer frequency. */
-		if (freq == 0) {
-			pr_warn("Architected timer frequency not available\n");
-			return -EINVAL;
-		}
-
-		arch_timer_rate = freq;
-	}
-
-	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
-		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
-		     arch_timer_use_virtual ? "virt" : "phys");
-	return 0;
-}
-
-static u32 notrace arch_counter_get_cntpct32(void)
-{
-	cycle_t cnt = arch_counter_get_cntpct();
-
-	/*
-	 * The sched_clock infrastructure only knows about counters
-	 * with at most 32bits. Forget about the upper 24 bits for the
-	 * time being...
-	 */
-	return (u32)cnt;
-}
-
-static u32 notrace arch_counter_get_cntvct32(void)
-{
-	cycle_t cnt = arch_counter_get_cntvct();
-
-	/*
-	 * The sched_clock infrastructure only knows about counters
-	 * with at most 32bits. Forget about the upper 24 bits for the
-	 * time being...
-	 */
-	return (u32)cnt;
-}
-
-static cycle_t arch_counter_read(struct clocksource *cs)
-{
-	/*
-	 * Always use the physical counter for the clocksource.
-	 * CNTHCTL.PL1PCTEN must be set to 1.
-	 */
-	return arch_counter_get_cntpct();
-}
-
-static unsigned long arch_timer_read_current_timer(void)
+static unsigned long arch_timer_read_counter_long(void)
 {
-	return arch_counter_get_cntpct();
+	return arch_timer_read_counter();
 }
 
-static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
+static u32 arch_timer_read_counter_u32(void)
 {
-	/*
-	 * Always use the physical counter for the clocksource.
-	 * CNTHCTL.PL1PCTEN must be set to 1.
-	 */
-	return arch_counter_get_cntpct();
+	return arch_timer_read_counter();
 }
 
-static struct clocksource clocksource_counter = {
-	.name	= "arch_sys_counter",
-	.rating	= 400,
-	.read	= arch_counter_read,
-	.mask	= CLOCKSOURCE_MASK(56),
-	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static struct cyclecounter cyclecounter = {
-	.read	= arch_counter_read_cc,
-	.mask	= CLOCKSOURCE_MASK(56),
-};
-
-static struct timecounter timecounter;
-
-struct timecounter *arch_timer_get_timecounter(void)
-{
-	return &timecounter;
-}
-
-static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
-{
-	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
-		 clk->irq, smp_processor_id());
-
-	if (arch_timer_use_virtual)
-		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
-	else {
-		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
-	}
-
-	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
-}
-
-static struct local_timer_ops arch_timer_ops __cpuinitdata = {
-	.setup	= arch_timer_setup,
-	.stop	= arch_timer_stop,
-};
-
-static struct clock_event_device arch_timer_global_evt;
+static struct delay_timer arch_delay_timer;
 
-static int __init arch_timer_register(void)
+static void __init arch_timer_delay_timer_register(void)
 {
-	int err;
-	int ppi;
-
-	err = arch_timer_available();
-	if (err)
-		goto out;
-
-	arch_timer_evt = alloc_percpu(struct clock_event_device *);
-	if (!arch_timer_evt) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
-	cyclecounter.mult = clocksource_counter.mult;
-	cyclecounter.shift = clocksource_counter.shift;
-	timecounter_init(&timecounter, &cyclecounter,
-			 arch_counter_get_cntpct());
-
-	if (arch_timer_use_virtual) {
-		ppi = arch_timer_ppi[VIRT_PPI];
-		err = request_percpu_irq(ppi, arch_timer_handler_virt,
-					 "arch_timer", arch_timer_evt);
-	} else {
-		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
-		err = request_percpu_irq(ppi, arch_timer_handler_phys,
-					 "arch_timer", arch_timer_evt);
-		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
-			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
-			err = request_percpu_irq(ppi, arch_timer_handler_phys,
-						 "arch_timer", arch_timer_evt);
-			if (err)
-				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
-						arch_timer_evt);
-		}
-	}
-
-	if (err) {
-		pr_err("arch_timer: can't register interrupt %d (%d)\n",
-		       ppi, err);
-		goto out_free;
-	}
-
-	err = local_timer_register(&arch_timer_ops);
-	if (err) {
-		/*
-		 * We couldn't register as a local timer (could be
-		 * because we're on a UP platform, or because some
-		 * other local timer is already present...). Try as a
-		 * global timer instead.
-		 */
-		arch_timer_global_evt.cpumask = cpumask_of(0);
-		err = arch_timer_setup(&arch_timer_global_evt);
-	}
-	if (err)
-		goto out_free_irq;
-
 	/* Use the architected timer for the delay loop. */
-	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
-	arch_delay_timer.freq = arch_timer_rate;
+	arch_delay_timer.read_current_timer = arch_timer_read_counter_long;
+	arch_delay_timer.freq = arch_timer_get_rate();
 	register_current_timer_delay(&arch_delay_timer);
-	return 0;
-
-out_free_irq:
-	if (arch_timer_use_virtual)
-		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
-	else {
-		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
-				arch_timer_evt);
-		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
-			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
-					arch_timer_evt);
-	}
-
-out_free:
-	free_percpu(arch_timer_evt);
-out:
-	return err;
 }
 
-static const struct of_device_id arch_timer_of_match[] __initconst = {
-	{ .compatible	= "arm,armv7-timer",	},
-	{},
-};
-
 int __init arch_timer_of_register(void)
 {
-	struct device_node *np;
-	u32 freq;
-	int i;
-
-	np = of_find_matching_node(NULL, arch_timer_of_match);
-	if (!np) {
-		pr_err("arch_timer: can't find DT node\n");
-		return -ENODEV;
-	}
-
-	/* Try to determine the frequency from the device tree or CNTFRQ */
-	if (!of_property_read_u32(np, "clock-frequency", &freq))
-		arch_timer_rate = freq;
-
-	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
-		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
+	int ret;
 
-	/*
-	 * If no interrupt provided for virtual timer, we'll have to
-	 * stick to the physical timer. It'd better be accessible...
-	 */
-	if (!arch_timer_ppi[VIRT_PPI]) {
-		arch_timer_use_virtual = false;
-
-		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
-		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
-			pr_warn("arch_timer: No interrupt available, giving up\n");
-			return -EINVAL;
-		}
-	}
+	ret = arch_timer_init();
+	if (ret)
+		return ret;
+
+	arch_timer_delay_timer_register();
 
-	return arch_timer_register();
+	return 0;
 }
 
 int __init arch_timer_sched_clock_init(void)
 {
-	u32 (*cnt32)(void);
-	int err;
-
-	err = arch_timer_available();
-	if (err)
-		return err;
-
-	if (arch_timer_use_virtual)
-		cnt32 = arch_counter_get_cntvct32;
-	else
-		cnt32 = arch_counter_get_cntpct32;
+	if (arch_timer_get_rate() == 0)
+		return -ENXIO;
 
-	setup_sched_clock(cnt32, 32, arch_timer_rate);
+	setup_sched_clock(arch_timer_read_counter_u32,
+			  32, arch_timer_get_rate());
 	return 0;
 }
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index c8b3272dfed1..5ce738b43508 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -169,6 +169,24 @@ int main(void)
 	DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
 	DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
 	DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
+#ifdef CONFIG_KVM_ARM_VGIC
+	DEFINE(VCPU_VGIC_CPU,		offsetof(struct kvm_vcpu, arch.vgic_cpu));
+	DEFINE(VGIC_CPU_HCR,		offsetof(struct vgic_cpu, vgic_hcr));
+	DEFINE(VGIC_CPU_VMCR,		offsetof(struct vgic_cpu, vgic_vmcr));
+	DEFINE(VGIC_CPU_MISR,		offsetof(struct vgic_cpu, vgic_misr));
+	DEFINE(VGIC_CPU_EISR,		offsetof(struct vgic_cpu, vgic_eisr));
+	DEFINE(VGIC_CPU_ELRSR,		offsetof(struct vgic_cpu, vgic_elrsr));
+	DEFINE(VGIC_CPU_APR,		offsetof(struct vgic_cpu, vgic_apr));
+	DEFINE(VGIC_CPU_LR,		offsetof(struct vgic_cpu, vgic_lr));
+	DEFINE(VGIC_CPU_NR_LR,		offsetof(struct vgic_cpu, nr_lr));
+#ifdef CONFIG_KVM_ARM_TIMER
+	DEFINE(VCPU_TIMER_CNTV_CTL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+	DEFINE(VCPU_TIMER_CNTV_CVAL,	offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
+	DEFINE(KVM_TIMER_CNTVOFF,	offsetof(struct kvm, arch.timer.cntvoff));
+	DEFINE(KVM_TIMER_ENABLED,	offsetof(struct kvm, arch.timer.enabled));
+#endif
+	DEFINE(KVM_VGIC_VCTRL,		offsetof(struct kvm, arch.vgic.vctrl_base));
+#endif
 	DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
 #endif
 	return 0;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 60340fa561d4..5f73f7018f50 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -461,14 +461,8 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-	evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
 	smp_cross_call(mask, IPI_TIMER);
 }
@@ -516,7 +510,6 @@ static void __cpuinit percpu_timer_setup(void)
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
 	evt->cpumask = cpumask_of(cpu);
-	evt->broadcast = smp_timer_broadcast;
 
 	if (!lt_ops || lt_ops->setup(evt))
 		broadcast_timer_setup(evt);
@@ -582,11 +575,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 	case IPI_WAKEUP:
 		break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
 		irq_enter();
-		ipi_timer();
+		tick_receive_broadcast();
 		irq_exit();
 		break;
+#endif
 
 	case IPI_RESCHEDULE:
 		scheduler_ipi();
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 05227cb57a7b..49dd64e579c2 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -51,6 +51,22 @@ config KVM_ARM_MAX_VCPUS
 	  large, so only choose a reasonable number that you expect to
 	  actually use.
 
+config KVM_ARM_VGIC
+	bool "KVM support for Virtual GIC"
+	depends on KVM_ARM_HOST && OF
+	select HAVE_KVM_IRQCHIP
+	default y
+	---help---
+	  Adds support for a hardware assisted, in-kernel GIC emulation.
+
+config KVM_ARM_TIMER
+	bool "KVM support for Architected Timers"
+	depends on KVM_ARM_VGIC && ARM_ARCH_TIMER
+	select HAVE_KVM_IRQCHIP
+	default y
+	---help---
+	  Adds support for the Architected Timers in virtual machines
+
 source drivers/virtio/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index ea27987bd07f..fc96ce6f2357 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -19,3 +19,5 @@ kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 obj-y += kvm-arm.o init.o interrupts.o
 obj-y += arm.o guest.o mmu.o emulate.o reset.o
 obj-y += coproc.o coproc_a15.o mmio.o psci.o
+obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
+obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
diff --git a/arch/arm/kvm/arch_timer.c b/arch/arm/kvm/arch_timer.c
new file mode 100644
index 000000000000..6ac938d46297
--- /dev/null
+++ b/arch/arm/kvm/arch_timer.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/cpu.h>
+#include <linux/of_irq.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/interrupt.h>
+
+#include <asm/arch_timer.h>
+
+#include <asm/kvm_vgic.h>
+#include <asm/kvm_arch_timer.h>
+
+static struct timecounter *timecounter;
+static struct workqueue_struct *wqueue;
+static struct kvm_irq_level timer_irq = {
+	.level = 1,
+};
+
+static cycle_t kvm_phys_timer_read(void)
+{
+	return timecounter->cc->read(timecounter->cc);
+}
+
+static bool timer_is_armed(struct arch_timer_cpu *timer)
+{
+	return timer->armed;
+}
+
+/* timer_arm: as in "arm the timer", not as in ARM the company */
+static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
+{
+	timer->armed = true;
+	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
+		      HRTIMER_MODE_ABS);
+}
+
+static void timer_disarm(struct arch_timer_cpu *timer)
+{
+	if (timer_is_armed(timer)) {
+		hrtimer_cancel(&timer->timer);
+		cancel_work_sync(&timer->expired);
+		timer->armed = false;
+	}
+}
+
+static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	timer->cntv_ctl |= 1 << 1; /* Mask the interrupt in the guest */
+	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+			    vcpu->arch.timer_cpu.irq->irq,
+			    vcpu->arch.timer_cpu.irq->level);
+}
+
+static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
+{
+	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
+
+	/*
+	 * We disable the timer in the world switch and let it be
+	 * handled by kvm_timer_sync_hwstate(). Getting a timer
+	 * interrupt at this point is a sure sign of some major
+	 * breakage.
+	 */
+	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
+	return IRQ_HANDLED;
+}
+
+static void kvm_timer_inject_irq_work(struct work_struct *work)
+{
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
+	vcpu->arch.timer_cpu.armed = false;
+	kvm_timer_inject_irq(vcpu);
+}
+
+static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
+{
+	struct arch_timer_cpu *timer;
+	timer = container_of(hrt, struct arch_timer_cpu, timer);
+	queue_work(wqueue, &timer->expired);
+	return HRTIMER_NORESTART;
+}
+
+/**
+ * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Disarm any pending soft timers, since the world-switch code will write the
+ * virtual timer state back to the physical CPU.
+ */
+void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	/*
+	 * We're about to run this vcpu again, so there is no need to
+	 * keep the background timer running, as we're about to
+	 * populate the CPU timer again.
+	 */
+	timer_disarm(timer);
+}
+
+/**
+ * kvm_timer_sync_hwstate - sync timer state from cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the virtual timer was armed and either schedule a corresponding
+ * soft timer or inject directly if already expired.
+ */
+void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+	cycle_t cval, now;
+	u64 ns;
+
+	/* Check if the timer is enabled and unmasked first */
+	if ((timer->cntv_ctl & 3) != 1)
+		return;
+
+	cval = timer->cntv_cval;
+	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
+
+	BUG_ON(timer_is_armed(timer));
+
+	if (cval <= now) {
+		/*
+		 * Timer has already expired while we were not
+		 * looking. Inject the interrupt and carry on.
+		 */
+		kvm_timer_inject_irq(vcpu);
+		return;
+	}
+
+	ns = cyclecounter_cyc2ns(timecounter->cc, cval - now);
+	timer_arm(timer, ns);
+}
+
+void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
+	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	timer->timer.function = kvm_timer_expire;
+	timer->irq = &timer_irq;
+}
+
+static void kvm_timer_init_interrupt(void *info)
+{
+	enable_percpu_irq(timer_irq.irq, 0);
+}
+
+
+static int kvm_timer_cpu_notify(struct notifier_block *self,
+				unsigned long action, void *cpu)
+{
+	switch (action) {
+	case CPU_STARTING:
+	case CPU_STARTING_FROZEN:
+		kvm_timer_init_interrupt(NULL);
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		disable_percpu_irq(timer_irq.irq);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block kvm_timer_cpu_nb = {
+	.notifier_call = kvm_timer_cpu_notify,
+};
+
+static const struct of_device_id arch_timer_of_match[] = {
+	{ .compatible	= "arm,armv7-timer",	},
+	{},
+};
+
+int kvm_timer_hyp_init(void)
+{
+	struct device_node *np;
+	unsigned int ppi;
+	int err;
+
+	timecounter = arch_timer_get_timecounter();
+	if (!timecounter)
+		return -ENODEV;
+
+	np = of_find_matching_node(NULL, arch_timer_of_match);
+	if (!np) {
+		kvm_err("kvm_arch_timer: can't find DT node\n");
+		return -ENODEV;
+	}
+
+	ppi = irq_of_parse_and_map(np, 2);
+	if (!ppi) {
+		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
+				 "kvm guest timer", kvm_get_running_vcpus());
+	if (err) {
+		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
+			ppi, err);
+		goto out;
+	}
+
+	timer_irq.irq = ppi;
+
+	err = register_cpu_notifier(&kvm_timer_cpu_nb);
+	if (err) {
+		kvm_err("Cannot register timer CPU notifier\n");
+		goto out_free;
+	}
+
+	wqueue = create_singlethread_workqueue("kvm_arch_timer");
+	if (!wqueue) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	kvm_info("%s IRQ%d\n", np->name, ppi);
+	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
+
+	goto out;
+out_free:
+	free_percpu_irq(ppi, kvm_get_running_vcpus());
+out:
+	of_node_put(np);
+	return err;
+}
+
+void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
+{
+	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+	timer_disarm(timer);
+}
+
+int kvm_timer_init(struct kvm *kvm)
+{
+	if (timecounter && wqueue) {
+		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+		kvm->arch.timer.enabled = 1;
+	}
+
+	return 0;
+}
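
The magic constants in kvm_timer_inject_irq() and kvm_timer_sync_hwstate()
above pack two architectural CNTV_CTL bits: bit 0 is the enable bit, bit 1 is
the interrupt mask. Spelled out as a sketch (the macro and helper names are
illustrative):

	#define CNTV_CTL_ENABLE	(1 << 0)
	#define CNTV_CTL_IMASK	(1 << 1)

	/* Equivalent to the "(cntv_ctl & 3) != 1" test: the timer can only
	 * fire when it is enabled and its interrupt is not masked. */
	static bool timer_can_fire(u32 cntv_ctl)
	{
		return (cntv_ctl & (CNTV_CTL_ENABLE | CNTV_CTL_IMASK)) ==
			CNTV_CTL_ENABLE;
	}
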
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d30e3afdaf9..9ada5549216d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -54,11 +54,40 @@ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
54static struct vfp_hard_struct __percpu *kvm_host_vfp_state; 54static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
55static unsigned long hyp_default_vectors; 55static unsigned long hyp_default_vectors;
56 56
57/* Per-CPU variable containing the currently running vcpu. */
58static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
59
57/* The VMID used in the VTTBR */ 60/* The VMID used in the VTTBR */
58static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); 61static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
59static u8 kvm_next_vmid; 62static u8 kvm_next_vmid;
60static DEFINE_SPINLOCK(kvm_vmid_lock); 63static DEFINE_SPINLOCK(kvm_vmid_lock);
61 64
65static bool vgic_present;
66
67static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
68{
69 BUG_ON(preemptible());
70 __get_cpu_var(kvm_arm_running_vcpu) = vcpu;
71}
72
73/**
74 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
75 * Must be called from non-preemptible context
76 */
77struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
78{
79 BUG_ON(preemptible());
80 return __get_cpu_var(kvm_arm_running_vcpu);
81}
82
83/**
84 * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
85 */
86struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
87{
88 return &kvm_arm_running_vcpu;
89}
90
62int kvm_arch_hardware_enable(void *garbage) 91int kvm_arch_hardware_enable(void *garbage)
63{ 92{
64 return 0; 93 return 0;
@@ -157,6 +186,9 @@ int kvm_dev_ioctl_check_extension(long ext)
157{ 186{
158 int r; 187 int r;
159 switch (ext) { 188 switch (ext) {
189 case KVM_CAP_IRQCHIP:
190 r = vgic_present;
191 break;
160 case KVM_CAP_USER_MEMORY: 192 case KVM_CAP_USER_MEMORY:
161 case KVM_CAP_SYNC_MMU: 193 case KVM_CAP_SYNC_MMU:
162 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: 194 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
@@ -167,6 +199,8 @@ int kvm_dev_ioctl_check_extension(long ext)
167 case KVM_CAP_COALESCED_MMIO: 199 case KVM_CAP_COALESCED_MMIO:
168 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 200 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
169 break; 201 break;
202 case KVM_CAP_ARM_SET_DEVICE_ADDR:
203 r = 1;
170 case KVM_CAP_NR_VCPUS: 204 case KVM_CAP_NR_VCPUS:
171 r = num_online_cpus(); 205 r = num_online_cpus();
172 break; 206 break;
@@ -255,6 +289,7 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
255void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) 289void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
256{ 290{
257 kvm_mmu_free_memory_caches(vcpu); 291 kvm_mmu_free_memory_caches(vcpu);
292 kvm_timer_vcpu_terminate(vcpu);
258 kmem_cache_free(kvm_vcpu_cache, vcpu); 293 kmem_cache_free(kvm_vcpu_cache, vcpu);
259} 294}
260 295
@@ -286,8 +321,19 @@ int __attribute_const__ kvm_target_cpu(void)
286 321
287int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) 322int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
288{ 323{
324 int ret;
325
289 /* Force users to call KVM_ARM_VCPU_INIT */ 326 /* Force users to call KVM_ARM_VCPU_INIT */
290 vcpu->arch.target = -1; 327 vcpu->arch.target = -1;
328
329 /* Set up VGIC */
330 ret = kvm_vgic_vcpu_init(vcpu);
331 if (ret)
332 return ret;
333
334 /* Set up the timer */
335 kvm_timer_vcpu_init(vcpu);
336
291 return 0; 337 return 0;
292} 338}
293 339
@@ -308,10 +354,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
308 */ 354 */
309 if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) 355 if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
310 flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ 356 flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
357
358 kvm_arm_set_running_vcpu(vcpu);
311} 359}
312 360
313void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 361void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
314{ 362{
363 kvm_arm_set_running_vcpu(NULL);
315} 364}
316 365
317int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, 366int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -342,7 +391,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
342 */ 391 */
343int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 392int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
344{ 393{
345 return !!v->arch.irq_lines; 394 return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
346} 395}
347 396
348/* Just ensure a guest exit from a particular CPU */ 397/* Just ensure a guest exit from a particular CPU */
@@ -597,6 +646,17 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
597 vcpu->arch.has_run_once = true; 646 vcpu->arch.has_run_once = true;
598 647
599 /* 648 /*
649 * Initialize the VGIC before running a vcpu the first time on
650 * this VM.
651 */
652 if (irqchip_in_kernel(vcpu->kvm) &&
653 unlikely(!vgic_initialized(vcpu->kvm))) {
654 int ret = kvm_vgic_init(vcpu->kvm);
655 if (ret)
656 return ret;
657 }
658
659 /*
600 * Handle the "start in power-off" case by calling into the 660 * Handle the "start in power-off" case by calling into the
601 * PSCI code. 661 * PSCI code.
602 */ 662 */
@@ -661,6 +721,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
661 if (vcpu->arch.pause) 721 if (vcpu->arch.pause)
662 vcpu_pause(vcpu); 722 vcpu_pause(vcpu);
663 723
724 kvm_vgic_flush_hwstate(vcpu);
725 kvm_timer_flush_hwstate(vcpu);
726
664 local_irq_disable(); 727 local_irq_disable();
665 728
666 /* 729 /*
@@ -673,6 +736,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
673 736
674 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { 737 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
675 local_irq_enable(); 738 local_irq_enable();
739 kvm_timer_sync_hwstate(vcpu);
740 kvm_vgic_sync_hwstate(vcpu);
676 continue; 741 continue;
677 } 742 }
678 743
@@ -705,6 +770,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
705 * Back from guest 770 * Back from guest
706 *************************************************************/ 771 *************************************************************/
707 772
773 kvm_timer_sync_hwstate(vcpu);
774 kvm_vgic_sync_hwstate(vcpu);
775
708 ret = handle_exit(vcpu, run, ret); 776 ret = handle_exit(vcpu, run, ret);
709 } 777 }
710 778
@@ -760,20 +828,49 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
760 828
761 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); 829 trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
762 830
763 if (irq_type != KVM_ARM_IRQ_TYPE_CPU) 831 switch (irq_type) {
764 return -EINVAL; 832 case KVM_ARM_IRQ_TYPE_CPU:
833 if (irqchip_in_kernel(kvm))
834 return -ENXIO;
765 835
766 if (vcpu_idx >= nrcpus) 836 if (vcpu_idx >= nrcpus)
767 return -EINVAL; 837 return -EINVAL;
768 838
769 vcpu = kvm_get_vcpu(kvm, vcpu_idx); 839 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
770 if (!vcpu) 840 if (!vcpu)
771 return -EINVAL; 841 return -EINVAL;
772 842
773 if (irq_num > KVM_ARM_IRQ_CPU_FIQ) 843 if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
774 return -EINVAL; 844 return -EINVAL;
845
846 return vcpu_interrupt_line(vcpu, irq_num, level);
847 case KVM_ARM_IRQ_TYPE_PPI:
848 if (!irqchip_in_kernel(kvm))
849 return -ENXIO;
775 850
776 return vcpu_interrupt_line(vcpu, irq_num, level); 851 if (vcpu_idx >= nrcpus)
852 return -EINVAL;
853
854 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
855 if (!vcpu)
856 return -EINVAL;
857
858 if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
859 return -EINVAL;
860
861 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
862 case KVM_ARM_IRQ_TYPE_SPI:
863 if (!irqchip_in_kernel(kvm))
864 return -ENXIO;
865
866 if (irq_num < VGIC_NR_PRIVATE_IRQS ||
867 irq_num > KVM_ARM_IRQ_GIC_MAX)
868 return -EINVAL;
869
870 return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
871 }
872
873 return -EINVAL;
777} 874}
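Seen from userspace, the irq field of struct kvm_irq_level packs exactly the fields the switch above decodes. A minimal sketch of raising an SPI through KVM_IRQ_LINE, assuming the uapi constants from arch/arm/include/uapi/asm/kvm.h and an already-created VM fd (error handling omitted):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch: assert SPI 37 (irq_num >= VGIC_NR_PRIVATE_IRQS) on vm_fd.
     * The vcpu index field is ignored for SPIs, so it stays zero. */
    static int assert_spi37(int vm_fd)
    {
            struct kvm_irq_level irq_level = {
                    .irq = (KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT) | 37,
                    .level = 1,
            };

            return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
    }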
778 875
779long kvm_arch_vcpu_ioctl(struct file *filp, 876long kvm_arch_vcpu_ioctl(struct file *filp,
@@ -827,10 +924,49 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
827 return -EINVAL; 924 return -EINVAL;
828} 925}
829 926
927static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
928 struct kvm_arm_device_addr *dev_addr)
929{
930 unsigned long dev_id, type;
931
932 dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
933 KVM_ARM_DEVICE_ID_SHIFT;
934 type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
935 KVM_ARM_DEVICE_TYPE_SHIFT;
936
937 switch (dev_id) {
938 case KVM_ARM_DEVICE_VGIC_V2:
939 if (!vgic_present)
940 return -ENXIO;
941 return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
942 default:
943 return -ENODEV;
944 }
945}
946
830long kvm_arch_vm_ioctl(struct file *filp, 947long kvm_arch_vm_ioctl(struct file *filp,
831 unsigned int ioctl, unsigned long arg) 948 unsigned int ioctl, unsigned long arg)
832{ 949{
833 return -EINVAL; 950 struct kvm *kvm = filp->private_data;
951 void __user *argp = (void __user *)arg;
952
953 switch (ioctl) {
954 case KVM_CREATE_IRQCHIP: {
955 if (vgic_present)
956 return kvm_vgic_create(kvm);
957 else
958 return -ENXIO;
959 }
960 case KVM_ARM_SET_DEVICE_ADDR: {
961 struct kvm_arm_device_addr dev_addr;
962
963 if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
964 return -EFAULT;
965 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
966 }
967 default:
968 return -EINVAL;
969 }
834} 970}
835 971
836static void cpu_init_hyp_mode(void *vector) 972static void cpu_init_hyp_mode(void *vector)
@@ -960,6 +1096,24 @@ static int init_hyp_mode(void)
960 } 1096 }
961 } 1097 }
962 1098
1099 /*
1100 * Init HYP view of VGIC
1101 */
1102 err = kvm_vgic_hyp_init();
1103 if (err)
1104 goto out_free_vfp;
1105
1106#ifdef CONFIG_KVM_ARM_VGIC
1107 vgic_present = true;
1108#endif
1109
1110 /*
1111 * Init HYP architected timer support
1112 */
1113 err = kvm_timer_hyp_init();
1114 if (err)
1115 goto out_free_mappings;
1116
963 kvm_info("Hyp mode initialized successfully\n"); 1117 kvm_info("Hyp mode initialized successfully\n");
964 return 0; 1118 return 0;
965out_free_vfp: 1119out_free_vfp:
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index d782638c7ec0..4ea9a982269c 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -222,6 +222,10 @@ static const struct coproc_reg cp15_regs[] = {
222 NULL, reset_unknown, c13_TID_URO }, 222 NULL, reset_unknown, c13_TID_URO },
223 { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, 223 { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
224 NULL, reset_unknown, c13_TID_PRIV }, 224 NULL, reset_unknown, c13_TID_PRIV },
225
226 /* CNTKCTL: swapped by interrupt.S. */
227 { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
228 NULL, reset_val, c14_CNTKCTL, 0x00000000 },
225}; 229};
226 230
227/* Target specific emulation tables */ 231/* Target specific emulation tables */
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index c5400d2e97ca..8ca87ab0919d 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -94,6 +94,9 @@ ENTRY(__kvm_vcpu_run)
94 94
95 save_host_regs 95 save_host_regs
96 96
97 restore_vgic_state
98 restore_timer_state
99
97 @ Store hardware CP15 state and load guest state 100 @ Store hardware CP15 state and load guest state
98 read_cp15_state store_to_vcpu = 0 101 read_cp15_state store_to_vcpu = 0
99 write_cp15_state read_from_vcpu = 1 102 write_cp15_state read_from_vcpu = 1
@@ -187,6 +190,9 @@ after_vfp_restore:
187 read_cp15_state store_to_vcpu = 1 190 read_cp15_state store_to_vcpu = 1
188 write_cp15_state read_from_vcpu = 0 191 write_cp15_state read_from_vcpu = 0
189 192
193 save_timer_state
194 save_vgic_state
195
190 restore_host_regs 196 restore_host_regs
191 clrex @ Clear exclusive monitor 197 clrex @ Clear exclusive monitor
192 mov r0, r1 @ Return the return code 198 mov r0, r1 @ Return the return code
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 6a95d341e9c5..3c8f2f0b4c5e 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -1,3 +1,5 @@
1#include <linux/irqchip/arm-gic.h>
2
1#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) 3#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
2#define VCPU_USR_SP (VCPU_USR_REG(13)) 4#define VCPU_USR_SP (VCPU_USR_REG(13))
3#define VCPU_USR_LR (VCPU_USR_REG(14)) 5#define VCPU_USR_LR (VCPU_USR_REG(14))
@@ -298,6 +300,14 @@ vcpu .req r0 @ vcpu pointer always in r0
298 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] 300 str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
299 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] 301 str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
300 .endif 302 .endif
303
304 mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
305
306 .if \store_to_vcpu == 0
307 push {r2}
308 .else
309 str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
310 .endif
301.endm 311.endm
302 312
303/* 313/*
@@ -309,6 +319,14 @@ vcpu .req r0 @ vcpu pointer always in r0
309 */ 319 */
310.macro write_cp15_state read_from_vcpu 320.macro write_cp15_state read_from_vcpu
311 .if \read_from_vcpu == 0 321 .if \read_from_vcpu == 0
322 pop {r2}
323 .else
324 ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
325 .endif
326
327 mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
328
329 .if \read_from_vcpu == 0
312 pop {r2-r12} 330 pop {r2-r12}
313 .else 331 .else
314 ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] 332 ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
@@ -369,6 +387,49 @@ vcpu .req r0 @ vcpu pointer always in r0
369 * Assumes vcpu pointer in vcpu reg 387 * Assumes vcpu pointer in vcpu reg
370 */ 388 */
371.macro save_vgic_state 389.macro save_vgic_state
390#ifdef CONFIG_KVM_ARM_VGIC
391 /* Get VGIC VCTRL base into r2 */
392 ldr r2, [vcpu, #VCPU_KVM]
393 ldr r2, [r2, #KVM_VGIC_VCTRL]
394 cmp r2, #0
395 beq 2f
396
397 /* Compute the address of struct vgic_cpu */
398 add r11, vcpu, #VCPU_VGIC_CPU
399
400 /* Save all interesting registers */
401 ldr r3, [r2, #GICH_HCR]
402 ldr r4, [r2, #GICH_VMCR]
403 ldr r5, [r2, #GICH_MISR]
404 ldr r6, [r2, #GICH_EISR0]
405 ldr r7, [r2, #GICH_EISR1]
406 ldr r8, [r2, #GICH_ELRSR0]
407 ldr r9, [r2, #GICH_ELRSR1]
408 ldr r10, [r2, #GICH_APR]
409
410 str r3, [r11, #VGIC_CPU_HCR]
411 str r4, [r11, #VGIC_CPU_VMCR]
412 str r5, [r11, #VGIC_CPU_MISR]
413 str r6, [r11, #VGIC_CPU_EISR]
414 str r7, [r11, #(VGIC_CPU_EISR + 4)]
415 str r8, [r11, #VGIC_CPU_ELRSR]
416 str r9, [r11, #(VGIC_CPU_ELRSR + 4)]
417 str r10, [r11, #VGIC_CPU_APR]
418
419 /* Clear GICH_HCR */
420 mov r5, #0
421 str r5, [r2, #GICH_HCR]
422
423 /* Save list registers */
424 add r2, r2, #GICH_LR0
425 add r3, r11, #VGIC_CPU_LR
426 ldr r4, [r11, #VGIC_CPU_NR_LR]
4271: ldr r6, [r2], #4
428 str r6, [r3], #4
429 subs r4, r4, #1
430 bne 1b
4312:
432#endif
372.endm 433.endm
373 434
374/* 435/*
@@ -377,6 +438,109 @@ vcpu .req r0 @ vcpu pointer always in r0
377 * Assumes vcpu pointer in vcpu reg 438 * Assumes vcpu pointer in vcpu reg
378 */ 439 */
379.macro restore_vgic_state 440.macro restore_vgic_state
441#ifdef CONFIG_KVM_ARM_VGIC
442 /* Get VGIC VCTRL base into r2 */
443 ldr r2, [vcpu, #VCPU_KVM]
444 ldr r2, [r2, #KVM_VGIC_VCTRL]
445 cmp r2, #0
446 beq 2f
447
448 /* Compute the address of struct vgic_cpu */
449 add r11, vcpu, #VCPU_VGIC_CPU
450
451 /* We only restore a minimal set of registers */
452 ldr r3, [r11, #VGIC_CPU_HCR]
453 ldr r4, [r11, #VGIC_CPU_VMCR]
454 ldr r8, [r11, #VGIC_CPU_APR]
455
456 str r3, [r2, #GICH_HCR]
457 str r4, [r2, #GICH_VMCR]
458 str r8, [r2, #GICH_APR]
459
460 /* Restore list registers */
461 add r2, r2, #GICH_LR0
462 add r3, r11, #VGIC_CPU_LR
463 ldr r4, [r11, #VGIC_CPU_NR_LR]
4641: ldr r6, [r3], #4
465 str r6, [r2], #4
466 subs r4, r4, #1
467 bne 1b
4682:
469#endif
470.endm
471
472#define CNTHCTL_PL1PCTEN (1 << 0)
473#define CNTHCTL_PL1PCEN (1 << 1)
474
475/*
476 * Save the timer state onto the VCPU and allow physical timer/counter access
477 * for the host.
478 *
479 * Assumes vcpu pointer in vcpu reg
480 * Clobbers r2-r5
481 */
482.macro save_timer_state
483#ifdef CONFIG_KVM_ARM_TIMER
484 ldr r4, [vcpu, #VCPU_KVM]
485 ldr r2, [r4, #KVM_TIMER_ENABLED]
486 cmp r2, #0
487 beq 1f
488
489 mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
490 str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
491 bic r2, #1 @ Clear ENABLE
492 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
493 isb
494
495 mrrc p15, 3, r2, r3, c14 @ CNTV_CVAL
496 ldr r4, =VCPU_TIMER_CNTV_CVAL
497 add r5, vcpu, r4
498 strd r2, r3, [r5]
499
5001:
501#endif
502 @ Allow physical timer/counter access for the host
503 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
504 orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
505 mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
506.endm
507
508/*
509 * Load the timer state from the VCPU and deny physical timer/counter access
510 * for the host.
511 *
512 * Assumes vcpu pointer in vcpu reg
513 * Clobbers r2-r5
514 */
515.macro restore_timer_state
516 @ Disallow physical timer access for the guest
517 @ Physical counter access is allowed
518 mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
519 orr r2, r2, #CNTHCTL_PL1PCTEN
520 bic r2, r2, #CNTHCTL_PL1PCEN
521 mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
522
523#ifdef CONFIG_KVM_ARM_TIMER
524 ldr r4, [vcpu, #VCPU_KVM]
525 ldr r2, [r4, #KVM_TIMER_ENABLED]
526 cmp r2, #0
527 beq 1f
528
529 ldr r2, [r4, #KVM_TIMER_CNTVOFF]
530 ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
531 mcrr p15, 4, r2, r3, c14 @ CNTVOFF
532
533 ldr r4, =VCPU_TIMER_CNTV_CVAL
534 add r5, vcpu, r4
535 ldrd r2, r3, [r5]
536 mcrr p15, 3, r2, r3, c14 @ CNTV_CVAL
537 isb
538
539 ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
540 and r2, r2, #3
541 mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
5421:
543#endif
380.endm 544.endm
381 545
382.equ vmentry, 0 546.equ vmentry, 0
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 0144baf82904..98a870ff1a5c 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -148,6 +148,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
148 if (mmio.is_write) 148 if (mmio.is_write)
149 memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); 149 memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);
150 150
151 if (vgic_handle_mmio(vcpu, run, &mmio))
152 return 1;
153
151 kvm_prepare_mmio(run, &mmio); 154 kvm_prepare_mmio(run, &mmio);
152 return 0; 155 return 0;
153} 156}
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c
new file mode 100644
index 000000000000..c9a17316e9fe
--- /dev/null
+++ b/arch/arm/kvm/vgic.c
@@ -0,0 +1,1506 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/cpu.h>
20#include <linux/kvm.h>
21#include <linux/kvm_host.h>
22#include <linux/interrupt.h>
23#include <linux/io.h>
24#include <linux/of.h>
25#include <linux/of_address.h>
26#include <linux/of_irq.h>
27
28#include <linux/irqchip/arm-gic.h>
29
30#include <asm/kvm_emulate.h>
31#include <asm/kvm_arm.h>
32#include <asm/kvm_mmu.h>
33
34/*
35 * How the whole thing works (courtesy of Christoffer Dall):
36 *
37 * - At any time, the dist->irq_pending_on_cpu is the oracle that knows if
38 * something is pending
39 * - VGIC pending interrupts are stored on the vgic.irq_state vgic
 40 * bitmap (this bitmap is updated by both userland ioctls and guest
41 * mmio ops, and other in-kernel peripherals such as the
42 * arch. timers) and indicate the 'wire' state.
43 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
44 * recalculated
45 * - To calculate the oracle, we need info for each cpu from
46 * compute_pending_for_cpu, which considers:
47 * - PPI: dist->irq_state & dist->irq_enable
48 * - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 49 * - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
50 * registers, stored on each vcpu. We only keep one bit of
51 * information per interrupt, making sure that only one vcpu can
52 * accept the interrupt.
53 * - The same is true when injecting an interrupt, except that we only
54 * consider a single interrupt at a time. The irq_spi_cpu array
55 * contains the target CPU for each SPI.
56 *
57 * The handling of level interrupts adds some extra complexity. We
58 * need to track when the interrupt has been EOIed, so we can sample
59 * the 'line' again. This is achieved as such:
60 *
61 * - When a level interrupt is moved onto a vcpu, the corresponding
62 * bit in irq_active is set. As long as this bit is set, the line
63 * will be ignored for further interrupts. The interrupt is injected
64 * into the vcpu with the GICH_LR_EOI bit set (generate a
65 * maintenance interrupt on EOI).
66 * - When the interrupt is EOIed, the maintenance interrupt fires,
 67 * and clears the corresponding bit in irq_active. This allows the
68 * interrupt line to be sampled again.
69 */
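Distilled to plain C, the per-cpu half of that oracle is just a pair of masked ANDs; a standalone sketch with invented names, not the in-tree types:

    #include <stdbool.h>
    #include <stdint.h>

    /* One 32-bit word of private (SGI/PPI) state and one of shared (SPI)
     * state, gated exactly as described above. */
    static bool cpu_has_pending(uint32_t priv_state, uint32_t priv_enable,
                                uint32_t spi_state, uint32_t spi_enable,
                                uint32_t spi_targets_this_cpu)
    {
            uint32_t priv = priv_state & priv_enable;
            uint32_t spi  = spi_state & spi_enable & spi_targets_this_cpu;

            return priv || spi;
    }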
70
71#define VGIC_ADDR_UNDEF (-1)
72#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
73
74/* Physical address of vgic virtual cpu interface */
75static phys_addr_t vgic_vcpu_base;
76
77/* Virtual control interface base address */
78static void __iomem *vgic_vctrl_base;
79
80static struct device_node *vgic_node;
81
82#define ACCESS_READ_VALUE (1 << 0)
83#define ACCESS_READ_RAZ (0 << 0)
84#define ACCESS_READ_MASK(x) ((x) & (1 << 0))
85#define ACCESS_WRITE_IGNORED (0 << 1)
86#define ACCESS_WRITE_SETBIT (1 << 1)
87#define ACCESS_WRITE_CLEARBIT (2 << 1)
88#define ACCESS_WRITE_VALUE (3 << 1)
89#define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
90
91static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
92static void vgic_update_state(struct kvm *kvm);
93static void vgic_kick_vcpus(struct kvm *kvm);
94static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
95static u32 vgic_nr_lr;
96
97static unsigned int vgic_maint_irq;
98
99static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
100 int cpuid, u32 offset)
101{
102 offset >>= 2;
103 if (!offset)
104 return x->percpu[cpuid].reg;
105 else
106 return x->shared.reg + offset - 1;
107}
108
109static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
110 int cpuid, int irq)
111{
112 if (irq < VGIC_NR_PRIVATE_IRQS)
113 return test_bit(irq, x->percpu[cpuid].reg_ul);
114
115 return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
116}
117
118static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
119 int irq, int val)
120{
121 unsigned long *reg;
122
123 if (irq < VGIC_NR_PRIVATE_IRQS) {
124 reg = x->percpu[cpuid].reg_ul;
125 } else {
126 reg = x->shared.reg_ul;
127 irq -= VGIC_NR_PRIVATE_IRQS;
128 }
129
130 if (val)
131 set_bit(irq, reg);
132 else
133 clear_bit(irq, reg);
134}
135
136static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
137{
138 if (unlikely(cpuid >= VGIC_MAX_CPUS))
139 return NULL;
140 return x->percpu[cpuid].reg_ul;
141}
142
143static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
144{
145 return x->shared.reg_ul;
146}
147
148static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
149{
150 offset >>= 2;
151 BUG_ON(offset > (VGIC_NR_IRQS / 4));
152 if (offset < 4)
153 return x->percpu[cpuid] + offset;
154 else
155 return x->shared + offset - 8;
156}
157
158#define VGIC_CFG_LEVEL 0
159#define VGIC_CFG_EDGE 1
160
161static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
162{
163 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
164 int irq_val;
165
166 irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
167 return irq_val == VGIC_CFG_EDGE;
168}
169
170static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
171{
172 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
173
174 return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
175}
176
177static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
178{
179 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
180
181 return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
182}
183
184static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
185{
186 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
187
188 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
189}
190
191static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
192{
193 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
194
195 vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
196}
197
198static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
199{
200 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
201
202 return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
203}
204
205static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
206{
207 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
208
209 vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
210}
211
212static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
213{
214 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
215
216 vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
217}
218
219static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
220{
221 if (irq < VGIC_NR_PRIVATE_IRQS)
222 set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
223 else
224 set_bit(irq - VGIC_NR_PRIVATE_IRQS,
225 vcpu->arch.vgic_cpu.pending_shared);
226}
227
228static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
229{
230 if (irq < VGIC_NR_PRIVATE_IRQS)
231 clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
232 else
233 clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
234 vcpu->arch.vgic_cpu.pending_shared);
235}
236
237static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
238{
239 return *((u32 *)mmio->data) & mask;
240}
241
242static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
243{
244 *((u32 *)mmio->data) = value & mask;
245}
246
247/**
248 * vgic_reg_access - access vgic register
249 * @mmio: pointer to the data describing the mmio access
250 * @reg: pointer to the virtual backing of vgic distributor data
251 * @offset: least significant 2 bits used for word offset
252 * @mode: ACCESS_ mode (see defines above)
253 *
254 * Helper to make vgic register access easier using one of the access
255 * modes defined for vgic register access
256 * (read,raz,write-ignored,setbit,clearbit,write)
257 */
258static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
259 phys_addr_t offset, int mode)
260{
261 int word_offset = (offset & 3) * 8;
262 u32 mask = (1UL << (mmio->len * 8)) - 1;
263 u32 regval;
264
265 /*
266 * Any alignment fault should have been delivered to the guest
267 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
268 */
269
270 if (reg) {
271 regval = *reg;
272 } else {
273 BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
274 regval = 0;
275 }
276
277 if (mmio->is_write) {
278 u32 data = mmio_data_read(mmio, mask) << word_offset;
279 switch (ACCESS_WRITE_MASK(mode)) {
280 case ACCESS_WRITE_IGNORED:
281 return;
282
283 case ACCESS_WRITE_SETBIT:
284 regval |= data;
285 break;
286
287 case ACCESS_WRITE_CLEARBIT:
288 regval &= ~data;
289 break;
290
291 case ACCESS_WRITE_VALUE:
292 regval = (regval & ~(mask << word_offset)) | data;
293 break;
294 }
295 *reg = regval;
296 } else {
297 switch (ACCESS_READ_MASK(mode)) {
298 case ACCESS_READ_RAZ:
299 regval = 0;
300 /* fall through */
301
302 case ACCESS_READ_VALUE:
303 mmio_data_write(mmio, mask, regval >> word_offset);
304 }
305 }
306}
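To see the mask/word_offset arithmetic in action, here is a standalone sketch of the ACCESS_WRITE_VALUE path (simplified, not the in-tree function): a 1-byte store of 0xab at byte 2 of 0x11223344 yields 0x11ab3344.

    #include <stdint.h>

    /* Merge a len-byte store at byte (offset & 3) into a 32-bit register */
    static uint32_t write_value(uint32_t reg, uint32_t data, int len, int offset)
    {
            int word_offset = (offset & 3) * 8;
            uint32_t mask = (len == 4) ? 0xffffffffU : (1U << (len * 8)) - 1;

            return (reg & ~(mask << word_offset)) |
                   ((data & mask) << word_offset);
    }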
307
308static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
309 struct kvm_exit_mmio *mmio, phys_addr_t offset)
310{
311 u32 reg;
312 u32 word_offset = offset & 3;
313
314 switch (offset & ~3) {
315 case 0: /* CTLR */
316 reg = vcpu->kvm->arch.vgic.enabled;
317 vgic_reg_access(mmio, &reg, word_offset,
318 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
319 if (mmio->is_write) {
320 vcpu->kvm->arch.vgic.enabled = reg & 1;
321 vgic_update_state(vcpu->kvm);
322 return true;
323 }
324 break;
325
326 case 4: /* TYPER */
327 reg = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
328 reg |= (VGIC_NR_IRQS >> 5) - 1;
329 vgic_reg_access(mmio, &reg, word_offset,
330 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
331 break;
332
333 case 8: /* IIDR */
334 reg = 0x4B00043B;
335 vgic_reg_access(mmio, &reg, word_offset,
336 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
337 break;
338 }
339
340 return false;
341}
342
343static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
344 struct kvm_exit_mmio *mmio, phys_addr_t offset)
345{
346 vgic_reg_access(mmio, NULL, offset,
347 ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
348 return false;
349}
350
351static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
352 struct kvm_exit_mmio *mmio,
353 phys_addr_t offset)
354{
355 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
356 vcpu->vcpu_id, offset);
357 vgic_reg_access(mmio, reg, offset,
358 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
359 if (mmio->is_write) {
360 vgic_update_state(vcpu->kvm);
361 return true;
362 }
363
364 return false;
365}
366
367static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
368 struct kvm_exit_mmio *mmio,
369 phys_addr_t offset)
370{
371 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
372 vcpu->vcpu_id, offset);
373 vgic_reg_access(mmio, reg, offset,
374 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
375 if (mmio->is_write) {
376 if (offset < 4) /* Force SGI enabled */
377 *reg |= 0xffff;
378 vgic_retire_disabled_irqs(vcpu);
379 vgic_update_state(vcpu->kvm);
380 return true;
381 }
382
383 return false;
384}
385
386static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
387 struct kvm_exit_mmio *mmio,
388 phys_addr_t offset)
389{
390 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
391 vcpu->vcpu_id, offset);
392 vgic_reg_access(mmio, reg, offset,
393 ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
394 if (mmio->is_write) {
395 vgic_update_state(vcpu->kvm);
396 return true;
397 }
398
399 return false;
400}
401
402static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
403 struct kvm_exit_mmio *mmio,
404 phys_addr_t offset)
405{
406 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
407 vcpu->vcpu_id, offset);
408 vgic_reg_access(mmio, reg, offset,
409 ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
410 if (mmio->is_write) {
411 vgic_update_state(vcpu->kvm);
412 return true;
413 }
414
415 return false;
416}
417
418static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
419 struct kvm_exit_mmio *mmio,
420 phys_addr_t offset)
421{
422 u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
423 vcpu->vcpu_id, offset);
424 vgic_reg_access(mmio, reg, offset,
425 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
426 return false;
427}
428
429#define GICD_ITARGETSR_SIZE 32
430#define GICD_CPUTARGETS_BITS 8
431#define GICD_IRQS_PER_ITARGETSR (GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
432static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
433{
434 struct vgic_dist *dist = &kvm->arch.vgic;
435 struct kvm_vcpu *vcpu;
436 int i, c;
437 unsigned long *bmap;
438 u32 val = 0;
439
440 irq -= VGIC_NR_PRIVATE_IRQS;
441
442 kvm_for_each_vcpu(c, vcpu, kvm) {
443 bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
444 for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
445 if (test_bit(irq + i, bmap))
446 val |= 1 << (c + i * 8);
447 }
448
449 return val;
450}
451
452static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
453{
454 struct vgic_dist *dist = &kvm->arch.vgic;
455 struct kvm_vcpu *vcpu;
456 int i, c;
457 unsigned long *bmap;
458 u32 target;
459
460 irq -= VGIC_NR_PRIVATE_IRQS;
461
462 /*
463 * Pick the LSB in each byte. This ensures we target exactly
464 * one vcpu per IRQ. If the byte is null, assume we target
465 * CPU0.
466 */
467 for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
468 int shift = i * GICD_CPUTARGETS_BITS;
469 target = ffs((val >> shift) & 0xffU);
470 target = target ? (target - 1) : 0;
471 dist->irq_spi_cpu[irq + i] = target;
472 kvm_for_each_vcpu(c, vcpu, kvm) {
473 bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
474 if (c == target)
475 set_bit(irq + i, bmap);
476 else
477 clear_bit(irq + i, bmap);
478 }
479 }
480}
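The LSB-picking described in the comment is easy to check in isolation; a hedged sketch with example values:

    #include <strings.h>    /* ffs() */

    /* Pick one target vcpu from an ITARGETSR byte, as the loop above does:
     * 0x06 (cpus 1 and 2 set) picks cpu 1; an empty byte falls back to cpu 0. */
    static int pick_target(unsigned char itargetsr_byte)
    {
            int target = ffs(itargetsr_byte); /* 1-based index of lowest set bit */

            return target ? target - 1 : 0;
    }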
481
482static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
483 struct kvm_exit_mmio *mmio,
484 phys_addr_t offset)
485{
486 u32 reg;
487
 488 /* We treat the banked interrupt targets as read-only */
489 if (offset < 32) {
490 u32 roreg = 1 << vcpu->vcpu_id;
491 roreg |= roreg << 8;
492 roreg |= roreg << 16;
493
494 vgic_reg_access(mmio, &roreg, offset,
495 ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
496 return false;
497 }
498
499 reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
500 vgic_reg_access(mmio, &reg, offset,
501 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
502 if (mmio->is_write) {
503 vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
504 vgic_update_state(vcpu->kvm);
505 return true;
506 }
507
508 return false;
509}
510
511static u32 vgic_cfg_expand(u16 val)
512{
513 u32 res = 0;
514 int i;
515
516 /*
517 * Turn a 16bit value like abcd...mnop into a 32bit word
518 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
519 */
520 for (i = 0; i < 16; i++)
521 res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);
522
523 return res;
524}
525
526static u16 vgic_cfg_compress(u32 val)
527{
528 u16 res = 0;
529 int i;
530
531 /*
 532 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into a 16bit value like
 533 * abcd...mnop, which is what we really care about.
534 */
535 for (i = 0; i < 16; i++)
536 res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;
537
538 return res;
539}
540
541/*
542 * The distributor uses 2 bits per IRQ for the CFG register, but the
543 * LSB is always 0. As such, we only keep the upper bit, and use the
544 * two above functions to compress/expand the bits
545 */
546static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
547 struct kvm_exit_mmio *mmio, phys_addr_t offset)
548{
549 u32 val;
550 u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
551 vcpu->vcpu_id, offset >> 1);
552 if (offset & 2)
553 val = *reg >> 16;
554 else
555 val = *reg & 0xffff;
556
557 val = vgic_cfg_expand(val);
558 vgic_reg_access(mmio, &val, offset,
559 ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
560 if (mmio->is_write) {
561 if (offset < 4) {
562 *reg = ~0U; /* Force PPIs/SGIs to 1 */
563 return false;
564 }
565
566 val = vgic_cfg_compress(val);
567 if (offset & 2) {
568 *reg &= 0xffff;
569 *reg |= val << 16;
570 } else {
571 *reg &= 0xffff << 16;
572 *reg |= val;
573 }
574 }
575
576 return false;
577}
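A standalone round trip makes the encoding concrete (illustrative values, same logic as the two helpers above): expanding 0x0003 sets bits 1 and 3, i.e. IRQs 0 and 1 edge-triggered, and compressing 0x0000000a gives back 0x0003.

    #include <assert.h>
    #include <stdint.h>

    static uint32_t cfg_expand(uint16_t val)
    {
            uint32_t res = 0;
            int i;

            for (i = 0; i < 16; i++)
                    res |= (uint32_t)((val >> i) & 1) << (2 * i + 1);
            return res;
    }

    static uint16_t cfg_compress(uint32_t val)
    {
            uint16_t res = 0;
            int i;

            for (i = 0; i < 16; i++)
                    res |= ((val >> (2 * i + 1)) & 1) << i;
            return res;
    }

    int main(void)
    {
            assert(cfg_expand(0x0003) == 0x0000000a);
            assert(cfg_compress(0x0000000a) == 0x0003);
            return 0;
    }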
578
579static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
580 struct kvm_exit_mmio *mmio, phys_addr_t offset)
581{
582 u32 reg;
583 vgic_reg_access(mmio, &reg, offset,
584 ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
585 if (mmio->is_write) {
586 vgic_dispatch_sgi(vcpu, reg);
587 vgic_update_state(vcpu->kvm);
588 return true;
589 }
590
591 return false;
592}
593
594/*
595 * I would have liked to use the kvm_bus_io_*() API instead, but it
596 * cannot cope with banked registers (only the VM pointer is passed
597 * around, and we need the vcpu). One of these days, someone please
598 * fix it!
599 */
600struct mmio_range {
601 phys_addr_t base;
602 unsigned long len;
603 bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
604 phys_addr_t offset);
605};
606
607static const struct mmio_range vgic_ranges[] = {
608 {
609 .base = GIC_DIST_CTRL,
610 .len = 12,
611 .handle_mmio = handle_mmio_misc,
612 },
613 {
614 .base = GIC_DIST_IGROUP,
615 .len = VGIC_NR_IRQS / 8,
616 .handle_mmio = handle_mmio_raz_wi,
617 },
618 {
619 .base = GIC_DIST_ENABLE_SET,
620 .len = VGIC_NR_IRQS / 8,
621 .handle_mmio = handle_mmio_set_enable_reg,
622 },
623 {
624 .base = GIC_DIST_ENABLE_CLEAR,
625 .len = VGIC_NR_IRQS / 8,
626 .handle_mmio = handle_mmio_clear_enable_reg,
627 },
628 {
629 .base = GIC_DIST_PENDING_SET,
630 .len = VGIC_NR_IRQS / 8,
631 .handle_mmio = handle_mmio_set_pending_reg,
632 },
633 {
634 .base = GIC_DIST_PENDING_CLEAR,
635 .len = VGIC_NR_IRQS / 8,
636 .handle_mmio = handle_mmio_clear_pending_reg,
637 },
638 {
639 .base = GIC_DIST_ACTIVE_SET,
640 .len = VGIC_NR_IRQS / 8,
641 .handle_mmio = handle_mmio_raz_wi,
642 },
643 {
644 .base = GIC_DIST_ACTIVE_CLEAR,
645 .len = VGIC_NR_IRQS / 8,
646 .handle_mmio = handle_mmio_raz_wi,
647 },
648 {
649 .base = GIC_DIST_PRI,
650 .len = VGIC_NR_IRQS,
651 .handle_mmio = handle_mmio_priority_reg,
652 },
653 {
654 .base = GIC_DIST_TARGET,
655 .len = VGIC_NR_IRQS,
656 .handle_mmio = handle_mmio_target_reg,
657 },
658 {
659 .base = GIC_DIST_CONFIG,
660 .len = VGIC_NR_IRQS / 4,
661 .handle_mmio = handle_mmio_cfg_reg,
662 },
663 {
664 .base = GIC_DIST_SOFTINT,
665 .len = 4,
666 .handle_mmio = handle_mmio_sgi_reg,
667 },
668 {}
669};
670
671static const
672struct mmio_range *find_matching_range(const struct mmio_range *ranges,
673 struct kvm_exit_mmio *mmio,
674 phys_addr_t base)
675{
676 const struct mmio_range *r = ranges;
677 phys_addr_t addr = mmio->phys_addr - base;
678
679 while (r->len) {
680 if (addr >= r->base &&
681 (addr + mmio->len) <= (r->base + r->len))
682 return r;
683 r++;
684 }
685
686 return NULL;
687}
688
689/**
690 * vgic_handle_mmio - handle an in-kernel MMIO access
691 * @vcpu: pointer to the vcpu performing the access
692 * @run: pointer to the kvm_run structure
693 * @mmio: pointer to the data describing the access
694 *
695 * returns true if the MMIO access has been performed in kernel space,
696 * and false if it needs to be emulated in user space.
697 */
698bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
699 struct kvm_exit_mmio *mmio)
700{
701 const struct mmio_range *range;
702 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
703 unsigned long base = dist->vgic_dist_base;
704 bool updated_state;
705 unsigned long offset;
706
707 if (!irqchip_in_kernel(vcpu->kvm) ||
708 mmio->phys_addr < base ||
709 (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
710 return false;
711
712 /* We don't support ldrd / strd or ldm / stm to the emulated vgic */
713 if (mmio->len > 4) {
714 kvm_inject_dabt(vcpu, mmio->phys_addr);
715 return true;
716 }
717
718 range = find_matching_range(vgic_ranges, mmio, base);
719 if (unlikely(!range || !range->handle_mmio)) {
720 pr_warn("Unhandled access %d %08llx %d\n",
721 mmio->is_write, mmio->phys_addr, mmio->len);
722 return false;
723 }
724
725 spin_lock(&vcpu->kvm->arch.vgic.lock);
726 offset = mmio->phys_addr - range->base - base;
727 updated_state = range->handle_mmio(vcpu, mmio, offset);
728 spin_unlock(&vcpu->kvm->arch.vgic.lock);
729 kvm_prepare_mmio(run, mmio);
730 kvm_handle_mmio_return(vcpu, run);
731
732 if (updated_state)
733 vgic_kick_vcpus(vcpu->kvm);
734
735 return true;
736}
737
738static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
739{
740 struct kvm *kvm = vcpu->kvm;
741 struct vgic_dist *dist = &kvm->arch.vgic;
742 int nrcpus = atomic_read(&kvm->online_vcpus);
743 u8 target_cpus;
744 int sgi, mode, c, vcpu_id;
745
746 vcpu_id = vcpu->vcpu_id;
747
748 sgi = reg & 0xf;
749 target_cpus = (reg >> 16) & 0xff;
750 mode = (reg >> 24) & 3;
751
752 switch (mode) {
753 case 0:
754 if (!target_cpus)
755 return;
756
757 case 1:
758 target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
759 break;
760
761 case 2:
762 target_cpus = 1 << vcpu_id;
763 break;
764 }
765
766 kvm_for_each_vcpu(c, vcpu, kvm) {
767 if (target_cpus & 1) {
768 /* Flag the SGI as pending */
769 vgic_dist_irq_set(vcpu, sgi);
770 dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
771 kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
772 }
773
774 target_cpus >>= 1;
775 }
776}
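The reg value decoded here is the guest's write to GICD_SGIR. A hedged sketch of the field layout (standalone, simplified):

    #include <stdint.h>

    struct sgir_fields {
            int sgi;             /* bits [3:0]:   SGI number */
            uint8_t target_list; /* bits [23:16]: CPU target list */
            int filter;          /* bits [25:24]: 0 list, 1 all-but-self, 2 self */
    };

    /* e.g. 0x00020003 -> SGI 3 aimed at the CPUs in list 0x02 (vcpu 1) */
    static struct sgir_fields decode_sgir(uint32_t reg)
    {
            struct sgir_fields f = {
                    .sgi         = reg & 0xf,
                    .target_list = (reg >> 16) & 0xff,
                    .filter      = (reg >> 24) & 3,
            };
            return f;
    }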
777
778static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
779{
780 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
781 unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
782 unsigned long pending_private, pending_shared;
783 int vcpu_id;
784
785 vcpu_id = vcpu->vcpu_id;
786 pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
787 pend_shared = vcpu->arch.vgic_cpu.pending_shared;
788
789 pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
790 enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
791 bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);
792
793 pending = vgic_bitmap_get_shared_map(&dist->irq_state);
794 enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
795 bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
796 bitmap_and(pend_shared, pend_shared,
797 vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
798 VGIC_NR_SHARED_IRQS);
799
800 pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
801 pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
802 return (pending_private < VGIC_NR_PRIVATE_IRQS ||
803 pending_shared < VGIC_NR_SHARED_IRQS);
804}
805
806/*
807 * Update the interrupt state and determine which CPUs have pending
808 * interrupts. Must be called with distributor lock held.
809 */
810static void vgic_update_state(struct kvm *kvm)
811{
812 struct vgic_dist *dist = &kvm->arch.vgic;
813 struct kvm_vcpu *vcpu;
814 int c;
815
816 if (!dist->enabled) {
817 set_bit(0, &dist->irq_pending_on_cpu);
818 return;
819 }
820
821 kvm_for_each_vcpu(c, vcpu, kvm) {
822 if (compute_pending_for_cpu(vcpu)) {
823 pr_debug("CPU%d has pending interrupts\n", c);
824 set_bit(c, &dist->irq_pending_on_cpu);
825 }
826 }
827}
828
829#define LR_CPUID(lr) \
830 (((lr) & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT)
831#define MK_LR_PEND(src, irq) \
832 (GICH_LR_PENDING_BIT | ((src) << GICH_LR_PHYSID_CPUID_SHIFT) | (irq))
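A quick self-check of the list register packing (the GICH_LR bit positions below are assumptions, matching include/linux/irqchip/arm-gic.h):

    #include <assert.h>
    #include <stdint.h>

    #define LR_PENDING_BIT  (1U << 28)  /* assumed GICH_LR_PENDING_BIT */
    #define LR_CPUID_SHIFT  10          /* assumed GICH_LR_PHYSID_CPUID_SHIFT */

    int main(void)
    {
            /* MK_LR_PEND(src=2, irq=1): a pending SGI1 sourced from vcpu 2 */
            uint32_t lr = LR_PENDING_BIT | (2U << LR_CPUID_SHIFT) | 1;

            assert(((lr >> LR_CPUID_SHIFT) & 7) == 2); /* LR_CPUID() */
            assert((lr & 0x3ff) == 1);                 /* GICH_LR_VIRTUALID */
            return 0;
    }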
833
834/*
835 * An interrupt may have been disabled after being made pending on the
836 * CPU interface (the classic case is a timer running while we're
837 * rebooting the guest - the interrupt would kick as soon as the CPU
838 * interface gets enabled, with deadly consequences).
839 *
840 * The solution is to examine already active LRs, and check the
841 * interrupt is still enabled. If not, just retire it.
842 */
843static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
844{
845 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
846 int lr;
847
848 for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
849 int irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
850
851 if (!vgic_irq_is_enabled(vcpu, irq)) {
852 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
853 clear_bit(lr, vgic_cpu->lr_used);
854 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_STATE;
855 if (vgic_irq_is_active(vcpu, irq))
856 vgic_irq_clear_active(vcpu, irq);
857 }
858 }
859}
860
861/*
862 * Queue an interrupt to a CPU virtual interface. Return true on success,
863 * or false if it wasn't possible to queue it.
864 */
865static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
866{
867 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
868 int lr;
869
870 /* Sanitize the input... */
871 BUG_ON(sgi_source_id & ~7);
872 BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
873 BUG_ON(irq >= VGIC_NR_IRQS);
874
875 kvm_debug("Queue IRQ%d\n", irq);
876
877 lr = vgic_cpu->vgic_irq_lr_map[irq];
878
879 /* Do we have an active interrupt for the same CPUID? */
880 if (lr != LR_EMPTY &&
881 (LR_CPUID(vgic_cpu->vgic_lr[lr]) == sgi_source_id)) {
882 kvm_debug("LR%d piggyback for IRQ%d %x\n",
883 lr, irq, vgic_cpu->vgic_lr[lr]);
884 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
885 vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
886
887 goto out;
888 }
889
890 /* Try to use another LR for this interrupt */
891 lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
892 vgic_cpu->nr_lr);
893 if (lr >= vgic_cpu->nr_lr)
894 return false;
895
896 kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
897 vgic_cpu->vgic_lr[lr] = MK_LR_PEND(sgi_source_id, irq);
898 vgic_cpu->vgic_irq_lr_map[irq] = lr;
899 set_bit(lr, vgic_cpu->lr_used);
900
901out:
902 if (!vgic_irq_is_edge(vcpu, irq))
903 vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
904
905 return true;
906}
907
908static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
909{
910 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
911 unsigned long sources;
912 int vcpu_id = vcpu->vcpu_id;
913 int c;
914
915 sources = dist->irq_sgi_sources[vcpu_id][irq];
916
917 for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
918 if (vgic_queue_irq(vcpu, c, irq))
919 clear_bit(c, &sources);
920 }
921
922 dist->irq_sgi_sources[vcpu_id][irq] = sources;
923
924 /*
925 * If the sources bitmap has been cleared it means that we
926 * could queue all the SGIs onto link registers (see the
927 * clear_bit above), and therefore we are done with them in
928 * our emulated gic and can get rid of them.
929 */
930 if (!sources) {
931 vgic_dist_irq_clear(vcpu, irq);
932 vgic_cpu_irq_clear(vcpu, irq);
933 return true;
934 }
935
936 return false;
937}
938
939static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
940{
941 if (vgic_irq_is_active(vcpu, irq))
942 return true; /* level interrupt, already queued */
943
944 if (vgic_queue_irq(vcpu, 0, irq)) {
945 if (vgic_irq_is_edge(vcpu, irq)) {
946 vgic_dist_irq_clear(vcpu, irq);
947 vgic_cpu_irq_clear(vcpu, irq);
948 } else {
949 vgic_irq_set_active(vcpu, irq);
950 }
951
952 return true;
953 }
954
955 return false;
956}
957
958/*
959 * Fill the list registers with pending interrupts before running the
960 * guest.
961 */
962static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
963{
964 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
965 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
966 int i, vcpu_id;
967 int overflow = 0;
968
969 vcpu_id = vcpu->vcpu_id;
970
971 /*
972 * We may not have any pending interrupt, or the interrupts
973 * may have been serviced from another vcpu. In all cases,
974 * move along.
975 */
976 if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
977 pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
978 goto epilog;
979 }
980
981 /* SGIs */
982 for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
983 if (!vgic_queue_sgi(vcpu, i))
984 overflow = 1;
985 }
986
987 /* PPIs */
988 for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
989 if (!vgic_queue_hwirq(vcpu, i))
990 overflow = 1;
991 }
992
993 /* SPIs */
994 for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
995 if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
996 overflow = 1;
997 }
998
999epilog:
1000 if (overflow) {
1001 vgic_cpu->vgic_hcr |= GICH_HCR_UIE;
1002 } else {
1003 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1004 /*
1005 * We're about to run this VCPU, and we've consumed
1006 * everything the distributor had in store for
1007 * us. Claim we don't have anything pending. We'll
1008 * adjust that if needed while exiting.
1009 */
1010 clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
1011 }
1012}
1013
1014static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1015{
1016 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1017 bool level_pending = false;
1018
1019 kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
1020
1021 /*
1022 * We do not need to take the distributor lock here, since the only
1023 * action we perform is clearing the irq_active_bit for an EOIed
1024 * level interrupt. There is a potential race with
1025 * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
1026 * check if the interrupt is already active. Two possibilities:
1027 *
1028 * - The queuing is occurring on the same vcpu: cannot happen,
1029 * as we're already in the context of this vcpu, and
1030 * executing the handler
1031 * - The interrupt has been migrated to another vcpu, and we
1032 * ignore this interrupt for this run. Big deal. It is still
1033 * pending though, and will get considered when this vcpu
1034 * exits.
1035 */
1036 if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
1037 /*
1038 * Some level interrupts have been EOIed. Clear their
1039 * active bit.
1040 */
1041 int lr, irq;
1042
1043 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_eisr,
1044 vgic_cpu->nr_lr) {
1045 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1046
1047 vgic_irq_clear_active(vcpu, irq);
1048 vgic_cpu->vgic_lr[lr] &= ~GICH_LR_EOI;
1049
1050 /* Any additional pending interrupt? */
1051 if (vgic_dist_irq_is_pending(vcpu, irq)) {
1052 vgic_cpu_irq_set(vcpu, irq);
1053 level_pending = true;
1054 } else {
1055 vgic_cpu_irq_clear(vcpu, irq);
1056 }
1057 }
1058 }
1059
1060 if (vgic_cpu->vgic_misr & GICH_MISR_U)
1061 vgic_cpu->vgic_hcr &= ~GICH_HCR_UIE;
1062
1063 return level_pending;
1064}
1065
1066/*
1067 * Sync back the VGIC state after a guest run. We do not really touch
1068 * the distributor here (the irq_pending_on_cpu bit is safe to set),
1069 * so there is no need for taking its lock.
1070 */
1071static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1072{
1073 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1074 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1075 int lr, pending;
1076 bool level_pending;
1077
1078 level_pending = vgic_process_maintenance(vcpu);
1079
1080 /* Clear mappings for empty LRs */
1081 for_each_set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr,
1082 vgic_cpu->nr_lr) {
1083 int irq;
1084
1085 if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
1086 continue;
1087
1088 irq = vgic_cpu->vgic_lr[lr] & GICH_LR_VIRTUALID;
1089
1090 BUG_ON(irq >= VGIC_NR_IRQS);
1091 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
1092 }
1093
1094 /* Check if we still have something up our sleeve... */
1095 pending = find_first_zero_bit((unsigned long *)vgic_cpu->vgic_elrsr,
1096 vgic_cpu->nr_lr);
1097 if (level_pending || pending < vgic_cpu->nr_lr)
1098 set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1099}
1100
1101void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
1102{
1103 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1104
1105 if (!irqchip_in_kernel(vcpu->kvm))
1106 return;
1107
1108 spin_lock(&dist->lock);
1109 __kvm_vgic_flush_hwstate(vcpu);
1110 spin_unlock(&dist->lock);
1111}
1112
1113void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
1114{
1115 if (!irqchip_in_kernel(vcpu->kvm))
1116 return;
1117
1118 __kvm_vgic_sync_hwstate(vcpu);
1119}
1120
1121int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
1122{
1123 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1124
1125 if (!irqchip_in_kernel(vcpu->kvm))
1126 return 0;
1127
1128 return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
1129}
1130
1131static void vgic_kick_vcpus(struct kvm *kvm)
1132{
1133 struct kvm_vcpu *vcpu;
1134 int c;
1135
1136 /*
1137 * We've injected an interrupt, time to find out who deserves
1138 * a good kick...
1139 */
1140 kvm_for_each_vcpu(c, vcpu, kvm) {
1141 if (kvm_vgic_vcpu_pending_irq(vcpu))
1142 kvm_vcpu_kick(vcpu);
1143 }
1144}
1145
1146static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
1147{
1148 int is_edge = vgic_irq_is_edge(vcpu, irq);
1149 int state = vgic_dist_irq_is_pending(vcpu, irq);
1150
1151 /*
1152 * Only inject an interrupt if:
1153 * - edge triggered and we have a rising edge
1154 * - level triggered and we change level
1155 */
1156 if (is_edge)
1157 return level > state;
1158 else
1159 return level != state;
1160}
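The same condition as a standalone predicate, with its truth table spelled out (sketch):

    #include <stdbool.h>

    /* edge:  inject only on a rising edge  (level 1 while state 0)
     * level: inject only on a level change (level != state) */
    static bool should_inject(bool is_edge, bool level, bool state)
    {
            return is_edge ? (level && !state) : (level != state);
    }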
1161
1162static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
1163 unsigned int irq_num, bool level)
1164{
1165 struct vgic_dist *dist = &kvm->arch.vgic;
1166 struct kvm_vcpu *vcpu;
1167 int is_edge, is_level;
1168 int enabled;
1169 bool ret = true;
1170
1171 spin_lock(&dist->lock);
1172
1173 vcpu = kvm_get_vcpu(kvm, cpuid);
1174 is_edge = vgic_irq_is_edge(vcpu, irq_num);
1175 is_level = !is_edge;
1176
1177 if (!vgic_validate_injection(vcpu, irq_num, level)) {
1178 ret = false;
1179 goto out;
1180 }
1181
1182 if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
1183 cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
1184 vcpu = kvm_get_vcpu(kvm, cpuid);
1185 }
1186
1187 kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);
1188
1189 if (level)
1190 vgic_dist_irq_set(vcpu, irq_num);
1191 else
1192 vgic_dist_irq_clear(vcpu, irq_num);
1193
1194 enabled = vgic_irq_is_enabled(vcpu, irq_num);
1195
1196 if (!enabled) {
1197 ret = false;
1198 goto out;
1199 }
1200
1201 if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
1202 /*
1203 * Level interrupt in progress, will be picked up
1204 * when EOId.
1205 */
1206 ret = false;
1207 goto out;
1208 }
1209
1210 if (level) {
1211 vgic_cpu_irq_set(vcpu, irq_num);
1212 set_bit(cpuid, &dist->irq_pending_on_cpu);
1213 }
1214
1215out:
1216 spin_unlock(&dist->lock);
1217
1218 return ret;
1219}
1220
1221/**
1222 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
1223 * @kvm: The VM structure pointer
1224 * @cpuid: The CPU for PPIs
1225 * @irq_num: The IRQ number that is assigned to the device
1226 * @level: Edge-triggered: true: to trigger the interrupt
1227 * false: to ignore the call
1228 * Level-sensitive: true: activates an interrupt
1229 * false: deactivates an interrupt
1230 *
1231 * The GIC is not concerned with devices being active-LOW or active-HIGH for
1232 * level-sensitive interrupts. You can think of the level parameter as 1
1233 * being HIGH and 0 being LOW and all devices being active-HIGH.
1234 */
1235int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
1236 bool level)
1237{
1238 if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
1239 vgic_kick_vcpus(kvm);
1240
1241 return 0;
1242}
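An in-kernel device model would drive a level-sensitive SPI through this entry point roughly as follows (hypothetical device and IRQ number; SPI n is irq_num 32 + n, and the cpuid argument is ignored for SPIs):

    /* Hypothetical device raising and lowering its (level-sensitive) line */
    static void example_device_set_line(struct kvm *kvm, bool asserted)
    {
            kvm_vgic_inject_irq(kvm, 0, 32 + 5, asserted);
    }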
1243
1244static irqreturn_t vgic_maintenance_handler(int irq, void *data)
1245{
1246 /*
1247 * We cannot rely on the vgic maintenance interrupt to be
1248 * delivered synchronously. This means we can only use it to
1249 * exit the VM, and we perform the handling of EOIed
1250 * interrupts on the exit path (see vgic_process_maintenance).
1251 */
1252 return IRQ_HANDLED;
1253}
1254
1255int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
1256{
1257 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1258 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1259 int i;
1260
1261 if (!irqchip_in_kernel(vcpu->kvm))
1262 return 0;
1263
1264 if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
1265 return -EBUSY;
1266
1267 for (i = 0; i < VGIC_NR_IRQS; i++) {
1268 if (i < VGIC_NR_PPIS)
1269 vgic_bitmap_set_irq_val(&dist->irq_enabled,
1270 vcpu->vcpu_id, i, 1);
1271 if (i < VGIC_NR_PRIVATE_IRQS)
1272 vgic_bitmap_set_irq_val(&dist->irq_cfg,
1273 vcpu->vcpu_id, i, VGIC_CFG_EDGE);
1274
1275 vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
1276 }
1277
1278 /*
1279 * By forcing VMCR to zero, the GIC will restore the binary
1280 * points to their reset values. Anything else resets to zero
1281 * anyway.
1282 */
1283 vgic_cpu->vgic_vmcr = 0;
1284
1285 vgic_cpu->nr_lr = vgic_nr_lr;
1286 vgic_cpu->vgic_hcr = GICH_HCR_EN; /* Get the show on the road... */
1287
1288 return 0;
1289}
1290
1291static void vgic_init_maintenance_interrupt(void *info)
1292{
1293 enable_percpu_irq(vgic_maint_irq, 0);
1294}
1295
1296static int vgic_cpu_notify(struct notifier_block *self,
1297 unsigned long action, void *cpu)
1298{
1299 switch (action) {
1300 case CPU_STARTING:
1301 case CPU_STARTING_FROZEN:
1302 vgic_init_maintenance_interrupt(NULL);
1303 break;
1304 case CPU_DYING:
1305 case CPU_DYING_FROZEN:
1306 disable_percpu_irq(vgic_maint_irq);
1307 break;
1308 }
1309
1310 return NOTIFY_OK;
1311}
1312
1313static struct notifier_block vgic_cpu_nb = {
1314 .notifier_call = vgic_cpu_notify,
1315};
1316
1317int kvm_vgic_hyp_init(void)
1318{
1319 int ret;
1320 struct resource vctrl_res;
1321 struct resource vcpu_res;
1322
1323 vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
1324 if (!vgic_node) {
1325 kvm_err("error: no compatible vgic node in DT\n");
1326 return -ENODEV;
1327 }
1328
1329 vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
1330 if (!vgic_maint_irq) {
1331 kvm_err("error getting vgic maintenance irq from DT\n");
1332 ret = -ENXIO;
1333 goto out;
1334 }
1335
1336 ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
1337 "vgic", kvm_get_running_vcpus());
1338 if (ret) {
1339 kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
1340 goto out;
1341 }
1342
1343 ret = register_cpu_notifier(&vgic_cpu_nb);
1344 if (ret) {
1345 kvm_err("Cannot register vgic CPU notifier\n");
1346 goto out_free_irq;
1347 }
1348
1349 ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
1350 if (ret) {
1351 kvm_err("Cannot obtain VCTRL resource\n");
1352 goto out_free_irq;
1353 }
1354
1355 vgic_vctrl_base = of_iomap(vgic_node, 2);
1356 if (!vgic_vctrl_base) {
1357 kvm_err("Cannot ioremap VCTRL\n");
1358 ret = -ENOMEM;
1359 goto out_free_irq;
1360 }
1361
1362 vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
1363 vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
1364
1365 ret = create_hyp_io_mappings(vgic_vctrl_base,
1366 vgic_vctrl_base + resource_size(&vctrl_res),
1367 vctrl_res.start);
1368 if (ret) {
1369 kvm_err("Cannot map VCTRL into hyp\n");
1370 goto out_unmap;
1371 }
1372
1373 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1374 vctrl_res.start, vgic_maint_irq);
1375 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1376
1377 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1378 kvm_err("Cannot obtain VCPU resource\n");
1379 ret = -ENXIO;
1380 goto out_unmap;
1381 }
1382 vgic_vcpu_base = vcpu_res.start;
1383
1384 goto out;
1385
1386out_unmap:
1387 iounmap(vgic_vctrl_base);
1388out_free_irq:
1389 free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
1390out:
1391 of_node_put(vgic_node);
1392 return ret;
1393}
1394
1395int kvm_vgic_init(struct kvm *kvm)
1396{
1397 int ret = 0, i;
1398
1399 mutex_lock(&kvm->lock);
1400
1401 if (vgic_initialized(kvm))
1402 goto out;
1403
1404 if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
1405 IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
1406 kvm_err("Need to set vgic cpu and dist addresses first\n");
1407 ret = -ENXIO;
1408 goto out;
1409 }
1410
1411 ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
1412 vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
1413 if (ret) {
1414 kvm_err("Unable to remap VGIC CPU to VCPU\n");
1415 goto out;
1416 }
1417
1418 for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
1419 vgic_set_target_reg(kvm, 0, i);
1420
1421 kvm_timer_init(kvm);
1422 kvm->arch.vgic.ready = true;
1423out:
1424 mutex_unlock(&kvm->lock);
1425 return ret;
1426}
1427
1428int kvm_vgic_create(struct kvm *kvm)
1429{
1430 int ret = 0;
1431
1432 mutex_lock(&kvm->lock);
1433
1434 if (atomic_read(&kvm->online_vcpus) || kvm->arch.vgic.vctrl_base) {
1435 ret = -EEXIST;
1436 goto out;
1437 }
1438
1439 spin_lock_init(&kvm->arch.vgic.lock);
1440 kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
1441 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
1442 kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
1443
1444out:
1445 mutex_unlock(&kvm->lock);
1446 return ret;
1447}
1448
1449static int vgic_ioaddr_overlap(struct kvm *kvm)
1450{
1451 phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
1452 phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;
1453
1454 if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
1455 return 0;
1456 if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
1457 (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
1458 return -EBUSY;
1459 return 0;
1460}
1461
1462static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
1463 phys_addr_t addr, phys_addr_t size)
1464{
1465 int ret;
1466
1467 if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
1468 return -EEXIST;
1469 if (addr + size < addr)
1470 return -EINVAL;
1471
1472 ret = vgic_ioaddr_overlap(kvm);
1473 if (ret)
1474 return ret;
1475 *ioaddr = addr;
1476 return ret;
1477}
1478
1479int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
1480{
1481 int r = 0;
1482 struct vgic_dist *vgic = &kvm->arch.vgic;
1483
1484 if (addr & ~KVM_PHYS_MASK)
1485 return -E2BIG;
1486
1487 if (addr & ~PAGE_MASK)
1488 return -EINVAL;
1489
1490 mutex_lock(&kvm->lock);
1491 switch (type) {
1492 case KVM_VGIC_V2_ADDR_TYPE_DIST:
1493 r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
1494 addr, KVM_VGIC_V2_DIST_SIZE);
1495 break;
1496 case KVM_VGIC_V2_ADDR_TYPE_CPU:
1497 r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
1498 addr, KVM_VGIC_V2_CPU_SIZE);
1499 break;
1500 default:
1501 r = -ENODEV;
1502 }
1503
1504 mutex_unlock(&kvm->lock);
1505 return r;
1506}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 22ad24e9496b..49ac3dfebef9 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -99,12 +99,12 @@ config ARCH_OMAP4
99 99
100config SOC_OMAP5 100config SOC_OMAP5
101 bool "TI OMAP5" 101 bool "TI OMAP5"
102 select ARM_ARCH_TIMER
103 select ARM_CPU_SUSPEND if PM 102 select ARM_CPU_SUSPEND if PM
104 select ARM_GIC 103 select ARM_GIC
105 select CPU_V7 104 select CPU_V7
106 select HAVE_SMP 105 select HAVE_SMP
107 select COMMON_CLK 106 select COMMON_CLK
107 select HAVE_ARM_ARCH_TIMER
108 108
109comment "OMAP Core Type" 109comment "OMAP Core Type"
110 depends on ARCH_OMAP2 110 depends on ARCH_OMAP2
diff --git a/arch/arm/mach-virt/Kconfig b/arch/arm/mach-virt/Kconfig
new file mode 100644
index 000000000000..8958f0d896bc
--- /dev/null
+++ b/arch/arm/mach-virt/Kconfig
@@ -0,0 +1,10 @@
1config ARCH_VIRT
2 bool "Dummy Virtual Machine" if ARCH_MULTI_V7
3 select ARCH_WANT_OPTIONAL_GPIOLIB
4 select ARM_GIC
5 select ARM_ARCH_TIMER
6 select ARM_PSCI
7 select HAVE_SMP
8 select CPU_V7
9 select SPARSE_IRQ
10 select USE_OF
diff --git a/arch/arm/mach-virt/Makefile b/arch/arm/mach-virt/Makefile
new file mode 100644
index 000000000000..042afc1f8c44
--- /dev/null
+++ b/arch/arm/mach-virt/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for the linux kernel.
3#
4
5obj-y := virt.o
6obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-virt/platsmp.c b/arch/arm/mach-virt/platsmp.c
new file mode 100644
index 000000000000..8badaabe70a1
--- /dev/null
+++ b/arch/arm/mach-virt/platsmp.c
@@ -0,0 +1,58 @@
1/*
2 * Dummy Virtual Machine - does what it says on the tin.
3 *
4 * Copyright (C) 2012 ARM Ltd
5 * Author: Will Deacon <will.deacon@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/init.h>
21#include <linux/smp.h>
22#include <linux/of.h>
23
24#include <linux/irqchip/arm-gic.h>
25
26#include <asm/psci.h>
27#include <asm/smp_plat.h>
28
29extern void secondary_startup(void);
30
31static void __init virt_smp_init_cpus(void)
32{
33}
34
35static void __init virt_smp_prepare_cpus(unsigned int max_cpus)
36{
37}
38
39static int __cpuinit virt_boot_secondary(unsigned int cpu,
40 struct task_struct *idle)
41{
42 if (psci_ops.cpu_on)
43 return psci_ops.cpu_on(cpu_logical_map(cpu),
44 __pa(secondary_startup));
45 return -ENODEV;
46}
47
48static void __cpuinit virt_secondary_init(unsigned int cpu)
49{
50 gic_secondary_init(0);
51}
52
53struct smp_operations __initdata virt_smp_ops = {
54 .smp_init_cpus = virt_smp_init_cpus,
55 .smp_prepare_cpus = virt_smp_prepare_cpus,
56 .smp_secondary_init = virt_secondary_init,
57 .smp_boot_secondary = virt_boot_secondary,
58};
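
virt_boot_secondary() defers all secondary bring-up to firmware: PSCI CPU_ON is invoked with the target core's MPIDR (obtained via cpu_logical_map()) and the physical address of the common secondary_startup entry point, and -ENODEV is returned if no PSCI backend was discovered at boot.
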
diff --git a/arch/arm/mach-virt/virt.c b/arch/arm/mach-virt/virt.c
new file mode 100644
index 000000000000..31666f6b4373
--- /dev/null
+++ b/arch/arm/mach-virt/virt.c
@@ -0,0 +1,54 @@
1/*
2 * Dummy Virtual Machine - does what it says on the tin.
3 *
4 * Copyright (C) 2012 ARM Ltd
5 * Authors: Will Deacon <will.deacon@arm.com>,
6 * Marc Zyngier <marc.zyngier@arm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/irqchip.h>
22#include <linux/of_irq.h>
23#include <linux/of_platform.h>
24#include <linux/smp.h>
25
26#include <asm/arch_timer.h>
27#include <asm/mach/arch.h>
28#include <asm/mach/time.h>
29
30static void __init virt_init(void)
31{
32 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
33}
34
35static void __init virt_timer_init(void)
36{
37 WARN_ON(arch_timer_of_register() != 0);
38 WARN_ON(arch_timer_sched_clock_init() != 0);
39}
40
41static const char *virt_dt_match[] = {
42 "linux,dummy-virt",
43 NULL
44};
45
46extern struct smp_operations virt_smp_ops;
47
48DT_MACHINE_START(VIRT, "Dummy Virtual Machine")
49 .init_irq = irqchip_init,
50 .init_time = virt_timer_init,
51 .init_machine = virt_init,
52 .smp = smp_ops(virt_smp_ops),
53 .dt_compat = virt_dt_match,
54MACHINE_END
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7c43569e3141..ab4aa54b36ef 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@ config ARM64
4 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 4 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
5 select ARCH_WANT_FRAME_POINTERS 5 select ARCH_WANT_FRAME_POINTERS
6 select ARM_AMBA 6 select ARM_AMBA
7 select ARM_ARCH_TIMER
7 select CLONE_BACKWARDS 8 select CLONE_BACKWARDS
8 select COMMON_CLK 9 select COMMON_CLK
9 select GENERIC_CLOCKEVENTS 10 select GENERIC_CLOCKEVENTS
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
new file mode 100644
index 000000000000..91e2a6a6fcd4
--- /dev/null
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -0,0 +1,133 @@
1/*
2 * arch/arm64/include/asm/arch_timer.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_ARCH_TIMER_H
20#define __ASM_ARCH_TIMER_H
21
22#include <asm/barrier.h>
23
24#include <linux/init.h>
25#include <linux/types.h>
26
27#include <clocksource/arm_arch_timer.h>
28
29static inline void arch_timer_reg_write(int access, int reg, u32 val)
30{
31 if (access == ARCH_TIMER_PHYS_ACCESS) {
32 switch (reg) {
33 case ARCH_TIMER_REG_CTRL:
34 asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
35 break;
36 case ARCH_TIMER_REG_TVAL:
37 asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
38 break;
39 default:
40 BUILD_BUG();
41 }
42 } else if (access == ARCH_TIMER_VIRT_ACCESS) {
43 switch (reg) {
44 case ARCH_TIMER_REG_CTRL:
45 asm volatile("msr cntv_ctl_el0, %0" : : "r" (val));
46 break;
47 case ARCH_TIMER_REG_TVAL:
48 asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
49 break;
50 default:
51 BUILD_BUG();
52 }
53 } else {
54 BUILD_BUG();
55 }
56
57 isb();
58}
59
60static inline u32 arch_timer_reg_read(int access, int reg)
61{
62 u32 val;
63
64 if (access == ARCH_TIMER_PHYS_ACCESS) {
65 switch (reg) {
66 case ARCH_TIMER_REG_CTRL:
67 asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
68 break;
69 case ARCH_TIMER_REG_TVAL:
70 asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
71 break;
72 default:
73 BUILD_BUG();
74 }
75 } else if (access == ARCH_TIMER_VIRT_ACCESS) {
76 switch (reg) {
77 case ARCH_TIMER_REG_CTRL:
78 asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val));
79 break;
80 case ARCH_TIMER_REG_TVAL:
81 asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
82 break;
83 default:
84 BUILD_BUG();
85 }
86 } else {
87 BUILD_BUG();
88 }
89
90 return val;
91}
92
93static inline u32 arch_timer_get_cntfrq(void)
94{
95 u32 val;
96 asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
97 return val;
98}
99
100static inline void __cpuinit arch_counter_set_user_access(void)
101{
102 u32 cntkctl;
103
104 /* Disable user access to the timers and the physical counter. */
105 asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
106 cntkctl &= ~((3 << 8) | (1 << 0));
107
108 /* Enable user access to the virtual counter and frequency. */
109 cntkctl |= (1 << 1);
110 asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
111}
112
113static inline u64 arch_counter_get_cntpct(void)
114{
115 u64 cval;
116
117 isb();
118 asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
119
120 return cval;
121}
122
123static inline u64 arch_counter_get_cntvct(void)
124{
125 u64 cval;
126
127 isb();
128 asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
129
130 return cval;
131}
132
133#endif
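
The accessors above are sufficient for simple interval measurement. A minimal sketch, assuming kernel context (NSEC_PER_SEC from <linux/time.h>) and a delta small enough that the multiplication does not overflow 64 bits; the helper name is illustrative:

	static inline u64 arch_counter_delta_ns(u64 start, u64 end)
	{
		u64 ticks = end - start;

		/* CNTFRQ_EL0 reports the counter frequency in Hz. */
		return ticks * NSEC_PER_SEC / arch_timer_get_cntfrq();
	}

For instance, two arch_counter_get_cntvct() reads 5000 ticks apart on a 50 MHz counter give 5000 * NSEC_PER_SEC / 50000000 = 100000 ns.
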
diff --git a/arch/arm64/include/asm/arm_generic.h b/arch/arm64/include/asm/arm_generic.h
deleted file mode 100644
index df2aeb82f74e..000000000000
--- a/arch/arm64/include/asm/arm_generic.h
+++ /dev/null
@@ -1,100 +0,0 @@
1/*
2 * arch/arm64/include/asm/arm_generic.h
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * This program is free software: you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __ASM_ARM_GENERIC_H
20#define __ASM_ARM_GENERIC_H
21
22#include <linux/clocksource.h>
23
24#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
25#define ARCH_TIMER_CTRL_IMASK (1 << 1)
26#define ARCH_TIMER_CTRL_ISTATUS (1 << 2)
27
28#define ARCH_TIMER_REG_CTRL 0
29#define ARCH_TIMER_REG_FREQ 1
30#define ARCH_TIMER_REG_TVAL 2
31
32static inline void arch_timer_reg_write(int reg, u32 val)
33{
34 switch (reg) {
35 case ARCH_TIMER_REG_CTRL:
36 asm volatile("msr cntp_ctl_el0, %0" : : "r" (val));
37 break;
38 case ARCH_TIMER_REG_TVAL:
39 asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
40 break;
41 default:
42 BUILD_BUG();
43 }
44
45 isb();
46}
47
48static inline u32 arch_timer_reg_read(int reg)
49{
50 u32 val;
51
52 switch (reg) {
53 case ARCH_TIMER_REG_CTRL:
54 asm volatile("mrs %0, cntp_ctl_el0" : "=r" (val));
55 break;
56 case ARCH_TIMER_REG_FREQ:
57 asm volatile("mrs %0, cntfrq_el0" : "=r" (val));
58 break;
59 case ARCH_TIMER_REG_TVAL:
60 asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
61 break;
62 default:
63 BUILD_BUG();
64 }
65
66 return val;
67}
68
69static inline void __cpuinit arch_counter_enable_user_access(void)
70{
71 u32 cntkctl;
72
73 /* Disable user access to the timers and the physical counter. */
74 asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
75 cntkctl &= ~((3 << 8) | (1 << 0));
76
77 /* Enable user access to the virtual counter and frequency. */
78 cntkctl |= (1 << 1);
79 asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
80}
81
82static inline cycle_t arch_counter_get_cntpct(void)
83{
84 cycle_t cval;
85
86 asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
87
88 return cval;
89}
90
91static inline cycle_t arch_counter_get_cntvct(void)
92{
93 cycle_t cval;
94
95 asm volatile("mrs %0, cntvct_el0" : "=r" (cval));
96
97 return cval;
98}
99
100#endif
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 3b4b7258f492..b0ef18d14c3b 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -31,8 +31,9 @@
31#include <linux/syscore_ops.h> 31#include <linux/syscore_ops.h>
32#include <linux/timer.h> 32#include <linux/timer.h>
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/delay.h>
34 35
35#include <clocksource/arm_generic.h> 36#include <clocksource/arm_arch_timer.h>
36 37
37#include <asm/thread_info.h> 38#include <asm/thread_info.h>
38#include <asm/stacktrace.h> 39#include <asm/stacktrace.h>
@@ -59,7 +60,31 @@ unsigned long profile_pc(struct pt_regs *regs)
59EXPORT_SYMBOL(profile_pc); 60EXPORT_SYMBOL(profile_pc);
60#endif 61#endif
61 62
63static u64 sched_clock_mult __read_mostly;
64
65unsigned long long notrace sched_clock(void)
66{
67 return arch_timer_read_counter() * sched_clock_mult;
68}
69
70int read_current_timer(unsigned long *timer_value)
71{
72 *timer_value = arch_timer_read_counter();
73 return 0;
74}
75
62void __init time_init(void) 76void __init time_init(void)
63{ 77{
64 arm_generic_timer_init(); 78 u32 arch_timer_rate;
79
80 if (arch_timer_init())
81 panic("Unable to initialise architected timer.\n");
82
83 arch_timer_rate = arch_timer_get_rate();
84
85 /* Cache the sched_clock multiplier to save a divide in the hot path. */
86 sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
87
88 /* Calibrate the delay loop directly */
89 lpj_fine = arch_timer_rate / HZ;
65} 90}
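
Caching sched_clock_mult reduces sched_clock() to a single multiply. As a worked example, a hypothetical 100 MHz system counter gives sched_clock_mult = NSEC_PER_SEC / 100000000 = 10, i.e. 10 ns per counter tick. Note that the integer division truncates for rates that do not divide NSEC_PER_SEC (a 24 MHz counter yields 41 rather than 41.67), whereas the deleted arm_generic.c used DIV_ROUND_CLOSEST for the same computation.
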
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 7d978c1bd528..e920cbe519fa 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -60,7 +60,5 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
60 help 60 help
61 Use the always on PRCMU Timer as sched_clock 61 Use the always on PRCMU Timer as sched_clock
62 62
63config CLKSRC_ARM_GENERIC 63config ARM_ARCH_TIMER
64 def_bool y if ARM64 64 bool
65 help
66 This option enables support for the ARM generic timer.
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 596c45c2f192..7d671b85a98e 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -20,4 +20,4 @@ obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o
20obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o 20obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o
21obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o 21obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
22 22
23obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o 23obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
new file mode 100644
index 000000000000..d7ad425ab9b3
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -0,0 +1,391 @@
1/*
2 * linux/drivers/clocksource/arm_arch_timer.c
3 *
4 * Copyright (C) 2011 ARM Ltd.
5 * All Rights Reserved
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/device.h>
14#include <linux/smp.h>
15#include <linux/cpu.h>
16#include <linux/clockchips.h>
17#include <linux/interrupt.h>
18#include <linux/of_irq.h>
19#include <linux/io.h>
20
21#include <asm/arch_timer.h>
22#include <asm/virt.h>
23
24#include <clocksource/arm_arch_timer.h>
25
26static u32 arch_timer_rate;
27
28enum ppi_nr {
29 PHYS_SECURE_PPI,
30 PHYS_NONSECURE_PPI,
31 VIRT_PPI,
32 HYP_PPI,
33 MAX_TIMER_PPI
34};
35
36static int arch_timer_ppi[MAX_TIMER_PPI];
37
38static struct clock_event_device __percpu *arch_timer_evt;
39
40static bool arch_timer_use_virtual = true;
41
42/*
43 * Architected system timer support.
44 */
45
46static inline irqreturn_t timer_handler(const int access,
47 struct clock_event_device *evt)
48{
49 unsigned long ctrl;
50 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
51 if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
52 ctrl |= ARCH_TIMER_CTRL_IT_MASK;
53 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
54 evt->event_handler(evt);
55 return IRQ_HANDLED;
56 }
57
58 return IRQ_NONE;
59}
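
timer_handler() masks the timer with ARCH_TIMER_CTRL_IT_MASK before running the event handler because the architected timer output is level-triggered: the ISTATUS condition stays asserted until a new compare value is programmed, so leaving the interrupt unmasked would immediately re-raise it.
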
60
61static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
62{
63 struct clock_event_device *evt = dev_id;
64
65 return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
66}
67
68static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
69{
70 struct clock_event_device *evt = dev_id;
71
72 return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
73}
74
75static inline void timer_set_mode(const int access, int mode)
76{
77 unsigned long ctrl;
78 switch (mode) {
79 case CLOCK_EVT_MODE_UNUSED:
80 case CLOCK_EVT_MODE_SHUTDOWN:
81 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
82 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
83 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
84 break;
85 default:
86 break;
87 }
88}
89
90static void arch_timer_set_mode_virt(enum clock_event_mode mode,
91 struct clock_event_device *clk)
92{
93 timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
94}
95
96static void arch_timer_set_mode_phys(enum clock_event_mode mode,
97 struct clock_event_device *clk)
98{
99 timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
100}
101
102static inline void set_next_event(const int access, unsigned long evt)
103{
104 unsigned long ctrl;
105 ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
106 ctrl |= ARCH_TIMER_CTRL_ENABLE;
107 ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
108 arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
109 arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
110}
111
112static int arch_timer_set_next_event_virt(unsigned long evt,
113 struct clock_event_device *unused)
114{
115 set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
116 return 0;
117}
118
119static int arch_timer_set_next_event_phys(unsigned long evt,
120 struct clock_event_device *unused)
121{
122 set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
123 return 0;
124}
125
126static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
127{
128 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
129 clk->name = "arch_sys_timer";
130 clk->rating = 450;
131 if (arch_timer_use_virtual) {
132 clk->irq = arch_timer_ppi[VIRT_PPI];
133 clk->set_mode = arch_timer_set_mode_virt;
134 clk->set_next_event = arch_timer_set_next_event_virt;
135 } else {
136 clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
137 clk->set_mode = arch_timer_set_mode_phys;
138 clk->set_next_event = arch_timer_set_next_event_phys;
139 }
140
141 clk->cpumask = cpumask_of(smp_processor_id());
142
143 clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
144
145 clockevents_config_and_register(clk, arch_timer_rate,
146 0xf, 0x7fffffff);
147
148 if (arch_timer_use_virtual)
149 enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
150 else {
151 enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
152 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
153 enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
154 }
155
156 arch_counter_set_user_access();
157
158 return 0;
159}
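
The limits passed to clockevents_config_and_register() follow from the hardware: TVAL is a 32-bit down-counter interpreted as signed, so 0x7fffffff is the largest delta that can be programmed without going negative, while the 0xf minimum keeps events from being scheduled closer than the timer can reliably honour.
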
160
161static int arch_timer_available(void)
162{
163 u32 freq;
164
165 if (arch_timer_rate == 0) {
166 freq = arch_timer_get_cntfrq();
167
168 /* Check the timer frequency. */
169 if (freq == 0) {
170 pr_warn("Architected timer frequency not available\n");
171 return -EINVAL;
172 }
173
174 arch_timer_rate = freq;
175 }
176
177 pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
178 (unsigned long)arch_timer_rate / 1000000,
179 (unsigned long)(arch_timer_rate / 10000) % 100,
180 arch_timer_use_virtual ? "virt" : "phys");
181 return 0;
182}
183
184u32 arch_timer_get_rate(void)
185{
186 return arch_timer_rate;
187}
188
189/*
190 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
191 * call it before it has been initialised. Rather than incur a performance
192 * penalty checking for initialisation, provide a default implementation that
193 * won't lead to time appearing to jump backwards.
194 */
195static u64 arch_timer_read_zero(void)
196{
197 return 0;
198}
199
200u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
201
202static cycle_t arch_counter_read(struct clocksource *cs)
203{
204 return arch_timer_read_counter();
205}
206
207static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
208{
209 return arch_timer_read_counter();
210}
211
212static struct clocksource clocksource_counter = {
213 .name = "arch_sys_counter",
214 .rating = 400,
215 .read = arch_counter_read,
216 .mask = CLOCKSOURCE_MASK(56),
217 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
218};
219
220static struct cyclecounter cyclecounter = {
221 .read = arch_counter_read_cc,
222 .mask = CLOCKSOURCE_MASK(56),
223};
224
225static struct timecounter timecounter;
226
227struct timecounter *arch_timer_get_timecounter(void)
228{
229 return &timecounter;
230}
231
232static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
233{
234 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
235 clk->irq, smp_processor_id());
236
237 if (arch_timer_use_virtual)
238 disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
239 else {
240 disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
241 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
242 disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
243 }
244
245 clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
246}
247
248static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
249 unsigned long action, void *hcpu)
250{
251 struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
252
253 switch (action & ~CPU_TASKS_FROZEN) {
254 case CPU_STARTING:
255 arch_timer_setup(evt);
256 break;
257 case CPU_DYING:
258 arch_timer_stop(evt);
259 break;
260 }
261
262 return NOTIFY_OK;
263}
264
265static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
266 .notifier_call = arch_timer_cpu_notify,
267};
268
269static int __init arch_timer_register(void)
270{
271 int err;
272 int ppi;
273
274 err = arch_timer_available();
275 if (err)
276 goto out;
277
278 arch_timer_evt = alloc_percpu(struct clock_event_device);
279 if (!arch_timer_evt) {
280 err = -ENOMEM;
281 goto out;
282 }
283
284 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
285 cyclecounter.mult = clocksource_counter.mult;
286 cyclecounter.shift = clocksource_counter.shift;
287 timecounter_init(&timecounter, &cyclecounter,
288 arch_counter_get_cntpct());
289
290 if (arch_timer_use_virtual) {
291 ppi = arch_timer_ppi[VIRT_PPI];
292 err = request_percpu_irq(ppi, arch_timer_handler_virt,
293 "arch_timer", arch_timer_evt);
294 } else {
295 ppi = arch_timer_ppi[PHYS_SECURE_PPI];
296 err = request_percpu_irq(ppi, arch_timer_handler_phys,
297 "arch_timer", arch_timer_evt);
298 if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
299 ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
300 err = request_percpu_irq(ppi, arch_timer_handler_phys,
301 "arch_timer", arch_timer_evt);
302 if (err)
303 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
304 arch_timer_evt);
305 }
306 }
307
308 if (err) {
309 pr_err("arch_timer: can't register interrupt %d (%d)\n",
310 ppi, err);
311 goto out_free;
312 }
313
314 err = register_cpu_notifier(&arch_timer_cpu_nb);
315 if (err)
316 goto out_free_irq;
317
318 /* Immediately configure the timer on the boot CPU */
319 arch_timer_setup(this_cpu_ptr(arch_timer_evt));
320
321 return 0;
322
323out_free_irq:
324 if (arch_timer_use_virtual)
325 free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
326 else {
327 free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
328 arch_timer_evt);
329 if (arch_timer_ppi[PHYS_NONSECURE_PPI])
330 free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
331 arch_timer_evt);
332 }
333
334out_free:
335 free_percpu(arch_timer_evt);
336out:
337 return err;
338}
339
340static const struct of_device_id arch_timer_of_match[] __initconst = {
341 { .compatible = "arm,armv7-timer", },
342 { .compatible = "arm,armv8-timer", },
343 {},
344};
345
346int __init arch_timer_init(void)
347{
348 struct device_node *np;
349 u32 freq;
350 int i;
351
352 np = of_find_matching_node(NULL, arch_timer_of_match);
353 if (!np) {
354 pr_err("arch_timer: can't find DT node\n");
355 return -ENODEV;
356 }
357
358 /* Try to determine the frequency from the device tree or CNTFRQ */
359 if (!of_property_read_u32(np, "clock-frequency", &freq))
360 arch_timer_rate = freq;
361
362 for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
363 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
364
365 of_node_put(np);
366
367 /*
368 * If HYP mode is available, we know that the physical timer
369 * has been configured to be accessible from PL1. Use it, so
370 * that a guest can use the virtual timer instead.
371 *
372 * If no interrupt is provided for the virtual timer, we'll
373 * have to stick to the physical timer. It'd better be accessible...
374 */
375 if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
376 arch_timer_use_virtual = false;
377
378 if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
379 !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
380 pr_warn("arch_timer: No interrupt available, giving up\n");
381 return -EINVAL;
382 }
383 }
384
385 if (arch_timer_use_virtual)
386 arch_timer_read_counter = arch_counter_get_cntvct;
387 else
388 arch_timer_read_counter = arch_counter_get_cntpct;
389
390 return arch_timer_register();
391}
diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
deleted file mode 100644
index 8ae1a61523ff..000000000000
--- a/drivers/clocksource/arm_generic.c
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * Generic timers support
3 *
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Marc Zyngier <marc.zyngier@arm.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/device.h>
24#include <linux/smp.h>
25#include <linux/cpu.h>
26#include <linux/jiffies.h>
27#include <linux/interrupt.h>
28#include <linux/clockchips.h>
29#include <linux/of_irq.h>
30#include <linux/io.h>
31
32#include <clocksource/arm_generic.h>
33
34#include <asm/arm_generic.h>
35
36static u32 arch_timer_rate;
37static u64 sched_clock_mult __read_mostly;
38static DEFINE_PER_CPU(struct clock_event_device, arch_timer_evt);
39static int arch_timer_ppi;
40
41static irqreturn_t arch_timer_handle_irq(int irq, void *dev_id)
42{
43 struct clock_event_device *evt = dev_id;
44 unsigned long ctrl;
45
46 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
47 if (ctrl & ARCH_TIMER_CTRL_ISTATUS) {
48 ctrl |= ARCH_TIMER_CTRL_IMASK;
49 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
50 evt->event_handler(evt);
51 return IRQ_HANDLED;
52 }
53
54 return IRQ_NONE;
55}
56
57static void arch_timer_stop(void)
58{
59 unsigned long ctrl;
60
61 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
62 ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
63 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
64}
65
66static void arch_timer_set_mode(enum clock_event_mode mode,
67 struct clock_event_device *clk)
68{
69 switch (mode) {
70 case CLOCK_EVT_MODE_UNUSED:
71 case CLOCK_EVT_MODE_SHUTDOWN:
72 arch_timer_stop();
73 break;
74 default:
75 break;
76 }
77}
78
79static int arch_timer_set_next_event(unsigned long evt,
80 struct clock_event_device *unused)
81{
82 unsigned long ctrl;
83
84 ctrl = arch_timer_reg_read(ARCH_TIMER_REG_CTRL);
85 ctrl |= ARCH_TIMER_CTRL_ENABLE;
86 ctrl &= ~ARCH_TIMER_CTRL_IMASK;
87
88 arch_timer_reg_write(ARCH_TIMER_REG_TVAL, evt);
89 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, ctrl);
90
91 return 0;
92}
93
94static void __cpuinit arch_timer_setup(struct clock_event_device *clk)
95{
96 /* Let's make sure the timer is off before doing anything else */
97 arch_timer_stop();
98
99 clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
100 clk->name = "arch_sys_timer";
101 clk->rating = 400;
102 clk->set_mode = arch_timer_set_mode;
103 clk->set_next_event = arch_timer_set_next_event;
104 clk->irq = arch_timer_ppi;
105 clk->cpumask = cpumask_of(smp_processor_id());
106
107 clockevents_config_and_register(clk, arch_timer_rate,
108 0xf, 0x7fffffff);
109
110 enable_percpu_irq(clk->irq, 0);
111
112 /* Ensure the virtual counter is visible to userspace for the vDSO. */
113 arch_counter_enable_user_access();
114}
115
116static void __init arch_timer_calibrate(void)
117{
118 if (arch_timer_rate == 0) {
119 arch_timer_reg_write(ARCH_TIMER_REG_CTRL, 0);
120 arch_timer_rate = arch_timer_reg_read(ARCH_TIMER_REG_FREQ);
121
122 /* Check the timer frequency. */
123 if (arch_timer_rate == 0)
124 panic("Architected timer frequency is set to zero.\n"
125 "You must set this in your .dts file\n");
126 }
127
128 /* Cache the sched_clock multiplier to save a divide in the hot path. */
129
130 sched_clock_mult = DIV_ROUND_CLOSEST(NSEC_PER_SEC, arch_timer_rate);
131
132 pr_info("Architected local timer running at %u.%02uMHz.\n",
133 arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100);
134}
135
136static cycle_t arch_counter_read(struct clocksource *cs)
137{
138 return arch_counter_get_cntpct();
139}
140
141static struct clocksource clocksource_counter = {
142 .name = "arch_sys_counter",
143 .rating = 400,
144 .read = arch_counter_read,
145 .mask = CLOCKSOURCE_MASK(56),
146 .flags = (CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_VALID_FOR_HRES),
147};
148
149int read_current_timer(unsigned long *timer_value)
150{
151 *timer_value = arch_counter_get_cntpct();
152 return 0;
153}
154
155unsigned long long notrace sched_clock(void)
156{
157 return arch_counter_get_cntvct() * sched_clock_mult;
158}
159
160static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
161 unsigned long action, void *hcpu)
162{
163 int cpu = (long)hcpu;
164 struct clock_event_device *clk = per_cpu_ptr(&arch_timer_evt, cpu);
165
166 switch(action) {
167 case CPU_STARTING:
168 case CPU_STARTING_FROZEN:
169 arch_timer_setup(clk);
170 break;
171
172 case CPU_DYING:
173 case CPU_DYING_FROZEN:
174 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
175 clk->irq, cpu);
176 disable_percpu_irq(clk->irq);
177 arch_timer_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
178 break;
179 }
180
181 return NOTIFY_OK;
182}
183
184static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
185 .notifier_call = arch_timer_cpu_notify,
186};
187
188static const struct of_device_id arch_timer_of_match[] __initconst = {
189 { .compatible = "arm,armv8-timer" },
190 {},
191};
192
193int __init arm_generic_timer_init(void)
194{
195 struct device_node *np;
196 int err;
197 u32 freq;
198
199 np = of_find_matching_node(NULL, arch_timer_of_match);
200 if (!np) {
201 pr_err("arch_timer: can't find DT node\n");
202 return -ENODEV;
203 }
204
205 /* Try to determine the frequency from the device tree or CNTFRQ */
206 if (!of_property_read_u32(np, "clock-frequency", &freq))
207 arch_timer_rate = freq;
208 arch_timer_calibrate();
209
210 arch_timer_ppi = irq_of_parse_and_map(np, 0);
211 pr_info("arch_timer: found %s irq %d\n", np->name, arch_timer_ppi);
212
213 err = request_percpu_irq(arch_timer_ppi, arch_timer_handle_irq,
214 np->name, &arch_timer_evt);
215 if (err) {
216 pr_err("arch_timer: can't register interrupt %d (%d)\n",
217 arch_timer_ppi, err);
218 return err;
219 }
220
221 clocksource_register_hz(&clocksource_counter, arch_timer_rate);
222
223 /* Calibrate the delay loop directly */
224 lpj_fine = DIV_ROUND_CLOSEST(arch_timer_rate, HZ);
225
226 /* Immediately configure the timer on the boot CPU */
227 arch_timer_setup(this_cpu_ptr(&arch_timer_evt));
228
229 register_cpu_notifier(&arch_timer_cpu_nb);
230
231 return 0;
232}
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
new file mode 100644
index 000000000000..2603267b1a29
--- /dev/null
+++ b/include/clocksource/arm_arch_timer.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
17#define __CLKSOURCE_ARM_ARCH_TIMER_H
18
19#include <linux/clocksource.h>
20#include <linux/types.h>
21
22#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
23#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
24#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
25
26#define ARCH_TIMER_REG_CTRL 0
27#define ARCH_TIMER_REG_TVAL 1
28
29#define ARCH_TIMER_PHYS_ACCESS 0
30#define ARCH_TIMER_VIRT_ACCESS 1
31
32#ifdef CONFIG_ARM_ARCH_TIMER
33
34extern int arch_timer_init(void);
35extern u32 arch_timer_get_rate(void);
36extern u64 (*arch_timer_read_counter)(void);
37extern struct timecounter *arch_timer_get_timecounter(void);
38
39#else
40
41static inline int arch_timer_init(void)
42{
43 return -ENXIO;
44}
45
46static inline u32 arch_timer_get_rate(void)
47{
48 return 0;
49}
50
51static inline u64 arch_timer_read_counter(void)
52{
53 return 0;
54}
55
56static inline struct timecounter *arch_timer_get_timecounter(void)
57{
58 return NULL;
59}
60
61#endif
62
63#endif
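
The !CONFIG_ARM_ARCH_TIMER stubs fail soft (NULL timecounter, zero rate), so consumers can probe for the timer without ifdefs. A sketch of a hypothetical in-kernel caller (the function name is illustrative):

	static int example_timer_probe(void)
	{
		struct timecounter *tc = arch_timer_get_timecounter();

		/* Either stub indicates the architected timer is absent. */
		if (!tc || !arch_timer_get_rate())
			return -ENODEV;

		pr_info("architected timer running at %u Hz\n",
			arch_timer_get_rate());
		return 0;
	}
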
diff --git a/include/clocksource/arm_generic.h b/include/clocksource/arm_generic.h
deleted file mode 100644
index 5b41b0d27f0f..000000000000
--- a/include/clocksource/arm_generic.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 2012 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __CLKSOURCE_ARM_GENERIC_H
17#define __CLKSOURCE_ARM_GENERIC_H
18
19extern int arm_generic_timer_init(void);
20
21#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index a67ca55e6f4e..3fd8e4290a1c 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -20,16 +20,45 @@
20 20
21#define GIC_DIST_CTRL 0x000 21#define GIC_DIST_CTRL 0x000
22#define GIC_DIST_CTR 0x004 22#define GIC_DIST_CTR 0x004
23#define GIC_DIST_IGROUP 0x080
23#define GIC_DIST_ENABLE_SET 0x100 24#define GIC_DIST_ENABLE_SET 0x100
24#define GIC_DIST_ENABLE_CLEAR 0x180 25#define GIC_DIST_ENABLE_CLEAR 0x180
25#define GIC_DIST_PENDING_SET 0x200 26#define GIC_DIST_PENDING_SET 0x200
26#define GIC_DIST_PENDING_CLEAR 0x280 27#define GIC_DIST_PENDING_CLEAR 0x280
27#define GIC_DIST_ACTIVE_BIT 0x300 28#define GIC_DIST_ACTIVE_SET 0x300
29#define GIC_DIST_ACTIVE_CLEAR 0x380
28#define GIC_DIST_PRI 0x400 30#define GIC_DIST_PRI 0x400
29#define GIC_DIST_TARGET 0x800 31#define GIC_DIST_TARGET 0x800
30#define GIC_DIST_CONFIG 0xc00 32#define GIC_DIST_CONFIG 0xc00
31#define GIC_DIST_SOFTINT 0xf00 33#define GIC_DIST_SOFTINT 0xf00
32 34
35#define GICH_HCR 0x0
36#define GICH_VTR 0x4
37#define GICH_VMCR 0x8
38#define GICH_MISR 0x10
39#define GICH_EISR0 0x20
40#define GICH_EISR1 0x24
41#define GICH_ELRSR0 0x30
42#define GICH_ELRSR1 0x34
43#define GICH_APR 0xf0
44#define GICH_LR0 0x100
45
46#define GICH_HCR_EN (1 << 0)
47#define GICH_HCR_UIE (1 << 1)
48
49#define GICH_LR_VIRTUALID (0x3ff << 0)
50#define GICH_LR_PHYSID_CPUID_SHIFT (10)
51#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
52#define GICH_LR_STATE (3 << 28)
53#define GICH_LR_PENDING_BIT (1 << 28)
54#define GICH_LR_ACTIVE_BIT (1 << 29)
55#define GICH_LR_EOI (1 << 19)
56
57#define GICH_MISR_EOI (1 << 0)
58#define GICH_MISR_U (1 << 1)
59
60#ifndef __ASSEMBLY__
61
33struct device_node; 62struct device_node;
34 63
35extern struct irq_chip gic_arch_extn; 64extern struct irq_chip gic_arch_extn;
@@ -45,4 +74,6 @@ static inline void gic_init(unsigned int nr, int start,
45 gic_init_bases(nr, start, dist, cpu, 0, NULL); 74 gic_init_bases(nr, start, dist, cpu, 0, NULL);
46} 75}
47 76
77#endif /* __ASSEMBLY__ */
78
48#endif 79#endif
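
The new GICH_LR_* masks describe the layout of a hypervisor list register entry. A minimal sketch of decode helpers built purely from the definitions above (names are illustrative; lr_val would be read from one of the GICH_LRn registers):

	static inline u32 gich_lr_virtual_id(u32 lr_val)
	{
		return lr_val & GICH_LR_VIRTUALID;
	}

	static inline u32 gich_lr_cpuid(u32 lr_val)
	{
		return (lr_val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;
	}

	static inline bool gich_lr_pending(u32 lr_val)
	{
		return !!(lr_val & GICH_LR_PENDING_BIT);
	}
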
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 7f2360a46fc2..c70577cf67bc 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -637,6 +637,7 @@ struct kvm_ppc_smmu_info {
637#define KVM_CAP_PPC_BOOKE_WATCHDOG 83 637#define KVM_CAP_PPC_BOOKE_WATCHDOG 83
638#define KVM_CAP_PPC_HTAB_FD 84 638#define KVM_CAP_PPC_HTAB_FD 84
639#define KVM_CAP_ARM_PSCI 87 639#define KVM_CAP_ARM_PSCI 87
640#define KVM_CAP_ARM_SET_DEVICE_ADDR 88
640 641
641#ifdef KVM_CAP_IRQ_ROUTING 642#ifdef KVM_CAP_IRQ_ROUTING
642 643
@@ -784,6 +785,11 @@ struct kvm_msi {
784 __u8 pad[16]; 785 __u8 pad[16];
785}; 786};
786 787
788struct kvm_arm_device_addr {
789 __u64 id;
790 __u64 addr;
791};
792
787/* 793/*
788 * ioctls for VM fds 794 * ioctls for VM fds
789 */ 795 */
@@ -869,6 +875,8 @@ struct kvm_s390_ucas_mapping {
869#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma) 875#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
870/* Available with KVM_CAP_PPC_HTAB_FD */ 876/* Available with KVM_CAP_PPC_HTAB_FD */
871#define KVM_PPC_GET_HTAB_FD _IOW(KVMIO, 0xaa, struct kvm_get_htab_fd) 877#define KVM_PPC_GET_HTAB_FD _IOW(KVMIO, 0xaa, struct kvm_get_htab_fd)
878/* Available with KVM_CAP_ARM_SET_DEVICE_ADDR */
879#define KVM_ARM_SET_DEVICE_ADDR _IOW(KVMIO, 0xab, struct kvm_arm_device_addr)
872 880
873/* 881/*
874 * ioctls for vcpu fds 882 * ioctls for vcpu fds
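
Putting the uapi pieces together, userspace places the two VGIC regions before creating any VCPUs. A minimal sketch, assuming the VM fd already exists and that the id field encodes the device id and address type via the KVM_ARM_DEVICE_* / KVM_VGIC_V2_ADDR_TYPE_* macros from the arm uapi header (that header is touched by this series but its contents are not shown here, so the shift name below is an assumption); the base addresses are hypothetical, page-aligned and non-overlapping:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_vgic_base_addrs(int vm_fd)
	{
		struct kvm_arm_device_addr dev;

		/* Distributor region. */
		dev.id = ((uint64_t)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
			 KVM_VGIC_V2_ADDR_TYPE_DIST;
		dev.addr = 0x2c001000;	/* hypothetical guest-physical base */
		if (ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev) < 0)
			return -1;

		/* CPU interface region. */
		dev.id = ((uint64_t)KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) |
			 KVM_VGIC_V2_ADDR_TYPE_CPU;
		dev.addr = 0x2c002000;	/* hypothetical guest-physical base */
		return ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev);
	}

Each address must be page-aligned and within KVM_PHYS_MASK, and each may be set only once; kvm_vgic_set_addr() otherwise fails with -EINVAL, -E2BIG or -EEXIST, as implemented in vgic.c above.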