aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2013-02-06 14:40:29 -0500
committerMarc Zyngier <marc.zyngier@arm.com>2013-06-12 11:42:14 -0400
commitb547631fc64e249a3c507e6ce854642507fa7c1c (patch)
tree9eedcb03b1576466cb9c75feec6ed4a170320b59 /arch
parent40033a614ea3db196d57c477ca328f44eb1e4df0 (diff)
arm64: KVM: 32bit GP register access
Allow access to the 32bit register file through the usual API.

Reviewed-by: Christopher Covington <cov@codeaurora.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h15
-rw-r--r--arch/arm64/kvm/Makefile2
-rw-r--r--arch/arm64/kvm/regmap.c168
3 files changed, 183 insertions, 2 deletions
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6c1725e93b0b..20a1a3931d8d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -28,6 +28,9 @@
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 
+unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
+unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
+
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -49,7 +52,7 @@ static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 {
-	return false; /* 32bit? Bahhh... */
+	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
 }
 
 static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
@@ -64,16 +67,23 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 {
+	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
+	if (vcpu_mode_is_32bit(vcpu))
+		return vcpu_reg32(vcpu, reg_num);
+
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
 /* Get vcpu SPSR for current mode */
 static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 {
+	if (vcpu_mode_is_32bit(vcpu))
+		return vcpu_spsr32(vcpu);
+
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
 }
 
@@ -81,6 +91,9 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
 	u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
+	if (vcpu_mode_is_32bit(vcpu))
+		return mode > COMPAT_PSR_MODE_USR;
+
 	return mode != PSR_MODE_EL0t;
 }
 
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index dca110556683..a2169ec8d93b 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -15,7 +15,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
 
-kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o
+kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
 kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
 
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
new file mode 100644
index 000000000000..bbc6ae32e4af
--- /dev/null
+++ b/arch/arm64/kvm/regmap.c
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2012,2013 - ARM Ltd
3 * Author: Marc Zyngier <marc.zyngier@arm.com>
4 *
5 * Derived from arch/arm/kvm/emulate.c:
6 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
7 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include <linux/mm.h>
23#include <linux/kvm_host.h>
24#include <asm/kvm_emulate.h>
25#include <asm/ptrace.h>
26
27#define VCPU_NR_MODES 6
28#define REG_OFFSET(_reg) \
29 (offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))
30
31#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))
32
33static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
34 /* USR Registers */
35 {
36 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
37 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
38 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
39 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
40 USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
41 REG_OFFSET(pc)
42 },
43
44 /* FIQ Registers */
45 {
46 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
47 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
48 USR_REG_OFFSET(6), USR_REG_OFFSET(7),
49 REG_OFFSET(compat_r8_fiq), /* r8 */
50 REG_OFFSET(compat_r9_fiq), /* r9 */
51 REG_OFFSET(compat_r10_fiq), /* r10 */
52 REG_OFFSET(compat_r11_fiq), /* r11 */
53 REG_OFFSET(compat_r12_fiq), /* r12 */
54 REG_OFFSET(compat_sp_fiq), /* r13 */
55 REG_OFFSET(compat_lr_fiq), /* r14 */
56 REG_OFFSET(pc)
57 },
58
59 /* IRQ Registers */
60 {
61 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
62 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
63 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
64 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
65 USR_REG_OFFSET(12),
66 REG_OFFSET(compat_sp_irq), /* r13 */
67 REG_OFFSET(compat_lr_irq), /* r14 */
68 REG_OFFSET(pc)
69 },
70
71 /* SVC Registers */
72 {
73 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
74 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
75 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
76 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
77 USR_REG_OFFSET(12),
78 REG_OFFSET(compat_sp_svc), /* r13 */
79 REG_OFFSET(compat_lr_svc), /* r14 */
80 REG_OFFSET(pc)
81 },
82
83 /* ABT Registers */
84 {
85 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
86 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
87 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
88 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
89 USR_REG_OFFSET(12),
90 REG_OFFSET(compat_sp_abt), /* r13 */
91 REG_OFFSET(compat_lr_abt), /* r14 */
92 REG_OFFSET(pc)
93 },
94
95 /* UND Registers */
96 {
97 USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
98 USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
99 USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
100 USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
101 USR_REG_OFFSET(12),
102 REG_OFFSET(compat_sp_und), /* r13 */
103 REG_OFFSET(compat_lr_und), /* r14 */
104 REG_OFFSET(pc)
105 },
106};
107
108/*
109 * Return a pointer to the register number valid in the current mode of
110 * the virtual CPU.
111 */
112unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
113{
114 unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs.regs;
115 unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
116
117 switch (mode) {
118 case COMPAT_PSR_MODE_USR ... COMPAT_PSR_MODE_SVC:
119 mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
120 break;
121
122 case COMPAT_PSR_MODE_ABT:
123 mode = 4;
124 break;
125
126 case COMPAT_PSR_MODE_UND:
127 mode = 5;
128 break;
129
130 case COMPAT_PSR_MODE_SYS:
131 mode = 0; /* SYS maps to USR */
132 break;
133
134 default:
135 BUG();
136 }
137
138 return reg_array + vcpu_reg_offsets[mode][reg_num];
139}
140
141/*
142 * Return the SPSR for the current mode of the virtual CPU.
143 */
144unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu)
145{
146 unsigned long mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
147 switch (mode) {
148 case COMPAT_PSR_MODE_SVC:
149 mode = KVM_SPSR_SVC;
150 break;
151 case COMPAT_PSR_MODE_ABT:
152 mode = KVM_SPSR_ABT;
153 break;
154 case COMPAT_PSR_MODE_UND:
155 mode = KVM_SPSR_UND;
156 break;
157 case COMPAT_PSR_MODE_IRQ:
158 mode = KVM_SPSR_IRQ;
159 break;
160 case COMPAT_PSR_MODE_FIQ:
161 mode = KVM_SPSR_FIQ;
162 break;
163 default:
164 BUG();
165 }
166
167 return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[mode];
168}