author	James Hogan <james.hogan@imgtec.com>	2016-11-11 09:08:32 -0500
committer	James Hogan <james.hogan@imgtec.com>	2017-02-03 10:20:52 -0500
commit	29b500b54ef379f1f3227b633dd477a4dd3cd62b (patch)
tree	d60e393993c1e2b70ddd1a24dfbc1a2e103b6e39
parent	7faa6eec6991715d6c1d85c192738dcac405ab89 (diff)
KVM: MIPS: Support NetLogic KScratch registers
tlbex.c uses the implementation-dependent $22 CP0 register group on
NetLogic cores, with the help of the c0_kscratch() helper. Allow these
registers to be allocated by the KVM entry code too instead of assuming
KScratch registers are all $31, which will also allow pgd_reg to be
handled since it is allocated that way.

We also drop the masking of kscratch_mask with 0xfc, as it is redundant
for the standard KScratch registers (Config4.KScrExist won't have the
low 2 bits set anyway), and apparently not necessary for NetLogic.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
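As a rough illustration of the selection logic this patch changes, the minimal
userspace sketch below mirrors how kvm_mips_entry_setup() picks KScratch
selects from the CPU's kscratch_mask after this change. It is not part of the
patch: the helper name c0_kscratch_example, the is_netlogic flag, and the
example mask value 0x0c are all made up for illustration.

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	/* Mirrors c0_kscratch() from the patch: NetLogic XLP/XLR place the
	 * KScratch registers in the $22 group, other cores in $31. */
	static int c0_kscratch_example(int is_netlogic)
	{
		return is_netlogic ? 22 : 31;
	}

	int main(void)
	{
		/* Example mask as derived from Config4.KScrExist; 0x0c
		 * (selects 2 and 3 available) is illustrative only. */
		unsigned int kscratch_mask = 0x0c;
		int scratch_vcpu[2] = { 0, 0 }, scratch_tmp[2] = { 0, 0 };
		int is_netlogic = 1;	/* pretend we are on an XLP/XLR core */

		/* Pick a scratch register for storing the VCPU pointer */
		if (kscratch_mask) {
			scratch_vcpu[0] = c0_kscratch_example(is_netlogic);
			scratch_vcpu[1] = ffs(kscratch_mask) - 1;	/* lowest set bit */
			kscratch_mask &= ~(1u << scratch_vcpu[1]);
		}

		/* Pick another one as a temporary for saving state */
		if (kscratch_mask) {
			scratch_tmp[0] = c0_kscratch_example(is_netlogic);
			scratch_tmp[1] = ffs(kscratch_mask) - 1;
			kscratch_mask &= ~(1u << scratch_tmp[1]);
		}

		printf("vcpu scratch: $%d sel %d, tmp scratch: $%d sel %d\n",
		       scratch_vcpu[0], scratch_vcpu[1],
		       scratch_tmp[0], scratch_tmp[1]);
		return 0;
	}

With that mask, a NetLogic core would report $22 sel 2 for the VCPU pointer
and $22 sel 3 for the temporary, whereas a standard core would report $31 for
both; before this patch the register group was hard-coded to $31.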
-rw-r--r--	arch/mips/kvm/entry.c	25
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index f683d123172c..7424d3d566ff 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -91,6 +91,21 @@ static void *kvm_mips_build_ret_from_exit(void *addr);
 static void *kvm_mips_build_ret_to_guest(void *addr);
 static void *kvm_mips_build_ret_to_host(void *addr);
 
+/*
+ * The version of this function in tlbex.c uses current_cpu_type(), but for KVM
+ * we assume symmetry.
+ */
+static int c0_kscratch(void)
+{
+	switch (boot_cpu_type()) {
+	case CPU_XLP:
+	case CPU_XLR:
+		return 22;
+	default:
+		return 31;
+	}
+}
+
 /**
  * kvm_mips_entry_setup() - Perform global setup for entry code.
  *
@@ -105,18 +120,18 @@ int kvm_mips_entry_setup(void)
 	 * We prefer to use KScratchN registers if they are available over the
 	 * defaults above, which may not work on all cores.
 	 */
-	unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
+	unsigned int kscratch_mask = cpu_data[0].kscratch_mask;
 
 	/* Pick a scratch register for storing VCPU */
 	if (kscratch_mask) {
-		scratch_vcpu[0] = 31;
+		scratch_vcpu[0] = c0_kscratch();
 		scratch_vcpu[1] = ffs(kscratch_mask) - 1;
 		kscratch_mask &= ~BIT(scratch_vcpu[1]);
 	}
 
 	/* Pick a scratch register to use as a temp for saving state */
 	if (kscratch_mask) {
-		scratch_tmp[0] = 31;
+		scratch_tmp[0] = c0_kscratch();
 		scratch_tmp[1] = ffs(kscratch_mask) - 1;
 		kscratch_mask &= ~BIT(scratch_tmp[1]);
 	}
@@ -132,7 +147,7 @@ static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
 	UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
 
 	/* Save the temp scratch register value in cp0_cause of stack frame */
-	if (scratch_tmp[0] == 31) {
+	if (scratch_tmp[0] == c0_kscratch()) {
 		UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
 		UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
 	}
@@ -148,7 +163,7 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
 	UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
 	UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
 
-	if (scratch_tmp[0] == 31) {
+	if (scratch_tmp[0] == c0_kscratch()) {
 		UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
 		UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
 	}