author	Alexander Graf <agraf@suse.de>	2014-07-25 04:38:59 -0400
committer	Alexander Graf <agraf@suse.de>	2014-07-28 09:23:15 -0400
commit	b2677b8dd8de0dc1496ede4da09b9dfd59f15cea (patch)
tree	3df12ebc9b7cd0ad0090496fdfc9485e393398d1
parent	8c95ead6039d46d2d5c375d3cadac8708121fbe4 (diff)
KVM: PPC: Remove 440 support
The 440 target hasn't been functioning properly for a few releases, and recently I was the only one fixing a very serious bug in it, which indicates to me that nobody else was using it either.

Furthermore, KVM on 440 is slow to the point of being unusable.

We don't have to carry along completely unused code, so remove 440 support and give us one less thing to worry about.

Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--	Documentation/powerpc/00-INDEX	2
-rw-r--r--	Documentation/powerpc/kvm_440.txt	41
-rw-r--r--	arch/powerpc/Kconfig.debug	4
-rw-r--r--	arch/powerpc/configs/ppc44x_defconfig	1
-rw-r--r--	arch/powerpc/include/asm/kvm_44x.h	67
-rw-r--r--	arch/powerpc/include/asm/kvm_asm.h	1
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	3
-rw-r--r--	arch/powerpc/kvm/44x.c	237
-rw-r--r--	arch/powerpc/kvm/44x_emulate.c	194
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c	528
-rw-r--r--	arch/powerpc/kvm/44x_tlb.h	86
-rw-r--r--	arch/powerpc/kvm/Kconfig	16
-rw-r--r--	arch/powerpc/kvm/Makefile	12
-rw-r--r--	arch/powerpc/kvm/booke.h	7
-rw-r--r--	arch/powerpc/kvm/booke_interrupts.S	5
-rw-r--r--	arch/powerpc/kvm/bookehv_interrupts.S	1
-rw-r--r--	arch/powerpc/kvm/powerpc.c	1
17 files changed, 2 insertions, 1204 deletions
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index 6db73df04278..a68784d0a1ee 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -17,8 +17,6 @@ firmware-assisted-dump.txt
 	- Documentation on the firmware assisted dump mechanism "fadump".
 hvcs.txt
 	- IBM "Hypervisor Virtual Console Server" Installation Guide
-kvm_440.txt
-	- Various notes on the implementation of KVM for PowerPC 440.
 mpc52xx.txt
 	- Linux 2.6.x on MPC52xx family
 pmu-ebb.txt
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
deleted file mode 100644
index c02a003fa03a..000000000000
--- a/Documentation/powerpc/kvm_440.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-Hollis Blanchard <hollisb@us.ibm.com>
-15 Apr 2008
-
-Various notes on the implementation of KVM for PowerPC 440:
-
-To enforce isolation, host userspace, guest kernel, and guest userspace all
-run at user privilege level. Only the host kernel runs in supervisor mode.
-Executing privileged instructions in the guest traps into KVM (in the host
-kernel), where we decode and emulate them. Through this technique, unmodified
-440 Linux kernels can be run (slowly) as guests. Future performance work will
-focus on reducing the overhead and frequency of these traps.
-
-The usual code flow is started from userspace invoking an "run" ioctl, which
-causes KVM to switch into guest context. We use IVPR to hijack the host
-interrupt vectors while running the guest, which allows us to direct all
-interrupts to kvmppc_handle_interrupt(). At this point, we could either
-- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
-- let the host interrupt handler run (e.g. when the decrementer fires), or
-- return to host userspace (e.g. when the guest performs device MMIO)
-
-Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
-address space (in host or guest), which gives us virtual address space to use
-for guest mappings. While the guest is running, the host kernel remains mapped
-in AS=0, but the guest can only use AS=1 mappings.
-
-TLB entries: The TLB entries covering the host linear mapping remain
-present while running the guest. This reduces the overhead of lightweight
-exits, which are handled by KVM running in the host kernel. We keep three
-copies of the TLB:
- - guest TLB: contents of the TLB as the guest sees it
- - shadow TLB: the TLB that is actually in hardware while guest is running
- - host TLB: to restore TLB state when context switching guest -> host
-When a TLB miss occurs because a mapping was not present in the shadow TLB,
-but was present in the guest TLB, KVM handles the fault without invoking the
-guest. Large guest pages are backed by multiple 4KB shadow pages through this
-mechanism.
-
-IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
-and block IO, so those drivers must be enabled in the guest. It's possible
-that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
-little effort.
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 790352f93700..93500f64f530 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -202,9 +202,7 @@ config PPC_EARLY_DEBUG_BEAT
 
 config PPC_EARLY_DEBUG_44x
 	bool "Early serial debugging for IBM/AMCC 44x CPUs"
-	# PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
-	# mark, which doesn't work with current 440 KVM.
-	depends on 44x && !KVM
+	depends on 44x
 	help
 	  Select this to enable early debugging for IBM 44x chips via the
 	  inbuilt serial port. If you enable this, ensure you set
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index ccf66b9060a6..924e10df1844 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -127,4 +127,3 @@ CONFIG_CRYPTO_PCBC=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 # CONFIG_CRYPTO_HW is not set
 CONFIG_VIRTUALIZATION=y
-CONFIG_KVM_440=y
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
deleted file mode 100644
index a0e57618ff33..000000000000
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __ASM_44X_H__
21#define __ASM_44X_H__
22
23#include <linux/kvm_host.h>
24
25#define PPC44x_TLB_SIZE 64
26
27/* If the guest is expecting it, this can be as large as we like; we'd just
28 * need to find some way of advertising it. */
29#define KVM44x_GUEST_TLB_SIZE 64
30
31struct kvmppc_44x_tlbe {
32 u32 tid; /* Only the low 8 bits are used. */
33 u32 word0;
34 u32 word1;
35 u32 word2;
36};
37
38struct kvmppc_44x_shadow_ref {
39 struct page *page;
40 u16 gtlb_index;
41 u8 writeable;
42 u8 tid;
43};
44
45struct kvmppc_vcpu_44x {
46 /* Unmodified copy of the guest's TLB. */
47 struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE];
48
49 /* References to guest pages in the hardware TLB. */
50 struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
51
52 /* State of the shadow TLB at guest context switch time. */
53 struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
54 u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
55
56 struct kvm_vcpu vcpu;
57};
58
59static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
60{
61 return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
62}
63
64void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
65void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
66
67#endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 3f3e53047ac4..b8901c4a4922 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -33,7 +33,6 @@
 /* IVPR must be 64KiB-aligned. */
 #define VCPU_SIZE_ORDER 4
 #define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
-#define VCPU_TLB_PGSZ   PPC44x_TLB_64K
 #define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
 
 #define BOOKE_INTERRUPT_CRITICAL 0
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 562f685d1678..5fe2b5d17bc0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -49,7 +49,6 @@
 #define KVM_NR_IRQCHIPS 1
 #define KVM_IRQCHIP_NUM_PINS 256
 
-#if !defined(CONFIG_KVM_440)
 #include <linux/mmu_notifier.h>
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -62,8 +61,6 @@ extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
-#endif
-
 #define HPTEG_CACHE_NUM (1 << 15)
 #define HPTEG_HASH_BITS_PTE 13
 #define HPTEG_HASH_BITS_PTE_LONG 12
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
deleted file mode 100644
index 9cb4b0a36031..000000000000
--- a/arch/powerpc/kvm/44x.c
+++ /dev/null
@@ -1,237 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/kvm_host.h>
21#include <linux/slab.h>
22#include <linux/err.h>
23#include <linux/export.h>
24#include <linux/module.h>
25#include <linux/miscdevice.h>
26
27#include <asm/reg.h>
28#include <asm/cputable.h>
29#include <asm/tlbflush.h>
30#include <asm/kvm_44x.h>
31#include <asm/kvm_ppc.h>
32
33#include "44x_tlb.h"
34#include "booke.h"
35
36static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu)
37{
38 kvmppc_booke_vcpu_load(vcpu, cpu);
39 kvmppc_44x_tlb_load(vcpu);
40}
41
42static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu)
43{
44 kvmppc_44x_tlb_put(vcpu);
45 kvmppc_booke_vcpu_put(vcpu);
46}
47
48int kvmppc_core_check_processor_compat(void)
49{
50 int r;
51
52 if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0)
53 r = 0;
54 else
55 r = -ENOTSUPP;
56
57 return r;
58}
59
60int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
61{
62 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
63 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0];
64 int i;
65
66 tlbe->tid = 0;
67 tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
68 tlbe->word1 = 0;
69 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
70
71 tlbe++;
72 tlbe->tid = 0;
73 tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
74 tlbe->word1 = 0xef600000;
75 tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
76 | PPC44x_TLB_I | PPC44x_TLB_G;
77
78 /* Since the guest can directly access the timebase, it must know the
79 * real timebase frequency. Accordingly, it must see the state of
80 * CCR1[TCS]. */
81 /* XXX CCR1 doesn't exist on all 440 SoCs. */
82 vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
83
84 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++)
85 vcpu_44x->shadow_refs[i].gtlb_index = -1;
86
87 vcpu->arch.cpu_type = KVM_CPU_440;
88 vcpu->arch.pvr = mfspr(SPRN_PVR);
89
90 return 0;
91}
92
93/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
94int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
95 struct kvm_translation *tr)
96{
97 int index;
98 gva_t eaddr;
99 u8 pid;
100 u8 as;
101
102 eaddr = tr->linear_address;
103 pid = (tr->linear_address >> 32) & 0xff;
104 as = (tr->linear_address >> 40) & 0x1;
105
106 index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
107 if (index == -1) {
108 tr->valid = 0;
109 return 0;
110 }
111
112 tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
113 /* XXX what does "writeable" and "usermode" even mean? */
114 tr->valid = 1;
115
116 return 0;
117}
118
119static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu,
120 struct kvm_sregs *sregs)
121{
122 return kvmppc_get_sregs_ivor(vcpu, sregs);
123}
124
125static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu,
126 struct kvm_sregs *sregs)
127{
128 return kvmppc_set_sregs_ivor(vcpu, sregs);
129}
130
131static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
132 union kvmppc_one_reg *val)
133{
134 return -EINVAL;
135}
136
137static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id,
138 union kvmppc_one_reg *val)
139{
140 return -EINVAL;
141}
142
143static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm,
144 unsigned int id)
145{
146 struct kvmppc_vcpu_44x *vcpu_44x;
147 struct kvm_vcpu *vcpu;
148 int err;
149
150 vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
151 if (!vcpu_44x) {
152 err = -ENOMEM;
153 goto out;
154 }
155
156 vcpu = &vcpu_44x->vcpu;
157 err = kvm_vcpu_init(vcpu, kvm, id);
158 if (err)
159 goto free_vcpu;
160
161 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
162 if (!vcpu->arch.shared)
163 goto uninit_vcpu;
164
165 return vcpu;
166
167uninit_vcpu:
168 kvm_vcpu_uninit(vcpu);
169free_vcpu:
170 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
171out:
172 return ERR_PTR(err);
173}
174
175static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu)
176{
177 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
178
179 free_page((unsigned long)vcpu->arch.shared);
180 kvm_vcpu_uninit(vcpu);
181 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
182}
183
184static int kvmppc_core_init_vm_44x(struct kvm *kvm)
185{
186 return 0;
187}
188
189static void kvmppc_core_destroy_vm_44x(struct kvm *kvm)
190{
191}
192
193static struct kvmppc_ops kvm_ops_44x = {
194 .get_sregs = kvmppc_core_get_sregs_44x,
195 .set_sregs = kvmppc_core_set_sregs_44x,
196 .get_one_reg = kvmppc_get_one_reg_44x,
197 .set_one_reg = kvmppc_set_one_reg_44x,
198 .vcpu_load = kvmppc_core_vcpu_load_44x,
199 .vcpu_put = kvmppc_core_vcpu_put_44x,
200 .vcpu_create = kvmppc_core_vcpu_create_44x,
201 .vcpu_free = kvmppc_core_vcpu_free_44x,
202 .mmu_destroy = kvmppc_mmu_destroy_44x,
203 .init_vm = kvmppc_core_init_vm_44x,
204 .destroy_vm = kvmppc_core_destroy_vm_44x,
205 .emulate_op = kvmppc_core_emulate_op_44x,
206 .emulate_mtspr = kvmppc_core_emulate_mtspr_44x,
207 .emulate_mfspr = kvmppc_core_emulate_mfspr_44x,
208};
209
210static int __init kvmppc_44x_init(void)
211{
212 int r;
213
214 r = kvmppc_booke_init();
215 if (r)
216 goto err_out;
217
218 r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE);
219 if (r)
220 goto err_out;
221 kvm_ops_44x.owner = THIS_MODULE;
222 kvmppc_pr_ops = &kvm_ops_44x;
223
224err_out:
225 return r;
226}
227
228static void __exit kvmppc_44x_exit(void)
229{
230 kvmppc_pr_ops = NULL;
231 kvmppc_booke_exit();
232}
233
234module_init(kvmppc_44x_init);
235module_exit(kvmppc_44x_exit);
236MODULE_ALIAS_MISCDEV(KVM_MINOR);
237MODULE_ALIAS("devname:kvm");
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
deleted file mode 100644
index 92c9ab4bcfec..000000000000
--- a/arch/powerpc/kvm/44x_emulate.c
+++ /dev/null
@@ -1,194 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2008
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <asm/kvm_ppc.h>
21#include <asm/dcr.h>
22#include <asm/dcr-regs.h>
23#include <asm/disassemble.h>
24#include <asm/kvm_44x.h>
25#include "timing.h"
26
27#include "booke.h"
28#include "44x_tlb.h"
29
30#define XOP_MFDCRX 259
31#define XOP_MFDCR 323
32#define XOP_MTDCRX 387
33#define XOP_MTDCR 451
34#define XOP_TLBSX 914
35#define XOP_ICCCI 966
36#define XOP_TLBWE 978
37
38static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
39{
40 /* emulate some access in kernel */
41 switch (dcrn) {
42 case DCRN_CPR0_CONFIG_ADDR:
43 vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
44 return EMULATE_DONE;
45 default:
46 vcpu->run->dcr.dcrn = dcrn;
47 vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
48 vcpu->run->dcr.is_write = 1;
49 vcpu->arch.dcr_is_write = 1;
50 vcpu->arch.dcr_needed = 1;
51 kvmppc_account_exit(vcpu, DCR_EXITS);
52 return EMULATE_DO_DCR;
53 }
54}
55
56static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn)
57{
58 /* The guest may access CPR0 registers to determine the timebase
59 * frequency, and it must know the real host frequency because it
60 * can directly access the timebase registers.
61 *
62 * It would be possible to emulate those accesses in userspace,
63 * but userspace can really only figure out the end frequency.
64 * We could decompose that into the factors that compute it, but
65 * that's tricky math, and it's easier to just report the real
66 * CPR0 values.
67 */
68 switch (dcrn) {
69 case DCRN_CPR0_CONFIG_ADDR:
70 kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
71 break;
72 case DCRN_CPR0_CONFIG_DATA:
73 local_irq_disable();
74 mtdcr(DCRN_CPR0_CONFIG_ADDR,
75 vcpu->arch.cpr0_cfgaddr);
76 kvmppc_set_gpr(vcpu, rt,
77 mfdcr(DCRN_CPR0_CONFIG_DATA));
78 local_irq_enable();
79 break;
80 default:
81 vcpu->run->dcr.dcrn = dcrn;
82 vcpu->run->dcr.data = 0;
83 vcpu->run->dcr.is_write = 0;
84 vcpu->arch.dcr_is_write = 0;
85 vcpu->arch.io_gpr = rt;
86 vcpu->arch.dcr_needed = 1;
87 kvmppc_account_exit(vcpu, DCR_EXITS);
88 return EMULATE_DO_DCR;
89 }
90
91 return EMULATE_DONE;
92}
93
94int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
95 unsigned int inst, int *advance)
96{
97 int emulated = EMULATE_DONE;
98 int dcrn = get_dcrn(inst);
99 int ra = get_ra(inst);
100 int rb = get_rb(inst);
101 int rc = get_rc(inst);
102 int rs = get_rs(inst);
103 int rt = get_rt(inst);
104 int ws = get_ws(inst);
105
106 switch (get_op(inst)) {
107 case 31:
108 switch (get_xop(inst)) {
109
110 case XOP_MFDCR:
111 emulated = emulate_mfdcr(vcpu, rt, dcrn);
112 break;
113
114 case XOP_MFDCRX:
115 emulated = emulate_mfdcr(vcpu, rt,
116 kvmppc_get_gpr(vcpu, ra));
117 break;
118
119 case XOP_MTDCR:
120 emulated = emulate_mtdcr(vcpu, rs, dcrn);
121 break;
122
123 case XOP_MTDCRX:
124 emulated = emulate_mtdcr(vcpu, rs,
125 kvmppc_get_gpr(vcpu, ra));
126 break;
127
128 case XOP_TLBWE:
129 emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
130 break;
131
132 case XOP_TLBSX:
133 emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
134 break;
135
136 case XOP_ICCCI:
137 break;
138
139 default:
140 emulated = EMULATE_FAIL;
141 }
142
143 break;
144
145 default:
146 emulated = EMULATE_FAIL;
147 }
148
149 if (emulated == EMULATE_FAIL)
150 emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
151
152 return emulated;
153}
154
155int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
156{
157 int emulated = EMULATE_DONE;
158
159 switch (sprn) {
160 case SPRN_PID:
161 kvmppc_set_pid(vcpu, spr_val); break;
162 case SPRN_MMUCR:
163 vcpu->arch.mmucr = spr_val; break;
164 case SPRN_CCR0:
165 vcpu->arch.ccr0 = spr_val; break;
166 case SPRN_CCR1:
167 vcpu->arch.ccr1 = spr_val; break;
168 default:
169 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
170 }
171
172 return emulated;
173}
174
175int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
176{
177 int emulated = EMULATE_DONE;
178
179 switch (sprn) {
180 case SPRN_PID:
181 *spr_val = vcpu->arch.pid; break;
182 case SPRN_MMUCR:
183 *spr_val = vcpu->arch.mmucr; break;
184 case SPRN_CCR0:
185 *spr_val = vcpu->arch.ccr0; break;
186 case SPRN_CCR1:
187 *spr_val = vcpu->arch.ccr1; break;
188 default:
189 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
190 }
191
192 return emulated;
193}
194
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
deleted file mode 100644
index 0deef1082e02..000000000000
--- a/arch/powerpc/kvm/44x_tlb.c
+++ /dev/null
@@ -1,528 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#include <linux/types.h>
21#include <linux/string.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/highmem.h>
25
26#include <asm/tlbflush.h>
27#include <asm/mmu-44x.h>
28#include <asm/kvm_ppc.h>
29#include <asm/kvm_44x.h>
30#include "timing.h"
31
32#include "44x_tlb.h"
33#include "trace.h"
34
35#ifndef PPC44x_TLBE_SIZE
36#define PPC44x_TLBE_SIZE PPC44x_TLB_4K
37#endif
38
39#define PAGE_SIZE_4K (1<<12)
40#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))
41
42#define PPC44x_TLB_UATTR_MASK \
43 (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
44#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
45#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
46
47#ifdef DEBUG
48void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
49{
50 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
51 struct kvmppc_44x_tlbe *tlbe;
52 int i;
53
54 printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
55 printk("| %2s | %3s | %8s | %8s | %8s |\n",
56 "nr", "tid", "word0", "word1", "word2");
57
58 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
59 tlbe = &vcpu_44x->guest_tlb[i];
60 if (tlbe->word0 & PPC44x_TLB_VALID)
61 printk(" G%2d | %02X | %08X | %08X | %08X |\n",
62 i, tlbe->tid, tlbe->word0, tlbe->word1,
63 tlbe->word2);
64 }
65}
66#endif
67
68static inline void kvmppc_44x_tlbie(unsigned int index)
69{
70 /* 0 <= index < 64, so the V bit is clear and we can use the index as
71 * word0. */
72 asm volatile(
73 "tlbwe %[index], %[index], 0\n"
74 :
75 : [index] "r"(index)
76 );
77}
78
79static inline void kvmppc_44x_tlbre(unsigned int index,
80 struct kvmppc_44x_tlbe *tlbe)
81{
82 asm volatile(
83 "tlbre %[word0], %[index], 0\n"
84 "mfspr %[tid], %[sprn_mmucr]\n"
85 "andi. %[tid], %[tid], 0xff\n"
86 "tlbre %[word1], %[index], 1\n"
87 "tlbre %[word2], %[index], 2\n"
88 : [word0] "=r"(tlbe->word0),
89 [word1] "=r"(tlbe->word1),
90 [word2] "=r"(tlbe->word2),
91 [tid] "=r"(tlbe->tid)
92 : [index] "r"(index),
93 [sprn_mmucr] "i"(SPRN_MMUCR)
94 : "cc"
95 );
96}
97
98static inline void kvmppc_44x_tlbwe(unsigned int index,
99 struct kvmppc_44x_tlbe *stlbe)
100{
101 unsigned long tmp;
102
103 asm volatile(
104 "mfspr %[tmp], %[sprn_mmucr]\n"
105 "rlwimi %[tmp], %[tid], 0, 0xff\n"
106 "mtspr %[sprn_mmucr], %[tmp]\n"
107 "tlbwe %[word0], %[index], 0\n"
108 "tlbwe %[word1], %[index], 1\n"
109 "tlbwe %[word2], %[index], 2\n"
110 : [tmp] "=&r"(tmp)
111 : [word0] "r"(stlbe->word0),
112 [word1] "r"(stlbe->word1),
113 [word2] "r"(stlbe->word2),
114 [tid] "r"(stlbe->tid),
115 [index] "r"(index),
116 [sprn_mmucr] "i"(SPRN_MMUCR)
117 );
118}
119
120static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
121{
122 /* We only care about the guest's permission and user bits. */
123 attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;
124
125 if (!usermode) {
126 /* Guest is in supervisor mode, so we need to translate guest
127 * supervisor permissions into user permissions. */
128 attrib &= ~PPC44x_TLB_USER_PERM_MASK;
129 attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
130 }
131
132 /* Make sure host can always access this memory. */
133 attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
134
135 /* WIMGE = 0b00100 */
136 attrib |= PPC44x_TLB_M;
137
138 return attrib;
139}
140
141/* Load shadow TLB back into hardware. */
142void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
143{
144 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
145 int i;
146
147 for (i = 0; i <= tlb_44x_hwater; i++) {
148 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
149
150 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
151 kvmppc_44x_tlbwe(i, stlbe);
152 }
153}
154
155static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
156 unsigned int i)
157{
158 vcpu_44x->shadow_tlb_mod[i] = 1;
159}
160
161/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
162void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
163{
164 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
165 int i;
166
167 for (i = 0; i <= tlb_44x_hwater; i++) {
168 struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
169
170 if (vcpu_44x->shadow_tlb_mod[i])
171 kvmppc_44x_tlbre(i, stlbe);
172
173 if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
174 kvmppc_44x_tlbie(i);
175 }
176}
177
178
179/* Search the guest TLB for a matching entry. */
180int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
181 unsigned int as)
182{
183 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
184 int i;
185
186 /* XXX Replace loop with fancy data structures. */
187 for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
188 struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
189 unsigned int tid;
190
191 if (eaddr < get_tlb_eaddr(tlbe))
192 continue;
193
194 if (eaddr > get_tlb_end(tlbe))
195 continue;
196
197 tid = get_tlb_tid(tlbe);
198 if (tid && (tid != pid))
199 continue;
200
201 if (!get_tlb_v(tlbe))
202 continue;
203
204 if (get_tlb_ts(tlbe) != as)
205 continue;
206
207 return i;
208 }
209
210 return -1;
211}
212
213gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
214 gva_t eaddr)
215{
216 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
217 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
218 unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;
219
220 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
221}
222
223int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
224{
225 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
226
227 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
228}
229
230int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
231{
232 unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
233
234 return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
235}
236
237void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
238{
239}
240
241void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
242{
243}
244
245static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
246 unsigned int stlb_index)
247{
248 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];
249
250 if (!ref->page)
251 return;
252
253 /* Discard from the TLB. */
254 /* Note: we could actually invalidate a host mapping, if the host overwrote
255 * this TLB entry since we inserted a guest mapping. */
256 kvmppc_44x_tlbie(stlb_index);
257
258 /* Now release the page. */
259 if (ref->writeable)
260 kvm_release_page_dirty(ref->page);
261 else
262 kvm_release_page_clean(ref->page);
263
264 ref->page = NULL;
265
266 /* XXX set tlb_44x_index to stlb_index? */
267
268 trace_kvm_stlb_inval(stlb_index);
269}
270
271void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu)
272{
273 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
274 int i;
275
276 for (i = 0; i <= tlb_44x_hwater; i++)
277 kvmppc_44x_shadow_release(vcpu_44x, i);
278}
279
280/**
281 * kvmppc_mmu_map -- create a host mapping for guest memory
282 *
283 * If the guest wanted a larger page than the host supports, only the first
284 * host page is mapped here and the rest are demand faulted.
285 *
286 * If the guest wanted a smaller page than the host page size, we map only the
287 * guest-size page (i.e. not a full host page mapping).
288 *
289 * Caller must ensure that the specified guest TLB entry is safe to insert into
290 * the shadow TLB.
291 */
292void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
293 unsigned int gtlb_index)
294{
295 struct kvmppc_44x_tlbe stlbe;
296 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
297 struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
298 struct kvmppc_44x_shadow_ref *ref;
299 struct page *new_page;
300 hpa_t hpaddr;
301 gfn_t gfn;
302 u32 asid = gtlbe->tid;
303 u32 flags = gtlbe->word2;
304 u32 max_bytes = get_tlb_bytes(gtlbe);
305 unsigned int victim;
306
307 /* Select TLB entry to clobber. Indirectly guard against races with the TLB
308 * miss handler by disabling interrupts. */
309 local_irq_disable();
310 victim = ++tlb_44x_index;
311 if (victim > tlb_44x_hwater)
312 victim = 0;
313 tlb_44x_index = victim;
314 local_irq_enable();
315
316 /* Get reference to new page. */
317 gfn = gpaddr >> PAGE_SHIFT;
318 new_page = gfn_to_page(vcpu->kvm, gfn);
319 if (is_error_page(new_page)) {
320 printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
321 (unsigned long long)gfn);
322 return;
323 }
324 hpaddr = page_to_phys(new_page);
325
326 /* Invalidate any previous shadow mappings. */
327 kvmppc_44x_shadow_release(vcpu_44x, victim);
328
329 /* XXX Make sure (va, size) doesn't overlap any other
330 * entries. 440x6 user manual says the result would be
331 * "undefined." */
332
333 /* XXX what about AS? */
334
335 /* Force TS=1 for all guest mappings. */
336 stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;
337
338 if (max_bytes >= PAGE_SIZE) {
339 /* Guest mapping is larger than or equal to host page size. We can use
340 * a "native" host mapping. */
341 stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
342 } else {
343 /* Guest mapping is smaller than host page size. We must restrict the
344 * size of the mapping to be at most the smaller of the two, but for
345 * simplicity we fall back to a 4K mapping (this is probably what the
346 * guest is using anyways). */
347 stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;
348
349 /* 'hpaddr' is a host page, which is larger than the mapping we're
350 * inserting here. To compensate, we must add the in-page offset to the
351 * sub-page. */
352 hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
353 }
354
355 stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
356 stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
357 vcpu->arch.shared->msr & MSR_PR);
358 stlbe.tid = !(asid & 0xff);
359
360 /* Keep track of the reference so we can properly release it later. */
361 ref = &vcpu_44x->shadow_refs[victim];
362 ref->page = new_page;
363 ref->gtlb_index = gtlb_index;
364 ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
365 ref->tid = stlbe.tid;
366
367 /* Insert shadow mapping into hardware TLB. */
368 kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
369 kvmppc_44x_tlbwe(victim, &stlbe);
370 trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
371 stlbe.word2);
372}
373
374/* For a particular guest TLB entry, invalidate the corresponding host TLB
375 * mappings and release the host pages. */
376static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
377 unsigned int gtlb_index)
378{
379 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
380 int i;
381
382 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
383 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
384 if (ref->gtlb_index == gtlb_index)
385 kvmppc_44x_shadow_release(vcpu_44x, i);
386 }
387}
388
389void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
390{
391 int usermode = vcpu->arch.shared->msr & MSR_PR;
392
393 vcpu->arch.shadow_pid = !usermode;
394}
395
396void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
397{
398 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
399 int i;
400
401 if (unlikely(vcpu->arch.pid == new_pid))
402 return;
403
404 vcpu->arch.pid = new_pid;
405
406 /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
407 * can't access guest kernel mappings (TID=1). When we switch to a new
408 * guest PID, which will also use host PID=0, we must discard the old guest
409 * userspace mappings. */
410 for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
411 struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
412
413 if (ref->tid == 0)
414 kvmppc_44x_shadow_release(vcpu_44x, i);
415 }
416}
417
418static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
419 const struct kvmppc_44x_tlbe *tlbe)
420{
421 gpa_t gpa;
422
423 if (!get_tlb_v(tlbe))
424 return 0;
425
426 /* Does it match current guest AS? */
427 /* XXX what about IS != DS? */
428 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
429 return 0;
430
431 gpa = get_tlb_raddr(tlbe);
432 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
433 /* Mapping is not for RAM. */
434 return 0;
435
436 return 1;
437}
438
439int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
440{
441 struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
442 struct kvmppc_44x_tlbe *tlbe;
443 unsigned int gtlb_index;
444 int idx;
445
446 gtlb_index = kvmppc_get_gpr(vcpu, ra);
447 if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
448 printk("%s: index %d\n", __func__, gtlb_index);
449 kvmppc_dump_vcpu(vcpu);
450 return EMULATE_FAIL;
451 }
452
453 tlbe = &vcpu_44x->guest_tlb[gtlb_index];
454
455 /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
456 if (tlbe->word0 & PPC44x_TLB_VALID)
457 kvmppc_44x_invalidate(vcpu, gtlb_index);
458
459 switch (ws) {
460 case PPC44x_TLB_PAGEID:
461 tlbe->tid = get_mmucr_stid(vcpu);
462 tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
463 break;
464
465 case PPC44x_TLB_XLAT:
466 tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
467 break;
468
469 case PPC44x_TLB_ATTRIB:
470 tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
471 break;
472
473 default:
474 return EMULATE_FAIL;
475 }
476
477 idx = srcu_read_lock(&vcpu->kvm->srcu);
478
479 if (tlbe_is_host_safe(vcpu, tlbe)) {
480 gva_t eaddr;
481 gpa_t gpaddr;
482 u32 bytes;
483
484 eaddr = get_tlb_eaddr(tlbe);
485 gpaddr = get_tlb_raddr(tlbe);
486
487 /* Use the advertised page size to mask effective and real addrs. */
488 bytes = get_tlb_bytes(tlbe);
489 eaddr &= ~(bytes - 1);
490 gpaddr &= ~(bytes - 1);
491
492 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
493 }
494
495 srcu_read_unlock(&vcpu->kvm->srcu, idx);
496
497 trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
498 tlbe->word2);
499
500 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
501 return EMULATE_DONE;
502}
503
504int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
505{
506 u32 ea;
507 int gtlb_index;
508 unsigned int as = get_mmucr_sts(vcpu);
509 unsigned int pid = get_mmucr_stid(vcpu);
510
511 ea = kvmppc_get_gpr(vcpu, rb);
512 if (ra)
513 ea += kvmppc_get_gpr(vcpu, ra);
514
515 gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
516 if (rc) {
517 u32 cr = kvmppc_get_cr(vcpu);
518
519 if (gtlb_index < 0)
520 kvmppc_set_cr(vcpu, cr & ~0x20000000);
521 else
522 kvmppc_set_cr(vcpu, cr | 0x20000000);
523 }
524 kvmppc_set_gpr(vcpu, rt, gtlb_index);
525
526 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
527 return EMULATE_DONE;
528}
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
deleted file mode 100644
index a9ff80e51526..000000000000
--- a/arch/powerpc/kvm/44x_tlb.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */
19
20#ifndef __KVM_POWERPC_TLB_H__
21#define __KVM_POWERPC_TLB_H__
22
23#include <linux/kvm_host.h>
24#include <asm/mmu-44x.h>
25
26extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
27 unsigned int pid, unsigned int as);
28
29extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb,
30 u8 rc);
31extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws);
32
33/* TLB helper functions */
34static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe)
35{
36 return (tlbe->word0 >> 4) & 0xf;
37}
38
39static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe)
40{
41 return tlbe->word0 & 0xfffffc00;
42}
43
44static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe)
45{
46 unsigned int pgsize = get_tlb_size(tlbe);
47 return 1 << 10 << (pgsize << 1);
48}
49
50static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe)
51{
52 return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
53}
54
55static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe)
56{
57 u64 word1 = tlbe->word1;
58 return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
59}
60
61static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe)
62{
63 return tlbe->tid & 0xff;
64}
65
66static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe)
67{
68 return (tlbe->word0 >> 8) & 0x1;
69}
70
71static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe)
72{
73 return (tlbe->word0 >> 9) & 0x1;
74}
75
76static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
77{
78 return vcpu->arch.mmucr & 0xff;
79}
80
81static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
82{
83 return (vcpu->arch.mmucr >> 16) & 0x1;
84}
85
86#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8aeeda1ff42a..8f104a6879f0 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -112,23 +112,9 @@ config KVM_BOOK3S_64_PR
 config KVM_BOOKE_HV
 	bool
 
-config KVM_440
-	bool "KVM support for PowerPC 440 processors"
-	depends on 44x
-	select KVM
-	select KVM_MMIO
-	---help---
-	  Support running unmodified 440 guest kernels in virtual machines on
-	  440 host processors.
-
-	  This module provides access to the hardware capabilities through
-	  a character device node named /dev/kvm.
-
-	  If unsure, say N.
-
 config KVM_EXIT_TIMING
 	bool "Detailed exit timing"
-	depends on KVM_440 || KVM_E500V2 || KVM_E500MC
+	depends on KVM_E500V2 || KVM_E500MC
 	---help---
 	  Calculate elapsed time for every exit/enter cycle. A per-vcpu
 	  report is available in debugfs kvm/vm#_vcpu#_timing.
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ce569b6bf4d8..777f8941a8d5 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -10,7 +10,6 @@ KVM := ../../../virt/kvm
 common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
 		$(KVM)/eventfd.o
 
-CFLAGS_44x_tlb.o := -I.
 CFLAGS_e500_mmu.o := -I.
 CFLAGS_e500_mmu_host.o := -I.
 CFLAGS_emulate.o := -I.
@@ -21,16 +20,6 @@ obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
-kvm-440-objs := \
-	$(common-objs-y) \
-	booke.o \
-	booke_emulate.o \
-	booke_interrupts.o \
-	44x.o \
-	44x_tlb.o \
-	44x_emulate.o
-kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs)
-
 kvm-e500-objs := \
 	$(common-objs-y) \
 	booke.o \
@@ -127,7 +116,6 @@ kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
 
 kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
 
-obj-$(CONFIG_KVM_440) += kvm.o
 obj-$(CONFIG_KVM_E500V2) += kvm.o
 obj-$(CONFIG_KVM_E500MC) += kvm.o
 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index b632cd35919b..f753543c56fa 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -99,13 +99,6 @@ enum int_class {
 
 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
 
-extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
-				      unsigned int inst, int *advance);
-extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn,
-					 ulong spr_val);
-extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn,
-					 ulong *spr_val);
 extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
 extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
 				       struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 2c6deb5ef2fe..84c308a9a371 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -21,7 +21,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
-#include <asm/mmu-44x.h>
 #include <asm/page.h>
 #include <asm/asm-offsets.h>
 
@@ -424,10 +423,6 @@ lightweight_exit:
 	mtspr	SPRN_PID1, r3
 #endif
 
-#ifdef CONFIG_44x
-	iccci	0, 0 /* XXX hack */
-#endif
-
 	/* Load some guest volatiles. */
 	lwz	r0, VCPU_GPR(R0)(r4)
 	lwz	r2, VCPU_GPR(R2)(r4)
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index b4f8fbafee7a..e9fa56a911fd 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -24,7 +24,6 @@
 #include <asm/ppc_asm.h>
 #include <asm/kvm_asm.h>
 #include <asm/reg.h>
-#include <asm/mmu-44x.h>
 #include <asm/page.h>
 #include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index cfa6cfabf4a3..8e0356835960 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -217,7 +217,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
 		r = EV_SUCCESS;
 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
-		/* XXX Missing magic page on 44x */
 		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
 #endif
 