-rw-r--r--  arch/mips/Kbuild | 4
-rw-r--r--  arch/mips/Kconfig | 18
-rw-r--r--  arch/mips/configs/malta_kvm_defconfig | 456
-rw-r--r--  arch/mips/configs/malta_kvm_guest_defconfig | 453
-rw-r--r--  arch/mips/include/asm/kvm.h | 55
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 667
-rw-r--r--  arch/mips/include/asm/mach-generic/spaces.h | 9
-rw-r--r--  arch/mips/include/asm/mmu_context.h | 6
-rw-r--r--  arch/mips/include/asm/processor.h | 5
-rw-r--r--  arch/mips/include/asm/sn/sn_private.h | 2
-rw-r--r--  arch/mips/include/asm/sn/types.h | 1
-rw-r--r--  arch/mips/include/asm/uaccess.h | 11
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 66
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c | 4
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 4
-rw-r--r--  arch/mips/kernel/smp.c | 1
-rw-r--r--  arch/mips/kernel/traps.c | 7
-rw-r--r--  arch/mips/kvm/00README.txt | 31
-rw-r--r--  arch/mips/kvm/Kconfig | 49
-rw-r--r--  arch/mips/kvm/Makefile | 13
-rw-r--r--  arch/mips/kvm/kvm_cb.c | 14
-rw-r--r--  arch/mips/kvm/kvm_locore.S | 650
-rw-r--r--  arch/mips/kvm/kvm_mips.c | 958
-rw-r--r--  arch/mips/kvm/kvm_mips_comm.h | 23
-rw-r--r--  arch/mips/kvm/kvm_mips_commpage.c | 37
-rw-r--r--  arch/mips/kvm/kvm_mips_dyntrans.c | 149
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c | 1829
-rw-r--r--  arch/mips/kvm/kvm_mips_int.c | 243
-rw-r--r--  arch/mips/kvm/kvm_mips_int.h | 49
-rw-r--r--  arch/mips/kvm/kvm_mips_opcode.h | 24
-rw-r--r--  arch/mips/kvm/kvm_mips_stats.c | 82
-rw-r--r--  arch/mips/kvm/kvm_tlb.c | 932
-rw-r--r--  arch/mips/kvm/kvm_trap_emul.c | 482
-rw-r--r--  arch/mips/kvm/trace.h | 46
-rw-r--r--  arch/mips/mm/c-r4k.c | 6
-rw-r--r--  arch/mips/mm/cache.c | 1
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 2
-rw-r--r--  arch/mips/mti-malta/Platform | 6
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 15
-rw-r--r--  arch/mips/sgi-ip27/ip27-klnuma.c | 2
-rw-r--r--  arch/mips/sgi-ip27/ip27-memory.c | 16
-rw-r--r--  virt/kvm/kvm_main.c | 2
42 files changed, 7412 insertions, 18 deletions
diff --git a/arch/mips/Kbuild b/arch/mips/Kbuild
index 7dd65cfae837..d2cfe45f332b 100644
--- a/arch/mips/Kbuild
+++ b/arch/mips/Kbuild
@@ -17,3 +17,7 @@ obj- := $(platform-)
 obj-y += kernel/
 obj-y += mm/
 obj-y += math-emu/
+
+ifdef CONFIG_KVM
+obj-y += kvm/
+endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c1997db9c57c..0cb6f5ffeecd 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1234,6 +1234,7 @@ config CPU_MIPS32_R2
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select HAVE_KVM
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture. Most modern embedded systems with a 32-bit
@@ -1734,6 +1735,20 @@ config 64BIT
 
 endchoice
 
+config KVM_GUEST
+	bool "KVM Guest Kernel"
+	help
+	  Select this option if building a guest kernel for KVM (Trap & Emulate) mode.
+
+config KVM_HOST_FREQ
+	int "KVM Host Processor Frequency (MHz)"
+	depends on KVM_GUEST
+	default 500
+	help
+	  Select this option if building a guest kernel for KVM to skip
+	  RTC emulation when determining guest CPU frequency. Instead, the guest
+	  processor frequency is automatically derived from the host frequency.
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
@@ -2014,6 +2029,7 @@ config SB1_PASS_2_1_WORKAROUNDS
 	depends on CPU_SB1 && CPU_SB1_PASS_2
 	default y
 
+
 config 64BIT_PHYS_ADDR
 	bool
 
@@ -2547,3 +2563,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/mips/kvm/Kconfig"
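
A note on CONFIG_KVM_HOST_FREQ: it only affects guest timekeeping. The platform hook that consumes it lives in arch/mips/mti-malta/malta-time.c (changed by this patch but not shown in this excerpt). A minimal sketch of the idea; the halving of the pipeline clock is an assumption here, not taken from this excerpt:

	#ifdef CONFIG_KVM_GUEST
	/* Derive the CP0 count rate from the configured host frequency
	 * instead of calibrating against the emulated RTC. CP0 Count
	 * conventionally ticks at half the pipeline clock. */
	static unsigned int guest_hpt_freq_hz(void)
	{
		return CONFIG_KVM_HOST_FREQ * 1000000 / 2;
	}
	#endif
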
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
new file mode 100644
index 000000000000..341bb47204d6
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -0,0 +1,456 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+CONFIG_PERF_EVENTS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DYN_TRANS=y
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
new file mode 100644
index 000000000000..2b8558b71080
--- /dev/null
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -0,0 +1,453 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_QUEUE=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_IPDDP_DECAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/asm/kvm.h
new file mode 100644
index 000000000000..85789eacbf18
--- /dev/null
+++ b/arch/mips/include/asm/kvm.h
@@ -0,0 +1,55 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+#define __KVM_MIPS
+
+#define N_MIPS_COPROC_REGS	32
+#define N_MIPS_COPROC_SEL	8
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+	__u32 gprs[32];
+	__u32 hi;
+	__u32 lo;
+	__u32 pc;
+
+	__u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_mips_interrupt {
+	/* in */
+	__u32 cpu;
+	__u32 irq;
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
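
The kvm_regs layout above is what userspace sees through the generic KVM_GET_REGS/KVM_SET_REGS ioctls. A minimal host-side sketch (vcpu_fd is assumed to be an already-created VCPU file descriptor; error handling is trimmed):

	#include <linux/kvm.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	static void dump_guest_pc(int vcpu_fd)
	{
		struct kvm_regs regs;

		if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0) {
			perror("KVM_GET_REGS");
			return;
		}
		/* gprs[29] is the MIPS stack pointer */
		printf("pc=0x%08x sp=0x%08x\n", regs.pc, regs.gprs[29]);
	}
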
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
new file mode 100644
index 000000000000..143875c6c95a
--- /dev/null
+++ b/arch/mips/include/asm/kvm_host.h
@@ -0,0 +1,667 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __MIPS_KVM_HOST_H__
+#define __MIPS_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/threads.h>
+#include <linux/spinlock.h>
+
+
+#define KVM_MAX_VCPUS		1
+#define KVM_USER_MEM_SLOTS	8
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS	0
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
+/* Don't support huge pages */
+#define KVM_HPAGE_GFN_SHIFT(x)	0
+
+/* We don't currently support large pages. */
+#define KVM_NR_PAGE_SIZES	1
+#define KVM_PAGES_PER_HPAGE(x)	1
+
+
+
+/* Special address that contains the comm page, used for reducing # of traps */
+#define KVM_GUEST_COMMPAGE_ADDR	0x0
+
+#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
+					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
+
+#define KVM_GUEST_KUSEG		0x00000000UL
+#define KVM_GUEST_KSEG0		0x40000000UL
+#define KVM_GUEST_KSEG23	0x60000000UL
+#define KVM_GUEST_KSEGX(a)	((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_CPHYSADDR(a)	((_ACAST32_(a)) & 0x1fffffff)
+
+#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+/*
+ * Map an address to a certain kernel segment
+ */
+#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
+#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
+#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
+
+#define KVM_INVALID_PAGE	0xdeadbeef
+#define KVM_INVALID_INST	0xdeadbeef
+#define KVM_INVALID_ADDR	0xdeadbeef
+
+#define KVM_MALTA_GUEST_RTC_ADDR	0xb8000070UL
+
+#define GUEST_TICKS_PER_JIFFY	(40000000/HZ)
+#define MS_TO_NS(x)		(x * 1E6L)
+
+#define CAUSEB_DC	27
+#define CAUSEF_DC	(_ULCAST_(1) << 27)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+struct kvm_interrupt;
+
+extern atomic_t kvm_mips_instance;
+extern pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+extern void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+extern bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+
+struct kvm_vm_stat {
+	u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+	u32 wait_exits;
+	u32 cache_exits;
+	u32 signal_exits;
+	u32 int_exits;
+	u32 cop_unusable_exits;
+	u32 tlbmod_exits;
+	u32 tlbmiss_ld_exits;
+	u32 tlbmiss_st_exits;
+	u32 addrerr_st_exits;
+	u32 addrerr_ld_exits;
+	u32 syscall_exits;
+	u32 resvd_inst_exits;
+	u32 break_inst_exits;
+	u32 flush_dcache_exits;
+	u32 halt_wakeup;
+};
+
+enum kvm_mips_exit_types {
+	WAIT_EXITS,
+	CACHE_EXITS,
+	SIGNAL_EXITS,
+	INT_EXITS,
+	COP_UNUSABLE_EXITS,
+	TLBMOD_EXITS,
+	TLBMISS_LD_EXITS,
+	TLBMISS_ST_EXITS,
+	ADDRERR_ST_EXITS,
+	ADDRERR_LD_EXITS,
+	SYSCALL_EXITS,
+	RESVD_INST_EXITS,
+	BREAK_INST_EXITS,
+	FLUSH_DCACHE_EXITS,
+	MAX_KVM_MIPS_EXIT_TYPES
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_arch {
+	/* Guest GVA->HPA page table */
+	unsigned long *guest_pmap;
+	unsigned long guest_pmap_npages;
+
+	/* Wired host TLB used for the commpage */
+	int commpage_tlb;
+};
+
+#define N_MIPS_COPROC_REGS	32
+#define N_MIPS_COPROC_SEL	8
+
+struct mips_coproc {
+	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
+#endif
+};
+
+/*
+ * Coprocessor 0 register names
+ */
+#define MIPS_CP0_TLB_INDEX	0
+#define MIPS_CP0_TLB_RANDOM	1
+#define MIPS_CP0_TLB_LOW	2
+#define MIPS_CP0_TLB_LO0	2
+#define MIPS_CP0_TLB_LO1	3
+#define MIPS_CP0_TLB_CONTEXT	4
+#define MIPS_CP0_TLB_PG_MASK	5
+#define MIPS_CP0_TLB_WIRED	6
+#define MIPS_CP0_HWRENA		7
+#define MIPS_CP0_BAD_VADDR	8
+#define MIPS_CP0_COUNT		9
+#define MIPS_CP0_TLB_HI		10
+#define MIPS_CP0_COMPARE	11
+#define MIPS_CP0_STATUS		12
+#define MIPS_CP0_CAUSE		13
+#define MIPS_CP0_EXC_PC		14
+#define MIPS_CP0_PRID		15
+#define MIPS_CP0_CONFIG		16
+#define MIPS_CP0_LLADDR		17
+#define MIPS_CP0_WATCH_LO	18
+#define MIPS_CP0_WATCH_HI	19
+#define MIPS_CP0_TLB_XCONTEXT	20
+#define MIPS_CP0_ECC		26
+#define MIPS_CP0_CACHE_ERR	27
+#define MIPS_CP0_TAG_LO		28
+#define MIPS_CP0_TAG_HI		29
+#define MIPS_CP0_ERROR_PC	30
+#define MIPS_CP0_DEBUG		23
+#define MIPS_CP0_DEPC		24
+#define MIPS_CP0_PERFCNT	25
+#define MIPS_CP0_ERRCTL		26
+#define MIPS_CP0_DATA_LO	28
+#define MIPS_CP0_DATA_HI	29
+#define MIPS_CP0_DESAVE		31
+
+#define MIPS_CP0_CONFIG_SEL	0
+#define MIPS_CP0_CONFIG1_SEL	1
+#define MIPS_CP0_CONFIG2_SEL	2
+#define MIPS_CP0_CONFIG3_SEL	3
+
+/* Config0 register bits */
+#define CP0C0_M		31
+#define CP0C0_K23	28
+#define CP0C0_KU	25
+#define CP0C0_MDU	20
+#define CP0C0_MM	17
+#define CP0C0_BM	16
+#define CP0C0_BE	15
+#define CP0C0_AT	13
+#define CP0C0_AR	10
+#define CP0C0_MT	7
+#define CP0C0_VI	3
+#define CP0C0_K0	0
+
+/* Config1 register bits */
+#define CP0C1_M		31
+#define CP0C1_MMU	25
+#define CP0C1_IS	22
+#define CP0C1_IL	19
+#define CP0C1_IA	16
+#define CP0C1_DS	13
+#define CP0C1_DL	10
+#define CP0C1_DA	7
+#define CP0C1_C2	6
+#define CP0C1_MD	5
+#define CP0C1_PC	4
+#define CP0C1_WR	3
+#define CP0C1_CA	2
+#define CP0C1_EP	1
+#define CP0C1_FP	0
+
+/* Config2 Register bits */
+#define CP0C2_M		31
+#define CP0C2_TU	28
+#define CP0C2_TS	24
+#define CP0C2_TL	20
+#define CP0C2_TA	16
+#define CP0C2_SU	12
+#define CP0C2_SS	8
+#define CP0C2_SL	4
+#define CP0C2_SA	0
+
+/* Config3 Register bits */
+#define CP0C3_M		31
+#define CP0C3_ISA_ON_EXC 16
+#define CP0C3_ULRI	13
+#define CP0C3_DSPP	10
+#define CP0C3_LPA	7
+#define CP0C3_VEIC	6
+#define CP0C3_VInt	5
+#define CP0C3_SP	4
+#define CP0C3_MT	2
+#define CP0C3_SM	1
+#define CP0C3_TL	0
+
+/* Have config1, Cacheable, noncoherent, write-back, write allocate */
+#define MIPS_CONFIG0	\
+	((1 << CP0C0_M) | (0x3 << CP0C0_K0))
+
+/* Have config2, no coprocessor2 attached, no MDMX support attached,
+   no performance counters, no code compression, EJTAG present,
+   no FPU, no watch registers */
+#define MIPS_CONFIG1	\
+((1 << CP0C1_M) |	\
+ (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) |	\
+ (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) |	\
+ (0 << CP0C1_FP))
+
+/* Have config3, no tertiary/secondary caches implemented */
+#define MIPS_CONFIG2	\
+((1 << CP0C2_M))
+
+/* No config4, no DSP ASE, no large physaddr (PABITS),
+   no external interrupt controller, no vectored interrupts,
+   no 1kb pages, no SmartMIPS ASE, no trace logic */
+#define MIPS_CONFIG3	\
+((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) |	\
+ (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) |	\
+ (0 << CP0C3_SM) | (0 << CP0C3_TL))
+
+/* MMU types, the first four entries have the same layout as the
+   CP0C0_MT field. */
+enum mips_mmu_types {
+	MMU_TYPE_NONE,
+	MMU_TYPE_R4000,
+	MMU_TYPE_RESERVED,
+	MMU_TYPE_FMT,
+	MMU_TYPE_R3000,
+	MMU_TYPE_R6000,
+	MMU_TYPE_R8000
+};
+
+/*
+ * Trap codes
+ */
+#define T_INT		0	/* Interrupt pending */
+#define T_TLB_MOD	1	/* TLB modified fault */
+#define T_TLB_LD_MISS	2	/* TLB miss on load or ifetch */
+#define T_TLB_ST_MISS	3	/* TLB miss on a store */
+#define T_ADDR_ERR_LD	4	/* Address error on a load or ifetch */
+#define T_ADDR_ERR_ST	5	/* Address error on a store */
+#define T_BUS_ERR_IFETCH 6	/* Bus error on an ifetch */
+#define T_BUS_ERR_LD_ST	7	/* Bus error on a load or store */
+#define T_SYSCALL	8	/* System call */
+#define T_BREAK		9	/* Breakpoint */
+#define T_RES_INST	10	/* Reserved instruction exception */
+#define T_COP_UNUSABLE	11	/* Coprocessor unusable */
+#define T_OVFLOW	12	/* Arithmetic overflow */
+
+/*
+ * Trap definitions added for r4000 port.
+ */
+#define T_TRAP		13	/* Trap instruction */
+#define T_VCEI		14	/* Virtual coherency exception */
+#define T_FPE		15	/* Floating point exception */
+#define T_WATCH		23	/* Watch address reference */
+#define T_VCED		31	/* Virtual coherency data */
+
+/* Resume Flags */
+#define RESUME_FLAG_DR	(1<<0)	/* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST (1<<1)	/* Resume host? */
+
+#define RESUME_GUEST	0
+#define RESUME_GUEST_DR	RESUME_FLAG_DR
+#define RESUME_HOST	RESUME_FLAG_HOST
+
+enum emulation_result {
+	EMULATE_DONE,		/* no further processing */
+	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
+	EMULATE_FAIL,		/* can't emulate this instruction */
+	EMULATE_WAIT,		/* WAIT instruction */
+	EMULATE_PRIV_FAIL,
+};
+
+#define MIPS3_PG_G	0x00000001	/* Global; ignore ASID if in lo0 & lo1 */
+#define MIPS3_PG_V	0x00000002	/* Valid */
+#define MIPS3_PG_NV	0x00000000
+#define MIPS3_PG_D	0x00000004	/* Dirty */
+
+#define mips3_paddr_to_tlbpfn(x)	\
+	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
+#define mips3_tlbpfn_to_paddr(x)	\
+	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
+
+#define MIPS3_PG_SHIFT	6
+#define MIPS3_PG_FRAME	0x3fffffc0
+
+#define VPN2_MASK	0xffffe000
+#define TLB_IS_GLOBAL(x)	(((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
+#define TLB_VPN2(x)	((x).tlb_hi & VPN2_MASK)
+#define TLB_ASID(x)	((x).tlb_hi & ASID_MASK)
+#define TLB_IS_VALID(x, va)	(((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
+
+struct kvm_mips_tlb {
+	long tlb_mask;
+	long tlb_hi;
+	long tlb_lo0;
+	long tlb_lo1;
+};
+
+#define KVM_MIPS_GUEST_TLB_SIZE	64
+struct kvm_vcpu_arch {
+	void *host_ebase, *guest_ebase;
+	unsigned long host_stack;
+	unsigned long host_gp;
+
+	/* Host CP0 registers used when handling exits from guest */
+	unsigned long host_cp0_badvaddr;
+	unsigned long host_cp0_cause;
+	unsigned long host_cp0_epc;
+	unsigned long host_cp0_entryhi;
+	uint32_t guest_inst;
+
+	/* GPRS */
+	unsigned long gprs[32];
+	unsigned long hi;
+	unsigned long lo;
+	unsigned long pc;
+
+	/* FPU State */
+	struct mips_fpu_struct fpu;
+
+	/* COP0 State */
+	struct mips_coproc *cop0;
+
+	/* Host KSEG0 address of the EI/DI offset */
+	void *kseg0_commpage;
+
+	u32 io_gpr;		/* GPR used as IO source/target */
+
+	/* Used to calibrate the virtual count register for the guest */
+	int32_t host_cp0_count;
+
+	/* Bitmask of exceptions that are pending */
+	unsigned long pending_exceptions;
+
+	/* Bitmask of pending exceptions to be cleared */
+	unsigned long pending_exceptions_clr;
+
+	unsigned long pending_load_cause;
+
+	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
+	unsigned long preempt_entryhi;
+
+	/* S/W Based TLB for guest */
+	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
+
+	/* Cached guest kernel/user ASIDs */
+	uint32_t guest_user_asid[NR_CPUS];
+	uint32_t guest_kernel_asid[NR_CPUS];
+	struct mm_struct guest_kernel_mm, guest_user_mm;
+
+	struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE];
+
+
+	struct hrtimer comparecount_timer;
+
+	int last_sched_cpu;
+
+	/* WAIT executed */
+	int wait;
+};
+
+
+#define kvm_read_c0_guest_index(cop0)		(cop0->reg[MIPS_CP0_TLB_INDEX][0])
+#define kvm_write_c0_guest_index(cop0, val)	(cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
+#define kvm_read_c0_guest_entrylo0(cop0)	(cop0->reg[MIPS_CP0_TLB_LO0][0])
+#define kvm_read_c0_guest_entrylo1(cop0)	(cop0->reg[MIPS_CP0_TLB_LO1][0])
+#define kvm_read_c0_guest_context(cop0)		(cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
+#define kvm_write_c0_guest_context(cop0, val)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
+#define kvm_read_c0_guest_userlocal(cop0)	(cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
+#define kvm_read_c0_guest_pagemask(cop0)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
+#define kvm_write_c0_guest_pagemask(cop0, val)	(cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
+#define kvm_read_c0_guest_wired(cop0)		(cop0->reg[MIPS_CP0_TLB_WIRED][0])
+#define kvm_write_c0_guest_wired(cop0, val)	(cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
+#define kvm_read_c0_guest_badvaddr(cop0)	(cop0->reg[MIPS_CP0_BAD_VADDR][0])
+#define kvm_write_c0_guest_badvaddr(cop0, val)	(cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
+#define kvm_read_c0_guest_count(cop0)		(cop0->reg[MIPS_CP0_COUNT][0])
+#define kvm_write_c0_guest_count(cop0, val)	(cop0->reg[MIPS_CP0_COUNT][0] = (val))
+#define kvm_read_c0_guest_entryhi(cop0)		(cop0->reg[MIPS_CP0_TLB_HI][0])
+#define kvm_write_c0_guest_entryhi(cop0, val)	(cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
+#define kvm_read_c0_guest_compare(cop0)		(cop0->reg[MIPS_CP0_COMPARE][0])
+#define kvm_write_c0_guest_compare(cop0, val)	(cop0->reg[MIPS_CP0_COMPARE][0] = (val))
+#define kvm_read_c0_guest_status(cop0)		(cop0->reg[MIPS_CP0_STATUS][0])
+#define kvm_write_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] = (val))
+#define kvm_read_c0_guest_intctl(cop0)		(cop0->reg[MIPS_CP0_STATUS][1])
+#define kvm_write_c0_guest_intctl(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][1] = (val))
+#define kvm_read_c0_guest_cause(cop0)		(cop0->reg[MIPS_CP0_CAUSE][0])
+#define kvm_write_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] = (val))
+#define kvm_read_c0_guest_epc(cop0)		(cop0->reg[MIPS_CP0_EXC_PC][0])
+#define kvm_write_c0_guest_epc(cop0, val)	(cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
+#define kvm_read_c0_guest_prid(cop0)		(cop0->reg[MIPS_CP0_PRID][0])
+#define kvm_write_c0_guest_prid(cop0, val)	(cop0->reg[MIPS_CP0_PRID][0] = (val))
+#define kvm_read_c0_guest_ebase(cop0)		(cop0->reg[MIPS_CP0_PRID][1])
+#define kvm_write_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] = (val))
+#define kvm_read_c0_guest_config(cop0)		(cop0->reg[MIPS_CP0_CONFIG][0])
+#define kvm_read_c0_guest_config1(cop0)		(cop0->reg[MIPS_CP0_CONFIG][1])
+#define kvm_read_c0_guest_config2(cop0)		(cop0->reg[MIPS_CP0_CONFIG][2])
+#define kvm_read_c0_guest_config3(cop0)		(cop0->reg[MIPS_CP0_CONFIG][3])
+#define kvm_read_c0_guest_config7(cop0)		(cop0->reg[MIPS_CP0_CONFIG][7])
+#define kvm_write_c0_guest_config(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][0] = (val))
+#define kvm_write_c0_guest_config1(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][1] = (val))
+#define kvm_write_c0_guest_config2(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][2] = (val))
+#define kvm_write_c0_guest_config3(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][3] = (val))
+#define kvm_write_c0_guest_config7(cop0, val)	(cop0->reg[MIPS_CP0_CONFIG][7] = (val))
+#define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
+#define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+
+#define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
+#define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
+#define kvm_set_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
+#define kvm_clear_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+#define kvm_change_c0_guest_cause(cop0, change, val)	\
+{							\
+	kvm_clear_c0_guest_cause(cop0, change);		\
+	kvm_set_c0_guest_cause(cop0, ((val) & (change)));	\
+}
+#define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
+#define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
+#define kvm_change_c0_guest_ebase(cop0, change, val)	\
+{							\
+	kvm_clear_c0_guest_ebase(cop0, change);		\
+	kvm_set_c0_guest_ebase(cop0, ((val) & (change)));	\
+}
+
+
+struct kvm_mips_callbacks {
+	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
+	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
+	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
+	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
+	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
+	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
+	int (*handle_syscall)(struct kvm_vcpu *vcpu);
+	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
+	int (*handle_break)(struct kvm_vcpu *vcpu);
+	int (*vm_init)(struct kvm *kvm);
+	int (*vcpu_init)(struct kvm_vcpu *vcpu);
+	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
+	gpa_t (*gva_to_gpa)(gva_t gva);
+	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
+	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
+	void (*queue_io_int)(struct kvm_vcpu *vcpu,
+			     struct kvm_mips_interrupt *irq);
+	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
+			       struct kvm_mips_interrupt *irq);
+	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
+			   uint32_t cause);
+	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
+			 uint32_t cause);
+	int (*vcpu_ioctl_get_regs)(struct kvm_vcpu *vcpu,
+				   struct kvm_regs *regs);
+	int (*vcpu_ioctl_set_regs)(struct kvm_vcpu *vcpu,
+				   struct kvm_regs *regs);
+};
+extern struct kvm_mips_callbacks *kvm_mips_callbacks;
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
+
+/* Debug: dump vcpu state */
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
+
+/* Trampoline ASM routine to start running in "Guest" context */
+extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* TLB handling */
+uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+
+uint32_t kvm_get_commpage_asid(struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
+					   struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+					      struct kvm_vcpu *vcpu);
+
+extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+						struct kvm_mips_tlb *tlb,
+						unsigned long *hpa0,
+						unsigned long *hpa1);
+
+extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
+						    uint32_t *opc,
+						    struct kvm_run *run,
+						    struct kvm_vcpu *vcpu);
+
+extern void kvm_mips_dump_host_tlbs(void);
+extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu);
+extern void kvm_mips_flush_host_tlb(int skip_kseg0);
+extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
+extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
+
+extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
+				     unsigned long entryhi);
+extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
+extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+							   unsigned long gva);
+extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+				    struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu);
+extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu);
+extern void kvm_local_flush_tlb_all(void);
+extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu);
+extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
+extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
+
+/* Emulation */
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+
+extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
+						   uint32_t *opc,
+						   struct kvm_run *run,
+						   struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+						      uint32_t *opc,
+						      struct kvm_run *run,
+						      struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+							 uint32_t *opc,
+							 struct kvm_run *run,
+							 struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+							uint32_t *opc,
+							struct kvm_run *run,
+							struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+							 uint32_t *opc,
+							 struct kvm_run *run,
+							 struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+							uint32_t *opc,
+							struct kvm_run *run,
+							struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+						      uint32_t *opc,
+						      struct kvm_run *run,
+						      struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
+						uint32_t *opc,
+						struct kvm_run *run,
+						struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+						     uint32_t *opc,
+						     struct kvm_run *run,
+						     struct kvm_vcpu *vcpu);
+
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+							 struct kvm_run *run);
+
+enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+					       uint32_t *opc,
+					       struct kvm_run *run,
+					       struct kvm_vcpu *vcpu);
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
+					     uint32_t *opc,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
+					   uint32_t *opc,
+					   uint32_t cause,
+					   struct kvm_run *run,
+					   struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_store(uint32_t inst,
+					     uint32_t cause,
+					     struct kvm_run *run,
+					     struct kvm_vcpu *vcpu);
+enum emulation_result kvm_mips_emulate_load(uint32_t inst,
+					    uint32_t cause,
+					    struct kvm_run *run,
+					    struct kvm_vcpu *vcpu);
+
+/* Dynamic binary translation */
+extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+				      struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+				   struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+			       struct kvm_vcpu *vcpu);
+
+/* Misc */
+extern void mips32_SyncICache(unsigned long addr, unsigned long size);
+extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
+
+
+#endif /* __MIPS_KVM_HOST_H__ */
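
To make the guest segment macros concrete: a guest physical address is masked to the low 512MB and re-based into the guest's KSEG0 window at 0x40000000, mirroring what CKSEG0ADDR() does for a normal kernel at 0x80000000. A self-contained sketch with the constants copied from this header:

	#include <assert.h>

	#define KVM_GUEST_KSEG0		0x40000000UL
	#define KVM_GUEST_CPHYSADDR(a)	((unsigned long)(a) & 0x1fffffff)
	#define KVM_GUEST_KSEG0ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)

	int main(void)
	{
		/* guest-physical 0x1000 lands at 0x40001000 in guest KSEG0 */
		assert(KVM_GUEST_KSEG0ADDR(0x1000UL) == 0x40001000UL);
		return 0;
	}
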
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 73d717a75cb0..5b2f2e68e57f 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -20,14 +20,21 @@
 #endif
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE	_AC(0x40000000, UL)
+#else
 #define CAC_BASE	_AC(0x80000000, UL)
+#endif
 #define IO_BASE		_AC(0xa0000000, UL)
 #define UNCAC_BASE	_AC(0xa0000000, UL)
 
 #ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE	_AC(0x60000000, UL)
+#else
 #define MAP_BASE	_AC(0xc0000000, UL)
 #endif
+#endif
 
 /*
  * Memory above this physical address will be considered highmem.
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 952701c3ad2e..820116067c10 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -111,15 +111,21 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline void
 get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 {
+	extern void kvm_local_flush_tlb_all(void);
 	unsigned long asid = asid_cache(cpu);
 
 	if (! ((asid += ASID_INC) & ASID_MASK) ) {
 		if (cpu_has_vtag_icache)
 			flush_icache_all();
+#ifdef CONFIG_VIRTUALIZATION
+		kvm_local_flush_tlb_all();	/* start new asid cycle */
+#else
 		local_flush_tlb_all();	/* start new asid cycle */
+#endif
 		if (!asid)		/* fix version if needed */
 			asid = ASID_FIRST_VERSION;
 	}
+
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
 
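
The hunk above reroutes only the ASID-rollover flush; KVM needs its own variant because it maintains a wired commpage TLB entry (see commpage_tlb in kvm_host.h), and the exact flush semantics live in arch/mips/kvm/kvm_tlb.c, outside this excerpt. A standalone model of the rollover arithmetic itself, with an 8-bit ASID as on MIPS32 (constants illustrative):

	#include <stdio.h>

	#define ASID_INC		0x1
	#define ASID_MASK		0xff
	#define ASID_FIRST_VERSION	0x100

	int main(void)
	{
		unsigned long asid = 0x1ff;	/* last ASID of the current cycle */

		if (!((asid += ASID_INC) & ASID_MASK)) {
			/* wrapped: the kernel flushes the TLB at this point */
			if (!asid)	/* fix version if needed */
				asid = ASID_FIRST_VERSION;
		}
		printf("new cycle starts at 0x%lx\n", asid);	/* 0x200 */
		return 0;
	}
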
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 2a5fa7abb346..71686c897dea 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -44,11 +44,16 @@ extern unsigned int vced_count, vcei_count;
 #define SPECIAL_PAGES_SIZE PAGE_SIZE
 
 #ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+/* User space process size is limited to 1GB in KVM Guest Mode */
+#define TASK_SIZE	0x3fff8000UL
+#else
 /*
  * User space process size: 2GB. This is hardcoded into a few places,
  * so don't change it unless you know what you are doing.
  */
 #define TASK_SIZE	0x7fff8000UL
+#endif
 
 #ifdef __KERNEL__
 #define STACK_TOP_MAX	TASK_SIZE
diff --git a/arch/mips/include/asm/sn/sn_private.h b/arch/mips/include/asm/sn/sn_private.h
index 1a2c3025bf28..fdfae43d8b99 100644
--- a/arch/mips/include/asm/sn/sn_private.h
+++ b/arch/mips/include/asm/sn/sn_private.h
@@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice);
14extern void install_ipi(void); 14extern void install_ipi(void);
15extern void setup_replication_mask(void); 15extern void setup_replication_mask(void);
16extern void replicate_kernel_text(void); 16extern void replicate_kernel_text(void);
17extern pfn_t node_getfirstfree(cnodeid_t); 17extern unsigned long node_getfirstfree(cnodeid_t);
18 18
19#endif /* __ASM_SN_SN_PRIVATE_H */ 19#endif /* __ASM_SN_SN_PRIVATE_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index c4813d67aec3..6d24d4e8b9ed 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -19,7 +19,6 @@ typedef signed char partid_t; /* partition ID type */
19typedef signed short moduleid_t; /* user-visible module number type */ 19typedef signed short moduleid_t; /* user-visible module number type */
20typedef signed short cmoduleid_t; /* kernel compact module id type */ 20typedef signed short cmoduleid_t; /* kernel compact module id type */
21typedef unsigned char clusterid_t; /* Clusterid of the cell */ 21typedef unsigned char clusterid_t; /* Clusterid of the cell */
22typedef unsigned long pfn_t;
23 22
24typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */ 23typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
25 24
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bd87e36bf26a..b46caab453a5 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -23,7 +23,11 @@
23 */ 23 */
24#ifdef CONFIG_32BIT 24#ifdef CONFIG_32BIT
25 25
26#define __UA_LIMIT 0x80000000UL 26#ifdef CONFIG_KVM_GUEST
27#define __UA_LIMIT 0x40000000UL
28#else
29#define __UA_LIMIT 0x80000000UL
30#endif
27 31
28#define __UA_ADDR ".word" 32#define __UA_ADDR ".word"
29#define __UA_LA "la" 33#define __UA_LA "la"
@@ -55,8 +59,13 @@ extern u64 __ua_limit;
55 * address in this range it's the process's problem, not ours :-) 59 * address in this range it's the process's problem, not ours :-)
56 */ 60 */
57 61
62#ifdef CONFIG_KVM_GUEST
63#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
64#define USER_DS ((mm_segment_t) { 0xC0000000UL })
65#else
58#define KERNEL_DS ((mm_segment_t) { 0UL }) 66#define KERNEL_DS ((mm_segment_t) { 0UL })
59#define USER_DS ((mm_segment_t) { __UA_LIMIT }) 67#define USER_DS ((mm_segment_t) { __UA_LIMIT })
68#endif
60 69
61#define VERIFY_READ 0 70#define VERIFY_READ 0
62#define VERIFY_WRITE 1 71#define VERIFY_WRITE 1
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 50285b2c7ffe..0845091ba480 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19 19
20#include <linux/kvm_host.h>
21
20void output_ptreg_defines(void) 22void output_ptreg_defines(void)
21{ 23{
22 COMMENT("MIPS pt_regs offsets."); 24 COMMENT("MIPS pt_regs offsets.");
@@ -328,3 +330,67 @@ void output_pbe_defines(void)
328 BLANK(); 330 BLANK();
329} 331}
330#endif 332#endif
333
334void output_kvm_defines(void)
335{
336	COMMENT(" KVM/MIPS Specific offsets. ");
337 DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
338 OFFSET(VCPU_RUN, kvm_vcpu, run);
339 OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
340
341 OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
342 OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
343
344 OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
345 OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
346
347 OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
348 OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
349 OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
350 OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
351
352 OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
353
354 OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
355 OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
356 OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
357 OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
358 OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
359 OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
360 OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
361 OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
362 OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
363 OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
364 OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
365 OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
366 OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
367 OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
368 OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
369 OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
370 OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
371 OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
372 OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
373 OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
374 OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
375 OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
376 OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
377 OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
378 OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
379 OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
380 OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
381 OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
382 OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
383 OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
384 OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
385 OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
386 OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
387 OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
388 OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
389 OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
390 OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
391 OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
392
393 OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
394 OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
395 BLANK();
396}
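For orientation: this file is compiled to assembly and post-processed into asm-offsets.h, so each DEFINE()/OFFSET() above becomes a plain #define that kvm_locore.S can use from assembly. A sketch of the generated header follows; the numeric values are illustrative only, not the real offsets:

	/* include/generated/asm-offsets.h (excerpt, illustrative values) */
	#define VCPU_ARCH_SIZE 784 /* sizeof(struct kvm_vcpu_arch) */
	#define VCPU_RUN 8         /* offsetof(struct kvm_vcpu, run) */
	#define VCPU_R1 16         /* offsetof(struct kvm_vcpu_arch, gprs[1]) */
	#define VCPU_PC 280        /* offsetof(struct kvm_vcpu_arch, pc) */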
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 556a4357d7fc..97c5a1668e53 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
48 __res; \ 48 __res; \
49}) 49})
50 50
51#ifdef CONFIG_KVM_GUEST
52#define TASK32_SIZE 0x3fff8000UL
53#else
51#define TASK32_SIZE 0x7fff8000UL 54#define TASK32_SIZE 0x7fff8000UL
55#endif
52#undef ELF_ET_DYN_BASE 56#undef ELF_ET_DYN_BASE
53#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) 57#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
54 58
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 07b847d77f5d..fd75d7144524 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -118,6 +118,10 @@ int c0_compare_int_usable(void)
118 unsigned int delta; 118 unsigned int delta;
119 unsigned int cnt; 119 unsigned int cnt;
120 120
121#ifdef CONFIG_KVM_GUEST
122 return 1;
123#endif
124
121 /* 125 /*
122 * IP7 already pending? Try to clear it by acking the timer. 126 * IP7 already pending? Try to clear it by acking the timer.
123 */ 127 */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 66bf4e22d9b9..596620dd7ee2 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -83,6 +83,7 @@ static inline void set_cpu_sibling_map(int cpu)
83} 83}
84 84
85struct plat_smp_ops *mp_ops; 85struct plat_smp_ops *mp_ops;
86EXPORT_SYMBOL(mp_ops);
86 87
87__cpuinit void register_smp_ops(struct plat_smp_ops *ops) 88__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
88{ 89{
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 3b98b7b8487f..7a99e60dadbd 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1712,7 +1712,12 @@ void __init trap_init(void)
1712 ebase = (unsigned long) 1712 ebase = (unsigned long)
1713 __alloc_bootmem(size, 1 << fls(size), 0); 1713 __alloc_bootmem(size, 1 << fls(size), 0);
1714 } else { 1714 } else {
1715 ebase = CKSEG0; 1715#ifdef CONFIG_KVM_GUEST
1716#define KVM_GUEST_KSEG0 0x40000000
1717 ebase = KVM_GUEST_KSEG0;
1718#else
1719 ebase = CKSEG0;
1720#endif
1716 if (cpu_has_mips_r2) 1721 if (cpu_has_mips_r2)
1717 ebase += (read_c0_ebase() & 0x3ffff000); 1722 ebase += (read_c0_ebase() & 0x3ffff000);
1718 } 1723 }
diff --git a/arch/mips/kvm/00README.txt b/arch/mips/kvm/00README.txt
new file mode 100644
index 000000000000..51617e481aa3
--- /dev/null
+++ b/arch/mips/kvm/00README.txt
@@ -0,0 +1,31 @@
1KVM/MIPS Trap & Emulate Release Notes
2=====================================
3
4(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms:
5 Malta Board with FPGA based 34K
6 Sigma Designs TangoX board with a 24K based 8654 SoC.
7 Malta Board with 74K @ 1GHz
8
9(2) Both Guest kernel and Guest Userspace execute in UM.
10 Guest User address space: 0x00000000 -> 0x40000000
11 Guest Kernel Unmapped: 0x40000000 -> 0x60000000
12 Guest Kernel Mapped: 0x60000000 -> 0x80000000
13
14 Guest Usermode virtual memory is limited to 1GB.
15
16(3) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K.
17 Note that due to cache aliasing issues, 4K page sizes are NOT supported.
18
19(4) No HugeTLB Support
20 Both the host kernel and Guest kernel should have the page size set to 16K.
21 This will be implemented in a future release.
22
23(5) KVM/MIPS does not support SMP Guests
24    A Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers,
25    sketched below: LL/TLBP/SC. Since the TLBP instruction causes a trap, the reservation gets cleared
26    when we ERET back to the guest, causing the guest to hang in an infinite loop.
27    This will be fixed in a future release.
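    The failing sequence can be sketched with inline assembly; this is an
    illustration of the pattern described above, not code from the patch (the
    real offenders are the dynamically generated TLB handlers):

	/* LL sets the reservation (LLbit); TLBP is privileged, so it traps
	 * to KVM, and the ERET back into the guest clears LLbit; SC
	 * therefore always fails and the loop never terminates. */
	static void ll_tlbp_sc_hang(volatile int *word)
	{
		int tmp;

		__asm__ __volatile__(
		"1:	ll	%[t], %[m]	\n"
		"	tlbp			\n"
		"	sc	%[t], %[m]	\n"
		"	beqz	%[t], 1b	\n"
		: [t] "=&r" (tmp), [m] "+m" (*word));
	}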
28
29(6) Use Host FPU
30    Currently KVM/MIPS emulates a 24K CPU without an FPU.
31    This will be fixed in a future release.
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000000..2c15590e55f7
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,49 @@
1#
2# KVM configuration
3#
4source "virt/kvm/Kconfig"
5
6menuconfig VIRTUALIZATION
7 bool "Virtualization"
8 depends on HAVE_KVM
9 ---help---
10 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests).
12 This option alone does not add any kernel code.
13
14 If you say N, all options in this submenu will be skipped and disabled.
15
16if VIRTUALIZATION
17
18config KVM
19 tristate "Kernel-based Virtual Machine (KVM) support"
20 depends on HAVE_KVM
21 select PREEMPT_NOTIFIERS
22 select ANON_INODES
23 select KVM_MMIO
24 ---help---
25 Support for hosting Guest kernels.
26 Currently supported on MIPS32 processors.
27
28config KVM_MIPS_DYN_TRANS
29 bool "KVM/MIPS: Dynamic binary translation to reduce traps"
30 depends on KVM
31 ---help---
32	  When running in Trap & Emulate mode, patch privileged
33 instructions to reduce the number of traps.
34
35 If unsure, say Y.
36
37config KVM_MIPS_DEBUG_COP0_COUNTERS
38 bool "Maintain counters for COP0 accesses"
39 depends on KVM
40 ---help---
41 Maintain statistics for Guest COP0 accesses.
42 A histogram of COP0 accesses is printed when the VM is
43 shutdown.
44
45 If unsure, say N.
46
47source drivers/vhost/Kconfig
48
49endif # VIRTUALIZATION
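An illustrative host .config fragment enabling the options above (whether KVM is a module or built-in is the builder's choice; the malta_kvm defconfigs in this patch provide complete examples):

	CONFIG_VIRTUALIZATION=y
	CONFIG_KVM=m
	CONFIG_KVM_MIPS_DYN_TRANS=y
	# CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS is not set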
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000000..78d87bbc99db
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,13 @@
1# Makefile for KVM support for MIPS
2#
3
4common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
5
6EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
7
8kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
9 kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
10 kvm_mips_dyntrans.o kvm_trap_emul.o
11
12obj-$(CONFIG_KVM) += kvm.o
13obj-y += kvm_cb.o kvm_tlb.o
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
new file mode 100644
index 000000000000..313c2e37b978
--- /dev/null
+++ b/arch/mips/kvm/kvm_cb.c
@@ -0,0 +1,14 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 * Authors: Yann Le Du <ledu@kymasys.com>
8 */
9
10#include <linux/export.h>
11#include <linux/kvm_host.h>
12
13struct kvm_mips_callbacks *kvm_mips_callbacks;
14EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
new file mode 100644
index 000000000000..dca2aa665993
--- /dev/null
+++ b/arch/mips/kvm/kvm_locore.S
@@ -0,0 +1,650 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Main entry point for the guest, exception handling.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <asm/asm.h>
13#include <asm/asmmacro.h>
14#include <asm/regdef.h>
15#include <asm/mipsregs.h>
16#include <asm/stackframe.h>
17#include <asm/asm-offsets.h>
18
19
20#define _C_LABEL(x) x
21#define MIPSX(name) mips32_ ## name
22#define CALLFRAME_SIZ 32
23
24/*
25 * VECTOR
26 * exception vector entrypoint
27 */
28#define VECTOR(x, regmask) \
29 .ent _C_LABEL(x),0; \
30 EXPORT(x);
31
32#define VECTOR_END(x) \
33 EXPORT(x);
34
35/* Overload, Danger Will Robinson!! */
36#define PT_HOST_ASID PT_BVADDR
37#define PT_HOST_USERLOCAL PT_EPC
38
39#define CP0_DDATA_LO $28,3
40#define CP0_EBASE $15,1
41
42#define CP0_INTCTL $12,1
43#define CP0_SRSCTL $12,2
44#define CP0_SRSMAP $12,3
45#define CP0_HWRENA $7,0
46
47/* Resume Flags */
48#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
49
50#define RESUME_GUEST 0
51#define RESUME_HOST RESUME_FLAG_HOST
52
53/*
54 * __kvm_mips_vcpu_run: entry point to the guest
55 * a0: run
56 * a1: vcpu
57 */
58
59FEXPORT(__kvm_mips_vcpu_run)
60 .set push
61 .set noreorder
62 .set noat
63
64 /* k0/k1 not being used in host kernel context */
65 addiu k1,sp, -PT_SIZE
66 LONG_S $0, PT_R0(k1)
67 LONG_S $1, PT_R1(k1)
68 LONG_S $2, PT_R2(k1)
69 LONG_S $3, PT_R3(k1)
70
71 LONG_S $4, PT_R4(k1)
72 LONG_S $5, PT_R5(k1)
73 LONG_S $6, PT_R6(k1)
74 LONG_S $7, PT_R7(k1)
75
76 LONG_S $8, PT_R8(k1)
77 LONG_S $9, PT_R9(k1)
78 LONG_S $10, PT_R10(k1)
79 LONG_S $11, PT_R11(k1)
80 LONG_S $12, PT_R12(k1)
81 LONG_S $13, PT_R13(k1)
82 LONG_S $14, PT_R14(k1)
83 LONG_S $15, PT_R15(k1)
84 LONG_S $16, PT_R16(k1)
85 LONG_S $17, PT_R17(k1)
86
87 LONG_S $18, PT_R18(k1)
88 LONG_S $19, PT_R19(k1)
89 LONG_S $20, PT_R20(k1)
90 LONG_S $21, PT_R21(k1)
91 LONG_S $22, PT_R22(k1)
92 LONG_S $23, PT_R23(k1)
93 LONG_S $24, PT_R24(k1)
94 LONG_S $25, PT_R25(k1)
95
96 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
97
98 LONG_S $28, PT_R28(k1)
99 LONG_S $29, PT_R29(k1)
100 LONG_S $30, PT_R30(k1)
101 LONG_S $31, PT_R31(k1)
102
103 /* Save hi/lo */
104 mflo v0
105 LONG_S v0, PT_LO(k1)
106 mfhi v1
107 LONG_S v1, PT_HI(k1)
108
109 /* Save host status */
110 mfc0 v0, CP0_STATUS
111 LONG_S v0, PT_STATUS(k1)
112
113 /* Save host ASID, shove it into the BVADDR location */
114 mfc0 v1,CP0_ENTRYHI
115 andi v1, 0xff
116 LONG_S v1, PT_HOST_ASID(k1)
117
118 /* Save DDATA_LO, will be used to store pointer to vcpu */
119 mfc0 v1, CP0_DDATA_LO
120 LONG_S v1, PT_HOST_USERLOCAL(k1)
121
122 /* DDATA_LO has pointer to vcpu */
123 mtc0 a1,CP0_DDATA_LO
124
125 /* Offset into vcpu->arch */
126 addiu k1, a1, VCPU_HOST_ARCH
127
128 /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
129 LONG_S sp, VCPU_HOST_STACK(k1)
130
131 /* Save the kernel gp as well */
132 LONG_S gp, VCPU_HOST_GP(k1)
133
134 /* Setup status register for running the guest in UM, interrupts are disabled */
135 li k0,(ST0_EXL | KSU_USER| ST0_BEV)
136 mtc0 k0,CP0_STATUS
137 ehb
138
139 /* load up the new EBASE */
140 LONG_L k0, VCPU_GUEST_EBASE(k1)
141 mtc0 k0,CP0_EBASE
142
143 /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
144 * but make sure that timer interrupts are enabled
145 */
146 li k0,(ST0_EXL | KSU_USER | ST0_IE)
147 andi v0, v0, ST0_IM
148 or k0, k0, v0
149 mtc0 k0,CP0_STATUS
150 ehb
151
152
153 /* Set Guest EPC */
154 LONG_L t0, VCPU_PC(k1)
155 mtc0 t0, CP0_EPC
156
157FEXPORT(__kvm_mips_load_asid)
158 /* Set the ASID for the Guest Kernel */
159 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
160 /* addresses shift to 0x80000000 */
161 bltz t0, 1f /* If kernel */
162 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
163 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
1641:
165 /* t1: contains the base of the ASID array, need to get the cpu id */
166 LONG_L t2, TI_CPU($28) /* smp_processor_id */
167 sll t2, t2, 2 /* x4 */
168 addu t3, t1, t2
169 LONG_L k0, (t3)
170 andi k0, k0, 0xff
171 mtc0 k0,CP0_ENTRYHI
172 ehb
173
174 /* Disable RDHWR access */
175 mtc0 zero, CP0_HWRENA
176
177 /* Now load up the Guest Context from VCPU */
178 LONG_L $1, VCPU_R1(k1)
179 LONG_L $2, VCPU_R2(k1)
180 LONG_L $3, VCPU_R3(k1)
181
182 LONG_L $4, VCPU_R4(k1)
183 LONG_L $5, VCPU_R5(k1)
184 LONG_L $6, VCPU_R6(k1)
185 LONG_L $7, VCPU_R7(k1)
186
187 LONG_L $8, VCPU_R8(k1)
188 LONG_L $9, VCPU_R9(k1)
189 LONG_L $10, VCPU_R10(k1)
190 LONG_L $11, VCPU_R11(k1)
191 LONG_L $12, VCPU_R12(k1)
192 LONG_L $13, VCPU_R13(k1)
193 LONG_L $14, VCPU_R14(k1)
194 LONG_L $15, VCPU_R15(k1)
195 LONG_L $16, VCPU_R16(k1)
196 LONG_L $17, VCPU_R17(k1)
197 LONG_L $18, VCPU_R18(k1)
198 LONG_L $19, VCPU_R19(k1)
199 LONG_L $20, VCPU_R20(k1)
200 LONG_L $21, VCPU_R21(k1)
201 LONG_L $22, VCPU_R22(k1)
202 LONG_L $23, VCPU_R23(k1)
203 LONG_L $24, VCPU_R24(k1)
204 LONG_L $25, VCPU_R25(k1)
205
206 /* k0/k1 loaded up later */
207
208 LONG_L $28, VCPU_R28(k1)
209 LONG_L $29, VCPU_R29(k1)
210 LONG_L $30, VCPU_R30(k1)
211 LONG_L $31, VCPU_R31(k1)
212
213 /* Restore hi/lo */
214 LONG_L k0, VCPU_LO(k1)
215 mtlo k0
216
217 LONG_L k0, VCPU_HI(k1)
218 mthi k0
219
220FEXPORT(__kvm_mips_load_k0k1)
221 /* Restore the guest's k0/k1 registers */
222 LONG_L k0, VCPU_R26(k1)
223 LONG_L k1, VCPU_R27(k1)
224
225 /* Jump to guest */
226 eret
227 .set pop
228
229VECTOR(MIPSX(exception), unknown)
230/*
231 * Find out what mode we came from and jump to the proper handler.
232 */
233 .set push
234 .set noat
235 .set noreorder
236 mtc0 k0, CP0_ERROREPC #01: Save guest k0
237 ehb #02:
238
239 mfc0 k0, CP0_EBASE #02: Get EBASE
240 srl k0, k0, 10 #03: Get rid of CPUNum
241 sll k0, k0, 10 #04
242 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
243 addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
244 j k0 #07: jump to the function
245 nop #08: branch delay slot
246	.set	pop
247VECTOR_END(MIPSX(exceptionEnd))
248.end MIPSX(exception)
249
250/*
251 * Generic Guest exception handler. We end up here when the guest
252 * does something that causes a trap to kernel mode.
253 *
254 */
255NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
256 .set push
257 .set noat
258 .set noreorder
259
260	/* Get the VCPU pointer from DDATA_LO */
261 mfc0 k1, CP0_DDATA_LO
262 addiu k1, k1, VCPU_HOST_ARCH
263
264 /* Start saving Guest context to VCPU */
265 LONG_S $0, VCPU_R0(k1)
266 LONG_S $1, VCPU_R1(k1)
267 LONG_S $2, VCPU_R2(k1)
268 LONG_S $3, VCPU_R3(k1)
269 LONG_S $4, VCPU_R4(k1)
270 LONG_S $5, VCPU_R5(k1)
271 LONG_S $6, VCPU_R6(k1)
272 LONG_S $7, VCPU_R7(k1)
273 LONG_S $8, VCPU_R8(k1)
274 LONG_S $9, VCPU_R9(k1)
275 LONG_S $10, VCPU_R10(k1)
276 LONG_S $11, VCPU_R11(k1)
277 LONG_S $12, VCPU_R12(k1)
278 LONG_S $13, VCPU_R13(k1)
279 LONG_S $14, VCPU_R14(k1)
280 LONG_S $15, VCPU_R15(k1)
281 LONG_S $16, VCPU_R16(k1)
282 LONG_S $17,VCPU_R17(k1)
283 LONG_S $18, VCPU_R18(k1)
284 LONG_S $19, VCPU_R19(k1)
285 LONG_S $20, VCPU_R20(k1)
286 LONG_S $21, VCPU_R21(k1)
287 LONG_S $22, VCPU_R22(k1)
288 LONG_S $23, VCPU_R23(k1)
289 LONG_S $24, VCPU_R24(k1)
290 LONG_S $25, VCPU_R25(k1)
291
292 /* Guest k0/k1 saved later */
293
294 LONG_S $28, VCPU_R28(k1)
295 LONG_S $29, VCPU_R29(k1)
296 LONG_S $30, VCPU_R30(k1)
297 LONG_S $31, VCPU_R31(k1)
298
299 /* We need to save hi/lo and restore them on
300 * the way out
301 */
302 mfhi t0
303 LONG_S t0, VCPU_HI(k1)
304
305 mflo t0
306 LONG_S t0, VCPU_LO(k1)
307
308 /* Finally save guest k0/k1 to VCPU */
309 mfc0 t0, CP0_ERROREPC
310 LONG_S t0, VCPU_R26(k1)
311
312 /* Get GUEST k1 and save it in VCPU */
313 la t1, ~0x2ff
314 mfc0 t0, CP0_EBASE
315 and t0, t0, t1
316 LONG_L t0, 0x3000(t0)
317 LONG_S t0, VCPU_R27(k1)
318
319 /* Now that context has been saved, we can use other registers */
320
321 /* Restore vcpu */
322 mfc0 a1, CP0_DDATA_LO
323 move s1, a1
324
325 /* Restore run (vcpu->run) */
326 LONG_L a0, VCPU_RUN(a1)
327 /* Save pointer to run in s0, will be saved by the compiler */
328 move s0, a0
329
330
331 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
332 mfc0 k0,CP0_EPC
333 LONG_S k0, VCPU_PC(k1)
334
335 mfc0 k0, CP0_BADVADDR
336 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
337
338 mfc0 k0, CP0_CAUSE
339 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
340
341 mfc0 k0, CP0_ENTRYHI
342 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
343
344 /* Now restore the host state just enough to run the handlers */
345
346	/* Switch EBASE to the one used by Linux */
347 /* load up the host EBASE */
348 mfc0 v0, CP0_STATUS
349
350 .set at
351 or k0, v0, ST0_BEV
352 .set noat
353
354 mtc0 k0, CP0_STATUS
355 ehb
356
357 LONG_L k0, VCPU_HOST_EBASE(k1)
358 mtc0 k0,CP0_EBASE
359
360
361 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
362 .set at
363 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
364 or v0, v0, ST0_CU0
365 .set noat
366 mtc0 v0, CP0_STATUS
367 ehb
368
369 /* Load up host GP */
370 LONG_L gp, VCPU_HOST_GP(k1)
371
372 /* Need a stack before we can jump to "C" */
373 LONG_L sp, VCPU_HOST_STACK(k1)
374
375 /* Saved host state */
376 addiu sp,sp, -PT_SIZE
377
378 /* XXXKYMA do we need to load the host ASID, maybe not because the
379 * kernel entries are marked GLOBAL, need to verify
380 */
381
382 /* Restore host DDATA_LO */
383 LONG_L k0, PT_HOST_USERLOCAL(sp)
384 mtc0 k0, CP0_DDATA_LO
385
386 /* Restore RDHWR access */
387 la k0, 0x2000000F
388 mtc0 k0, CP0_HWRENA
389
390 /* Jump to handler */
391FEXPORT(__kvm_mips_jump_to_handler)
392 /* XXXKYMA: not sure if this is safe, how large is the stack?? */
393 /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
394 la t9,kvm_mips_handle_exit
395 jalr.hb t9
396 addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
397
398 /* Return from handler Make sure interrupts are disabled */
399 di
400 ehb
401
402 /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
403 * while we were handling the exception from the guest, reload k1
404 */
405 move k1, s1
406 addiu k1, k1, VCPU_HOST_ARCH
407
408 /* Check return value, should tell us if we are returning to the host (handle I/O etc)
409 * or resuming the guest
410 */
411 andi t0, v0, RESUME_HOST
412 bnez t0, __kvm_mips_return_to_host
413 nop
414
415__kvm_mips_return_to_guest:
416 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
417 mtc0 s1, CP0_DDATA_LO
418
419 /* Load up the Guest EBASE to minimize the window where BEV is set */
420 LONG_L t0, VCPU_GUEST_EBASE(k1)
421
422 /* Switch EBASE back to the one used by KVM */
423 mfc0 v1, CP0_STATUS
424 .set at
425 or k0, v1, ST0_BEV
426 .set noat
427 mtc0 k0, CP0_STATUS
428 ehb
429 mtc0 t0,CP0_EBASE
430
431 /* Setup status register for running guest in UM */
432 .set at
433 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
434 and v1, v1, ~ST0_CU0
435 .set noat
436 mtc0 v1, CP0_STATUS
437 ehb
438
439
440 /* Set Guest EPC */
441 LONG_L t0, VCPU_PC(k1)
442 mtc0 t0, CP0_EPC
443
444 /* Set the ASID for the Guest Kernel */
445 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
446 /* addresses shift to 0x80000000 */
447 bltz t0, 1f /* If kernel */
448 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
449 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
4501:
451 /* t1: contains the base of the ASID array, need to get the cpu id */
452 LONG_L t2, TI_CPU($28) /* smp_processor_id */
453 sll t2, t2, 2 /* x4 */
454 addu t3, t1, t2
455 LONG_L k0, (t3)
456 andi k0, k0, 0xff
457 mtc0 k0,CP0_ENTRYHI
458 ehb
459
460 /* Disable RDHWR access */
461 mtc0 zero, CP0_HWRENA
462
463 /* load the guest context from VCPU and return */
464 LONG_L $0, VCPU_R0(k1)
465 LONG_L $1, VCPU_R1(k1)
466 LONG_L $2, VCPU_R2(k1)
467 LONG_L $3, VCPU_R3(k1)
468 LONG_L $4, VCPU_R4(k1)
469 LONG_L $5, VCPU_R5(k1)
470 LONG_L $6, VCPU_R6(k1)
471 LONG_L $7, VCPU_R7(k1)
472 LONG_L $8, VCPU_R8(k1)
473 LONG_L $9, VCPU_R9(k1)
474 LONG_L $10, VCPU_R10(k1)
475 LONG_L $11, VCPU_R11(k1)
476 LONG_L $12, VCPU_R12(k1)
477 LONG_L $13, VCPU_R13(k1)
478 LONG_L $14, VCPU_R14(k1)
479 LONG_L $15, VCPU_R15(k1)
480 LONG_L $16, VCPU_R16(k1)
481 LONG_L $17, VCPU_R17(k1)
482 LONG_L $18, VCPU_R18(k1)
483 LONG_L $19, VCPU_R19(k1)
484 LONG_L $20, VCPU_R20(k1)
485 LONG_L $21, VCPU_R21(k1)
486 LONG_L $22, VCPU_R22(k1)
487 LONG_L $23, VCPU_R23(k1)
488 LONG_L $24, VCPU_R24(k1)
489 LONG_L $25, VCPU_R25(k1)
490
491	/* k0/k1 loaded later */
492 LONG_L $28, VCPU_R28(k1)
493 LONG_L $29, VCPU_R29(k1)
494 LONG_L $30, VCPU_R30(k1)
495 LONG_L $31, VCPU_R31(k1)
496
497FEXPORT(__kvm_mips_skip_guest_restore)
498 LONG_L k0, VCPU_HI(k1)
499 mthi k0
500
501 LONG_L k0, VCPU_LO(k1)
502 mtlo k0
503
504 LONG_L k0, VCPU_R26(k1)
505 LONG_L k1, VCPU_R27(k1)
506
507 eret
508
509__kvm_mips_return_to_host:
510 /* EBASE is already pointing to Linux */
511 LONG_L k1, VCPU_HOST_STACK(k1)
512 addiu k1,k1, -PT_SIZE
513
514 /* Restore host DDATA_LO */
515 LONG_L k0, PT_HOST_USERLOCAL(k1)
516 mtc0 k0, CP0_DDATA_LO
517
518 /* Restore host ASID */
519	LONG_L	k0, PT_HOST_ASID(k1)
520 andi k0, 0xff
521 mtc0 k0,CP0_ENTRYHI
522 ehb
523
524 /* Load context saved on the host stack */
525 LONG_L $0, PT_R0(k1)
526 LONG_L $1, PT_R1(k1)
527
528 /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
529 sra k0, v0, 2
530 move $2, k0
531
532 LONG_L $3, PT_R3(k1)
533 LONG_L $4, PT_R4(k1)
534 LONG_L $5, PT_R5(k1)
535 LONG_L $6, PT_R6(k1)
536 LONG_L $7, PT_R7(k1)
537 LONG_L $8, PT_R8(k1)
538 LONG_L $9, PT_R9(k1)
539 LONG_L $10, PT_R10(k1)
540 LONG_L $11, PT_R11(k1)
541 LONG_L $12, PT_R12(k1)
542 LONG_L $13, PT_R13(k1)
543 LONG_L $14, PT_R14(k1)
544 LONG_L $15, PT_R15(k1)
545 LONG_L $16, PT_R16(k1)
546 LONG_L $17, PT_R17(k1)
547 LONG_L $18, PT_R18(k1)
548 LONG_L $19, PT_R19(k1)
549 LONG_L $20, PT_R20(k1)
550 LONG_L $21, PT_R21(k1)
551 LONG_L $22, PT_R22(k1)
552 LONG_L $23, PT_R23(k1)
553 LONG_L $24, PT_R24(k1)
554 LONG_L $25, PT_R25(k1)
555
556 /* Host k0/k1 were not saved */
557
558 LONG_L $28, PT_R28(k1)
559 LONG_L $29, PT_R29(k1)
560 LONG_L $30, PT_R30(k1)
561
562 LONG_L k0, PT_HI(k1)
563 mthi k0
564
565 LONG_L k0, PT_LO(k1)
566 mtlo k0
567
568 /* Restore RDHWR access */
569 la k0, 0x2000000F
570 mtc0 k0, CP0_HWRENA
571
572
573 /* Restore RA, which is the address we will return to */
574 LONG_L ra, PT_R31(k1)
575 j ra
576 nop
577
578 .set pop
579VECTOR_END(MIPSX(GuestExceptionEnd))
580.end MIPSX(GuestException)
581
582MIPSX(exceptions):
583 ####
584 ##### The exception handlers.
585 #####
586 .word _C_LABEL(MIPSX(GuestException)) # 0
587 .word _C_LABEL(MIPSX(GuestException)) # 1
588 .word _C_LABEL(MIPSX(GuestException)) # 2
589 .word _C_LABEL(MIPSX(GuestException)) # 3
590 .word _C_LABEL(MIPSX(GuestException)) # 4
591 .word _C_LABEL(MIPSX(GuestException)) # 5
592 .word _C_LABEL(MIPSX(GuestException)) # 6
593 .word _C_LABEL(MIPSX(GuestException)) # 7
594 .word _C_LABEL(MIPSX(GuestException)) # 8
595 .word _C_LABEL(MIPSX(GuestException)) # 9
596 .word _C_LABEL(MIPSX(GuestException)) # 10
597 .word _C_LABEL(MIPSX(GuestException)) # 11
598 .word _C_LABEL(MIPSX(GuestException)) # 12
599 .word _C_LABEL(MIPSX(GuestException)) # 13
600 .word _C_LABEL(MIPSX(GuestException)) # 14
601 .word _C_LABEL(MIPSX(GuestException)) # 15
602 .word _C_LABEL(MIPSX(GuestException)) # 16
603 .word _C_LABEL(MIPSX(GuestException)) # 17
604 .word _C_LABEL(MIPSX(GuestException)) # 18
605 .word _C_LABEL(MIPSX(GuestException)) # 19
606 .word _C_LABEL(MIPSX(GuestException)) # 20
607 .word _C_LABEL(MIPSX(GuestException)) # 21
608 .word _C_LABEL(MIPSX(GuestException)) # 22
609 .word _C_LABEL(MIPSX(GuestException)) # 23
610 .word _C_LABEL(MIPSX(GuestException)) # 24
611 .word _C_LABEL(MIPSX(GuestException)) # 25
612 .word _C_LABEL(MIPSX(GuestException)) # 26
613 .word _C_LABEL(MIPSX(GuestException)) # 27
614 .word _C_LABEL(MIPSX(GuestException)) # 28
615 .word _C_LABEL(MIPSX(GuestException)) # 29
616 .word _C_LABEL(MIPSX(GuestException)) # 30
617 .word _C_LABEL(MIPSX(GuestException)) # 31
618
619
620/* This routine makes changes to the instruction stream visible to the hardware.
621 * It should be called after the instruction stream is written.
622 * On return, the new instructions are effective.
623 * Inputs:
624 * a0 = Start address of new instruction stream
625 * a1 = Size, in bytes, of new instruction stream
626 */
627
628#define HW_SYNCI_Step $1
629LEAF(MIPSX(SyncICache))
630 .set push
631 .set mips32r2
632 beq a1, zero, 20f
633 nop
634 addu a1, a0, a1
635 rdhwr v0, HW_SYNCI_Step
636 beq v0, zero, 20f
637 nop
638
63910:
640 synci 0(a0)
641 addu a0, a0, v0
642 sltu v1, a0, a1
643 bne v1, zero, 10b
644 nop
645 sync
64620:
647 jr.hb ra
648 nop
649 .set pop
650END(MIPSX(SyncICache))
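A note on the ASID-selection trick used twice above: guest kseg0 starts at 0x40000000, so bit 30 of the guest PC distinguishes guest-kernel from guest-user addresses. Shifting the PC left by one moves that bit into the sign bit, where a single bltz can test it. The equivalent C predicate, as an illustrative sketch:

	static inline int gva_in_guest_kernel(unsigned long guest_pc)
	{
		/* pc << 1 moves bit 30 into the sign bit; negative => kernel */
		return (long)(guest_pc << 1) < 0;
	}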
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
new file mode 100644
index 000000000000..2e60b1c78194
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips.c
@@ -0,0 +1,958 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: MIPS specific KVM APIs
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20#include <asm/mmu_context.h>
21
22#include <linux/kvm_host.h>
23
24#include "kvm_mips_int.h"
25#include "kvm_mips_comm.h"
26
27#define CREATE_TRACE_POINTS
28#include "trace.h"
29
30#ifndef VECTORSPACING
31#define VECTORSPACING 0x100 /* for EI/VI mode */
32#endif
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35struct kvm_stats_debugfs_item debugfs_entries[] = {
36 { "wait", VCPU_STAT(wait_exits) },
37 { "cache", VCPU_STAT(cache_exits) },
38 { "signal", VCPU_STAT(signal_exits) },
39 { "interrupt", VCPU_STAT(int_exits) },
40	{ "cop_unusable", VCPU_STAT(cop_unusable_exits) },
41 { "tlbmod", VCPU_STAT(tlbmod_exits) },
42 { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
43 { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
44 { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
45 { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
46 { "syscall", VCPU_STAT(syscall_exits) },
47 { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
48 { "break_inst", VCPU_STAT(break_inst_exits) },
49 { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
50 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
51 {NULL}
52};
53
54static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
55{
56 int i;
57 for_each_possible_cpu(i) {
58 vcpu->arch.guest_kernel_asid[i] = 0;
59 vcpu->arch.guest_user_asid[i] = 0;
60 }
61 return 0;
62}
63
64gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
65{
66 return gfn;
67}
68
69/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
70 * are "runnable" if interrupts are pending
71 */
72int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
73{
74 return !!(vcpu->arch.pending_exceptions);
75}
76
77int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
78{
79 return 1;
80}
81
82int kvm_arch_hardware_enable(void *garbage)
83{
84 return 0;
85}
86
87void kvm_arch_hardware_disable(void *garbage)
88{
89}
90
91int kvm_arch_hardware_setup(void)
92{
93 return 0;
94}
95
96void kvm_arch_hardware_unsetup(void)
97{
98}
99
100void kvm_arch_check_processor_compat(void *rtn)
101{
102 int *r = (int *)rtn;
103 *r = 0;
104 return;
105}
106
107static void kvm_mips_init_tlbs(struct kvm *kvm)
108{
109 unsigned long wired;
110
111	/* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
112 wired = read_c0_wired();
113 write_c0_wired(wired + 1);
114 mtc0_tlbw_hazard();
115 kvm->arch.commpage_tlb = wired;
116
117 kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
118 kvm->arch.commpage_tlb);
119}
120
121static void kvm_mips_init_vm_percpu(void *arg)
122{
123 struct kvm *kvm = (struct kvm *)arg;
124
125 kvm_mips_init_tlbs(kvm);
126 kvm_mips_callbacks->vm_init(kvm);
127
128}
129
130int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
131{
132 if (atomic_inc_return(&kvm_mips_instance) == 1) {
133 kvm_info("%s: 1st KVM instance, setup host TLB parameters\n",
134 __func__);
135 on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
136 }
137
138
139 return 0;
140}
141
142void kvm_mips_free_vcpus(struct kvm *kvm)
143{
144 unsigned int i;
145 struct kvm_vcpu *vcpu;
146
147 /* Put the pages we reserved for the guest pmap */
148 for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
149 if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
150 kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
151 }
152
153 if (kvm->arch.guest_pmap)
154 kfree(kvm->arch.guest_pmap);
155
156 kvm_for_each_vcpu(i, vcpu, kvm) {
157 kvm_arch_vcpu_free(vcpu);
158 }
159
160 mutex_lock(&kvm->lock);
161
162 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
163 kvm->vcpus[i] = NULL;
164
165 atomic_set(&kvm->online_vcpus, 0);
166
167 mutex_unlock(&kvm->lock);
168}
169
170void kvm_arch_sync_events(struct kvm *kvm)
171{
172}
173
174static void kvm_mips_uninit_tlbs(void *arg)
175{
176 /* Restore wired count */
177 write_c0_wired(0);
178 mtc0_tlbw_hazard();
179 /* Clear out all the TLBs */
180 kvm_local_flush_tlb_all();
181}
182
183void kvm_arch_destroy_vm(struct kvm *kvm)
184{
185 kvm_mips_free_vcpus(kvm);
186
187 /* If this is the last instance, restore wired count */
188 if (atomic_dec_return(&kvm_mips_instance) == 0) {
189 kvm_info("%s: last KVM instance, restoring TLB parameters\n",
190 __func__);
191 on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
192 }
193}
194
195long
196kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
197{
198 return -EINVAL;
199}
200
201void kvm_arch_free_memslot(struct kvm_memory_slot *free,
202 struct kvm_memory_slot *dont)
203{
204}
205
206int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
207{
208 return 0;
209}
210
211int kvm_arch_prepare_memory_region(struct kvm *kvm,
212 struct kvm_memory_slot *memslot,
213 struct kvm_memory_slot old,
214 struct kvm_userspace_memory_region *mem,
215 bool user_alloc)
216{
217 return 0;
218}
219
220void kvm_arch_commit_memory_region(struct kvm *kvm,
221 struct kvm_userspace_memory_region *mem,
222 struct kvm_memory_slot old, bool user_alloc)
223{
224 unsigned long npages = 0;
225 int i, err = 0;
226
227 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
228 __func__, kvm, mem->slot, mem->guest_phys_addr,
229 mem->memory_size, mem->userspace_addr);
230
231 /* Setup Guest PMAP table */
232 if (!kvm->arch.guest_pmap) {
233 if (mem->slot == 0)
234 npages = mem->memory_size >> PAGE_SHIFT;
235
236 if (npages) {
237 kvm->arch.guest_pmap_npages = npages;
238 kvm->arch.guest_pmap =
239 kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
240
241 if (!kvm->arch.guest_pmap) {
242 kvm_err("Failed to allocate guest PMAP");
243 err = -ENOMEM;
244 goto out;
245 }
246
247 kvm_info
248 ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
249 npages, kvm->arch.guest_pmap);
250
251 /* Now setup the page table */
252 for (i = 0; i < npages; i++) {
253 kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
254 }
255 }
256 }
257out:
258 return;
259}
260
261void kvm_arch_flush_shadow_all(struct kvm *kvm)
262{
263}
264
265void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
266 struct kvm_memory_slot *slot)
267{
268}
269
270void kvm_arch_flush_shadow(struct kvm *kvm)
271{
272}
273
274struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
275{
276 extern char mips32_exception[], mips32_exceptionEnd[];
277 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
278 int err, size, offset;
279 void *gebase;
280 int i;
281
282 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
283
284 if (!vcpu) {
285 err = -ENOMEM;
286 goto out;
287 }
288
289 err = kvm_vcpu_init(vcpu, kvm, id);
290
291 if (err)
292 goto out_free_cpu;
293
294 kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
295
296 /* Allocate space for host mode exception handlers that handle
297 * guest mode exits
298 */
299 if (cpu_has_veic || cpu_has_vint) {
300 size = 0x200 + VECTORSPACING * 64;
301 } else {
302 size = 0x200;
303 }
304
305 /* Save Linux EBASE */
306 vcpu->arch.host_ebase = (void *)read_c0_ebase();
307
308 gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
309
310 if (!gebase) {
311 err = -ENOMEM;
312 goto out_free_cpu;
313 }
314 kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n",
315 ALIGN(size, PAGE_SIZE), gebase);
316
317 /* Save new ebase */
318 vcpu->arch.guest_ebase = gebase;
319
320 /* Copy L1 Guest Exception handler to correct offset */
321
322 /* TLB Refill, EXL = 0 */
323 memcpy(gebase, mips32_exception,
324 mips32_exceptionEnd - mips32_exception);
325
326 /* General Exception Entry point */
327 memcpy(gebase + 0x180, mips32_exception,
328 mips32_exceptionEnd - mips32_exception);
329
330 /* For vectored interrupts poke the exception code @ all offsets 0-7 */
331 for (i = 0; i < 8; i++) {
332 kvm_debug("L1 Vectored handler @ %p\n",
333 gebase + 0x200 + (i * VECTORSPACING));
334 memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
335 mips32_exceptionEnd - mips32_exception);
336 }
337
338 /* General handler, relocate to unmapped space for sanity's sake */
339 offset = 0x2000;
340 kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n",
341 gebase + offset,
342 mips32_GuestExceptionEnd - mips32_GuestException);
343
344 memcpy(gebase + offset, mips32_GuestException,
345 mips32_GuestExceptionEnd - mips32_GuestException);
346
347 /* Invalidate the icache for these ranges */
348 mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
349
350	/* Allocate comm page for guest kernel; a TLB entry will be reserved for mapping GVA @ 0xFFFF8000 to this page */
351 vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
352
353 if (!vcpu->arch.kseg0_commpage) {
354 err = -ENOMEM;
355 goto out_free_gebase;
356 }
357
358 kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
359 kvm_mips_commpage_init(vcpu);
360
361 /* Init */
362 vcpu->arch.last_sched_cpu = -1;
363
364 /* Start off the timer */
365 kvm_mips_emulate_count(vcpu);
366
367 return vcpu;
368
369out_free_gebase:
370 kfree(gebase);
371
372out_free_cpu:
373 kfree(vcpu);
374
375out:
376 return ERR_PTR(err);
377}
378
379void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
380{
381 hrtimer_cancel(&vcpu->arch.comparecount_timer);
382
383 kvm_vcpu_uninit(vcpu);
384
385 kvm_mips_dump_stats(vcpu);
386
387 if (vcpu->arch.guest_ebase)
388 kfree(vcpu->arch.guest_ebase);
389
390 if (vcpu->arch.kseg0_commpage)
391 kfree(vcpu->arch.kseg0_commpage);
392
393}
394
395void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
396{
397 kvm_arch_vcpu_free(vcpu);
398}
399
400int
401kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
402 struct kvm_guest_debug *dbg)
403{
404 return -EINVAL;
405}
406
407int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
408{
409 int r = 0;
410 sigset_t sigsaved;
411
412 if (vcpu->sigset_active)
413 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
414
415 if (vcpu->mmio_needed) {
416 if (!vcpu->mmio_is_write)
417 kvm_mips_complete_mmio_load(vcpu, run);
418 vcpu->mmio_needed = 0;
419 }
420
421 /* Check if we have any exceptions/interrupts pending */
422 kvm_mips_deliver_interrupts(vcpu,
423 kvm_read_c0_guest_cause(vcpu->arch.cop0));
424
425 local_irq_disable();
426 kvm_guest_enter();
427
428 r = __kvm_mips_vcpu_run(run, vcpu);
429
430 kvm_guest_exit();
431 local_irq_enable();
432
433 if (vcpu->sigset_active)
434 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
435
436 return r;
437}
438
439int
440kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
441{
442 int intr = (int)irq->irq;
443 struct kvm_vcpu *dvcpu = NULL;
444
445 if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
446 kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
447 (int)intr);
448
449 if (irq->cpu == -1)
450 dvcpu = vcpu;
451 else
452 dvcpu = vcpu->kvm->vcpus[irq->cpu];
453
454 if (intr == 2 || intr == 3 || intr == 4) {
455 kvm_mips_callbacks->queue_io_int(dvcpu, irq);
456
457 } else if (intr == -2 || intr == -3 || intr == -4) {
458 kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
459 } else {
460 kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
461 irq->cpu, irq->irq);
462 return -EINVAL;
463 }
464
465 dvcpu->arch.wait = 0;
466
467 if (waitqueue_active(&dvcpu->wq)) {
468 wake_up_interruptible(&dvcpu->wq);
469 }
470
471 return 0;
472}
473
474int
475kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
476 struct kvm_mp_state *mp_state)
477{
478 return -EINVAL;
479}
480
481int
482kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
483 struct kvm_mp_state *mp_state)
484{
485 return -EINVAL;
486}
487
488long
489kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
490{
491 struct kvm_vcpu *vcpu = filp->private_data;
492 void __user *argp = (void __user *)arg;
493 long r;
494 int intr;
495
496 switch (ioctl) {
497 case KVM_NMI:
498 /* Treat the NMI as a CPU reset */
499 r = kvm_mips_reset_vcpu(vcpu);
500 break;
501 case KVM_INTERRUPT:
502 {
503 struct kvm_mips_interrupt irq;
504 r = -EFAULT;
505 if (copy_from_user(&irq, argp, sizeof(irq)))
506 goto out;
507
508 intr = (int)irq.irq;
509
510 kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
511 irq.irq);
512
513 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
514 break;
515 }
516 default:
517 r = -EINVAL;
518 }
519
520out:
521 return r;
522}
523
524/*
525 * Get (and clear) the dirty memory log for a memory slot.
526 */
527int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
528{
529 struct kvm_memory_slot *memslot;
530 unsigned long ga, ga_end;
531 int is_dirty = 0;
532 int r;
533 unsigned long n;
534
535 mutex_lock(&kvm->slots_lock);
536
537 r = kvm_get_dirty_log(kvm, log, &is_dirty);
538 if (r)
539 goto out;
540
541 /* If nothing is dirty, don't bother messing with page tables. */
542 if (is_dirty) {
543 memslot = &kvm->memslots->memslots[log->slot];
544
545 ga = memslot->base_gfn << PAGE_SHIFT;
546 ga_end = ga + (memslot->npages << PAGE_SHIFT);
547
548 printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
549 ga_end);
550
551 n = kvm_dirty_bitmap_bytes(memslot);
552 memset(memslot->dirty_bitmap, 0, n);
553 }
554
555 r = 0;
556out:
557 mutex_unlock(&kvm->slots_lock);
558 return r;
559
560}
561
562long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
563{
564 long r;
565
566 switch (ioctl) {
567 default:
568 r = -EINVAL;
569 }
570
571 return r;
572}
573
574int kvm_arch_init(void *opaque)
575{
576 int ret;
577
578 if (kvm_mips_callbacks) {
579 kvm_err("kvm: module already exists\n");
580 return -EEXIST;
581 }
582
583 ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
584
585 return ret;
586}
587
588void kvm_arch_exit(void)
589{
590 kvm_mips_callbacks = NULL;
591}
592
593int
594kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
595{
596 return -ENOTSUPP;
597}
598
599int
600kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
601{
602 return -ENOTSUPP;
603}
604
605int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
606{
607 return 0;
608}
609
610int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
611{
612 return -ENOTSUPP;
613}
614
615int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
616{
617 return -ENOTSUPP;
618}
619
620int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
621{
622 return VM_FAULT_SIGBUS;
623}
624
625int kvm_dev_ioctl_check_extension(long ext)
626{
627 int r;
628
629 switch (ext) {
630 case KVM_CAP_COALESCED_MMIO:
631 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
632 break;
633 default:
634 r = 0;
635 break;
636 }
637 return r;
638
639}
640
641int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
642{
643 return kvm_mips_pending_timer(vcpu);
644}
645
646int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
647{
648 int i;
649 struct mips_coproc *cop0;
650
651 if (!vcpu)
652 return -1;
653
654 printk("VCPU Register Dump:\n");
655	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
656 printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
657
658 for (i = 0; i < 32; i += 4) {
659 printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
660 vcpu->arch.gprs[i],
661 vcpu->arch.gprs[i + 1],
662 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
663 }
664 printk("\thi: 0x%08lx\n", vcpu->arch.hi);
665 printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
666
667 cop0 = vcpu->arch.cop0;
668 printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
669 kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
670
671 printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
672
673 return 0;
674}
675
676int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
677{
678 int i;
679
680 for (i = 0; i < 32; i++)
681 vcpu->arch.gprs[i] = regs->gprs[i];
682
683 vcpu->arch.hi = regs->hi;
684 vcpu->arch.lo = regs->lo;
685 vcpu->arch.pc = regs->pc;
686
687 return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs);
688}
689
690int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
691{
692 int i;
693
694 for (i = 0; i < 32; i++)
695 regs->gprs[i] = vcpu->arch.gprs[i];
696
697 regs->hi = vcpu->arch.hi;
698 regs->lo = vcpu->arch.lo;
699 regs->pc = vcpu->arch.pc;
700
701 return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs);
702}
703
704void kvm_mips_comparecount_func(unsigned long data)
705{
706 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
707
708 kvm_mips_callbacks->queue_timer_int(vcpu);
709
710 vcpu->arch.wait = 0;
711 if (waitqueue_active(&vcpu->wq)) {
712 wake_up_interruptible(&vcpu->wq);
713 }
714}
715
716/*
717 * low level hrtimer wake routine.
718 */
719enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
720{
721 struct kvm_vcpu *vcpu;
722
723 vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
724 kvm_mips_comparecount_func((unsigned long) vcpu);
725 hrtimer_forward_now(&vcpu->arch.comparecount_timer,
726 ktime_set(0, MS_TO_NS(10)));
727 return HRTIMER_RESTART;
728}
729
730int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
731{
732 kvm_mips_callbacks->vcpu_init(vcpu);
733 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
734 HRTIMER_MODE_REL);
735 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
736 kvm_mips_init_shadow_tlb(vcpu);
737 return 0;
738}
739
740void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
741{
742 return;
743}
744
745int
746kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
747{
748 return 0;
749}
750
751/* Initial guest state */
752int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
753{
754 return kvm_mips_callbacks->vcpu_setup(vcpu);
755}
756
757static
758void kvm_mips_set_c0_status(void)
759{
760 uint32_t status = read_c0_status();
761
762 if (cpu_has_fpu)
763 status |= (ST0_CU1);
764
765 if (cpu_has_dsp)
766 status |= (ST0_MX);
767
768 write_c0_status(status);
769 ehb();
770}
771
772/*
773 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
774 */
775int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
776{
777 uint32_t cause = vcpu->arch.host_cp0_cause;
778 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
779 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
780 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
781 enum emulation_result er = EMULATE_DONE;
782 int ret = RESUME_GUEST;
783
784 /* Set a default exit reason */
785 run->exit_reason = KVM_EXIT_UNKNOWN;
786 run->ready_for_interrupt_injection = 1;
787
788 /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
789 kvm_mips_set_c0_status();
790
791 local_irq_enable();
792
793 kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
794 cause, opc, run, vcpu);
795
796	/* Do a privilege check; if in UM, most of these exit conditions end up
797 * causing an exception to be delivered to the Guest Kernel
798 */
799 er = kvm_mips_check_privilege(cause, opc, run, vcpu);
800 if (er == EMULATE_PRIV_FAIL) {
801 goto skip_emul;
802 } else if (er == EMULATE_FAIL) {
803 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
804 ret = RESUME_HOST;
805 goto skip_emul;
806 }
807
808 switch (exccode) {
809 case T_INT:
810 kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
811
812 ++vcpu->stat.int_exits;
813 trace_kvm_exit(vcpu, INT_EXITS);
814
815 if (need_resched()) {
816 cond_resched();
817 }
818
819 ret = RESUME_GUEST;
820 break;
821
822 case T_COP_UNUSABLE:
823 kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
824
825 ++vcpu->stat.cop_unusable_exits;
826 trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
827 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
828 /* XXXKYMA: Might need to return to user space */
829 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
830 ret = RESUME_HOST;
831 }
832 break;
833
834 case T_TLB_MOD:
835 ++vcpu->stat.tlbmod_exits;
836 trace_kvm_exit(vcpu, TLBMOD_EXITS);
837 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
838 break;
839
840 case T_TLB_ST_MISS:
841 kvm_debug
842 ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
843 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
844 badvaddr);
845
846 ++vcpu->stat.tlbmiss_st_exits;
847 trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
848 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
849 break;
850
851 case T_TLB_LD_MISS:
852 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
853 cause, opc, badvaddr);
854
855 ++vcpu->stat.tlbmiss_ld_exits;
856 trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
857 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
858 break;
859
860 case T_ADDR_ERR_ST:
861 ++vcpu->stat.addrerr_st_exits;
862 trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
863 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
864 break;
865
866 case T_ADDR_ERR_LD:
867 ++vcpu->stat.addrerr_ld_exits;
868 trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
869 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
870 break;
871
872 case T_SYSCALL:
873 ++vcpu->stat.syscall_exits;
874 trace_kvm_exit(vcpu, SYSCALL_EXITS);
875 ret = kvm_mips_callbacks->handle_syscall(vcpu);
876 break;
877
878 case T_RES_INST:
879 ++vcpu->stat.resvd_inst_exits;
880 trace_kvm_exit(vcpu, RESVD_INST_EXITS);
881 ret = kvm_mips_callbacks->handle_res_inst(vcpu);
882 break;
883
884 case T_BREAK:
885 ++vcpu->stat.break_inst_exits;
886 trace_kvm_exit(vcpu, BREAK_INST_EXITS);
887 ret = kvm_mips_callbacks->handle_break(vcpu);
888 break;
889
890 default:
891 kvm_err
892 ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
893 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
894 kvm_read_c0_guest_status(vcpu->arch.cop0));
895 kvm_arch_vcpu_dump_regs(vcpu);
896 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
897 ret = RESUME_HOST;
898 break;
899
900 }
901
902skip_emul:
903 local_irq_disable();
904
905 if (er == EMULATE_DONE && !(ret & RESUME_HOST))
906 kvm_mips_deliver_interrupts(vcpu, cause);
907
908 if (!(ret & RESUME_HOST)) {
909 /* Only check for signals if not already exiting to userspace */
910 if (signal_pending(current)) {
911 run->exit_reason = KVM_EXIT_INTR;
912 ret = (-EINTR << 2) | RESUME_HOST;
913 ++vcpu->stat.signal_exits;
914 trace_kvm_exit(vcpu, SIGNAL_EXITS);
915 }
916 }
917
918 return ret;
919}
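A small illustration of the packed return value (mirroring RESUME_FLAG_HOST from kvm_locore.S and the "sra v0, 2" decode in __kvm_mips_return_to_host; this is a sketch, not code from the patch):

	#define RESUME_FLAG_HOST (1 << 1)

	static inline int pack_exit_ret(int err, int flags)
	{
		return (err << 2) | flags; /* e.g. (-EINTR << 2) | RESUME_FLAG_HOST */
	}

	static inline int unpack_exit_err(int ret)
	{
		return ret >> 2; /* arithmetic shift preserves the sign of err */
	}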
920
921int __init kvm_mips_init(void)
922{
923 int ret;
924
925 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
926
927 if (ret)
928 return ret;
929
930 /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
931 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
932 * to avoid the possibility of double faulting. The issue is that the TLB code
933	 * references routines that are part of the KVM module,
934 * which are only available once the module is loaded.
935 */
936 kvm_mips_gfn_to_pfn = gfn_to_pfn;
937 kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
938 kvm_mips_is_error_pfn = is_error_pfn;
939
940 pr_info("KVM/MIPS Initialized\n");
941 return 0;
942}
943
944void __exit kvm_mips_exit(void)
945{
946 kvm_exit();
947
948 kvm_mips_gfn_to_pfn = NULL;
949 kvm_mips_release_pfn_clean = NULL;
950 kvm_mips_is_error_pfn = NULL;
951
952 pr_info("KVM/MIPS unloaded\n");
953}
954
955module_init(kvm_mips_init);
956module_exit(kvm_mips_exit);
957
958EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
new file mode 100644
index 000000000000..a4a8c85cc8f7
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_comm.h
@@ -0,0 +1,23 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: commpage: mapped into guest kernel space
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#ifndef __KVM_MIPS_COMMPAGE_H__
13#define __KVM_MIPS_COMMPAGE_H__
14
15struct kvm_mips_commpage {
16 struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */
17};
18
19#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
20
21extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
22
23#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
new file mode 100644
index 000000000000..3873b1ecc40f
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_commpage.c
@@ -0,0 +1,37 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* commpage, currently used for Virtual COP0 registers.
7* Mapped into the guest kernel @ 0x0.
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/errno.h>
14#include <linux/err.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <asm/page.h>
20#include <asm/cacheflush.h>
21#include <asm/mmu_context.h>
22
23#include <linux/kvm_host.h>
24
25#include "kvm_mips_comm.h"
26
27void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
28{
29 struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
30 memset(page, 0, sizeof(struct kvm_mips_commpage));
31
32 /* Specific init values for fields */
33 vcpu->arch.cop0 = &page->cop0;
34 memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
35
36 return;
37}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
new file mode 100644
index 000000000000..96528e2d1ea6
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_dyntrans.c
@@ -0,0 +1,149 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19
20#include "kvm_mips_comm.h"
21
22#define SYNCI_TEMPLATE 0x041f0000
23#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
24#define SYNCI_OFFSET(x) ((x) & 0xffff)
25
26#define LW_TEMPLATE 0x8c000000
27#define CLEAR_TEMPLATE 0x00000020
28#define SW_TEMPLATE 0xac000000
29
30int
31kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
32 struct kvm_vcpu *vcpu)
33{
34 int result = 0;
35 unsigned long kseg0_opc;
36 uint32_t synci_inst = 0x0;
37
38	/* Replace the CACHE instruction with a NOP */
39 kseg0_opc =
40 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
41 (vcpu, (unsigned long) opc));
42 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
43 mips32_SyncICache(kseg0_opc, 32);
44
45 return result;
46}
47
48/*
49 * Address-based CACHE instructions are transformed into SYNCIs. A little heavy
50 * for plain D-cache invalidates, but it avoids an expensive trap
51 */
52int
53kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
54 struct kvm_vcpu *vcpu)
55{
56 int result = 0;
57 unsigned long kseg0_opc;
58 uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
59
60 base = (inst >> 21) & 0x1f;
61 offset = inst & 0xffff;
62 synci_inst |= (base << 21);
63 synci_inst |= offset;
64
65 kseg0_opc =
66 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
67 (vcpu, (unsigned long) opc));
68 memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
69 mips32_SyncICache(kseg0_opc, 32);
70
71 return result;
72}
73
74int
75kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
76{
77 int32_t rt, rd, sel;
78 uint32_t mfc0_inst;
79 unsigned long kseg0_opc, flags;
80
81 rt = (inst >> 16) & 0x1f;
82 rd = (inst >> 11) & 0x1f;
83 sel = inst & 0x7;
84
85 if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
86 mfc0_inst = CLEAR_TEMPLATE;
87 mfc0_inst |= ((rt & 0x1f) << 16);
88 } else {
89 mfc0_inst = LW_TEMPLATE;
90 mfc0_inst |= ((rt & 0x1f) << 16);
91 mfc0_inst |=
92 offsetof(struct mips_coproc,
93 reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
94 cop0);
95 }
96
97 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
98 kseg0_opc =
99 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
100 (vcpu, (unsigned long) opc));
101 memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
102 mips32_SyncICache(kseg0_opc, 32);
103 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
104 local_irq_save(flags);
105 memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
106 mips32_SyncICache((unsigned long) opc, 32);
107 local_irq_restore(flags);
108 } else {
109 kvm_err("%s: Invalid address: %p\n", __func__, opc);
110 return -EFAULT;
111 }
112
113 return 0;
114}
115
116int
117kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
118{
119 int32_t rt, rd, sel;
120 uint32_t mtc0_inst = SW_TEMPLATE;
121 unsigned long kseg0_opc, flags;
122
123 rt = (inst >> 16) & 0x1f;
124 rd = (inst >> 11) & 0x1f;
125 sel = inst & 0x7;
126
127 mtc0_inst |= ((rt & 0x1f) << 16);
128 mtc0_inst |=
129 offsetof(struct mips_coproc,
130 reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
131
132 if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
133 kseg0_opc =
134 CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
135 (vcpu, (unsigned long) opc));
136 memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
137 mips32_SyncICache(kseg0_opc, 32);
138 } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
139 local_irq_save(flags);
140 memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
141 mips32_SyncICache((unsigned long) opc, 32);
142 local_irq_restore(flags);
143 } else {
144 kvm_err("%s: Invalid address: %p\n", __func__, opc);
145 return -EFAULT;
146 }
147
148 return 0;
149}
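
As a worked example of the rewriting above (illustrative only, not part of the patch): mtc0 $3, $12, 0 encodes as 0x40836000; extracting the fields and packing them into SW_TEMPLATE yields a store of $3 to the Status slot of the commpage, which is mapped at guest virtual address 0, so the base register field can stay $zero:

	uint32_t inst = 0x40836000;		/* mtc0 $3, $12, 0 (Status) */
	uint32_t rt  = (inst >> 16) & 0x1f;	/* 3  */
	uint32_t rd  = (inst >> 11) & 0x1f;	/* 12 */
	uint32_t sel = inst & 0x7;		/* 0  */
	uint32_t sw_inst = SW_TEMPLATE | (rt << 16) |
			   (offsetof(struct mips_coproc, reg[rd][sel]) +
			    offsetof(struct kvm_mips_commpage, cop0));
	/* sw_inst is "sw $3, <offset>($zero)"; the offset is assumed
	 * to fit in the 16-bit immediate field */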
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
new file mode 100644
index 000000000000..4b6274b47f33
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -0,0 +1,1829 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Instruction/Exception emulation
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/kvm_host.h>
15#include <linux/module.h>
16#include <linux/vmalloc.h>
17#include <linux/fs.h>
18#include <linux/bootmem.h>
19#include <linux/random.h>
20#include <asm/page.h>
21#include <asm/cacheflush.h>
22#include <asm/cpu-info.h>
23#include <asm/mmu_context.h>
24#include <asm/tlbflush.h>
25#include <asm/inst.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#include "kvm_mips_opcode.h"
32#include "kvm_mips_int.h"
33#include "kvm_mips_comm.h"
34
35#include "trace.h"
36
37/*
38 * Compute the return address and emulate the branch, if required.
39 * This function should only be called when a branch delay slot is active.
40 */
41unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
42 unsigned long instpc)
43{
44 unsigned int dspcontrol;
45 union mips_instruction insn;
46 struct kvm_vcpu_arch *arch = &vcpu->arch;
47 long epc = instpc;
48 long nextpc = KVM_INVALID_INST;
49
50 if (epc & 3)
51 goto unaligned;
52
53 /*
54 * Read the instruction
55 */
56 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
57
58 if (insn.word == KVM_INVALID_INST)
59 return KVM_INVALID_INST;
60
61 switch (insn.i_format.opcode) {
62 /*
63 * jr and jalr are in r_format format.
64 */
65 case spec_op:
66 switch (insn.r_format.func) {
67 case jalr_op:
68 arch->gprs[insn.r_format.rd] = epc + 8;
69 /* Fall through */
70 case jr_op:
71 nextpc = arch->gprs[insn.r_format.rs];
72 break;
73 }
74 break;
75
76 /*
77 * This group contains:
78 * bltz_op, bgez_op, bltzl_op, bgezl_op,
79 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
80 */
81 case bcond_op:
82 switch (insn.i_format.rt) {
83 case bltz_op:
84 case bltzl_op:
85 if ((long)arch->gprs[insn.i_format.rs] < 0)
86 epc = epc + 4 + (insn.i_format.simmediate << 2);
87 else
88 epc += 8;
89 nextpc = epc;
90 break;
91
92 case bgez_op:
93 case bgezl_op:
94 if ((long)arch->gprs[insn.i_format.rs] >= 0)
95 epc = epc + 4 + (insn.i_format.simmediate << 2);
96 else
97 epc += 8;
98 nextpc = epc;
99 break;
100
101 case bltzal_op:
102 case bltzall_op:
103 arch->gprs[31] = epc + 8;
104 if ((long)arch->gprs[insn.i_format.rs] < 0)
105 epc = epc + 4 + (insn.i_format.simmediate << 2);
106 else
107 epc += 8;
108 nextpc = epc;
109 break;
110
111 case bgezal_op:
112 case bgezall_op:
113 arch->gprs[31] = epc + 8;
114 if ((long)arch->gprs[insn.i_format.rs] >= 0)
115 epc = epc + 4 + (insn.i_format.simmediate << 2);
116 else
117 epc += 8;
118 nextpc = epc;
119 break;
120 case bposge32_op:
121 if (!cpu_has_dsp)
122 goto sigill;
123
124 dspcontrol = rddsp(0x01);
125
126 if (dspcontrol >= 32) {
127 epc = epc + 4 + (insn.i_format.simmediate << 2);
128 } else
129 epc += 8;
130 nextpc = epc;
131 break;
132 }
133 break;
134
135 /*
136 * These are unconditional and in j_format.
137 */
138 case jal_op:
139		arch->gprs[31] = instpc + 8;	/* fall through */
140 case j_op:
141 epc += 4;
142 epc >>= 28;
143 epc <<= 28;
144 epc |= (insn.j_format.target << 2);
145 nextpc = epc;
146 break;
147
148 /*
149 * These are conditional and in i_format.
150 */
151 case beq_op:
152 case beql_op:
153 if (arch->gprs[insn.i_format.rs] ==
154 arch->gprs[insn.i_format.rt])
155 epc = epc + 4 + (insn.i_format.simmediate << 2);
156 else
157 epc += 8;
158 nextpc = epc;
159 break;
160
161 case bne_op:
162 case bnel_op:
163 if (arch->gprs[insn.i_format.rs] !=
164 arch->gprs[insn.i_format.rt])
165 epc = epc + 4 + (insn.i_format.simmediate << 2);
166 else
167 epc += 8;
168 nextpc = epc;
169 break;
170
171 case blez_op: /* not really i_format */
172 case blezl_op:
173 /* rt field assumed to be zero */
174 if ((long)arch->gprs[insn.i_format.rs] <= 0)
175 epc = epc + 4 + (insn.i_format.simmediate << 2);
176 else
177 epc += 8;
178 nextpc = epc;
179 break;
180
181 case bgtz_op:
182 case bgtzl_op:
183 /* rt field assumed to be zero */
184 if ((long)arch->gprs[insn.i_format.rs] > 0)
185 epc = epc + 4 + (insn.i_format.simmediate << 2);
186 else
187 epc += 8;
188 nextpc = epc;
189 break;
190
191 /*
192 * And now the FPA/cp1 branch instructions.
193 */
194 case cop1_op:
195 printk("%s: unsupported cop1_op\n", __func__);
196 break;
197 }
198
199 return nextpc;
200
201unaligned:
202 printk("%s: unaligned epc\n", __func__);
203 return nextpc;
204
205sigill:
206 printk("%s: DSP branch but not DSP ASE\n", __func__);
207 return nextpc;
208}
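
The branch arithmetic above follows the usual MIPS rule: a taken branch targets the delay-slot address (epc + 4) plus the sign-extended 16-bit immediate shifted left by 2, while a not-taken branch resumes at epc + 8, skipping the delay slot. A quick sanity check, as a sketch with made-up values:

	long epc = 0x80001000;
	short simm = 0x10;				/* from the instruction word */
	long taken = epc + 4 + ((long)simm << 2);	/* 0x80001044 */
	long not_taken = epc + 8;			/* 0x80001008 */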
209
210enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
211{
212 unsigned long branch_pc;
213 enum emulation_result er = EMULATE_DONE;
214
215 if (cause & CAUSEF_BD) {
216 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
217 if (branch_pc == KVM_INVALID_INST) {
218 er = EMULATE_FAIL;
219 } else {
220 vcpu->arch.pc = branch_pc;
221 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
222 }
223 } else
224 vcpu->arch.pc += 4;
225
226 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
227
228 return er;
229}
230
231/* Every time the COMPARE register is written to, we need to decide when to fire
232 * the timer that delivers timer ticks to the guest.
233 *
234 */
235enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu)
236{
237 struct mips_coproc *cop0 = vcpu->arch.cop0;
238 enum emulation_result er = EMULATE_DONE;
239
240 /* If COUNT is enabled */
241 if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) {
242 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
243 hrtimer_start(&vcpu->arch.comparecount_timer,
244 ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL);
245 } else {
246 hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer);
247 }
248
249 return er;
250}
251
252enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
253{
254 struct mips_coproc *cop0 = vcpu->arch.cop0;
255 enum emulation_result er = EMULATE_DONE;
256
257 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
258 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
259 kvm_read_c0_guest_epc(cop0));
260 kvm_clear_c0_guest_status(cop0, ST0_EXL);
261 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
262
263 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
264 kvm_clear_c0_guest_status(cop0, ST0_ERL);
265 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
266 } else {
267 printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
268 vcpu->arch.pc);
269 er = EMULATE_FAIL;
270 }
271
272 return er;
273}
274
275enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
276{
277 enum emulation_result er = EMULATE_DONE;
278
279 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
280 vcpu->arch.pending_exceptions);
281
282 ++vcpu->stat.wait_exits;
283 trace_kvm_exit(vcpu, WAIT_EXITS);
284 if (!vcpu->arch.pending_exceptions) {
285 vcpu->arch.wait = 1;
286 kvm_vcpu_block(vcpu);
287
288		/* If we are runnable, then definitely go off to user space to check if any
289 * I/O interrupts are pending.
290 */
291 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
292 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
293 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
294 }
295 }
296
297 return er;
298}
299
300/* XXXKYMA: Linux doesn't seem to use TLBR; return EMULATE_FAIL for now so that
301 * we can catch this if things ever change
302 */
303enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
304{
305 struct mips_coproc *cop0 = vcpu->arch.cop0;
306 enum emulation_result er = EMULATE_FAIL;
307 uint32_t pc = vcpu->arch.pc;
308
309 printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
310 return er;
311}
312
313/* Write Guest TLB Entry @ Index */
314enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
315{
316 struct mips_coproc *cop0 = vcpu->arch.cop0;
317 int index = kvm_read_c0_guest_index(cop0);
318 enum emulation_result er = EMULATE_DONE;
319 struct kvm_mips_tlb *tlb = NULL;
320 uint32_t pc = vcpu->arch.pc;
321
322 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
323 printk("%s: illegal index: %d\n", __func__, index);
324 printk
325 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
326 pc, index, kvm_read_c0_guest_entryhi(cop0),
327 kvm_read_c0_guest_entrylo0(cop0),
328 kvm_read_c0_guest_entrylo1(cop0),
329 kvm_read_c0_guest_pagemask(cop0));
330 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
331 }
332
333 tlb = &vcpu->arch.guest_tlb[index];
334#if 1
335 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
336 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
337#endif
338
339 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
340 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
341 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
342 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
343
344 kvm_debug
345 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
346 pc, index, kvm_read_c0_guest_entryhi(cop0),
347 kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
348 kvm_read_c0_guest_pagemask(cop0));
349
350 return er;
351}
352
353/* Write Guest TLB Entry @ Random Index */
354enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
355{
356 struct mips_coproc *cop0 = vcpu->arch.cop0;
357 enum emulation_result er = EMULATE_DONE;
358 struct kvm_mips_tlb *tlb = NULL;
359 uint32_t pc = vcpu->arch.pc;
360 int index;
361
362#if 1
363 get_random_bytes(&index, sizeof(index));
364 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
365#else
366 index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
367#endif
368
369 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
370 printk("%s: illegal index: %d\n", __func__, index);
371 return EMULATE_FAIL;
372 }
373
374 tlb = &vcpu->arch.guest_tlb[index];
375
376#if 1
377 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
378 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
379#endif
380
381 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
382 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
383 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
384 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
385
386 kvm_debug
387 ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
388 pc, index, kvm_read_c0_guest_entryhi(cop0),
389 kvm_read_c0_guest_entrylo0(cop0),
390 kvm_read_c0_guest_entrylo1(cop0));
391
392 return er;
393}
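
The masking above assumes KVM_MIPS_GUEST_TLB_SIZE is a power of two, in which case index & (size - 1) is equivalent to index % size for unsigned values; that is what makes the subsequent range check effectively a can't-happen guard. A small illustration with hypothetical values:

	#include <assert.h>

	static void check_mask_equivalence(void)
	{
		unsigned int size = 64;		/* assumed power-of-two TLB size */
		unsigned int index = 0x12345678;

		/* holds only when size is a power of two */
		assert((index & (size - 1)) == (index % size));
	}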
394
395enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
396{
397 struct mips_coproc *cop0 = vcpu->arch.cop0;
398 long entryhi = kvm_read_c0_guest_entryhi(cop0);
399 enum emulation_result er = EMULATE_DONE;
400 uint32_t pc = vcpu->arch.pc;
401 int index = -1;
402
403 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
404
405 kvm_write_c0_guest_index(cop0, index);
406
407 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
408 index);
409
410 return er;
411}
412
413enum emulation_result
414kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
415 struct kvm_run *run, struct kvm_vcpu *vcpu)
416{
417 struct mips_coproc *cop0 = vcpu->arch.cop0;
418 enum emulation_result er = EMULATE_DONE;
419 int32_t rt, rd, copz, sel, co_bit, op;
420 uint32_t pc = vcpu->arch.pc;
421 unsigned long curr_pc;
422
423 /*
424 * Update PC and hold onto current PC in case there is
425 * an error and we want to rollback the PC
426 */
427 curr_pc = vcpu->arch.pc;
428 er = update_pc(vcpu, cause);
429 if (er == EMULATE_FAIL) {
430 return er;
431 }
432
433 copz = (inst >> 21) & 0x1f;
434 rt = (inst >> 16) & 0x1f;
435 rd = (inst >> 11) & 0x1f;
436 sel = inst & 0x7;
437 co_bit = (inst >> 25) & 1;
438
439 /* Verify that the register is valid */
440 if (rd > MIPS_CP0_DESAVE) {
441 printk("Invalid rd: %d\n", rd);
442 er = EMULATE_FAIL;
443 goto done;
444 }
445
446 if (co_bit) {
447 op = (inst) & 0xff;
448
449 switch (op) {
450 case tlbr_op: /* Read indexed TLB entry */
451 er = kvm_mips_emul_tlbr(vcpu);
452 break;
453 case tlbwi_op: /* Write indexed */
454 er = kvm_mips_emul_tlbwi(vcpu);
455 break;
456 case tlbwr_op: /* Write random */
457 er = kvm_mips_emul_tlbwr(vcpu);
458 break;
459 case tlbp_op: /* TLB Probe */
460 er = kvm_mips_emul_tlbp(vcpu);
461 break;
462 case rfe_op:
463 printk("!!!COP0_RFE!!!\n");
464 break;
465 case eret_op:
466 er = kvm_mips_emul_eret(vcpu);
467 goto dont_update_pc;
468 break;
469 case wait_op:
470 er = kvm_mips_emul_wait(vcpu);
471 break;
472 }
473 } else {
474 switch (copz) {
475 case mfc_op:
476#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
477 cop0->stat[rd][sel]++;
478#endif
479 /* Get reg */
480 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
481 /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */
482 vcpu->arch.gprs[rt] = (read_c0_count() >> 2);
483 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
484 vcpu->arch.gprs[rt] = 0x0;
485#ifdef CONFIG_KVM_MIPS_DYN_TRANS
486 kvm_mips_trans_mfc0(inst, opc, vcpu);
487#endif
488 }
489 else {
490 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
491
492#ifdef CONFIG_KVM_MIPS_DYN_TRANS
493 kvm_mips_trans_mfc0(inst, opc, vcpu);
494#endif
495 }
496
497 kvm_debug
498 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
499 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
500
501 break;
502
503 case dmfc_op:
504 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
505 break;
506
507 case mtc_op:
508#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
509 cop0->stat[rd][sel]++;
510#endif
511 if ((rd == MIPS_CP0_TLB_INDEX)
512 && (vcpu->arch.gprs[rt] >=
513 KVM_MIPS_GUEST_TLB_SIZE)) {
514 printk("Invalid TLB Index: %ld",
515 vcpu->arch.gprs[rt]);
516 er = EMULATE_FAIL;
517 break;
518 }
519#define C0_EBASE_CORE_MASK 0xff
520 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
521 /* Preserve CORE number */
522 kvm_change_c0_guest_ebase(cop0,
523 ~(C0_EBASE_CORE_MASK),
524 vcpu->arch.gprs[rt]);
525 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
526 kvm_read_c0_guest_ebase(cop0));
527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
528 uint32_t nasid =
529 vcpu->arch.gprs[rt] & ASID_MASK;
530 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
531 &&
532 ((kvm_read_c0_guest_entryhi(cop0) &
533 ASID_MASK) != nasid)) {
534
535 kvm_debug
536 ("MTCz, change ASID from %#lx to %#lx\n",
537 kvm_read_c0_guest_entryhi(cop0) &
538 ASID_MASK,
539 vcpu->arch.gprs[rt] & ASID_MASK);
540
541 /* Blow away the shadow host TLBs */
542 kvm_mips_flush_host_tlb(1);
543 }
544 kvm_write_c0_guest_entryhi(cop0,
545 vcpu->arch.gprs[rt]);
546 }
547 /* Are we writing to COUNT */
548 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
549				/* Linux doesn't seem to write into COUNT; for now we
550				 * silently ignore writes to COUNT
551				 */
552				/* er = EMULATE_FAIL; */
553 goto done;
554 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
555 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
556 pc, kvm_read_c0_guest_compare(cop0),
557 vcpu->arch.gprs[rt]);
558
559 /* If we are writing to COMPARE */
560 /* Clear pending timer interrupt, if any */
561 kvm_mips_callbacks->dequeue_timer_int(vcpu);
562 kvm_write_c0_guest_compare(cop0,
563 vcpu->arch.gprs[rt]);
564 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
565 kvm_write_c0_guest_status(cop0,
566 vcpu->arch.gprs[rt]);
567 /* Make sure that CU1 and NMI bits are never set */
568 kvm_clear_c0_guest_status(cop0,
569 (ST0_CU1 | ST0_NMI));
570
571#ifdef CONFIG_KVM_MIPS_DYN_TRANS
572 kvm_mips_trans_mtc0(inst, opc, vcpu);
573#endif
574 } else {
575 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
576#ifdef CONFIG_KVM_MIPS_DYN_TRANS
577 kvm_mips_trans_mtc0(inst, opc, vcpu);
578#endif
579 }
580
581 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
582 rd, sel, cop0->reg[rd][sel]);
583 break;
584
585 case dmtc_op:
586 printk
587 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
588 vcpu->arch.pc, rt, rd, sel);
589 er = EMULATE_FAIL;
590 break;
591
592 case mfmcz_op:
593#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
594 cop0->stat[MIPS_CP0_STATUS][0]++;
595#endif
596 if (rt != 0) {
597 vcpu->arch.gprs[rt] =
598 kvm_read_c0_guest_status(cop0);
599 }
600 /* EI */
601 if (inst & 0x20) {
602 kvm_debug("[%#lx] mfmcz_op: EI\n",
603 vcpu->arch.pc);
604 kvm_set_c0_guest_status(cop0, ST0_IE);
605 } else {
606 kvm_debug("[%#lx] mfmcz_op: DI\n",
607 vcpu->arch.pc);
608 kvm_clear_c0_guest_status(cop0, ST0_IE);
609 }
610
611 break;
612
613 case wrpgpr_op:
614 {
615 uint32_t css =
616 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
617 uint32_t pss =
618 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
619				/* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] == 0 */
620 if (css || pss) {
621 er = EMULATE_FAIL;
622 break;
623 }
624 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
625 vcpu->arch.gprs[rt]);
626 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
627 }
628 break;
629 default:
630 printk
631 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
632 vcpu->arch.pc, copz);
633 er = EMULATE_FAIL;
634 break;
635 }
636 }
637
638done:
639 /*
640 * Rollback PC only if emulation was unsuccessful
641 */
642 if (er == EMULATE_FAIL) {
643 vcpu->arch.pc = curr_pc;
644 }
645
646dont_update_pc:
647 /*
648 * This is for special instructions whose emulation
649 * updates the PC, so do not overwrite the PC under
650 * any circumstances
651 */
652
653 return er;
654}
655
656enum emulation_result
657kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
658 struct kvm_run *run, struct kvm_vcpu *vcpu)
659{
660 enum emulation_result er = EMULATE_DO_MMIO;
661 int32_t op, base, rt, offset;
662 uint32_t bytes;
663 void *data = run->mmio.data;
664 unsigned long curr_pc;
665
666 /*
667 * Update PC and hold onto current PC in case there is
668 * an error and we want to rollback the PC
669 */
670 curr_pc = vcpu->arch.pc;
671 er = update_pc(vcpu, cause);
672 if (er == EMULATE_FAIL)
673 return er;
674
675 rt = (inst >> 16) & 0x1f;
676 base = (inst >> 21) & 0x1f;
677 offset = inst & 0xffff;
678 op = (inst >> 26) & 0x3f;
679
680 switch (op) {
681 case sb_op:
682 bytes = 1;
683 if (bytes > sizeof(run->mmio.data)) {
684 kvm_err("%s: bad MMIO length: %d\n", __func__,
685 run->mmio.len);
686 }
687 run->mmio.phys_addr =
688 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
689 host_cp0_badvaddr);
690 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
691 er = EMULATE_FAIL;
692 break;
693 }
694 run->mmio.len = bytes;
695 run->mmio.is_write = 1;
696 vcpu->mmio_needed = 1;
697 vcpu->mmio_is_write = 1;
698 *(u8 *) data = vcpu->arch.gprs[rt];
699 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
700 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
701 *(uint8_t *) data);
702
703 break;
704
705 case sw_op:
706 bytes = 4;
707 if (bytes > sizeof(run->mmio.data)) {
708 kvm_err("%s: bad MMIO length: %d\n", __func__,
709 run->mmio.len);
710 }
711 run->mmio.phys_addr =
712 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
713 host_cp0_badvaddr);
714 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
715 er = EMULATE_FAIL;
716 break;
717 }
718
719 run->mmio.len = bytes;
720 run->mmio.is_write = 1;
721 vcpu->mmio_needed = 1;
722 vcpu->mmio_is_write = 1;
723 *(uint32_t *) data = vcpu->arch.gprs[rt];
724
725 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
726 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
727 vcpu->arch.gprs[rt], *(uint32_t *) data);
728 break;
729
730 case sh_op:
731 bytes = 2;
732 if (bytes > sizeof(run->mmio.data)) {
733 kvm_err("%s: bad MMIO length: %d\n", __func__,
734 run->mmio.len);
735 }
736 run->mmio.phys_addr =
737 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
738 host_cp0_badvaddr);
739 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
740 er = EMULATE_FAIL;
741 break;
742 }
743
744 run->mmio.len = bytes;
745 run->mmio.is_write = 1;
746 vcpu->mmio_needed = 1;
747 vcpu->mmio_is_write = 1;
748 *(uint16_t *) data = vcpu->arch.gprs[rt];
749
750 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
751 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
752			  vcpu->arch.gprs[rt], *(uint16_t *) data);
753 break;
754
755 default:
756 printk("Store not yet supported");
757 er = EMULATE_FAIL;
758 break;
759 }
760
761 /*
762 * Rollback PC if emulation was unsuccessful
763 */
764 if (er == EMULATE_FAIL) {
765 vcpu->arch.pc = curr_pc;
766 }
767
768 return er;
769}
770
771enum emulation_result
772kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
773 struct kvm_run *run, struct kvm_vcpu *vcpu)
774{
775 enum emulation_result er = EMULATE_DO_MMIO;
776 int32_t op, base, rt, offset;
777 uint32_t bytes;
778
779 rt = (inst >> 16) & 0x1f;
780 base = (inst >> 21) & 0x1f;
781 offset = inst & 0xffff;
782 op = (inst >> 26) & 0x3f;
783
784 vcpu->arch.pending_load_cause = cause;
785 vcpu->arch.io_gpr = rt;
786
787 switch (op) {
788 case lw_op:
789 bytes = 4;
790 if (bytes > sizeof(run->mmio.data)) {
791 kvm_err("%s: bad MMIO length: %d\n", __func__,
792 run->mmio.len);
793 er = EMULATE_FAIL;
794 break;
795 }
796 run->mmio.phys_addr =
797 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
798 host_cp0_badvaddr);
799 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
800 er = EMULATE_FAIL;
801 break;
802 }
803
804 run->mmio.len = bytes;
805 run->mmio.is_write = 0;
806 vcpu->mmio_needed = 1;
807 vcpu->mmio_is_write = 0;
808 break;
809
810 case lh_op:
811 case lhu_op:
812 bytes = 2;
813 if (bytes > sizeof(run->mmio.data)) {
814 kvm_err("%s: bad MMIO length: %d\n", __func__,
815 run->mmio.len);
816 er = EMULATE_FAIL;
817 break;
818 }
819 run->mmio.phys_addr =
820 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
821 host_cp0_badvaddr);
822 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
823 er = EMULATE_FAIL;
824 break;
825 }
826
827 run->mmio.len = bytes;
828 run->mmio.is_write = 0;
829 vcpu->mmio_needed = 1;
830 vcpu->mmio_is_write = 0;
831
832 if (op == lh_op)
833 vcpu->mmio_needed = 2;
834 else
835 vcpu->mmio_needed = 1;
836
837 break;
838
839 case lbu_op:
840 case lb_op:
841 bytes = 1;
842 if (bytes > sizeof(run->mmio.data)) {
843 kvm_err("%s: bad MMIO length: %d\n", __func__,
844 run->mmio.len);
845 er = EMULATE_FAIL;
846 break;
847 }
848 run->mmio.phys_addr =
849 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
850 host_cp0_badvaddr);
851 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
852 er = EMULATE_FAIL;
853 break;
854 }
855
856 run->mmio.len = bytes;
857 run->mmio.is_write = 0;
858 vcpu->mmio_is_write = 0;
859
860 if (op == lb_op)
861 vcpu->mmio_needed = 2;
862 else
863 vcpu->mmio_needed = 1;
864
865 break;
866
867 default:
868 printk("Load not yet supported");
869 er = EMULATE_FAIL;
870 break;
871 }
872
873 return er;
874}
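
For context: EMULATE_DO_MMIO causes the vcpu to exit to userspace with exit_reason KVM_EXIT_MMIO; once userspace has serviced run->mmio and re-entered the guest, kvm_mips_complete_mmio_load() below copies the result into the saved io_gpr. A sketch of the standard KVM userspace loop (device_read()/device_write() are hypothetical device-model hooks, not part of this patch):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* assumes vcpu_fd is a KVM vcpu fd and run is its mmap'd struct kvm_run */
	void run_vcpu(int vcpu_fd, struct kvm_run *run)
	{
		for (;;) {
			ioctl(vcpu_fd, KVM_RUN, 0);
			if (run->exit_reason == KVM_EXIT_MMIO) {
				if (run->mmio.is_write)
					device_write(run->mmio.phys_addr,
						     run->mmio.data, run->mmio.len);
				else
					device_read(run->mmio.phys_addr,
						    run->mmio.data, run->mmio.len);
				continue;	/* re-enter; KVM completes the load */
			}
			break;			/* other exit reasons handled elsewhere */
		}
	}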
875
876int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
877{
878 unsigned long offset = (va & ~PAGE_MASK);
879 struct kvm *kvm = vcpu->kvm;
880 unsigned long pa;
881 gfn_t gfn;
882 pfn_t pfn;
883
884 gfn = va >> PAGE_SHIFT;
885
886 if (gfn >= kvm->arch.guest_pmap_npages) {
887 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
888 kvm_mips_dump_host_tlbs();
889 kvm_arch_vcpu_dump_regs(vcpu);
890 return -1;
891 }
892 pfn = kvm->arch.guest_pmap[gfn];
893 pa = (pfn << PAGE_SHIFT) | offset;
894
895 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
896
897 mips32_SyncICache(CKSEG0ADDR(pa), 32);
898 return 0;
899}
900
901#define MIPS_CACHE_OP_INDEX_INV 0x0
902#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
903#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
904#define MIPS_CACHE_OP_IMP 0x3
905#define MIPS_CACHE_OP_HIT_INV 0x4
906#define MIPS_CACHE_OP_FILL_WB_INV 0x5
907#define MIPS_CACHE_OP_HIT_HB 0x6
908#define MIPS_CACHE_OP_FETCH_LOCK 0x7
909
910#define MIPS_CACHE_ICACHE 0x0
911#define MIPS_CACHE_DCACHE 0x1
912#define MIPS_CACHE_SEC 0x3
913
914enum emulation_result
915kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
916 struct kvm_run *run, struct kvm_vcpu *vcpu)
917{
918 struct mips_coproc *cop0 = vcpu->arch.cop0;
919 extern void (*r4k_blast_dcache) (void);
920 extern void (*r4k_blast_icache) (void);
921 enum emulation_result er = EMULATE_DONE;
922 int32_t offset, cache, op_inst, op, base;
923 struct kvm_vcpu_arch *arch = &vcpu->arch;
924 unsigned long va;
925 unsigned long curr_pc;
926
927 /*
928 * Update PC and hold onto current PC in case there is
929 * an error and we want to rollback the PC
930 */
931 curr_pc = vcpu->arch.pc;
932 er = update_pc(vcpu, cause);
933 if (er == EMULATE_FAIL)
934 return er;
935
936 base = (inst >> 21) & 0x1f;
937 op_inst = (inst >> 16) & 0x1f;
938 offset = inst & 0xffff;
939 cache = (inst >> 16) & 0x3;
940 op = (inst >> 18) & 0x7;
941
942 va = arch->gprs[base] + offset;
943
944 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
945 cache, op, base, arch->gprs[base], offset);
946
947	/* Treat INDEX_INV as a no-op; it is issued by Linux on startup to invalidate
948	 * the caches entirely by stepping through all the ways/indexes
949 */
950 if (op == MIPS_CACHE_OP_INDEX_INV) {
951 kvm_debug
952 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
953 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
954 arch->gprs[base], offset);
955
956 if (cache == MIPS_CACHE_DCACHE)
957 r4k_blast_dcache();
958 else if (cache == MIPS_CACHE_ICACHE)
959 r4k_blast_icache();
960 else {
961 printk("%s: unsupported CACHE INDEX operation\n",
962 __func__);
963 return EMULATE_FAIL;
964 }
965
966#ifdef CONFIG_KVM_MIPS_DYN_TRANS
967 kvm_mips_trans_cache_index(inst, opc, vcpu);
968#endif
969 goto done;
970 }
971
972 preempt_disable();
973 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
974
975 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
976 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
977 }
978 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
979 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
980 int index;
981
982 /* If an entry already exists then skip */
983 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
984 goto skip_fault;
985 }
986
987		/* If the address is not in the guest TLB, then give the guest a fault;
988		 * the resulting handler will do the right thing
989 */
990 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
991 (kvm_read_c0_guest_entryhi
992 (cop0) & ASID_MASK));
993
994 if (index < 0) {
995 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
996 vcpu->arch.host_cp0_badvaddr = va;
997 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
998 vcpu);
999 preempt_enable();
1000 goto dont_update_pc;
1001 } else {
1002 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1003 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1004 if (!TLB_IS_VALID(*tlb, va)) {
1005 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1006 run, vcpu);
1007 preempt_enable();
1008 goto dont_update_pc;
1009 } else {
1010 /* We fault an entry from the guest tlb to the shadow host TLB */
1011 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1012 NULL,
1013 NULL);
1014 }
1015 }
1016 } else {
1017 printk
1018 ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1019 cache, op, base, arch->gprs[base], offset);
1020 er = EMULATE_FAIL;
1021 preempt_enable();
1022 goto dont_update_pc;
1023
1024 }
1025
1026skip_fault:
1027 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1028 if (cache == MIPS_CACHE_DCACHE
1029 && (op == MIPS_CACHE_OP_FILL_WB_INV
1030 || op == MIPS_CACHE_OP_HIT_INV)) {
1031 flush_dcache_line(va);
1032
1033#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1034		/* Replace the CACHE instruction with a SYNCI; not identical, but it avoids a trap */
1035 kvm_mips_trans_cache_va(inst, opc, vcpu);
1036#endif
1037 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1038 flush_dcache_line(va);
1039 flush_icache_line(va);
1040
1041#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1042 /* Replace the CACHE instruction, with a SYNCI */
1043 kvm_mips_trans_cache_va(inst, opc, vcpu);
1044#endif
1045 } else {
1046 printk
1047 ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1048 cache, op, base, arch->gprs[base], offset);
1049 er = EMULATE_FAIL;
1050 preempt_enable();
1051 goto dont_update_pc;
1052 }
1053
1054 preempt_enable();
1055
1056 dont_update_pc:
1057 /*
1058 * Rollback PC
1059 */
1060 vcpu->arch.pc = curr_pc;
1061 done:
1062 return er;
1063}
1064
1065enum emulation_result
1066kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1067 struct kvm_run *run, struct kvm_vcpu *vcpu)
1068{
1069 enum emulation_result er = EMULATE_DONE;
1070 uint32_t inst;
1071
1072 /*
1073 * Fetch the instruction.
1074 */
1075 if (cause & CAUSEF_BD) {
1076 opc += 1;
1077 }
1078
1079 inst = kvm_get_inst(opc, vcpu);
1080
1081 switch (((union mips_instruction)inst).r_format.opcode) {
1082 case cop0_op:
1083 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1084 break;
1085 case sb_op:
1086 case sh_op:
1087 case sw_op:
1088 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1089 break;
1090 case lb_op:
1091 case lbu_op:
1092 case lhu_op:
1093 case lh_op:
1094 case lw_op:
1095 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1096 break;
1097
1098 case cache_op:
1099 ++vcpu->stat.cache_exits;
1100 trace_kvm_exit(vcpu, CACHE_EXITS);
1101 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1102 break;
1103
1104 default:
1105 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1106 inst);
1107 kvm_arch_vcpu_dump_regs(vcpu);
1108 er = EMULATE_FAIL;
1109 break;
1110 }
1111
1112 return er;
1113}
1114
1115enum emulation_result
1116kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1117 struct kvm_run *run, struct kvm_vcpu *vcpu)
1118{
1119 struct mips_coproc *cop0 = vcpu->arch.cop0;
1120 struct kvm_vcpu_arch *arch = &vcpu->arch;
1121 enum emulation_result er = EMULATE_DONE;
1122
1123 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1124 /* save old pc */
1125 kvm_write_c0_guest_epc(cop0, arch->pc);
1126 kvm_set_c0_guest_status(cop0, ST0_EXL);
1127
1128 if (cause & CAUSEF_BD)
1129 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1130 else
1131 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1132
1133 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1134
1135 kvm_change_c0_guest_cause(cop0, (0xff),
1136 (T_SYSCALL << CAUSEB_EXCCODE));
1137
1138 /* Set PC to the exception entry point */
1139 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1140
1141 } else {
1142 printk("Trying to deliver SYSCALL when EXL is already set\n");
1143 er = EMULATE_FAIL;
1144 }
1145
1146 return er;
1147}
1148
1149enum emulation_result
1150kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1151 struct kvm_run *run, struct kvm_vcpu *vcpu)
1152{
1153 struct mips_coproc *cop0 = vcpu->arch.cop0;
1154 struct kvm_vcpu_arch *arch = &vcpu->arch;
1155 enum emulation_result er = EMULATE_DONE;
1156	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1157 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1158
1159 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1160 /* save old pc */
1161 kvm_write_c0_guest_epc(cop0, arch->pc);
1162 kvm_set_c0_guest_status(cop0, ST0_EXL);
1163
1164 if (cause & CAUSEF_BD)
1165 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1166 else
1167 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1168
1169 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1170 arch->pc);
1171
1172 /* set pc to the exception entry point */
1173 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1174
1175 } else {
1176 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1177 arch->pc);
1178
1179 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1180 }
1181
1182 kvm_change_c0_guest_cause(cop0, (0xff),
1183 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1184
1185 /* setup badvaddr, context and entryhi registers for the guest */
1186 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1187 /* XXXKYMA: is the context register used by linux??? */
1188 kvm_write_c0_guest_entryhi(cop0, entryhi);
1189 /* Blow away the shadow host TLBs */
1190 kvm_mips_flush_host_tlb(1);
1191
1192 return er;
1193}
1194
1195enum emulation_result
1196kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1197 struct kvm_run *run, struct kvm_vcpu *vcpu)
1198{
1199 struct mips_coproc *cop0 = vcpu->arch.cop0;
1200 struct kvm_vcpu_arch *arch = &vcpu->arch;
1201 enum emulation_result er = EMULATE_DONE;
1202 unsigned long entryhi =
1203 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1204 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1205
1206 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1207 /* save old pc */
1208 kvm_write_c0_guest_epc(cop0, arch->pc);
1209 kvm_set_c0_guest_status(cop0, ST0_EXL);
1210
1211 if (cause & CAUSEF_BD)
1212 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1213 else
1214 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1215
1216 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1217 arch->pc);
1218
1219 /* set pc to the exception entry point */
1220 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1221
1222 } else {
1223 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1224 arch->pc);
1225 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1226 }
1227
1228 kvm_change_c0_guest_cause(cop0, (0xff),
1229 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1230
1231 /* setup badvaddr, context and entryhi registers for the guest */
1232 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1233 /* XXXKYMA: is the context register used by linux??? */
1234 kvm_write_c0_guest_entryhi(cop0, entryhi);
1235 /* Blow away the shadow host TLBs */
1236 kvm_mips_flush_host_tlb(1);
1237
1238 return er;
1239}
1240
1241enum emulation_result
1242kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1243 struct kvm_run *run, struct kvm_vcpu *vcpu)
1244{
1245 struct mips_coproc *cop0 = vcpu->arch.cop0;
1246 struct kvm_vcpu_arch *arch = &vcpu->arch;
1247 enum emulation_result er = EMULATE_DONE;
1248 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1249 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1250
1251 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1252 /* save old pc */
1253 kvm_write_c0_guest_epc(cop0, arch->pc);
1254 kvm_set_c0_guest_status(cop0, ST0_EXL);
1255
1256 if (cause & CAUSEF_BD)
1257 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1258 else
1259 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1260
1261 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1262 arch->pc);
1263
1264 /* Set PC to the exception entry point */
1265 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1266 } else {
1267 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1268 arch->pc);
1269 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1270 }
1271
1272 kvm_change_c0_guest_cause(cop0, (0xff),
1273 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1274
1275 /* setup badvaddr, context and entryhi registers for the guest */
1276 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1277 /* XXXKYMA: is the context register used by linux??? */
1278 kvm_write_c0_guest_entryhi(cop0, entryhi);
1279 /* Blow away the shadow host TLBs */
1280 kvm_mips_flush_host_tlb(1);
1281
1282 return er;
1283}
1284
1285enum emulation_result
1286kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1287 struct kvm_run *run, struct kvm_vcpu *vcpu)
1288{
1289 struct mips_coproc *cop0 = vcpu->arch.cop0;
1290 struct kvm_vcpu_arch *arch = &vcpu->arch;
1291 enum emulation_result er = EMULATE_DONE;
1292 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1293 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1294
1295 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1296 /* save old pc */
1297 kvm_write_c0_guest_epc(cop0, arch->pc);
1298 kvm_set_c0_guest_status(cop0, ST0_EXL);
1299
1300 if (cause & CAUSEF_BD)
1301 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1302 else
1303 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1304
1305 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1306 arch->pc);
1307
1308 /* Set PC to the exception entry point */
1309 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1310 } else {
1311 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1312 arch->pc);
1313 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1314 }
1315
1316 kvm_change_c0_guest_cause(cop0, (0xff),
1317 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1318
1319 /* setup badvaddr, context and entryhi registers for the guest */
1320 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1321 /* XXXKYMA: is the context register used by linux??? */
1322 kvm_write_c0_guest_entryhi(cop0, entryhi);
1323 /* Blow away the shadow host TLBs */
1324 kvm_mips_flush_host_tlb(1);
1325
1326 return er;
1327}
1328
1329/* TLBMOD: store into address matching TLB with Dirty bit off */
1330enum emulation_result
1331kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1332 struct kvm_run *run, struct kvm_vcpu *vcpu)
1333{
1334 enum emulation_result er = EMULATE_DONE;
1335
1336#ifdef DEBUG
1337	/* If the address is not in the guest TLB, then we are in trouble */
1338	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1339			(kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK);
1340	int index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1341 if (index < 0) {
1342 /* XXXKYMA Invalidate and retry */
1343 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1344 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1345 __func__, entryhi);
1346 kvm_mips_dump_guest_tlbs(vcpu);
1347 kvm_mips_dump_host_tlbs();
1348 return EMULATE_FAIL;
1349 }
1350#endif
1351
1352 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1353 return er;
1354}
1355
1356enum emulation_result
1357kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1358 struct kvm_run *run, struct kvm_vcpu *vcpu)
1359{
1360 struct mips_coproc *cop0 = vcpu->arch.cop0;
1361 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1362 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1363 struct kvm_vcpu_arch *arch = &vcpu->arch;
1364 enum emulation_result er = EMULATE_DONE;
1365
1366 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1367 /* save old pc */
1368 kvm_write_c0_guest_epc(cop0, arch->pc);
1369 kvm_set_c0_guest_status(cop0, ST0_EXL);
1370
1371 if (cause & CAUSEF_BD)
1372 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1373 else
1374 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1375
1376 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1377 arch->pc);
1378
1379 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1380 } else {
1381 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1382 arch->pc);
1383 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1384 }
1385
1386 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1387
1388 /* setup badvaddr, context and entryhi registers for the guest */
1389 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1390 /* XXXKYMA: is the context register used by linux??? */
1391 kvm_write_c0_guest_entryhi(cop0, entryhi);
1392 /* Blow away the shadow host TLBs */
1393 kvm_mips_flush_host_tlb(1);
1394
1395 return er;
1396}
1397
1398enum emulation_result
1399kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1400 struct kvm_run *run, struct kvm_vcpu *vcpu)
1401{
1402 struct mips_coproc *cop0 = vcpu->arch.cop0;
1403 struct kvm_vcpu_arch *arch = &vcpu->arch;
1404 enum emulation_result er = EMULATE_DONE;
1405
1406 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1407 /* save old pc */
1408 kvm_write_c0_guest_epc(cop0, arch->pc);
1409 kvm_set_c0_guest_status(cop0, ST0_EXL);
1410
1411 if (cause & CAUSEF_BD)
1412 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1413 else
1414 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1415
1416 }
1417
1418 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1419
1420 kvm_change_c0_guest_cause(cop0, (0xff),
1421 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1422 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1423
1424 return er;
1425}
1426
1427enum emulation_result
1428kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1429 struct kvm_run *run, struct kvm_vcpu *vcpu)
1430{
1431 struct mips_coproc *cop0 = vcpu->arch.cop0;
1432 struct kvm_vcpu_arch *arch = &vcpu->arch;
1433 enum emulation_result er = EMULATE_DONE;
1434
1435 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1436 /* save old pc */
1437 kvm_write_c0_guest_epc(cop0, arch->pc);
1438 kvm_set_c0_guest_status(cop0, ST0_EXL);
1439
1440 if (cause & CAUSEF_BD)
1441 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1442 else
1443 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1444
1445 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1446
1447 kvm_change_c0_guest_cause(cop0, (0xff),
1448 (T_RES_INST << CAUSEB_EXCCODE));
1449
1450 /* Set PC to the exception entry point */
1451 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1452
1453 } else {
1454 kvm_err("Trying to deliver RI when EXL is already set\n");
1455 er = EMULATE_FAIL;
1456 }
1457
1458 return er;
1459}
1460
1461enum emulation_result
1462kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1463 struct kvm_run *run, struct kvm_vcpu *vcpu)
1464{
1465 struct mips_coproc *cop0 = vcpu->arch.cop0;
1466 struct kvm_vcpu_arch *arch = &vcpu->arch;
1467 enum emulation_result er = EMULATE_DONE;
1468
1469 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1470 /* save old pc */
1471 kvm_write_c0_guest_epc(cop0, arch->pc);
1472 kvm_set_c0_guest_status(cop0, ST0_EXL);
1473
1474 if (cause & CAUSEF_BD)
1475 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1476 else
1477 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1478
1479 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1480
1481 kvm_change_c0_guest_cause(cop0, (0xff),
1482 (T_BREAK << CAUSEB_EXCCODE));
1483
1484 /* Set PC to the exception entry point */
1485 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1486
1487 } else {
1488 printk("Trying to deliver BP when EXL is already set\n");
1489 er = EMULATE_FAIL;
1490 }
1491
1492 return er;
1493}
1494
1495/*
1496 * ll/sc, rdhwr, sync emulation
1497 */
1498
1499#define OPCODE 0xfc000000
1500#define BASE 0x03e00000
1501#define RT 0x001f0000
1502#define OFFSET 0x0000ffff
1503#define LL 0xc0000000
1504#define SC 0xe0000000
1505#define SPEC0 0x00000000
1506#define SPEC3 0x7c000000
1507#define RD 0x0000f800
1508#define FUNC 0x0000003f
1509#define SYNC 0x0000000f
1510#define RDHWR 0x0000003b
1511
1512enum emulation_result
1513kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1514 struct kvm_run *run, struct kvm_vcpu *vcpu)
1515{
1516 struct mips_coproc *cop0 = vcpu->arch.cop0;
1517 struct kvm_vcpu_arch *arch = &vcpu->arch;
1518 enum emulation_result er = EMULATE_DONE;
1519 unsigned long curr_pc;
1520 uint32_t inst;
1521
1522 /*
1523 * Update PC and hold onto current PC in case there is
1524 * an error and we want to rollback the PC
1525 */
1526 curr_pc = vcpu->arch.pc;
1527 er = update_pc(vcpu, cause);
1528 if (er == EMULATE_FAIL)
1529 return er;
1530
1531 /*
1532 * Fetch the instruction.
1533 */
1534 if (cause & CAUSEF_BD)
1535 opc += 1;
1536
1537 inst = kvm_get_inst(opc, vcpu);
1538
1539 if (inst == KVM_INVALID_INST) {
1540 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1541 return EMULATE_FAIL;
1542 }
1543
1544 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1545 int rd = (inst & RD) >> 11;
1546 int rt = (inst & RT) >> 16;
1547 switch (rd) {
1548 case 0: /* CPU number */
1549 arch->gprs[rt] = 0;
1550 break;
1551 case 1: /* SYNCI length */
1552 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1553 current_cpu_data.icache.linesz);
1554 break;
1555 case 2: /* Read count register */
1556 printk("RDHWR: Cont register\n");
1557 arch->gprs[rt] = kvm_read_c0_guest_count(cop0);
1558 break;
1559 case 3: /* Count register resolution */
1560 switch (current_cpu_data.cputype) {
1561 case CPU_20KC:
1562 case CPU_25KF:
1563 arch->gprs[rt] = 1;
1564 break;
1565 default:
1566 arch->gprs[rt] = 2;
1567 }
1568 break;
1569 case 29:
1570#if 1
1571 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1572#else
1573 /* UserLocal not implemented */
1574 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1575#endif
1576 break;
1577
1578 default:
1579 printk("RDHWR not supported\n");
1580 er = EMULATE_FAIL;
1581 break;
1582 }
1583 } else {
1584 printk("Emulate RI not supported @ %p: %#x\n", opc, inst);
1585 er = EMULATE_FAIL;
1586 }
1587
1588 /*
1589 * Rollback PC only if emulation was unsuccessful
1590 */
1591 if (er == EMULATE_FAIL) {
1592 vcpu->arch.pc = curr_pc;
1593 }
1594 return er;
1595}
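
For example (derived from the masks above): the well-known TLS idiom rdhwr $3, $29 encodes as 0x7c03e83b, which classifies as SPEC3/RDHWR with rd = 29, so the UserLocal branch is taken and $v1 receives the guest UserLocal value. A small decode check, as a sketch:

	#include <assert.h>
	#include <stdint.h>

	static void check_rdhwr_decode(void)
	{
		uint32_t inst = 0x7c03e83b;		/* rdhwr $3, $29 */

		assert((inst & OPCODE) == SPEC3);
		assert((inst & FUNC) == RDHWR);
		assert(((inst & RD) >> 11) == 29);	/* HWR 29: UserLocal */
		assert(((inst & RT) >> 16) == 3);	/* result lands in $v1 */
	}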
1596
1597enum emulation_result
1598kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1599{
1600 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1601 enum emulation_result er = EMULATE_DONE;
1602 unsigned long curr_pc;
1603
1604 if (run->mmio.len > sizeof(*gpr)) {
1605 printk("Bad MMIO length: %d", run->mmio.len);
1606 er = EMULATE_FAIL;
1607 goto done;
1608 }
1609
1610 /*
1611 * Update PC and hold onto current PC in case there is
1612 * an error and we want to rollback the PC
1613 */
1614 curr_pc = vcpu->arch.pc;
1615 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1616 if (er == EMULATE_FAIL)
1617 return er;
1618
1619 switch (run->mmio.len) {
1620 case 4:
1621 *gpr = *(int32_t *) run->mmio.data;
1622 break;
1623
1624 case 2:
1625 if (vcpu->mmio_needed == 2)
1626 *gpr = *(int16_t *) run->mmio.data;
1627 else
1628			*gpr = *(uint16_t *) run->mmio.data;
1629
1630 break;
1631 case 1:
1632 if (vcpu->mmio_needed == 2)
1633 *gpr = *(int8_t *) run->mmio.data;
1634 else
1635 *gpr = *(u8 *) run->mmio.data;
1636 break;
1637 }
1638
1639 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1640 kvm_debug
1641 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1642 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1643 vcpu->mmio_needed);
1644
1645done:
1646 return er;
1647}
1648
1649static enum emulation_result
1650kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1651 struct kvm_run *run, struct kvm_vcpu *vcpu)
1652{
1653 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1654 struct mips_coproc *cop0 = vcpu->arch.cop0;
1655 struct kvm_vcpu_arch *arch = &vcpu->arch;
1656 enum emulation_result er = EMULATE_DONE;
1657
1658 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1659 /* save old pc */
1660 kvm_write_c0_guest_epc(cop0, arch->pc);
1661 kvm_set_c0_guest_status(cop0, ST0_EXL);
1662
1663 if (cause & CAUSEF_BD)
1664 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1665 else
1666 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1667
1668 kvm_change_c0_guest_cause(cop0, (0xff),
1669 (exccode << CAUSEB_EXCCODE));
1670
1671 /* Set PC to the exception entry point */
1672 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1673 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1674
1675 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
1676 exccode, kvm_read_c0_guest_epc(cop0),
1677 kvm_read_c0_guest_badvaddr(cop0));
1678 } else {
1679 printk("Trying to deliver EXC when EXL is already set\n");
1680 er = EMULATE_FAIL;
1681 }
1682
1683 return er;
1684}
1685
1686enum emulation_result
1687kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
1688 struct kvm_run *run, struct kvm_vcpu *vcpu)
1689{
1690 enum emulation_result er = EMULATE_DONE;
1691 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1692 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
1693
1694 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1695
1696 if (usermode) {
1697 switch (exccode) {
1698 case T_INT:
1699 case T_SYSCALL:
1700 case T_BREAK:
1701 case T_RES_INST:
1702 break;
1703
1704 case T_COP_UNUSABLE:
1705 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
1706 er = EMULATE_PRIV_FAIL;
1707 break;
1708
1709 case T_TLB_MOD:
1710 break;
1711
1712 case T_TLB_LD_MISS:
1713			/* If we are accessing guest kernel space, then send an address error exception to the guest */
1714 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1715 printk("%s: LD MISS @ %#lx\n", __func__,
1716 badvaddr);
1717 cause &= ~0xff;
1718 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
1719 er = EMULATE_PRIV_FAIL;
1720 }
1721 break;
1722
1723 case T_TLB_ST_MISS:
1724			/* If we are accessing guest kernel space, then send an address error exception to the guest */
1725 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
1726 printk("%s: ST MISS @ %#lx\n", __func__,
1727 badvaddr);
1728 cause &= ~0xff;
1729 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
1730 er = EMULATE_PRIV_FAIL;
1731 }
1732 break;
1733
1734 case T_ADDR_ERR_ST:
1735 printk("%s: address error ST @ %#lx\n", __func__,
1736 badvaddr);
1737 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1738 cause &= ~0xff;
1739 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
1740 }
1741 er = EMULATE_PRIV_FAIL;
1742 break;
1743 case T_ADDR_ERR_LD:
1744 printk("%s: address error LD @ %#lx\n", __func__,
1745 badvaddr);
1746 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
1747 cause &= ~0xff;
1748 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
1749 }
1750 er = EMULATE_PRIV_FAIL;
1751 break;
1752 default:
1753 er = EMULATE_PRIV_FAIL;
1754 break;
1755 }
1756 }
1757
1758 if (er == EMULATE_PRIV_FAIL) {
1759 kvm_mips_emulate_exc(cause, opc, run, vcpu);
1760 }
1761 return er;
1762}
1763
1764/* User Address (UA) fault. This can happen if:
1765 * (1) the TLB entry is not present/valid in either the Guest or the shadow host
1766 * TLB; in this case we pass the fault on to the guest kernel and let it handle it.
1767 * (2) the TLB entry is present in the Guest TLB but not in the shadow host TLB;
1768 * in this case we inject the entry from the Guest TLB into the shadow host TLB.
1769 */
1770enum emulation_result
1771kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1772 struct kvm_run *run, struct kvm_vcpu *vcpu)
1773{
1774 enum emulation_result er = EMULATE_DONE;
1775 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1776 unsigned long va = vcpu->arch.host_cp0_badvaddr;
1777 int index;
1778
1779 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
1780 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
1781
1782	/* KVM would not have got the exception if this entry was valid in the shadow host TLB.
1783	 * Check the Guest TLB; if the entry is not there then send the guest an
1784	 * exception. The guest exception handler should then inject an entry into the
1785	 * guest TLB
1786 */
1787 index = kvm_mips_guest_tlb_lookup(vcpu,
1788 (va & VPN2_MASK) |
1789 (kvm_read_c0_guest_entryhi
1790 (vcpu->arch.cop0) & ASID_MASK));
1791 if (index < 0) {
1792 if (exccode == T_TLB_LD_MISS) {
1793 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
1794 } else if (exccode == T_TLB_ST_MISS) {
1795 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
1796 } else {
1797 printk("%s: invalid exc code: %d\n", __func__, exccode);
1798 er = EMULATE_FAIL;
1799 }
1800 } else {
1801 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1802
1803 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1804 if (!TLB_IS_VALID(*tlb, va)) {
1805 if (exccode == T_TLB_LD_MISS) {
1806 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
1807 vcpu);
1808 } else if (exccode == T_TLB_ST_MISS) {
1809 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
1810 vcpu);
1811 } else {
1812 printk("%s: invalid exc code: %d\n", __func__,
1813 exccode);
1814 er = EMULATE_FAIL;
1815 }
1816 } else {
1817#ifdef DEBUG
1818 kvm_debug
1819 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
1820 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
1821#endif
1822 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
1823 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
1824 NULL);
1825 }
1826 }
1827
1828 return er;
1829}
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
new file mode 100644
index 000000000000..1e5de16afe29
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.c
@@ -0,0 +1,243 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupt delivery
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16#include <linux/fs.h>
17#include <linux/bootmem.h>
18#include <asm/page.h>
19#include <asm/cacheflush.h>
20
21#include <linux/kvm_host.h>
22
23#include "kvm_mips_int.h"
24
25void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
26{
27 set_bit(priority, &vcpu->arch.pending_exceptions);
28}
29
30void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
31{
32 clear_bit(priority, &vcpu->arch.pending_exceptions);
33}
34
35void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
36{
37	/* Set Cause bits to reflect the pending timer interrupt;
38 * the EXC code will be set when we are actually
39 * delivering the interrupt:
40 */
41 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
42
43 /* Queue up an INT exception for the core */
44 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
45
46}
47
48void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
49{
50 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
51 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
52}
53
54void
55kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
56{
57 int intr = (int)irq->irq;
58
59	/* Set Cause bits to reflect the pending I/O interrupt;
60 * the EXC code will be set when we are actually
61 * delivering the interrupt:
62 */
63 switch (intr) {
64 case 2:
65 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
66 /* Queue up an INT exception for the core */
67 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
68 break;
69
70 case 3:
71 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
72 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
73 break;
74
75 case 4:
76 kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
77 kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
78 break;
79
80 default:
81 break;
82 }
83
84}
85
86void
87kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
88 struct kvm_mips_interrupt *irq)
89{
90 int intr = (int)irq->irq;
91 switch (intr) {
92 case -2:
93 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
94 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
95 break;
96
97 case -3:
98 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
99 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
100 break;
101
102 case -4:
103 kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
104 kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
105 break;
106
107 default:
108 break;
109 }
110
111}
112
113/* Deliver the interrupt of the corresponding priority, if possible. */
114int
115kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
116 uint32_t cause)
117{
118 int allowed = 0;
119 uint32_t exccode;
120
121 struct kvm_vcpu_arch *arch = &vcpu->arch;
122 struct mips_coproc *cop0 = vcpu->arch.cop0;
123
124 switch (priority) {
125 case MIPS_EXC_INT_TIMER:
126 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
127 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
128 && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
129 allowed = 1;
130 exccode = T_INT;
131 }
132 break;
133
134 case MIPS_EXC_INT_IO:
135 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
136 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
137 && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
138 allowed = 1;
139 exccode = T_INT;
140 }
141 break;
142
143 case MIPS_EXC_INT_IPI_1:
144 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
145 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
146 && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
147 allowed = 1;
148 exccode = T_INT;
149 }
150 break;
151
152 case MIPS_EXC_INT_IPI_2:
153 if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
154 && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
155 && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
156 allowed = 1;
157 exccode = T_INT;
158 }
159 break;
160
161 default:
162 break;
163 }
164
165 /* Are we allowed to deliver the interrupt? */
166 if (allowed) {
167
168 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
169 /* save old pc */
170 kvm_write_c0_guest_epc(cop0, arch->pc);
171 kvm_set_c0_guest_status(cop0, ST0_EXL);
172
173 if (cause & CAUSEF_BD)
174 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
175 else
176 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
177
178 kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
179
180 } else
181 kvm_err("Trying to deliver interrupt when EXL is already set\n");
182
183 kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
184 (exccode << CAUSEB_EXCCODE));
185
186 /* XXXSL Set PC to the interrupt exception entry point */
187 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
188 arch->pc = KVM_GUEST_KSEG0 + 0x200;
189 else
190 arch->pc = KVM_GUEST_KSEG0 + 0x180;
191
192 clear_bit(priority, &vcpu->arch.pending_exceptions);
193 }
194
195 return allowed;
196}
197
198int
199kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
200 uint32_t cause)
201{
202 return 1;
203}
204
205void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
206{
207 unsigned long *pending = &vcpu->arch.pending_exceptions;
208 unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
209 unsigned int priority;
210
211 if (!(*pending) && !(*pending_clr))
212 return;
213
214 priority = *pending_clr ? __ffs(*pending_clr) : MIPS_EXC_MAX + 1;
215 while (priority <= MIPS_EXC_MAX) {
216 if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
217 if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
218 break;
219 }
220
221 priority = find_next_bit(pending_clr,
222 BITS_PER_BYTE * sizeof(*pending_clr),
223 priority + 1);
224 }
225
226 priority = *pending ? __ffs(*pending) : MIPS_EXC_MAX + 1;
227 while (priority <= MIPS_EXC_MAX) {
228 if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
229 if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
230 break;
231 }
232
233 priority = find_next_bit(pending,
234 BITS_PER_BYTE * sizeof(*pending),
235 priority + 1);
236 }
237
238}
239
240int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
241{
242 return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
243}
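
Note on the delivery loop above: pending exceptions live in a single
unsigned long bitmask, and delivery walks the set bits in ascending
priority order (lowest bit first) via __ffs()/find_next_bit(). A minimal
standalone sketch of the same walk, with compiler builtins standing in for
the kernel bit helpers (first_set(), next_set() and the main() driver are
illustrative, not part of this patch):

	#include <stdio.h>

	#define MIPS_EXC_MAX 12

	/* Stand-ins for the kernel's __ffs()/find_next_bit() on one word */
	static unsigned int first_set(unsigned long w)
	{
		return (unsigned int)__builtin_ctzl(w); /* w must be nonzero */
	}

	static unsigned int next_set(unsigned long w, unsigned int from)
	{
		if (from >= 8 * sizeof(w))
			return 8 * sizeof(w);
		w &= ~0UL << from;
		return w ? (unsigned int)__builtin_ctzl(w) : 8 * sizeof(w);
	}

	int main(void)
	{
		/* Timer (7) and IO (8) pending, as the queue callbacks set them */
		unsigned long pending = (1UL << 7) | (1UL << 8);
		unsigned int prio;

		for (prio = first_set(pending); prio <= MIPS_EXC_MAX;
		     prio = next_set(pending, prio + 1))
			printf("deliver priority %u\n", prio);
		return 0;
	}

With KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE defined as 0, the kernel loop also
stops after the first successful delivery; the sketch shows the full walk.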
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
new file mode 100644
index 000000000000..20da7d29eede
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_int.h
@@ -0,0 +1,49 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Interrupts
7* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8* Authors: Sanjay Lal <sanjayl@kymasys.com>
9*/
10
11/* MIPS Exception Priorities: exceptions (including interrupts) are queued up
12 * for the guest in the order specified by their priorities.
13 */
14
15#define MIPS_EXC_RESET 0
16#define MIPS_EXC_SRESET 1
17#define MIPS_EXC_DEBUG_ST 2
18#define MIPS_EXC_DEBUG 3
19#define MIPS_EXC_DDB 4
20#define MIPS_EXC_NMI 5
21#define MIPS_EXC_MCHK 6
22#define MIPS_EXC_INT_TIMER 7
23#define MIPS_EXC_INT_IO 8
24#define MIPS_EXC_EXECUTE 9
25#define MIPS_EXC_INT_IPI_1 10
26#define MIPS_EXC_INT_IPI_2 11
27#define MIPS_EXC_MAX 12
28/* XXXSL More to follow */
29
30#define C_TI (_ULCAST_(1) << 30)
31
32#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
33#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
34
35void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
36void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
37int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
38
39void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
40void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
41void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
42 struct kvm_mips_interrupt *irq);
43void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
44 struct kvm_mips_interrupt *irq);
45int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
46 uint32_t cause);
47int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
48 uint32_t cause);
49void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
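
For reference, the io-interrupt callbacks declared above encode raise
versus clear in the sign of the irq number: 2/3/4 raise C_IRQ0/1/2 and
queue MIPS_EXC_INT_IO/IPI_1/IPI_2, while -2/-3/-4 clear and dequeue them.
A hedged sketch of a call site (the surrounding ioctl plumbing is omitted
and assumed to have installed kvm_mips_callbacks):

	struct kvm_mips_interrupt irq;

	irq.cpu = -1;	/* no specific target vcpu; field use is illustrative */
	irq.irq = 2;	/* raise: sets C_IRQ0, queues MIPS_EXC_INT_IO */
	kvm_mips_callbacks->queue_io_int(vcpu, &irq);

	irq.irq = -2;	/* clear: clears C_IRQ0, dequeues MIPS_EXC_INT_IO */
	kvm_mips_callbacks->dequeue_io_int(vcpu, &irq);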
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
new file mode 100644
index 000000000000..86d3b4cc348b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_opcode.h
@@ -0,0 +1,24 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10/*
11 * Define opcode values not defined in <asm/inst.h>
12 */
13
14#ifndef __KVM_MIPS_OPCODE_H__
15#define __KVM_MIPS_OPCODE_H__
16
17/* COP0 Ops */
18#define mfmcz_op 0x0b /* 01011 */
19#define wrpgpr_op 0x0e /* 01110 */
20
21/* COP0 opcodes (only if COP0 and CO=1): */
22#define wait_op 0x20 /* 100000 */
23
24#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
new file mode 100644
index 000000000000..075904bcac1b
--- /dev/null
+++ b/arch/mips/kvm/kvm_mips_stats.c
@@ -0,0 +1,82 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: COP0 access histogram
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/kvm_host.h>
13
14char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
15 "WAIT",
16 "CACHE",
17 "Signal",
18 "Interrupt",
19 "COP0/1 Unusable",
20 "TLB Mod",
21 "TLB Miss (LD)",
22 "TLB Miss (ST)",
23 "Address Err (ST)",
24 "Address Error (LD)",
25 "System Call",
26 "Reserved Inst",
27 "Break Inst",
28 "D-Cache Flushes",
29};
30
31char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
32 "Index",
33 "Random",
34 "EntryLo0",
35 "EntryLo1",
36 "Context",
37 "PG Mask",
38 "Wired",
39 "HWREna",
40 "BadVAddr",
41 "Count",
42 "EntryHI",
43 "Compare",
44 "Status",
45 "Cause",
46 "EXC PC",
47 "PRID",
48 "Config",
49 "LLAddr",
50 "Watch Lo",
51 "Watch Hi",
52 "X Context",
53 "Reserved",
54 "Impl Dep",
55 "Debug",
56 "DEPC",
57 "PerfCnt",
58 "ErrCtl",
59 "CacheErr",
60 "TagLo",
61 "TagHi",
62 "ErrorEPC",
63 "DESAVE"
64};
65
66int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
67{
68#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
69 int i, j;
70
71 printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
72 for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
73 for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
74 if (vcpu->arch.cop0->stat[i][j])
75 printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
76 vcpu->arch.cop0->stat[i][j]);
77 }
78 }
79#endif
80
81 return 0;
82}
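
kvm_mips_dump_stats() above only prints the histogram; the counters are
expected to be bumped at each emulated COP0 access elsewhere in the series.
A minimal sketch of such an increment site (the helper name is
hypothetical, not part of this file):

	#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	/* Hypothetical helper: count one guest access to COP0 reg (rd, sel) */
	static inline void kvm_mips_count_cop0_access(struct kvm_vcpu *vcpu,
						      int rd, int sel)
	{
		if (rd < N_MIPS_COPROC_REGS && sel < N_MIPS_COPROC_SEL)
			vcpu->arch.cop0->stat[rd][sel]++;
	}
	#endif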
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
new file mode 100644
index 000000000000..e3f0d9b8b6c5
--- /dev/null
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -0,0 +1,932 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
7* the TLB handlers run from KSEG0
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
13#include <linux/init.h>
14#include <linux/sched.h>
15#include <linux/smp.h>
16#include <linux/mm.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/kvm_host.h>
20
21#include <asm/cpu.h>
22#include <asm/bootinfo.h>
23#include <asm/mmu_context.h>
24#include <asm/pgtable.h>
25#include <asm/cacheflush.h>
26
27#undef CONFIG_MIPS_MT
28#include <asm/r4kcache.h>
29#define CONFIG_MIPS_MT
30
31#define KVM_GUEST_PC_TLB 0
32#define KVM_GUEST_SP_TLB 1
33
34#define PRIx64 "llx"
35
36/* Invalidate a TLB entry by loading a unique (unmatchable) KSEG0 EntryHi value */
37#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
38
39atomic_t kvm_mips_instance;
40EXPORT_SYMBOL(kvm_mips_instance);
41
42/* These function pointers are initialized once the KVM module is loaded */
43pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
44EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
45
46void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
47EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
48
49bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
50EXPORT_SYMBOL(kvm_mips_is_error_pfn);
51
52uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53{
54 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
55}
56
57
58uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
59{
60 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
61}
62
63inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
64{
65 return vcpu->kvm->arch.commpage_tlb;
66}
67
68
69/*
70 * Dump the host TLB contents (for debugging).
71 */
72
73void kvm_mips_dump_host_tlbs(void)
74{
75 unsigned long old_entryhi;
76 unsigned long old_pagemask;
77 struct kvm_mips_tlb tlb;
78 unsigned long flags;
79 int i;
80
81 local_irq_save(flags);
82
83 old_entryhi = read_c0_entryhi();
84 old_pagemask = read_c0_pagemask();
85
86 printk("HOST TLBs:\n");
87 printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
88
89 for (i = 0; i < current_cpu_data.tlbsize; i++) {
90 write_c0_index(i);
91 mtc0_tlbw_hazard();
92
93 tlb_read();
94 tlbw_use_hazard();
95
96 tlb.tlb_hi = read_c0_entryhi();
97 tlb.tlb_lo0 = read_c0_entrylo0();
98 tlb.tlb_lo1 = read_c0_entrylo1();
99 tlb.tlb_mask = read_c0_pagemask();
100
101 printk("TLB%c%3d Hi 0x%08lx ",
102 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
103 i, tlb.tlb_hi);
104 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
105 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
106 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
107 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
108 (tlb.tlb_lo0 >> 3) & 7);
109 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
110 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
111 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
112 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
113 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
114 }
115 write_c0_entryhi(old_entryhi);
116 write_c0_pagemask(old_pagemask);
117 mtc0_tlbw_hazard();
118 local_irq_restore(flags);
119}
120
121void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
122{
123 struct mips_coproc *cop0 = vcpu->arch.cop0;
124 struct kvm_mips_tlb tlb;
125 int i;
126
127 printk("Guest TLBs:\n");
128 printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
129
130 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
131 tlb = vcpu->arch.guest_tlb[i];
132 printk("TLB%c%3d Hi 0x%08lx ",
133 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
134 i, tlb.tlb_hi);
135 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
136 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
137 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
138 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
139 (tlb.tlb_lo0 >> 3) & 7);
140 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
141 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
142 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
143 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
144 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
145 }
146}
147
148void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
149{
150 int i;
151 volatile struct kvm_mips_tlb tlb;
152
153 printk("Shadow TLBs:\n");
154 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
155 tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
156 printk("TLB%c%3d Hi 0x%08lx ",
157 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
158 i, tlb.tlb_hi);
159 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
160 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
161 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
162 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
163 (tlb.tlb_lo0 >> 3) & 7);
164 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
165 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
166 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
167 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
168 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
169 }
170}
171
172static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
173{
174 pfn_t pfn;
175
176 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
177 return;
178
179 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
180
181 if (kvm_mips_is_error_pfn(pfn)) {
182 panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
183 }
184
185 kvm->arch.guest_pmap[gfn] = pfn;
186 return;
187}
188
189/* Translate guest KSEG0 addresses to Host PA */
190unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
191 unsigned long gva)
192{
193 gfn_t gfn;
194 uint32_t offset = gva & ~PAGE_MASK;
195 struct kvm *kvm = vcpu->kvm;
196
197 if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
198 kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
199 __builtin_return_address(0), gva);
200 return KVM_INVALID_PAGE;
201 }
202
203 gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
204
205 if (gfn >= kvm->arch.guest_pmap_npages) {
206 kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
207 gva);
208 return KVM_INVALID_PAGE;
209 }
210 kvm_mips_map_page(vcpu->kvm, gfn);
211 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
212}
213
214/* XXXKYMA: Must be called with interrupts disabled */
215/* set flush_dcache_mask == 0 if no dcache flush required */
216int
217kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
218 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
219{
220 unsigned long flags;
221 unsigned long old_entryhi;
222 volatile int idx;
223
224 local_irq_save(flags);
225
226
227 old_entryhi = read_c0_entryhi();
228 write_c0_entryhi(entryhi);
229 mtc0_tlbw_hazard();
230
231 tlb_probe();
232 tlb_probe_hazard();
233 idx = read_c0_index();
234
235 if (idx >= current_cpu_data.tlbsize) {
236 kvm_err("%s: Invalid Index: %d\n", __func__, idx);
237 kvm_mips_dump_host_tlbs();
238 return -1;
239 }
240
241 if (idx < 0) {
242 idx = read_c0_random() % current_cpu_data.tlbsize;
243 write_c0_index(idx);
244 mtc0_tlbw_hazard();
245 }
246 write_c0_entrylo0(entrylo0);
247 write_c0_entrylo1(entrylo1);
248 mtc0_tlbw_hazard();
249
250 tlb_write_indexed();
251 tlbw_use_hazard();
252
253#ifdef DEBUG
254 /* Trace the entry as actually written back by the probe/write */
255 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
256 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
257 vcpu->arch.pc, idx, read_c0_entryhi(),
258 read_c0_entrylo0(), read_c0_entrylo1());
259
260#endif
261
262 /* Flush D-cache */
263 if (flush_dcache_mask) {
264 if (entrylo0 & MIPS3_PG_V) {
265 ++vcpu->stat.flush_dcache_exits;
266 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
267 }
268 if (entrylo1 & MIPS3_PG_V) {
269 ++vcpu->stat.flush_dcache_exits;
270 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
271 (0x1 << PAGE_SHIFT));
272 }
273 }
274
275 /* Restore old ASID */
276 write_c0_entryhi(old_entryhi);
277 mtc0_tlbw_hazard();
278 tlbw_use_hazard();
279 local_irq_restore(flags);
280 return 0;
281}
282
283
284/* XXXKYMA: Must be called with interrupts disabled */
285int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
286 struct kvm_vcpu *vcpu)
287{
288 gfn_t gfn;
289 pfn_t pfn0, pfn1;
290 unsigned long vaddr = 0;
291 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
292 int even;
293 struct kvm *kvm = vcpu->kvm;
294 const int flush_dcache_mask = 0;
295
296
297 if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
298 kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
299 kvm_mips_dump_host_tlbs();
300 return -1;
301 }
302
303 gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
304 if (gfn >= kvm->arch.guest_pmap_npages) {
305 kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
306 gfn, badvaddr);
307 kvm_mips_dump_host_tlbs();
308 return -1;
309 }
310 even = !(gfn & 0x1);
311 vaddr = badvaddr & (PAGE_MASK << 1);
312
313 kvm_mips_map_page(vcpu->kvm, gfn);
314 kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1);
315
316 if (even) {
317 pfn0 = kvm->arch.guest_pmap[gfn];
318 pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
319 } else {
320 pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
321 pfn1 = kvm->arch.guest_pmap[gfn];
322 }
323
324 entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
325 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
326 (0x1 << 1);
327 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
328 (0x1 << 1);
329
330 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
331 flush_dcache_mask);
332}
333
334int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
335 struct kvm_vcpu *vcpu)
336{
337 pfn_t pfn0, pfn1;
338 unsigned long flags, old_entryhi = 0, vaddr = 0;
339 unsigned long entrylo0 = 0, entrylo1 = 0;
340
341
342 pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
343 pfn1 = 0;
344 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
345 (0x1 << 1);
346 entrylo1 = 0;
347
348 local_irq_save(flags);
349
350 old_entryhi = read_c0_entryhi();
351 vaddr = badvaddr & (PAGE_MASK << 1);
352 write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
353 mtc0_tlbw_hazard();
354 write_c0_entrylo0(entrylo0);
355 mtc0_tlbw_hazard();
356 write_c0_entrylo1(entrylo1);
357 mtc0_tlbw_hazard();
358 write_c0_index(kvm_mips_get_commpage_asid(vcpu));
359 mtc0_tlbw_hazard();
360 tlb_write_indexed();
361 mtc0_tlbw_hazard();
362 tlbw_use_hazard();
363
364#ifdef DEBUG
365 kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
366 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
367 read_c0_entrylo0(), read_c0_entrylo1());
368#endif
369
370 /* Restore old ASID */
371 write_c0_entryhi(old_entryhi);
372 mtc0_tlbw_hazard();
373 tlbw_use_hazard();
374 local_irq_restore(flags);
375
376 return 0;
377}
378
379int
380kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
381 struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
382{
383 unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
384 struct kvm *kvm = vcpu->kvm;
385 pfn_t pfn0, pfn1;
386
387
388 if ((tlb->tlb_hi & VPN2_MASK) == 0) {
389 pfn0 = 0;
390 pfn1 = 0;
391 } else {
392 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT);
393 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT);
394
395 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
396 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
397 }
398
399 if (hpa0)
400 *hpa0 = pfn0 << PAGE_SHIFT;
401
402 if (hpa1)
403 *hpa1 = pfn1 << PAGE_SHIFT;
404
405 /* Get attributes from the Guest TLB */
406 entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
407 kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
408 entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
409 (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
410 entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
411 (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
412
413#ifdef DEBUG
414 kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
415 tlb->tlb_lo0, tlb->tlb_lo1);
416#endif
417
418 return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
419 tlb->tlb_mask);
420}
421
422int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
423{
424 int i;
425 int index = -1;
426 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
427
428
429 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
430 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
431 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
432 index = i;
433 break;
434 }
435 }
436
437#ifdef DEBUG
438 kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", __func__, entryhi,
439 index, index < 0 ? 0UL : tlb[index].tlb_lo0, index < 0 ? 0UL : tlb[index].tlb_lo1);
440#endif
441
442 return index;
443}
444
445int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
446{
447 unsigned long old_entryhi, flags;
448 volatile int idx;
449
450
451 local_irq_save(flags);
452
453 old_entryhi = read_c0_entryhi();
454
455 if (KVM_GUEST_KERNEL_MODE(vcpu))
456 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
457 else {
458 write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
459 }
460
461 mtc0_tlbw_hazard();
462
463 tlb_probe();
464 tlb_probe_hazard();
465 idx = read_c0_index();
466
467 /* Restore old ASID */
468 write_c0_entryhi(old_entryhi);
469 mtc0_tlbw_hazard();
470 tlbw_use_hazard();
471
472 local_irq_restore(flags);
473
474#ifdef DEBUG
475 kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
476#endif
477
478 return idx;
479}
480
481int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
482{
483 int idx;
484 unsigned long flags, old_entryhi;
485
486 local_irq_save(flags);
487
488
489 old_entryhi = read_c0_entryhi();
490
491 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
492 mtc0_tlbw_hazard();
493
494 tlb_probe();
495 tlb_probe_hazard();
496 idx = read_c0_index();
497
498 if (idx >= current_cpu_data.tlbsize)
499 BUG();
500
501 if (idx >= 0) {
502 write_c0_entryhi(UNIQUE_ENTRYHI(idx));
503 mtc0_tlbw_hazard();
504
505 write_c0_entrylo0(0);
506 mtc0_tlbw_hazard();
507
508 write_c0_entrylo1(0);
509 mtc0_tlbw_hazard();
510
511 tlb_write_indexed();
512 mtc0_tlbw_hazard();
513 }
514
515 write_c0_entryhi(old_entryhi);
516 mtc0_tlbw_hazard();
517 tlbw_use_hazard();
518
519 local_irq_restore(flags);
520
521#ifdef DEBUG
522 if (idx >= 0) {
523 kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
524 (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
525 }
526#endif
527
528 return 0;
529}
530
531/* XXXKYMA: Fix this: Guest USER and KERNEL no longer share the same ASID */
532int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
533{
534 unsigned long flags, old_entryhi;
535
536 if (index >= current_cpu_data.tlbsize)
537 BUG();
538
539 local_irq_save(flags);
540
541
542 old_entryhi = read_c0_entryhi();
543
544 write_c0_entryhi(UNIQUE_ENTRYHI(index));
545 mtc0_tlbw_hazard();
546
547 write_c0_index(index);
548 mtc0_tlbw_hazard();
549
550 write_c0_entrylo0(0);
551 mtc0_tlbw_hazard();
552
553 write_c0_entrylo1(0);
554 mtc0_tlbw_hazard();
555
556 tlb_write_indexed();
557 mtc0_tlbw_hazard();
558 tlbw_use_hazard();
559
560 write_c0_entryhi(old_entryhi);
561 mtc0_tlbw_hazard();
562 tlbw_use_hazard();
563
564 local_irq_restore(flags);
565
566 return 0;
567}
568
569void kvm_mips_flush_host_tlb(int skip_kseg0)
570{
571 unsigned long flags;
572 unsigned long old_entryhi, entryhi;
573 unsigned long old_pagemask;
574 int entry = 0;
575 int maxentry = current_cpu_data.tlbsize;
576
577
578 local_irq_save(flags);
579
580 old_entryhi = read_c0_entryhi();
581 old_pagemask = read_c0_pagemask();
582
583 /* Blast 'em all away. */
584 for (entry = 0; entry < maxentry; entry++) {
585
586 write_c0_index(entry);
587 mtc0_tlbw_hazard();
588
589 if (skip_kseg0) {
590 tlb_read();
591 tlbw_use_hazard();
592
593 entryhi = read_c0_entryhi();
594
595 /* Don't blow away guest kernel entries */
596 if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
597 continue;
598 }
599 }
600
601 /* Make sure all entries differ. */
602 write_c0_entryhi(UNIQUE_ENTRYHI(entry));
603 mtc0_tlbw_hazard();
604 write_c0_entrylo0(0);
605 mtc0_tlbw_hazard();
606 write_c0_entrylo1(0);
607 mtc0_tlbw_hazard();
608
609 tlb_write_indexed();
610 mtc0_tlbw_hazard();
611 }
612
613 tlbw_use_hazard();
614
615 write_c0_entryhi(old_entryhi);
616 write_c0_pagemask(old_pagemask);
617 mtc0_tlbw_hazard();
618 tlbw_use_hazard();
619
620 local_irq_restore(flags);
621}
622
623void
624kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
625 struct kvm_vcpu *vcpu)
626{
627 unsigned long asid = asid_cache(cpu);
628
629 if (!((asid += ASID_INC) & ASID_MASK)) {
630 if (cpu_has_vtag_icache) {
631 flush_icache_all();
632 }
633
634 kvm_local_flush_tlb_all(); /* start new asid cycle */
635
636 if (!asid) /* fix version if needed */
637 asid = ASID_FIRST_VERSION;
638 }
639
640 cpu_context(cpu, mm) = asid_cache(cpu) = asid;
641}
642
643void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
644{
645 unsigned long flags;
646 unsigned long old_entryhi;
647 unsigned long old_pagemask;
648 int entry = 0;
649 int cpu = smp_processor_id();
650
651 local_irq_save(flags);
652
653 old_entryhi = read_c0_entryhi();
654 old_pagemask = read_c0_pagemask();
655
656 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
657 write_c0_index(entry);
658 mtc0_tlbw_hazard();
659 tlb_read();
660 tlbw_use_hazard();
661
662 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
663 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
664 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
665 vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
666 }
667
668 write_c0_entryhi(old_entryhi);
669 write_c0_pagemask(old_pagemask);
670 mtc0_tlbw_hazard();
671
672 local_irq_restore(flags);
673
674}
675
676void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
677{
678 unsigned long flags;
679 unsigned long old_ctx;
680 int entry;
681 int cpu = smp_processor_id();
682
683 local_irq_save(flags);
684
685 old_ctx = read_c0_entryhi();
686
687 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
688 write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
689 mtc0_tlbw_hazard();
690 write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
691 write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
692
693 write_c0_index(entry);
694 mtc0_tlbw_hazard();
695
696 tlb_write_indexed();
697 tlbw_use_hazard();
698 }
699
700 tlbw_use_hazard();
701 write_c0_entryhi(old_ctx);
702 mtc0_tlbw_hazard();
703 local_irq_restore(flags);
704}
705
706
707void kvm_local_flush_tlb_all(void)
708{
709 unsigned long flags;
710 unsigned long old_ctx;
711 int entry = 0;
712
713 local_irq_save(flags);
714 /* Save old context and create impossible VPN2 value */
715 old_ctx = read_c0_entryhi();
716 write_c0_entrylo0(0);
717 write_c0_entrylo1(0);
718
719 /* Blast 'em all away. */
720 while (entry < current_cpu_data.tlbsize) {
721 /* Make sure all entries differ. */
722 write_c0_entryhi(UNIQUE_ENTRYHI(entry));
723 write_c0_index(entry);
724 mtc0_tlbw_hazard();
725 tlb_write_indexed();
726 entry++;
727 }
728 tlbw_use_hazard();
729 write_c0_entryhi(old_ctx);
730 mtc0_tlbw_hazard();
731
732 local_irq_restore(flags);
733}
734
735void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
736{
737 int cpu, entry;
738
739 for_each_possible_cpu(cpu) {
740 for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
741 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
742 UNIQUE_ENTRYHI(entry);
743 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
744 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
745 vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
746 read_c0_pagemask();
747#ifdef DEBUG
748 kvm_debug
749 ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
750 cpu, entry,
751 vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
752 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
753 vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
754#endif
755 }
756 }
757}
758
759/* Restore ASID once we are scheduled back after preemption */
760void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
761{
762 unsigned long flags;
763 int newasid = 0;
764
765#ifdef DEBUG
766 kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
767#endif
768
769 /* Allocate new kernel and user ASIDs if needed */
770
771 local_irq_save(flags);
772
773 if (((vcpu->arch.
774 guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
775 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
776 vcpu->arch.guest_kernel_asid[cpu] =
777 vcpu->arch.guest_kernel_mm.context.asid[cpu];
778 kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
779 vcpu->arch.guest_user_asid[cpu] =
780 vcpu->arch.guest_user_mm.context.asid[cpu];
781 newasid++;
782
783 kvm_info("[%d]: cpu_context: %#lx\n", cpu,
784 cpu_context(cpu, current->mm));
785 kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
786 cpu, vcpu->arch.guest_kernel_asid[cpu]);
787 kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
788 vcpu->arch.guest_user_asid[cpu]);
789 }
790
791 if (vcpu->arch.last_sched_cpu != cpu) {
792 kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
793 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
794 }
795
796 /* Only reload shadow host TLB if new ASIDs haven't been allocated */
797#if 0
798 if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
799 kvm_mips_flush_host_tlb(0);
800 kvm_shadow_tlb_load(vcpu);
801 }
802#endif
803
804 if (!newasid) {
805 /* If we were preempted while the guest was executing, reload the preempted ASID */
806 if (current->flags & PF_VCPU) {
807 write_c0_entryhi(vcpu->arch.
808 preempt_entryhi & ASID_MASK);
809 ehb();
810 }
811 } else {
812 /* New ASIDs were allocated for the VM */
813
814 /* Were we in guest context? If so, the preempted ASID is no longer
815 * valid; set it to what it should be based on the mode of
816 * the Guest (Kernel/User).
817 */
818 if (current->flags & PF_VCPU) {
819 if (KVM_GUEST_KERNEL_MODE(vcpu))
820 write_c0_entryhi(vcpu->arch.
821 guest_kernel_asid[cpu] &
822 ASID_MASK);
823 else
824 write_c0_entryhi(vcpu->arch.
825 guest_user_asid[cpu] &
826 ASID_MASK);
827 ehb();
828 }
829 }
830
831 local_irq_restore(flags);
832
833}
834
835/* ASID can change if another task is scheduled during preemption */
836void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
837{
838 unsigned long flags;
839 uint32_t cpu;
840
841 local_irq_save(flags);
842
843 cpu = smp_processor_id();
844
845
846 vcpu->arch.preempt_entryhi = read_c0_entryhi();
847 vcpu->arch.last_sched_cpu = cpu;
848
849#if 0
850 if ((atomic_read(&kvm_mips_instance) > 1)) {
851 kvm_shadow_tlb_put(vcpu);
852 }
853#endif
854
855 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
856 ASID_VERSION_MASK)) {
857 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
858 cpu_context(cpu, current->mm));
859 drop_mmu_context(current->mm, cpu);
860 }
861 write_c0_entryhi(cpu_asid(cpu, current->mm));
862 ehb();
863
864 local_irq_restore(flags);
865}
866
867uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
868{
869 struct mips_coproc *cop0 = vcpu->arch.cop0;
870 unsigned long paddr, flags;
871 uint32_t inst;
872 int index;
873
874 if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
875 KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
876 local_irq_save(flags);
877 index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
878 if (index >= 0) {
879 inst = *(opc);
880 } else {
881 index =
882 kvm_mips_guest_tlb_lookup(vcpu,
883 ((unsigned long) opc & VPN2_MASK)
884 |
885 (kvm_read_c0_guest_entryhi
886 (cop0) & ASID_MASK));
887 if (index < 0) {
888 kvm_err
889 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
890 __func__, opc, vcpu, read_c0_entryhi());
891 kvm_mips_dump_host_tlbs();
892 local_irq_restore(flags);
893 return KVM_INVALID_INST;
894 }
895 kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
896 &vcpu->arch.
897 guest_tlb[index],
898 NULL, NULL);
899 inst = *(opc);
900 }
901 local_irq_restore(flags);
902 } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
903 paddr =
904 kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
905 (unsigned long) opc);
906 inst = *(uint32_t *) CKSEG0ADDR(paddr);
907 } else {
908 kvm_err("%s: illegal address: %p\n", __func__, opc);
909 return KVM_INVALID_INST;
910 }
911
912 return inst;
913}
914
915EXPORT_SYMBOL(kvm_local_flush_tlb_all);
916EXPORT_SYMBOL(kvm_shadow_tlb_put);
917EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
918EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
919EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
920EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
921EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
922EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
923EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
924EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
925EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
926EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
927EXPORT_SYMBOL(kvm_shadow_tlb_load);
928EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
929EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
930EXPORT_SYMBOL(kvm_get_inst);
931EXPORT_SYMBOL(kvm_arch_vcpu_load);
932EXPORT_SYMBOL(kvm_arch_vcpu_put);
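
A note on the EntryLo composition repeated in the fault handlers above: the
expression (0x3 << 3) | (1 << 2) | (0x1 << 1) packs the cache coherency
attribute (C = 3, cacheable noncoherent, bits 5:3), the dirty bit (D, bit 2)
and the valid bit (V, bit 1) around the PFN, which mips3_paddr_to_tlbpfn()
places at bit 6. A worked sketch under those assumptions (the helper is
illustrative; 4K pages and the R4000-style EntryLo layout are assumed):

	#define ENTRYLO_CCA_CNC	(0x3 << 3)	/* cacheable, noncoherent */
	#define ENTRYLO_D	(1 << 2)	/* dirty (writeable) */
	#define ENTRYLO_V	(1 << 1)	/* valid */

	/* Build an EntryLo value for a host page frame number */
	static unsigned long mk_entrylo(unsigned long pfn)
	{
		return (pfn << 6) | ENTRYLO_CCA_CNC | ENTRYLO_D | ENTRYLO_V;
	}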
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
new file mode 100644
index 000000000000..466aeef044bd
--- /dev/null
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -0,0 +1,482 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/vmalloc.h>
16
17#include <linux/kvm_host.h>
18
19#include "kvm_mips_opcode.h"
20#include "kvm_mips_int.h"
21
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{
24 gpa_t gpa;
25 uint32_t kseg = KSEGX(gva);
26
27 if ((kseg == CKSEG0) || (kseg == CKSEG1))
28 gpa = CPHYSADDR(gva);
29 else {
30 printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
31 kvm_mips_dump_host_tlbs();
32 gpa = KVM_INVALID_ADDR;
33 }
34
35#ifdef DEBUG
36 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
37#endif
38
39 return gpa;
40}
41
42
43static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
44{
45 struct kvm_run *run = vcpu->run;
46 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
47 unsigned long cause = vcpu->arch.host_cp0_cause;
48 enum emulation_result er = EMULATE_DONE;
49 int ret = RESUME_GUEST;
50
51 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
52 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
53 } else
54 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
55
56 switch (er) {
57 case EMULATE_DONE:
58 ret = RESUME_GUEST;
59 break;
60
61 case EMULATE_FAIL:
62 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
63 ret = RESUME_HOST;
64 break;
65
66 case EMULATE_WAIT:
67 run->exit_reason = KVM_EXIT_INTR;
68 ret = RESUME_HOST;
69 break;
70
71 default:
72 BUG();
73 }
74 return ret;
75}
76
77static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
78{
79 struct kvm_run *run = vcpu->run;
80 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
81 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
82 unsigned long cause = vcpu->arch.host_cp0_cause;
83 enum emulation_result er = EMULATE_DONE;
84 int ret = RESUME_GUEST;
85
86 if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
87 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
88#ifdef DEBUG
89 kvm_debug
90 ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
91 cause, opc, badvaddr);
92#endif
93 er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
94
95 if (er == EMULATE_DONE)
96 ret = RESUME_GUEST;
97 else {
98 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
99 ret = RESUME_HOST;
100 }
101 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
102 /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
103 * using HIGHMEM. Need to address this in a HIGHMEM kernel
104 */
105 printk
106 ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
107 cause, opc, badvaddr);
108 kvm_mips_dump_host_tlbs();
109 kvm_arch_vcpu_dump_regs(vcpu);
110 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
111 ret = RESUME_HOST;
112 } else {
113 printk
114 ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
115 cause, opc, badvaddr);
116 kvm_mips_dump_host_tlbs();
117 kvm_arch_vcpu_dump_regs(vcpu);
118 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
119 ret = RESUME_HOST;
120 }
121 return ret;
122}
123
124static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
125{
126 struct kvm_run *run = vcpu->run;
127 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
128 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
129 unsigned long cause = vcpu->arch.host_cp0_cause;
130 enum emulation_result er = EMULATE_DONE;
131 int ret = RESUME_GUEST;
132
133 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
134 && KVM_GUEST_KERNEL_MODE(vcpu)) {
135 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
136 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
137 ret = RESUME_HOST;
138 }
139 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
140 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
141#ifdef DEBUG
142 kvm_debug
143 ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
144 cause, opc, badvaddr);
145#endif
146 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
147 if (er == EMULATE_DONE)
148 ret = RESUME_GUEST;
149 else {
150 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
151 ret = RESUME_HOST;
152 }
153 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
154 /* All KSEG0 faults are handled by KVM, as the guest kernel does not
155 * expect to ever get them
156 */
157 if (kvm_mips_handle_kseg0_tlb_fault
158 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
159 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
160 ret = RESUME_HOST;
161 }
162 } else {
163 kvm_err
164 ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
165 cause, opc, badvaddr);
166 kvm_mips_dump_host_tlbs();
167 kvm_arch_vcpu_dump_regs(vcpu);
168 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
169 ret = RESUME_HOST;
170 }
171 return ret;
172}
173
174static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
175{
176 struct kvm_run *run = vcpu->run;
177 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
178 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
179 unsigned long cause = vcpu->arch.host_cp0_cause;
180 enum emulation_result er = EMULATE_DONE;
181 int ret = RESUME_GUEST;
182
183 if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
184 && KVM_GUEST_KERNEL_MODE(vcpu)) {
185 if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
186 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
187 ret = RESUME_HOST;
188 }
189 } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
190 || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
191#ifdef DEBUG
192 kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
193 vcpu->arch.pc, badvaddr);
194#endif
195
196 /* User Address (UA) fault. This can happen if:
197 * (1) the TLB entry is not present/valid in either the Guest or the shadow
198 * host TLB; in this case we pass the fault on to the guest kernel.
199 * (2) the TLB entry is present in the Guest TLB but not in the shadow host
200 * TLB; in this case we inject it from the Guest TLB into the shadow.
201 */
202
203 er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
204 if (er == EMULATE_DONE)
205 ret = RESUME_GUEST;
206 else {
207 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
208 ret = RESUME_HOST;
209 }
210 } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
211 if (kvm_mips_handle_kseg0_tlb_fault
212 (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
213 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
214 ret = RESUME_HOST;
215 }
216 } else {
217 printk
218 ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
219 cause, opc, badvaddr);
220 kvm_mips_dump_host_tlbs();
221 kvm_arch_vcpu_dump_regs(vcpu);
222 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
223 ret = RESUME_HOST;
224 }
225 return ret;
226}
227
228static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
229{
230 struct kvm_run *run = vcpu->run;
231 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
232 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
233 unsigned long cause = vcpu->arch.host_cp0_cause;
234 enum emulation_result er = EMULATE_DONE;
235 int ret = RESUME_GUEST;
236
237 if (KVM_GUEST_KERNEL_MODE(vcpu)
238 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
239#ifdef DEBUG
240 kvm_debug("Emulate Store to MMIO space\n");
241#endif
242 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
243 if (er == EMULATE_FAIL) {
244 printk("Emulate Store to MMIO space failed\n");
245 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
246 ret = RESUME_HOST;
247 } else {
248 run->exit_reason = KVM_EXIT_MMIO;
249 ret = RESUME_HOST;
250 }
251 } else {
252 printk
253 ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
254 cause, opc, badvaddr);
255 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
256 ret = RESUME_HOST;
257 }
258 return ret;
259}
260
261static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
262{
263 struct kvm_run *run = vcpu->run;
264 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
265 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
266 unsigned long cause = vcpu->arch.host_cp0_cause;
267 enum emulation_result er = EMULATE_DONE;
268 int ret = RESUME_GUEST;
269
270 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
271#ifdef DEBUG
272 kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
273#endif
274 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
275 if (er == EMULATE_FAIL) {
276 printk("Emulate Load from MMIO space failed\n");
277 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
278 ret = RESUME_HOST;
279 } else {
280 run->exit_reason = KVM_EXIT_MMIO;
281 ret = RESUME_HOST;
282 }
283 } else {
284 printk
285 ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
286 cause, opc, badvaddr);
287 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
288 ret = RESUME_HOST;
289 er = EMULATE_FAIL;
290 }
291 return ret;
292}
293
294static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
295{
296 struct kvm_run *run = vcpu->run;
297 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
298 unsigned long cause = vcpu->arch.host_cp0_cause;
299 enum emulation_result er = EMULATE_DONE;
300 int ret = RESUME_GUEST;
301
302 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
303 if (er == EMULATE_DONE)
304 ret = RESUME_GUEST;
305 else {
306 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
307 ret = RESUME_HOST;
308 }
309 return ret;
310}
311
312static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
313{
314 struct kvm_run *run = vcpu->run;
315 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
316 unsigned long cause = vcpu->arch.host_cp0_cause;
317 enum emulation_result er = EMULATE_DONE;
318 int ret = RESUME_GUEST;
319
320 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
321 if (er == EMULATE_DONE)
322 ret = RESUME_GUEST;
323 else {
324 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
325 ret = RESUME_HOST;
326 }
327 return ret;
328}
329
330static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
331{
332 struct kvm_run *run = vcpu->run;
333 uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
334 unsigned long cause = vcpu->arch.host_cp0_cause;
335 enum emulation_result er = EMULATE_DONE;
336 int ret = RESUME_GUEST;
337
338 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
339 if (er == EMULATE_DONE)
340 ret = RESUME_GUEST;
341 else {
342 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
343 ret = RESUME_HOST;
344 }
345 return ret;
346}
347
348static int
349kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
350{
351 struct mips_coproc *cop0 = vcpu->arch.cop0;
352
353 kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]);
354 kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]);
355 kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]);
356 kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]);
357 kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]);
358
359 kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]);
360 kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]);
361 kvm_write_c0_guest_pagemask(cop0,
362 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]);
363 kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]);
364 kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]);
365
366 return 0;
367}
368
369static int
370kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
371{
372 struct mips_coproc *cop0 = vcpu->arch.cop0;
373
374 regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0);
375 regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0);
376 regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0);
377 regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0);
378 regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0);
379
380 regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0);
381 regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0);
382 regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] =
383 kvm_read_c0_guest_pagemask(cop0);
384 regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0);
385 regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0);
386
387 regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0);
388 regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0);
389 regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0);
390 regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0);
391 regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0);
392
393 return 0;
394}
395
396static int kvm_trap_emul_vm_init(struct kvm *kvm)
397{
398 return 0;
399}
400
401static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
402{
403 return 0;
404}
405
406static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
407{
408 struct mips_coproc *cop0 = vcpu->arch.cop0;
409 uint32_t config1;
410 int vcpu_id = vcpu->vcpu_id;
411
412 /* Arch specific stuff: set up the config registers properly so
413 * that the guest will come up as expected. For now we simulate
414 * a MIPS 24kc.
415 */
416 kvm_write_c0_guest_prid(cop0, 0x00019300);
417 kvm_write_c0_guest_config(cop0,
418 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
419 (MMU_TYPE_R4000 << CP0C0_MT));
420
421 /* Read the cache characteristics from the host Config1 Register */
422 config1 = (read_c0_config1() & ~0x7f);
423
424 /* Set up MMU size */
425 config1 &= ~(0x3f << 25);
426 config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
427
428 /* We unset some bits that we aren't emulating */
429 config1 &=
430 ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
431 (1 << CP0C1_WR) | (1 << CP0C1_CA));
432 kvm_write_c0_guest_config1(cop0, config1);
433
434 kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
435 /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
436 kvm_write_c0_guest_config3(cop0,
437 MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
438 CP0C3_ULRI));
439
440 /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
441 kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
442
443 /* Set up IntCtl defaults: compatibility mode for timer interrupts (HW5) */
444 kvm_write_c0_guest_intctl(cop0, 0xFC000000);
445
446 /* Write the vcpu id into the Ebase CPUNum field to support SMP Guests */
447 kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
448
449 return 0;
450}
451
452static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
453 /* exit handlers */
454 .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
455 .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
456 .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
457 .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
458 .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
459 .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
460 .handle_syscall = kvm_trap_emul_handle_syscall,
461 .handle_res_inst = kvm_trap_emul_handle_res_inst,
462 .handle_break = kvm_trap_emul_handle_break,
463
464 .vm_init = kvm_trap_emul_vm_init,
465 .vcpu_init = kvm_trap_emul_vcpu_init,
466 .vcpu_setup = kvm_trap_emul_vcpu_setup,
467 .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
468 .queue_timer_int = kvm_mips_queue_timer_int_cb,
469 .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
470 .queue_io_int = kvm_mips_queue_io_int_cb,
471 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
472 .irq_deliver = kvm_mips_irq_deliver_cb,
473 .irq_clear = kvm_mips_irq_clear_cb,
474 .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs,
475 .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs,
476};
477
478int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
479{
480 *install_callbacks = &kvm_trap_emul_callbacks;
481 return 0;
482}
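
The trap-and-emulate backend is exported as a single callbacks table: the
arch core calls kvm_mips_emulation_init() once to install it and then routes
every exit through kvm_mips_callbacks. A minimal sketch of the consuming
side (the function below is illustrative; the real dispatch lives in
kvm_mips.c):

	static int example_install_and_dispatch(struct kvm_vcpu *vcpu)
	{
		int ret;

		ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
		if (ret)
			return ret;

		/* Per-vcpu setup, then e.g. a syscall exit is routed: */
		kvm_mips_callbacks->vcpu_setup(vcpu);
		return kvm_mips_callbacks->handle_syscall(vcpu);
	}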
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
new file mode 100644
index 000000000000..bc9e0f406c08
--- /dev/null
+++ b/arch/mips/kvm/trace.h
@@ -0,0 +1,46 @@
1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7* Authors: Sanjay Lal <sanjayl@kymasys.com>
8*/
9
10#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
11#define _TRACE_KVM_H
12
13#include <linux/tracepoint.h>
14
15#undef TRACE_SYSTEM
16#define TRACE_SYSTEM kvm
17#define TRACE_INCLUDE_PATH .
18#define TRACE_INCLUDE_FILE trace
19
20/*
21 * Tracepoints for VM exits
22 */
23extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
24
25TRACE_EVENT(kvm_exit,
26 TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
27 TP_ARGS(vcpu, reason),
28 TP_STRUCT__entry(
29 __field(struct kvm_vcpu *, vcpu)
30 __field(unsigned int, reason)
31 ),
32
33 TP_fast_assign(
34 __entry->vcpu = vcpu;
35 __entry->reason = reason;
36 ),
37
38 TP_printk("[%s]PC: 0x%08lx",
39 kvm_mips_exit_types_str[__entry->reason],
40 __entry->vcpu->arch.pc)
41);
42
43#endif /* _TRACE_KVM_H */
44
45/* This part must be outside protection */
46#include <trace/define_trace.h>
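
TRACE_EVENT(kvm_exit, ...) above generates a trace_kvm_exit() helper once
one translation unit defines CREATE_TRACE_POINTS before including this
header. A hedged usage sketch (the call-site function is an assumption):

	#define CREATE_TRACE_POINTS
	#include "trace.h"

	/* Fire the tracepoint once the exit reason has been classified;
	 * exit_index must be a valid index into kvm_mips_exit_types_str.
	 */
	static void example_note_exit(struct kvm_vcpu *vcpu, unsigned int exit_index)
	{
		trace_kvm_exit(vcpu, exit_index);
	}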
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 2078915eacb9..96f4d5a6c21c 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -136,7 +136,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; 136 r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
137} 137}
138 138
139static void (* r4k_blast_dcache)(void); 139void (* r4k_blast_dcache)(void);
140EXPORT_SYMBOL(r4k_blast_dcache);
140 141
141static void __cpuinit r4k_blast_dcache_setup(void) 142static void __cpuinit r4k_blast_dcache_setup(void)
142{ 143{
@@ -264,7 +265,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
264 r4k_blast_icache_page_indexed = blast_icache64_page_indexed; 265 r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
265} 266}
266 267
267static void (* r4k_blast_icache)(void); 268void (* r4k_blast_icache)(void);
269EXPORT_SYMBOL(r4k_blast_icache);
268 270
269static void __cpuinit r4k_blast_icache_setup(void) 271static void __cpuinit r4k_blast_icache_setup(void)
270{ 272{
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 07cec4407b0c..5aeb3eb0b72f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -48,6 +48,7 @@ void (*flush_icache_all)(void);
48 48
49EXPORT_SYMBOL_GPL(local_flush_data_cache_page); 49EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
50EXPORT_SYMBOL(flush_data_cache_page); 50EXPORT_SYMBOL(flush_data_cache_page);
51EXPORT_SYMBOL(flush_icache_all);
51 52
52#ifdef CONFIG_DMA_NONCOHERENT 53#ifdef CONFIG_DMA_NONCOHERENT
53 54
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 493131c81a29..c643de4c473a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -13,6 +13,7 @@
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/hugetlb.h> 15#include <linux/hugetlb.h>
16#include <linux/module.h>
16 17
17#include <asm/cpu.h> 18#include <asm/cpu.h>
18#include <asm/bootinfo.h> 19#include <asm/bootinfo.h>
@@ -94,6 +95,7 @@ void local_flush_tlb_all(void)
94 FLUSH_ITLB; 95 FLUSH_ITLB;
95 EXIT_CRITICAL(flags); 96 EXIT_CRITICAL(flags);
96} 97}
98EXPORT_SYMBOL(local_flush_tlb_all);
97 99
98/* All entries common to a mm share an asid. To effectively flush 100/* All entries common to a mm share an asid. To effectively flush
99 these entries, we just bump the asid. */ 101 these entries, we just bump the asid. */
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
index 5b548b5a4fcf..2cc72c9b38e3 100644
--- a/arch/mips/mti-malta/Platform
+++ b/arch/mips/mti-malta/Platform
@@ -3,5 +3,9 @@
3# 3#
4platform-$(CONFIG_MIPS_MALTA) += mti-malta/ 4platform-$(CONFIG_MIPS_MALTA) += mti-malta/
5cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta 5cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
6load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 6ifdef CONFIG_KVM_GUEST
7 load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000
8else
9 load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
10endif
7all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin 11all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index a144b89cf9ba..bc6ac00c0d57 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -76,6 +76,21 @@ static void __init estimate_frequencies(void)
76 unsigned int count, start; 76 unsigned int count, start;
77 unsigned int giccount = 0, gicstart = 0; 77 unsigned int giccount = 0, gicstart = 0;
78 78
79#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ)
80 unsigned int prid = read_c0_prid() & 0xffff00;
81
82 /*
83 * XXXKYMA: hardwire the CPU frequency to Host Freq/4
84 */
85 count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 3;
86 if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
87 (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
88 count *= 2;
89
90 mips_hpt_frequency = count;
91 return;
92#endif
93
79 local_irq_save(flags); 94 local_irq_save(flags);
80 95
81 /* Start counter exactly on falling edge of update flag. */ 96 /* Start counter exactly on falling edge of update flag. */
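
The KVM_HOST_FREQ path above derives the guest timer frequency from the
host clock instead of calibrating against the emulated RTC: count starts
as host MHz times 10^6 shifted right by 3 (divide by 8) and is doubled
unless the core is a 20Kc/25Kf, whose Count register runs at the full CPU
clock rather than half. For the default 500 MHz host this yields 125 MHz.
A quick standalone check of the arithmetic (illustrative only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int host_mhz = 500;	/* CONFIG_KVM_HOST_FREQ default */
		unsigned int count = (host_mhz * 1000000) >> 3;

		count *= 2;	/* Count ticks at CPU/2 on most cores */
		printf("mips_hpt_frequency = %u Hz\n", count);	/* 125000000 */
		return 0;
	}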
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 1d1919a44e88..7a53b1e28a93 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -114,7 +114,7 @@ void __init replicate_kernel_text()
114 * data structures on the first couple of pages of the first slot of each 114 * data structures on the first couple of pages of the first slot of each
115 * node. If this is the case, getfirstfree(node) > getslotstart(node, 0). 115 * node. If this is the case, getfirstfree(node) > getslotstart(node, 0).
116 */ 116 */
117pfn_t node_getfirstfree(cnodeid_t cnode) 117unsigned long node_getfirstfree(cnodeid_t cnode)
118{ 118{
119 unsigned long loadbase = REP_BASE; 119 unsigned long loadbase = REP_BASE;
120 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 120 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 3505d08ff2fd..64d8dab06b01 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -255,14 +255,14 @@ static void __init dump_topology(void)
255 } 255 }
256} 256}
257 257
258static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot) 258static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot)
259{ 259{
260 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 260 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
261 261
262 return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); 262 return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT);
263} 263}
264 264
265static pfn_t __init slot_psize_compute(cnodeid_t node, int slot) 265static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
266{ 266{
267 nasid_t nasid; 267 nasid_t nasid;
268 lboard_t *brd; 268 lboard_t *brd;
@@ -353,7 +353,7 @@ static void __init mlreset(void)
353 353
354static void __init szmem(void) 354static void __init szmem(void)
355{ 355{
356 pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ 356 unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */
357 int slot; 357 int slot;
358 cnodeid_t node; 358 cnodeid_t node;
359 359
@@ -390,10 +390,10 @@ static void __init szmem(void)
390 390
391static void __init node_mem_init(cnodeid_t node) 391static void __init node_mem_init(cnodeid_t node)
392{ 392{
393 pfn_t slot_firstpfn = slot_getbasepfn(node, 0); 393 unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
394 pfn_t slot_freepfn = node_getfirstfree(node); 394 unsigned long slot_freepfn = node_getfirstfree(node);
395 unsigned long bootmap_size; 395 unsigned long bootmap_size;
396 pfn_t start_pfn, end_pfn; 396 unsigned long start_pfn, end_pfn;
397 397
398 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); 398 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
399 399
@@ -467,7 +467,7 @@ void __init paging_init(void)
467 pagetable_init(); 467 pagetable_init();
468 468
469 for_each_online_node(node) { 469 for_each_online_node(node) {
470 pfn_t start_pfn, end_pfn; 470 unsigned long start_pfn, end_pfn;
471 471
472 get_pfn_range_for_nid(node, &start_pfn, &end_pfn); 472 get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
473 473
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f18013f09e68..1fc942048521 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1981,7 +1981,7 @@ static long kvm_vcpu_ioctl(struct file *filp,
1981 if (vcpu->kvm->mm != current->mm) 1981 if (vcpu->kvm->mm != current->mm)
1982 return -EIO; 1982 return -EIO;
1983 1983
1984#if defined(CONFIG_S390) || defined(CONFIG_PPC) 1984#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
1985 /* 1985 /*
1986 * Special cases: vcpu ioctls that are asynchronous to vcpu execution, 1986 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1987 * so vcpu_load() would break it. 1987 * so vcpu_load() would break it.