author		Alexander Graf <agraf@suse.de>	2010-04-15 18:11:34 -0400
committer	Avi Kivity <avi@redhat.com>	2010-05-17 05:18:17 -0400
commit		786f19daa8b109ae6b96a351eee3a14b9f8b57d0 (patch)
tree		9f2ce80363552264bdbee1ce6eb99291f5064640 /arch/powerpc/kvm
parent		d32154f1b8b748ea23edc90b06f640304a979012 (diff)
KVM: PPC: Add SR swapping code
Later in this series we will move the current segment switch code to generic code and make that call hooks for the specific sub-archs (32 vs. 64 bit). This is the hook for 32 bits. It enables the entry and exit code to swap segment registers with values from the shadow vcpu structure.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
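For orientation, here is a minimal sketch of how a generic entry path could invoke this hook once the follow-up patches land; the label and surrounding steps are illustrative only and are not part of this patch:

	/* hypothetical caller (illustration, not in this patch) */
	kvmppc_handler_trampoline_enter:
		...			/* save host state, enter real mode */
		LOAD_GUEST_SEGMENTS	/* 32-bit hook: load guest SRs, clear BATs */
		...			/* continue into the guest */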
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/book3s_32_sr.S	|	143
1 file changed, 143 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
new file mode 100644
index 000000000000..3608471ad2d8
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -0,0 +1,143 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R1 = host R1
	 * R2 = host R2
	 * R3 = shadow vcpu
	 * all other volatile GPRS = free
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */

#define XCHG_SR(n)	lwz	r9, (SVCPU_SR+(n*4))(r3);  \
			mtsr	n, r9
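	/* For illustration, XCHG_SR(0) expands to:
	 *   lwz  r9, (SVCPU_SR + 0*4)(r3)
	 *   mtsr 0, r9
	 * i.e. fetch the guest's cached value for segment register n from
	 * the shadow vcpu and install it into SR n. */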
43
	XCHG_SR(0)
	XCHG_SR(1)
	XCHG_SR(2)
	XCHG_SR(3)
	XCHG_SR(4)
	XCHG_SR(5)
	XCHG_SR(6)
	XCHG_SR(7)
	XCHG_SR(8)
	XCHG_SR(9)
	XCHG_SR(10)
	XCHG_SR(11)
	XCHG_SR(12)
	XCHG_SR(13)
	XCHG_SR(14)
	XCHG_SR(15)

	/* Clear BATs. */

#define KVM_KILL_BAT(n, reg)		\
	mtspr	SPRN_IBAT##n##U,reg;	\
	mtspr	SPRN_IBAT##n##L,reg;	\
	mtspr	SPRN_DBAT##n##U,reg;	\
	mtspr	SPRN_DBAT##n##L,reg;
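	/* The valid bits (Vs/Vp) live in the upper BAT registers, so writing
	 * zero disables the mapping; e.g. KVM_KILL_BAT(0, r9) expands to:
	 *   mtspr SPRN_IBAT0U, r9
	 *   mtspr SPRN_IBAT0L, r9
	 *   mtspr SPRN_DBAT0U, r9
	 *   mtspr SPRN_DBAT0L, r9
	 */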
	li	r9, 0
	KVM_KILL_BAT(0, r9)
	KVM_KILL_BAT(1, r9)
	KVM_KILL_BAT(2, r9)
	KVM_KILL_BAT(3, r9)

.endm
/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF
	 * SVCPU.*    = guest *
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 *
	 */

	/* Restore BATs */

	/* The entry code cleared both halves of every BAT, so restore
	 * both the upper and the lower words from the saved host values. */
#define KVM_LOAD_BAT(n, reg, RA, RB)	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB;
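	/* This assumes the kernel's BATS save area stores each BAT as four
	 * consecutive words (IBATnU, IBATnL, DBATnU, DBATnL), 16 bytes per
	 * index n, matching the (n*16) offsets above. */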
	lis	r9, BATS@ha
	addi	r9, r9, BATS@l
	tophys(r9, r9)
	KVM_LOAD_BAT(0, r9, r10, r11)
	KVM_LOAD_BAT(1, r9, r10, r11)
	KVM_LOAD_BAT(2, r9, r10, r11)
	KVM_LOAD_BAT(3, r9, r10, r11)

	/* Restore Segment Registers */

	/* 0xc - 0xf */

	li	r0, 4
	mtctr	r0
	LOAD_REG_IMMEDIATE(r3, 0x20000000 | (0x111 * 0xc))
	lis	r4, 0xc000
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
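	/* mtsrin writes r3 into the segment register selected by the top
	 * four bits of the address in r4, so this loop covers SRs 12-15
	 * (0xc0000000 upward). Each pass adds 0x111 to the VSID and
	 * 0x10000000 (256MB, one segment) to the address; the initial
	 * value also sets the Kp key bit (0x20000000). */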
	/* 0x0 - 0xb */

	/* 'current->mm' needs to be in r4 */
	tophys(r4, r2)
	lwz	r4, MM(r4)
	tophys(r4, r4)
	/* This only clobbers r0, r3, r4 and r5 */
	bl	switch_mmu_context
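	/* On 32-bit, r2 holds 'current', and with translation off we must
	 * dereference physical addresses, hence the tophys() before loading
	 * current->mm. r3/r4 carry the first two C arguments in this ABI,
	 * so r4 is the mm that switch_mmu_context() switches to, reloading
	 * segment registers 0x0-0xb from that context. (Exact prototype
	 * assumed from the calling sequence above.) */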
.endm