author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-01-08 00:24:38 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-01-08 00:24:38 -0500
commit	24f030175d30f019be41766cdf88c2ff03de19ff (patch)
tree	354232a84e82d5a721ed7b1a9af580ff2a59be8f /arch/powerpc
parent	4aa12f7b927c3cac0e0cf3503642597527d0ece0 (diff)
parent	9e42d0cf5020aaf217433cad1a224745241d212a (diff)
Merge commit 'origin/master' into next
Diffstat (limited to 'arch/powerpc')
43 files changed, 2078 insertions, 1239 deletions
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index f32829937aad..ab6dda372438 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -208,7 +208,7 @@ image-$(CONFIG_DEFAULT_UIMAGE) += uImage
208 | # | 208 | # |
209 | # Theses are default targets to build images which embed device tree blobs. | 209 | # Theses are default targets to build images which embed device tree blobs. |
210 | # They are only required on boards which do not have FDT support in firmware. | 210 | # They are only required on boards which do not have FDT support in firmware. |
211 | # Boards with newish u-boot firmare can use the uImage target above | 211 | # Boards with newish u-boot firmware can use the uImage target above |
212 | # | 212 | # |
213 | 213 | ||
214 | # Board ports in arch/powerpc/platform/40x/Kconfig | 214 | # Board ports in arch/powerpc/platform/40x/Kconfig |
diff --git a/arch/powerpc/boot/dts/sequoia.dts b/arch/powerpc/boot/dts/sequoia.dts
index 3b295e8df53f..43cc68bd3192 100644
--- a/arch/powerpc/boot/dts/sequoia.dts
+++ b/arch/powerpc/boot/dts/sequoia.dts
@@ -134,7 +134,7 @@
134 | }; | 134 | }; |
135 | 135 | ||
136 | USB1: usb@e0000400 { | 136 | USB1: usb@e0000400 { |
137 | compatible = "ohci-be"; | 137 | compatible = "ibm,usb-ohci-440epx", "ohci-be"; |
138 | reg = <0x00000000 0xe0000400 0x00000060>; | 138 | reg = <0x00000000 0xe0000400 0x00000060>; |
139 | interrupt-parent = <&UIC0>; | 139 | interrupt-parent = <&UIC0>; |
140 | interrupts = <0x15 0x8>; | 140 | interrupts = <0x15 0x8>; |
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 5ab7d7fe198c..9268602de5d0 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -35,3 +35,4 @@ unifdef-y += spu_info.h
35 | unifdef-y += termios.h | 35 | unifdef-y += termios.h |
36 | unifdef-y += types.h | 36 | unifdef-y += types.h |
37 | unifdef-y += unistd.h | 37 | unifdef-y += unistd.h |
38 | unifdef-y += swab.h | ||
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 499be5bdd6fa..b401950f5259 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -5,7 +5,7 @@
5 | * PowerPC atomic operations | 5 | * PowerPC atomic operations |
6 | */ | 6 | */ |
7 | 7 | ||
8 | typedef struct { int counter; } atomic_t; | 8 | #include <linux/types.h> |
9 | 9 | ||
10 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
@@ -251,8 +251,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
251 | 251 | ||
252 | #ifdef __powerpc64__ | 252 | #ifdef __powerpc64__ |
253 | 253 | ||
254 | typedef struct { long counter; } atomic64_t; | ||
255 | |||
256 | #define ATOMIC64_INIT(i) { (i) } | 254 | #define ATOMIC64_INIT(i) { (i) } |
257 | 255 | ||
258 | static __inline__ long atomic64_read(const atomic64_t *v) | 256 | static __inline__ long atomic64_read(const atomic64_t *v) |
diff --git a/arch/powerpc/include/asm/byteorder.h b/arch/powerpc/include/asm/byteorder.h
index d5de325472e9..5cca27a41532 100644
--- a/arch/powerpc/include/asm/byteorder.h
+++ b/arch/powerpc/include/asm/byteorder.h
@@ -8,86 +8,7 @@
8 | * 2 of the License, or (at your option) any later version. | 8 | * 2 of the License, or (at your option) any later version. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <asm/types.h> | 11 | #include <asm/swab.h> |
12 | #include <linux/compiler.h> | 12 | #include <linux/byteorder/big_endian.h> |
13 | |||
14 | #define __BIG_ENDIAN | ||
15 | |||
16 | #ifdef __GNUC__ | ||
17 | #ifdef __KERNEL__ | ||
18 | |||
19 | static __inline__ __u16 ld_le16(const volatile __u16 *addr) | ||
20 | { | ||
21 | __u16 val; | ||
22 | |||
23 | __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); | ||
24 | return val; | ||
25 | } | ||
26 | #define __arch_swab16p ld_le16 | ||
27 | |||
28 | static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) | ||
29 | { | ||
30 | __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); | ||
31 | } | ||
32 | |||
33 | static inline void __arch_swab16s(__u16 *addr) | ||
34 | { | ||
35 | st_le16(addr, *addr); | ||
36 | } | ||
37 | #define __arch_swab16s __arch_swab16s | ||
38 | |||
39 | static __inline__ __u32 ld_le32(const volatile __u32 *addr) | ||
40 | { | ||
41 | __u32 val; | ||
42 | |||
43 | __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); | ||
44 | return val; | ||
45 | } | ||
46 | #define __arch_swab32p ld_le32 | ||
47 | |||
48 | static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) | ||
49 | { | ||
50 | __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); | ||
51 | } | ||
52 | |||
53 | static inline void __arch_swab32s(__u32 *addr) | ||
54 | { | ||
55 | st_le32(addr, *addr); | ||
56 | } | ||
57 | #define __arch_swab32s __arch_swab32s | ||
58 | |||
59 | static inline __attribute_const__ __u16 __arch_swab16(__u16 value) | ||
60 | { | ||
61 | __u16 result; | ||
62 | |||
63 | __asm__("rlwimi %0,%1,8,16,23" | ||
64 | : "=r" (result) | ||
65 | : "r" (value), "0" (value >> 8)); | ||
66 | return result; | ||
67 | } | ||
68 | #define __arch_swab16 __arch_swab16 | ||
69 | |||
70 | static inline __attribute_const__ __u32 __arch_swab32(__u32 value) | ||
71 | { | ||
72 | __u32 result; | ||
73 | |||
74 | __asm__("rlwimi %0,%1,24,16,23\n\t" | ||
75 | "rlwimi %0,%1,8,8,15\n\t" | ||
76 | "rlwimi %0,%1,24,0,7" | ||
77 | : "=r" (result) | ||
78 | : "r" (value), "0" (value >> 24)); | ||
79 | return result; | ||
80 | } | ||
81 | #define __arch_swab32 __arch_swab32 | ||
82 | |||
83 | #endif /* __KERNEL__ */ | ||
84 | |||
85 | #ifndef __powerpc64__ | ||
86 | #define __SWAB_64_THRU_32__ | ||
87 | #endif /* __powerpc64__ */ | ||
88 | |||
89 | #endif /* __GNUC__ */ | ||
90 | |||
91 | #include <linux/byteorder.h> | ||
92 | 13 | ||
93 | #endif /* _ASM_POWERPC_BYTEORDER_H */ | 14 | #endif /* _ASM_POWERPC_BYTEORDER_H */ |
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
new file mode 100644
index 000000000000..9b198d1b3b2b
--- /dev/null
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -0,0 +1,80 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __ASM_PPC_DISASSEMBLE_H__ | ||
21 | #define __ASM_PPC_DISASSEMBLE_H__ | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | static inline unsigned int get_op(u32 inst) | ||
26 | { | ||
27 | return inst >> 26; | ||
28 | } | ||
29 | |||
30 | static inline unsigned int get_xop(u32 inst) | ||
31 | { | ||
32 | return (inst >> 1) & 0x3ff; | ||
33 | } | ||
34 | |||
35 | static inline unsigned int get_sprn(u32 inst) | ||
36 | { | ||
37 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
38 | } | ||
39 | |||
40 | static inline unsigned int get_dcrn(u32 inst) | ||
41 | { | ||
42 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
43 | } | ||
44 | |||
45 | static inline unsigned int get_rt(u32 inst) | ||
46 | { | ||
47 | return (inst >> 21) & 0x1f; | ||
48 | } | ||
49 | |||
50 | static inline unsigned int get_rs(u32 inst) | ||
51 | { | ||
52 | return (inst >> 21) & 0x1f; | ||
53 | } | ||
54 | |||
55 | static inline unsigned int get_ra(u32 inst) | ||
56 | { | ||
57 | return (inst >> 16) & 0x1f; | ||
58 | } | ||
59 | |||
60 | static inline unsigned int get_rb(u32 inst) | ||
61 | { | ||
62 | return (inst >> 11) & 0x1f; | ||
63 | } | ||
64 | |||
65 | static inline unsigned int get_rc(u32 inst) | ||
66 | { | ||
67 | return inst & 0x1; | ||
68 | } | ||
69 | |||
70 | static inline unsigned int get_ws(u32 inst) | ||
71 | { | ||
72 | return (inst >> 11) & 0x1f; | ||
73 | } | ||
74 | |||
75 | static inline unsigned int get_d(u32 inst) | ||
76 | { | ||
77 | return inst & 0xffff; | ||
78 | } | ||
79 | |||
80 | #endif /* __ASM_PPC_DISASSEMBLE_H__ */ | ||
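[Editor's note: the accessors above just mask fixed bit fields out of a 32-bit PowerPC instruction word. A minimal userspace sketch of the same slicing, with a hand-encoded lwz as sample input (illustrative only, not kernel API):]

#include <stdint.h>
#include <stdio.h>

/* Userspace mirrors of the get_op()/get_rt()/get_ra()/get_d() accessors above. */
static unsigned int get_op(uint32_t inst) { return inst >> 26; }
static unsigned int get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; }
static unsigned int get_ra(uint32_t inst) { return (inst >> 16) & 0x1f; }
static unsigned int get_d(uint32_t inst)  { return inst & 0xffff; }

int main(void)
{
	/* lwz r3, 8(r1): primary opcode 32, RT=3, RA=1, D=8 */
	uint32_t inst = (32u << 26) | (3u << 21) | (1u << 16) | 8u;

	printf("op=%u rt=%u ra=%u d=%u\n",
	       get_op(inst), get_rt(inst), get_ra(inst), get_d(inst));
	return 0;
}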
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 26f0d0ab27a5..b1dafb6a9743 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -18,6 +18,12 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
18 | pte_t *ptep); | 18 | pte_t *ptep); |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs | ||
22 | * to override the version in mm/hugetlb.c | ||
23 | */ | ||
24 | #define vma_mmu_pagesize vma_mmu_pagesize | ||
25 | |||
26 | /* | ||
21 | * If the arch doesn't supply something else, assume that hugepage | 27 | * If the arch doesn't supply something else, assume that hugepage |
22 | * size aligned regions are ok without further preparation. | 28 | * size aligned regions are ok without further preparation. |
23 | */ | 29 | */ |
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
new file mode 100644
index 000000000000..f49031b632ca
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -0,0 +1,61 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __ASM_44X_H__ | ||
21 | #define __ASM_44X_H__ | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | |||
25 | #define PPC44x_TLB_SIZE 64 | ||
26 | |||
27 | /* If the guest is expecting it, this can be as large as we like; we'd just | ||
28 | * need to find some way of advertising it. */ | ||
29 | #define KVM44x_GUEST_TLB_SIZE 64 | ||
30 | |||
31 | struct kvmppc_44x_shadow_ref { | ||
32 | struct page *page; | ||
33 | u16 gtlb_index; | ||
34 | u8 writeable; | ||
35 | u8 tid; | ||
36 | }; | ||
37 | |||
38 | struct kvmppc_vcpu_44x { | ||
39 | /* Unmodified copy of the guest's TLB. */ | ||
40 | struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE]; | ||
41 | |||
42 | /* References to guest pages in the hardware TLB. */ | ||
43 | struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE]; | ||
44 | |||
45 | /* State of the shadow TLB at guest context switch time. */ | ||
46 | struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE]; | ||
47 | u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; | ||
48 | |||
49 | struct kvm_vcpu vcpu; | ||
50 | }; | ||
51 | |||
52 | static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu) | ||
53 | { | ||
54 | return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu); | ||
55 | } | ||
56 | |||
57 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid); | ||
58 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu); | ||
59 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu); | ||
60 | |||
61 | #endif /* __ASM_44X_H__ */ | ||
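[Editor's note: to_44x() relies on struct kvm_vcpu being embedded inside struct kvmppc_vcpu_44x; container_of() subtracts the member offset to recover the containing structure. A self-contained sketch of the idiom, with simplified stand-in types and a hypothetical field name:]

#include <stddef.h>
#include <stdio.h>

struct kvm_vcpu { int vcpu_id; };

struct kvmppc_vcpu_44x {
	int guest_tlb_size;		/* stand-in for the real fields */
	struct kvm_vcpu vcpu;		/* embedded, as in the header above */
};

/* Userspace equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
}

int main(void)
{
	struct kvmppc_vcpu_44x v = { .guest_tlb_size = 64 };
	struct kvm_vcpu *generic = &v.vcpu;

	printf("%d\n", to_44x(generic)->guest_tlb_size);	/* prints 64 */
	return 0;
}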
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 34b52b7180cd..c1e436fe7738 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -64,27 +64,58 @@ struct kvm_vcpu_stat {
64 | u32 halt_wakeup; | 64 | u32 halt_wakeup; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | struct tlbe { | 67 | struct kvmppc_44x_tlbe { |
68 | u32 tid; /* Only the low 8 bits are used. */ | 68 | u32 tid; /* Only the low 8 bits are used. */ |
69 | u32 word0; | 69 | u32 word0; |
70 | u32 word1; | 70 | u32 word1; |
71 | u32 word2; | 71 | u32 word2; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct kvm_arch { | 74 | enum kvm_exit_types { |
75 | MMIO_EXITS, | ||
76 | DCR_EXITS, | ||
77 | SIGNAL_EXITS, | ||
78 | ITLB_REAL_MISS_EXITS, | ||
79 | ITLB_VIRT_MISS_EXITS, | ||
80 | DTLB_REAL_MISS_EXITS, | ||
81 | DTLB_VIRT_MISS_EXITS, | ||
82 | SYSCALL_EXITS, | ||
83 | ISI_EXITS, | ||
84 | DSI_EXITS, | ||
85 | EMULATED_INST_EXITS, | ||
86 | EMULATED_MTMSRWE_EXITS, | ||
87 | EMULATED_WRTEE_EXITS, | ||
88 | EMULATED_MTSPR_EXITS, | ||
89 | EMULATED_MFSPR_EXITS, | ||
90 | EMULATED_MTMSR_EXITS, | ||
91 | EMULATED_MFMSR_EXITS, | ||
92 | EMULATED_TLBSX_EXITS, | ||
93 | EMULATED_TLBWE_EXITS, | ||
94 | EMULATED_RFI_EXITS, | ||
95 | DEC_EXITS, | ||
96 | EXT_INTR_EXITS, | ||
97 | HALT_WAKEUP, | ||
98 | USR_PR_INST, | ||
99 | FP_UNAVAIL, | ||
100 | DEBUG_EXITS, | ||
101 | TIMEINGUEST, | ||
102 | __NUMBER_OF_KVM_EXIT_TYPES | ||
75 | }; | 103 | }; |
76 | 104 | ||
77 | struct kvm_vcpu_arch { | 105 | /* allow access to big endian 32bit upper/lower parts and 64bit var */ |
78 | /* Unmodified copy of the guest's TLB. */ | 106 | struct kvmppc_exit_timing { |
79 | struct tlbe guest_tlb[PPC44x_TLB_SIZE]; | 107 | union { |
80 | /* TLB that's actually used when the guest is running. */ | 108 | u64 tv64; |
81 | struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; | 109 | struct { |
82 | /* Pages which are referenced in the shadow TLB. */ | 110 | u32 tbu, tbl; |
83 | struct page *shadow_pages[PPC44x_TLB_SIZE]; | 111 | } tv32; |
112 | }; | ||
113 | }; | ||
84 | 114 | ||
85 | /* Track which TLB entries we've modified in the current exit. */ | 115 | struct kvm_arch { |
86 | u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; | 116 | }; |
87 | 117 | ||
118 | struct kvm_vcpu_arch { | ||
88 | u32 host_stack; | 119 | u32 host_stack; |
89 | u32 host_pid; | 120 | u32 host_pid; |
90 | u32 host_dbcr0; | 121 | u32 host_dbcr0; |
@@ -94,32 +125,32 @@ struct kvm_vcpu_arch {
94 | u32 host_msr; | 125 | u32 host_msr; |
95 | 126 | ||
96 | u64 fpr[32]; | 127 | u64 fpr[32]; |
97 | u32 gpr[32]; | 128 | ulong gpr[32]; |
98 | 129 | ||
99 | u32 pc; | 130 | ulong pc; |
100 | u32 cr; | 131 | u32 cr; |
101 | u32 ctr; | 132 | ulong ctr; |
102 | u32 lr; | 133 | ulong lr; |
103 | u32 xer; | 134 | ulong xer; |
104 | 135 | ||
105 | u32 msr; | 136 | ulong msr; |
106 | u32 mmucr; | 137 | u32 mmucr; |
107 | u32 sprg0; | 138 | ulong sprg0; |
108 | u32 sprg1; | 139 | ulong sprg1; |
109 | u32 sprg2; | 140 | ulong sprg2; |
110 | u32 sprg3; | 141 | ulong sprg3; |
111 | u32 sprg4; | 142 | ulong sprg4; |
112 | u32 sprg5; | 143 | ulong sprg5; |
113 | u32 sprg6; | 144 | ulong sprg6; |
114 | u32 sprg7; | 145 | ulong sprg7; |
115 | u32 srr0; | 146 | ulong srr0; |
116 | u32 srr1; | 147 | ulong srr1; |
117 | u32 csrr0; | 148 | ulong csrr0; |
118 | u32 csrr1; | 149 | ulong csrr1; |
119 | u32 dsrr0; | 150 | ulong dsrr0; |
120 | u32 dsrr1; | 151 | ulong dsrr1; |
121 | u32 dear; | 152 | ulong dear; |
122 | u32 esr; | 153 | ulong esr; |
123 | u32 dec; | 154 | u32 dec; |
124 | u32 decar; | 155 | u32 decar; |
125 | u32 tbl; | 156 | u32 tbl; |
@@ -127,7 +158,7 @@ struct kvm_vcpu_arch {
127 | u32 tcr; | 158 | u32 tcr; |
128 | u32 tsr; | 159 | u32 tsr; |
129 | u32 ivor[16]; | 160 | u32 ivor[16]; |
130 | u32 ivpr; | 161 | ulong ivpr; |
131 | u32 pir; | 162 | u32 pir; |
132 | 163 | ||
133 | u32 shadow_pid; | 164 | u32 shadow_pid; |
@@ -140,9 +171,22 @@ struct kvm_vcpu_arch {
140 | u32 dbcr0; | 171 | u32 dbcr0; |
141 | u32 dbcr1; | 172 | u32 dbcr1; |
142 | 173 | ||
174 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
175 | struct kvmppc_exit_timing timing_exit; | ||
176 | struct kvmppc_exit_timing timing_last_enter; | ||
177 | u32 last_exit_type; | ||
178 | u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
179 | u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
180 | u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
181 | u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
182 | u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES]; | ||
183 | u64 timing_last_exit; | ||
184 | struct dentry *debugfs_exit_timing; | ||
185 | #endif | ||
186 | |||
143 | u32 last_inst; | 187 | u32 last_inst; |
144 | u32 fault_dear; | 188 | ulong fault_dear; |
145 | u32 fault_esr; | 189 | ulong fault_esr; |
146 | gpa_t paddr_accessed; | 190 | gpa_t paddr_accessed; |
147 | 191 | ||
148 | u8 io_gpr; /* GPR used as IO source/target */ | 192 | u8 io_gpr; /* GPR used as IO source/target */ |
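[Editor's note: the kvmppc_exit_timing union works because PowerPC is big-endian; the upper timebase word (tbu) occupies the first four bytes of the u64, so assembly can store tbu/tbl individually while C reads the combined tv64. A sketch that behaves as described only on a big-endian host:]

#include <stdint.h>
#include <stdio.h>

struct exit_timing {
	union {
		uint64_t tv64;
		struct { uint32_t tbu, tbl; } tv32;	/* big-endian word order */
	};
};

int main(void)
{
	struct exit_timing t;

	t.tv32.tbu = 0x00000001;	/* upper timebase word */
	t.tv32.tbl = 0x00000002;	/* lower timebase word */

	/* Prints 0000000100000002 on a big-endian host. */
	printf("%016llx\n", (unsigned long long)t.tv64);
	return 0;
}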
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index bb62ad876de3..36d2a50a8487 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -29,11 +29,6 @@
29 | #include <linux/kvm_types.h> | 29 | #include <linux/kvm_types.h> |
30 | #include <linux/kvm_host.h> | 30 | #include <linux/kvm_host.h> |
31 | 31 | ||
32 | struct kvm_tlb { | ||
33 | struct tlbe guest_tlb[PPC44x_TLB_SIZE]; | ||
34 | struct tlbe shadow_tlb[PPC44x_TLB_SIZE]; | ||
35 | }; | ||
36 | |||
37 | enum emulation_result { | 32 | enum emulation_result { |
38 | EMULATE_DONE, /* no further processing */ | 33 | EMULATE_DONE, /* no further processing */ |
39 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ | 34 | EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ |
@@ -41,9 +36,6 @@ enum emulation_result {
41 | EMULATE_FAIL, /* can't emulate this instruction */ | 36 | EMULATE_FAIL, /* can't emulate this instruction */ |
42 | }; | 37 | }; |
43 | 38 | ||
44 | extern const unsigned char exception_priority[]; | ||
45 | extern const unsigned char priority_exception[]; | ||
46 | |||
47 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | 39 | extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); |
48 | extern char kvmppc_handlers_start[]; | 40 | extern char kvmppc_handlers_start[]; |
49 | extern unsigned long kvmppc_handler_len; | 41 | extern unsigned long kvmppc_handler_len; |
@@ -58,51 +50,44 @@ extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
58 | extern int kvmppc_emulate_instruction(struct kvm_run *run, | 50 | extern int kvmppc_emulate_instruction(struct kvm_run *run, |
59 | struct kvm_vcpu *vcpu); | 51 | struct kvm_vcpu *vcpu); |
60 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); | 52 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); |
53 | extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); | ||
61 | 54 | ||
62 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, | 55 | extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, |
63 | u64 asid, u32 flags); | 56 | u64 asid, u32 flags, u32 max_bytes, |
64 | extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, | 57 | unsigned int gtlb_idx); |
65 | gva_t eend, u32 asid); | ||
66 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); | 58 | extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); |
67 | extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); | 59 | extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); |
68 | 60 | ||
69 | /* XXX Book E specific */ | 61 | /* Core-specific hooks */ |
70 | extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i); | 62 | |
71 | 63 | extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, | |
72 | extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu); | 64 | unsigned int id); |
73 | 65 | extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu); | |
74 | static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception) | 66 | extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu); |
75 | { | 67 | extern int kvmppc_core_check_processor_compat(void); |
76 | unsigned int priority = exception_priority[exception]; | 68 | extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, |
77 | set_bit(priority, &vcpu->arch.pending_exceptions); | 69 | struct kvm_translation *tr); |
78 | } | 70 | |
79 | 71 | extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | |
80 | static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception) | 72 | extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); |
81 | { | 73 | |
82 | unsigned int priority = exception_priority[exception]; | 74 | extern void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu); |
83 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 75 | extern void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu); |
84 | } | 76 | |
85 | 77 | extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); | |
86 | /* Helper function for "full" MSR writes. No need to call this if only EE is | 78 | extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); |
87 | * changing. */ | 79 | extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu); |
88 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | 80 | extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); |
89 | { | 81 | extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
90 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | 82 | struct kvm_interrupt *irq); |
91 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | 83 | |
92 | 84 | extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
93 | vcpu->arch.msr = new_msr; | 85 | unsigned int op, int *advance); |
94 | 86 | extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); | |
95 | if (vcpu->arch.msr & MSR_WE) | 87 | extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); |
96 | kvm_vcpu_block(vcpu); | 88 | |
97 | } | 89 | extern int kvmppc_booke_init(void); |
98 | 90 | extern void kvmppc_booke_exit(void); | |
99 | static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) | ||
100 | { | ||
101 | if (vcpu->arch.pid != new_pid) { | ||
102 | vcpu->arch.pid = new_pid; | ||
103 | vcpu->arch.swap_pid = 1; | ||
104 | } | ||
105 | } | ||
106 | 91 | ||
107 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); | 92 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); |
108 | 93 | ||
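[Editor's note: the inline helpers removed here give way to kvmppc_core_* declarations. Generic KVM code calls the hooks, and whichever core file is built in (44x.c below) supplies the definitions, so dispatch is resolved at link time rather than with runtime checks. A single-file sketch of the split, using hypothetical names:]

#include <stdio.h>

/* Generic side: only the declaration is visible. */
int core_check_processor_compat(void);

static int generic_kvm_init(void)
{
	return core_check_processor_compat();
}

/* Core-specific side: in the kernel this lives in a separate file
 * (e.g. 44x.c) selected at build time. */
int core_check_processor_compat(void)
{
	return 0;	/* pretend the host CPU is supported */
}

int main(void)
{
	printf("compat=%d\n", generic_kvm_init());
	return 0;
}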
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 8a97cfb08b7e..27cc6fdcd3b7 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -56,6 +56,7 @@
56 | #ifndef __ASSEMBLY__ | 56 | #ifndef __ASSEMBLY__ |
57 | 57 | ||
58 | extern unsigned int tlb_44x_hwater; | 58 | extern unsigned int tlb_44x_hwater; |
59 | extern unsigned int tlb_44x_index; | ||
59 | 60 | ||
60 | typedef struct { | 61 | typedef struct { |
61 | unsigned int id; | 62 | unsigned int id; |
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
new file mode 100644
index 000000000000..ef824ae4b79c
--- /dev/null
+++ b/arch/powerpc/include/asm/swab.h
@@ -0,0 +1,90 @@
1 | #ifndef _ASM_POWERPC_SWAB_H | ||
2 | #define _ASM_POWERPC_SWAB_H | ||
3 | |||
4 | /* | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <asm/types.h> | ||
12 | #include <linux/compiler.h> | ||
13 | |||
14 | #ifdef __GNUC__ | ||
15 | |||
16 | #ifndef __powerpc64__ | ||
17 | #define __SWAB_64_THRU_32__ | ||
18 | #endif /* __powerpc64__ */ | ||
19 | |||
20 | #ifdef __KERNEL__ | ||
21 | |||
22 | static __inline__ __u16 ld_le16(const volatile __u16 *addr) | ||
23 | { | ||
24 | __u16 val; | ||
25 | |||
26 | __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); | ||
27 | return val; | ||
28 | } | ||
29 | #define __arch_swab16p ld_le16 | ||
30 | |||
31 | static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) | ||
32 | { | ||
33 | __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); | ||
34 | } | ||
35 | |||
36 | static inline void __arch_swab16s(__u16 *addr) | ||
37 | { | ||
38 | st_le16(addr, *addr); | ||
39 | } | ||
40 | #define __arch_swab16s __arch_swab16s | ||
41 | |||
42 | static __inline__ __u32 ld_le32(const volatile __u32 *addr) | ||
43 | { | ||
44 | __u32 val; | ||
45 | |||
46 | __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); | ||
47 | return val; | ||
48 | } | ||
49 | #define __arch_swab32p ld_le32 | ||
50 | |||
51 | static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) | ||
52 | { | ||
53 | __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); | ||
54 | } | ||
55 | |||
56 | static inline void __arch_swab32s(__u32 *addr) | ||
57 | { | ||
58 | st_le32(addr, *addr); | ||
59 | } | ||
60 | #define __arch_swab32s __arch_swab32s | ||
61 | |||
62 | static inline __attribute_const__ __u16 __arch_swab16(__u16 value) | ||
63 | { | ||
64 | __u16 result; | ||
65 | |||
66 | __asm__("rlwimi %0,%1,8,16,23" | ||
67 | : "=r" (result) | ||
68 | : "r" (value), "0" (value >> 8)); | ||
69 | return result; | ||
70 | } | ||
71 | #define __arch_swab16 __arch_swab16 | ||
72 | |||
73 | static inline __attribute_const__ __u32 __arch_swab32(__u32 value) | ||
74 | { | ||
75 | __u32 result; | ||
76 | |||
77 | __asm__("rlwimi %0,%1,24,16,23\n\t" | ||
78 | "rlwimi %0,%1,8,8,15\n\t" | ||
79 | "rlwimi %0,%1,24,0,7" | ||
80 | : "=r" (result) | ||
81 | : "r" (value), "0" (value >> 24)); | ||
82 | return result; | ||
83 | } | ||
84 | #define __arch_swab32 __arch_swab32 | ||
85 | |||
86 | #endif /* __KERNEL__ */ | ||
87 | |||
88 | #endif /* __GNUC__ */ | ||
89 | |||
90 | #endif /* _ASM_POWERPC_SWAB_H */ | ||
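[Editor's note: the rlwimi sequences above compute an ordinary byte reversal. A portable C reference for what __arch_swab32 returns (an illustration, not the kernel implementation):]

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) <<  8) |
	       ((v & 0x00ff0000u) >>  8) |
	       ((v & 0xff000000u) >> 24);
}

int main(void)
{
	printf("%08x\n", (unsigned)swab32(0x12345678u));	/* 78563412 */
	return 0;
}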
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c32da6f97999..375258559ae6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -22,11 +22,11 @@ static inline cpumask_t node_to_cpumask(int node)
22 | return numa_cpumask_lookup_table[node]; | 22 | return numa_cpumask_lookup_table[node]; |
23 | } | 23 | } |
24 | 24 | ||
25 | #define cpumask_of_node(node) (&numa_cpumask_lookup_table[node]) | ||
26 | |||
25 | static inline int node_to_first_cpu(int node) | 27 | static inline int node_to_first_cpu(int node) |
26 | { | 28 | { |
27 | cpumask_t tmp; | 29 | return cpumask_first(cpumask_of_node(node)); |
28 | tmp = node_to_cpumask(node); | ||
29 | return first_cpu(tmp); | ||
30 | } | 30 | } |
31 | 31 | ||
32 | int of_node_to_nid(struct device_node *device); | 32 | int of_node_to_nid(struct device_node *device); |
@@ -46,9 +46,12 @@ static inline int pcibus_to_node(struct pci_bus *bus)
46 | node_to_cpumask(pcibus_to_node(bus)) \ | 46 | node_to_cpumask(pcibus_to_node(bus)) \ |
47 | ) | 47 | ) |
48 | 48 | ||
49 | #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ | ||
50 | cpu_all_mask : \ | ||
51 | cpumask_of_node(pcibus_to_node(bus))) | ||
52 | |||
49 | /* sched_domains SD_NODE_INIT for PPC64 machines */ | 53 | /* sched_domains SD_NODE_INIT for PPC64 machines */ |
50 | #define SD_NODE_INIT (struct sched_domain) { \ | 54 | #define SD_NODE_INIT (struct sched_domain) { \ |
51 | .span = CPU_MASK_NONE, \ | ||
52 | .parent = NULL, \ | 55 | .parent = NULL, \ |
53 | .child = NULL, \ | 56 | .child = NULL, \ |
54 | .groups = NULL, \ | 57 | .groups = NULL, \ |
@@ -109,6 +112,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
109 | 112 | ||
110 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) | 113 | #define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) |
111 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) | 114 | #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu)) |
115 | #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) | ||
116 | #define topology_core_cpumask(cpu) (&per_cpu(cpu_core_map, cpu)) | ||
112 | #define topology_core_id(cpu) (cpu_to_core_id(cpu)) | 117 | #define topology_core_id(cpu) (cpu_to_core_id(cpu)) |
113 | #endif | 118 | #endif |
114 | #endif | 119 | #endif |
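[Editor's note: cpumask_of_node() hands back a pointer into the per-node lookup table instead of copying a cpumask by value, which matters once NR_CPUS makes the mask large. A toy sketch of the pattern with a hypothetical one-word mask type:]

#include <stdio.h>

typedef struct { unsigned long bits; } cpumask_t;

static cpumask_t numa_cpumask_lookup_table[2] = {
	{ 0x0fUL },	/* node 0: cpus 0-3 */
	{ 0xf0UL },	/* node 1: cpus 4-7 */
};

#define cpumask_of_node(node) (&numa_cpumask_lookup_table[node])

/* First set bit, like the kernel's cpumask_first() (gcc builtin). */
static int cpumask_first(const cpumask_t *m)
{
	return __builtin_ctzl(m->bits);
}

int main(void)
{
	printf("first cpu of node 1: %d\n",
	       cpumask_first(cpumask_of_node(1)));	/* prints 4 */
	return 0;
}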
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 661d07d2146b..9937fe44555f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -23,9 +23,6 @@
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/suspend.h> | 24 | #include <linux/suspend.h> |
25 | #include <linux/hrtimer.h> | 25 | #include <linux/hrtimer.h> |
26 | #ifdef CONFIG_KVM | ||
27 | #include <linux/kvm_host.h> | ||
28 | #endif | ||
29 | #ifdef CONFIG_PPC64 | 26 | #ifdef CONFIG_PPC64 |
30 | #include <linux/time.h> | 27 | #include <linux/time.h> |
31 | #include <linux/hardirq.h> | 28 | #include <linux/hardirq.h> |
@@ -51,6 +48,9 @@
51 | #ifdef CONFIG_PPC_ISERIES | 48 | #ifdef CONFIG_PPC_ISERIES |
52 | #include <asm/iseries/alpaca.h> | 49 | #include <asm/iseries/alpaca.h> |
53 | #endif | 50 | #endif |
51 | #ifdef CONFIG_KVM | ||
52 | #include <asm/kvm_44x.h> | ||
53 | #endif | ||
54 | 54 | ||
55 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) | 55 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
56 | #include "head_booke.h" | 56 | #include "head_booke.h" |
@@ -357,12 +357,10 @@ int main(void)
357 | DEFINE(PTE_SIZE, sizeof(pte_t)); | 357 | DEFINE(PTE_SIZE, sizeof(pte_t)); |
358 | 358 | ||
359 | #ifdef CONFIG_KVM | 359 | #ifdef CONFIG_KVM |
360 | DEFINE(TLBE_BYTES, sizeof(struct tlbe)); | 360 | DEFINE(TLBE_BYTES, sizeof(struct kvmppc_44x_tlbe)); |
361 | 361 | ||
362 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); | 362 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); |
363 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); | 363 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); |
364 | DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb)); | ||
365 | DEFINE(VCPU_SHADOW_MOD, offsetof(struct kvm_vcpu, arch.shadow_tlb_mod)); | ||
366 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); | 364 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); |
367 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); | 365 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); |
368 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); | 366 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); |
@@ -385,5 +383,16 @@ int main(void)
385 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); | 383 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
386 | #endif | 384 | #endif |
387 | 385 | ||
386 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
387 | DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, | ||
388 | arch.timing_exit.tv32.tbu)); | ||
389 | DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, | ||
390 | arch.timing_exit.tv32.tbl)); | ||
391 | DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, | ||
392 | arch.timing_last_enter.tv32.tbu)); | ||
393 | DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, | ||
394 | arch.timing_last_enter.tv32.tbl)); | ||
395 | #endif | ||
396 | |||
388 | return 0; | 397 | return 0; |
389 | } | 398 | } |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ac222d0ab12e..23b8b5e36f98 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -237,7 +237,7 @@ void fixup_irqs(cpumask_t map)
237 | mask = map; | 237 | mask = map; |
238 | } | 238 | } |
239 | if (irq_desc[irq].chip->set_affinity) | 239 | if (irq_desc[irq].chip->set_affinity) |
240 | irq_desc[irq].chip->set_affinity(irq, mask); | 240 | irq_desc[irq].chip->set_affinity(irq, &mask); |
241 | else if (irq_desc[irq].action && !(warned++)) | 241 | else if (irq_desc[irq].action && !(warned++)) |
242 | printk("Cannot set affinity for irq %i\n", irq); | 242 | printk("Cannot set affinity for irq %i\n", irq); |
243 | } | 243 | } |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index de79915452c8..c9329786073b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -96,9 +96,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
96 | 96 | ||
97 | void __kprobes arch_remove_kprobe(struct kprobe *p) | 97 | void __kprobes arch_remove_kprobe(struct kprobe *p) |
98 | { | 98 | { |
99 | mutex_lock(&kprobe_mutex); | 99 | if (p->ainsn.insn) { |
100 | free_insn_slot(p->ainsn.insn, 0); | 100 | free_insn_slot(p->ainsn.insn, 0); |
101 | mutex_unlock(&kprobe_mutex); | 101 | p->ainsn.insn = NULL; |
102 | } | ||
102 | } | 103 | } |
103 | 104 | ||
104 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 105 | static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
@@ -316,7 +317,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
316 | /* | 317 | /* |
317 | * It is possible to have multiple instances associated with a given | 318 | * It is possible to have multiple instances associated with a given |
318 | * task either because an multiple functions in the call path | 319 | * task either because an multiple functions in the call path |
319 | * have a return probe installed on them, and/or more then one return | 320 | * have a return probe installed on them, and/or more than one return |
320 | * return probe was registered for a target function. | 321 | * return probe was registered for a target function. |
321 | * | 322 | * |
322 | * We can handle this because: | 323 | * We can handle this because: |
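[Editor's note: the arch_remove_kprobe() change above replaces locking with a guard-and-clear: the slot is freed only if still present, and the pointer is cleared so a second call is a no-op. The idiom in plain C (illustrative, not the kprobes API):]

#include <stdlib.h>

struct probe { void *insn; };

static void probe_release(struct probe *p)
{
	if (p->insn) {
		free(p->insn);
		p->insn = NULL;	/* make a repeated release harmless */
	}
}

int main(void)
{
	struct probe p = { .insn = malloc(16) };

	probe_release(&p);
	probe_release(&p);	/* safe: insn is already NULL */
	return 0;
}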
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 51b201ddf9a1..fb7049c054c0 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -33,6 +33,7 @@
33 | #include <linux/mqueue.h> | 33 | #include <linux/mqueue.h> |
34 | #include <linux/hardirq.h> | 34 | #include <linux/hardirq.h> |
35 | #include <linux/utsname.h> | 35 | #include <linux/utsname.h> |
36 | #include <linux/kernel_stat.h> | ||
36 | 37 | ||
37 | #include <asm/pgtable.h> | 38 | #include <asm/pgtable.h> |
38 | #include <asm/uaccess.h> | 39 | #include <asm/uaccess.h> |
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 8c1335566089..8f0856f312da 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -232,11 +232,6 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
232 | } | 232 | } |
233 | EXPORT_SYMBOL_GPL(of_pci_address_to_resource); | 233 | EXPORT_SYMBOL_GPL(of_pci_address_to_resource); |
234 | 234 | ||
235 | static u8 of_irq_pci_swizzle(u8 slot, u8 pin) | ||
236 | { | ||
237 | return (((pin - 1) + slot) % 4) + 1; | ||
238 | } | ||
239 | |||
240 | int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | 235 | int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) |
241 | { | 236 | { |
242 | struct device_node *dn, *ppnode; | 237 | struct device_node *dn, *ppnode; |
@@ -306,7 +301,7 @@ int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
306 | /* We can only get here if we hit a P2P bridge with no node, | 301 | /* We can only get here if we hit a P2P bridge with no node, |
307 | * let's do standard swizzling and try again | 302 | * let's do standard swizzling and try again |
308 | */ | 303 | */ |
309 | lspec = of_irq_pci_swizzle(PCI_SLOT(pdev->devfn), lspec); | 304 | lspec = pci_swizzle_interrupt_pin(pdev, lspec); |
310 | pdev = ppdev; | 305 | pdev = ppdev; |
311 | } | 306 | } |
312 | 307 | ||
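[Editor's note: the removed of_irq_pci_swizzle() computed the standard PCI interrupt swizzle, now provided by the generic pci_swizzle_interrupt_pin(): pins are 1-based (INTA=1..INTD=4) and rotate by the device's slot number. A worked example:]

#include <stdio.h>

static unsigned char swizzle(unsigned char slot, unsigned char pin)
{
	return (((pin - 1) + slot) % 4) + 1;
}

int main(void)
{
	/* INTA (pin 1) on slot 2 emerges as INTC (pin 3) at the bridge. */
	printf("%u\n", (unsigned)swizzle(2, 1));
	return 0;
}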
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ac3f721d235..65484b2200b3 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -59,13 +59,9 @@
59 | 59 | ||
60 | struct thread_info *secondary_ti; | 60 | struct thread_info *secondary_ti; |
61 | 61 | ||
62 | cpumask_t cpu_possible_map = CPU_MASK_NONE; | ||
63 | cpumask_t cpu_online_map = CPU_MASK_NONE; | ||
64 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; | 62 | DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; |
65 | DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; | 63 | DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; |
66 | 64 | ||
67 | EXPORT_SYMBOL(cpu_online_map); | ||
68 | EXPORT_SYMBOL(cpu_possible_map); | ||
69 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); | 65 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
70 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); | 66 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
71 | 67 | ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index e1f3a5140429..c9564031a2a9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -256,8 +256,10 @@ void account_system_vtime(struct task_struct *tsk)
256 | delta += sys_time; | 256 | delta += sys_time; |
257 | get_paca()->system_time = 0; | 257 | get_paca()->system_time = 0; |
258 | } | 258 | } |
259 | account_system_time(tsk, 0, delta); | 259 | if (in_irq() || idle_task(smp_processor_id()) != tsk) |
260 | account_system_time_scaled(tsk, deltascaled); | 260 | account_system_time(tsk, 0, delta, deltascaled); |
261 | else | ||
262 | account_idle_time(delta); | ||
261 | per_cpu(cputime_last_delta, smp_processor_id()) = delta; | 263 | per_cpu(cputime_last_delta, smp_processor_id()) = delta; |
262 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; | 264 | per_cpu(cputime_scaled_last_delta, smp_processor_id()) = deltascaled; |
263 | local_irq_restore(flags); | 265 | local_irq_restore(flags); |
@@ -275,10 +277,8 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
275 | 277 | ||
276 | utime = get_paca()->user_time; | 278 | utime = get_paca()->user_time; |
277 | get_paca()->user_time = 0; | 279 | get_paca()->user_time = 0; |
278 | account_user_time(tsk, utime); | ||
279 | |||
280 | utimescaled = cputime_to_scaled(utime); | 280 | utimescaled = cputime_to_scaled(utime); |
281 | account_user_time_scaled(tsk, utimescaled); | 281 | account_user_time(tsk, utime, utimescaled); |
282 | } | 282 | } |
283 | 283 | ||
284 | /* | 284 | /* |
@@ -338,8 +338,12 @@ void calculate_steal_time(void)
338 | tb = mftb(); | 338 | tb = mftb(); |
339 | purr = mfspr(SPRN_PURR); | 339 | purr = mfspr(SPRN_PURR); |
340 | stolen = (tb - pme->tb) - (purr - pme->purr); | 340 | stolen = (tb - pme->tb) - (purr - pme->purr); |
341 | if (stolen > 0) | 341 | if (stolen > 0) { |
342 | account_steal_time(current, stolen); | 342 | if (idle_task(smp_processor_id()) != current) |
343 | account_steal_time(stolen); | ||
344 | else | ||
345 | account_idle_time(stolen); | ||
346 | } | ||
343 | pme->tb = tb; | 347 | pme->tb = tb; |
344 | pme->purr = purr; | 348 | pme->purr = purr; |
345 | } | 349 | } |
@@ -844,7 +848,7 @@ static void register_decrementer_clockevent(int cpu)
844 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; | 848 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; |
845 | 849 | ||
846 | *dec = decrementer_clockevent; | 850 | *dec = decrementer_clockevent; |
847 | dec->cpumask = cpumask_of_cpu(cpu); | 851 | dec->cpumask = cpumask_of(cpu); |
848 | 852 | ||
849 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", | 853 | printk(KERN_DEBUG "clockevent: %s mult[%lx] shift[%d] cpu[%d]\n", |
850 | dec->name, dec->mult, dec->shift, cpu); | 854 | dec->name, dec->mult, dec->shift, cpu); |
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
new file mode 100644
index 000000000000..a66bec57265a
--- /dev/null
+++ b/arch/powerpc/kvm/44x.c
@@ -0,0 +1,228 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/kvm_host.h> | ||
21 | #include <linux/err.h> | ||
22 | |||
23 | #include <asm/reg.h> | ||
24 | #include <asm/cputable.h> | ||
25 | #include <asm/tlbflush.h> | ||
26 | #include <asm/kvm_44x.h> | ||
27 | #include <asm/kvm_ppc.h> | ||
28 | |||
29 | #include "44x_tlb.h" | ||
30 | |||
31 | /* Note: clearing MSR[DE] just means that the debug interrupt will not be | ||
32 | * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits. | ||
33 | * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt | ||
34 | * will be delivered as an "imprecise debug event" (which is indicated by | ||
35 | * DBSR[IDE]. | ||
36 | */ | ||
37 | static void kvm44x_disable_debug_interrupts(void) | ||
38 | { | ||
39 | mtmsr(mfmsr() & ~MSR_DE); | ||
40 | } | ||
41 | |||
42 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | ||
43 | { | ||
44 | kvm44x_disable_debug_interrupts(); | ||
45 | |||
46 | mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]); | ||
47 | mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]); | ||
48 | mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]); | ||
49 | mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]); | ||
50 | mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1); | ||
51 | mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2); | ||
52 | mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0); | ||
53 | mtmsr(vcpu->arch.host_msr); | ||
54 | } | ||
55 | |||
56 | void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | ||
57 | { | ||
58 | struct kvm_guest_debug *dbg = &vcpu->guest_debug; | ||
59 | u32 dbcr0 = 0; | ||
60 | |||
61 | vcpu->arch.host_msr = mfmsr(); | ||
62 | kvm44x_disable_debug_interrupts(); | ||
63 | |||
64 | /* Save host debug register state. */ | ||
65 | vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1); | ||
66 | vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2); | ||
67 | vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3); | ||
68 | vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4); | ||
69 | vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0); | ||
70 | vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1); | ||
71 | vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2); | ||
72 | |||
73 | /* set registers up for guest */ | ||
74 | |||
75 | if (dbg->bp[0]) { | ||
76 | mtspr(SPRN_IAC1, dbg->bp[0]); | ||
77 | dbcr0 |= DBCR0_IAC1 | DBCR0_IDM; | ||
78 | } | ||
79 | if (dbg->bp[1]) { | ||
80 | mtspr(SPRN_IAC2, dbg->bp[1]); | ||
81 | dbcr0 |= DBCR0_IAC2 | DBCR0_IDM; | ||
82 | } | ||
83 | if (dbg->bp[2]) { | ||
84 | mtspr(SPRN_IAC3, dbg->bp[2]); | ||
85 | dbcr0 |= DBCR0_IAC3 | DBCR0_IDM; | ||
86 | } | ||
87 | if (dbg->bp[3]) { | ||
88 | mtspr(SPRN_IAC4, dbg->bp[3]); | ||
89 | dbcr0 |= DBCR0_IAC4 | DBCR0_IDM; | ||
90 | } | ||
91 | |||
92 | mtspr(SPRN_DBCR0, dbcr0); | ||
93 | mtspr(SPRN_DBCR1, 0); | ||
94 | mtspr(SPRN_DBCR2, 0); | ||
95 | } | ||
96 | |||
97 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
98 | { | ||
99 | kvmppc_44x_tlb_load(vcpu); | ||
100 | } | ||
101 | |||
102 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
103 | { | ||
104 | kvmppc_44x_tlb_put(vcpu); | ||
105 | } | ||
106 | |||
107 | int kvmppc_core_check_processor_compat(void) | ||
108 | { | ||
109 | int r; | ||
110 | |||
111 | if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) | ||
112 | r = 0; | ||
113 | else | ||
114 | r = -ENOTSUPP; | ||
115 | |||
116 | return r; | ||
117 | } | ||
118 | |||
119 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | ||
120 | { | ||
121 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
122 | struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0]; | ||
123 | int i; | ||
124 | |||
125 | tlbe->tid = 0; | ||
126 | tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID; | ||
127 | tlbe->word1 = 0; | ||
128 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR; | ||
129 | |||
130 | tlbe++; | ||
131 | tlbe->tid = 0; | ||
132 | tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID; | ||
133 | tlbe->word1 = 0xef600000; | ||
134 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR | ||
135 | | PPC44x_TLB_I | PPC44x_TLB_G; | ||
136 | |||
137 | /* Since the guest can directly access the timebase, it must know the | ||
138 | * real timebase frequency. Accordingly, it must see the state of | ||
139 | * CCR1[TCS]. */ | ||
140 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); | ||
141 | |||
142 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) | ||
143 | vcpu_44x->shadow_refs[i].gtlb_index = -1; | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
149 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
150 | struct kvm_translation *tr) | ||
151 | { | ||
152 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
153 | struct kvmppc_44x_tlbe *gtlbe; | ||
154 | int index; | ||
155 | gva_t eaddr; | ||
156 | u8 pid; | ||
157 | u8 as; | ||
158 | |||
159 | eaddr = tr->linear_address; | ||
160 | pid = (tr->linear_address >> 32) & 0xff; | ||
161 | as = (tr->linear_address >> 40) & 0x1; | ||
162 | |||
163 | index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as); | ||
164 | if (index == -1) { | ||
165 | tr->valid = 0; | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | gtlbe = &vcpu_44x->guest_tlb[index]; | ||
170 | |||
171 | tr->physical_address = tlb_xlate(gtlbe, eaddr); | ||
172 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
173 | tr->valid = 1; | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
179 | { | ||
180 | struct kvmppc_vcpu_44x *vcpu_44x; | ||
181 | struct kvm_vcpu *vcpu; | ||
182 | int err; | ||
183 | |||
184 | vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
185 | if (!vcpu_44x) { | ||
186 | err = -ENOMEM; | ||
187 | goto out; | ||
188 | } | ||
189 | |||
190 | vcpu = &vcpu_44x->vcpu; | ||
191 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
192 | if (err) | ||
193 | goto free_vcpu; | ||
194 | |||
195 | return vcpu; | ||
196 | |||
197 | free_vcpu: | ||
198 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | ||
199 | out: | ||
200 | return ERR_PTR(err); | ||
201 | } | ||
202 | |||
203 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
204 | { | ||
205 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
206 | |||
207 | kvm_vcpu_uninit(vcpu); | ||
208 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | ||
209 | } | ||
210 | |||
211 | static int kvmppc_44x_init(void) | ||
212 | { | ||
213 | int r; | ||
214 | |||
215 | r = kvmppc_booke_init(); | ||
216 | if (r) | ||
217 | return r; | ||
218 | |||
219 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), THIS_MODULE); | ||
220 | } | ||
221 | |||
222 | static void kvmppc_44x_exit(void) | ||
223 | { | ||
224 | kvmppc_booke_exit(); | ||
225 | } | ||
226 | |||
227 | module_init(kvmppc_44x_init); | ||
228 | module_exit(kvmppc_44x_exit); | ||
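[Editor's note: kvmppc_core_vcpu_create() above uses the kernel's goto-unwind error style: each failure jumps to a label that releases exactly what was allocated so far. Reduced to plain C with hypothetical names:]

#include <errno.h>
#include <stdlib.h>

struct vcpu { void *state; };

static struct vcpu *vcpu_create(void)
{
	struct vcpu *v;
	int err;

	v = calloc(1, sizeof(*v));
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	v->state = malloc(64);	/* stand-in for kvm_vcpu_init() */
	if (!v->state) {
		err = -ENOMEM;
		goto free_vcpu;
	}

	return v;

free_vcpu:
	free(v);
out:
	(void)err;	/* the kernel would return ERR_PTR(err) here */
	return NULL;
}

int main(void)
{
	struct vcpu *v = vcpu_create();

	if (v) {
		free(v->state);
		free(v);
	}
	return 0;
}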
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
new file mode 100644
index 000000000000..82489a743a6f
--- /dev/null
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -0,0 +1,371 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <asm/kvm_ppc.h> | ||
21 | #include <asm/dcr.h> | ||
22 | #include <asm/dcr-regs.h> | ||
23 | #include <asm/disassemble.h> | ||
24 | #include <asm/kvm_44x.h> | ||
25 | #include "timing.h" | ||
26 | |||
27 | #include "booke.h" | ||
28 | #include "44x_tlb.h" | ||
29 | |||
30 | #define OP_RFI 19 | ||
31 | |||
32 | #define XOP_RFI 50 | ||
33 | #define XOP_MFMSR 83 | ||
34 | #define XOP_WRTEE 131 | ||
35 | #define XOP_MTMSR 146 | ||
36 | #define XOP_WRTEEI 163 | ||
37 | #define XOP_MFDCR 323 | ||
38 | #define XOP_MTDCR 451 | ||
39 | #define XOP_TLBSX 914 | ||
40 | #define XOP_ICCCI 966 | ||
41 | #define XOP_TLBWE 978 | ||
42 | |||
43 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | ||
44 | { | ||
45 | vcpu->arch.pc = vcpu->arch.srr0; | ||
46 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | ||
47 | } | ||
48 | |||
49 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
50 | unsigned int inst, int *advance) | ||
51 | { | ||
52 | int emulated = EMULATE_DONE; | ||
53 | int dcrn; | ||
54 | int ra; | ||
55 | int rb; | ||
56 | int rc; | ||
57 | int rs; | ||
58 | int rt; | ||
59 | int ws; | ||
60 | |||
61 | switch (get_op(inst)) { | ||
62 | case OP_RFI: | ||
63 | switch (get_xop(inst)) { | ||
64 | case XOP_RFI: | ||
65 | kvmppc_emul_rfi(vcpu); | ||
66 | kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); | ||
67 | *advance = 0; | ||
68 | break; | ||
69 | |||
70 | default: | ||
71 | emulated = EMULATE_FAIL; | ||
72 | break; | ||
73 | } | ||
74 | break; | ||
75 | |||
76 | case 31: | ||
77 | switch (get_xop(inst)) { | ||
78 | |||
79 | case XOP_MFMSR: | ||
80 | rt = get_rt(inst); | ||
81 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | ||
82 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | ||
83 | break; | ||
84 | |||
85 | case XOP_MTMSR: | ||
86 | rs = get_rs(inst); | ||
87 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | ||
88 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | ||
89 | break; | ||
90 | |||
91 | case XOP_WRTEE: | ||
92 | rs = get_rs(inst); | ||
93 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
94 | | (vcpu->arch.gpr[rs] & MSR_EE); | ||
95 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | ||
96 | break; | ||
97 | |||
98 | case XOP_WRTEEI: | ||
99 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
100 | | (inst & MSR_EE); | ||
101 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | ||
102 | break; | ||
103 | |||
104 | case XOP_MFDCR: | ||
105 | dcrn = get_dcrn(inst); | ||
106 | rt = get_rt(inst); | ||
107 | |||
108 | /* The guest may access CPR0 registers to determine the timebase | ||
109 | * frequency, and it must know the real host frequency because it | ||
110 | * can directly access the timebase registers. | ||
111 | * | ||
112 | * It would be possible to emulate those accesses in userspace, | ||
113 | * but userspace can really only figure out the end frequency. | ||
114 | * We could decompose that into the factors that compute it, but | ||
115 | * that's tricky math, and it's easier to just report the real | ||
116 | * CPR0 values. | ||
117 | */ | ||
118 | switch (dcrn) { | ||
119 | case DCRN_CPR0_CONFIG_ADDR: | ||
120 | vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; | ||
121 | break; | ||
122 | case DCRN_CPR0_CONFIG_DATA: | ||
123 | local_irq_disable(); | ||
124 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | ||
125 | vcpu->arch.cpr0_cfgaddr); | ||
126 | vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); | ||
127 | local_irq_enable(); | ||
128 | break; | ||
129 | default: | ||
130 | run->dcr.dcrn = dcrn; | ||
131 | run->dcr.data = 0; | ||
132 | run->dcr.is_write = 0; | ||
133 | vcpu->arch.io_gpr = rt; | ||
134 | vcpu->arch.dcr_needed = 1; | ||
135 | kvmppc_account_exit(vcpu, DCR_EXITS); | ||
136 | emulated = EMULATE_DO_DCR; | ||
137 | } | ||
138 | |||
139 | break; | ||
140 | |||
141 | case XOP_MTDCR: | ||
142 | dcrn = get_dcrn(inst); | ||
143 | rs = get_rs(inst); | ||
144 | |||
145 | /* emulate some access in kernel */ | ||
146 | switch (dcrn) { | ||
147 | case DCRN_CPR0_CONFIG_ADDR: | ||
148 | vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; | ||
149 | break; | ||
150 | default: | ||
151 | run->dcr.dcrn = dcrn; | ||
152 | run->dcr.data = vcpu->arch.gpr[rs]; | ||
153 | run->dcr.is_write = 1; | ||
154 | vcpu->arch.dcr_needed = 1; | ||
155 | kvmppc_account_exit(vcpu, DCR_EXITS); | ||
156 | emulated = EMULATE_DO_DCR; | ||
157 | } | ||
158 | |||
159 | break; | ||
160 | |||
161 | case XOP_TLBWE: | ||
162 | ra = get_ra(inst); | ||
163 | rs = get_rs(inst); | ||
164 | ws = get_ws(inst); | ||
165 | emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); | ||
166 | break; | ||
167 | |||
168 | case XOP_TLBSX: | ||
169 | rt = get_rt(inst); | ||
170 | ra = get_ra(inst); | ||
171 | rb = get_rb(inst); | ||
172 | rc = get_rc(inst); | ||
173 | emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); | ||
174 | break; | ||
175 | |||
176 | case XOP_ICCCI: | ||
177 | break; | ||
178 | |||
179 | default: | ||
180 | emulated = EMULATE_FAIL; | ||
181 | } | ||
182 | |||
183 | break; | ||
184 | |||
185 | default: | ||
186 | emulated = EMULATE_FAIL; | ||
187 | } | ||
188 | |||
189 | return emulated; | ||
190 | } | ||
191 | |||
192 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | ||
193 | { | ||
194 | switch (sprn) { | ||
195 | case SPRN_MMUCR: | ||
196 | vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; | ||
197 | case SPRN_PID: | ||
198 | kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; | ||
199 | case SPRN_CCR0: | ||
200 | vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; | ||
201 | case SPRN_CCR1: | ||
202 | vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; | ||
203 | case SPRN_DEAR: | ||
204 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | ||
205 | case SPRN_ESR: | ||
206 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | ||
207 | case SPRN_DBCR0: | ||
208 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | ||
209 | case SPRN_DBCR1: | ||
210 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | ||
211 | case SPRN_TSR: | ||
212 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | ||
213 | case SPRN_TCR: | ||
214 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | ||
215 | kvmppc_emulate_dec(vcpu); | ||
216 | break; | ||
217 | |||
218 | /* Note: SPRG4-7 are user-readable. These values are | ||
219 | * loaded into the real SPRGs when resuming the | ||
220 | * guest. */ | ||
221 | case SPRN_SPRG4: | ||
222 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | ||
223 | case SPRN_SPRG5: | ||
224 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | ||
225 | case SPRN_SPRG6: | ||
226 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | ||
227 | case SPRN_SPRG7: | ||
228 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | ||
229 | |||
230 | case SPRN_IVPR: | ||
231 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; | ||
232 | break; | ||
233 | case SPRN_IVOR0: | ||
234 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; | ||
235 | break; | ||
236 | case SPRN_IVOR1: | ||
237 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; | ||
238 | break; | ||
239 | case SPRN_IVOR2: | ||
240 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; | ||
241 | break; | ||
242 | case SPRN_IVOR3: | ||
243 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; | ||
244 | break; | ||
245 | case SPRN_IVOR4: | ||
246 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; | ||
247 | break; | ||
248 | case SPRN_IVOR5: | ||
249 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; | ||
250 | break; | ||
251 | case SPRN_IVOR6: | ||
252 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; | ||
253 | break; | ||
254 | case SPRN_IVOR7: | ||
255 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; | ||
256 | break; | ||
257 | case SPRN_IVOR8: | ||
258 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; | ||
259 | break; | ||
260 | case SPRN_IVOR9: | ||
261 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; | ||
262 | break; | ||
263 | case SPRN_IVOR10: | ||
264 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; | ||
265 | break; | ||
266 | case SPRN_IVOR11: | ||
267 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; | ||
268 | break; | ||
269 | case SPRN_IVOR12: | ||
270 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; | ||
271 | break; | ||
272 | case SPRN_IVOR13: | ||
273 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; | ||
274 | break; | ||
275 | case SPRN_IVOR14: | ||
276 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; | ||
277 | break; | ||
278 | case SPRN_IVOR15: | ||
279 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; | ||
280 | break; | ||
281 | |||
282 | default: | ||
283 | return EMULATE_FAIL; | ||
284 | } | ||
285 | |||
286 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | ||
287 | return EMULATE_DONE; | ||
288 | } | ||
289 | |||
290 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | ||
291 | { | ||
292 | switch (sprn) { | ||
293 | /* 440 */ | ||
294 | case SPRN_MMUCR: | ||
295 | vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; | ||
296 | case SPRN_CCR0: | ||
297 | vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; | ||
298 | case SPRN_CCR1: | ||
299 | vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; | ||
300 | |||
301 | /* Book E */ | ||
302 | case SPRN_PID: | ||
303 | vcpu->arch.gpr[rt] = vcpu->arch.pid; break; | ||
304 | case SPRN_IVPR: | ||
305 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | ||
306 | case SPRN_DEAR: | ||
307 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | ||
308 | case SPRN_ESR: | ||
309 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | ||
310 | case SPRN_DBCR0: | ||
311 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | ||
312 | case SPRN_DBCR1: | ||
313 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | ||
314 | |||
315 | case SPRN_IVOR0: | ||
316 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | ||
317 | break; | ||
318 | case SPRN_IVOR1: | ||
319 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | ||
320 | break; | ||
321 | case SPRN_IVOR2: | ||
322 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | ||
323 | break; | ||
324 | case SPRN_IVOR3: | ||
325 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | ||
326 | break; | ||
327 | case SPRN_IVOR4: | ||
328 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | ||
329 | break; | ||
330 | case SPRN_IVOR5: | ||
331 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | ||
332 | break; | ||
333 | case SPRN_IVOR6: | ||
334 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | ||
335 | break; | ||
336 | case SPRN_IVOR7: | ||
337 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | ||
338 | break; | ||
339 | case SPRN_IVOR8: | ||
340 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | ||
341 | break; | ||
342 | case SPRN_IVOR9: | ||
343 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | ||
344 | break; | ||
345 | case SPRN_IVOR10: | ||
346 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | ||
347 | break; | ||
348 | case SPRN_IVOR11: | ||
349 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | ||
350 | break; | ||
351 | case SPRN_IVOR12: | ||
352 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | ||
353 | break; | ||
354 | case SPRN_IVOR13: | ||
355 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | ||
356 | break; | ||
357 | case SPRN_IVOR14: | ||
358 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | ||
359 | break; | ||
360 | case SPRN_IVOR15: | ||
361 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | ||
362 | break; | ||
363 | |||
364 | default: | ||
365 | return EMULATE_FAIL; | ||
366 | } | ||
367 | |||
368 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | ||
369 | return EMULATE_DONE; | ||
370 | } | ||
371 | |||
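The IVOR plumbing above is one half of interrupt reflection; the other half appears later in booke.c, where delivery computes the guest handler address as IVPR | IVOR[priority]. A minimal user-space sketch of that address formula (plain C with illustrative values, not kernel code):

    #include <stdio.h>

    #define NR_IRQPRIO 16

    struct vcpu_sketch {
        unsigned int ivpr;               /* vector prefix, high-order bits */
        unsigned int ivor[NR_IRQPRIO];   /* per-priority handler offsets */
    };

    /* Same computation as the delivery path: pc = ivpr | ivor[priority]. */
    static unsigned int handler_addr(const struct vcpu_sketch *v, unsigned int prio)
    {
        return v->ivpr | v->ivor[prio];
    }

    int main(void)
    {
        struct vcpu_sketch v = { .ivpr = 0xfff00000 };

        v.ivor[6] = 0x700;   /* hypothetical program-check offset */
        printf("handler: 0x%08x\n", handler_addr(&v, 6));
        return 0;
    }

The guest thus controls its own vector layout entirely through these two registers, and the hypervisor simply replays the hardware's calculation at delivery time.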
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index ad72c6f9811f..9a34b8edb9e2 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -22,20 +22,103 @@ | |||
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
25 | |||
26 | #include <asm/tlbflush.h> | ||
25 | #include <asm/mmu-44x.h> | 27 | #include <asm/mmu-44x.h> |
26 | #include <asm/kvm_ppc.h> | 28 | #include <asm/kvm_ppc.h> |
29 | #include <asm/kvm_44x.h> | ||
30 | #include "timing.h" | ||
27 | 31 | ||
28 | #include "44x_tlb.h" | 32 | #include "44x_tlb.h" |
29 | 33 | ||
34 | #ifndef PPC44x_TLBE_SIZE | ||
35 | #define PPC44x_TLBE_SIZE PPC44x_TLB_4K | ||
36 | #endif | ||
37 | |||
38 | #define PAGE_SIZE_4K (1<<12) | ||
39 | #define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) | ||
40 | |||
41 | #define PPC44x_TLB_UATTR_MASK \ | ||
42 | (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3) | ||
30 | #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) | 43 | #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) |
31 | #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) | 44 | #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) |
32 | 45 | ||
33 | static unsigned int kvmppc_tlb_44x_pos; | 46 | #ifdef DEBUG |
47 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) | ||
48 | { | ||
49 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
50 | struct kvmppc_44x_tlbe *tlbe; | ||
51 | int i; | ||
52 | |||
53 | printk("vcpu %d TLB dump:\n", vcpu->vcpu_id); | ||
54 | printk("| %2s | %3s | %8s | %8s | %8s |\n", | ||
55 | "nr", "tid", "word0", "word1", "word2"); | ||
56 | |||
57 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { | ||
58 | tlbe = &vcpu_44x->guest_tlb[i]; | ||
59 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
60 | printk(" G%2d | %02X | %08X | %08X | %08X |\n", | ||
61 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
62 | tlbe->word2); | ||
63 | } | ||
64 | } | ||
65 | #endif | ||
65 | |||
66 | static inline void kvmppc_44x_tlbie(unsigned int index) | ||
67 | { | ||
68 | /* 0 <= index < 64, so the V bit is clear and we can use the index as | ||
69 | * word0. */ | ||
70 | asm volatile( | ||
71 | "tlbwe %[index], %[index], 0\n" | ||
72 | : | ||
73 | : [index] "r"(index) | ||
74 | ); | ||
75 | } | ||
76 | |||
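The comment explains the invalidation trick: word0's valid bit sits at bit 9 (see get_tlb_v() in 44x_tlb.h later in this patch), so any index below 64 doubles as an all-invalid word0 value. A small sketch verifying that property, assuming the same bit layout as the helpers in this patch:

    #include <assert.h>

    /* Mirrors get_tlb_v() from 44x_tlb.h: V is bit 9 of word0. */
    static int word0_valid(unsigned int word0)
    {
        return (word0 >> 9) & 0x1;
    }

    int main(void)
    {
        unsigned int index;

        /* Any hardware index in [0, 64) written back as word0 leaves V clear. */
        for (index = 0; index < 64; index++)
            assert(word0_valid(index) == 0);
        return 0;
    }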
77 | static inline void kvmppc_44x_tlbre(unsigned int index, | ||
78 | struct kvmppc_44x_tlbe *tlbe) | ||
79 | { | ||
80 | asm volatile( | ||
81 | "tlbre %[word0], %[index], 0\n" | ||
82 | "mfspr %[tid], %[sprn_mmucr]\n" | ||
83 | "andi. %[tid], %[tid], 0xff\n" | ||
84 | "tlbre %[word1], %[index], 1\n" | ||
85 | "tlbre %[word2], %[index], 2\n" | ||
86 | : [word0] "=r"(tlbe->word0), | ||
87 | [word1] "=r"(tlbe->word1), | ||
88 | [word2] "=r"(tlbe->word2), | ||
89 | [tid] "=r"(tlbe->tid) | ||
90 | : [index] "r"(index), | ||
91 | [sprn_mmucr] "i"(SPRN_MMUCR) | ||
92 | : "cc" | ||
93 | ); | ||
94 | } | ||
95 | |||
96 | static inline void kvmppc_44x_tlbwe(unsigned int index, | ||
97 | struct kvmppc_44x_tlbe *stlbe) | ||
98 | { | ||
99 | unsigned long tmp; | ||
100 | |||
101 | asm volatile( | ||
102 | "mfspr %[tmp], %[sprn_mmucr]\n" | ||
103 | "rlwimi %[tmp], %[tid], 0, 0xff\n" | ||
104 | "mtspr %[sprn_mmucr], %[tmp]\n" | ||
105 | "tlbwe %[word0], %[index], 0\n" | ||
106 | "tlbwe %[word1], %[index], 1\n" | ||
107 | "tlbwe %[word2], %[index], 2\n" | ||
108 | : [tmp] "=&r"(tmp) | ||
109 | : [word0] "r"(stlbe->word0), | ||
110 | [word1] "r"(stlbe->word1), | ||
111 | [word2] "r"(stlbe->word2), | ||
112 | [tid] "r"(stlbe->tid), | ||
113 | [index] "r"(index), | ||
114 | [sprn_mmucr] "i"(SPRN_MMUCR) | ||
115 | ); | ||
116 | } | ||
34 | 117 | ||
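The mfspr/rlwimi/mtspr preamble in kvmppc_44x_tlbwe() stages the entry's TID in the low byte of MMUCR before the three tlbwe instructions run. The rlwimi bit manipulation, restated as a C sketch (register arithmetic only; on real hardware this must go through MMUCR itself):

    #include <assert.h>

    /* rlwimi tmp, tid, 0, 0xff: insert the low byte of tid into tmp while
     * preserving every other MMUCR bit. */
    static unsigned int mmucr_set_stid(unsigned int mmucr, unsigned char tid)
    {
        return (mmucr & ~0xffu) | tid;
    }

    int main(void)
    {
        assert(mmucr_set_stid(0x00010023, 0x42) == 0x00010042);
        return 0;
    }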
35 | static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) | 118 | static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) |
36 | { | 119 | { |
37 | /* Mask off reserved bits. */ | 120 | /* We only care about the guest's permission and user bits. */ |
38 | attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK; | 121 | attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK; |
39 | 122 | ||
40 | if (!usermode) { | 123 | if (!usermode) { |
41 | /* Guest is in supervisor mode, so we need to translate guest | 124 | /* Guest is in supervisor mode, so we need to translate guest |
@@ -47,18 +130,60 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) | |||
47 | /* Make sure host can always access this memory. */ | 130 | /* Make sure host can always access this memory. */ |
48 | attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; | 131 | attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; |
49 | 132 | ||
133 | /* WIMGE = 0b00100 */ | ||
134 | attrib |= PPC44x_TLB_M; | ||
135 | |||
50 | return attrib; | 136 | return attrib; |
51 | } | 137 | } |
52 | 138 | ||
139 | /* Load shadow TLB back into hardware. */ | ||
140 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu) | ||
141 | { | ||
142 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
143 | int i; | ||
144 | |||
145 | for (i = 0; i <= tlb_44x_hwater; i++) { | ||
146 | struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; | ||
147 | |||
148 | if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) | ||
149 | kvmppc_44x_tlbwe(i, stlbe); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x, | ||
154 | unsigned int i) | ||
155 | { | ||
156 | vcpu_44x->shadow_tlb_mod[i] = 1; | ||
157 | } | ||
158 | |||
159 | /* Save hardware TLB to the vcpu, and invalidate all guest mappings. */ | ||
160 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu) | ||
161 | { | ||
162 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
163 | int i; | ||
164 | |||
165 | for (i = 0; i <= tlb_44x_hwater; i++) { | ||
166 | struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; | ||
167 | |||
168 | if (vcpu_44x->shadow_tlb_mod[i]) | ||
169 | kvmppc_44x_tlbre(i, stlbe); | ||
170 | |||
171 | if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) | ||
172 | kvmppc_44x_tlbie(i); | ||
173 | } | ||
174 | } | ||
175 | |||
175 | |||
176 | |||
53 | /* Search the guest TLB for a matching entry. */ | 177 | /* Search the guest TLB for a matching entry. */ |
54 | int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, | 178 | int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, |
55 | unsigned int as) | 179 | unsigned int as) |
56 | { | 180 | { |
181 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
57 | int i; | 182 | int i; |
58 | 183 | ||
59 | /* XXX Replace loop with fancy data structures. */ | 184 | /* XXX Replace loop with fancy data structures. */ |
60 | for (i = 0; i < PPC44x_TLB_SIZE; i++) { | 185 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { |
61 | struct tlbe *tlbe = &vcpu->arch.guest_tlb[i]; | 186 | struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i]; |
62 | unsigned int tid; | 187 | unsigned int tid; |
63 | 188 | ||
64 | if (eaddr < get_tlb_eaddr(tlbe)) | 189 | if (eaddr < get_tlb_eaddr(tlbe)) |
@@ -83,78 +208,89 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, | |||
83 | return -1; | 208 | return -1; |
84 | } | 209 | } |
85 | 210 | ||
86 | struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr) | 211 | int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
87 | { | 212 | { |
88 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 213 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); |
89 | unsigned int index; | ||
90 | 214 | ||
91 | index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | 215 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); |
92 | if (index == -1) | ||
93 | return NULL; | ||
94 | return &vcpu->arch.guest_tlb[index]; | ||
95 | } | 216 | } |
96 | 217 | ||
97 | struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr) | 218 | int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
98 | { | 219 | { |
99 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 220 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); |
100 | unsigned int index; | ||
101 | 221 | ||
102 | index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | 222 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); |
103 | if (index == -1) | ||
104 | return NULL; | ||
105 | return &vcpu->arch.guest_tlb[index]; | ||
106 | } | 223 | } |
107 | 224 | ||
108 | static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe) | 225 | static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, |
226 | unsigned int stlb_index) | ||
109 | { | 227 | { |
110 | return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); | 228 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index]; |
111 | } | ||
112 | 229 | ||
113 | static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, | 230 | if (!ref->page) |
114 | unsigned int index) | 231 | return; |
115 | { | ||
116 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index]; | ||
117 | struct page *page = vcpu->arch.shadow_pages[index]; | ||
118 | 232 | ||
119 | if (get_tlb_v(stlbe)) { | 233 | /* Discard from the TLB. */ |
120 | if (kvmppc_44x_tlbe_is_writable(stlbe)) | 234 | /* Note: we could actually invalidate a host mapping, if the host overwrote |
121 | kvm_release_page_dirty(page); | 235 | * this TLB entry since we inserted a guest mapping. */ |
122 | else | 236 | kvmppc_44x_tlbie(stlb_index); |
123 | kvm_release_page_clean(page); | 237 | |
124 | } | 238 | /* Now release the page. */ |
239 | if (ref->writeable) | ||
240 | kvm_release_page_dirty(ref->page); | ||
241 | else | ||
242 | kvm_release_page_clean(ref->page); | ||
243 | |||
244 | ref->page = NULL; | ||
245 | |||
246 | /* XXX set tlb_44x_index to stlb_index? */ | ||
247 | |||
248 | KVMTRACE_1D(STLB_INVAL, &vcpu_44x->vcpu, stlb_index, handler); | ||
125 | } | 249 | } |
126 | 250 | ||
127 | void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) | 251 | void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu) |
128 | { | 252 | { |
253 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
129 | int i; | 254 | int i; |
130 | 255 | ||
131 | for (i = 0; i <= tlb_44x_hwater; i++) | 256 | for (i = 0; i <= tlb_44x_hwater; i++) |
132 | kvmppc_44x_shadow_release(vcpu, i); | 257 | kvmppc_44x_shadow_release(vcpu_44x, i); |
133 | } | ||
134 | |||
135 | void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i) | ||
136 | { | ||
137 | vcpu->arch.shadow_tlb_mod[i] = 1; | ||
138 | } | 258 | } |
139 | 259 | ||
140 | /* Caller must ensure that the specified guest TLB entry is safe to insert into | 260 | /** |
141 | * the shadow TLB. */ | 261 | * kvmppc_mmu_map -- create a host mapping for guest memory |
142 | void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | 262 | * |
143 | u32 flags) | 263 | * If the guest wanted a larger page than the host supports, only the first |
264 | * host page is mapped here and the rest are demand faulted. | ||
265 | * | ||
266 | * If the guest wanted a smaller page than the host page size, we map only the | ||
267 | * guest-size page (i.e. not a full host page mapping). | ||
268 | * | ||
269 | * Caller must ensure that the specified guest TLB entry is safe to insert into | ||
270 | * the shadow TLB. | ||
271 | */ | ||
272 | void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid, | ||
273 | u32 flags, u32 max_bytes, unsigned int gtlb_index) | ||
144 | { | 274 | { |
275 | struct kvmppc_44x_tlbe stlbe; | ||
276 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
277 | struct kvmppc_44x_shadow_ref *ref; | ||
145 | struct page *new_page; | 278 | struct page *new_page; |
146 | struct tlbe *stlbe; | ||
147 | hpa_t hpaddr; | 279 | hpa_t hpaddr; |
280 | gfn_t gfn; | ||
148 | unsigned int victim; | 281 | unsigned int victim; |
149 | 282 | ||
150 | /* Future optimization: don't overwrite the TLB entry containing the | 283 | /* Select TLB entry to clobber. Indirectly guard against races with the TLB |
151 | * current PC (or stack?). */ | 284 | * miss handler by disabling interrupts. */ |
152 | victim = kvmppc_tlb_44x_pos++; | 285 | local_irq_disable(); |
153 | if (kvmppc_tlb_44x_pos > tlb_44x_hwater) | 286 | victim = ++tlb_44x_index; |
154 | kvmppc_tlb_44x_pos = 0; | 287 | if (victim > tlb_44x_hwater) |
155 | stlbe = &vcpu->arch.shadow_tlb[victim]; | 288 | victim = 0; |
289 | tlb_44x_index = victim; | ||
290 | local_irq_enable(); | ||
156 | 291 | ||
157 | /* Get reference to new page. */ | 292 | /* Get reference to new page. */ |
293 | gfn = gpaddr >> PAGE_SHIFT; | ||
158 | new_page = gfn_to_page(vcpu->kvm, gfn); | 294 | new_page = gfn_to_page(vcpu->kvm, gfn); |
159 | if (is_error_page(new_page)) { | 295 | if (is_error_page(new_page)) { |
160 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); | 296 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); |
@@ -163,10 +299,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | |||
163 | } | 299 | } |
164 | hpaddr = page_to_phys(new_page); | 300 | hpaddr = page_to_phys(new_page); |
165 | 301 | ||
166 | /* Drop reference to old page. */ | 302 | /* Invalidate any previous shadow mappings. */ |
167 | kvmppc_44x_shadow_release(vcpu, victim); | 303 | kvmppc_44x_shadow_release(vcpu_44x, victim); |
168 | |||
169 | vcpu->arch.shadow_pages[victim] = new_page; | ||
170 | 304 | ||
171 | /* XXX Make sure (va, size) doesn't overlap any other | 305 | /* XXX Make sure (va, size) doesn't overlap any other |
172 | * entries. 440x6 user manual says the result would be | 306 | * entries. 440x6 user manual says the result would be |
@@ -174,78 +308,193 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | |||
174 | 308 | ||
175 | /* XXX what about AS? */ | 309 | /* XXX what about AS? */ |
176 | 310 | ||
177 | stlbe->tid = !(asid & 0xff); | ||
178 | |||
179 | /* Force TS=1 for all guest mappings. */ | 311 | /* Force TS=1 for all guest mappings. */ |
180 | /* For now we hardcode 4KB mappings, but it will be important to | 312 | stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS; |
181 | * use host large pages in the future. */ | 313 | |
182 | stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS | 314 | if (max_bytes >= PAGE_SIZE) { |
183 | | PPC44x_TLB_4K; | 315 | /* Guest mapping is larger than or equal to host page size. We can use |
184 | stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); | 316 | * a "native" host mapping. */ |
185 | stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags, | 317 | stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE; |
186 | vcpu->arch.msr & MSR_PR); | 318 | } else { |
187 | kvmppc_tlbe_set_modified(vcpu, victim); | 319 | /* Guest mapping is smaller than host page size. We must restrict the |
320 | * size of the mapping to be at most the smaller of the two, but for | ||
321 | * simplicity we fall back to a 4K mapping (this is probably what the | ||
322 | * guest is using anyway). */ | ||
323 | stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K; | ||
324 | |||
325 | /* 'hpaddr' is a host page, which is larger than the mapping we're | ||
326 | * inserting here. To compensate, we must add the in-page offset to the | ||
327 | * sub-page. */ | ||
328 | hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K); | ||
329 | } | ||
188 | 330 | ||
189 | KVMTRACE_5D(STLB_WRITE, vcpu, victim, | 331 | stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); |
190 | stlbe->tid, stlbe->word0, stlbe->word1, stlbe->word2, | 332 | stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, |
191 | handler); | 333 | vcpu->arch.msr & MSR_PR); |
334 | stlbe.tid = !(asid & 0xff); | ||
335 | |||
336 | /* Keep track of the reference so we can properly release it later. */ | ||
337 | ref = &vcpu_44x->shadow_refs[victim]; | ||
338 | ref->page = new_page; | ||
339 | ref->gtlb_index = gtlb_index; | ||
340 | ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW); | ||
341 | ref->tid = stlbe.tid; | ||
342 | |||
343 | /* Insert shadow mapping into hardware TLB. */ | ||
344 | kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); | ||
345 | kvmppc_44x_tlbwe(victim, &stlbe); | ||
346 | KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1, | ||
347 | stlbe.word2, handler); | ||
192 | } | 348 | } |
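The interesting line in the small-page branch is hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K): it folds the guest's choice of 4K sub-page back into the host frame address. A worked sketch, assuming a hypothetical 64K host page size (the kernel's PAGE_MASK depends on configuration):

    #include <stdio.h>

    #define PAGE_SIZE_HOST 0x10000UL                 /* assumed 64K host page */
    #define PAGE_MASK_HOST (~(PAGE_SIZE_HOST - 1))
    #define PAGE_SIZE_4K   0x1000UL
    #define PAGE_MASK_4K   (~(PAGE_SIZE_4K - 1))

    int main(void)
    {
        unsigned long hpaddr = 0x12340000;            /* host page frame */
        unsigned long gpaddr = 0x00ab5000;            /* guest 4K page */

        /* PAGE_MASK_HOST ^ PAGE_MASK_4K selects bits 12..15: which 4K
         * sub-page of the 64K host page the guest actually mapped. */
        hpaddr |= gpaddr & (PAGE_MASK_HOST ^ PAGE_MASK_4K);
        printf("shadow real address: 0x%08lx\n", hpaddr);   /* 0x12345000 */
        return 0;
    }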
193 | 349 | ||
194 | void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, | 350 | /* For a particular guest TLB entry, invalidate the corresponding host TLB |
195 | gva_t eend, u32 asid) | 351 | * mappings and release the host pages. */ |
352 | static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu, | ||
353 | unsigned int gtlb_index) | ||
196 | { | 354 | { |
197 | unsigned int pid = !(asid & 0xff); | 355 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
198 | int i; | 356 | int i; |
199 | 357 | ||
200 | /* XXX Replace loop with fancy data structures. */ | 358 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { |
201 | for (i = 0; i <= tlb_44x_hwater; i++) { | 359 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; |
202 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; | 360 | if (ref->gtlb_index == gtlb_index) |
203 | unsigned int tid; | 361 | kvmppc_44x_shadow_release(vcpu_44x, i); |
362 | } | ||
363 | } | ||
204 | 364 | ||
205 | if (!get_tlb_v(stlbe)) | 365 | void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) |
206 | continue; | 366 | { |
367 | vcpu->arch.shadow_pid = !usermode; | ||
368 | } | ||
207 | 369 | ||
208 | if (eend < get_tlb_eaddr(stlbe)) | 370 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) |
209 | continue; | 371 | { |
372 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
373 | int i; | ||
210 | 374 | ||
211 | if (eaddr > get_tlb_end(stlbe)) | 375 | if (unlikely(vcpu->arch.pid == new_pid)) |
212 | continue; | 376 | return; |
213 | 377 | ||
214 | tid = get_tlb_tid(stlbe); | 378 | vcpu->arch.pid = new_pid; |
215 | if (tid && (tid != pid)) | ||
216 | continue; | ||
217 | 379 | ||
218 | kvmppc_44x_shadow_release(vcpu, i); | 380 | /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it |
219 | stlbe->word0 = 0; | 381 | * can't access guest kernel mappings (TID=1). When we switch to a new |
220 | kvmppc_tlbe_set_modified(vcpu, i); | 382 | * guest PID, which will also use host PID=0, we must discard the old guest |
221 | KVMTRACE_5D(STLB_INVAL, vcpu, i, | 383 | * userspace mappings. */ |
222 | stlbe->tid, stlbe->word0, stlbe->word1, | 384 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { |
223 | stlbe->word2, handler); | 385 | struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; |
386 | |||
387 | if (ref->tid == 0) | ||
388 | kvmppc_44x_shadow_release(vcpu_44x, i); | ||
224 | } | 389 | } |
225 | } | 390 | } |
226 | 391 | ||
227 | /* Invalidate all mappings on the privilege switch after PID has been changed. | 392 | static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, |
228 | * The guest always runs with PID=1, so we must clear the entire TLB when | 393 | const struct kvmppc_44x_tlbe *tlbe) |
229 | * switching address spaces. */ | ||
230 | void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) | ||
231 | { | 394 | { |
232 | int i; | 395 | gpa_t gpa; |
233 | 396 | ||
234 | if (vcpu->arch.swap_pid) { | 397 | if (!get_tlb_v(tlbe)) |
235 | /* XXX Replace loop with fancy data structures. */ | 398 | return 0; |
236 | for (i = 0; i <= tlb_44x_hwater; i++) { | 399 | |
237 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; | 400 | /* Does it match current guest AS? */ |
238 | 401 | /* XXX what about IS != DS? */ | |
239 | /* Future optimization: clear only userspace mappings. */ | 402 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) |
240 | kvmppc_44x_shadow_release(vcpu, i); | 403 | return 0; |
241 | stlbe->word0 = 0; | 404 | |
242 | kvmppc_tlbe_set_modified(vcpu, i); | 405 | gpa = get_tlb_raddr(tlbe); |
243 | KVMTRACE_5D(STLB_INVAL, vcpu, i, | 406 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) |
244 | stlbe->tid, stlbe->word0, stlbe->word1, | 407 | /* Mapping is not for RAM. */ |
245 | stlbe->word2, handler); | 408 | return 0; |
246 | } | 409 | |
247 | vcpu->arch.swap_pid = 0; | 410 | return 1; |
411 | } | ||
412 | |||
413 | int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | ||
414 | { | ||
415 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
416 | struct kvmppc_44x_tlbe *tlbe; | ||
417 | unsigned int gtlb_index; | ||
418 | |||
419 | gtlb_index = vcpu->arch.gpr[ra]; | ||
420 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { | ||
421 | printk("%s: index %d\n", __func__, gtlb_index); | ||
422 | kvmppc_dump_vcpu(vcpu); | ||
423 | return EMULATE_FAIL; | ||
248 | } | 424 | } |
249 | 425 | ||
250 | vcpu->arch.shadow_pid = !usermode; | 426 | tlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
427 | |||
428 | /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */ | ||
429 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
430 | kvmppc_44x_invalidate(vcpu, gtlb_index); | ||
431 | |||
432 | switch (ws) { | ||
433 | case PPC44x_TLB_PAGEID: | ||
434 | tlbe->tid = get_mmucr_stid(vcpu); | ||
435 | tlbe->word0 = vcpu->arch.gpr[rs]; | ||
436 | break; | ||
437 | |||
438 | case PPC44x_TLB_XLAT: | ||
439 | tlbe->word1 = vcpu->arch.gpr[rs]; | ||
440 | break; | ||
441 | |||
442 | case PPC44x_TLB_ATTRIB: | ||
443 | tlbe->word2 = vcpu->arch.gpr[rs]; | ||
444 | break; | ||
445 | |||
446 | default: | ||
447 | return EMULATE_FAIL; | ||
448 | } | ||
449 | |||
450 | if (tlbe_is_host_safe(vcpu, tlbe)) { | ||
451 | u64 asid; | ||
452 | gva_t eaddr; | ||
453 | gpa_t gpaddr; | ||
454 | u32 flags; | ||
455 | u32 bytes; | ||
456 | |||
457 | eaddr = get_tlb_eaddr(tlbe); | ||
458 | gpaddr = get_tlb_raddr(tlbe); | ||
459 | |||
460 | /* Use the advertised page size to mask effective and real addrs. */ | ||
461 | bytes = get_tlb_bytes(tlbe); | ||
462 | eaddr &= ~(bytes - 1); | ||
463 | gpaddr &= ~(bytes - 1); | ||
464 | |||
465 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
466 | flags = tlbe->word2 & 0xffff; | ||
467 | |||
468 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, asid, flags, bytes, gtlb_index); | ||
469 | } | ||
470 | |||
471 | KVMTRACE_5D(GTLB_WRITE, vcpu, gtlb_index, tlbe->tid, tlbe->word0, | ||
472 | tlbe->word1, tlbe->word2, handler); | ||
473 | |||
474 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | ||
475 | return EMULATE_DONE; | ||
476 | } | ||
477 | |||
478 | int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) | ||
479 | { | ||
480 | u32 ea; | ||
481 | int gtlb_index; | ||
482 | unsigned int as = get_mmucr_sts(vcpu); | ||
483 | unsigned int pid = get_mmucr_stid(vcpu); | ||
484 | |||
485 | ea = vcpu->arch.gpr[rb]; | ||
486 | if (ra) | ||
487 | ea += vcpu->arch.gpr[ra]; | ||
488 | |||
489 | gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | ||
490 | if (rc) { | ||
491 | if (gtlb_index < 0) | ||
492 | vcpu->arch.cr &= ~0x20000000; | ||
493 | else | ||
494 | vcpu->arch.cr |= 0x20000000; | ||
495 | } | ||
496 | vcpu->arch.gpr[rt] = gtlb_index; | ||
497 | |||
498 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); | ||
499 | return EMULATE_DONE; | ||
251 | } | 500 | } |
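In kvmppc_44x_emul_tlbsx() the Rc form records hit or miss in CR0[EQ]; the 0x20000000 literal is the EQ bit of condition-register field 0 in the 32-bit CR image. A sketch of that update:

    #include <assert.h>

    #define CR0_EQ 0x20000000u   /* EQ bit of CR field 0 */

    /* tlbsx. semantics as emulated above: EQ set on a hit, cleared on a miss. */
    static unsigned int tlbsx_update_cr(unsigned int cr, int gtlb_index)
    {
        if (gtlb_index < 0)
            return cr & ~CR0_EQ;
        return cr | CR0_EQ;
    }

    int main(void)
    {
        assert(tlbsx_update_cr(0, 5) == CR0_EQ);     /* hit */
        assert(tlbsx_update_cr(CR0_EQ, -1) == 0);    /* miss */
        return 0;
    }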
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h index 2ccd46b6f6b7..772191f29e62 100644 --- a/arch/powerpc/kvm/44x_tlb.h +++ b/arch/powerpc/kvm/44x_tlb.h | |||
@@ -25,48 +25,52 @@ | |||
25 | 25 | ||
26 | extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, | 26 | extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, |
27 | unsigned int pid, unsigned int as); | 27 | unsigned int pid, unsigned int as); |
28 | extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); | 28 | extern int kvmppc_44x_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); |
29 | extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr); | 29 | extern int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); |
30 | |||
31 | extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, | ||
32 | u8 rc); | ||
33 | extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws); | ||
30 | 34 | ||
31 | /* TLB helper functions */ | 35 | /* TLB helper functions */ |
32 | static inline unsigned int get_tlb_size(const struct tlbe *tlbe) | 36 | static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe) |
33 | { | 37 | { |
34 | return (tlbe->word0 >> 4) & 0xf; | 38 | return (tlbe->word0 >> 4) & 0xf; |
35 | } | 39 | } |
36 | 40 | ||
37 | static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) | 41 | static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe) |
38 | { | 42 | { |
39 | return tlbe->word0 & 0xfffffc00; | 43 | return tlbe->word0 & 0xfffffc00; |
40 | } | 44 | } |
41 | 45 | ||
42 | static inline gva_t get_tlb_bytes(const struct tlbe *tlbe) | 46 | static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe) |
43 | { | 47 | { |
44 | unsigned int pgsize = get_tlb_size(tlbe); | 48 | unsigned int pgsize = get_tlb_size(tlbe); |
45 | return 1 << 10 << (pgsize << 1); | 49 | return 1 << 10 << (pgsize << 1); |
46 | } | 50 | } |
47 | 51 | ||
48 | static inline gva_t get_tlb_end(const struct tlbe *tlbe) | 52 | static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe) |
49 | { | 53 | { |
50 | return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; | 54 | return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; |
51 | } | 55 | } |
52 | 56 | ||
53 | static inline u64 get_tlb_raddr(const struct tlbe *tlbe) | 57 | static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe) |
54 | { | 58 | { |
55 | u64 word1 = tlbe->word1; | 59 | u64 word1 = tlbe->word1; |
56 | return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); | 60 | return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); |
57 | } | 61 | } |
58 | 62 | ||
59 | static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) | 63 | static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe) |
60 | { | 64 | { |
61 | return tlbe->tid & 0xff; | 65 | return tlbe->tid & 0xff; |
62 | } | 66 | } |
63 | 67 | ||
64 | static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) | 68 | static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe) |
65 | { | 69 | { |
66 | return (tlbe->word0 >> 8) & 0x1; | 70 | return (tlbe->word0 >> 8) & 0x1; |
67 | } | 71 | } |
68 | 72 | ||
69 | static inline unsigned int get_tlb_v(const struct tlbe *tlbe) | 73 | static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe) |
70 | { | 74 | { |
71 | return (tlbe->word0 >> 9) & 0x1; | 75 | return (tlbe->word0 >> 9) & 0x1; |
72 | } | 76 | } |
@@ -81,7 +85,7 @@ static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) | |||
81 | return (vcpu->arch.mmucr >> 16) & 0x1; | 85 | return (vcpu->arch.mmucr >> 16) & 0x1; |
82 | } | 86 | } |
83 | 87 | ||
84 | static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr) | 88 | static inline gpa_t tlb_xlate(struct kvmppc_44x_tlbe *tlbe, gva_t eaddr) |
85 | { | 89 | { |
86 | unsigned int pgmask = get_tlb_bytes(tlbe) - 1; | 90 | unsigned int pgmask = get_tlb_bytes(tlbe) - 1; |
87 | 91 | ||
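get_tlb_bytes() in this header decodes the 4-bit SIZE field as 1KB times 4^SIZE, which is what 1 << 10 << (pgsize << 1) computes. A quick check of the arithmetic (the 440 core implements only a subset of these encodings, but the formula is uniform):

    #include <stdio.h>

    int main(void)
    {
        unsigned int pgsize;

        /* 440 TLB SIZE field: bytes = 1K * 4^SIZE, up to 256M at SIZE=9 */
        for (pgsize = 0; pgsize <= 9; pgsize++)
            printf("SIZE=%u -> %u bytes\n", pgsize,
                   1u << 10 << (pgsize << 1));
        return 0;
    }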
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 53aaa66b25e5..6dbdc4817d80 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig | |||
@@ -15,27 +15,33 @@ menuconfig VIRTUALIZATION | |||
15 | if VIRTUALIZATION | 15 | if VIRTUALIZATION |
16 | 16 | ||
17 | config KVM | 17 | config KVM |
18 | bool "Kernel-based Virtual Machine (KVM) support" | 18 | bool |
19 | depends on 44x && EXPERIMENTAL | ||
20 | select PREEMPT_NOTIFIERS | 19 | select PREEMPT_NOTIFIERS |
21 | select ANON_INODES | 20 | select ANON_INODES |
22 | # We can only run on Book E hosts so far | 21 | |
23 | select KVM_BOOKE_HOST | 22 | config KVM_440 |
23 | bool "KVM support for PowerPC 440 processors" | ||
24 | depends on EXPERIMENTAL && 44x | ||
25 | select KVM | ||
24 | ---help--- | 26 | ---help--- |
25 | Support hosting virtualized guest machines. You will also | 27 | Support running unmodified 440 guest kernels in virtual machines on |
26 | need to select one or more of the processor modules below. | 28 | 440 host processors. |
27 | 29 | ||
28 | This module provides access to the hardware capabilities through | 30 | This module provides access to the hardware capabilities through |
29 | a character device node named /dev/kvm. | 31 | a character device node named /dev/kvm. |
30 | 32 | ||
31 | If unsure, say N. | 33 | If unsure, say N. |
32 | 34 | ||
33 | config KVM_BOOKE_HOST | 35 | config KVM_EXIT_TIMING |
34 | bool "KVM host support for Book E PowerPC processors" | 36 | bool "Detailed exit timing" |
35 | depends on KVM && 44x | 37 | depends on KVM |
36 | ---help--- | 38 | ---help--- |
37 | Provides host support for KVM on Book E PowerPC processors. Currently | 39 | Calculate elapsed time for every exit/enter cycle. A per-vcpu |
38 | this works on 440 processors only. | 40 | report is available in debugfs kvm/vm#_vcpu#_timing. |
41 | The overhead is relatively small, however it is not recommended for | ||
42 | production environments. | ||
43 | |||
44 | If unsure, say N. | ||
39 | 45 | ||
40 | config KVM_TRACE | 46 | config KVM_TRACE |
41 | bool "KVM trace support" | 47 | bool "KVM trace support" |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 2a5d4397ac4b..df7ba59e6d53 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -8,10 +8,16 @@ common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | |||
8 | 8 | ||
9 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) | 9 | common-objs-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o) |
10 | 10 | ||
11 | kvm-objs := $(common-objs-y) powerpc.o emulate.o booke_guest.o | 11 | kvm-objs := $(common-objs-y) powerpc.o emulate.o |
12 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o | ||
12 | obj-$(CONFIG_KVM) += kvm.o | 13 | obj-$(CONFIG_KVM) += kvm.o |
13 | 14 | ||
14 | AFLAGS_booke_interrupts.o := -I$(obj) | 15 | AFLAGS_booke_interrupts.o := -I$(obj) |
15 | 16 | ||
16 | kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o | 17 | kvm-440-objs := \ |
17 | obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o | 18 | booke.o \ |
19 | booke_interrupts.o \ | ||
20 | 44x.o \ | ||
21 | 44x_tlb.o \ | ||
22 | 44x_emulate.o | ||
23 | obj-$(CONFIG_KVM_440) += kvm-440.o | ||
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke.c index 7b2591e26bae..35485dd6927e 100644 --- a/arch/powerpc/kvm/booke_guest.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -24,21 +24,26 @@ | |||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/fs.h> | 26 | #include <linux/fs.h> |
27 | |||
27 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/kvm_ppc.h> | 30 | #include <asm/kvm_ppc.h> |
31 | #include "timing.h" | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/kvm_44x.h> | ||
30 | 34 | ||
35 | #include "booke.h" | ||
31 | #include "44x_tlb.h" | 36 | #include "44x_tlb.h" |
32 | 37 | ||
38 | unsigned long kvmppc_booke_handlers; | ||
39 | |||
33 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM | 40 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM |
34 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 41 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
35 | 42 | ||
36 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 43 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
37 | { "exits", VCPU_STAT(sum_exits) }, | ||
38 | { "mmio", VCPU_STAT(mmio_exits) }, | 44 | { "mmio", VCPU_STAT(mmio_exits) }, |
39 | { "dcr", VCPU_STAT(dcr_exits) }, | 45 | { "dcr", VCPU_STAT(dcr_exits) }, |
40 | { "sig", VCPU_STAT(signal_exits) }, | 46 | { "sig", VCPU_STAT(signal_exits) }, |
41 | { "light", VCPU_STAT(light_exits) }, | ||
42 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, | 47 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, |
43 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, | 48 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, |
44 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, | 49 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, |
@@ -53,103 +58,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
53 | { NULL } | 58 | { NULL } |
54 | }; | 59 | }; |
55 | 60 | ||
56 | static const u32 interrupt_msr_mask[16] = { | ||
57 | [BOOKE_INTERRUPT_CRITICAL] = MSR_ME, | ||
58 | [BOOKE_INTERRUPT_MACHINE_CHECK] = 0, | ||
59 | [BOOKE_INTERRUPT_DATA_STORAGE] = MSR_CE|MSR_ME|MSR_DE, | ||
60 | [BOOKE_INTERRUPT_INST_STORAGE] = MSR_CE|MSR_ME|MSR_DE, | ||
61 | [BOOKE_INTERRUPT_EXTERNAL] = MSR_CE|MSR_ME|MSR_DE, | ||
62 | [BOOKE_INTERRUPT_ALIGNMENT] = MSR_CE|MSR_ME|MSR_DE, | ||
63 | [BOOKE_INTERRUPT_PROGRAM] = MSR_CE|MSR_ME|MSR_DE, | ||
64 | [BOOKE_INTERRUPT_FP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE, | ||
65 | [BOOKE_INTERRUPT_SYSCALL] = MSR_CE|MSR_ME|MSR_DE, | ||
66 | [BOOKE_INTERRUPT_AP_UNAVAIL] = MSR_CE|MSR_ME|MSR_DE, | ||
67 | [BOOKE_INTERRUPT_DECREMENTER] = MSR_CE|MSR_ME|MSR_DE, | ||
68 | [BOOKE_INTERRUPT_FIT] = MSR_CE|MSR_ME|MSR_DE, | ||
69 | [BOOKE_INTERRUPT_WATCHDOG] = MSR_ME, | ||
70 | [BOOKE_INTERRUPT_DTLB_MISS] = MSR_CE|MSR_ME|MSR_DE, | ||
71 | [BOOKE_INTERRUPT_ITLB_MISS] = MSR_CE|MSR_ME|MSR_DE, | ||
72 | [BOOKE_INTERRUPT_DEBUG] = MSR_ME, | ||
73 | }; | ||
74 | |||
75 | const unsigned char exception_priority[] = { | ||
76 | [BOOKE_INTERRUPT_DATA_STORAGE] = 0, | ||
77 | [BOOKE_INTERRUPT_INST_STORAGE] = 1, | ||
78 | [BOOKE_INTERRUPT_ALIGNMENT] = 2, | ||
79 | [BOOKE_INTERRUPT_PROGRAM] = 3, | ||
80 | [BOOKE_INTERRUPT_FP_UNAVAIL] = 4, | ||
81 | [BOOKE_INTERRUPT_SYSCALL] = 5, | ||
82 | [BOOKE_INTERRUPT_AP_UNAVAIL] = 6, | ||
83 | [BOOKE_INTERRUPT_DTLB_MISS] = 7, | ||
84 | [BOOKE_INTERRUPT_ITLB_MISS] = 8, | ||
85 | [BOOKE_INTERRUPT_MACHINE_CHECK] = 9, | ||
86 | [BOOKE_INTERRUPT_DEBUG] = 10, | ||
87 | [BOOKE_INTERRUPT_CRITICAL] = 11, | ||
88 | [BOOKE_INTERRUPT_WATCHDOG] = 12, | ||
89 | [BOOKE_INTERRUPT_EXTERNAL] = 13, | ||
90 | [BOOKE_INTERRUPT_FIT] = 14, | ||
91 | [BOOKE_INTERRUPT_DECREMENTER] = 15, | ||
92 | }; | ||
93 | |||
94 | const unsigned char priority_exception[] = { | ||
95 | BOOKE_INTERRUPT_DATA_STORAGE, | ||
96 | BOOKE_INTERRUPT_INST_STORAGE, | ||
97 | BOOKE_INTERRUPT_ALIGNMENT, | ||
98 | BOOKE_INTERRUPT_PROGRAM, | ||
99 | BOOKE_INTERRUPT_FP_UNAVAIL, | ||
100 | BOOKE_INTERRUPT_SYSCALL, | ||
101 | BOOKE_INTERRUPT_AP_UNAVAIL, | ||
102 | BOOKE_INTERRUPT_DTLB_MISS, | ||
103 | BOOKE_INTERRUPT_ITLB_MISS, | ||
104 | BOOKE_INTERRUPT_MACHINE_CHECK, | ||
105 | BOOKE_INTERRUPT_DEBUG, | ||
106 | BOOKE_INTERRUPT_CRITICAL, | ||
107 | BOOKE_INTERRUPT_WATCHDOG, | ||
108 | BOOKE_INTERRUPT_EXTERNAL, | ||
109 | BOOKE_INTERRUPT_FIT, | ||
110 | BOOKE_INTERRUPT_DECREMENTER, | ||
111 | }; | ||
112 | |||
113 | |||
114 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) | ||
115 | { | ||
116 | struct tlbe *tlbe; | ||
117 | int i; | ||
118 | |||
119 | printk("vcpu %d TLB dump:\n", vcpu->vcpu_id); | ||
120 | printk("| %2s | %3s | %8s | %8s | %8s |\n", | ||
121 | "nr", "tid", "word0", "word1", "word2"); | ||
122 | |||
123 | for (i = 0; i < PPC44x_TLB_SIZE; i++) { | ||
124 | tlbe = &vcpu->arch.guest_tlb[i]; | ||
125 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
126 | printk(" G%2d | %02X | %08X | %08X | %08X |\n", | ||
127 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
128 | tlbe->word2); | ||
129 | } | ||
130 | |||
131 | for (i = 0; i < PPC44x_TLB_SIZE; i++) { | ||
132 | tlbe = &vcpu->arch.shadow_tlb[i]; | ||
133 | if (tlbe->word0 & PPC44x_TLB_VALID) | ||
134 | printk(" S%2d | %02X | %08X | %08X | %08X |\n", | ||
135 | i, tlbe->tid, tlbe->word0, tlbe->word1, | ||
136 | tlbe->word2); | ||
137 | } | ||
138 | } | ||
139 | |||
140 | /* TODO: use vcpu_printf() */ | 61 | /* TODO: use vcpu_printf() */ |
141 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | 62 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) |
142 | { | 63 | { |
143 | int i; | 64 | int i; |
144 | 65 | ||
145 | printk("pc: %08x msr: %08x\n", vcpu->arch.pc, vcpu->arch.msr); | 66 | printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr); |
146 | printk("lr: %08x ctr: %08x\n", vcpu->arch.lr, vcpu->arch.ctr); | 67 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); |
147 | printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1); | 68 | printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1); |
148 | 69 | ||
149 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); | 70 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); |
150 | 71 | ||
151 | for (i = 0; i < 32; i += 4) { | 72 | for (i = 0; i < 32; i += 4) { |
152 | printk("gpr%02d: %08x %08x %08x %08x\n", i, | 73 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, |
153 | vcpu->arch.gpr[i], | 74 | vcpu->arch.gpr[i], |
154 | vcpu->arch.gpr[i+1], | 75 | vcpu->arch.gpr[i+1], |
155 | vcpu->arch.gpr[i+2], | 76 | vcpu->arch.gpr[i+2], |
@@ -157,69 +78,96 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
157 | } | 78 | } |
158 | } | 79 | } |
159 | 80 | ||
160 | /* Check if we are ready to deliver the interrupt */ | 81 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, |
161 | static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt) | 82 | unsigned int priority) |
162 | { | 83 | { |
163 | int r; | 84 | set_bit(priority, &vcpu->arch.pending_exceptions); |
85 | } | ||
164 | 86 | ||
165 | switch (interrupt) { | 87 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) |
166 | case BOOKE_INTERRUPT_CRITICAL: | 88 | { |
167 | r = vcpu->arch.msr & MSR_CE; | 89 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
168 | break; | 90 | } |
169 | case BOOKE_INTERRUPT_MACHINE_CHECK: | 91 | |
170 | r = vcpu->arch.msr & MSR_ME; | 92 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) |
171 | break; | 93 | { |
172 | case BOOKE_INTERRUPT_EXTERNAL: | 94 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); |
173 | r = vcpu->arch.msr & MSR_EE; | 95 | } |
96 | |||
97 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | ||
98 | { | ||
99 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | ||
100 | } | ||
101 | |||
102 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | ||
103 | struct kvm_interrupt *irq) | ||
104 | { | ||
105 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); | ||
106 | } | ||
107 | |||
108 | /* Deliver the interrupt of the corresponding priority, if possible. */ | ||
109 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | ||
110 | unsigned int priority) | ||
111 | { | ||
112 | int allowed = 0; | ||
113 | ulong msr_mask; | ||
114 | |||
115 | switch (priority) { | ||
116 | case BOOKE_IRQPRIO_PROGRAM: | ||
117 | case BOOKE_IRQPRIO_DTLB_MISS: | ||
118 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
119 | case BOOKE_IRQPRIO_SYSCALL: | ||
120 | case BOOKE_IRQPRIO_DATA_STORAGE: | ||
121 | case BOOKE_IRQPRIO_INST_STORAGE: | ||
122 | case BOOKE_IRQPRIO_FP_UNAVAIL: | ||
123 | case BOOKE_IRQPRIO_AP_UNAVAIL: | ||
124 | case BOOKE_IRQPRIO_ALIGNMENT: | ||
125 | allowed = 1; | ||
126 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | ||
174 | break; | 127 | break; |
175 | case BOOKE_INTERRUPT_DECREMENTER: | 128 | case BOOKE_IRQPRIO_CRITICAL: |
176 | r = vcpu->arch.msr & MSR_EE; | 129 | case BOOKE_IRQPRIO_WATCHDOG: |
130 | allowed = vcpu->arch.msr & MSR_CE; | ||
131 | msr_mask = MSR_ME; | ||
177 | break; | 132 | break; |
178 | case BOOKE_INTERRUPT_FIT: | 133 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
179 | r = vcpu->arch.msr & MSR_EE; | 134 | allowed = vcpu->arch.msr & MSR_ME; |
135 | msr_mask = 0; | ||
180 | break; | 136 | break; |
181 | case BOOKE_INTERRUPT_WATCHDOG: | 137 | case BOOKE_IRQPRIO_EXTERNAL: |
182 | r = vcpu->arch.msr & MSR_CE; | 138 | case BOOKE_IRQPRIO_DECREMENTER: |
139 | case BOOKE_IRQPRIO_FIT: | ||
140 | allowed = vcpu->arch.msr & MSR_EE; | ||
141 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | ||
183 | break; | 142 | break; |
184 | case BOOKE_INTERRUPT_DEBUG: | 143 | case BOOKE_IRQPRIO_DEBUG: |
185 | r = vcpu->arch.msr & MSR_DE; | 144 | allowed = vcpu->arch.msr & MSR_DE; |
145 | msr_mask = MSR_ME; | ||
186 | break; | 146 | break; |
187 | default: | ||
188 | r = 1; | ||
189 | } | 147 | } |
190 | 148 | ||
191 | return r; | 149 | if (allowed) { |
192 | } | 150 | vcpu->arch.srr0 = vcpu->arch.pc; |
151 | vcpu->arch.srr1 = vcpu->arch.msr; | ||
152 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | ||
153 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | ||
193 | 154 | ||
194 | static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt) | 155 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
195 | { | ||
196 | switch (interrupt) { | ||
197 | case BOOKE_INTERRUPT_DECREMENTER: | ||
198 | vcpu->arch.tsr |= TSR_DIS; | ||
199 | break; | ||
200 | } | 156 | } |
201 | 157 | ||
202 | vcpu->arch.srr0 = vcpu->arch.pc; | 158 | return allowed; |
203 | vcpu->arch.srr1 = vcpu->arch.msr; | ||
204 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt]; | ||
205 | kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]); | ||
206 | } | 159 | } |
207 | 160 | ||
208 | /* Check pending exceptions and deliver one, if possible. */ | 161 | /* Check pending exceptions and deliver one, if possible. */ |
209 | void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu) | 162 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) |
210 | { | 163 | { |
211 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 164 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
212 | unsigned int exception; | ||
213 | unsigned int priority; | 165 | unsigned int priority; |
214 | 166 | ||
215 | priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending)); | 167 | priority = __ffs(*pending); |
216 | while (priority <= BOOKE_MAX_INTERRUPT) { | 168 | while (priority <= BOOKE_MAX_INTERRUPT) { |
217 | exception = priority_exception[priority]; | 169 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) |
218 | if (kvmppc_can_deliver_interrupt(vcpu, exception)) { | ||
219 | kvmppc_clear_exception(vcpu, exception); | ||
220 | kvmppc_deliver_interrupt(vcpu, exception); | ||
221 | break; | 170 | break; |
222 | } | ||
223 | 171 | ||
224 | priority = find_next_bit(pending, | 172 | priority = find_next_bit(pending, |
225 | BITS_PER_BYTE * sizeof(*pending), | 173 | BITS_PER_BYTE * sizeof(*pending), |
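The rewritten delivery loop treats pending_exceptions as a priority bitmap: __ffs() yields the lowest set bit, i.e. the highest-priority pending exception, each candidate is offered to kvmppc_booke_irqprio_deliver(), and find_next_bit() skips past priorities the guest's MSR currently masks. A user-space sketch of the same walk, substituting GCC's __builtin_ctzl() for __ffs():

    #include <stdio.h>

    #define MAX_PRIO 16

    /* Stand-in for kvmppc_booke_irqprio_deliver(): succeeds only if the
     * guest currently allows this priority, clearing its pending bit. */
    static int deliver(unsigned long *pending, unsigned int prio,
                       unsigned long allowed)
    {
        if (!(allowed & (1UL << prio)))
            return 0;
        *pending &= ~(1UL << prio);
        printf("delivered priority %u\n", prio);
        return 1;
    }

    int main(void)
    {
        unsigned long pending = (1UL << 3) | (1UL << 7);   /* two queued */
        unsigned long allowed = 1UL << 7;                  /* prio 3 masked */
        unsigned int prio = __builtin_ctzl(pending);       /* ~__ffs() */

        while (prio < MAX_PRIO) {
            unsigned long rest;

            if (deliver(&pending, prio, allowed))
                break;
            rest = pending & (~0UL << (prio + 1));         /* find_next_bit */
            if (!rest)
                break;
            prio = __builtin_ctzl(rest);
        }
        return 0;
    }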
@@ -238,6 +186,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
238 | enum emulation_result er; | 186 | enum emulation_result er; |
239 | int r = RESUME_HOST; | 187 | int r = RESUME_HOST; |
240 | 188 | ||
189 | /* Update timing stats before last_exit_type is overwritten. */ | ||
190 | kvmppc_update_timing_stats(vcpu); | ||
191 | |||
241 | local_irq_enable(); | 192 | local_irq_enable(); |
242 | 193 | ||
243 | run->exit_reason = KVM_EXIT_UNKNOWN; | 194 | run->exit_reason = KVM_EXIT_UNKNOWN; |
@@ -251,21 +202,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
251 | break; | 202 | break; |
252 | 203 | ||
253 | case BOOKE_INTERRUPT_EXTERNAL: | 204 | case BOOKE_INTERRUPT_EXTERNAL: |
205 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); | ||
206 | if (need_resched()) | ||
207 | cond_resched(); | ||
208 | r = RESUME_GUEST; | ||
209 | break; | ||
210 | |||
254 | case BOOKE_INTERRUPT_DECREMENTER: | 211 | case BOOKE_INTERRUPT_DECREMENTER: |
255 | /* Since we switched IVPR back to the host's value, the host | 212 | /* Since we switched IVPR back to the host's value, the host |
256 | * handled this interrupt the moment we enabled interrupts. | 213 | * handled this interrupt the moment we enabled interrupts. |
257 | * Now we just offer it a chance to reschedule the guest. */ | 214 | * Now we just offer it a chance to reschedule the guest. */ |
258 | 215 | kvmppc_account_exit(vcpu, DEC_EXITS); | |
259 | /* XXX At this point the TLB still holds our shadow TLB, so if | ||
260 | * we do reschedule the host will fault over it. Perhaps we | ||
261 | * should politely restore the host's entries to minimize | ||
262 | * misses before ceding control. */ | ||
263 | if (need_resched()) | 216 | if (need_resched()) |
264 | cond_resched(); | 217 | cond_resched(); |
265 | if (exit_nr == BOOKE_INTERRUPT_DECREMENTER) | ||
266 | vcpu->stat.dec_exits++; | ||
267 | else | ||
268 | vcpu->stat.ext_intr_exits++; | ||
269 | r = RESUME_GUEST; | 218 | r = RESUME_GUEST; |
270 | break; | 219 | break; |
271 | 220 | ||
@@ -274,17 +223,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
274 | /* Program traps generated by user-level software must be handled | 223 | /* Program traps generated by user-level software must be handled |
275 | * by the guest kernel. */ | 224 | * by the guest kernel. */ |
276 | vcpu->arch.esr = vcpu->arch.fault_esr; | 225 | vcpu->arch.esr = vcpu->arch.fault_esr; |
277 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); | 226 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
278 | r = RESUME_GUEST; | 227 | r = RESUME_GUEST; |
228 | kvmppc_account_exit(vcpu, USR_PR_INST); | ||
279 | break; | 229 | break; |
280 | } | 230 | } |
281 | 231 | ||
282 | er = kvmppc_emulate_instruction(run, vcpu); | 232 | er = kvmppc_emulate_instruction(run, vcpu); |
283 | switch (er) { | 233 | switch (er) { |
284 | case EMULATE_DONE: | 234 | case EMULATE_DONE: |
235 | /* don't overwrite subtypes, just account kvm_stats */ | ||
236 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | ||
285 | /* Future optimization: only reload non-volatiles if | 237 | /* Future optimization: only reload non-volatiles if |
286 | * they were actually modified by emulation. */ | 238 | * they were actually modified by emulation. */ |
287 | vcpu->stat.emulated_inst_exits++; | ||
288 | r = RESUME_GUEST_NV; | 239 | r = RESUME_GUEST_NV; |
289 | break; | 240 | break; |
290 | case EMULATE_DO_DCR: | 241 | case EMULATE_DO_DCR: |
@@ -293,7 +244,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
293 | break; | 244 | break; |
294 | case EMULATE_FAIL: | 245 | case EMULATE_FAIL: |
295 | /* XXX Deliver Program interrupt to guest. */ | 246 | /* XXX Deliver Program interrupt to guest. */ |
296 | printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n", | 247 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
297 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | 248 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); |
298 | /* For debugging, encode the failing instruction and | 249 | /* For debugging, encode the failing instruction and |
299 | * report it to userspace. */ | 250 | * report it to userspace. */ |
@@ -307,48 +258,53 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
307 | break; | 258 | break; |
308 | 259 | ||
309 | case BOOKE_INTERRUPT_FP_UNAVAIL: | 260 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
310 | kvmppc_queue_exception(vcpu, exit_nr); | 261 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); |
262 | kvmppc_account_exit(vcpu, FP_UNAVAIL); | ||
311 | r = RESUME_GUEST; | 263 | r = RESUME_GUEST; |
312 | break; | 264 | break; |
313 | 265 | ||
314 | case BOOKE_INTERRUPT_DATA_STORAGE: | 266 | case BOOKE_INTERRUPT_DATA_STORAGE: |
315 | vcpu->arch.dear = vcpu->arch.fault_dear; | 267 | vcpu->arch.dear = vcpu->arch.fault_dear; |
316 | vcpu->arch.esr = vcpu->arch.fault_esr; | 268 | vcpu->arch.esr = vcpu->arch.fault_esr; |
317 | kvmppc_queue_exception(vcpu, exit_nr); | 269 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); |
318 | vcpu->stat.dsi_exits++; | 270 | kvmppc_account_exit(vcpu, DSI_EXITS); |
319 | r = RESUME_GUEST; | 271 | r = RESUME_GUEST; |
320 | break; | 272 | break; |
321 | 273 | ||
322 | case BOOKE_INTERRUPT_INST_STORAGE: | 274 | case BOOKE_INTERRUPT_INST_STORAGE: |
323 | vcpu->arch.esr = vcpu->arch.fault_esr; | 275 | vcpu->arch.esr = vcpu->arch.fault_esr; |
324 | kvmppc_queue_exception(vcpu, exit_nr); | 276 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); |
325 | vcpu->stat.isi_exits++; | 277 | kvmppc_account_exit(vcpu, ISI_EXITS); |
326 | r = RESUME_GUEST; | 278 | r = RESUME_GUEST; |
327 | break; | 279 | break; |
328 | 280 | ||
329 | case BOOKE_INTERRUPT_SYSCALL: | 281 | case BOOKE_INTERRUPT_SYSCALL: |
330 | kvmppc_queue_exception(vcpu, exit_nr); | 282 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); |
331 | vcpu->stat.syscall_exits++; | 283 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
332 | r = RESUME_GUEST; | 284 | r = RESUME_GUEST; |
333 | break; | 285 | break; |
334 | 286 | ||
287 | /* XXX move to a 440-specific file. */ | ||
335 | case BOOKE_INTERRUPT_DTLB_MISS: { | 288 | case BOOKE_INTERRUPT_DTLB_MISS: { |
336 | struct tlbe *gtlbe; | 289 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
290 | struct kvmppc_44x_tlbe *gtlbe; | ||
337 | unsigned long eaddr = vcpu->arch.fault_dear; | 291 | unsigned long eaddr = vcpu->arch.fault_dear; |
292 | int gtlb_index; | ||
338 | gfn_t gfn; | 293 | gfn_t gfn; |
339 | 294 | ||
340 | /* Check the guest TLB. */ | 295 | /* Check the guest TLB. */ |
341 | gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr); | 296 | gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr); |
342 | if (!gtlbe) { | 297 | if (gtlb_index < 0) { |
343 | /* The guest didn't have a mapping for it. */ | 298 | /* The guest didn't have a mapping for it. */ |
344 | kvmppc_queue_exception(vcpu, exit_nr); | 299 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); |
345 | vcpu->arch.dear = vcpu->arch.fault_dear; | 300 | vcpu->arch.dear = vcpu->arch.fault_dear; |
346 | vcpu->arch.esr = vcpu->arch.fault_esr; | 301 | vcpu->arch.esr = vcpu->arch.fault_esr; |
347 | vcpu->stat.dtlb_real_miss_exits++; | 302 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
348 | r = RESUME_GUEST; | 303 | r = RESUME_GUEST; |
349 | break; | 304 | break; |
350 | } | 305 | } |
351 | 306 | ||
307 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; | ||
352 | vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); | 308 | vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr); |
353 | gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT; | 309 | gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT; |
354 | 310 | ||
@@ -359,38 +315,45 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
359 | * b) the guest used a large mapping which we're faking | 315 | * b) the guest used a large mapping which we're faking |
360 | * Either way, we need to satisfy the fault without | 316 | * Either way, we need to satisfy the fault without |
361 | * invoking the guest. */ | 317 | * invoking the guest. */ |
362 | kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid, | 318 | kvmppc_mmu_map(vcpu, eaddr, vcpu->arch.paddr_accessed, gtlbe->tid, |
363 | gtlbe->word2); | 319 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); |
364 | vcpu->stat.dtlb_virt_miss_exits++; | 320 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
365 | r = RESUME_GUEST; | 321 | r = RESUME_GUEST; |
366 | } else { | 322 | } else { |
367 | /* Guest has mapped and accessed a page which is not | 323 | /* Guest has mapped and accessed a page which is not |
368 | * actually RAM. */ | 324 | * actually RAM. */ |
369 | r = kvmppc_emulate_mmio(run, vcpu); | 325 | r = kvmppc_emulate_mmio(run, vcpu); |
326 | kvmppc_account_exit(vcpu, MMIO_EXITS); | ||
370 | } | 327 | } |
371 | 328 | ||
372 | break; | 329 | break; |
373 | } | 330 | } |
374 | 331 | ||
332 | /* XXX move to a 440-specific file. */ | ||
375 | case BOOKE_INTERRUPT_ITLB_MISS: { | 333 | case BOOKE_INTERRUPT_ITLB_MISS: { |
376 | struct tlbe *gtlbe; | 334 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
335 | struct kvmppc_44x_tlbe *gtlbe; | ||
377 | unsigned long eaddr = vcpu->arch.pc; | 336 | unsigned long eaddr = vcpu->arch.pc; |
337 | gpa_t gpaddr; | ||
378 | gfn_t gfn; | 338 | gfn_t gfn; |
339 | int gtlb_index; | ||
379 | 340 | ||
380 | r = RESUME_GUEST; | 341 | r = RESUME_GUEST; |
381 | 342 | ||
382 | /* Check the guest TLB. */ | 343 | /* Check the guest TLB. */ |
383 | gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr); | 344 | gtlb_index = kvmppc_44x_itlb_index(vcpu, eaddr); |
384 | if (!gtlbe) { | 345 | if (gtlb_index < 0) { |
385 | /* The guest didn't have a mapping for it. */ | 346 | /* The guest didn't have a mapping for it. */ |
386 | kvmppc_queue_exception(vcpu, exit_nr); | 347 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); |
387 | vcpu->stat.itlb_real_miss_exits++; | 348 | kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); |
388 | break; | 349 | break; |
389 | } | 350 | } |
390 | 351 | ||
391 | vcpu->stat.itlb_virt_miss_exits++; | 352 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
392 | 353 | ||
393 | gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT; | 354 | gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; |
355 | gpaddr = tlb_xlate(gtlbe, eaddr); | ||
356 | gfn = gpaddr >> PAGE_SHIFT; | ||
394 | 357 | ||
395 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { | 358 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { |
396 | /* The guest TLB had a mapping, but the shadow TLB | 359 | /* The guest TLB had a mapping, but the shadow TLB |
@@ -399,12 +362,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
399 | * b) the guest used a large mapping which we're faking | 362 | * b) the guest used a large mapping which we're faking |
400 | * Either way, we need to satisfy the fault without | 363 | * Either way, we need to satisfy the fault without |
401 | * invoking the guest. */ | 364 | * invoking the guest. */ |
402 | kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid, | 365 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid, |
403 | gtlbe->word2); | 366 | gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index); |
404 | } else { | 367 | } else { |
405 | /* Guest mapped and leaped at non-RAM! */ | 368 | /* Guest mapped and leaped at non-RAM! */ |
406 | kvmppc_queue_exception(vcpu, | 369 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); |
407 | BOOKE_INTERRUPT_MACHINE_CHECK); | ||
408 | } | 370 | } |
409 | 371 | ||
410 | break; | 372 | break; |
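
Both miss handlers above follow the same three-way shape. Condensed into one sketch (names taken directly from the handlers; the real code additionally sets DEAR/ESR on the reflect path and does exit accounting):

/* Condensed shape of the DTLB/ITLB miss handling above (sketch only). */
static int handle_tlb_miss(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned long eaddr)
{
        struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
        struct kvmppc_44x_tlbe *gtlbe;
        gpa_t gpaddr;
        int gtlb_index;

        gtlb_index = kvmppc_44x_dtlb_index(vcpu, eaddr);
        if (gtlb_index < 0) {
                /* No guest mapping: reflect the miss into the guest. */
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
                return RESUME_GUEST;
        }

        gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
        gpaddr = tlb_xlate(gtlbe, eaddr);

        if (kvm_is_visible_gfn(vcpu->kvm, gpaddr >> PAGE_SHIFT)) {
                /* Guest mapping exists but the shadow TLB missed:
                 * demand-fault a host entry without re-entering the guest. */
                kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlbe->tid,
                               gtlbe->word2, get_tlb_bytes(gtlbe), gtlb_index);
                return RESUME_GUEST;
        }

        /* Guest mapping points at non-RAM: emulate the access as MMIO. */
        return kvmppc_emulate_mmio(run, vcpu);
}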
@@ -421,6 +383,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
421 | mtspr(SPRN_DBSR, dbsr); | 383 | mtspr(SPRN_DBSR, dbsr); |
422 | 384 | ||
423 | run->exit_reason = KVM_EXIT_DEBUG; | 385 | run->exit_reason = KVM_EXIT_DEBUG; |
386 | kvmppc_account_exit(vcpu, DEBUG_EXITS); | ||
424 | r = RESUME_HOST; | 387 | r = RESUME_HOST; |
425 | break; | 388 | break; |
426 | } | 389 | } |
@@ -432,10 +395,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
432 | 395 | ||
433 | local_irq_disable(); | 396 | local_irq_disable(); |
434 | 397 | ||
435 | kvmppc_check_and_deliver_interrupts(vcpu); | 398 | kvmppc_core_deliver_interrupts(vcpu); |
436 | 399 | ||
437 | /* Do some exit accounting. */ | ||
438 | vcpu->stat.sum_exits++; | ||
439 | if (!(r & RESUME_HOST)) { | 400 | if (!(r & RESUME_HOST)) { |
440 | /* To avoid clobbering exit_reason, only check for signals if | 401 | /* To avoid clobbering exit_reason, only check for signals if |
441 | * we aren't already exiting to userspace for some other | 402 | * we aren't already exiting to userspace for some other |
@@ -443,22 +404,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
443 | if (signal_pending(current)) { | 404 | if (signal_pending(current)) { |
444 | run->exit_reason = KVM_EXIT_INTR; | 405 | run->exit_reason = KVM_EXIT_INTR; |
445 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); | 406 | r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
446 | 407 | kvmppc_account_exit(vcpu, SIGNAL_EXITS); | |
447 | vcpu->stat.signal_exits++; | ||
448 | } else { | ||
449 | vcpu->stat.light_exits++; | ||
450 | } | ||
451 | } else { | ||
452 | switch (run->exit_reason) { | ||
453 | case KVM_EXIT_MMIO: | ||
454 | vcpu->stat.mmio_exits++; | ||
455 | break; | ||
456 | case KVM_EXIT_DCR: | ||
457 | vcpu->stat.dcr_exits++; | ||
458 | break; | ||
459 | case KVM_EXIT_INTR: | ||
460 | vcpu->stat.signal_exits++; | ||
461 | break; | ||
462 | } | 408 | } |
463 | } | 409 | } |
464 | 410 | ||
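
The r value built above follows the kvm_ppc resume convention: the low bits are flags and the upper bits carry the value handed back to userspace, as the (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV) construction shows. A sketch of how such a value is consumed (decoding inferred from that construction; details may differ from the real headers):

/* Inferred decoding of the resume value (illustrative only). */
static int resume_errno(int r)
{
        return r >> 2;                  /* e.g. -EINTR after a signal */
}

static int should_return_to_host(int r)
{
        return r & RESUME_HOST;         /* flag bit ORed in above */
}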
@@ -468,20 +414,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
468 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ | 414 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
469 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 415 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
470 | { | 416 | { |
471 | struct tlbe *tlbe = &vcpu->arch.guest_tlb[0]; | ||
472 | |||
473 | tlbe->tid = 0; | ||
474 | tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID; | ||
475 | tlbe->word1 = 0; | ||
476 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR; | ||
477 | |||
478 | tlbe++; | ||
479 | tlbe->tid = 0; | ||
480 | tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID; | ||
481 | tlbe->word1 = 0xef600000; | ||
482 | tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR | ||
483 | | PPC44x_TLB_I | PPC44x_TLB_G; | ||
484 | |||
485 | vcpu->arch.pc = 0; | 417 | vcpu->arch.pc = 0; |
486 | vcpu->arch.msr = 0; | 418 | vcpu->arch.msr = 0; |
487 | vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ | 419 | vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ |
@@ -492,12 +424,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
492 | * before it's programmed its own IVPR. */ | 424 | * before it's programmed its own IVPR. */ |
493 | vcpu->arch.ivpr = 0x55550000; | 425 | vcpu->arch.ivpr = 0x55550000; |
494 | 426 | ||
495 | /* Since the guest can directly access the timebase, it must know the | 427 | kvmppc_init_timing_stats(vcpu); |
496 | * real timebase frequency. Accordingly, it must see the state of | ||
497 | * CCR1[TCS]. */ | ||
498 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); | ||
499 | 428 | ||
500 | return 0; | 429 | return kvmppc_core_vcpu_setup(vcpu); |
501 | } | 430 | } |
502 | 431 | ||
503 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 432 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
@@ -536,7 +465,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
536 | vcpu->arch.ctr = regs->ctr; | 465 | vcpu->arch.ctr = regs->ctr; |
537 | vcpu->arch.lr = regs->lr; | 466 | vcpu->arch.lr = regs->lr; |
538 | vcpu->arch.xer = regs->xer; | 467 | vcpu->arch.xer = regs->xer; |
539 | vcpu->arch.msr = regs->msr; | 468 | kvmppc_set_msr(vcpu, regs->msr); |
540 | vcpu->arch.srr0 = regs->srr0; | 469 | vcpu->arch.srr0 = regs->srr0; |
541 | vcpu->arch.srr1 = regs->srr1; | 470 | vcpu->arch.srr1 = regs->srr1; |
542 | vcpu->arch.sprg0 = regs->sprg0; | 471 | vcpu->arch.sprg0 = regs->sprg0; |
@@ -575,31 +504,62 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |||
575 | return -ENOTSUPP; | 504 | return -ENOTSUPP; |
576 | } | 505 | } |
577 | 506 | ||
578 | /* 'linear_address' is actually an encoding of AS|PID|EADDR. */ ||
579 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | 507 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
580 | struct kvm_translation *tr) | 508 | struct kvm_translation *tr) |
581 | { | 509 | { |
582 | struct tlbe *gtlbe; | 510 | return kvmppc_core_vcpu_translate(vcpu, tr); |
583 | int index; | 511 | } |
584 | gva_t eaddr; | ||
585 | u8 pid; | ||
586 | u8 as; | ||
587 | |||
588 | eaddr = tr->linear_address; | ||
589 | pid = (tr->linear_address >> 32) & 0xff; | ||
590 | as = (tr->linear_address >> 40) & 0x1; | ||
591 | |||
592 | index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as); | ||
593 | if (index == -1) { | ||
594 | tr->valid = 0; | ||
595 | return 0; | ||
596 | } | ||
597 | 512 | ||
598 | gtlbe = &vcpu->arch.guest_tlb[index]; | 513 | int kvmppc_booke_init(void) |
514 | { | ||
515 | unsigned long ivor[16]; | ||
516 | unsigned long max_ivor = 0; | ||
517 | int i; | ||
599 | 518 | ||
600 | tr->physical_address = tlb_xlate(gtlbe, eaddr); | 519 | /* We install our own exception handlers by hijacking IVPR. IVPR holds
601 | /* XXX what does "writeable" and "usermode" even mean? */ | 520 | * only the top 16 address bits, so we need a 64KB-aligned allocation. */
602 | tr->valid = 1; | 521 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, |
522 | VCPU_SIZE_ORDER); | ||
523 | if (!kvmppc_booke_handlers) | ||
524 | return -ENOMEM; | ||
525 | |||
526 | /* XXX make sure our handlers are smaller than Linux's */ | ||
527 | |||
528 | /* Copy our interrupt handlers to match host IVORs. That way we don't | ||
529 | * have to swap the IVORs on every guest/host transition. */ | ||
530 | ivor[0] = mfspr(SPRN_IVOR0); | ||
531 | ivor[1] = mfspr(SPRN_IVOR1); | ||
532 | ivor[2] = mfspr(SPRN_IVOR2); | ||
533 | ivor[3] = mfspr(SPRN_IVOR3); | ||
534 | ivor[4] = mfspr(SPRN_IVOR4); | ||
535 | ivor[5] = mfspr(SPRN_IVOR5); | ||
536 | ivor[6] = mfspr(SPRN_IVOR6); | ||
537 | ivor[7] = mfspr(SPRN_IVOR7); | ||
538 | ivor[8] = mfspr(SPRN_IVOR8); | ||
539 | ivor[9] = mfspr(SPRN_IVOR9); | ||
540 | ivor[10] = mfspr(SPRN_IVOR10); | ||
541 | ivor[11] = mfspr(SPRN_IVOR11); | ||
542 | ivor[12] = mfspr(SPRN_IVOR12); | ||
543 | ivor[13] = mfspr(SPRN_IVOR13); | ||
544 | ivor[14] = mfspr(SPRN_IVOR14); | ||
545 | ivor[15] = mfspr(SPRN_IVOR15); | ||
546 | |||
547 | for (i = 0; i < 16; i++) { | ||
548 | if (ivor[i] > max_ivor) | ||
549 | max_ivor = ivor[i]; | ||
550 | |||
551 | memcpy((void *)kvmppc_booke_handlers + ivor[i], | ||
552 | kvmppc_handlers_start + i * kvmppc_handler_len, | ||
553 | kvmppc_handler_len); | ||
554 | } | ||
555 | flush_icache_range(kvmppc_booke_handlers, | ||
556 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | ||
603 | 557 | ||
604 | return 0; | 558 | return 0; |
605 | } | 559 | } |
560 | |||
561 | void __exit kvmppc_booke_exit(void) | ||
562 | { | ||
563 | free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); | ||
564 | kvm_exit(); | ||
565 | } | ||
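
For reference, the reason kvmppc_booke_init() needs a 64KB block: on Book E, IVPR contributes only the upper 16 bits of each vector address and IVORn supplies the low-order offset, so the handler copies must live in a 64KB-aligned region. Since the buddy allocator returns naturally aligned blocks, the __get_free_pages(..., VCPU_SIZE_ORDER) allocation above satisfies that. A standalone sketch of the address split (illustrative, not part of the patch):

/* How a Book E vector address is formed from IVPR and IVORn (sketch). */
static unsigned long booke_vector(unsigned long ivpr, unsigned long ivor)
{
        /* IVPR keeps the top 16 bits; IVORn supplies a 16-byte-aligned
         * offset within that 64KB window. */
        return (ivpr & 0xffff0000UL) | (ivor & 0x0000fff0UL);
}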
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h new file mode 100644 index 000000000000..cf7c94ca24bf --- /dev/null +++ b/arch/powerpc/kvm/booke.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __KVM_BOOKE_H__ | ||
21 | #define __KVM_BOOKE_H__ | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | #include <linux/kvm_host.h> | ||
25 | #include "timing.h" | ||
26 | |||
27 | /* interrupt priority ordering */ ||
28 | #define BOOKE_IRQPRIO_DATA_STORAGE 0 | ||
29 | #define BOOKE_IRQPRIO_INST_STORAGE 1 | ||
30 | #define BOOKE_IRQPRIO_ALIGNMENT 2 | ||
31 | #define BOOKE_IRQPRIO_PROGRAM 3 | ||
32 | #define BOOKE_IRQPRIO_FP_UNAVAIL 4 | ||
33 | #define BOOKE_IRQPRIO_SYSCALL 5 | ||
34 | #define BOOKE_IRQPRIO_AP_UNAVAIL 6 | ||
35 | #define BOOKE_IRQPRIO_DTLB_MISS 7 | ||
36 | #define BOOKE_IRQPRIO_ITLB_MISS 8 | ||
37 | #define BOOKE_IRQPRIO_MACHINE_CHECK 9 | ||
38 | #define BOOKE_IRQPRIO_DEBUG 10 | ||
39 | #define BOOKE_IRQPRIO_CRITICAL 11 | ||
40 | #define BOOKE_IRQPRIO_WATCHDOG 12 | ||
41 | #define BOOKE_IRQPRIO_EXTERNAL 13 | ||
42 | #define BOOKE_IRQPRIO_FIT 14 | ||
43 | #define BOOKE_IRQPRIO_DECREMENTER 15 | ||
44 | |||
45 | /* Helper function for "full" MSR writes. No need to call this if only EE is | ||
46 | * changing. */ | ||
47 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | ||
48 | { | ||
49 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | ||
50 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | ||
51 | |||
52 | vcpu->arch.msr = new_msr; | ||
53 | |||
54 | if (vcpu->arch.msr & MSR_WE) { | ||
55 | kvm_vcpu_block(vcpu); | ||
56 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | ||
57 | } ||
58 | } | ||
59 | |||
60 | #endif /* __KVM_BOOKE_H__ */ | ||
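
To see why this helper matters, here is a sketch of an emulated mtmsr built on it (illustrative call site only; get_rs() is one of the decode helpers from asm/disassemble.h introduced by this series):

/* Illustrative emulation of "mtmsr rS" via the helper above. */
static int emulate_mtmsr(struct kvm_vcpu *vcpu, u32 inst)
{
        int rs = get_rs(inst);

        /* A full MSR write may flip MSR[PR] (privilege switch) or set
         * MSR[WE] (guest idle), so a raw store to arch.msr would be wrong. */
        kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
        return EMULATE_DONE;
}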
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c deleted file mode 100644 index b480341bc31e..000000000000 --- a/arch/powerpc/kvm/booke_host.c +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #include <linux/errno.h> | ||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <asm/cacheflush.h> | ||
24 | #include <asm/kvm_ppc.h> | ||
25 | |||
26 | unsigned long kvmppc_booke_handlers; | ||
27 | |||
28 | static int kvmppc_booke_init(void) | ||
29 | { | ||
30 | unsigned long ivor[16]; | ||
31 | unsigned long max_ivor = 0; | ||
32 | int i; | ||
33 | |||
34 | /* We install our own exception handlers by hijacking IVPR. IVPR must | ||
35 | * be 16-bit aligned, so we need a 64KB allocation. */ | ||
36 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
37 | VCPU_SIZE_ORDER); | ||
38 | if (!kvmppc_booke_handlers) | ||
39 | return -ENOMEM; | ||
40 | |||
41 | /* XXX make sure our handlers are smaller than Linux's */ | ||
42 | |||
43 | /* Copy our interrupt handlers to match host IVORs. That way we don't | ||
44 | * have to swap the IVORs on every guest/host transition. */ | ||
45 | ivor[0] = mfspr(SPRN_IVOR0); | ||
46 | ivor[1] = mfspr(SPRN_IVOR1); | ||
47 | ivor[2] = mfspr(SPRN_IVOR2); | ||
48 | ivor[3] = mfspr(SPRN_IVOR3); | ||
49 | ivor[4] = mfspr(SPRN_IVOR4); | ||
50 | ivor[5] = mfspr(SPRN_IVOR5); | ||
51 | ivor[6] = mfspr(SPRN_IVOR6); | ||
52 | ivor[7] = mfspr(SPRN_IVOR7); | ||
53 | ivor[8] = mfspr(SPRN_IVOR8); | ||
54 | ivor[9] = mfspr(SPRN_IVOR9); | ||
55 | ivor[10] = mfspr(SPRN_IVOR10); | ||
56 | ivor[11] = mfspr(SPRN_IVOR11); | ||
57 | ivor[12] = mfspr(SPRN_IVOR12); | ||
58 | ivor[13] = mfspr(SPRN_IVOR13); | ||
59 | ivor[14] = mfspr(SPRN_IVOR14); | ||
60 | ivor[15] = mfspr(SPRN_IVOR15); | ||
61 | |||
62 | for (i = 0; i < 16; i++) { | ||
63 | if (ivor[i] > max_ivor) | ||
64 | max_ivor = ivor[i]; | ||
65 | |||
66 | memcpy((void *)kvmppc_booke_handlers + ivor[i], | ||
67 | kvmppc_handlers_start + i * kvmppc_handler_len, | ||
68 | kvmppc_handler_len); | ||
69 | } | ||
70 | flush_icache_range(kvmppc_booke_handlers, | ||
71 | kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); | ||
72 | |||
73 | return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE); | ||
74 | } | ||
75 | |||
76 | static void __exit kvmppc_booke_exit(void) | ||
77 | { | ||
78 | free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); | ||
79 | kvm_exit(); | ||
80 | } | ||
81 | |||
82 | module_init(kvmppc_booke_init) | ||
83 | module_exit(kvmppc_booke_exit) | ||
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 95e165baf85f..084ebcd7dd83 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -107,6 +107,18 @@ _GLOBAL(kvmppc_resume_host) | |||
107 | li r6, 1 | 107 | li r6, 1 |
108 | slw r6, r6, r5 | 108 | slw r6, r6, r5 |
109 | 109 | ||
110 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
111 | /* save exit time */ | ||
112 | 1: | ||
113 | mfspr r7, SPRN_TBRU | ||
114 | mfspr r8, SPRN_TBRL | ||
115 | mfspr r9, SPRN_TBRU | ||
116 | cmpw r9, r7 | ||
117 | bne 1b | ||
118 | stw r8, VCPU_TIMING_EXIT_TBL(r4) | ||
119 | stw r9, VCPU_TIMING_EXIT_TBU(r4) | ||
120 | #endif | ||
121 | |||
110 | /* Save the faulting instruction and all GPRs for emulation. */ | 122 | /* Save the faulting instruction and all GPRs for emulation. */ |
111 | andi. r7, r6, NEED_INST_MASK | 123 | andi. r7, r6, NEED_INST_MASK |
112 | beq ..skip_inst_copy | 124 | beq ..skip_inst_copy |
@@ -335,54 +347,6 @@ lightweight_exit: | |||
335 | lwz r3, VCPU_SHADOW_PID(r4) | 347 | lwz r3, VCPU_SHADOW_PID(r4) |
336 | mtspr SPRN_PID, r3 | 348 | mtspr SPRN_PID, r3 |
337 | 349 | ||
338 | /* Prevent all asynchronous TLB updates. */ | ||
339 | mfmsr r5 | ||
340 | lis r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h | ||
341 | ori r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l | ||
342 | andc r6, r5, r6 | ||
343 | mtmsr r6 | ||
344 | |||
345 | /* Load the guest mappings, leaving the host's "pinned" kernel mappings | ||
346 | * in place. */ | ||
347 | mfspr r10, SPRN_MMUCR /* Save host MMUCR. */ | ||
348 | li r5, PPC44x_TLB_SIZE | ||
349 | lis r5, tlb_44x_hwater@ha | ||
350 | lwz r5, tlb_44x_hwater@l(r5) | ||
351 | mtctr r5 | ||
352 | addi r9, r4, VCPU_SHADOW_TLB | ||
353 | addi r5, r4, VCPU_SHADOW_MOD | ||
354 | li r3, 0 | ||
355 | 1: | ||
356 | lbzx r7, r3, r5 | ||
357 | cmpwi r7, 0 | ||
358 | beq 3f | ||
359 | |||
360 | /* Load guest entry. */ | ||
361 | mulli r11, r3, TLBE_BYTES | ||
362 | add r11, r11, r9 | ||
363 | lwz r7, 0(r11) | ||
364 | mtspr SPRN_MMUCR, r7 | ||
365 | lwz r7, 4(r11) | ||
366 | tlbwe r7, r3, PPC44x_TLB_PAGEID | ||
367 | lwz r7, 8(r11) | ||
368 | tlbwe r7, r3, PPC44x_TLB_XLAT | ||
369 | lwz r7, 12(r11) | ||
370 | tlbwe r7, r3, PPC44x_TLB_ATTRIB | ||
371 | 3: | ||
372 | addi r3, r3, 1 /* Increment index. */ | ||
373 | bdnz 1b | ||
374 | |||
375 | mtspr SPRN_MMUCR, r10 /* Restore host MMUCR. */ | ||
376 | |||
377 | /* Clear bitmap of modified TLB entries */ | ||
378 | li r5, PPC44x_TLB_SIZE>>2 | ||
379 | mtctr r5 | ||
380 | addi r5, r4, VCPU_SHADOW_MOD - 4 | ||
381 | li r6, 0 | ||
382 | 1: | ||
383 | stwu r6, 4(r5) | ||
384 | bdnz 1b | ||
385 | |||
386 | iccci 0, 0 /* XXX hack */ | 350 | iccci 0, 0 /* XXX hack */ |
387 | 351 | ||
388 | /* Load some guest volatiles. */ | 352 | /* Load some guest volatiles. */ |
@@ -423,6 +387,18 @@ lightweight_exit: | |||
423 | lwz r3, VCPU_SPRG7(r4) | 387 | lwz r3, VCPU_SPRG7(r4) |
424 | mtspr SPRN_SPRG7, r3 | 388 | mtspr SPRN_SPRG7, r3 |
425 | 389 | ||
390 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
391 | /* save enter time */ | ||
392 | 1: | ||
393 | mfspr r6, SPRN_TBRU | ||
394 | mfspr r7, SPRN_TBRL | ||
395 | mfspr r8, SPRN_TBRU | ||
396 | cmpw r8, r6 | ||
397 | bne 1b | ||
398 | stw r7, VCPU_TIMING_LAST_ENTER_TBL(r4) | ||
399 | stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4) | ||
400 | #endif | ||
401 | |||
426 | /* Finish loading guest volatiles and jump to guest. */ | 402 | /* Finish loading guest volatiles and jump to guest. */ |
427 | lwz r3, VCPU_CTR(r4) | 403 | lwz r3, VCPU_CTR(r4) |
428 | mtctr r3 | 404 | mtctr r3 |
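
Both timing hunks above use the classic 32-bit technique for an atomic 64-bit timebase read: sample TBU, then TBL, then TBU again, retrying if the upper half changed in between. The same logic rendered in C (a sketch; the kernel has its own timebase helpers):

/* C rendering of the TBU/TBL/TBU loop in the assembly above. */
static inline u64 read_timebase(void)
{
        u32 hi, lo, hi2;

        do {
                hi  = mfspr(SPRN_TBRU);   /* upper 32 bits */
                lo  = mfspr(SPRN_TBRL);   /* lower 32 bits */
                hi2 = mfspr(SPRN_TBRU);   /* did TBL carry into TBU? */
        } while (hi != hi2);

        return ((u64)hi << 32) | lo;
}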
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 0fce4fbdc20d..d1d38daa93fb 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -23,161 +23,14 @@ | |||
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
25 | 25 | ||
26 | #include <asm/dcr.h> | 26 | #include <asm/reg.h> |
27 | #include <asm/dcr-regs.h> | ||
28 | #include <asm/time.h> | 27 | #include <asm/time.h> |
29 | #include <asm/byteorder.h> | 28 | #include <asm/byteorder.h> |
30 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/disassemble.h> | ||
31 | #include "timing.h" | ||
31 | 32 | ||
32 | #include "44x_tlb.h" | 33 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) |
33 | |||
34 | /* Instruction decoding */ | ||
35 | static inline unsigned int get_op(u32 inst) | ||
36 | { | ||
37 | return inst >> 26; | ||
38 | } | ||
39 | |||
40 | static inline unsigned int get_xop(u32 inst) | ||
41 | { | ||
42 | return (inst >> 1) & 0x3ff; | ||
43 | } | ||
44 | |||
45 | static inline unsigned int get_sprn(u32 inst) | ||
46 | { | ||
47 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
48 | } | ||
49 | |||
50 | static inline unsigned int get_dcrn(u32 inst) | ||
51 | { | ||
52 | return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0); | ||
53 | } | ||
54 | |||
55 | static inline unsigned int get_rt(u32 inst) | ||
56 | { | ||
57 | return (inst >> 21) & 0x1f; | ||
58 | } | ||
59 | |||
60 | static inline unsigned int get_rs(u32 inst) | ||
61 | { | ||
62 | return (inst >> 21) & 0x1f; | ||
63 | } | ||
64 | |||
65 | static inline unsigned int get_ra(u32 inst) | ||
66 | { | ||
67 | return (inst >> 16) & 0x1f; | ||
68 | } | ||
69 | |||
70 | static inline unsigned int get_rb(u32 inst) | ||
71 | { | ||
72 | return (inst >> 11) & 0x1f; | ||
73 | } | ||
74 | |||
75 | static inline unsigned int get_rc(u32 inst) | ||
76 | { | ||
77 | return inst & 0x1; | ||
78 | } | ||
79 | |||
80 | static inline unsigned int get_ws(u32 inst) | ||
81 | { | ||
82 | return (inst >> 11) & 0x1f; | ||
83 | } | ||
84 | |||
85 | static inline unsigned int get_d(u32 inst) | ||
86 | { | ||
87 | return inst & 0xffff; | ||
88 | } | ||
89 | |||
90 | static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | ||
91 | const struct tlbe *tlbe) | ||
92 | { | ||
93 | gpa_t gpa; | ||
94 | |||
95 | if (!get_tlb_v(tlbe)) | ||
96 | return 0; | ||
97 | |||
98 | /* Does it match current guest AS? */ | ||
99 | /* XXX what about IS != DS? */ | ||
100 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) | ||
101 | return 0; | ||
102 | |||
103 | gpa = get_tlb_raddr(tlbe); | ||
104 | if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) | ||
105 | /* Mapping is not for RAM. */ | ||
106 | return 0; | ||
107 | |||
108 | return 1; | ||
109 | } | ||
110 | |||
111 | static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst) | ||
112 | { | ||
113 | u64 eaddr; | ||
114 | u64 raddr; | ||
115 | u64 asid; | ||
116 | u32 flags; | ||
117 | struct tlbe *tlbe; | ||
118 | unsigned int ra; | ||
119 | unsigned int rs; | ||
120 | unsigned int ws; | ||
121 | unsigned int index; | ||
122 | |||
123 | ra = get_ra(inst); | ||
124 | rs = get_rs(inst); | ||
125 | ws = get_ws(inst); | ||
126 | |||
127 | index = vcpu->arch.gpr[ra]; | ||
128 | if (index > PPC44x_TLB_SIZE) { | ||
129 | printk("%s: index %d\n", __func__, index); | ||
130 | kvmppc_dump_vcpu(vcpu); | ||
131 | return EMULATE_FAIL; | ||
132 | } | ||
133 | |||
134 | tlbe = &vcpu->arch.guest_tlb[index]; | ||
135 | |||
136 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | ||
137 | if (tlbe->word0 & PPC44x_TLB_VALID) { | ||
138 | eaddr = get_tlb_eaddr(tlbe); | ||
139 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
140 | kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid); | ||
141 | } | ||
142 | |||
143 | switch (ws) { | ||
144 | case PPC44x_TLB_PAGEID: | ||
145 | tlbe->tid = vcpu->arch.mmucr & 0xff; | ||
146 | tlbe->word0 = vcpu->arch.gpr[rs]; | ||
147 | break; | ||
148 | |||
149 | case PPC44x_TLB_XLAT: | ||
150 | tlbe->word1 = vcpu->arch.gpr[rs]; | ||
151 | break; | ||
152 | |||
153 | case PPC44x_TLB_ATTRIB: | ||
154 | tlbe->word2 = vcpu->arch.gpr[rs]; | ||
155 | break; | ||
156 | |||
157 | default: | ||
158 | return EMULATE_FAIL; | ||
159 | } | ||
160 | |||
161 | if (tlbe_is_host_safe(vcpu, tlbe)) { | ||
162 | eaddr = get_tlb_eaddr(tlbe); | ||
163 | raddr = get_tlb_raddr(tlbe); | ||
164 | asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid; | ||
165 | flags = tlbe->word2 & 0xffff; | ||
166 | |||
167 | /* Create a 4KB mapping on the host. If the guest wanted a | ||
168 | * large page, only the first 4KB is mapped here and the rest | ||
169 | * are mapped on the fly. */ | ||
170 | kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags); | ||
171 | } | ||
172 | |||
173 | KVMTRACE_5D(GTLB_WRITE, vcpu, index, | ||
174 | tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2, | ||
175 | handler); | ||
176 | |||
177 | return EMULATE_DONE; | ||
178 | } | ||
179 | |||
180 | static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | ||
181 | { | 34 | { |
182 | if (vcpu->arch.tcr & TCR_DIE) { | 35 | if (vcpu->arch.tcr & TCR_DIE) { |
183 | /* The decrementer ticks at the same rate as the timebase, so | 36 | /* The decrementer ticks at the same rate as the timebase, so |
@@ -193,12 +46,6 @@ static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
193 | } | 46 | } |
194 | } | 47 | } |
195 | 48 | ||
196 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | ||
197 | { | ||
198 | vcpu->arch.pc = vcpu->arch.srr0; | ||
199 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | ||
200 | } | ||
201 | |||
202 | /* XXX to do: | 49 | /* XXX to do: |
203 | * lhax | 50 | * lhax |
204 | * lhaux | 51 | * lhaux |
@@ -213,40 +60,30 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | |||
213 | * | 60 | * |
214 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] | 61 | * XXX is_bigendian should depend on MMU mapping or MSR[LE] |
215 | */ | 62 | */ |
63 | /* XXX Should probably auto-generate instruction decoding for a particular core | ||
64 | * from opcode tables in the future. */ | ||
216 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | 65 | int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) |
217 | { | 66 | { |
218 | u32 inst = vcpu->arch.last_inst; | 67 | u32 inst = vcpu->arch.last_inst; |
219 | u32 ea; | 68 | u32 ea; |
220 | int ra; | 69 | int ra; |
221 | int rb; | 70 | int rb; |
222 | int rc; | ||
223 | int rs; | 71 | int rs; |
224 | int rt; | 72 | int rt; |
225 | int sprn; | 73 | int sprn; |
226 | int dcrn; | ||
227 | enum emulation_result emulated = EMULATE_DONE; | 74 | enum emulation_result emulated = EMULATE_DONE; |
228 | int advance = 1; | 75 | int advance = 1; |
229 | 76 | ||
77 | /* this default type might be overwritten by subcategories */ | ||
78 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | ||
79 | |||
230 | switch (get_op(inst)) { | 80 | switch (get_op(inst)) { |
231 | case 3: /* trap */ | 81 | case 3: /* trap */ |
232 | printk("trap!\n"); | 82 | vcpu->arch.esr |= ESR_PTR; |
233 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM); | 83 | kvmppc_core_queue_program(vcpu); |
234 | advance = 0; | 84 | advance = 0; |
235 | break; | 85 | break; |
236 | 86 | ||
237 | case 19: | ||
238 | switch (get_xop(inst)) { | ||
239 | case 50: /* rfi */ | ||
240 | kvmppc_emul_rfi(vcpu); | ||
241 | advance = 0; | ||
242 | break; | ||
243 | |||
244 | default: | ||
245 | emulated = EMULATE_FAIL; | ||
246 | break; | ||
247 | } | ||
248 | break; | ||
249 | |||
250 | case 31: | 87 | case 31: |
251 | switch (get_xop(inst)) { | 88 | switch (get_xop(inst)) { |
252 | 89 | ||
@@ -255,27 +92,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
255 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 92 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
256 | break; | 93 | break; |
257 | 94 | ||
258 | case 83: /* mfmsr */ | ||
259 | rt = get_rt(inst); | ||
260 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | ||
261 | break; | ||
262 | |||
263 | case 87: /* lbzx */ | 95 | case 87: /* lbzx */ |
264 | rt = get_rt(inst); | 96 | rt = get_rt(inst); |
265 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 97 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
266 | break; | 98 | break; |
267 | 99 | ||
268 | case 131: /* wrtee */ | ||
269 | rs = get_rs(inst); | ||
270 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
271 | | (vcpu->arch.gpr[rs] & MSR_EE); | ||
272 | break; | ||
273 | |||
274 | case 146: /* mtmsr */ | ||
275 | rs = get_rs(inst); | ||
276 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | ||
277 | break; | ||
278 | |||
279 | case 151: /* stwx */ | 100 | case 151: /* stwx */ |
280 | rs = get_rs(inst); | 101 | rs = get_rs(inst); |
281 | emulated = kvmppc_handle_store(run, vcpu, | 102 | emulated = kvmppc_handle_store(run, vcpu, |
@@ -283,11 +104,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
283 | 4, 1); | 104 | 4, 1); |
284 | break; | 105 | break; |
285 | 106 | ||
286 | case 163: /* wrteei */ | ||
287 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | ||
288 | | (inst & MSR_EE); | ||
289 | break; | ||
290 | |||
291 | case 215: /* stbx */ | 107 | case 215: /* stbx */ |
292 | rs = get_rs(inst); | 108 | rs = get_rs(inst); |
293 | emulated = kvmppc_handle_store(run, vcpu, | 109 | emulated = kvmppc_handle_store(run, vcpu, |
@@ -328,42 +144,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
328 | vcpu->arch.gpr[ra] = ea; | 144 | vcpu->arch.gpr[ra] = ea; |
329 | break; | 145 | break; |
330 | 146 | ||
331 | case 323: /* mfdcr */ | ||
332 | dcrn = get_dcrn(inst); | ||
333 | rt = get_rt(inst); | ||
334 | |||
335 | /* The guest may access CPR0 registers to determine the timebase | ||
336 | * frequency, and it must know the real host frequency because it | ||
337 | * can directly access the timebase registers. | ||
338 | * | ||
339 | * It would be possible to emulate those accesses in userspace, | ||
340 | * but userspace can really only figure out the end frequency. | ||
341 | * We could decompose that into the factors that compute it, but | ||
342 | * that's tricky math, and it's easier to just report the real | ||
343 | * CPR0 values. | ||
344 | */ | ||
345 | switch (dcrn) { | ||
346 | case DCRN_CPR0_CONFIG_ADDR: | ||
347 | vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; | ||
348 | break; | ||
349 | case DCRN_CPR0_CONFIG_DATA: | ||
350 | local_irq_disable(); | ||
351 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | ||
352 | vcpu->arch.cpr0_cfgaddr); | ||
353 | vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); | ||
354 | local_irq_enable(); | ||
355 | break; | ||
356 | default: | ||
357 | run->dcr.dcrn = dcrn; | ||
358 | run->dcr.data = 0; | ||
359 | run->dcr.is_write = 0; | ||
360 | vcpu->arch.io_gpr = rt; | ||
361 | vcpu->arch.dcr_needed = 1; | ||
362 | emulated = EMULATE_DO_DCR; | ||
363 | } | ||
364 | |||
365 | break; | ||
366 | |||
367 | case 339: /* mfspr */ | 147 | case 339: /* mfspr */ |
368 | sprn = get_sprn(inst); | 148 | sprn = get_sprn(inst); |
369 | rt = get_rt(inst); | 149 | rt = get_rt(inst); |
@@ -373,26 +153,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
373 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; | 153 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; |
374 | case SPRN_SRR1: | 154 | case SPRN_SRR1: |
375 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; | 155 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; |
376 | case SPRN_MMUCR: | ||
377 | vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; | ||
378 | case SPRN_PID: | ||
379 | vcpu->arch.gpr[rt] = vcpu->arch.pid; break; | ||
380 | case SPRN_IVPR: | ||
381 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | ||
382 | case SPRN_CCR0: | ||
383 | vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; | ||
384 | case SPRN_CCR1: | ||
385 | vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; | ||
386 | case SPRN_PVR: | 156 | case SPRN_PVR: |
387 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; | 157 | vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; |
388 | case SPRN_DEAR: | ||
389 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | ||
390 | case SPRN_ESR: | ||
391 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | ||
392 | case SPRN_DBCR0: | ||
393 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | ||
394 | case SPRN_DBCR1: | ||
395 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | ||
396 | 158 | ||
397 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 159 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
398 | * the guest can always access the real TB anyways. | 160 | * the guest can always access the real TB anyways. |
@@ -413,42 +175,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
413 | /* Note: SPRG4-7 are user-readable, so we don't get | 175 | /* Note: SPRG4-7 are user-readable, so we don't get |
414 | * a trap. */ | 176 | * a trap. */ |
415 | 177 | ||
416 | case SPRN_IVOR0: | ||
417 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break; | ||
418 | case SPRN_IVOR1: | ||
419 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break; | ||
420 | case SPRN_IVOR2: | ||
421 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break; | ||
422 | case SPRN_IVOR3: | ||
423 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break; | ||
424 | case SPRN_IVOR4: | ||
425 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break; | ||
426 | case SPRN_IVOR5: | ||
427 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break; | ||
428 | case SPRN_IVOR6: | ||
429 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break; | ||
430 | case SPRN_IVOR7: | ||
431 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break; | ||
432 | case SPRN_IVOR8: | ||
433 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break; | ||
434 | case SPRN_IVOR9: | ||
435 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break; | ||
436 | case SPRN_IVOR10: | ||
437 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break; | ||
438 | case SPRN_IVOR11: | ||
439 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break; | ||
440 | case SPRN_IVOR12: | ||
441 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break; | ||
442 | case SPRN_IVOR13: | ||
443 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break; | ||
444 | case SPRN_IVOR14: | ||
445 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break; | ||
446 | case SPRN_IVOR15: | ||
447 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break; | ||
448 | |||
449 | default: | 178 | default: |
450 | printk("mfspr: unknown spr %x\n", sprn); | 179 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); |
451 | vcpu->arch.gpr[rt] = 0; | 180 | if (emulated == EMULATE_FAIL) { |
181 | printk("mfspr: unknown spr %x\n", sprn); | ||
182 | vcpu->arch.gpr[rt] = 0; | ||
183 | } | ||
452 | break; | 184 | break; |
453 | } | 185 | } |
454 | break; | 186 | break; |
@@ -478,25 +210,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
478 | vcpu->arch.gpr[ra] = ea; | 210 | vcpu->arch.gpr[ra] = ea; |
479 | break; | 211 | break; |
480 | 212 | ||
481 | case 451: /* mtdcr */ | ||
482 | dcrn = get_dcrn(inst); | ||
483 | rs = get_rs(inst); | ||
484 | |||
485 | /* emulate some access in kernel */ | ||
486 | switch (dcrn) { | ||
487 | case DCRN_CPR0_CONFIG_ADDR: | ||
488 | vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; | ||
489 | break; | ||
490 | default: | ||
491 | run->dcr.dcrn = dcrn; | ||
492 | run->dcr.data = vcpu->arch.gpr[rs]; | ||
493 | run->dcr.is_write = 1; | ||
494 | vcpu->arch.dcr_needed = 1; | ||
495 | emulated = EMULATE_DO_DCR; | ||
496 | } | ||
497 | |||
498 | break; | ||
499 | |||
500 | case 467: /* mtspr */ | 213 | case 467: /* mtspr */ |
501 | sprn = get_sprn(inst); | 214 | sprn = get_sprn(inst); |
502 | rs = get_rs(inst); | 215 | rs = get_rs(inst); |
@@ -505,22 +218,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
505 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; | 218 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; |
506 | case SPRN_SRR1: | 219 | case SPRN_SRR1: |
507 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; | 220 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; |
508 | case SPRN_MMUCR: | ||
509 | vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; | ||
510 | case SPRN_PID: | ||
511 | kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; | ||
512 | case SPRN_CCR0: | ||
513 | vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; | ||
514 | case SPRN_CCR1: | ||
515 | vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; | ||
516 | case SPRN_DEAR: | ||
517 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | ||
518 | case SPRN_ESR: | ||
519 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | ||
520 | case SPRN_DBCR0: | ||
521 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | ||
522 | case SPRN_DBCR1: | ||
523 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | ||
524 | 221 | ||
525 | /* XXX We need to context-switch the timebase for | 222 | /* XXX We need to context-switch the timebase for |
526 | * watchdog and FIT. */ | 223 | * watchdog and FIT. */ |
@@ -532,14 +229,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
532 | kvmppc_emulate_dec(vcpu); | 229 | kvmppc_emulate_dec(vcpu); |
533 | break; | 230 | break; |
534 | 231 | ||
535 | case SPRN_TSR: | ||
536 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | ||
537 | |||
538 | case SPRN_TCR: | ||
539 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | ||
540 | kvmppc_emulate_dec(vcpu); | ||
541 | break; | ||
542 | |||
543 | case SPRN_SPRG0: | 232 | case SPRN_SPRG0: |
544 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; | 233 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; |
545 | case SPRN_SPRG1: | 234 | case SPRN_SPRG1: |
@@ -549,56 +238,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
549 | case SPRN_SPRG3: | 238 | case SPRN_SPRG3: |
550 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; | 239 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; |
551 | 240 | ||
552 | /* Note: SPRG4-7 are user-readable. These values are | ||
553 | * loaded into the real SPRGs when resuming the | ||
554 | * guest. */ | ||
555 | case SPRN_SPRG4: | ||
556 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | ||
557 | case SPRN_SPRG5: | ||
558 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | ||
559 | case SPRN_SPRG6: | ||
560 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | ||
561 | case SPRN_SPRG7: | ||
562 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | ||
563 | |||
564 | case SPRN_IVPR: | ||
565 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break; | ||
566 | case SPRN_IVOR0: | ||
567 | vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break; | ||
568 | case SPRN_IVOR1: | ||
569 | vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break; | ||
570 | case SPRN_IVOR2: | ||
571 | vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break; | ||
572 | case SPRN_IVOR3: | ||
573 | vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break; | ||
574 | case SPRN_IVOR4: | ||
575 | vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break; | ||
576 | case SPRN_IVOR5: | ||
577 | vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break; | ||
578 | case SPRN_IVOR6: | ||
579 | vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break; | ||
580 | case SPRN_IVOR7: | ||
581 | vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break; | ||
582 | case SPRN_IVOR8: | ||
583 | vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break; | ||
584 | case SPRN_IVOR9: | ||
585 | vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break; | ||
586 | case SPRN_IVOR10: | ||
587 | vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break; | ||
588 | case SPRN_IVOR11: | ||
589 | vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break; | ||
590 | case SPRN_IVOR12: | ||
591 | vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break; | ||
592 | case SPRN_IVOR13: | ||
593 | vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break; | ||
594 | case SPRN_IVOR14: | ||
595 | vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break; | ||
596 | case SPRN_IVOR15: | ||
597 | vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break; | ||
598 | |||
599 | default: | 241 | default: |
600 | printk("mtspr: unknown spr %x\n", sprn); | 242 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
601 | emulated = EMULATE_FAIL; | 243 | if (emulated == EMULATE_FAIL) |
244 | printk("mtspr: unknown spr %x\n", sprn); | ||
602 | break; | 245 | break; |
603 | } | 246 | } |
604 | break; | 247 | break; |
@@ -629,36 +272,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
629 | 4, 0); | 272 | 4, 0); |
630 | break; | 273 | break; |
631 | 274 | ||
632 | case 978: /* tlbwe */ | ||
633 | emulated = kvmppc_emul_tlbwe(vcpu, inst); | ||
634 | break; | ||
635 | |||
636 | case 914: { /* tlbsx */ | ||
637 | int index; | ||
638 | unsigned int as = get_mmucr_sts(vcpu); | ||
639 | unsigned int pid = get_mmucr_stid(vcpu); | ||
640 | |||
641 | rt = get_rt(inst); | ||
642 | ra = get_ra(inst); | ||
643 | rb = get_rb(inst); | ||
644 | rc = get_rc(inst); | ||
645 | |||
646 | ea = vcpu->arch.gpr[rb]; | ||
647 | if (ra) | ||
648 | ea += vcpu->arch.gpr[ra]; | ||
649 | |||
650 | index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | ||
651 | if (rc) { | ||
652 | if (index < 0) | ||
653 | vcpu->arch.cr &= ~0x20000000; | ||
654 | else | ||
655 | vcpu->arch.cr |= 0x20000000; | ||
656 | } | ||
657 | vcpu->arch.gpr[rt] = index; | ||
658 | |||
659 | } | ||
660 | break; | ||
661 | |||
662 | case 790: /* lhbrx */ | 275 | case 790: /* lhbrx */ |
663 | rt = get_rt(inst); | 276 | rt = get_rt(inst); |
664 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 277 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
@@ -674,14 +287,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
674 | 2, 0); | 287 | 2, 0); |
675 | break; | 288 | break; |
676 | 289 | ||
677 | case 966: /* iccci */ | ||
678 | break; | ||
679 | |||
680 | default: | 290 | default: |
681 | printk("unknown: op %d xop %d\n", get_op(inst), | 291 | /* Attempt core-specific emulation below. */ |
682 | get_xop(inst)); | ||
683 | emulated = EMULATE_FAIL; | 292 | emulated = EMULATE_FAIL; |
684 | break; | ||
685 | } | 293 | } |
686 | break; | 294 | break; |
687 | 295 | ||
@@ -764,12 +372,19 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
764 | break; | 372 | break; |
765 | 373 | ||
766 | default: | 374 | default: |
767 | printk("unknown op %d\n", get_op(inst)); | ||
768 | emulated = EMULATE_FAIL; | 375 | emulated = EMULATE_FAIL; |
769 | break; | ||
770 | } | 376 | } |
771 | 377 | ||
772 | KVMTRACE_3D(PPC_INSTR, vcpu, inst, vcpu->arch.pc, emulated, entryexit); | 378 | if (emulated == EMULATE_FAIL) { |
379 | emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); | ||
380 | if (emulated == EMULATE_FAIL) { | ||
381 | advance = 0; | ||
382 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " | ||
383 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit); | ||
773 | 388 | ||
774 | if (advance) | 389 | if (advance) |
775 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ | 390 | vcpu->arch.pc += 4; /* Advance past emulated instruction. */ |
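
After this refactoring, emulate.c is a generic front end: opcodes and SPRs it does not recognize fall through to kvmppc_core_emulate_op(), kvmppc_core_emulate_mfspr(), and kvmppc_core_emulate_mtspr(). A minimal sketch of what a core backend hook could look like (signature inferred from the call sites above; the real 440 backend lives in a separate core-specific file, as the XXX comments note):

/* Illustrative core-specific mfspr hook (not part of this diff). */
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
        switch (sprn) {
        case SPRN_MMUCR:
                vcpu->arch.gpr[rt] = vcpu->arch.mmucr;
                return EMULATE_DONE;
        default:
                /* Caller prints the "unknown spr" warning. */
                return EMULATE_FAIL;
        }
}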
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 8bef0efcdfe1..2822c8ccfaaf 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -28,9 +28,9 @@ | |||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/kvm_ppc.h> | 29 | #include <asm/kvm_ppc.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include "timing.h" | ||
31 | #include "../mm/mmu_decl.h" | 32 | #include "../mm/mmu_decl.h" |
32 | 33 | ||
33 | |||
34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) | 34 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) |
35 | { | 35 | { |
36 | return gfn; | 36 | return gfn; |
@@ -99,14 +99,7 @@ void kvm_arch_hardware_unsetup(void) | |||
99 | 99 | ||
100 | void kvm_arch_check_processor_compat(void *rtn) | 100 | void kvm_arch_check_processor_compat(void *rtn) |
101 | { | 101 | { |
102 | int r; | 102 | *(int *)rtn = kvmppc_core_check_processor_compat(); |
103 | |||
104 | if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) | ||
105 | r = 0; | ||
106 | else | ||
107 | r = -ENOTSUPP; | ||
108 | |||
109 | *(int *)rtn = r; | ||
110 | } | 103 | } |
111 | 104 | ||
112 | struct kvm *kvm_arch_create_vm(void) | 105 | struct kvm *kvm_arch_create_vm(void) |
@@ -144,9 +137,6 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
144 | int r; | 137 | int r; |
145 | 138 | ||
146 | switch (ext) { | 139 | switch (ext) { |
147 | case KVM_CAP_USER_MEMORY: | ||
148 | r = 1; | ||
149 | break; | ||
150 | case KVM_CAP_COALESCED_MMIO: | 140 | case KVM_CAP_COALESCED_MMIO: |
151 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 141 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
152 | break; | 142 | break; |
@@ -179,30 +169,15 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
179 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 169 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
180 | { | 170 | { |
181 | struct kvm_vcpu *vcpu; | 171 | struct kvm_vcpu *vcpu; |
182 | int err; | 172 | vcpu = kvmppc_core_vcpu_create(kvm, id); |
183 | 173 | kvmppc_create_vcpu_debugfs(vcpu, id); | |
184 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); | ||
185 | if (!vcpu) { | ||
186 | err = -ENOMEM; | ||
187 | goto out; | ||
188 | } | ||
189 | |||
190 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
191 | if (err) | ||
192 | goto free_vcpu; | ||
193 | |||
194 | return vcpu; | 174 | return vcpu; |
195 | |||
196 | free_vcpu: | ||
197 | kmem_cache_free(kvm_vcpu_cache, vcpu); | ||
198 | out: | ||
199 | return ERR_PTR(err); | ||
200 | } | 175 | } |
201 | 176 | ||
202 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) | 177 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
203 | { | 178 | { |
204 | kvm_vcpu_uninit(vcpu); | 179 | kvmppc_remove_vcpu_debugfs(vcpu); |
205 | kmem_cache_free(kvm_vcpu_cache, vcpu); | 180 | kvmppc_core_vcpu_free(vcpu); |
206 | } | 181 | } |
207 | 182 | ||
208 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | 183 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) |
@@ -212,16 +187,14 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
212 | 187 | ||
213 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 188 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
214 | { | 189 | { |
215 | unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER]; | 190 | return kvmppc_core_pending_dec(vcpu); |
216 | |||
217 | return test_bit(priority, &vcpu->arch.pending_exceptions); | ||
218 | } | 191 | } |
219 | 192 | ||
220 | static void kvmppc_decrementer_func(unsigned long data) | 193 | static void kvmppc_decrementer_func(unsigned long data) |
221 | { | 194 | { |
222 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | 195 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; |
223 | 196 | ||
224 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER); | 197 | kvmppc_core_queue_dec(vcpu); |
225 | 198 | ||
226 | if (waitqueue_active(&vcpu->wq)) { | 199 | if (waitqueue_active(&vcpu->wq)) { |
227 | wake_up_interruptible(&vcpu->wq); | 200 | wake_up_interruptible(&vcpu->wq); |
@@ -242,96 +215,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | |||
242 | kvmppc_core_destroy_mmu(vcpu); | 215 | kvmppc_core_destroy_mmu(vcpu); |
243 | } | 216 | } |
244 | 217 | ||
245 | /* Note: clearing MSR[DE] just means that the debug interrupt will not be | ||
246 | * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits. | ||
247 | * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt | ||
248 | * will be delivered as an "imprecise debug event" (which is indicated by | ||
249 | * DBSR[IDE]). ||
250 | */ | ||
251 | static void kvmppc_disable_debug_interrupts(void) | ||
252 | { | ||
253 | mtmsr(mfmsr() & ~MSR_DE); | ||
254 | } | ||
255 | |||
256 | static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu) | ||
257 | { | ||
258 | kvmppc_disable_debug_interrupts(); | ||
259 | |||
260 | mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]); | ||
261 | mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]); | ||
262 | mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]); | ||
263 | mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]); | ||
264 | mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1); | ||
265 | mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2); | ||
266 | mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0); | ||
267 | mtmsr(vcpu->arch.host_msr); | ||
268 | } | ||
269 | |||
270 | static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu) | ||
271 | { | ||
272 | struct kvm_guest_debug *dbg = &vcpu->guest_debug; | ||
273 | u32 dbcr0 = 0; | ||
274 | |||
275 | vcpu->arch.host_msr = mfmsr(); | ||
276 | kvmppc_disable_debug_interrupts(); | ||
277 | |||
278 | /* Save host debug register state. */ | ||
279 | vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1); | ||
280 | vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2); | ||
281 | vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3); | ||
282 | vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4); | ||
283 | vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0); | ||
284 | vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1); | ||
285 | vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2); | ||
286 | |||
287 | /* set registers up for guest */ | ||
288 | |||
289 | if (dbg->bp[0]) { | ||
290 | mtspr(SPRN_IAC1, dbg->bp[0]); | ||
291 | dbcr0 |= DBCR0_IAC1 | DBCR0_IDM; | ||
292 | } | ||
293 | if (dbg->bp[1]) { | ||
294 | mtspr(SPRN_IAC2, dbg->bp[1]); | ||
295 | dbcr0 |= DBCR0_IAC2 | DBCR0_IDM; | ||
296 | } | ||
297 | if (dbg->bp[2]) { | ||
298 | mtspr(SPRN_IAC3, dbg->bp[2]); | ||
299 | dbcr0 |= DBCR0_IAC3 | DBCR0_IDM; | ||
300 | } | ||
301 | if (dbg->bp[3]) { | ||
302 | mtspr(SPRN_IAC4, dbg->bp[3]); | ||
303 | dbcr0 |= DBCR0_IAC4 | DBCR0_IDM; | ||
304 | } | ||
305 | |||
306 | mtspr(SPRN_DBCR0, dbcr0); | ||
307 | mtspr(SPRN_DBCR1, 0); | ||
308 | mtspr(SPRN_DBCR2, 0); | ||
309 | } | ||
310 | |||
311 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 218 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
312 | { | 219 | { |
313 | int i; | ||
314 | |||
315 | if (vcpu->guest_debug.enabled) | 220 | if (vcpu->guest_debug.enabled) |
316 | kvmppc_load_guest_debug_registers(vcpu); | 221 | kvmppc_core_load_guest_debugstate(vcpu); |
317 | 222 | ||
318 | /* Mark every guest entry in the shadow TLB entry modified, so that they | 223 | kvmppc_core_vcpu_load(vcpu, cpu); |
319 | * will all be reloaded on the next vcpu run (instead of being | ||
320 | * demand-faulted). */ | ||
321 | for (i = 0; i <= tlb_44x_hwater; i++) | ||
322 | kvmppc_tlbe_set_modified(vcpu, i); | ||
323 | } | 224 | } |
324 | 225 | ||
325 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 226 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
326 | { | 227 | { |
327 | if (vcpu->guest_debug.enabled) | 228 | if (vcpu->guest_debug.enabled) |
328 | kvmppc_restore_host_debug_state(vcpu); | 229 | kvmppc_core_load_host_debugstate(vcpu); |
329 | 230 | ||
330 | /* Don't leave guest TLB entries resident when being de-scheduled. */ | 231 | /* Don't leave guest TLB entries resident when being de-scheduled. */ |
331 | /* XXX It would be nice to differentiate between heavyweight exit and | 232 | /* XXX It would be nice to differentiate between heavyweight exit and |
332 | * sched_out here, since we could avoid the TLB flush for heavyweight | 233 | * sched_out here, since we could avoid the TLB flush for heavyweight |
333 | * exits. */ | 234 | * exits. */ |
334 | _tlbil_all(); | 235 | _tlbil_all(); |
236 | kvmppc_core_vcpu_put(vcpu); | ||
335 | } | 237 | } |
336 | 238 | ||
337 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | 239 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, |
@@ -355,14 +257,14 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | |||
355 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | 257 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, |
356 | struct kvm_run *run) | 258 | struct kvm_run *run) |
357 | { | 259 | { |
358 | u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 260 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; |
359 | *gpr = run->dcr.data; | 261 | *gpr = run->dcr.data; |
360 | } | 262 | } |
361 | 263 | ||
362 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | 264 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
363 | struct kvm_run *run) | 265 | struct kvm_run *run) |
364 | { | 266 | { |
365 | u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 267 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; |
366 | 268 | ||
367 | if (run->mmio.len > sizeof(*gpr)) { | 269 | if (run->mmio.len > sizeof(*gpr)) { |
368 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | 270 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
@@ -460,7 +362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
460 | vcpu->arch.dcr_needed = 0; | 362 | vcpu->arch.dcr_needed = 0; |
461 | } | 363 | } |
462 | 364 | ||
463 | kvmppc_check_and_deliver_interrupts(vcpu); | 365 | kvmppc_core_deliver_interrupts(vcpu); |
464 | 366 | ||
465 | local_irq_disable(); | 367 | local_irq_disable(); |
466 | kvm_guest_enter(); | 368 | kvm_guest_enter(); |
@@ -478,7 +380,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
478 | 380 | ||
479 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | 381 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) |
480 | { | 382 | { |
481 | kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL); | 383 | kvmppc_core_queue_external(vcpu, irq); |
482 | 384 | ||
483 | if (waitqueue_active(&vcpu->wq)) { | 385 | if (waitqueue_active(&vcpu->wq)) { |
484 | wake_up_interruptible(&vcpu->wq); | 386 | wake_up_interruptible(&vcpu->wq); |
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c new file mode 100644 index 000000000000..47ee603f558e --- /dev/null +++ b/arch/powerpc/kvm/timing.c | |||
@@ -0,0 +1,239 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | ||
18 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
19 | */ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/debugfs.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #include <asm/time.h> | ||
28 | #include <asm-generic/div64.h> | ||
29 | |||
30 | #include "timing.h" | ||
31 | |||
32 | void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) | ||
33 | { | ||
34 | int i; | ||
35 | |||
36 | /* pause guest execution to avoid concurrent updates */ | ||
37 | mutex_lock(&vcpu->mutex); /* may sleep: take it before disabling IRQs */ ||
38 | local_irq_disable(); ||
39 | |||
40 | vcpu->arch.last_exit_type = 0xDEAD; | ||
41 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | ||
42 | vcpu->arch.timing_count_type[i] = 0; | ||
43 | vcpu->arch.timing_max_duration[i] = 0; | ||
44 | vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF; | ||
45 | vcpu->arch.timing_sum_duration[i] = 0; | ||
46 | vcpu->arch.timing_sum_quad_duration[i] = 0; | ||
47 | } | ||
48 | vcpu->arch.timing_last_exit = 0; | ||
49 | vcpu->arch.timing_exit.tv64 = 0; | ||
50 | vcpu->arch.timing_last_enter.tv64 = 0; | ||
51 | |||
52 | local_irq_enable(); ||
53 | mutex_unlock(&vcpu->mutex); ||
54 | } | ||
55 | |||
56 | static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) | ||
57 | { | ||
58 | u64 old; | ||
59 | |||
60 | do_div(duration, tb_ticks_per_usec); | ||
61 | if (unlikely(duration > 0xFFFFFFFF)) { | ||
62 | printk(KERN_ERR"%s - duration too big -> overflow" | ||
63 | " duration %lld type %d exit #%d\n", | ||
64 | __func__, duration, type, | ||
65 | vcpu->arch.timing_count_type[type]); | ||
66 | return; | ||
67 | } | ||
68 | |||
69 | vcpu->arch.timing_count_type[type]++; | ||
70 | |||
71 | /* sum */ | ||
72 | old = vcpu->arch.timing_sum_duration[type]; | ||
73 | vcpu->arch.timing_sum_duration[type] += duration; | ||
74 | if (unlikely(old > vcpu->arch.timing_sum_duration[type])) { | ||
75 | printk(KERN_ERR"%s - wrap adding sum of durations" | ||
76 | " old %lld new %lld type %d exit # of type %d\n", | ||
77 | __func__, old, vcpu->arch.timing_sum_duration[type], | ||
78 | type, vcpu->arch.timing_count_type[type]); | ||
79 | } | ||
80 | |||
81 | /* sum of squares */ | ||
82 | old = vcpu->arch.timing_sum_quad_duration[type]; | ||
83 | vcpu->arch.timing_sum_quad_duration[type] += (duration*duration); | ||
84 | if (unlikely(old > vcpu->arch.timing_sum_quad_duration[type])) { | ||
85 | printk(KERN_ERR"%s - wrap adding sum of squared durations" | ||
86 | " old %lld new %lld type %d exit # of type %d\n", | ||
87 | __func__, old, | ||
88 | vcpu->arch.timing_sum_quad_duration[type], | ||
89 | type, vcpu->arch.timing_count_type[type]); | ||
90 | } | ||
91 | |||
92 | /* set min/max */ | ||
93 | if (unlikely(duration < vcpu->arch.timing_min_duration[type])) | ||
94 | vcpu->arch.timing_min_duration[type] = duration; | ||
95 | if (unlikely(duration > vcpu->arch.timing_max_duration[type])) | ||
96 | vcpu->arch.timing_max_duration[type] = duration; | ||
97 | } | ||
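add_exit_timing() keeps, per exit type, a count n (timing_count_type), a running sum S1 (timing_sum_duration) and a running sum of squares S2 (timing_sum_quad_duration); those three values are enough for a consumer to recover the mean and standard deviation offline. A sketch of the usual formulas (population form; dividing by n-1 instead is up to the tooling):

    \bar{x} = \frac{S_1}{n}, \qquad
    \sigma = \sqrt{\frac{S_2}{n} - \left(\frac{S_1}{n}\right)^2}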
98 | |||
99 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | u64 exit = vcpu->arch.timing_last_exit; | ||
102 | u64 enter = vcpu->arch.timing_last_enter.tv64; | ||
103 | |||
104 | /* save exit time; used on the next exit, once the re-entry time is known */ | ||
105 | vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64; | ||
106 | |||
107 | if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0)) | ||
108 | return; /* skip incomplete cycle (e.g. after reset) */ | ||
109 | |||
110 | /* update statistics for average and standard deviation */ | ||
111 | add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type); | ||
112 | /* enter -> timing_last_exit is time spent in guest - log this too */ | ||
113 | add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter), | ||
114 | TIMEINGUEST); | ||
115 | } | ||
116 | |||
117 | static const char *kvm_exit_names[__NUMBER_OF_KVM_EXIT_TYPES] = { | ||
118 | [MMIO_EXITS] = "MMIO", | ||
119 | [DCR_EXITS] = "DCR", | ||
120 | [SIGNAL_EXITS] = "SIGNAL", | ||
121 | [ITLB_REAL_MISS_EXITS] = "ITLBREAL", | ||
122 | [ITLB_VIRT_MISS_EXITS] = "ITLBVIRT", | ||
123 | [DTLB_REAL_MISS_EXITS] = "DTLBREAL", | ||
124 | [DTLB_VIRT_MISS_EXITS] = "DTLBVIRT", | ||
125 | [SYSCALL_EXITS] = "SYSCALL", | ||
126 | [ISI_EXITS] = "ISI", | ||
127 | [DSI_EXITS] = "DSI", | ||
128 | [EMULATED_INST_EXITS] = "EMULINST", | ||
129 | [EMULATED_MTMSRWE_EXITS] = "EMUL_WAIT", | ||
130 | [EMULATED_WRTEE_EXITS] = "EMUL_WRTEE", | ||
131 | [EMULATED_MTSPR_EXITS] = "EMUL_MTSPR", | ||
132 | [EMULATED_MFSPR_EXITS] = "EMUL_MFSPR", | ||
133 | [EMULATED_MTMSR_EXITS] = "EMUL_MTMSR", | ||
134 | [EMULATED_MFMSR_EXITS] = "EMUL_MFMSR", | ||
135 | [EMULATED_TLBSX_EXITS] = "EMUL_TLBSX", | ||
136 | [EMULATED_TLBWE_EXITS] = "EMUL_TLBWE", | ||
137 | [EMULATED_RFI_EXITS] = "EMUL_RFI", | ||
138 | [DEC_EXITS] = "DEC", | ||
139 | [EXT_INTR_EXITS] = "EXTINT", | ||
140 | [HALT_WAKEUP] = "HALT", | ||
141 | [USR_PR_INST] = "USR_PR_INST", | ||
142 | [FP_UNAVAIL] = "FP_UNAVAIL", | ||
143 | [DEBUG_EXITS] = "DEBUG", | ||
144 | [TIMEINGUEST] = "TIMEINGUEST" | ||
145 | }; | ||
146 | |||
147 | static int kvmppc_exit_timing_show(struct seq_file *m, void *private) | ||
148 | { | ||
149 | struct kvm_vcpu *vcpu = m->private; | ||
150 | int i; | ||
151 | |||
152 | seq_printf(m, "%s", "type count min max sum sum_squared\n"); | ||
153 | |||
154 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | ||
155 | seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n", | ||
156 | kvm_exit_names[i], | ||
157 | vcpu->arch.timing_count_type[i], | ||
158 | vcpu->arch.timing_min_duration[i], | ||
159 | vcpu->arch.timing_max_duration[i], | ||
160 | vcpu->arch.timing_sum_duration[i], | ||
161 | vcpu->arch.timing_sum_quad_duration[i]); | ||
162 | } | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | /* Write 'c' to clear the timing statistics. */ | ||
167 | static ssize_t kvmppc_exit_timing_write(struct file *file, | ||
168 | const char __user *user_buf, | ||
169 | size_t count, loff_t *ppos) | ||
170 | { | ||
171 | int err = -EINVAL; | ||
172 | char c; | ||
173 | |||
174 | if (count > 1) { | ||
175 | goto done; | ||
176 | } | ||
177 | |||
178 | if (get_user(c, user_buf)) { | ||
179 | err = -EFAULT; | ||
180 | goto done; | ||
181 | } | ||
182 | |||
183 | if (c == 'c') { | ||
184 | struct seq_file *seqf = (struct seq_file *)file->private_data; | ||
185 | struct kvm_vcpu *vcpu = seqf->private; | ||
186 | /* A write does not affect the buffer previously generated by | ||
187 | * show. The seq_file is locked here to prevent a race between | ||
188 | * init and a concurrent show call. */ | ||
189 | mutex_lock(&seqf->lock); | ||
190 | kvmppc_init_timing_stats(vcpu); | ||
191 | mutex_unlock(&seqf->lock); | ||
192 | err = count; | ||
193 | } | ||
194 | |||
195 | done: | ||
196 | return err; | ||
197 | } | ||
198 | |||
199 | static int kvmppc_exit_timing_open(struct inode *inode, struct file *file) | ||
200 | { | ||
201 | return single_open(file, kvmppc_exit_timing_show, inode->i_private); | ||
202 | } | ||
203 | |||
204 | static const struct file_operations kvmppc_exit_timing_fops = { | ||
205 | .owner = THIS_MODULE, | ||
206 | .open = kvmppc_exit_timing_open, | ||
207 | .read = seq_read, | ||
208 | .write = kvmppc_exit_timing_write, | ||
209 | .llseek = seq_lseek, | ||
210 | .release = single_release, | ||
211 | }; | ||
212 | |||
213 | void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id) | ||
214 | { | ||
215 | static char dbg_fname[50]; | ||
216 | struct dentry *debugfs_file; | ||
217 | |||
218 | snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing", | ||
219 | current->pid, id); | ||
220 | debugfs_file = debugfs_create_file(dbg_fname, 0666, | ||
221 | kvm_debugfs_dir, vcpu, | ||
222 | &kvmppc_exit_timing_fops); | ||
223 | |||
224 | if (!debugfs_file) { | ||
225 | printk(KERN_ERR"%s: error creating debugfs file %s\n", | ||
226 | __func__, dbg_fname); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | vcpu->arch.debugfs_exit_timing = debugfs_file; | ||
231 | } | ||
232 | |||
233 | void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) | ||
234 | { | ||
235 | if (vcpu->arch.debugfs_exit_timing) { | ||
236 | debugfs_remove(vcpu->arch.debugfs_exit_timing); | ||
237 | vcpu->arch.debugfs_exit_timing = NULL; | ||
238 | } | ||
239 | } | ||
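A minimal userspace sketch of how these per-vcpu files are meant to be used, assuming debugfs is mounted at /sys/kernel/debug and that the caller substitutes a real vm<pid>_vcpu<id>_timing path; it dumps the table, then writes 'c' to reset the counters via kvmppc_exit_timing_write() above (hypothetical helper, not part of this patch):

    /* exit_timing_dump.c - dump and clear one vcpu's exit-timing stats */
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        char buf[4096];
        size_t n;
        FILE *f;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <timing-file>\n", argv[0]);
            return 1;
        }
        f = fopen(argv[1], "r+");
        if (!f) {
            perror("fopen");
            return 1;
        }
        while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
            fwrite(buf, 1, n, stdout);          /* dump current statistics */
        if (fwrite("c", 1, 1, f) != 1 || fflush(f) != 0)
            perror("clear");                    /* a single 'c' resets all counters */
        fclose(f);
        return 0;
    }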
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h new file mode 100644 index 000000000000..bb13b1f3cd5a --- /dev/null +++ b/arch/powerpc/kvm/timing.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright IBM Corp. 2008 | ||
16 | * | ||
17 | * Authors: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | ||
18 | */ | ||
19 | |||
20 | #ifndef __POWERPC_KVM_EXITTIMING_H__ | ||
21 | #define __POWERPC_KVM_EXITTIMING_H__ | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | #include <asm/kvm_host.h> | ||
25 | |||
26 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
27 | void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu); | ||
28 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu); | ||
29 | void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id); | ||
30 | void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu); | ||
31 | |||
32 | static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) | ||
33 | { | ||
34 | vcpu->arch.last_exit_type = type; | ||
35 | } | ||
36 | |||
37 | #else | ||
38 | /* If exit timing is not configured, there is no need to build the C file. */ | ||
39 | static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {} | ||
40 | static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {} | ||
41 | static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, | ||
42 | unsigned int id) {} | ||
43 | static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {} | ||
44 | static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {} | ||
45 | #endif /* CONFIG_KVM_EXIT_TIMING */ | ||
46 | |||
47 | /* account the exit in kvm_stats */ | ||
48 | static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | ||
49 | { | ||
50 | /* type has to be known at build time for optimization */ | ||
51 | BUILD_BUG_ON(!__builtin_constant_p(type)); | ||
52 | switch (type) { | ||
53 | case EXT_INTR_EXITS: | ||
54 | vcpu->stat.ext_intr_exits++; | ||
55 | break; | ||
56 | case DEC_EXITS: | ||
57 | vcpu->stat.dec_exits++; | ||
58 | break; | ||
59 | case EMULATED_INST_EXITS: | ||
60 | vcpu->stat.emulated_inst_exits++; | ||
61 | break; | ||
62 | case DCR_EXITS: | ||
63 | vcpu->stat.dcr_exits++; | ||
64 | break; | ||
65 | case DSI_EXITS: | ||
66 | vcpu->stat.dsi_exits++; | ||
67 | break; | ||
68 | case ISI_EXITS: | ||
69 | vcpu->stat.isi_exits++; | ||
70 | break; | ||
71 | case SYSCALL_EXITS: | ||
72 | vcpu->stat.syscall_exits++; | ||
73 | break; | ||
74 | case DTLB_REAL_MISS_EXITS: | ||
75 | vcpu->stat.dtlb_real_miss_exits++; | ||
76 | break; | ||
77 | case DTLB_VIRT_MISS_EXITS: | ||
78 | vcpu->stat.dtlb_virt_miss_exits++; | ||
79 | break; | ||
80 | case MMIO_EXITS: | ||
81 | vcpu->stat.mmio_exits++; | ||
82 | break; | ||
83 | case ITLB_REAL_MISS_EXITS: | ||
84 | vcpu->stat.itlb_real_miss_exits++; | ||
85 | break; | ||
86 | case ITLB_VIRT_MISS_EXITS: | ||
87 | vcpu->stat.itlb_virt_miss_exits++; | ||
88 | break; | ||
89 | case SIGNAL_EXITS: | ||
90 | vcpu->stat.signal_exits++; | ||
91 | break; | ||
92 | } | ||
93 | } | ||
94 | |||
95 | /* wrapper to set the exit type and account for it in kvm_stats */ | ||
96 | static inline void kvmppc_account_exit(struct kvm_vcpu *vcpu, int type) | ||
97 | { | ||
98 | kvmppc_set_exit_type(vcpu, type); | ||
99 | kvmppc_account_exit_stat(vcpu, type); | ||
100 | } | ||
101 | |||
102 | #endif /* __POWERPC_KVM_EXITTIMING_H__ */ | ||
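The build-time assertion above enforces what the switch relies on: when the type argument is a literal, the compiler can fold kvmppc_account_exit_stat() down to a single counter increment. A standalone userspace sketch of that constant-folding idea (the names here are illustrative, not the kernel's):

    #include <stdio.h>

    enum { MMIO_EXITS, DCR_EXITS, NR_EXIT_TYPES };
    static unsigned long stat[NR_EXIT_TYPES];

    static inline void account_exit_stat(int type)
    {
        switch (type) {         /* folds away when 'type' is a constant */
        case MMIO_EXITS:
            stat[MMIO_EXITS]++;
            break;
        case DCR_EXITS:
            stat[DCR_EXITS]++;
            break;
        }
    }

    int main(void)
    {
        account_exit_stat(MMIO_EXITS);  /* a single increment at -O2 */
        printf("mmio exits: %lu\n", stat[MMIO_EXITS]);
        return 0;
    }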
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 201c7a5486cb..9920d6a7cf29 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -512,6 +512,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
512 | return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); | 512 | return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0); |
513 | } | 513 | } |
514 | 514 | ||
515 | unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) | ||
516 | { | ||
517 | unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start); | ||
518 | |||
519 | return 1UL << mmu_psize_to_shift(psize); | ||
520 | } | ||
521 | |||
515 | /* | 522 | /* |
516 | * Called by asm hashtable.S for doing lazy icache flush | 523 | * Called by asm hashtable.S for doing lazy icache flush |
517 | */ | 524 | */ |
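The new vma_mmu_pagesize() converts the slice's page-size index into bytes via its shift; a trivial userspace illustration of that last step (the shift value 24, i.e. a 16MiB huge page, is an assumption for the example):

    #include <stdio.h>

    int main(void)
    {
        unsigned int shift = 24;                /* assumed: a 16MiB slice */
        printf("%lu bytes\n", 1UL << shift);    /* prints 16777216 */
        return 0;
    }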
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 53b06ebb3f2f..f00f09a77f12 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -132,7 +132,7 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
132 | /* this should work for most non-highmem platforms */ | 132 | /* this should work for most non-highmem platforms */ |
133 | zone = pgdata->node_zones; | 133 | zone = pgdata->node_zones; |
134 | 134 | ||
135 | return __add_pages(zone, start_pfn, nr_pages); | 135 | return __add_pages(nid, zone, start_pfn, nr_pages); |
136 | } | 136 | } |
137 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 137 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
138 | 138 | ||
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c index dd499c3e9da7..83faa958b9d4 100644 --- a/arch/powerpc/oprofile/cell/spu_profiler.c +++ b/arch/powerpc/oprofile/cell/spu_profiler.c | |||
@@ -49,7 +49,7 @@ void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_rese | |||
49 | * of precision. This is close enough for the purpose at hand. | 49 | * of precision. This is close enough for the purpose at hand. |
50 | * | 50 | * |
51 | * The value of the timeout should be small enough that the hw | 51 | * The value of the timeout should be small enough that the hw |
52 | * trace buffer will not get more then about 1/3 full for the | 52 | * trace buffer will not get more than about 1/3 full for the |
53 | * maximum user specified (the LFSR value) hw sampling frequency. | 53 | * maximum user specified (the LFSR value) hw sampling frequency. |
54 | * This is to ensure the trace buffer will never fill even if the | 54 | * This is to ensure the trace buffer will never fill even if the |
55 | * kernel thread scheduling varies under a heavy system load. | 55 | * kernel thread scheduling varies under a heavy system load. |
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c index 906a0a2a9fe1..1410443731eb 100644 --- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c +++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c | |||
@@ -80,10 +80,10 @@ static void cpu_affinity_set(struct spu *spu, int cpu) | |||
80 | u64 route; | 80 | u64 route; |
81 | 81 | ||
82 | if (nr_cpus_node(spu->node)) { | 82 | if (nr_cpus_node(spu->node)) { |
83 | cpumask_t spumask = node_to_cpumask(spu->node); | 83 | const struct cpumask *spumask = cpumask_of_node(spu->node), |
84 | cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu)); | 84 | *cpumask = cpumask_of_node(cpu_to_node(cpu)); |
85 | 85 | ||
86 | if (!cpus_intersects(spumask, cpumask)) | 86 | if (!cpumask_intersects(spumask, cpumask)) |
87 | return; | 87 | return; |
88 | } | 88 | } |
89 | 89 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 6296bfd9cb0b..e309ef70a531 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -97,7 +97,6 @@ spufs_new_inode(struct super_block *sb, int mode) | |||
97 | inode->i_mode = mode; | 97 | inode->i_mode = mode; |
98 | inode->i_uid = current_fsuid(); | 98 | inode->i_uid = current_fsuid(); |
99 | inode->i_gid = current_fsgid(); | 99 | inode->i_gid = current_fsgid(); |
100 | inode->i_blocks = 0; | ||
101 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 100 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
102 | out: | 101 | out: |
103 | return inode; | 102 | return inode; |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 2ad914c47493..6a0ad196aeb3 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -166,9 +166,9 @@ void spu_update_sched_info(struct spu_context *ctx) | |||
166 | static int __node_allowed(struct spu_context *ctx, int node) | 166 | static int __node_allowed(struct spu_context *ctx, int node) |
167 | { | 167 | { |
168 | if (nr_cpus_node(node)) { | 168 | if (nr_cpus_node(node)) { |
169 | cpumask_t mask = node_to_cpumask(node); | 169 | const struct cpumask *mask = cpumask_of_node(node); |
170 | 170 | ||
171 | if (cpus_intersects(mask, ctx->cpus_allowed)) | 171 | if (cpumask_intersects(mask, &ctx->cpus_allowed)) |
172 | return 1; | 172 | return 1; |
173 | } | 173 | } |
174 | 174 | ||
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index f7a69021b7bf..84e058f1e1cc 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -332,7 +332,7 @@ static void xics_eoi_lpar(unsigned int virq) | |||
332 | lpar_xirr_info_set((0xff << 24) | irq); | 332 | lpar_xirr_info_set((0xff << 24) | irq); |
333 | } | 333 | } |
334 | 334 | ||
335 | static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | 335 | static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) |
336 | { | 336 | { |
337 | unsigned int irq; | 337 | unsigned int irq; |
338 | int status; | 338 | int status; |
@@ -870,7 +870,7 @@ void xics_migrate_irqs_away(void) | |||
870 | 870 | ||
871 | /* Reset affinity to all cpus */ | 871 | /* Reset affinity to all cpus */ |
872 | irq_desc[virq].affinity = CPU_MASK_ALL; | 872 | irq_desc[virq].affinity = CPU_MASK_ALL; |
873 | desc->chip->set_affinity(virq, CPU_MASK_ALL); | 873 | desc->chip->set_affinity(virq, cpu_all_mask); |
874 | unlock: | 874 | unlock: |
875 | spin_unlock_irqrestore(&desc->lock, flags); | 875 | spin_unlock_irqrestore(&desc->lock, flags); |
876 | } | 876 | } |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index c82babb70074..3e0d89dcdba2 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -806,7 +806,7 @@ static void mpic_end_ipi(unsigned int irq) | |||
806 | 806 | ||
807 | #endif /* CONFIG_SMP */ | 807 | #endif /* CONFIG_SMP */ |
808 | 808 | ||
809 | void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | 809 | void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) |
810 | { | 810 | { |
811 | struct mpic *mpic = mpic_from_irq(irq); | 811 | struct mpic *mpic = mpic_from_irq(irq); |
812 | unsigned int src = mpic_irq_to_hw(irq); | 812 | unsigned int src = mpic_irq_to_hw(irq); |
@@ -818,7 +818,7 @@ void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
818 | } else { | 818 | } else { |
819 | cpumask_t tmp; | 819 | cpumask_t tmp; |
820 | 820 | ||
821 | cpus_and(tmp, cpumask, cpu_online_map); | 821 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
822 | 822 | ||
823 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), | 823 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), |
824 | mpic_physmask(cpus_addr(tmp)[0])); | 824 | mpic_physmask(cpus_addr(tmp)[0])); |
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h index 6209c62a426d..3cef2af10f42 100644 --- a/arch/powerpc/sysdev/mpic.h +++ b/arch/powerpc/sysdev/mpic.h | |||
@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic) | |||
36 | 36 | ||
37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); | 37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); |
38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); | 38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); |
39 | extern void mpic_set_affinity(unsigned int irq, cpumask_t cpumask); | 39 | extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); |
40 | 40 | ||
41 | #endif /* _POWERPC_SYSDEV_MPIC_H */ | 41 | #endif /* _POWERPC_SYSDEV_MPIC_H */ |