author    Hollis Blanchard <hollisb@us.ibm.com>  2008-11-05 10:36:12 -0500
committer Avi Kivity <avi@redhat.com>            2008-12-31 09:51:50 -0500
commit    0f55dc481ea5c4f87fc0161cb1b8c6e2cafae8fc
tree      18b5bb0ad449607690329c6c23083886cb39a9bc /arch/powerpc/kvm/44x_tlb.c
parent    a0d7b9f246074fab1f42678d203ef4ba281505f2
KVM: ppc: Rename "struct tlbe" to "struct kvmppc_44x_tlbe"
This will ease ports to other cores. Also remove unused "struct kvm_tlb"
while we're at it.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm/44x_tlb.c')
 arch/powerpc/kvm/44x_tlb.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)
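For context, the type being renamed is the in-memory image of a PowerPC 440 TLB entry that this file keeps in vcpu->arch.guest_tlb[] and vcpu->arch.shadow_tlb[]. The sketch below shows what such a definition looks like; the field names are inferred from the accessors used in 44x_tlb.c (tlbe->word2, get_tlb_eaddr(), the tid comparisons) and are illustrative only, not a copy of the arch headers.

#include <linux/types.h>

/* Sketch only: the entry type renamed by this commit, as 44x_tlb.c uses it.
 * Field names are inferred from this file's accessors; the authoritative
 * definition lives in the powerpc KVM headers. */
struct kvmppc_44x_tlbe {
	u32 tid;	/* translation ID, compared against the guest PID */
	u32 word0;	/* EPN, page size and valid bit */
	u32 word1;	/* RPN */
	u32 word2;	/* permission bits, e.g. PPC44x_TLB_SW | PPC44x_TLB_UW */
};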
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index dd75ab84e04..5152fe5b2a9 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -86,7 +86,7 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 
 	/* XXX Replace loop with fancy data structures. */
 	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
-		struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+		struct kvmppc_44x_tlbe *tlbe = &vcpu->arch.guest_tlb[i];
 		unsigned int tid;
 
 		if (eaddr < get_tlb_eaddr(tlbe))
@@ -111,7 +111,8 @@ int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
 	return -1;
 }
 
-struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+struct kvmppc_44x_tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu,
+                                               gva_t eaddr)
 {
 	unsigned int as = !!(vcpu->arch.msr & MSR_IS);
 	unsigned int index;
@@ -122,7 +123,8 @@ struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
 	return &vcpu->arch.guest_tlb[index];
 }
 
-struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+struct kvmppc_44x_tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu,
+                                               gva_t eaddr)
 {
 	unsigned int as = !!(vcpu->arch.msr & MSR_DS);
 	unsigned int index;
@@ -133,7 +135,7 @@ struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
 	return &vcpu->arch.guest_tlb[index];
 }
 
-static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+static int kvmppc_44x_tlbe_is_writable(struct kvmppc_44x_tlbe *tlbe)
 {
 	return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
 }
@@ -141,7 +143,7 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
 static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
                                       unsigned int index)
 {
-	struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
+	struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
 	struct page *page = vcpu->arch.shadow_pages[index];
 
 	if (get_tlb_v(stlbe)) {
@@ -171,7 +173,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 		    u32 flags)
 {
 	struct page *new_page;
-	struct tlbe *stlbe;
+	struct kvmppc_44x_tlbe *stlbe;
 	hpa_t hpaddr;
 	unsigned int victim;
 
@@ -227,7 +229,7 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 	/* XXX Replace loop with fancy data structures. */
 	for (i = 0; i <= tlb_44x_hwater; i++) {
-		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+		struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
 		unsigned int tid;
 
 		if (!get_tlb_v(stlbe))
@@ -262,7 +264,7 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 	if (vcpu->arch.swap_pid) {
 		/* XXX Replace loop with fancy data structures. */
 		for (i = 0; i <= tlb_44x_hwater; i++) {
-			struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+			struct kvmppc_44x_tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
 
 			/* Future optimization: clear only userspace mappings. */
 			kvmppc_44x_shadow_release(vcpu, i);
@@ -279,7 +281,7 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 }
 
 static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
-                             const struct tlbe *tlbe)
+                             const struct kvmppc_44x_tlbe *tlbe)
 {
 	gpa_t gpa;
 
@@ -305,7 +307,7 @@ int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	u64 raddr;
 	u64 asid;
 	u32 flags;
-	struct tlbe *tlbe;
+	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int index;
 
 	index = vcpu->arch.gpr[ra];