| author | Alexander Graf <agraf@suse.de> | 2010-07-29 09:04:19 -0400 |
| --- | --- | --- |
| committer | Avi Kivity <avi@redhat.com> | 2010-10-24 04:50:58 -0400 |
| commit | 2d27fc5eac0205588cb59ae138062e5e96695276 (patch) | |
| tree | e621ed3ade57faca4650d6bd46945c578362b2bb /arch/powerpc | |
| parent | 49451389ecc2b4336c305678c210b25fadd18994 (diff) | |
KVM: PPC: Add book3s_32 tlbie flush acceleration
On Book3s_32 the tlbie instruction flushes effective addresses by the mask 0x0ffff000, i.e. without the segment bits. This is hard to serve from a hash that is keyed on the full page-aligned address (eaddr & ~0xfff), so to speed up that flush path we also keep a dedicated hash keyed on the masked address.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
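
To make the win concrete, below is a minimal userspace sketch (not the kernel code) of the lookup the new hash replaces: a tlbie-style flush only knows eaddr & 0x0ffff000, so a hash keyed on the full effective address has to be probed once per possible segment prefix (16 times), while a hash keyed on the masked address needs a single bucket. hash64_sketch() is a toy stand-in for the kernel's hash_64(), the example address is arbitrary, and PTE_SIZE is assumed to be the 4k page shift (12); only the HPTEG_HASH_BITS_* values are taken from the patch.

```c
/*
 * Illustrative only -- a userspace sketch, not kernel code.
 * hash64_sketch() stands in for the kernel's hash_64().
 */
#include <stdio.h>
#include <stdint.h>

#define PTE_SIZE                 12   /* assumed 4k page shift */
#define HPTEG_HASH_BITS_PTE      13   /* from the patch */
#define HPTEG_HASH_BITS_PTE_LONG 12   /* from the patch */

static uint64_t hash64_sketch(uint64_t val, unsigned int bits)
{
	/* toy multiplicative hash with the same shape as hash_64(val, bits) */
	return (val * 0x9e37fffffffc0001ULL) >> (64 - bits);
}

/* per-page hash: keyed on the full effective address (minus page offset) */
static uint64_t hash_pte(uint64_t eaddr)
{
	return hash64_sketch(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

/* new hash: keyed only on the bits a Book3s_32 tlbie provides */
static uint64_t hash_pte_long(uint64_t eaddr)
{
	return hash64_sketch((eaddr & 0x0ffff000) >> PTE_SIZE,
			     HPTEG_HASH_BITS_PTE_LONG);
}

int main(void)
{
	uint64_t ea = 0x0345f000;   /* arbitrary example, already masked */

	/* old scheme: probe the per-page hash once per possible segment */
	for (uint64_t seg = 0; seg < 0x100000000ULL; seg += 0x10000000ULL)
		printf("old: probe pte bucket %llu\n",
		       (unsigned long long)hash_pte(ea | seg));

	/* new scheme: one probe of the dedicated pte_long hash */
	printf("new: probe pte_long bucket %llu\n",
	       (unsigned long long)hash_pte_long(ea));
	return 0;
}
```

This mirrors what the patch does below: the removed loop in kvmppc_mmu_pte_flush() walked all 16 segment prefixes, and the new kvmppc_mmu_pte_flush_long() replaces that walk with a single lookup in hpte_hash_pte_long.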
Diffstat (limited to 'arch/powerpc')

| mode | path | lines changed |
| --- | --- | --- |
| -rw-r--r-- | arch/powerpc/include/asm/kvm_host.h | 4 |
| -rw-r--r-- | arch/powerpc/kvm/book3s_mmu_hpte.c | 40 |

2 files changed, 39 insertions, 5 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fafc71aa3343..bba3b9b72a39 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -42,9 +42,11 @@
 
 #define HPTEG_CACHE_NUM (1 << 15)
 #define HPTEG_HASH_BITS_PTE 13
+#define HPTEG_HASH_BITS_PTE_LONG 12
 #define HPTEG_HASH_BITS_VPTE 13
 #define HPTEG_HASH_BITS_VPTE_LONG 5
 #define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE)
+#define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG)
 
@@ -163,6 +165,7 @@ struct kvmppc_mmu {
 
 struct hpte_cache {
 	struct hlist_node list_pte;
+	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
 	struct rcu_head rcu_head;
@@ -293,6 +296,7 @@ struct kvm_vcpu_arch {
 
 #ifdef CONFIG_PPC_BOOK3S
 	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
 	int hpte_cache_count;
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index b64389362446..02c64ab99c97 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -45,6 +45,12 @@ static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
 	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
 }
 
+static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
+{
+	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
+		       HPTEG_HASH_BITS_PTE_LONG);
+}
+
 static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
 {
 	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
@@ -66,6 +72,11 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
 	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
 
+	/* Add to ePTE_long list */
+	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
+	hlist_add_head_rcu(&pte->list_pte_long,
+			   &vcpu->arch.hpte_hash_pte_long[index]);
+
 	/* Add to vPTE list */
 	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
 	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
@@ -99,6 +110,7 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 	spin_lock(&vcpu->arch.mmu_lock);
 
 	hlist_del_init_rcu(&pte->list_pte);
+	hlist_del_init_rcu(&pte->list_pte_long);
 	hlist_del_init_rcu(&pte->list_vpte);
 	hlist_del_init_rcu(&pte->list_vpte_long);
 
@@ -150,10 +162,28 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 	rcu_read_unlock();
 }
 
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
-	u64 i;
+	struct hlist_head *list;
+	struct hlist_node *node;
+	struct hpte_cache *pte;
+
+	/* Find the list of entries in the map */
+	list = &vcpu->arch.hpte_hash_pte_long[
+			kvmppc_mmu_hash_pte_long(guest_ea)];
 
+	rcu_read_lock();
+
+	/* Check the list for matching entries and invalidate */
+	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
+		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
+			invalidate_pte(vcpu, pte);
+
+	rcu_read_unlock();
+}
+
+void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
+{
 	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
 		    vcpu->arch.hpte_cache_count, guest_ea, ea_mask);
 
@@ -164,9 +194,7 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
 		break;
 	case 0x0ffff000:
-		/* 32-bit flush w/o segment, go through all possible segments */
-		for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL)
-			kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL);
+		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
 		break;
 	case 0:
 		/* Doing a complete flush -> start from scratch */
@@ -292,6 +320,8 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
 	/* init hpte lookup hashes */
 	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
 				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
+	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
+				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
 	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
 				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
 	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,