author     Alexander Graf <agraf@suse.de>   2010-07-29 09:04:17 -0400
committer  Avi Kivity <avi@redhat.com>      2010-10-24 04:50:58 -0400
commit     2e0908afaf03675d22e40ce45a66b8d2070214ac
tree       ce0d8f7cd57b50750b14740d2443fd6476db388d  /arch/powerpc
parent     5302104235f0e9f05781b92a4ab25d20e4537f56
KVM: PPC: RCU'ify the Book3s MMU
So far we've been running all of this code without locking of any sort. That
wasn't really an issue, because I didn't see any parallel access to the shadow
MMU code coming.

But then I started implementing dirty bitmapping for MOL, which runs its video
code in a separate thread, so suddenly the dirty bitmap code was running in
parallel with the shadow MMU code. And with that came trouble.

So I went ahead and made the MMU-modifying functions as parallelizable as I
could. I hope I didn't screw up too much of the RCU logic :-). If you know your
way around RCU and locking and what needs to be done when, please take a look
at this patch.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc')
 -rw-r--r--  arch/powerpc/include/asm/kvm_host.h |  2
 -rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c  | 78
 2 files changed, 61 insertions(+), 19 deletions(-)
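
The patch applies the standard RCU-protected hlist discipline to all three hash
lists: writers serialize list updates with the new per-vcpu mmu_lock and use the
_rcu list helpers, readers walk the lists under rcu_read_lock(), and entries are
freed through call_rcu() only after all pre-existing readers are done. Below is a
minimal, self-contained sketch of that pattern; the names (struct item, demo_hash,
demo_lock, demo_*) are hypothetical and only illustrate the idiom, not the actual
KVM code in the diff that follows.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical element type; stands in for struct hpte_cache. */
struct item {
	struct hlist_node link;
	struct rcu_head rcu_head;
	u64 key;
};

static struct hlist_head demo_hash[16];
static DEFINE_SPINLOCK(demo_lock);	/* stands in for vcpu->arch.mmu_lock */

/* Writer side: updates are serialized by the spinlock and use the _rcu helpers. */
static void demo_insert(struct item *it)
{
	spin_lock(&demo_lock);
	hlist_add_head_rcu(&it->link, &demo_hash[it->key & 15]);
	spin_unlock(&demo_lock);
}

/* Deferred free: runs only after every pre-existing RCU reader has finished. */
static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu_head));
}

static void demo_remove(struct item *it)
{
	spin_lock(&demo_lock);
	hlist_del_init_rcu(&it->link);
	spin_unlock(&demo_lock);
	call_rcu(&it->rcu_head, demo_free_rcu);
}

/*
 * Reader side: lockless traversal under rcu_read_lock(); it never blocks
 * writers. The four-argument hlist_for_each_entry_rcu() matches the kernel
 * version this patch was written against.
 */
static bool demo_contains(u64 key)
{
	struct hlist_node *node;
	struct item *it;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(it, node, &demo_hash[key & 15], link)
		if (it->key == key) {
			found = true;
			break;
		}
	rcu_read_unlock();
	return found;
}

In the patch itself the flush routines play the reader role, kvmppc_mmu_hpte_cache_map()
and invalidate_pte() are the writers, and free_pte_rcu() takes the place of demo_free_rcu().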
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e1da77579e65..fafc71aa3343 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -165,6 +165,7 @@ struct hpte_cache {
 	struct hlist_node list_pte;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+	struct rcu_head rcu_head;
 	u64 host_va;
 	u64 pfn;
 	ulong slot;
@@ -295,6 +296,7 @@ struct kvm_vcpu_arch {
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
 	int hpte_cache_count;
+	spinlock_t mmu_lock;
 #endif
 };
 
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 4868d4a7ebc5..b64389362446 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -60,68 +60,94 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	u64 index;
 
+	spin_lock(&vcpu->arch.mmu_lock);
+
 	/* Add to ePTE list */
 	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
-	hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
+	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);
 
 	/* Add to vPTE list */
 	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
-	hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
+	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);
 
 	/* Add to vPTE_long list */
 	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
-	hlist_add_head(&pte->list_vpte_long,
+	hlist_add_head_rcu(&pte->list_vpte_long,
 		       &vcpu->arch.hpte_hash_vpte_long[index]);
+
+	spin_unlock(&vcpu->arch.mmu_lock);
+}
+
+static void free_pte_rcu(struct rcu_head *head)
+{
+	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
+	kmem_cache_free(hpte_cache, pte);
 }
 
 static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
+	/* pte already invalidated? */
+	if (hlist_unhashed(&pte->list_pte))
+		return;
+
 	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
 		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
 
 	/* Different for 32 and 64 bit */
 	kvmppc_mmu_invalidate_pte(vcpu, pte);
 
+	spin_lock(&vcpu->arch.mmu_lock);
+
+	hlist_del_init_rcu(&pte->list_pte);
+	hlist_del_init_rcu(&pte->list_vpte);
+	hlist_del_init_rcu(&pte->list_vpte_long);
+
+	spin_unlock(&vcpu->arch.mmu_lock);
+
 	if (pte->pte.may_write)
 		kvm_release_pfn_dirty(pte->pfn);
 	else
 		kvm_release_pfn_clean(pte->pfn);
 
-	hlist_del(&pte->list_pte);
-	hlist_del(&pte->list_vpte);
-	hlist_del(&pte->list_vpte_long);
-
 	vcpu->arch.hpte_cache_count--;
-	kmem_cache_free(hpte_cache, pte);
+	call_rcu(&pte->rcu_head, free_pte_rcu);
 }
 
 static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
 {
 	struct hpte_cache *pte;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *node;
 	int i;
 
+	rcu_read_lock();
+
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 			invalidate_pte(vcpu, pte);
 	}
+
+	rcu_read_unlock();
 }
 
 static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
 {
 	struct hlist_head *list;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *node;
 	struct hpte_cache *pte;
 
 	/* Find the list of entries in the map */
 	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];
 
+	rcu_read_lock();
+
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
+	hlist_for_each_entry_rcu(pte, node, list, list_pte)
 		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
 			invalidate_pte(vcpu, pte);
+
+	rcu_read_unlock();
 }
 
 void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
@@ -156,33 +182,41 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
 static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct hlist_head *list;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xfffffffffULL;
 
 	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];
 
+	rcu_read_lock();
+
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte)
+	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
+
+	rcu_read_unlock();
 }
 
 /* Flush with mask 0xffffff000 */
 static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
 {
 	struct hlist_head *list;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *node;
 	struct hpte_cache *pte;
 	u64 vp_mask = 0xffffff000ULL;
 
 	list = &vcpu->arch.hpte_hash_vpte_long[
 		kvmppc_mmu_hash_vpte_long(guest_vp)];
 
+	rcu_read_lock();
+
 	/* Check the list for matching entries and invalidate */
-	hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 		if ((pte->pte.vpage & vp_mask) == guest_vp)
 			invalidate_pte(vcpu, pte);
+
+	rcu_read_unlock();
 }
 
 void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
@@ -206,21 +240,25 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
 
 void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
 {
-	struct hlist_node *node, *tmp;
+	struct hlist_node *node;
 	struct hpte_cache *pte;
 	int i;
 
 	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
 		    vcpu->arch.hpte_cache_count, pa_start, pa_end);
 
+	rcu_read_lock();
+
 	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
 		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];
 
-		hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long)
+		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
 			if ((pte->pte.raddr >= pa_start) &&
 			    (pte->pte.raddr < pa_end))
 				invalidate_pte(vcpu, pte);
 	}
+
+	rcu_read_unlock();
 }
 
 struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
@@ -259,6 +297,8 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
 	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
 				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));
 
+	spin_lock_init(&vcpu->arch.mmu_lock);
+
 	return 0;
 }
 
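
One design note on invalidate_pte(): because the flush paths now run concurrently
under rcu_read_lock(), two of them can reach the same hpte_cache entry. The new
hlist_unhashed() check at the top of the function, together with the switch from
hlist_del() to hlist_del_init_rcu() (which clears the node's pprev pointer when
unlinking, which is exactly what hlist_unhashed() tests), appears intended to keep
such an entry from being unlinked and freed twice. A sketch of that guard pattern,
reusing the hypothetical demo_* names from the earlier example:

/* Sketch only: the "already invalidated?" guard, with the demo_* names above. */
static void demo_invalidate(struct item *it)
{
	/* Unlinked by a concurrent flush? hlist_del_init_rcu() cleared pprev,
	 * so hlist_unhashed() reports the node as removed. */
	if (hlist_unhashed(&it->link))
		return;

	spin_lock(&demo_lock);
	hlist_del_init_rcu(&it->link);
	spin_unlock(&demo_lock);
	call_rcu(&it->rcu_head, demo_free_rcu);
}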