diff options
Diffstat (limited to 'arch/powerpc/kvm/book3s_mmu_hpte.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_mmu_hpte.c | 18 |
1 files changed, 6 insertions, 12 deletions
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 2c86b0d63714..da8b13c4b776 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -124,7 +124,6 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) | |||
124 | { | 124 | { |
125 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 125 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
126 | struct hpte_cache *pte; | 126 | struct hpte_cache *pte; |
127 | struct hlist_node *node; | ||
128 | int i; | 127 | int i; |
129 | 128 | ||
130 | rcu_read_lock(); | 129 | rcu_read_lock(); |
@@ -132,7 +131,7 @@ static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) | |||
132 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { | 131 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { |
133 | struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; | 132 | struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; |
134 | 133 | ||
135 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) | 134 | hlist_for_each_entry_rcu(pte, list, list_vpte_long) |
136 | invalidate_pte(vcpu, pte); | 135 | invalidate_pte(vcpu, pte); |
137 | } | 136 | } |
138 | 137 | ||
@@ -143,7 +142,6 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) | |||
143 | { | 142 | { |
144 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 143 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
145 | struct hlist_head *list; | 144 | struct hlist_head *list; |
146 | struct hlist_node *node; | ||
147 | struct hpte_cache *pte; | 145 | struct hpte_cache *pte; |
148 | 146 | ||
149 | /* Find the list of entries in the map */ | 147 | /* Find the list of entries in the map */ |
@@ -152,7 +150,7 @@ static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) | |||
152 | rcu_read_lock(); | 150 | rcu_read_lock(); |
153 | 151 | ||
154 | /* Check the list for matching entries and invalidate */ | 152 | /* Check the list for matching entries and invalidate */ |
155 | hlist_for_each_entry_rcu(pte, node, list, list_pte) | 153 | hlist_for_each_entry_rcu(pte, list, list_pte) |
156 | if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) | 154 | if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) |
157 | invalidate_pte(vcpu, pte); | 155 | invalidate_pte(vcpu, pte); |
158 | 156 | ||
@@ -163,7 +161,6 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) | |||
163 | { | 161 | { |
164 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 162 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
165 | struct hlist_head *list; | 163 | struct hlist_head *list; |
166 | struct hlist_node *node; | ||
167 | struct hpte_cache *pte; | 164 | struct hpte_cache *pte; |
168 | 165 | ||
169 | /* Find the list of entries in the map */ | 166 | /* Find the list of entries in the map */ |
@@ -173,7 +170,7 @@ static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) | |||
173 | rcu_read_lock(); | 170 | rcu_read_lock(); |
174 | 171 | ||
175 | /* Check the list for matching entries and invalidate */ | 172 | /* Check the list for matching entries and invalidate */ |
176 | hlist_for_each_entry_rcu(pte, node, list, list_pte_long) | 173 | hlist_for_each_entry_rcu(pte, list, list_pte_long) |
177 | if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) | 174 | if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) |
178 | invalidate_pte(vcpu, pte); | 175 | invalidate_pte(vcpu, pte); |
179 | 176 | ||
@@ -207,7 +204,6 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) | |||
207 | { | 204 | { |
208 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 205 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
209 | struct hlist_head *list; | 206 | struct hlist_head *list; |
210 | struct hlist_node *node; | ||
211 | struct hpte_cache *pte; | 207 | struct hpte_cache *pte; |
212 | u64 vp_mask = 0xfffffffffULL; | 208 | u64 vp_mask = 0xfffffffffULL; |
213 | 209 | ||
@@ -216,7 +212,7 @@ static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) | |||
216 | rcu_read_lock(); | 212 | rcu_read_lock(); |
217 | 213 | ||
218 | /* Check the list for matching entries and invalidate */ | 214 | /* Check the list for matching entries and invalidate */ |
219 | hlist_for_each_entry_rcu(pte, node, list, list_vpte) | 215 | hlist_for_each_entry_rcu(pte, list, list_vpte) |
220 | if ((pte->pte.vpage & vp_mask) == guest_vp) | 216 | if ((pte->pte.vpage & vp_mask) == guest_vp) |
221 | invalidate_pte(vcpu, pte); | 217 | invalidate_pte(vcpu, pte); |
222 | 218 | ||
@@ -228,7 +224,6 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) | |||
228 | { | 224 | { |
229 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 225 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
230 | struct hlist_head *list; | 226 | struct hlist_head *list; |
231 | struct hlist_node *node; | ||
232 | struct hpte_cache *pte; | 227 | struct hpte_cache *pte; |
233 | u64 vp_mask = 0xffffff000ULL; | 228 | u64 vp_mask = 0xffffff000ULL; |
234 | 229 | ||
@@ -238,7 +233,7 @@ static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) | |||
238 | rcu_read_lock(); | 233 | rcu_read_lock(); |
239 | 234 | ||
240 | /* Check the list for matching entries and invalidate */ | 235 | /* Check the list for matching entries and invalidate */ |
241 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) | 236 | hlist_for_each_entry_rcu(pte, list, list_vpte_long) |
242 | if ((pte->pte.vpage & vp_mask) == guest_vp) | 237 | if ((pte->pte.vpage & vp_mask) == guest_vp) |
243 | invalidate_pte(vcpu, pte); | 238 | invalidate_pte(vcpu, pte); |
244 | 239 | ||
@@ -266,7 +261,6 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | |||
266 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) | 261 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) |
267 | { | 262 | { |
268 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 263 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
269 | struct hlist_node *node; | ||
270 | struct hpte_cache *pte; | 264 | struct hpte_cache *pte; |
271 | int i; | 265 | int i; |
272 | 266 | ||
@@ -277,7 +271,7 @@ void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) | |||
277 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { | 271 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { |
278 | struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; | 272 | struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i]; |
279 | 273 | ||
280 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) | 274 | hlist_for_each_entry_rcu(pte, list, list_vpte_long) |
281 | if ((pte->pte.raddr >= pa_start) && | 275 | if ((pte->pte.raddr >= pa_start) && |
282 | (pte->pte.raddr < pa_end)) | 276 | (pte->pte.raddr < pa_end)) |
283 | invalidate_pte(vcpu, pte); | 277 | invalidate_pte(vcpu, pte); |