aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVitaly Kuznetsov <vkuznets@redhat.com>2018-05-16 11:21:30 -0400
committerRadim Krčmář <rkrcmar@redhat.com>2018-05-26 09:35:35 -0400
commitc70126764bf09c5dd95527808b647ec347b8a822 (patch)
tree1f0f13b3f8f72ad2ef9a87fb90803a10d5a1c32c
parente2f11f42824bf2d906468a94888718ae24bf0270 (diff)
KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}_EX implementation
Implement HvFlushVirtualAddress{List,Space}Ex hypercalls in the same way we've implemented non-EX counterparts. Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com> [Initialized valid_bank_mask to silence misguided GCC warnings. - Radim] Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
-rw-r--r--arch/x86/kvm/hyperv.c110
-rw-r--r--arch/x86/kvm/trace.h27
2 files changed, 125 insertions, 12 deletions
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 0d916606519d..14e0d0ae4e0a 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1242,31 +1242,102 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1242 return kvm_hv_get_msr(vcpu, msr, pdata); 1242 return kvm_hv_get_msr(vcpu, msr, pdata);
1243} 1243}
1244 1244
1245static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
1246{
1247 int i = 0, j;
1248
1249 if (!(valid_bank_mask & BIT_ULL(bank_no)))
1250 return -1;
1251
1252 for (j = 0; j < bank_no; j++)
1253 if (valid_bank_mask & BIT_ULL(j))
1254 i++;
1255
1256 return i;
1257}
1258
1245static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa, 1259static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
1246 u16 rep_cnt) 1260 u16 rep_cnt, bool ex)
1247{ 1261{
1248 struct kvm *kvm = current_vcpu->kvm; 1262 struct kvm *kvm = current_vcpu->kvm;
1249 struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv; 1263 struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
1264 struct hv_tlb_flush_ex flush_ex;
1250 struct hv_tlb_flush flush; 1265 struct hv_tlb_flush flush;
1251 struct kvm_vcpu *vcpu; 1266 struct kvm_vcpu *vcpu;
1252 unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0}; 1267 unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
1253 int i; 1268 unsigned long valid_bank_mask = 0;
1269 u64 sparse_banks[64];
1270 int sparse_banks_len, i;
1271 bool all_cpus;
1254 1272
1255 if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush)))) 1273 if (!ex) {
1256 return HV_STATUS_INVALID_HYPERCALL_INPUT; 1274 if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
1275 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1257 1276
1258 trace_kvm_hv_flush_tlb(flush.processor_mask, flush.address_space, 1277 trace_kvm_hv_flush_tlb(flush.processor_mask,
1259 flush.flags); 1278 flush.address_space, flush.flags);
1279
1280 sparse_banks[0] = flush.processor_mask;
1281 all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
1282 } else {
1283 if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
1284 sizeof(flush_ex))))
1285 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1286
1287 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1288 flush_ex.hv_vp_set.format,
1289 flush_ex.address_space,
1290 flush_ex.flags);
1291
1292 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1293 all_cpus = flush_ex.hv_vp_set.format !=
1294 HV_GENERIC_SET_SPARSE_4K;
1295
1296 sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
1297 sizeof(sparse_banks[0]);
1298
1299 if (!sparse_banks_len && !all_cpus)
1300 goto ret_success;
1301
1302 if (!all_cpus &&
1303 kvm_read_guest(kvm,
1304 ingpa + offsetof(struct hv_tlb_flush_ex,
1305 hv_vp_set.bank_contents),
1306 sparse_banks,
1307 sparse_banks_len))
1308 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1309 }
1260 1310
1261 cpumask_clear(&hv_current->tlb_lush); 1311 cpumask_clear(&hv_current->tlb_lush);
1262 1312
1263 kvm_for_each_vcpu(i, vcpu, kvm) { 1313 kvm_for_each_vcpu(i, vcpu, kvm) {
1264 struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv; 1314 struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
1315 int bank = hv->vp_index / 64, sbank = 0;
1316
1317 if (!all_cpus) {
1318 /* Banks >64 can't be represented */
1319 if (bank >= 64)
1320 continue;
1321
1322 /* Non-ex hypercalls can only address first 64 vCPUs */
1323 if (!ex && bank)
1324 continue;
1325
1326 if (ex) {
1327 /*
1328 * Check is the bank of this vCPU is in sparse
1329 * set and get the sparse bank number.
1330 */
1331 sbank = get_sparse_bank_no(valid_bank_mask,
1332 bank);
1333
1334 if (sbank < 0)
1335 continue;
1336 }
1265 1337
1266 if (!(flush.flags & HV_FLUSH_ALL_PROCESSORS) && 1338 if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
1267 (hv->vp_index >= 64 || 1339 continue;
1268 !(flush.processor_mask & BIT_ULL(hv->vp_index)))) 1340 }
1269 continue;
1270 1341
1271 /* 1342 /*
1272 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we 1343 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
@@ -1280,6 +1351,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
1280 KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP, 1351 KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
1281 vcpu_bitmap, &hv_current->tlb_lush); 1352 vcpu_bitmap, &hv_current->tlb_lush);
1282 1353
1354ret_success:
1283 /* We always do full TLB flush, set rep_done = rep_cnt. */ 1355 /* We always do full TLB flush, set rep_done = rep_cnt. */
1284 return (u64)HV_STATUS_SUCCESS | 1356 return (u64)HV_STATUS_SUCCESS |
1285 ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); 1357 ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
@@ -1427,14 +1499,28 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
1427 ret = HV_STATUS_INVALID_HYPERCALL_INPUT; 1499 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1428 break; 1500 break;
1429 } 1501 }
1430 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt); 1502 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1431 break; 1503 break;
1432 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: 1504 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
1433 if (unlikely(fast || rep)) { 1505 if (unlikely(fast || rep)) {
1434 ret = HV_STATUS_INVALID_HYPERCALL_INPUT; 1506 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1435 break; 1507 break;
1436 } 1508 }
1437 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt); 1509 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
1510 break;
1511 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
1512 if (unlikely(fast || !rep_cnt || rep_idx)) {
1513 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1514 break;
1515 }
1516 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1517 break;
1518 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
1519 if (unlikely(fast || rep)) {
1520 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
1521 break;
1522 }
1523 ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
1438 break; 1524 break;
1439 default: 1525 default:
1440 ret = HV_STATUS_INVALID_HYPERCALL_CODE; 1526 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 47a4fd758743..0f997683404f 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1391,6 +1391,33 @@ TRACE_EVENT(kvm_hv_flush_tlb,
1391 __entry->processor_mask, __entry->address_space, 1391 __entry->processor_mask, __entry->address_space,
1392 __entry->flags) 1392 __entry->flags)
1393); 1393);
1394
1395/*
1396 * Tracepoint for kvm_hv_flush_tlb_ex — the EX (sparse VP set) variants of
1397 * the Hyper-V remote TLB flush hypercalls; logs the guest-supplied VP set
1398 * parameters alongside the address space and flush flags.
1399 */
1398TRACE_EVENT(kvm_hv_flush_tlb_ex,
1399	TP_PROTO(u64 valid_bank_mask, u64 format, u64 address_space, u64 flags),
1400	TP_ARGS(valid_bank_mask, format, address_space, flags),
1401
1402	TP_STRUCT__entry(
1403		__field(u64, valid_bank_mask)
1404		__field(u64, format)
1405		__field(u64, address_space)
1406		__field(u64, flags)
1407	),
1408
1409	TP_fast_assign(
1410		__entry->valid_bank_mask = valid_bank_mask;
1411		__entry->format = format;
1412		__entry->address_space = address_space;
1413		__entry->flags = flags;
1414	),
1415
1416	TP_printk("valid_bank_mask 0x%llx format 0x%llx "
1417		  "address_space 0x%llx flags 0x%llx",
1418		  __entry->valid_bank_mask, __entry->format,
1419		  __entry->address_space, __entry->flags)
1420);
1394#endif /* _TRACE_KVM_H */ 1421#endif /* _TRACE_KVM_H */
1395 1422
1396#undef TRACE_INCLUDE_PATH 1423#undef TRACE_INCLUDE_PATH