Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c | 104
1 file changed, 104 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 410ddbc1aa2e..ac2304fd173e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1393,6 +1393,110 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	return page;
 }
 
+/*
+ * The function is based on mtrr_type_lookup() in
+ * arch/x86/kernel/cpu/mtrr/generic.c
+ */
+static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
+			 u64 start, u64 end)
+{
+	int i;
+	u64 base, mask;
+	u8 prev_match, curr_match;
+	int num_var_ranges = KVM_NR_VAR_MTRR;
+
+	if (!mtrr_state->enabled)
+		return 0xFF;
+
+	/* Make end inclusive instead of exclusive */
+	end--;
+
+	/* Look in fixed ranges. Just return the type as per start */
+	if (mtrr_state->have_fixed && (start < 0x100000)) {
+		int idx;
+
+		if (start < 0x80000) {
+			idx = 0;
+			idx += (start >> 16);
+			return mtrr_state->fixed_ranges[idx];
+		} else if (start < 0xC0000) {
+			idx = 1 * 8;
+			idx += ((start - 0x80000) >> 14);
+			return mtrr_state->fixed_ranges[idx];
+		} else if (start < 0x1000000) {
+			idx = 3 * 8;
+			idx += ((start - 0xC0000) >> 12);
+			return mtrr_state->fixed_ranges[idx];
+		}
+	}
+
+	/*
+	 * Look in variable ranges. Look for multiple ranges matching
+	 * this address and pick the type as per MTRR precedence.
+	 */
+	if (!(mtrr_state->enabled & 2))
+		return mtrr_state->def_type;
+
+	prev_match = 0xFF;
+	for (i = 0; i < num_var_ranges; ++i) {
+		unsigned short start_state, end_state;
+
+		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
+			continue;
+
+		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
+		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
+		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
+		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
+
+		start_state = ((start & mask) == (base & mask));
+		end_state = ((end & mask) == (base & mask));
+		if (start_state != end_state)
+			return 0xFE;
+
+		if ((start & mask) != (base & mask))
+			continue;
+
+		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
+		if (prev_match == 0xFF) {
+			prev_match = curr_match;
+			continue;
+		}
+
+		if (prev_match == MTRR_TYPE_UNCACHABLE ||
+		    curr_match == MTRR_TYPE_UNCACHABLE)
+			return MTRR_TYPE_UNCACHABLE;
+
+		if ((prev_match == MTRR_TYPE_WRBACK &&
+		     curr_match == MTRR_TYPE_WRTHROUGH) ||
+		    (prev_match == MTRR_TYPE_WRTHROUGH &&
+		     curr_match == MTRR_TYPE_WRBACK)) {
+			prev_match = MTRR_TYPE_WRTHROUGH;
+			curr_match = MTRR_TYPE_WRTHROUGH;
+		}
+
+		if (prev_match != curr_match)
+			return MTRR_TYPE_UNCACHABLE;
+	}
+
+	if (prev_match != 0xFF)
+		return prev_match;
+
+	return mtrr_state->def_type;
+}
+
+static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u8 mtrr;
+
+	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
+			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
+	if (mtrr == 0xfe || mtrr == 0xff)
+		mtrr = MTRR_TYPE_WRBACK;
+	return mtrr;
+}
+
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	unsigned index;
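
Note on the fixed-range lookup in get_mtrr_type() above: the index arithmetic mirrors how the x86 fixed MTRRs carve up the first megabyte, eight 64 KiB entries below 0x80000 (fixed_ranges[0..7]), sixteen 16 KiB entries up to 0xC0000 (fixed_ranges[8..23]), and sixty-four 4 KiB entries up to 0x100000 (fixed_ranges[24..87]). The following stand-alone C sketch of just that index computation is illustrative only, not part of this commit; the helper name fixed_mtrr_index() is hypothetical.

/* Illustrative demo of the fixed-range MTRR index math; not kernel code. */
#include <stdio.h>
#include <stdint.h>

static int fixed_mtrr_index(uint64_t addr)
{
	if (addr < 0x80000)		/* 8 entries, 64 KiB each: fixed_ranges[0..7] */
		return addr >> 16;
	if (addr < 0xC0000)		/* 16 entries, 16 KiB each: fixed_ranges[8..23] */
		return 1 * 8 + ((addr - 0x80000) >> 14);
	if (addr < 0x100000)		/* 64 entries, 4 KiB each: fixed_ranges[24..87] */
		return 3 * 8 + ((addr - 0xC0000) >> 12);
	return -1;			/* above 1 MiB: not covered by fixed ranges */
}

int main(void)
{
	uint64_t samples[] = { 0x00000, 0x7FFFF, 0x80000, 0xA0000, 0xC0000, 0xF8000 };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%05llx -> fixed_ranges[%d]\n",
		       (unsigned long long)samples[i], fixed_mtrr_index(samples[i]));
	return 0;
}

For example, 0xA0000 maps to fixed_ranges[16] and 0xF8000 to fixed_ranges[80], matching the idx values the kernel function computes for those start addresses.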