| author | Andrew Honig <ahonig@google.com> | 2013-03-29 12:35:21 -0400 |
|---|---|---|
| committer | Gleb Natapov <gleb@redhat.com> | 2013-04-07 06:05:35 -0400 |
| commit | 8f964525a121f2ff2df948dac908dcc65be21b5b | |
| tree | 1986d7677a1cae8f639c91812da2d8c6ed5bba26 /virt/kvm/kvm_main.c | |
| parent | 09a6e1f4ad32243989b30485f78985c0923284cd | |
KVM: Allow cross page reads and writes from cached translations.
This patch adds support to the kvm_gfn_to_hva_cache_init family of
functions for reads and writes that cross a page boundary. If the
range falls within a single memslot, the cached access remains a fast
operation; if the range is split between two memslots, the slower
kvm_read_guest and kvm_write_guest paths are used instead.
Tested: against the kvm_clock unit tests.
Signed-off-by: Andrew Honig <ahonig@google.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
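
For orientation before the diff: after this change, a caller initializes the cache with the full length of the guest region it cares about and then reads or writes through it; the cached helpers pick the fast or slow path themselves. Below is a minimal caller sketch, with hypothetical variable names (`kvm`, `gpa`) and struct kvm_steal_time chosen purely as an example of a guest-visible structure; it is not code from this patch.

```c
/*
 * Illustrative caller sketch (not part of this patch); `kvm`, `gpa`
 * and the use of struct kvm_steal_time are assumptions for the example.
 */
struct gfn_to_hva_cache ghc;
struct kvm_steal_time st = { 0 };

/* Fails with -EFAULT if any part of [gpa, gpa + sizeof(st)) is unmapped. */
if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(st)))
        return -EFAULT;

/*
 * Fast path if the region sits in one memslot; otherwise this falls
 * back to kvm_write_guest() internally, as the diff below shows.
 */
return kvm_write_guest_cached(kvm, &ghc, &st, sizeof(st));
```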
Diffstat (limited to 'virt/kvm/kvm_main.c')
| -rw-r--r-- | virt/kvm/kvm_main.c | 47 |

1 file changed, 37 insertions(+), 10 deletions(-)
```diff
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index adc68feb5c5a..f18013f09e68 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 }
 
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-                              gpa_t gpa)
+                              gpa_t gpa, unsigned long len)
 {
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int offset = offset_in_page(gpa);
-        gfn_t gfn = gpa >> PAGE_SHIFT;
+        gfn_t start_gfn = gpa >> PAGE_SHIFT;
+        gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
+        gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
+        gfn_t nr_pages_avail;
 
         ghc->gpa = gpa;
         ghc->generation = slots->generation;
-        ghc->memslot = gfn_to_memslot(kvm, gfn);
-        ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
-        if (!kvm_is_error_hva(ghc->hva))
+        ghc->len = len;
+        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
+        if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
                 ghc->hva += offset;
-        else
-                return -EFAULT;
-
+        } else {
+                /*
+                 * If the requested region crosses two memslots, we still
+                 * verify that the entire region is valid here.
+                 */
+                while (start_gfn <= end_gfn) {
+                        ghc->memslot = gfn_to_memslot(kvm, start_gfn);
+                        ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+                                                   &nr_pages_avail);
+                        if (kvm_is_error_hva(ghc->hva))
+                                return -EFAULT;
+                        start_gfn += nr_pages_avail;
+                }
+                /* Use the slow path for cross page reads and writes. */
+                ghc->memslot = NULL;
+        }
         return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
```
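
The span arithmetic in this hunk deserves a worked example. Assuming 4 KiB pages (PAGE_SHIFT == 12), a 16-byte access at gpa 0x1ff8 starts on gfn 1 and ends on gfn 2, so two pages are needed and the access crosses a page. A standalone userspace sketch of the same computation (not kernel code):

```c
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumes 4 KiB pages, as on x86 */

int main(void)
{
        unsigned long gpa = 0x1ff8, len = 16;
        unsigned long start_gfn = gpa >> PAGE_SHIFT;             /* 1 */
        unsigned long end_gfn = (gpa + len - 1) >> PAGE_SHIFT;   /* 2 */
        unsigned long nr_pages_needed = end_gfn - start_gfn + 1; /* 2 */

        printf("gfns %lu..%lu, pages needed: %lu (%s)\n",
               start_gfn, end_gfn, nr_pages_needed,
               nr_pages_needed > 1 ? "crosses a page" : "single page");
        return 0;
}
```

The write path then gains the length check and the slow-path fallback: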
```diff
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int r;
 
+        BUG_ON(len > ghc->len);
+
         if (slots->generation != ghc->generation)
-                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+        if (unlikely(!ghc->memslot))
+                return kvm_write_guest(kvm, ghc->gpa, data, len);
 
         if (kvm_is_error_hva(ghc->hva))
                 return -EFAULT;
```
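
The fallback target, kvm_write_guest(), already copes with page-crossing by splitting the copy into per-page chunks via kvm_write_guest_page(). A simplified sketch of that loop's shape follows; it is not the verbatim kernel source (error propagation and the next_segment() helper are elided):

```c
/* Simplified sketch of the slow path's shape; not verbatim source. */
int kvm_write_guest_sketch(struct kvm *kvm, gpa_t gpa, const void *data,
                           unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        unsigned long offset = offset_in_page(gpa);

        while (len) {
                /* Copy at most up to the end of the current page. */
                unsigned long seg = min(len, PAGE_SIZE - offset);

                if (kvm_write_guest_page(kvm, gfn, data, offset, seg))
                        return -EFAULT;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
```

The read path receives the mirror-image change: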
```diff
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
         struct kvm_memslots *slots = kvm_memslots(kvm);
         int r;
 
+        BUG_ON(len > ghc->len);
+
         if (slots->generation != ghc->generation)
-                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+                kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len);
+
+        if (unlikely(!ghc->memslot))
+                return kvm_read_guest(kvm, ghc->gpa, data, len);
 
         if (kvm_is_error_hva(ghc->hva))
                 return -EFAULT;
```