author     Marc Zyngier <marc.zyngier@arm.com>            2015-03-10 15:07:00 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org> 2015-03-11 09:24:36 -0400
commit     04b8dc85bf4a64517e3cf20e409eeaa503b15cc1 (patch)
tree       ce074a61824c768147f911152c4af6dfa97ff9eb
parent     a987370f8e7a1677ae385042644326d9cd145a20 (diff)
arm64: KVM: Do not use pgd_index to index stage-2 pgd
The kernel's pgd_index macro is designed to index a normal, page sized array. KVM is a bit different, as we can use concatenated pages to have a bigger address space (for example 40bit IPA with 4kB pages gives us an 8kB PGD).

In the above case, the use of pgd_index will always return an index inside the first 4kB, which makes a guest that has memory above 0x8000000000 rather unhappy, as it spins forever in a page fault, whilst the host happily corrupts the lower pgd.

The obvious fix is to get our own kvm_pgd_index that does the right thing(tm).

Tested on X-Gene with a hacked kvmtool that put memory at a stupidly high address.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
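[Editor's note: a minimal userspace sketch of the indexing bug described above. The constants are assumed for the 40bit IPA / 4kB page configuration from the commit log (512 host PGD entries per 4kB page, 1024 entries in the 8kB concatenated stage-2 PGD) and are written out by hand rather than taken from the kernel headers; host_pgd_index() is an illustrative name, not the kernel macro.]

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed values for a 3-level, 4kB-page configuration with a 40bit IPA:
 * one host PGD page holds 512 eight-byte entries, while the concatenated
 * stage-2 PGD holds 1024 (two pages, i.e. the 8kB PGD from the commit log).
 */
#define PGDIR_SHIFT		30
#define PTRS_PER_PGD		512
#define PTRS_PER_S2_PGD		1024

/* Host-style index: masks with the number of entries in one PGD page. */
static uint64_t host_pgd_index(uint64_t addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

/* Stage-2 index: masks with the real number of stage-2 PGD entries. */
static uint64_t kvm_pgd_index(uint64_t addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1);
}

int main(void)
{
	/* First IPA beyond what 512 entries cover: 512 << 30 = 0x8000000000. */
	uint64_t ipa = 0x8000000000ULL;

	/* Prints 0 for the host-style index, 512 for the stage-2 index. */
	printf("host_pgd_index: %llu\n", (unsigned long long)host_pgd_index(ipa));
	printf("kvm_pgd_index:  %llu\n", (unsigned long long)kvm_pgd_index(ipa));
	return 0;
}

With these values, the host-style mask wraps an IPA of 0x8000000000 (entry 512) back to entry 0 of the stage-2 PGD, while the stage-2 mask returns 512 as intended.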
-rw-r--r--   arch/arm/include/asm/kvm_mmu.h     3
-rw-r--r--   arch/arm/kvm/mmu.c                 8
-rw-r--r--   arch/arm64/include/asm/kvm_mmu.h   2
3 files changed, 8 insertions, 5 deletions
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index c57c41dc7e87..4cf48c3aca13 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -149,13 +149,14 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
 })
 
+#define kvm_pgd_index(addr)	pgd_index(addr)
+
 static inline bool kvm_page_empty(void *ptr)
 {
 	struct page *ptr_page = virt_to_page(ptr);
 	return page_count(ptr_page) == 1;
 }
 
-
 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index a48a73c6b866..5656d79c5a44 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -290,7 +290,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
 	phys_addr_t addr = start, end = start + size;
 	phys_addr_t next;
 
-	pgd = pgdp + pgd_index(addr);
+	pgd = pgdp + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		if (!pgd_none(*pgd))
@@ -355,7 +355,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t next;
 	pgd_t *pgd;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		next = kvm_pgd_addr_end(addr, end);
 		stage2_flush_puds(kvm, pgd, addr, next);
@@ -830,7 +830,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pgd_t *pgd;
 	pud_t *pud;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	if (WARN_ON(pgd_none(*pgd))) {
 		if (!cache)
 			return NULL;
@@ -1120,7 +1120,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
 	pgd_t *pgd;
 	phys_addr_t next;
 
-	pgd = kvm->arch.pgd + pgd_index(addr);
+	pgd = kvm->arch.pgd + kvm_pgd_index(addr);
 	do {
 		/*
 		 * Release kvm_mmu_lock periodically if the memory region is
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a099cd9cdef8..bbfb600fa822 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,6 +158,8 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD		(1 << PTRS_PER_S2_PGD_SHIFT)
 #define S2_PGD_ORDER		get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
+#define kvm_pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+
 /*
  * If we are concatenating first level stage-2 page tables, we would have less
  * than or equal to 16 pointers in the fake PGD, because that's what the