aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2012-10-27 20:09:14 -0400
committerChristoffer Dall <cdall@cs.columbia.edu>2013-03-06 18:48:44 -0500
commit06e8c3b0f3210e5e7039fd2b5e3926b68df7f5d7 (patch)
tree99ad1af05824cc65a5ded6764f6ef8a2eb40be17
parent9c7a6432fb081563f084b25bbd2774b1547c4fad (diff)
ARM: KVM: allow HYP mappings to be at an offset from kernel mappings
arm64 cannot represent the kernel VAs in HYP mode, because of the lack of TTBR1 at EL2. A way to cope with this situation is to have HYP VAs to be an offset from the kernel VAs. Introduce macros to convert a kernel VA to a HYP VA, make the HYP mapping functions use these conversion macros. Also change the documentation to reflect the existence of the offset. On ARM, where we can have an identity mapping between kernel and HYP, the macros are without any effect. Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
-rw-r--r--arch/arm/include/asm/kvm_mmu.h8
-rw-r--r--arch/arm/kvm/mmu.c43
2 files changed, 35 insertions(+), 16 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index ac784937cc0f..3c71a1d4b7a3 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -22,6 +22,14 @@
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
+/*
+ * We directly use the kernel VA for the HYP, as we can directly share
+ * the mapping (HTTBR "covers" TTBR1).
+ */
+#define HYP_PAGE_OFFSET_MASK	(~0UL)
+#define HYP_PAGE_OFFSET		PAGE_OFFSET
+#define KERN_TO_HYP(kva)	(kva)
+
 int create_hyp_mappings(void *from, void *to);
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
 void free_hyp_pmds(void);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6b4ea185956e..ead6b16eeb09 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -101,14 +101,15 @@ void free_hyp_pmds(void)
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
-		pgd = hyp_pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pgd = hyp_pgd + pgd_index(hyp_addr);
+		pud = pud_offset(pgd, hyp_addr);
 
 		if (pud_none(*pud))
 			continue;
 		BUG_ON(pud_bad(*pud));
 
-		pmd = pmd_offset(pud, addr);
+		pmd = pmd_offset(pud, hyp_addr);
 		free_ptes(pmd, addr);
 		pmd_free(NULL, pmd);
 		pud_clear(pud);
@@ -124,7 +125,9 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
 	struct page *page;
 
 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		pte = pte_offset_kernel(pmd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+		pte = pte_offset_kernel(pmd, hyp_addr);
 		BUG_ON(!virt_addr_valid(addr));
 		page = virt_to_page(addr);
 		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
@@ -139,7 +142,9 @@ static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
 	unsigned long addr;
 
 	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
-		pte = pte_offset_kernel(pmd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+
+		pte = pte_offset_kernel(pmd, hyp_addr);
 		BUG_ON(pfn_valid(*pfn_base));
 		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
 		(*pfn_base)++;
@@ -154,12 +159,13 @@ static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
 	unsigned long addr, next;
 
 	for (addr = start; addr < end; addr = next) {
-		pmd = pmd_offset(pud, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pmd = pmd_offset(pud, hyp_addr);
 
 		BUG_ON(pmd_sect(*pmd));
 
 		if (pmd_none(*pmd)) {
-			pte = pte_alloc_one_kernel(NULL, addr);
+			pte = pte_alloc_one_kernel(NULL, hyp_addr);
 			if (!pte) {
 				kvm_err("Cannot allocate Hyp pte\n");
 				return -ENOMEM;
@@ -200,11 +206,12 @@ static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
 
 	mutex_lock(&kvm_hyp_pgd_mutex);
 	for (addr = start; addr < end; addr = next) {
-		pgd = hyp_pgd + pgd_index(addr);
-		pud = pud_offset(pgd, addr);
+		unsigned long hyp_addr = KERN_TO_HYP(addr);
+		pgd = hyp_pgd + pgd_index(hyp_addr);
+		pud = pud_offset(pgd, hyp_addr);
 
 		if (pud_none_or_clear_bad(pud)) {
-			pmd = pmd_alloc_one(NULL, addr);
+			pmd = pmd_alloc_one(NULL, hyp_addr);
 			if (!pmd) {
 				kvm_err("Cannot allocate Hyp pmd\n");
 				err = -ENOMEM;
@@ -224,12 +231,13 @@ out:
 }
 
 /**
- * create_hyp_mappings - map a kernel virtual address range in Hyp mode
+ * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
  * @from:	The virtual kernel start address of the range
  * @to:	The virtual kernel end address of the range (exclusive)
  *
- * The same virtual address as the kernel virtual address is also used in
- * Hyp-mode mapping to the same underlying physical pages.
+ * The same virtual address as the kernel virtual address is also used
+ * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
+ * physical pages.
  *
  * Note: Wrapping around zero in the "to" address is not supported.
  */
@@ -239,10 +247,13 @@ int create_hyp_mappings(void *from, void *to)
 }
 
 /**
- * create_hyp_io_mappings - map a physical IO range in Hyp mode
- * @from:	The virtual HYP start address of the range
- * @to:	The virtual HYP end address of the range (exclusive)
+ * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
+ * @from:	The kernel start VA of the range
+ * @to:	The kernel end VA of the range (exclusive)
  * @addr:	The physical start address which gets mapped
+ *
+ * The resulting HYP VA is the same as the kernel VA, modulo
+ * HYP_PAGE_OFFSET.
  */
 int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
 {