author		Christoffer Dall <c.dall@virtualopensystems.com>	2013-01-20 18:28:04 -0500
committer	Christoffer Dall <c.dall@virtualopensystems.com>	2013-01-23 13:29:08 -0500
commit		cc577c26e2e9740b046591a72e77213c556bff19 (patch)
tree		cec417a8f5de11b352146d69fc6c92e2c2de5b78 /arch/arm/mm/mmu.c
parent		6abc749f635005be78dfcb562c2235511965db6d (diff)
ARM: Add page table and page defines needed by KVM
KVM uses the stage-2 page tables and the Hyp page table format, so we define the fields and page protection flags needed by KVM.

The nomenclature is this:
- page_hyp:        PL2 code/data mappings
- page_hyp_device: PL2 device mappings (vgic access)
- page_s2:         Stage-2 code/data page mappings
- page_s2_device:  Stage-2 device mappings (vgic access)

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
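For orientation, a minimal sketch of how the naming scheme maps onto protection values. This is illustrative only and not part of the patch: only pgprot_hyp_device, pgprot_s2 and pgprot_s2_device are added to mmu.c below, and page_hyp is assumed here to reuse the existing pgprot_kernel; the helper kvm_map_prot() and the enum are made up for the example.

	#include <asm/pgtable.h>	/* pgprot_t and the pgprot_* globals */

	/* Hypothetical mapping kinds, mirroring the nomenclature above. */
	enum kvm_map_kind { MAP_HYP, MAP_HYP_DEVICE, MAP_S2, MAP_S2_DEVICE };

	static pgprot_t kvm_map_prot(enum kvm_map_kind kind)
	{
		switch (kind) {
		case MAP_HYP:		return pgprot_kernel;	  /* PL2 code/data (assumed) */
		case MAP_HYP_DEVICE:	return pgprot_hyp_device; /* PL2 vgic access         */
		case MAP_S2:		return pgprot_s2;	  /* guest code/data pages   */
		case MAP_S2_DEVICE:
		default:		return pgprot_s2_device;  /* guest device mappings   */
		}
	}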
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	22
1 file changed, 22 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6..1f51d712b55a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
 	unsigned int cr_mask;
 	pmdval_t pmd;
 	pteval_t pte;
+	pteval_t pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)	policy
+#else
+#define s2_policy(policy)	0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy = "uncached",
 		.cr_mask = CR_W|CR_C,
 		.pmd = PMD_SECT_UNCACHED,
 		.pte = L_PTE_MT_UNCACHED,
+		.pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy = "buffered",
 		.cr_mask = CR_C,
 		.pmd = PMD_SECT_BUFFERED,
 		.pte = L_PTE_MT_BUFFERABLE,
+		.pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy = "writethrough",
 		.cr_mask = 0,
 		.pmd = PMD_SECT_WT,
 		.pte = L_PTE_MT_WRITETHROUGH,
+		.pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy = "writeback",
 		.cr_mask = 0,
 		.pmd = PMD_SECT_WB,
 		.pte = L_PTE_MT_WRITEBACK,
+		.pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy = "writealloc",
 		.cr_mask = 0,
 		.pmd = PMD_SECT_WBWA,
 		.pte = L_PTE_MT_WRITEALLOC,
+		.pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };
 
@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;
 
@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+	s2_pgprot = cp->pte_s2;
+	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
 	/*
 	 * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
+		s2_pgprot |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
 	pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
+	pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+	pgprot_s2_device = __pgprot(s2_device_pgprot);
+	pgprot_hyp_device = __pgprot(hyp_device_pgprot);
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
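As a usage note, a sketch under assumptions rather than code from this series: once build_mem_type_table() has initialised the new globals, a stage-2 mapping path can turn a page frame number into a PTE with the stock ARM pfn_pte() helper. The function map_guest_pfn() below is hypothetical and stands in for whatever the KVM MMU code actually does.

	#include <asm/pgtable.h>	/* pfn_pte(), pgprot_t, pgprot_s2, pgprot_s2_device */

	/* Hypothetical caller, e.g. a stage-2 fault handler in the KVM MMU code. */
	static pte_t map_guest_pfn(unsigned long pfn, bool is_device)
	{
		/* Device ranges (such as the vgic interface) keep device/uncached attributes. */
		pgprot_t prot = is_device ? pgprot_s2_device : pgprot_s2;

		return pfn_pte(pfn, prot);
	}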