about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	Ard Biesheuvel <ard.biesheuvel@linaro.org>	2016-06-29 08:51:27 -0400
committer	Catalin Marinas <catalin.marinas@arm.com>	2016-07-01 06:56:26 -0400
commitbd264d046aad25e9922a142a7831e6841a2f0474 (patch)
treebc11e20fc1c2a0c307ac34e3a372f6f57733e60c
parent53e1b32910a3bc94d9f122321442b79b314219f8 (diff)
arm64: efi: always map runtime services code and data regions down to pages
To avoid triggering diagnostics in the MMU code that are finicky about splitting block mappings into more granular mappings, ensure that regions that are likely to appear in the Memory Attributes table as well as the UEFI memory map are always mapped down to pages. This way, we can use apply_to_page_range() instead of create_pgd_mapping() for the second pass, which cannot split or merge block entries, and operates strictly on PTEs.

Note that this aligns the arm64 Memory Attributes table handling code with the ARM code, which already uses apply_to_page_range() to set the strict permissions.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--arch/arm64/include/asm/efi.h3
-rw-r--r--arch/arm64/kernel/efi.c36
2 files changed, 36 insertions, 3 deletions
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 622db3c6474e..8b13476cdf96 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -14,8 +14,7 @@ extern void efi_init(void);
14#endif 14#endif
15 15
16int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); 16int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
17 17int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
18#define efi_set_mapping_permissions efi_create_mapping
19 18
20#define arch_efi_call_virt_setup() \ 19#define arch_efi_call_virt_setup() \
21({ \ 20({ \
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 981604948521..4aef89f37049 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -62,13 +62,47 @@ struct screen_info screen_info __section(.data);
62int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) 62int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
63{ 63{
64 pteval_t prot_val = create_mapping_protection(md); 64 pteval_t prot_val = create_mapping_protection(md);
65 bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
66 md->type != EFI_RUNTIME_SERVICES_DATA);
65 67
66 create_pgd_mapping(mm, md->phys_addr, md->virt_addr, 68 create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
67 md->num_pages << EFI_PAGE_SHIFT, 69 md->num_pages << EFI_PAGE_SHIFT,
68 __pgprot(prot_val | PTE_NG), true); 70 __pgprot(prot_val | PTE_NG), allow_block_mappings);
69 return 0; 71 return 0;
70} 72}
71 73
74static int __init set_permissions(pte_t *ptep, pgtable_t token,
75 unsigned long addr, void *data)
76{
77 efi_memory_desc_t *md = data;
78 pte_t pte = *ptep;
79
80 if (md->attribute & EFI_MEMORY_RO)
81 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
82 if (md->attribute & EFI_MEMORY_XP)
83 pte = set_pte_bit(pte, __pgprot(PTE_PXN));
84 set_pte(ptep, pte);
85 return 0;
86}
87
88int __init efi_set_mapping_permissions(struct mm_struct *mm,
89 efi_memory_desc_t *md)
90{
91 BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
92 md->type != EFI_RUNTIME_SERVICES_DATA);
93
94 /*
95 * Calling apply_to_page_range() is only safe on regions that are
96 * guaranteed to be mapped down to pages. Since we are only called
97 * for regions that have been mapped using efi_create_mapping() above
98 * (and this is checked by the generic Memory Attributes table parsing
99 * routines), there is no need to check that again here.
100 */
101 return apply_to_page_range(mm, md->virt_addr,
102 md->num_pages << EFI_PAGE_SHIFT,
103 set_permissions, md);
104}
105
72static int __init arm64_dmi_init(void) 106static int __init arm64_dmi_init(void)
73{ 107{
74 /* 108 /*