Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/init.c     |  1 +
-rw-r--r--  arch/arm64/mm/ioremap.c  | 85 +++++++++++++++++++++++++++++++
-rw-r--r--  arch/arm64/mm/mmu.c      | 44 +----------------
3 files changed, 87 insertions, 43 deletions
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 88627c450a6c..51d5352e6ad5 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -161,6 +161,7 @@ void __init arm64_memblock_init(void)
 		memblock_reserve(base, size);
 	}
 
+	early_init_fdt_scan_reserved_mem();
 	dma_contiguous_reserve(0);
 
 	memblock_allow_resize();
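The new early_init_fdt_scan_reserved_mem() call pulls /memreserve/ entries and the /reserved-memory node out of the flattened devicetree and reserves them in memblock; it has to run before dma_contiguous_reserve(0) so CMA cannot claim memory the devicetree already set aside. For the consumer side, here is a minimal sketch against the generic of_reserved_mem API; the compatible string and handler name are hypothetical, not part of this patch:

	#include <linux/kernel.h>
	#include <linux/of_reserved_mem.h>

	/* Hypothetical handler, invoked during early_init_fdt_scan_reserved_mem()
	 * for each /reserved-memory child whose compatible string matches. */
	static int __init example_rmem_setup(struct reserved_mem *rmem)
	{
		pr_info("reserved region: base %pa, size %pa\n",
			&rmem->base, &rmem->size);
		return 0;
	}
	RESERVEDMEM_OF_DECLARE(example, "vendor,example-pool", example_rmem_setup);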
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 2bb1d586664c..7ec328392ae0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -25,6 +25,10 @@
 #include <linux/vmalloc.h>
 #include <linux/io.h>
 
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
 static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
 				      pgprot_t prot, void *caller)
 {
@@ -98,3 +102,84 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 				__builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+
+	pgd = pgd_offset_k(addr);
+	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+	pud = pud_offset(pgd, addr);
+	BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+	return pmd_offset(pud, addr);
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+	pmd_t *pmd = early_ioremap_pmd(addr);
+
+	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+	return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+	pmd_t *pmd;
+
+	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+#ifndef CONFIG_ARM64_64K_PAGES
+	/* need to populate pmd for 4k pagesize only */
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+#endif
+	/*
+	 * The boot-ioremap range spans multiple pmds, for which
+	 * we are not prepared:
+	 */
+	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		pr_warn("pmd %p != %p\n",
+			pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+			fix_to_virt(FIX_BTMAP_BEGIN));
+		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+			fix_to_virt(FIX_BTMAP_END));
+
+		pr_warn("FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
+		pr_warn("FIX_BTMAP_BEGIN: %d\n",
+			FIX_BTMAP_BEGIN);
+	}
+
+	early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	pte = early_ioremap_pte(addr);
+
+	if (pgprot_val(flags))
+		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+	else {
+		pte_clear(&init_mm, addr, pte);
+		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	}
+}
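These functions are the arch hooks consumed by the generic early ioremap code: early_ioremap_init() points the FIX_BTMAP fixmap slots at bm_pte, and __early_set_fixmap() installs or clears one pte at a time on behalf of the core. A minimal sketch of an early-boot caller follows, assuming the generic early_ioremap()/early_iounmap() pair; the physical address and register offset are invented for illustration:

	#include <linux/io.h>
	#include <asm/early_ioremap.h>	/* early_ioremap()/early_iounmap();
					 * header location varies by tree */

	static void __init peek_boot_device(void)
	{
		void __iomem *base;

		/* 0x09000000 is an arbitrary example device address */
		base = early_ioremap(0x09000000, PAGE_SIZE);
		if (!base)
			return;

		pr_info("device id register: %#x\n", readl(base + 0xfe0));

		/* FIX_BTMAP slots are scarce; release the mapping promptly */
		early_iounmap(base, PAGE_SIZE);
	}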
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f8dc7e8fce6f..6b7e89569a3a 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -125,7 +125,7 @@ early_param("cachepolicy", early_cachepolicy);
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-static void __init init_mem_pgprot(void)
+void __init init_mem_pgprot(void)
 {
 	pteval_t default_pgprot;
 	int i;
@@ -260,47 +260,6 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
 	} while (pgd++, addr = next, addr != end);
 }
 
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Create an early I/O mapping using the pgd/pmd entries already populated
- * in head.S as this function is called too early to allocated any memory. The
- * mapping size is 2MB with 4KB pages or 64KB or 64KB pages.
- */
-void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
-{
-	unsigned long size, mask;
-	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	/*
-	 * No early pte entries with !ARM64_64K_PAGES configuration, so using
-	 * sections (pmd).
-	 */
-	size = page64k ? PAGE_SIZE : SECTION_SIZE;
-	mask = ~(size - 1);
-
-	pgd = pgd_offset_k(virt);
-	pud = pud_offset(pgd, virt);
-	if (pud_none(*pud))
-		return NULL;
-	pmd = pmd_offset(pud, virt);
-
-	if (page64k) {
-		if (pmd_none(*pmd))
-			return NULL;
-		pte = pte_offset_kernel(pmd, virt);
-		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
-	} else {
-		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
-	}
-
-	return (void __iomem *)((virt & mask) + (phys & ~mask));
-}
-#endif
-
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
@@ -357,7 +316,6 @@ void __init paging_init(void)
 {
 	void *zero_page;
 
-	init_mem_pgprot();
 	map_mem();
 
 	/*
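Making init_mem_pgprot() global and dropping its call from paging_init() only makes sense if a caller moves earlier in boot: the pgprot tables must be final before __early_set_fixmap() writes any pte. That companion change lands outside this diff; a plausible shape of the new ordering in setup_arch(), sketched as an assumption rather than quoted from the series:

	/* Assumed early-boot sequence (simplified, not the literal arm64
	 * setup_arch()) */
	void __init setup_arch(char **cmdline_p)
	{
		init_mem_pgprot();	/* pgprot tables ready before any early mapping */
		early_ioremap_init();	/* wires the FIX_BTMAP slots to bm_pte */

		/* ... fdt scanning, arm64_memblock_init(), etc. ... */

		paging_init();		/* no longer calls init_mem_pgprot() itself */
	}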