author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 21:03:54 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-11 21:03:54 -0500
commit     6b00f7efb5303418c231994c91fb8239f5ada260 (patch)
tree       1daba87ccda34e632ea39dedc5055391c7e94bdc /arch/arm64/mm
parent     b3d6524ff7956c5a898d51a18eaecb62a60a2b84 (diff)
parent     d476d94f180af3f0fca77394651d4a98f4df1c54 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "arm64 updates for 3.20:

  - reimplementation of the virtual remapping of UEFI Runtime Services
    in a way that is stable across kexec
  - emulation of the "setend" instruction for 32-bit tasks (user
    endianness switching trapped in the kernel, SCTLR_EL1.E0E bit set
    accordingly)
  - compat_sys_call_table implemented in C (from asm) and made it a
    constant array together with sys_call_table
  - export CPU cache information via /sys (like other architectures)
  - DMA API implementation clean-up in preparation for IOMMU support
  - macros clean-up for KVM
  - dropped some unnecessary cache+tlb maintenance
  - CONFIG_ARM64_CPU_SUSPEND clean-up
  - defconfig update (CPU_IDLE)

  The EFI changes going via the arm64 tree have been acked by Matt
  Fleming.  There is also a patch adding sys_*stat64 prototypes to
  include/linux/syscalls.h, acked by Andrew Morton"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (47 commits)
  arm64: compat: Remove incorrect comment in compat_siginfo
  arm64: Fix section mismatch on alloc_init_p[mu]d()
  arm64: Avoid breakage caused by .altmacro in fpsimd save/restore macros
  arm64: mm: use *_sect to check for section maps
  arm64: drop unnecessary cache+tlb maintenance
  arm64:mm: free the useless initial page table
  arm64: Enable CPU_IDLE in defconfig
  arm64: kernel: remove ARM64_CPU_SUSPEND config option
  arm64: make sys_call_table const
  arm64: Remove asm/syscalls.h
  arm64: Implement the compat_sys_call_table in C
  syscalls: Declare sys_*stat64 prototypes if __ARCH_WANT_(COMPAT_)STAT64
  compat: Declare compat_sys_sigpending and compat_sys_sigprocmask prototypes
  arm64: uapi: expose our struct ucontext to the uapi headers
  smp, ARM64: Kill SMP single function call interrupt
  arm64: Emulate SETEND for AArch32 tasks
  arm64: Consolidate hotplug notifier for instruction emulation
  arm64: Track system support for mixed endian EL0
  arm64: implement generic IOMMU configuration
  arm64: Combine coherent and non-coherent swiotlb dma_ops
  ...
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/dma-mapping.c  116
-rw-r--r--  arch/arm64/mm/dump.c          30
-rw-r--r--  arch/arm64/mm/fault.c          2
-rw-r--r--  arch/arm64/mm/init.c          25
-rw-r--r--  arch/arm64/mm/ioremap.c        1
-rw-r--r--  arch/arm64/mm/mm.h             2
-rw-r--r--  arch/arm64/mm/mmu.c          342
-rw-r--r--  arch/arm64/mm/proc.S          14
8 files changed, 321 insertions, 211 deletions
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d92094203913..0a24b9b8c698 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -134,16 +134,17 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
-static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
-				     dma_addr_t *dma_handle, gfp_t flags,
-				     struct dma_attrs *attrs)
+static void *__dma_alloc(struct device *dev, size_t size,
+			 dma_addr_t *dma_handle, gfp_t flags,
+			 struct dma_attrs *attrs)
 {
 	struct page *page;
 	void *ptr, *coherent_ptr;
+	bool coherent = is_device_dma_coherent(dev);
 
 	size = PAGE_ALIGN(size);
 
-	if (!(flags & __GFP_WAIT)) {
+	if (!coherent && !(flags & __GFP_WAIT)) {
 		struct page *page = NULL;
 		void *addr = __alloc_from_pool(size, &page);
 
@@ -151,13 +152,16 @@ static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 
 		return addr;
-
 	}
 
 	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 
+	/* no need for non-cacheable mapping if coherent */
+	if (coherent)
+		return ptr;
+
 	/* remove any dirty cache lines on the kernel alias */
 	__dma_flush_range(ptr, ptr + size);
 
@@ -179,15 +183,17 @@ no_mem:
 	return NULL;
 }
 
-static void __dma_free_noncoherent(struct device *dev, size_t size,
-				   void *vaddr, dma_addr_t dma_handle,
-				   struct dma_attrs *attrs)
+static void __dma_free(struct device *dev, size_t size,
+		       void *vaddr, dma_addr_t dma_handle,
+		       struct dma_attrs *attrs)
 {
 	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
-	if (__free_from_pool(vaddr, size))
-		return;
-	vunmap(vaddr);
+	if (!is_device_dma_coherent(dev)) {
+		if (__free_from_pool(vaddr, size))
+			return;
+		vunmap(vaddr);
+	}
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
@@ -199,7 +205,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
 	dma_addr_t dev_addr;
 
 	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
 	return dev_addr;
 }
@@ -209,7 +216,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
 				 size_t size, enum dma_data_direction dir,
 				 struct dma_attrs *attrs)
 {
-	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
 
@@ -221,9 +229,10 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
 	int i, ret;
 
 	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-	for_each_sg(sgl, sg, ret, i)
-		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-			       sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, ret, i)
+			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+				       sg->length, dir);
 
 	return ret;
 }
@@ -236,9 +245,10 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, nelems, i)
-		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-				 sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, nelems, i)
+			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+					 sg->length, dir);
 	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
 }
 
@@ -246,7 +256,8 @@ static void __swiotlb_sync_single_for_cpu(struct device *dev,
 					  dma_addr_t dev_addr, size_t size,
 					  enum dma_data_direction dir)
 {
-	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
 }
 
@@ -255,7 +266,8 @@ static void __swiotlb_sync_single_for_device(struct device *dev,
 					     enum dma_data_direction dir)
 {
 	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
-	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+	if (!is_device_dma_coherent(dev))
+		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 }
 
 static void __swiotlb_sync_sg_for_cpu(struct device *dev,
@@ -265,9 +277,10 @@ static void __swiotlb_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, nelems, i)
-		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-				 sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, nelems, i)
+			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+					 sg->length, dir);
 	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
 }
 
@@ -279,9 +292,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 	int i;
 
 	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
-	for_each_sg(sgl, sg, nelems, i)
-		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
-			       sg->length, dir);
+	if (!is_device_dma_coherent(dev))
+		for_each_sg(sgl, sg, nelems, i)
+			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+				       sg->length, dir);
 }
 
 /* vma->vm_page_prot must be set appropriately before calling this function */
@@ -308,28 +322,20 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
-static int __swiotlb_mmap_noncoherent(struct device *dev,
-		struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		struct dma_attrs *attrs)
-{
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
-	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
-}
-
-static int __swiotlb_mmap_coherent(struct device *dev,
-		struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		struct dma_attrs *attrs)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  struct dma_attrs *attrs)
 {
-	/* Just use whatever page_prot attributes were specified */
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
 	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
 
-struct dma_map_ops noncoherent_swiotlb_dma_ops = {
-	.alloc = __dma_alloc_noncoherent,
-	.free = __dma_free_noncoherent,
-	.mmap = __swiotlb_mmap_noncoherent,
+static struct dma_map_ops swiotlb_dma_ops = {
+	.alloc = __dma_alloc,
+	.free = __dma_free,
+	.mmap = __swiotlb_mmap,
 	.map_page = __swiotlb_map_page,
 	.unmap_page = __swiotlb_unmap_page,
 	.map_sg = __swiotlb_map_sg_attrs,
@@ -341,24 +347,6 @@ struct dma_map_ops noncoherent_swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
 };
-EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
-
-struct dma_map_ops coherent_swiotlb_dma_ops = {
-	.alloc = __dma_alloc_coherent,
-	.free = __dma_free_coherent,
-	.mmap = __swiotlb_mmap_coherent,
-	.map_page = swiotlb_map_page,
-	.unmap_page = swiotlb_unmap_page,
-	.map_sg = swiotlb_map_sg_attrs,
-	.unmap_sg = swiotlb_unmap_sg_attrs,
-	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.dma_supported = swiotlb_dma_supported,
-	.mapping_error = swiotlb_dma_mapping_error,
-};
-EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
@@ -427,7 +415,7 @@ static int __init swiotlb_late_init(void)
 {
 	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
 
-	dma_ops = &noncoherent_swiotlb_dma_ops;
+	dma_ops = &swiotlb_dma_ops;
 
 	return swiotlb_late_init_with_default_size(swiotlb_size);
 }
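Editor's note: the dma-mapping.c hunks above fold the separate coherent and non-coherent swiotlb dma_map_ops into one set that checks is_device_dma_coherent() at run time and skips CPU cache maintenance for coherent masters. The stand-alone sketch below illustrates that dispatch pattern only; struct device, cache_clean_range() and map_single() are invented stand-ins, not the kernel's types or API.

/*
 * Minimal sketch: perform cache maintenance only for non-coherent devices.
 */
#include <stdbool.h>
#include <stddef.h>

struct device {
	bool dma_coherent;		/* would be set from DT/ACPI */
};

static bool is_device_dma_coherent(const struct device *dev)
{
	return dev->dma_coherent;
}

/* stand-in for __dma_map_area(): push dirty lines out to memory */
static void cache_clean_range(void *addr, size_t size)
{
	(void)addr;
	(void)size;
}

static unsigned long map_single(const struct device *dev, void *addr, size_t size)
{
	unsigned long dev_addr = (unsigned long)addr;	/* swiotlb may bounce here */

	if (!is_device_dma_coherent(dev))		/* coherent masters skip this */
		cache_clean_range(addr, size);

	return dev_addr;
}

int main(void)
{
	struct device dev = { .dma_coherent = false };
	char buf[64];

	(void)map_single(&dev, buf, sizeof(buf));
	return 0;
}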
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index d54dc9ac4b70..74c256744b25 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -14,14 +14,18 @@
  * of the License.
  */
 #include <linux/debugfs.h>
+#include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 
 #include <asm/fixmap.h>
+#include <asm/memory.h>
 #include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
 
 #define LOWEST_ADDR	(UL(0xffffffffffffffff) << VA_BITS)
 
@@ -37,10 +41,10 @@ enum address_markers_idx {
 	VMEMMAP_START_NR,
 	VMEMMAP_END_NR,
 #endif
-	PCI_START_NR,
-	PCI_END_NR,
 	FIXADDR_START_NR,
 	FIXADDR_END_NR,
+	PCI_START_NR,
+	PCI_END_NR,
 	MODULES_START_NR,
 	MODUELS_END_NR,
 	KERNEL_SPACE_NR,
@@ -53,10 +57,10 @@ static struct addr_marker address_markers[] = {
 	{ 0,			"vmemmap start" },
 	{ 0,			"vmemmap end" },
 #endif
-	{ (unsigned long) PCI_IOBASE,		"PCI I/O start" },
-	{ (unsigned long) PCI_IOBASE + SZ_16M,	"PCI I/O end" },
 	{ FIXADDR_START,	"Fixmap start" },
 	{ FIXADDR_TOP,		"Fixmap end" },
+	{ PCI_IO_START,		"PCI I/O start" },
+	{ PCI_IO_END,		"PCI I/O end" },
 	{ MODULES_VADDR,	"Modules start" },
 	{ MODULES_END,		"Modules end" },
 	{ PAGE_OFFSET,		"Kernel Mapping" },
@@ -246,10 +250,12 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 
 	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
 		addr = start + i * PMD_SIZE;
-		if (pmd_none(*pmd) || pmd_sect(*pmd) || pmd_bad(*pmd))
+		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
 			note_page(st, addr, 3, pmd_val(*pmd));
-		else
+		} else {
+			BUG_ON(pmd_bad(*pmd));
 			walk_pte(st, pmd, addr);
+		}
 	}
 }
 
@@ -261,10 +267,12 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 
 	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
 		addr = start + i * PUD_SIZE;
-		if (pud_none(*pud) || pud_sect(*pud) || pud_bad(*pud))
+		if (pud_none(*pud) || pud_sect(*pud)) {
 			note_page(st, addr, 2, pud_val(*pud));
-		else
+		} else {
+			BUG_ON(pud_bad(*pud));
 			walk_pmd(st, pud, addr);
+		}
 	}
 }
 
@@ -276,10 +284,12 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long st
 
 	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
 		addr = start + i * PGDIR_SIZE;
-		if (pgd_none(*pgd) || pgd_bad(*pgd))
+		if (pgd_none(*pgd)) {
 			note_page(st, addr, 1, pgd_val(*pgd));
-		else
+		} else {
+			BUG_ON(pgd_bad(*pgd));
 			walk_pud(st, pgd, addr);
+		}
 	}
 }
 
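Editor's note: the dump.c hunks make the page-table dumper treat section (block) entries as leaf entries and turn the p?d_bad() case into a BUG_ON on the table path. The toy two-level walker below sketches that leaf-or-descend decision only; the structures and ENTRIES constant are invented for illustration and do not match the ARM descriptor format.

#include <assert.h>
#include <stdio.h>

#define ENTRIES 4

struct pte { unsigned long val; };

struct pmd {
	int is_block;			/* pmd_sect() analogue */
	unsigned long val;		/* reported when the entry is a leaf */
	struct pte *table;		/* next level when it is a table */
};

static void walk_pmds(const struct pmd *pmd, unsigned long base,
		      unsigned long block_size)
{
	for (int i = 0; i < ENTRIES; i++, pmd++) {
		unsigned long addr = base + i * block_size;

		if (pmd->val == 0 || pmd->is_block) {
			printf("0x%lx: leaf 0x%lx\n", addr, pmd->val);
		} else {
			assert(pmd->table);	/* BUG_ON(pmd_bad(*pmd)) analogue */
			for (int j = 0; j < ENTRIES; j++)
				printf("0x%lx: pte  0x%lx\n",
				       addr + j * (block_size / ENTRIES),
				       pmd->table[j].val);
		}
	}
}

int main(void)
{
	struct pte ptes[ENTRIES] = { { 0x1 }, { 0x2 }, { 0x3 }, { 0x4 } };
	struct pmd pmds[ENTRIES] = {
		{ .is_block = 1, .val = 0x200000 },
		{ .is_block = 0, .val = 0xdead, .table = ptes },
	};

	walk_pmds(pmds, 0, 0x200000);
	return 0;
}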
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c11cd27ca8f5..96da13167d4a 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -219,7 +219,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 
 	if (esr & ESR_LNX_EXEC) {
 		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
+	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
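Editor's note: the fault.c hunk only switches to the generic ESR_ELx_* names, but the test it touches is worth spelling out: a data abort is treated as a write fault when the WnR bit is set, unless the CM bit marks it as a cache maintenance operation. The stand-alone illustration below assumes the ARMv8 ESR_ELx layout (WnR = bit 6, CM = bit 8); the macros mirror, but are not, the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_WNR	(1u << 6)	/* write, not read */
#define ESR_ELx_CM	(1u << 8)	/* cache maintenance operation */

static bool esr_is_write_fault(uint32_t esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}

int main(void)
{
	printf("plain write abort:       %d\n", esr_is_write_fault(ESR_ELx_WNR));
	printf("cache maintenance abort: %d\n",
	       esr_is_write_fault(ESR_ELx_WNR | ESR_ELx_CM));
	return 0;
}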
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index c95464a33f36..71145f952070 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@
 #include <linux/efi.h>
 
 #include <asm/fixmap.h>
+#include <asm/memory.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -136,10 +137,29 @@ static void arm64_memory_present(void)
 }
 #endif
 
+static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;
+
+/*
+ * Limit the memory size that was specified via FDT.
+ */
+static int __init early_mem(char *p)
+{
+	if (!p)
+		return 1;
+
+	memory_limit = memparse(p, &p) & PAGE_MASK;
+	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);
+
+	return 0;
+}
+early_param("mem", early_mem);
+
 void __init arm64_memblock_init(void)
 {
 	phys_addr_t dma_phys_limit = 0;
 
+	memblock_enforce_memory_limit(memory_limit);
+
 	/*
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
@@ -277,8 +297,8 @@ void __init mem_init(void)
 		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
 		  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
-		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
 		  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
+		  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
 		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
 		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
 		  "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
@@ -291,8 +311,8 @@ void __init mem_init(void)
 		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
 		      (unsigned long)virt_to_page(high_memory)),
 #endif
-		  MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M),
 		  MLK(FIXADDR_START, FIXADDR_TOP),
+		  MLM(PCI_IO_START, PCI_IO_END),
 		  MLM(MODULES_VADDR, MODULES_END),
 		  MLM(PAGE_OFFSET, (unsigned long)high_memory),
 		  MLK_ROUNDUP(__init_begin, __init_end),
@@ -325,6 +345,7 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
+	fixup_init();
 	free_initmem_default(0);
 	free_alternatives_memory();
 }
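Editor's note: the init.c hunks add an early "mem=" parameter that clamps memblock to a size parsed with memparse() and rounded down to a page boundary. The stand-alone program below shows what such a value looks like after parsing; simple_memparse() is a simplified stand-in for the kernel's memparse(), written for illustration only.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK (~0xfffUL)

static unsigned long long simple_memparse(const char *s)
{
	char *end;
	unsigned long long v = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;
	}
	return v;
}

int main(void)
{
	/* e.g. booting with "mem=512M" limits memblock to the low 512 MiB */
	unsigned long long limit = simple_memparse("512M") & PAGE_MASK;

	printf("memory limited to %lluMB\n", limit >> 20);
	return 0;
}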
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index cbb99c8f1e04..01e88c8bcab0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -62,6 +62,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
+	area->phys_addr = phys_addr;
 
 	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
 	if (err) {
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 50c3351df9c7..ef47d99b5cbc 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1 +1,3 @@
 extern void __init bootmem_init(void);
+
+void fixup_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6032f3e3056a..c6daaf6c6f97 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -26,6 +26,8 @@
 #include <linux/memblock.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
@@ -45,80 +47,6 @@
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
 
-struct cachepolicy {
-	const char	policy[16];
-	u64		mair;
-	u64		tcr;
-};
-
-static struct cachepolicy cache_policies[] __initdata = {
-	{
-		.policy		= "uncached",
-		.mair		= 0x44,			/* inner, outer non-cacheable */
-		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
-	}, {
-		.policy		= "writethrough",
-		.mair		= 0xaa,			/* inner, outer write-through, read-allocate */
-		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
-	}, {
-		.policy		= "writeback",
-		.mair		= 0xee,			/* inner, outer write-back, read-allocate */
-		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
-	}
-};
-
-/*
- * These are useful for identifying cache coherency problems by allowing the
- * cache or the cache and writebuffer to be turned off. It changes the Normal
- * memory caching attributes in the MAIR_EL1 register.
- */
-static int __init early_cachepolicy(char *p)
-{
-	int i;
-	u64 tmp;
-
-	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
-		int len = strlen(cache_policies[i].policy);
-
-		if (memcmp(p, cache_policies[i].policy, len) == 0)
-			break;
-	}
-	if (i == ARRAY_SIZE(cache_policies)) {
-		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
-		return 0;
-	}
-
-	flush_cache_all();
-
-	/*
-	 * Modify MT_NORMAL attributes in MAIR_EL1.
-	 */
-	asm volatile(
-	"	mrs	%0, mair_el1\n"
-	"	bfi	%0, %1, %2, #8\n"
-	"	msr	mair_el1, %0\n"
-	"	isb\n"
-	: "=&r" (tmp)
-	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));
-
-	/*
-	 * Modify TCR PTW cacheability attributes.
-	 */
-	asm volatile(
-	"	mrs	%0, tcr_el1\n"
-	"	bic	%0, %0, %2\n"
-	"	orr	%0, %0, %1\n"
-	"	msr	tcr_el1, %0\n"
-	"	isb\n"
-	: "=&r" (tmp)
-	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));
-
-	flush_cache_all();
-
-	return 0;
-}
-early_param("cachepolicy", early_cachepolicy);
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
@@ -133,19 +61,42 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 static void __init *early_alloc(unsigned long sz)
 {
 	void *ptr = __va(memblock_alloc(sz, sz));
+	BUG_ON(!ptr);
 	memset(ptr, 0, sz);
 	return ptr;
 }
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+/*
+ * remap a PMD into pages
+ */
+static void split_pmd(pmd_t *pmd, pte_t *pte)
+{
+	unsigned long pfn = pmd_pfn(*pmd);
+	int i = 0;
+
+	do {
+		/*
+		 * Need to have the least restrictive permissions available
+		 * permissions will be fixed up later
+		 */
+		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+		pfn++;
+	} while (pte++, i++, i < PTRS_PER_PTE);
+}
+
+static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, unsigned long pfn,
-				  pgprot_t prot)
+				  pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pte_t *pte;
 
-	if (pmd_none(*pmd)) {
-		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
+	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
+		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+		if (pmd_sect(*pmd))
+			split_pmd(pmd, pte);
 		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+		flush_tlb_all();
 	}
 	BUG_ON(pmd_bad(*pmd));
 
@@ -156,30 +107,42 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
-				  int map_io)
+void split_pud(pud_t *old_pud, pmd_t *pmd)
+{
+	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
+	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
+	int i = 0;
+
+	do {
+		set_pmd(pmd, __pmd(addr | prot));
+		addr += PMD_SIZE;
+	} while (pmd++, i++, i < PTRS_PER_PMD);
+}
+
+static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
+				  unsigned long addr, unsigned long end,
+				  phys_addr_t phys, pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pmd_t *pmd;
 	unsigned long next;
-	pmdval_t prot_sect;
-	pgprot_t prot_pte;
-
-	if (map_io) {
-		prot_sect = PROT_SECT_DEVICE_nGnRE;
-		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
-	} else {
-		prot_sect = PROT_SECT_NORMAL_EXEC;
-		prot_pte = PAGE_KERNEL_EXEC;
-	}
 
 	/*
 	 * Check for initial section mappings in the pgd/pud and remove them.
 	 */
-	if (pud_none(*pud) || pud_bad(*pud)) {
-		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
-		pud_populate(&init_mm, pud, pmd);
+	if (pud_none(*pud) || pud_sect(*pud)) {
+		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+		if (pud_sect(*pud)) {
+			/*
+			 * need to have the 1G of mappings continue to be
+			 * present
+			 */
+			split_pud(pud, pmd);
+		}
+		pud_populate(mm, pud, pmd);
+		flush_tlb_all();
 	}
+	BUG_ON(pud_bad(*pud));
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -187,31 +150,51 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect));
+			set_pmd(pmd, __pmd(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
 			 */
-			if (!pmd_none(old_pmd))
+			if (!pmd_none(old_pmd)) {
 				flush_tlb_all();
+				if (pmd_table(old_pmd)) {
+					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+					if (!WARN_ON_ONCE(slab_is_available()))
+						memblock_free(table, PAGE_SIZE);
+				}
+			}
 		} else {
 			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-				       prot_pte);
+				       prot, alloc);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
 
-static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, phys_addr_t phys,
-				  int map_io)
+static inline bool use_1G_block(unsigned long addr, unsigned long next,
+			unsigned long phys)
+{
+	if (PAGE_SHIFT != 12)
+		return false;
+
+	if (((addr | next | phys) & ~PUD_MASK) != 0)
+		return false;
+
+	return true;
+}
+
+static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
+				  unsigned long addr, unsigned long end,
+				  phys_addr_t phys, pgprot_t prot,
+				  void *(*alloc)(unsigned long size))
 {
 	pud_t *pud;
 	unsigned long next;
 
 	if (pgd_none(*pgd)) {
-		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
-		pgd_populate(&init_mm, pgd, pud);
+		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
+		pgd_populate(mm, pgd, pud);
 	}
 	BUG_ON(pgd_bad(*pgd));
 
@@ -222,10 +205,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 		/*
 		 * For 4K granule only, attempt to put down a 1GB block
 		 */
-		if (!map_io && (PAGE_SHIFT == 12) &&
-		    ((addr | next | phys) & ~PUD_MASK) == 0) {
+		if (use_1G_block(addr, next, phys)) {
 			pud_t old_pud = *pud;
-			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));
+			set_pud(pud, __pud(phys |
+					   pgprot_val(mk_sect_prot(prot))));
 
 			/*
 			 * If we have an old value for a pud, it will
@@ -235,12 +218,15 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 			 * Look up the old pmd table and free it.
 			 */
 			if (!pud_none(old_pud)) {
-				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
-				memblock_free(table, PAGE_SIZE);
 				flush_tlb_all();
+				if (pud_table(old_pud)) {
+					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+					if (!WARN_ON_ONCE(slab_is_available()))
+						memblock_free(table, PAGE_SIZE);
+				}
 			}
 		} else {
-			alloc_init_pmd(pud, addr, next, phys, map_io);
+			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
 		}
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
@@ -250,9 +236,10 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
-				    unsigned long virt, phys_addr_t size,
-				    int map_io)
+static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
+				    phys_addr_t phys, unsigned long virt,
+				    phys_addr_t size, pgprot_t prot,
+				    void *(*alloc)(unsigned long size))
 {
 	unsigned long addr, length, end, next;
 
@@ -262,31 +249,95 @@ static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(pgd, addr, next, phys, map_io);
+		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
-				  phys_addr_t size)
+static void *late_alloc(unsigned long size)
+{
+	void *ptr;
+
+	BUG_ON(size > PAGE_SIZE);
+	ptr = (void *)__get_free_page(PGALLOC_GFP);
+	BUG_ON(!ptr);
+	return ptr;
+}
+
+static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size, pgprot_t prot)
 {
 	if (virt < VMALLOC_START) {
 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
 			&phys, virt);
 		return;
 	}
-	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
+			 size, prot, early_alloc);
+}
+
+void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+			       unsigned long virt, phys_addr_t size,
+			       pgprot_t prot)
+{
+	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
+			 late_alloc);
 }
 
-void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+static void create_mapping_late(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size, pgprot_t prot)
 {
-	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
-		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+	if (virt < VMALLOC_START) {
+		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+			&phys, virt);
 		return;
 	}
-	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
-			 addr, addr, size, map_io);
+
+	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
+				phys, virt, size, prot, late_alloc);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+	/*
+	 * Set up the executable regions using the existing section mappings
+	 * for now. This will get more fine grained later once all memory
+	 * is mapped
+	 */
+	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+
+	if (end < kernel_x_start) {
+		create_mapping(start, __phys_to_virt(start),
+			end - start, PAGE_KERNEL);
+	} else if (start >= kernel_x_end) {
+		create_mapping(start, __phys_to_virt(start),
+			end - start, PAGE_KERNEL);
+	} else {
+		if (start < kernel_x_start)
+			create_mapping(start, __phys_to_virt(start),
+				kernel_x_start - start,
+				PAGE_KERNEL);
+		create_mapping(kernel_x_start,
+				__phys_to_virt(kernel_x_start),
+				kernel_x_end - kernel_x_start,
+				PAGE_KERNEL_EXEC);
+		if (kernel_x_end < end)
+			create_mapping(kernel_x_end,
+				__phys_to_virt(kernel_x_end),
+				end - kernel_x_end,
+				PAGE_KERNEL);
+	}
+
 }
+#else
+static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+{
+	create_mapping(start, __phys_to_virt(start), end - start,
+			PAGE_KERNEL_EXEC);
+}
+#endif
 
 static void __init map_mem(void)
 {
@@ -332,14 +383,53 @@ static void __init map_mem(void)
 			memblock_set_current_limit(limit);
 		}
 #endif
-
-		create_mapping(start, __phys_to_virt(start), end - start);
+		__map_memblock(start, end);
 	}
 
 	/* Limit no longer required. */
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
+void __init fixup_executable(void)
+{
+#ifdef CONFIG_DEBUG_RODATA
+	/* now that we are actually fully mapped, make the start/end more fine grained */
+	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+		unsigned long aligned_start = round_down(__pa(_stext),
+							 SECTION_SIZE);
+
+		create_mapping(aligned_start, __phys_to_virt(aligned_start),
+				__pa(_stext) - aligned_start,
+				PAGE_KERNEL);
+	}
+
+	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+		unsigned long aligned_end = round_up(__pa(__init_end),
+							  SECTION_SIZE);
+		create_mapping(__pa(__init_end), (unsigned long)__init_end,
+				aligned_end - __pa(__init_end),
+				PAGE_KERNEL);
+	}
+#endif
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	create_mapping_late(__pa(_stext), (unsigned long)_stext,
+				(unsigned long)_etext - (unsigned long)_stext,
+				PAGE_KERNEL_EXEC | PTE_RDONLY);
+
+}
+#endif
+
+void fixup_init(void)
+{
+	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
+			(unsigned long)__init_end - (unsigned long)__init_begin,
+			PAGE_KERNEL);
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps and sets up the zero page.
@@ -349,13 +439,7 @@ void __init paging_init(void)
 	void *zero_page;
 
 	map_mem();
-
-	/*
-	 * Finally flush the caches and tlb to ensure that we're in a
-	 * consistent state.
-	 */
-	flush_cache_all();
-	flush_tlb_all();
+	fixup_executable();
 
 	/* allocate the zero page. */
 	zero_page = early_alloc(PAGE_SIZE);
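Editor's note: several mmu.c hunks revolve around splitting an existing block mapping into a table of smaller entries (split_pmd()/split_pud()) so that permissions can later be changed at a finer granularity without losing the mapping. The user-space sketch below mirrors the split_pud() idea, copying the block's output address and attributes into consecutive next-level entries; the descriptor layout (low 12 bits = attributes) and the sample values are simplifications invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define ENTRIES		512
#define NEXT_SIZE	0x200000ULL		/* 2 MiB per next-level entry */
#define ATTR_MASK	0xfffULL

static void split_block(uint64_t block_desc, uint64_t *table)
{
	uint64_t out_addr = block_desc & ~ATTR_MASK;	/* physical base */
	uint64_t attrs    = block_desc &  ATTR_MASK;	/* keep the attributes */

	for (int i = 0; i < ENTRIES; i++)
		table[i] = (out_addr + (uint64_t)i * NEXT_SIZE) | attrs;
}

int main(void)
{
	static uint64_t pmds[ENTRIES];

	split_block(0x80000000ULL | 0x711ULL, pmds);	/* made-up 1 GiB block */
	printf("entry 0 = 0x%llx, entry 1 = 0x%llx\n",
	       (unsigned long long)pmds[0], (unsigned long long)pmds[1]);
	return 0;
}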
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 4e778b13291b..28eebfb6af76 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -102,7 +102,7 @@ ENTRY(cpu_do_idle)
 	ret
 ENDPROC(cpu_do_idle)
 
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
 /**
  * cpu_do_suspend - save CPU registers context
  *
@@ -244,14 +244,18 @@ ENTRY(__cpu_setup)
 ENDPROC(__cpu_setup)
 
 	/*
+	 * We set the desired value explicitly, including those of the
+	 * reserved bits. The values of bits EE & E0E were set early in
+	 * el2_setup, which are left untouched below.
+	 *
 	 *                 n n            T
 	 *       U E      WT T UD     US IHBS
 	 *       CE0      XWHW CZ     ME TEEA S
 	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
-	 * 0011 0... 1101 ..0. ..0. 10.. .... ....  < hardware reserved
-	 * .... .1.. .... 01.1 11.1 ..01 0001 1101  < software settings
+	 * 0011 0... 1101 ..0. ..0. 10.. .0.. ....  < hardware reserved
+	 * .... .1.. .... 01.1 11.1 ..01 0.01 1101  < software settings
 	 */
 	.type	crval, #object
 crval:
-	.word	0x000802e2			// clear
-	.word	0x0405d11d			// set
+	.word	0xfcffffff			// clear
+	.word	0x34d5d91d			// set
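Editor's note: the crval pair is consumed by __cpu_setup as a clear mask and a set mask for SCTLR_EL1, roughly sctlr = (sctlr & ~clear) | set, so with the new values every bit the kernel cares about ends up in a known state regardless of what firmware left behind. The small program below only demonstrates that masking idiom with the values from the hunk; it is an illustration of the arithmetic, not the kernel's code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t clear = 0xfcffffff;	/* bits forced to a known state */
	const uint32_t set   = 0x34d5d91d;	/* desired values for those bits */
	uint32_t sctlr = 0xdeadbeef;		/* whatever firmware left behind */

	sctlr = (sctlr & ~clear) | set;
	printf("SCTLR_EL1 after __cpu_setup-style masking: 0x%08x\n", sctlr);
	return 0;
}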
257 .word 0x0405d11d // set 261 .word 0x34d5d91d // set