 arch/arm/Kconfig-nommu            |  2 +-
 arch/arm/include/asm/mpu.h        |  3 ++-
 arch/arm/kernel/head-nommu.S      | 20 ++++++++++++
 arch/arm/kernel/vmlinux-xip.lds.S | 23 +++++++++++++
 arch/arm/mm/pmsa-v7.c             | 49 ++++++++++++++++++++-----
 5 files changed, 90 insertions(+), 7 deletions(-)
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index 930e000489a8..0fad7d943630 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -52,7 +52,7 @@ config REMAP_VECTORS_TO_RAM
 
 config ARM_MPU
 	bool 'Use the ARM v7 PMSA Compliant MPU'
-	depends on !XIP_KERNEL && (CPU_V7 || CPU_V7M)
+	depends on CPU_V7 || CPU_V7M
 	default y if CPU_V7
 	help
 	  Some ARM systems without an MMU have instead a Memory Protection
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index 5db37a6ef3cb..56ec02617f58 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -41,6 +41,7 @@
 #endif
 
 /* Access permission bits of ACR (only define those that we use)*/
+#define MPU_AP_PL1RO_PL0NA	(0x5 << 8)
 #define MPU_AP_PL1RW_PL0RW	(0x3 << 8)
 #define MPU_AP_PL1RW_PL0R0	(0x2 << 8)
 #define MPU_AP_PL1RW_PL0NA	(0x1 << 8)
@@ -49,7 +50,7 @@
 #define MPU_PROBE_REGION	0
 #define MPU_BG_REGION		1
 #define MPU_RAM_REGION		2
-#define MPU_VECTORS_REGION	3
+#define MPU_ROM_REGION		3
 
 /* Maximum number of regions Linux is interested in */
 #define MPU_MAX_REGIONS		16
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 0d64b8ba7e9c..2e38f85b757a 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -258,6 +258,26 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
 	setup_region r0, r5, r6, MPU_INSTR_SIDE r12	@ 0x0, BG region, enabled
 2:	isb
 
+#ifdef CONFIG_XIP_KERNEL
+	set_region_nr r0, #MPU_ROM_REGION, r12
+	isb
+
+	ldr	r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL)
+
+	ldr	r0, =CONFIG_XIP_PHYS_ADDR		@ ROM start
+	ldr	r6, =(_exiprom)				@ ROM end
+	sub	r6, r6, r0				@ Minimum size of region to map
+	clz	r6, r6					@ Region size must be 2^N...
+	rsb	r6, r6, #31				@ ...so round up region size
+	lsl	r6, r6, #MPU_RSR_SZ			@ Put size in right field
+	orr	r6, r6, #(1 << MPU_RSR_EN)		@ Set region enabled bit
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
+	beq	3f					@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE, r12	@ XIP_PHYS_ADDR, shared, enabled
+3:	isb
+#endif
+
 	/* Enable the MPU */
 AR_CLASS(mrc	p15, 0, r0, c1, c0, 0)		@ Read SCTLR
 AR_CLASS(bic	r0, r0, #CR_BR)			@ Disable the 'default mem-map'
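The assembly added above rounds the ROM span up to a power-of-two region size with clz. A minimal standalone C sketch of the same computation (illustrative only, not part of the patch; the MPU_RSR_SZ and MPU_RSR_EN offsets are assumptions matching asm/mpu.h):

#include <stdint.h>
#include <stdio.h>

#define MPU_RSR_SZ	1	/* assumed: SIZE field starts at bit 1 */
#define MPU_RSR_EN	0	/* assumed: enable bit is bit 0 */

/* Mirror of: sub r6, r6, r0; clz r6, r6; rsb r6, r6, #31; lsl; orr */
static uint32_t xip_region_rsr(uint32_t rom_start, uint32_t rom_end)
{
	uint32_t span = rom_end - rom_start;		/* minimum bytes to map */
	uint32_t order = 31 - __builtin_clz(span);	/* clz, then rsb #31 */

	/* DRSR SIZE selects a 2^(SIZE+1) byte region, so this covers >= span */
	return (order << MPU_RSR_SZ) | (1u << MPU_RSR_EN);
}

int main(void)
{
	/* hypothetical 1152 KiB image at 0x08000000 -> SIZE = 20, i.e. 2 MiB */
	printf("DRSR = 0x%08x\n", xip_region_rsr(0x08000000, 0x08120000));
	return 0;
}

Note that with this encoding a span that is already an exact power of two still selects a region of twice that size.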
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 7a844310085e..74c93879532a 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -6,6 +6,8 @@
 /* No __ro_after_init data in the .rodata section - which will always be ro */
 #define RO_AFTER_INIT_DATA
 
+#include <linux/sizes.h>
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
@@ -187,6 +189,9 @@ SECTIONS
 		INIT_RAM_FS
 	}
 
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(SZ_128K);
+#endif
 	_exiprom = .;			/* End of XIP ROM area */
 
 /*
@@ -314,3 +319,21 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
  */
 ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
 #endif
+
+#ifdef CONFIG_ARM_MPU
+/*
+ * Due to the PMSAv7 restrictions on region base address and size we
+ * have to enforce minimal alignment. It was seen that a weaker
+ * alignment restriction on _xiprom will likely force the XIP address
+ * space to span multiple MPU regions, so it is likely we end up
+ * reprogramming the very MPU region we are running from with
+ * something which doesn't cover the reprogramming code itself; as
+ * soon as we update the MPU settings we'd immediately try to execute
+ * straight from the background region, which is XN.
+ * It seems that 1M alignment should suit most users.
+ * _exiprom is aligned to 1/8 of 1M so it can be covered by subregion
+ * disable.
+ */
+ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues")
+ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues")
+#endif
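The comment above leans on PMSAv7 subregions: any region of 256 bytes or more splits into eight equal subregions that can be disabled individually, so a ROM tail that is a multiple of 1/8 of the region size can simply be masked out. A small illustrative C sketch (the helper name and the addresses in main() are hypothetical, not from the kernel):

#include <stdint.h>
#include <stdio.h>

#define SZ_1M	0x00100000u
#define SZ_128K	0x00020000u

/*
 * Hypothetical helper: disable the subregions of a 1 MiB ROM region
 * that lie entirely past _exiprom.  Works because 8 * SZ_128K == SZ_1M.
 */
static uint8_t rom_tail_subreg_disable(uint32_t region_base, uint32_t exiprom)
{
	uint32_t used = exiprom - region_base;	/* ROM bytes actually used */
	unsigned int live = used / SZ_128K;	/* fully used subregions */

	return (uint8_t)(0xffu << live);	/* one disable bit per unused 1/8 */
}

int main(void)
{
	/* made-up layout: 1 MiB region at 16 MiB, _exiprom 640 KiB in */
	printf("subregion disable mask = 0x%02x\n",
	       rom_tail_subreg_disable(0x01000000, 0x010a0000));
	return 0;
}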
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index ef204634a16e..106ae1c435a3 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -7,9 +7,11 @@
 #include <linux/bitops.h>
 #include <linux/memblock.h>
 
+#include <asm/cacheflush.h>
 #include <asm/cp15.h>
 #include <asm/cputype.h>
 #include <asm/mpu.h>
+#include <asm/sections.h>
 
 #include "mm.h"
 
@@ -20,6 +22,9 @@ struct region {
 };
 
 static struct region __initdata mem[MPU_MAX_REGIONS];
+#ifdef CONFIG_XIP_KERNEL
+static struct region __initdata xip[MPU_MAX_REGIONS];
+#endif
 
 static unsigned int __initdata mpu_min_region_order;
 static unsigned int __initdata mpu_max_regions;
@@ -229,7 +234,6 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
 /* MPU initialisation functions */
 void __init adjust_lowmem_bounds_mpu(void)
 {
-	phys_addr_t phys_offset = PHYS_OFFSET;
 	phys_addr_t specified_mem_size, total_mem_size = 0;
 	struct memblock_region *reg;
 	bool first = true;
@@ -256,8 +260,19 @@ void __init adjust_lowmem_bounds_mpu(void)
 	/* ... and one for vectors */
 	mem_max_regions--;
 #endif
+
+#ifdef CONFIG_XIP_KERNEL
+	/* plus some regions to cover XIP ROM */
+	num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
+			      mem_max_regions, xip);
+
+	mem_max_regions -= num;
+#endif
+
 	for_each_memblock(memory, reg) {
 		if (first) {
+			phys_addr_t phys_offset = PHYS_OFFSET;
+
 			/*
 			 * Initially only use memory continuous from
 			 * PHYS_OFFSET */
@@ -355,7 +370,7 @@ static int __init __mpu_min_region_order(void)
 
 static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 			unsigned int size_order, unsigned int properties,
-			unsigned int subregions)
+			unsigned int subregions, bool need_flush)
 {
 	u32 size_data;
 
@@ -374,6 +389,9 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
 	size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
 	size_data |= subregions << MPU_RSR_SD;
 
+	if (need_flush)
+		flush_cache_all();
+
 	dsb(); /* Ensure all previous data accesses occur with old mappings */
 	rgnr_write(number);
 	isb();
@@ -416,7 +434,28 @@ void __init mpu_setup(void)
 	/* Background */
 	err |= mpu_setup_region(region++, 0, 32,
 				MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
-				0);
+				0, false);
+
+#ifdef CONFIG_XIP_KERNEL
+	/* ROM */
+	for (i = 0; i < ARRAY_SIZE(xip); i++) {
+		/*
+		 * In case we overwrite the RAM region we set up earlier in
+		 * head-nommu.S (which is cacheable), all subsequent data
+		 * accesses until we set up RAM below would go via the BG
+		 * region (which is uncacheable), so we need to clean and
+		 * invalidate the cache.
+		 */
+		bool need_flush = region == MPU_RAM_REGION;
+
+		if (!xip[i].size)
+			continue;
+
+		err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
+					MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
+					xip[i].subreg, need_flush);
+	}
+#endif
 
 	/* RAM */
 	for (i = 0; i < ARRAY_SIZE(mem); i++) {
@@ -425,14 +464,14 @@ void __init mpu_setup(void)
 
 		err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
 					MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
-					mem[i].subreg);
+					mem[i].subreg, false);
 	}
 
 	/* Vectors */
 #ifndef CONFIG_CPU_V7M
 	err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
 				MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
-				0);
+				0, false);
 #endif
 	if (err) {
 		panic("MPU region initialization failure! %d", err);
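The allocate_region() call added to adjust_lowmem_bounds_mpu() above carves the XIP window [CONFIG_XIP_PHYS_ADDR, __pa(_exiprom)) into MPU slots. As a rough illustration of why the linker-script alignment matters, here is a conceptual sketch (not the kernel's allocate_region(), and it ignores the subregion trick) that covers a span with naturally aligned power-of-two regions; a 1 MiB aligned, 1 MiB sized window costs exactly one slot:

#include <stdint.h>
#include <stdio.h>

/*
 * Conceptual only: greedy cover of [base, base + size) with naturally
 * aligned power-of-two regions (no subregions, unlike the real code).
 */
static unsigned int cover_with_regions(uint32_t base, uint32_t size)
{
	unsigned int used = 0;

	while (size) {
		/* largest power of two the current base alignment allows ... */
		uint32_t align = base ? (base & -base) : 0x80000000u;
		/* ... and the largest power of two not exceeding the remainder */
		uint32_t chunk = 1u << (31 - __builtin_clz(size));

		if (chunk > align)
			chunk = align;

		printf("region %u: base 0x%08x size 0x%08x\n", used, base, chunk);
		base += chunk;
		size -= chunk;
		used++;
	}
	return used;
}

int main(void)
{
	/* a 1 MiB aligned, 1 MiB sized XIP window: exactly one region */
	cover_with_regions(0x00100000, 0x00100000);
	return 0;
}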