author    Vladimir Murzin <vladimir.murzin@arm.com>    2018-04-03 05:39:23 -0400
committer Russell King <rmk+kernel@armlinux.org.uk>    2018-05-19 06:53:46 -0400
commit    046835b4aa22b9ab6aa0bb274e3b71047c4b887d (patch)
tree      1f1e8dc8ed4c30f03ba51d930a6f79a923959937
parent    3c24121039c9da14692eb48f6e39565b28c0f3cf (diff)
ARM: 8757/1: NOMMU: Support PMSAv8 MPU
The ARMv8R/M architecture defines a new memory protection scheme - PMSAv8 - which is not compatible with PMSAv7.

Key differences to PMSAv7 are:
 - Region geometry is defined by base and limit addresses
 - Addresses need to be either 32 or 64 byte aligned
 - No region priority, since overlapping regions are not allowed
 - It is unified, i.e. no distinction between data/instruction regions
 - Memory attributes are controlled via MAIR

This patch implements support for the PMSAv8 MPU defined by the ARMv8R/M architecture.

Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
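The region encoding this scheme implies is simple: PRBAR carries the granule-aligned base address plus access permissions, shareability and XN in its low bits, while PRLAR carries the granule-aligned inclusive limit address plus the MAIR attribute index and the enable bit. Below is a minimal, standalone C sketch of that encoding using the constants this patch adds to arch/arm/include/asm/mpu.h; the encode_normal_region() helper and the example addresses are illustrative only and are not part of the patch.

/*
 * Standalone sketch of the PMSAv8 PRBAR/PRLAR encoding described above.
 * The PMSAv8_* constants mirror the definitions added to asm/mpu.h by
 * this patch; the helper and the addresses below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define PMSAv8_MINALIGN		64			/* 32 on v7-M */
#define PMSAv8_LAR_EN		1			/* region enable */
#define PMSAv8_LAR_IDX(n)	(((n) & 0x7) << 1)	/* MAIR attribute index */
#define PMSAv8_AP_PL1RW_PL0NA	(0 << 1)		/* privileged RW, user no access */
#define PMSAv8_RGN_SHARED	(3 << 3)		/* inner shareable (SMP) */
#define PMSAv8_RGN_NORMAL	1			/* MAIR slot used for Normal memory */

/* Encode a Normal-memory region covering [start, end) into a PRBAR/PRLAR pair. */
static void encode_normal_region(uint32_t start, uint32_t end,
				 uint32_t *prbar, uint32_t *prlar)
{
	/* Base and limit are granule aligned; the limit is inclusive. */
	*prbar = (start & ~(uint32_t)(PMSAv8_MINALIGN - 1))
		| PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
	*prlar = ((end - 1) & ~(uint32_t)(PMSAv8_MINALIGN - 1))
		| PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
}

int main(void)
{
	uint32_t bar, lar;

	/* Hypothetical 1 MiB kernel image at 0x60000000. */
	encode_normal_region(0x60000000, 0x60100000, &bar, &lar);
	printf("PRBAR=0x%08x PRLAR=0x%08x\n", (unsigned int)bar, (unsigned int)lar);
	return 0;
}

This mirrors what pmsav8_setup_ram() and pmsav8_setup_fixed() in the new pmsa-v8.c do for the RAM and kernel regions of the actual patch.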
-rw-r--r--  arch/arm/include/asm/mpu.h           52
-rw-r--r--  arch/arm/include/asm/v7m.h            8
-rw-r--r--  arch/arm/kernel/asm-offsets.c         2
-rw-r--r--  arch/arm/kernel/head-nommu.S        163
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S     4
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S         7
-rw-r--r--  arch/arm/mm/Makefile                  2
-rw-r--r--  arch/arm/mm/nommu.c                   6
-rw-r--r--  arch/arm/mm/pmsa-v8.c               307
9 files changed, 547 insertions, 4 deletions
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index fbde275668c3..5e088c83d3d8 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -12,6 +12,7 @@
 /* ID_MMFR0 data relevant to MPU */
 #define MMFR0_PMSA		(0xF << 4)
 #define MMFR0_PMSAv7		(3 << 4)
+#define MMFR0_PMSAv8		(4 << 4)
 
 /* MPU D/I Size Register fields */
 #define PMSAv7_RSR_SZ		1
@@ -47,12 +48,43 @@
 #define PMSAv7_AP_PL1RW_PL0R0	(0x2 << 8)
 #define PMSAv7_AP_PL1RW_PL0NA	(0x1 << 8)
 
+#define PMSAv8_BAR_XN		1
+
+#define PMSAv8_LAR_EN		1
+#define PMSAv8_LAR_IDX(n)	(((n) & 0x7) << 1)
+
+
+#define PMSAv8_AP_PL1RW_PL0NA	(0 << 1)
+#define PMSAv8_AP_PL1RW_PL0RW	(1 << 1)
+#define PMSAv8_AP_PL1RO_PL0RO	(3 << 1)
+
+#ifdef CONFIG_SMP
+#define PMSAv8_RGN_SHARED	(3 << 3) // inner sharable
+#else
+#define PMSAv8_RGN_SHARED	(0 << 3)
+#endif
+
+#define PMSAv8_RGN_DEVICE_nGnRnE	0
+#define PMSAv8_RGN_NORMAL	1
+
+#define PMSAv8_MAIR(attr, mt)	((attr) << ((mt) * 8))
+
+#ifdef CONFIG_CPU_V7M
+#define PMSAv8_MINALIGN		32
+#else
+#define PMSAv8_MINALIGN		64
+#endif
+
 /* For minimal static MPU region configurations */
 #define PMSAv7_PROBE_REGION	0
 #define PMSAv7_BG_REGION	1
 #define PMSAv7_RAM_REGION	2
 #define PMSAv7_ROM_REGION	3
 
+/* Fixed for PMSAv8 only */
+#define PMSAv8_XIP_REGION	0
+#define PMSAv8_KERNEL_REGION	1
+
 /* Maximum number of regions Linux is interested in */
 #define MPU_MAX_REGIONS		16
 
@@ -63,9 +95,18 @@
 
 struct mpu_rgn {
 	/* Assume same attributes for d/i-side */
-	u32 drbar;
-	u32 drsr;
-	u32 dracr;
+	union {
+		u32 drbar;	/* PMSAv7 */
+		u32 prbar;	/* PMSAv8 */
+	};
+	union {
+		u32 drsr;	/* PMSAv7 */
+		u32 prlar;	/* PMSAv8 */
+	};
+	union {
+		u32 dracr;	/* PMSAv7 */
+		u32 unused;	/* not used in PMSAv8 */
+	};
 };
 
 struct mpu_rgn_info {
@@ -76,10 +117,15 @@ extern struct mpu_rgn_info mpu_rgn_info;
 
 #ifdef CONFIG_ARM_MPU
 extern void __init pmsav7_adjust_lowmem_bounds(void);
+extern void __init pmsav8_adjust_lowmem_bounds(void);
+
 extern void __init pmsav7_setup(void);
+extern void __init pmsav8_setup(void);
 #else
 static inline void pmsav7_adjust_lowmem_bounds(void) {};
+static inline void pmsav8_adjust_lowmem_bounds(void) {};
 static inline void pmsav7_setup(void) {};
+static inline void pmsav8_setup(void) {};
 #endif
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index aba49e0b3ebe..187ccf6496ad 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -68,6 +68,14 @@
 #define PMSAv7_RBAR		0x9c
 #define PMSAv7_RASR		0xa0
 
+#define PMSAv8_RNR		0x98
+#define PMSAv8_RBAR		0x9c
+#define PMSAv8_RLAR		0xa0
+#define PMSAv8_RBAR_A(n)	(PMSAv8_RBAR + 8*(n))
+#define PMSAv8_RLAR_A(n)	(PMSAv8_RLAR + 8*(n))
+#define PMSAv8_MAIR0		0xc0
+#define PMSAv8_MAIR1		0xc4
+
 /* Cache opeartions */
 #define V7M_SCB_ICIALLU		0x250	/* I-cache invalidate all to PoU */
 #define V7M_SCB_ICIMVAU		0x258	/* I-cache invalidate by MVA to PoU */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 250a98544ca6..27c5381518d8 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -197,6 +197,8 @@ int main(void)
   DEFINE(MPU_RGN_DRBAR,	offsetof(struct mpu_rgn, drbar));
   DEFINE(MPU_RGN_DRSR,	offsetof(struct mpu_rgn, drsr));
   DEFINE(MPU_RGN_DRACR,	offsetof(struct mpu_rgn, dracr));
+  DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
+  DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
 #endif
   return 0;
 }
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2f0f1ba6e237..dd546d65a383 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -132,6 +132,25 @@ M_CLASS(ldr r3, [r12, 0x50])
 AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)		@ Read ID_MMFR0
 	and	r3, r3, #(MMFR0_PMSA)		@ PMSA field
 	teq	r3, #(MMFR0_PMSAv7)		@ PMSA v7
+	beq	1f
+	teq	r3, #(MMFR0_PMSAv8)		@ PMSA v8
+	/*
+	 * Memory region attributes for PMSAv8:
+	 *
+	 *   n = AttrIndx[2:0]
+	 *                      n       MAIR
+	 *   DEVICE_nGnRnE      000     00000000
+	 *   NORMAL             001     11111111
+	 */
+	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
+		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
+AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)		@ MAIR 0
+M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
+	moveq	r3, #0
+AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)		@ MAIR 1
+M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
+
+1:
 #endif
 #ifdef CONFIG_CPU_CP15
 	/*
@@ -235,6 +254,8 @@ M_CLASS(ldr r0, [r12, 0x50])
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
 	beq	__setup_pmsa_v7
+	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
+	beq	__setup_pmsa_v8
 
 	ret	lr
 ENDPROC(__setup_mpu)
@@ -304,6 +325,119 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
 	ret	lr
 ENDPROC(__setup_pmsa_v7)
 
+ENTRY(__setup_pmsa_v8)
+	mov	r0, #0
+AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)		@ PRSEL
+M_CLASS(str	r0, [r12, #PMSAv8_RNR])
+	isb
+
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r5, =CONFIG_XIP_PHYS_ADDR	@ ROM start
+	ldr	r6, =(_exiprom)			@ ROM end
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)		@ PRBAR0
+AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)		@ PRLAR0
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
+#endif
+
+	ldr	r5, =KERNEL_START
+	ldr	r6, =KERNEL_END
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)		@ PRBAR1
+AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)		@ PRLAR1
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])
+
+	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r6, =KERNEL_START
+	ldr	r5, =CONFIG_XIP_PHYS_ADDR
+	cmp	r6, r5
+	movcs	r6, r5
+#else
+	ldr	r6, =KERNEL_START
+#endif
+	cmp	r6, #0
+	beq	1f
+
+	mov	r5, #0
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)		@ PRBAR2
+AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)		@ PRLAR2
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])
+
+1:
+	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
+#ifdef CONFIG_XIP_KERNEL
+	ldr	r5, =KERNEL_END
+	ldr	r6, =(_exiprom)
+	cmp	r5, r6
+	movcc	r5, r6
+#else
+	ldr	r5, =KERNEL_END
+#endif
+	mov	r6, #0xffffffff
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)		@ PRBAR3
+AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)		@ PRLAR3
+M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
+M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])
+
+#ifdef CONFIG_XIP_KERNEL
+	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
+	ldr	r5, =(_exiprom)
+	ldr	r6, =KERNEL_END
+	cmp	r5, r6
+	movcs	r5, r6
+
+	ldr	r6, =KERNEL_START
+	ldr	r0, =CONFIG_XIP_PHYS_ADDR
+	cmp	r6, r0
+	movcc	r6, r0
+
+	sub	r6, r6, #1
+	bic	r6, r6, #(PMSAv8_MINALIGN - 1)
+
+	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+#ifdef CONFIG_CPU_V7M
+	/* There is no alias for n == 4 */
+	mov	r0, #4
+	str	r0, [r12, #PMSAv8_RNR]		@ PRSEL
+	isb
+
+	str	r5, [r12, #PMSAv8_RBAR_A(0)]
+	str	r6, [r12, #PMSAv8_RLAR_A(0)]
+#else
+	mcr	p15, 0, r5, c6, c10, 1		@ PRBAR4
+	mcr	p15, 0, r6, c6, c10, 2		@ PRLAR4
+#endif
+#endif
+	ret	lr
+ENDPROC(__setup_pmsa_v8)
+
 #ifdef CONFIG_SMP
 /*
  * r6: pointer at mpu_rgn_info
@@ -319,6 +453,8 @@ ENTRY(__secondary_setup_mpu)
 	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
 	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
 	beq	__secondary_setup_pmsa_v7
+	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
+	beq	__secondary_setup_pmsa_v8
 	b	__error_p
 ENDPROC(__secondary_setup_mpu)
 
@@ -361,6 +497,33 @@ ENTRY(__secondary_setup_pmsa_v7)
 	ret	lr
 ENDPROC(__secondary_setup_pmsa_v7)
 
+ENTRY(__secondary_setup_pmsa_v8)
+	ldr	r4, [r6, #MPU_RNG_INFO_USED]
+#ifndef CONFIG_XIP_KERNEL
+	add	r4, r4, #1
+#endif
+	mov	r5, #MPU_RNG_SIZE
+	add	r3, r6, #MPU_RNG_INFO_RNGS
+	mla	r3, r4, r5, r3
+
+1:
+	sub	r3, r3, #MPU_RNG_SIZE
+	sub	r4, r4, #1
+
+	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
+	isb
+
+	ldr	r5, [r3, #MPU_RGN_PRBAR]
+	ldr	r6, [r3, #MPU_RGN_PRLAR]
+
+	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
+	mcr	p15, 0, r6, c6, c3, 1		@ PRLAR
+
+	cmp	r4, #0
+	bgt	1b
+
+	ret	lr
+ENDPROC(__secondary_setup_pmsa_v8)
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_ARM_MPU */
 #include "head-common.S"
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index d32f5d35f602..3593d5c1acd2 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -13,6 +13,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 
 #include "vmlinux.lds.h"
@@ -148,6 +149,9 @@ SECTIONS
 	__init_end = .;
 
 	BSS_SECTION(0, 0, 8)
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index b77dc675ae55..23150c0f0f4d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,6 +12,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
+#include <asm/mpu.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -54,6 +55,9 @@ SECTIONS
 	. = ALIGN(1<<SECTION_SHIFT);
 #endif
 
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	.text : {		/* Real text segment */
 		_stext = .;	/* Text and read-only data */
 		ARM_TEXT
@@ -143,6 +147,9 @@ SECTIONS
 	_edata = .;
 
 	BSS_SECTION(0, 0, 0)
+#ifdef CONFIG_ARM_MPU
+	. = ALIGN(PMSAv8_MINALIGN);
+#endif
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9dbb84923e12..d19b209e04e0 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
-obj-$(CONFIG_ARM_MPU)		+= pmsa-v7.o
+obj-$(CONFIG_ARM_MPU)		+= pmsa-v7.o pmsa-v8.o
 endif
 
 obj-$(CONFIG_ARM_PTDUMP_CORE)	+= dump.o
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index edbaa47be160..5dd6c58d653b 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -107,6 +107,9 @@ static void __init adjust_lowmem_bounds_mpu(void)
 	case MMFR0_PMSAv7:
 		pmsav7_adjust_lowmem_bounds();
 		break;
+	case MMFR0_PMSAv8:
+		pmsav8_adjust_lowmem_bounds();
+		break;
 	default:
 		break;
 	}
@@ -120,6 +123,9 @@ static void __init mpu_setup(void)
 	case MMFR0_PMSAv7:
 		pmsav7_setup();
 		break;
+	case MMFR0_PMSAv8:
+		pmsav8_setup();
+		break;
 	default:
 		break;
 	}
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
new file mode 100644
index 000000000000..617a83def88a
--- /dev/null
+++ b/arch/arm/mm/pmsa-v8.c
@@ -0,0 +1,307 @@
+/*
+ * Based on linux/arch/arm/pmsa-v7.c
+ *
+ * ARM PMSAv8 supporting functions.
+ */
+
+#include <linux/memblock.h>
+#include <linux/range.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/mpu.h>
+
+#include <asm/memory.h>
+#include <asm/sections.h>
+
+#include "mm.h"
+
+#ifndef CONFIG_CPU_V7M
+
+#define PRSEL	__ACCESS_CP15(c6, 0, c2, 1)
+#define PRBAR	__ACCESS_CP15(c6, 0, c3, 0)
+#define PRLAR	__ACCESS_CP15(c6, 0, c3, 1)
+
+static inline u32 prlar_read(void)
+{
+	return read_sysreg(PRLAR);
+}
+
+static inline u32 prbar_read(void)
+{
+	return read_sysreg(PRBAR);
+}
+
+static inline void prsel_write(u32 v)
+{
+	write_sysreg(v, PRSEL);
+}
+
+static inline void prbar_write(u32 v)
+{
+	write_sysreg(v, PRBAR);
+}
+
+static inline void prlar_write(u32 v)
+{
+	write_sysreg(v, PRLAR);
+}
+#else
+
+static inline u32 prlar_read(void)
+{
+	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RLAR);
+}
+
+static inline u32 prbar_read(void)
+{
+	return readl_relaxed(BASEADDR_V7M_SCB + PMSAv8_RBAR);
+}
+
+static inline void prsel_write(u32 v)
+{
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RNR);
+}
+
+static inline void prbar_write(u32 v)
+{
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RBAR);
+}
+
+static inline void prlar_write(u32 v)
+{
+	writel_relaxed(v, BASEADDR_V7M_SCB + PMSAv8_RLAR);
+}
+
+#endif
+
+static struct range __initdata io[MPU_MAX_REGIONS];
+static struct range __initdata mem[MPU_MAX_REGIONS];
+
+static unsigned int __initdata mpu_max_regions;
+
+static __init bool is_region_fixed(int number)
+{
+	switch (number) {
+	case PMSAv8_XIP_REGION:
+	case PMSAv8_KERNEL_REGION:
+		return true;
+	default:
+		return false;
+	}
+}
+
+void __init pmsav8_adjust_lowmem_bounds(void)
+{
+	phys_addr_t mem_end;
+	struct memblock_region *reg;
+	bool first = true;
+
+	for_each_memblock(memory, reg) {
+		if (first) {
+			phys_addr_t phys_offset = PHYS_OFFSET;
+
+			/*
+			 * Initially only use memory continuous from
+			 * PHYS_OFFSET */
+			if (reg->base != phys_offset)
+				panic("First memory bank must be contiguous from PHYS_OFFSET");
+			mem_end = reg->base + reg->size;
+			first = false;
+		} else {
+			/*
+			 * memblock auto merges contiguous blocks, remove
+			 * all blocks afterwards in one go (we can't remove
+			 * blocks separately while iterating)
+			 */
+			pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
+				  &mem_end, &reg->base);
+			memblock_remove(reg->base, 0 - reg->base);
+			break;
+		}
+	}
+}
+
+static int __init __mpu_max_regions(void)
+{
+	static int max_regions;
+	u32 mpuir;
+
+	if (max_regions)
+		return max_regions;
+
+	mpuir = read_cpuid_mputype();
+
+	max_regions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
+
+	return max_regions;
+}
+
+static int __init __pmsav8_setup_region(unsigned int number, u32 bar, u32 lar)
+{
+	if (number > mpu_max_regions
+	    || number >= MPU_MAX_REGIONS)
+		return -ENOENT;
+
+	dsb();
+	prsel_write(number);
+	isb();
+	prbar_write(bar);
+	prlar_write(lar);
+
+	mpu_rgn_info.rgns[number].prbar = bar;
+	mpu_rgn_info.rgns[number].prlar = lar;
+
+	mpu_rgn_info.used++;
+
+	return 0;
+}
+
+static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (is_region_fixed(number))
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+
+	bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+	return __pmsav8_setup_region(number, bar, lar);
+}
+
+static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (is_region_fixed(number))
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+
+	bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
+
+	return __pmsav8_setup_region(number, bar, lar);
+}
+
+static int __init pmsav8_setup_fixed(unsigned int number, phys_addr_t start,phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (!is_region_fixed(number))
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+	bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+	prsel_write(number);
+	isb();
+
+	if (prbar_read() != bar || prlar_read() != lar)
+		return -EINVAL;
+
+	/* Reserved region was set up early, we just need a record for secondaries */
+	mpu_rgn_info.rgns[number].prbar = bar;
+	mpu_rgn_info.rgns[number].prlar = lar;
+
+	mpu_rgn_info.used++;
+
+	return 0;
+}
+
+#ifndef CONFIG_CPU_V7M
+static int __init pmsav8_setup_vector(unsigned int number, phys_addr_t start,phys_addr_t end)
+{
+	u32 bar, lar;
+
+	if (number == PMSAv8_KERNEL_REGION)
+		return -EINVAL;
+
+	bar = start;
+	lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
+
+	bar |= PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED;
+	lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
+
+	return __pmsav8_setup_region(number, bar, lar);
+}
+#endif
+
+void __init pmsav8_setup(void)
+{
+	int i, err = 0;
+	int region = PMSAv8_KERNEL_REGION;
+
+	/* How many regions are supported ? */
+	mpu_max_regions = __mpu_max_regions();
+
+	/* RAM: single chunk of memory */
+	add_range(mem, ARRAY_SIZE(mem), 0, memblock.memory.regions[0].base,
+		  memblock.memory.regions[0].base + memblock.memory.regions[0].size);
+
+	/* IO: cover full 4G range */
+	add_range(io, ARRAY_SIZE(io), 0, 0, 0xffffffff);
+
+	/* RAM and IO: exclude kernel */
+	subtract_range(mem, ARRAY_SIZE(mem), __pa(KERNEL_START), __pa(KERNEL_END));
+	subtract_range(io, ARRAY_SIZE(io), __pa(KERNEL_START), __pa(KERNEL_END));
+
+#ifdef CONFIG_XIP_KERNEL
+	/* RAM and IO: exclude xip */
+	subtract_range(mem, ARRAY_SIZE(mem), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+	subtract_range(io, ARRAY_SIZE(io), CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+#endif
+
+#ifndef CONFIG_CPU_V7M
+	/* RAM and IO: exclude vectors */
+	subtract_range(mem, ARRAY_SIZE(mem), vectors_base, vectors_base + 2 * PAGE_SIZE);
+	subtract_range(io, ARRAY_SIZE(io), vectors_base, vectors_base + 2 * PAGE_SIZE);
+#endif
+	/* IO: exclude RAM */
+	for (i = 0; i < ARRAY_SIZE(mem); i++)
+		subtract_range(io, ARRAY_SIZE(io), mem[i].start, mem[i].end);
+
+	/* Now program MPU */
+
+#ifdef CONFIG_XIP_KERNEL
+	/* ROM */
+	err |= pmsav8_setup_fixed(PMSAv8_XIP_REGION, CONFIG_XIP_PHYS_ADDR, __pa(_exiprom));
+#endif
+	/* Kernel */
+	err |= pmsav8_setup_fixed(region++, __pa(KERNEL_START), __pa(KERNEL_END));
+
+
+	/* IO */
+	for (i = 0; i < ARRAY_SIZE(io); i++) {
+		if (!io[i].end)
+			continue;
+
+		err |= pmsav8_setup_io(region++, io[i].start, io[i].end);
+	}
+
+	/* RAM */
+	for (i = 0; i < ARRAY_SIZE(mem); i++) {
+		if (!mem[i].end)
+			continue;
+
+		err |= pmsav8_setup_ram(region++, mem[i].start, mem[i].end);
+	}
+
+	/* Vectors */
+#ifndef CONFIG_CPU_V7M
+	err |= pmsav8_setup_vector(region++, vectors_base, vectors_base + 2 * PAGE_SIZE);
+#endif
+	if (err)
+		pr_warn("MPU region initialization failure! %d", err);
+	else
+		pr_info("Using ARM PMSAv8 Compliant MPU. Used %d of %d regions\n",
+			mpu_rgn_info.used, mpu_max_regions);
+}