path: root/arch/arm64
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 15:06:03 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-08 15:06:03 -0400
commit	e4f30545a23b6e22fb347a1ad034ce1aa280209c (patch)
tree	415db60ace3ab89fe150bca797c690ddaf021432 /arch/arm64
parent	d586c86d50cefa0897a51a2dbc714060ccedae76 (diff)
parent	ebf81a938dade3b450eb11c57fa744cfac4b523f (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull second set of arm64 updates from Catalin Marinas:
 "A second pull request for this merging window, mainly with fixes and
  docs clarification:

   - Documentation clarification on CPU topology and booting
     requirements

   - Additional cache flushing during boot (needed in the presence of
     external caches or under virtualisation)

   - DMA range invalidation fix for non cache line aligned buffers

   - Build failure fix with !COMPAT

   - Kconfig update for STRICT_DEVMEM"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: Fix DMA range invalidation for cache line unaligned buffers
  arm64: Add missing Kconfig for CONFIG_STRICT_DEVMEM
  arm64: fix !CONFIG_COMPAT build failures
  Revert "arm64: virt: ensure visibility of __boot_cpu_mode"
  arm64: Relax the kernel cache requirements for boot
  arm64: Update the TCR_EL1 translation granule definitions for 16K pages
  ARM: topology: Make it clear that all CPUs need to be described
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/Kconfig.debug			14
-rw-r--r--	arch/arm64/include/asm/pgtable-hwdef.h		6
-rw-r--r--	arch/arm64/include/asm/virt.h			13
-rw-r--r--	arch/arm64/kernel/head.S			30
-rw-r--r--	arch/arm64/kernel/perf_event.c			4
-rw-r--r--	arch/arm64/kernel/perf_regs.c			2
-rw-r--r--	arch/arm64/mm/cache.S				24
-rw-r--r--	arch/arm64/mm/proc.S				25
8 files changed, 87 insertions, 31 deletions
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 835c559786bd..d10ec334c93b 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,6 +6,20 @@ config FRAME_POINTER
 	bool
 	default y
 
+config STRICT_DEVMEM
+	bool "Filter access to /dev/mem"
+	depends on MMU
+	help
+	  If this option is disabled, you allow userspace (root) access to all
+	  of memory, including kernel and userspace memory. Accidental
+	  access to this is obviously disastrous, but specific access can
+	  be used by people debugging the kernel.
+
+	  If this option is switched on, the /dev/mem file only allows
+	  userspace access to memory mapped peripherals.
+
+	  If in doubt, say Y.
+
 config EARLY_PRINTK
 	bool "Early printk support"
 	default y
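
With STRICT_DEVMEM enabled, the generic /dev/mem driver (drivers/char/mem.c)
asks the architecture whether each pfn may be mapped at all. A minimal sketch
of such a devmem_is_allowed() hook, modelled on other architectures rather
than the literal arm64 code:

	/* Illustrative only: deny RAM pages, leave MMIO peripherals mappable. */
	int devmem_is_allowed(unsigned long pfn)
	{
		return !page_is_ram(pfn);
	}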
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index f7af66b54cb2..5fc8a66c3924 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -120,8 +120,12 @@
 #define TCR_ORGN_WBnWA		((UL(3) << 10) | (UL(3) << 26))
 #define TCR_ORGN_MASK		((UL(3) << 10) | (UL(3) << 26))
 #define TCR_SHARED		((UL(3) << 12) | (UL(3) << 28))
+#define TCR_TG0_4K		(UL(0) << 14)
 #define TCR_TG0_64K		(UL(1) << 14)
-#define TCR_TG1_64K		(UL(1) << 30)
+#define TCR_TG0_16K		(UL(2) << 14)
+#define TCR_TG1_16K		(UL(1) << 30)
+#define TCR_TG1_4K		(UL(2) << 30)
+#define TCR_TG1_64K		(UL(3) << 30)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
 
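
The split into per-field macros is needed because the architecture encodes
the same granule size differently in the TG0 and TG1 fields of TCR_EL1, as
the values above show:

	granule    TG0 (bits 15:14)    TG1 (bits 31:30)
	  4K             0b00                0b10
	  16K            0b10                0b01
	  64K            0b01                0b11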
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 130e2be952cf..215ad4649dd7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -22,7 +22,6 @@
 #define BOOT_CPU_MODE_EL2	(0xe12)
 
 #ifndef __ASSEMBLY__
-#include <asm/cacheflush.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -38,20 +37,9 @@ extern u32 __boot_cpu_mode[2];
 void __hyp_set_vectors(phys_addr_t phys_vector_base);
 phys_addr_t __hyp_get_vectors(void);
 
-static inline void sync_boot_mode(void)
-{
-	/*
-	 * As secondaries write to __boot_cpu_mode with caches disabled, we
-	 * must flush the corresponding cache entries to ensure the visibility
-	 * of their writes.
-	 */
-	__flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
-}
-
 /* Reports the availability of HYP mode */
 static inline bool is_hyp_mode_available(void)
 {
-	sync_boot_mode();
 	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
 		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
 }
@@ -59,7 +47,6 @@ static inline bool is_hyp_mode_available(void)
 /* Check if the bootloader has booted CPUs in different modes */
 static inline bool is_hyp_mode_mismatched(void)
 {
-	sync_boot_mode();
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 1fe5d8d2bdfd..0fd565000772 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/cache.h>
 #include <asm/cputype.h>
 #include <asm/memory.h>
 #include <asm/thread_info.h>
@@ -229,7 +230,11 @@ ENTRY(set_cpu_boot_mode_flag)
 	cmp	w20, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4
-1:	str	w20, [x1]			// This CPU has booted in EL1
+1:	dc	cvac, x1			// Clean potentially dirty cache line
+	dsb	sy
+	str	w20, [x1]			// This CPU has booted in EL1
+	dc	civac, x1			// Clean&invalidate potentially stale cache line
+	dsb	sy
 	ret
 ENDPROC(set_cpu_boot_mode_flag)
 
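
The str above is a non-cacheable store (this CPU still runs with MMU and
D-cache off), so the writer now performs the cache maintenance itself; this
is what lets the sync_boot_mode() reader-side flushing in virt.h be reverted.
A rough C model of the sequence, with hypothetical helper names (the kernel
keeps this in asm):

	static inline void dc_cvac(void *p)  { asm volatile("dc cvac, %0"  :: "r" (p) : "memory"); }
	static inline void dc_civac(void *p) { asm volatile("dc civac, %0" :: "r" (p) : "memory"); }
	static inline void dsb_sy(void)      { asm volatile("dsb sy" ::: "memory"); }

	void write_boot_flag(unsigned int *flag, unsigned int mode)
	{
		dc_cvac(flag);   /* push any dirty cached copy out to the PoC... */
		dsb_sy();        /* ...and complete that before the store below */
		*flag = mode;    /* the store itself bypasses the disabled D-cache */
		dc_civac(flag);  /* drop any line that became stale in the meantime */
		dsb_sy();
	}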
@@ -240,8 +245,9 @@ ENDPROC(set_cpu_boot_mode_flag)
  * This is not in .bss, because we set it sufficiently early that the boot-time
  * zeroing of .bss would clobber it.
  */
-	.pushsection	.data
+	.pushsection	.data..cacheline_aligned
 ENTRY(__boot_cpu_mode)
+	.align	L1_CACHE_SHIFT
 	.long	BOOT_CPU_MODE_EL2
 	.long	0
 	.popsection
@@ -408,6 +414,15 @@ ENDPROC(__calc_phys_offset)
  */
 __create_page_tables:
 	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+	mov	x27, lr
+
+	/*
+	 * Invalidate the idmap and swapper page tables to avoid potential
+	 * dirty cache lines being evicted.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
@@ -467,6 +482,17 @@ __create_page_tables:
 	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
 	add	x0, x26, #2 * PAGE_SIZE		// section table address
 	create_pgd_entry x26, x0, x5, x6, x7
+
+	/*
+	 * Since the page tables have been populated with non-cacheable
+	 * accesses (MMU disabled), invalidate the idmap and swapper page
+	 * tables again to remove any speculatively loaded cache lines.
+	 */
+	mov	x0, x25
+	add	x1, x26, #SWAPPER_DIR_SIZE
+	bl	__inval_cache_range
+
+	mov	lr, x27
 	ret
 ENDPROC(__create_page_tables)
 	.ltorg
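
The page tables are thus invalidated twice: once before they are written, so
no dirty line can later be evicted on top of the new entries, and once after,
so no speculatively fetched line masks them. In pseudo-C, with stand-in
helper names for the asm above:

	void create_page_tables(void)
	{
		inval_cache_range(idmap_pg_dir, swapper_pg_dir + SWAPPER_DIR_SIZE);
		populate_page_tables();		/* non-cacheable stores, MMU off */
		inval_cache_range(idmap_pg_dir, swapper_pg_dir + SWAPPER_DIR_SIZE);
	}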
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index e868c72a7938..baf5afb7e6a0 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1386,6 +1386,7 @@ user_backtrace(struct frame_tail __user *tail,
 	return buftail.fp;
 }
 
+#ifdef CONFIG_COMPAT
 /*
  * The registers we're interested in are at the end of the variable
  * length saved register structure. The fp points at the end of this
@@ -1430,6 +1431,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 
 	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
 }
+#endif /* CONFIG_COMPAT */
 
 void perf_callchain_user(struct perf_callchain_entry *entry,
 			 struct pt_regs *regs)
@@ -1451,6 +1453,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 		       tail && !((unsigned long)tail & 0xf))
 			tail = user_backtrace(tail, entry);
 	} else {
+#ifdef CONFIG_COMPAT
 		/* AARCH32 compat mode */
 		struct compat_frame_tail __user *tail;
 
@@ -1459,6 +1462,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 		while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
 			tail && !((unsigned long)tail & 0x3))
 			tail = compat_user_backtrace(tail, entry);
+#endif
 	}
 }
 
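
The newly guarded code walks AArch32 frame records, whose layout the kernel
describes with a packed struct along these lines (shown for context; see the
full file for the exact definition):

	struct compat_frame_tail {
		compat_uptr_t	fp;	/* a (struct compat_frame_tail *) in compat mode */
		u32		sp;
		u32		lr;
	} __packed;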
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index f2d6f0a36d63..422ebd63b619 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -2,6 +2,8 @@
 #include <linux/kernel.h>
 #include <linux/perf_event.h>
 #include <linux/bug.h>
+
+#include <asm/compat.h>
 #include <asm/perf_regs.h>
 #include <asm/ptrace.h>
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index c46f48b33c14..fda756875fa6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -168,6 +168,14 @@ ENTRY(__flush_dcache_area)
 ENDPROC(__flush_dcache_area)
 
 /*
+ *	__inval_cache_range(start, end)
+ *	- start   - start address of region
+ *	- end     - end address of region
+ */
+ENTRY(__inval_cache_range)
+	/* FALLTHROUGH */
+
+/*
  *	__dma_inv_range(start, end)
  *	- start   - virtual start address of region
  *	- end     - virtual end address of region
@@ -175,14 +183,22 @@ ENDPROC(__flush_dcache_area)
 __dma_inv_range:
 	dcache_line_size x2, x3
 	sub	x3, x2, #1
-	bic	x0, x0, x3
+	tst	x1, x3				// end cache line aligned?
 	bic	x1, x1, x3
-1:	dc	ivac, x0			// invalidate D / U line
-	add	x0, x0, x2
+	b.eq	1f
+	dc	civac, x1			// clean & invalidate D / U line
+1:	tst	x0, x3				// start cache line aligned?
+	bic	x0, x0, x3
+	b.eq	2f
+	dc	civac, x0			// clean & invalidate D / U line
+	b	3f
+2:	dc	ivac, x0			// invalidate D / U line
+3:	add	x0, x0, x2
 	cmp	x0, x1
-	b.lo	1b
+	b.lo	2b
 	dsb	sy
 	ret
+ENDPROC(__inval_cache_range)
 ENDPROC(__dma_inv_range)
 
 /*
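
The fix distinguishes lines only partially owned by the buffer (clean and
invalidate, so a neighbour's data sharing the line is written back rather
than discarded) from wholly owned interior lines (plain invalidate). A C
model of the rewritten routine, with hypothetical helper names (the kernel's
version stays in asm and reads the line size from CTR_EL0):

	#include <stdint.h>

	static inline void dc_ivac(uintptr_t p)  { asm volatile("dc ivac, %0"  :: "r" (p) : "memory"); }
	static inline void dc_civac(uintptr_t p) { asm volatile("dc civac, %0" :: "r" (p) : "memory"); }

	void dma_inv_range(uintptr_t start, uintptr_t end, uintptr_t line)
	{
		uintptr_t mask = line - 1;

		if (end & mask)				/* partial line at the end */
			dc_civac(end & ~mask);
		end &= ~mask;

		if (start & mask) {			/* partial line at the start */
			dc_civac(start & ~mask);
			start = (start & ~mask) + line;
		}

		for (; start < end; start += line)	/* wholly owned lines */
			dc_ivac(start);

		asm volatile("dsb sy" ::: "memory");
	}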
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e085ee6ef4e2..9042aff5e9e3 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -28,14 +28,21 @@
 
 #include "proc-macros.S"
 
-#ifndef CONFIG_SMP
-/* PTWs cacheable, inner/outer WBWA not shareable */
-#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#ifdef CONFIG_ARM64_64K_PAGES
+#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
+#else
+#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
+#endif
+
+#ifdef CONFIG_SMP
+#define TCR_SMP_FLAGS	TCR_SHARED
 #else
-/* PTWs cacheable, inner/outer WBWA shareable */
-#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
+#define TCR_SMP_FLAGS	0
 #endif
 
+/* PTWs cacheable, inner/outer WBWA */
+#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
+
 #define MAIR(attr, mt)	((attr) << ((mt) * 8))
 
 /*
@@ -209,18 +216,14 @@ ENTRY(__cpu_setup)
 	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
 	 * both user and kernel.
 	 */
-	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | \
-		      TCR_ASID16 | TCR_TBI0 | (1 << 31)
+	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+			TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
 	/*
 	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
 	 * TCR_EL1.
 	 */
 	mrs	x9, ID_AA64MMFR0_EL1
 	bfi	x10, x9, #32, #3
-#ifdef CONFIG_ARM64_64K_PAGES
-	orr	x10, x10, TCR_TG0_64K
-	orr	x10, x10, TCR_TG1_64K
-#endif
 	msr	tcr_el1, x10
 	ret					// return to head.S
 ENDPROC(__cpu_setup)
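
With the conditional orr sequence gone, the whole TCR value becomes a single
assemble-time constant that ldr loads from the literal pool. For
illustration, with 4K pages and SMP the expression above is equivalent to:

	TCR_TxSZ(VA_BITS)		/* T0SZ/T1SZ for the 39-bit VA range */
	| TCR_CACHE_FLAGS		/* WBWA cacheable page table walks */
	| TCR_SMP_FLAGS			/* == TCR_SHARED: shareable walks */
	| TCR_TG_FLAGS			/* == TCR_TG0_4K | TCR_TG1_4K */
	| TCR_ASID16 | TCR_TBI0		/* 16-bit ASIDs, top-byte ignore */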