about summary refs log tree commit diff stats
path: root/arch/arm64/kernel/setup.c
diff options
context:
space:
mode:
author    Linus Torvalds <torvalds@linux-foundation.org> 2015-09-04 10:18:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2015-09-04 10:18:09 -0400
commit    a4fdb2a46f617b8b2cd47acec026ec16532edbc6 (patch)
tree      8d993287c9337349034ce6bbe050f7ce016a5268 /arch/arm64/kernel/setup.c
parent    807249d3ada1ff28a47c4054ca4edd479421b671 (diff)
parent    674c242c9323d3c293fc4f9a3a3a619fe3063290 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:

 - Support for new architectural features introduced in ARMv8.1:
     * Privileged Access Never (PAN) to catch user pointer dereferences in the kernel
     * Large System Extension (LSE) for building scalable atomics and locks (depends on locking/arch-atomic from tip, which is included here)
     * Hardware Dirty Bit Management (DBM) for updating clean PTEs automatically
 - Move our PSCI implementation out into drivers/firmware/, where it can be shared with arch/arm/. RMK has also pulled this component branch and has additional patches moving arch/arm/ over. MAINTAINERS is updated accordingly.
 - Better BUG implementation based on the BRK instruction for trapping
 - Leaf TLB invalidation for unmapping user pages
 - Support for PROBE_ONLY PCI configurations
 - Various cleanups and non-critical fixes, including:
     * Always flush FP/SIMD state over exec()
     * Restrict memblock additions based on range of linear mapping
     * Ensure *(LIST_POISON) generates a fatal fault
     * Context-tracking syscall return no longer corrupts return value when not forced on.
     * Alternatives patching synchronisation/stability improvements
     * Signed sub-word cmpxchg compare fix (tickled by HAVE_CMPXCHG_LOCAL)
     * Force SMP=y
     * Hide direct DCC access from userspace
     * Fix EFI stub memory allocation when DRAM starts at 0x0

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  arm64: flush FP/SIMD state correctly after execve()
  arm64: makefile: fix perf_callchain.o kconfig dependency
  arm64: set MAX_MEMBLOCK_ADDR according to linear region size
  of/fdt: make memblock maximum physical address arch configurable
  arm64: Fix source code file path in comments
  arm64: entry: always restore x0 from the stack on syscall return
  arm64: mdscr_el1: avoid exposing DCC to userspace
  arm64: kconfig: Move LIST_POISON to a safe value
  arm64: Add __exception_irq_entry definition for function graph
  arm64: mm: ensure patched kernel text is fetched from PoU
  arm64: alternatives: ensure secondary CPUs execute ISB after patching
  arm64: make ll/sc __cmpxchg_case_##name asm consistent
  arm64: dma-mapping: Simplify pgprot handling
  arm64: restore cpu suspend/resume functionality
  ARM64: PCI: do not enable resources on PROBE_ONLY systems
  arm64: cmpxchg: truncate sub-word signed types before comparison
  arm64: alternative: put secondary CPUs into polling loop during patch
  arm64/Documentation: clarify wording regarding memory below the Image
  arm64: lse: fix lse cmpxchg code indentation
  arm64: remove redundant object file list
  ...
Diffstat (limited to 'arch/arm64/kernel/setup.c')
-rw-r--r--  arch/arm64/kernel/setup.c | 85
1 file changed, 34 insertions(+), 51 deletions(-)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index fdc11f05ac36..888478881243 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -62,7 +62,6 @@
62#include <asm/traps.h> 62#include <asm/traps.h>
63#include <asm/memblock.h> 63#include <asm/memblock.h>
64#include <asm/efi.h> 64#include <asm/efi.h>
65#include <asm/virt.h>
66#include <asm/xen/hypervisor.h> 65#include <asm/xen/hypervisor.h>
67 66
68unsigned long elf_hwcap __read_mostly; 67unsigned long elf_hwcap __read_mostly;
@@ -130,7 +129,6 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
130} 129}
131 130
132struct mpidr_hash mpidr_hash; 131struct mpidr_hash mpidr_hash;
133#ifdef CONFIG_SMP
134/** 132/**
135 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity 133 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
136 * level in order to build a linear index from an 134 * level in order to build a linear index from an
@@ -196,35 +194,11 @@ static void __init smp_build_mpidr_hash(void)
196 pr_warn("Large number of MPIDR hash buckets detected\n"); 194 pr_warn("Large number of MPIDR hash buckets detected\n");
197 __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash)); 195 __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
198} 196}
199#endif
200
201static void __init hyp_mode_check(void)
202{
203 if (is_hyp_mode_available())
204 pr_info("CPU: All CPU(s) started at EL2\n");
205 else if (is_hyp_mode_mismatched())
206 WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
207 "CPU: CPUs started in inconsistent modes");
208 else
209 pr_info("CPU: All CPU(s) started at EL1\n");
210}
211
212void __init do_post_cpus_up_work(void)
213{
214 hyp_mode_check();
215 apply_alternatives_all();
216}
217
218#ifdef CONFIG_UP_LATE_INIT
219void __init up_late_init(void)
220{
221 do_post_cpus_up_work();
222}
223#endif /* CONFIG_UP_LATE_INIT */
224 197
225static void __init setup_processor(void) 198static void __init setup_processor(void)
226{ 199{
227 u64 features, block; 200 u64 features;
201 s64 block;
228 u32 cwg; 202 u32 cwg;
229 int cls; 203 int cls;
230 204
@@ -254,8 +228,8 @@ static void __init setup_processor(void)
254 * for non-negative values. Negative values are reserved. 228 * for non-negative values. Negative values are reserved.
255 */ 229 */
256 features = read_cpuid(ID_AA64ISAR0_EL1); 230 features = read_cpuid(ID_AA64ISAR0_EL1);
257 block = (features >> 4) & 0xf; 231 block = cpuid_feature_extract_field(features, 4);
258 if (!(block & 0x8)) { 232 if (block > 0) {
259 switch (block) { 233 switch (block) {
260 default: 234 default:
261 case 2: 235 case 2:
@@ -267,26 +241,36 @@ static void __init setup_processor(void)
267 } 241 }
268 } 242 }
269 243
270 block = (features >> 8) & 0xf; 244 if (cpuid_feature_extract_field(features, 8) > 0)
271 if (block && !(block & 0x8))
272 elf_hwcap |= HWCAP_SHA1; 245 elf_hwcap |= HWCAP_SHA1;
273 246
274 block = (features >> 12) & 0xf; 247 if (cpuid_feature_extract_field(features, 12) > 0)
275 if (block && !(block & 0x8))
276 elf_hwcap |= HWCAP_SHA2; 248 elf_hwcap |= HWCAP_SHA2;
277 249
278 block = (features >> 16) & 0xf; 250 if (cpuid_feature_extract_field(features, 16) > 0)
279 if (block && !(block & 0x8))
280 elf_hwcap |= HWCAP_CRC32; 251 elf_hwcap |= HWCAP_CRC32;
281 252
253 block = cpuid_feature_extract_field(features, 20);
254 if (block > 0) {
255 switch (block) {
256 default:
257 case 2:
258 elf_hwcap |= HWCAP_ATOMICS;
259 case 1:
260 /* RESERVED */
261 case 0:
262 break;
263 }
264 }
265
282#ifdef CONFIG_COMPAT 266#ifdef CONFIG_COMPAT
283 /* 267 /*
284 * ID_ISAR5_EL1 carries similar information as above, but pertaining to 268 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
285 * the Aarch32 32-bit execution state. 269 * the AArch32 32-bit execution state.
286 */ 270 */
287 features = read_cpuid(ID_ISAR5_EL1); 271 features = read_cpuid(ID_ISAR5_EL1);
288 block = (features >> 4) & 0xf; 272 block = cpuid_feature_extract_field(features, 4);
289 if (!(block & 0x8)) { 273 if (block > 0) {
290 switch (block) { 274 switch (block) {
291 default: 275 default:
292 case 2: 276 case 2:
@@ -298,16 +282,13 @@ static void __init setup_processor(void)
298 } 282 }
299 } 283 }
300 284
301 block = (features >> 8) & 0xf; 285 if (cpuid_feature_extract_field(features, 8) > 0)
302 if (block && !(block & 0x8))
303 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1; 286 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
304 287
305 block = (features >> 12) & 0xf; 288 if (cpuid_feature_extract_field(features, 12) > 0)
306 if (block && !(block & 0x8))
307 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2; 289 compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
308 290
309 block = (features >> 16) & 0xf; 291 if (cpuid_feature_extract_field(features, 16) > 0)
310 if (block && !(block & 0x8))
311 compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32; 292 compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
312#endif 293#endif
313} 294}
@@ -404,10 +385,8 @@ void __init setup_arch(char **cmdline_p)
404 xen_early_init(); 385 xen_early_init();
405 386
406 cpu_read_bootcpu_ops(); 387 cpu_read_bootcpu_ops();
407#ifdef CONFIG_SMP
408 smp_init_cpus(); 388 smp_init_cpus();
409 smp_build_mpidr_hash(); 389 smp_build_mpidr_hash();
410#endif
411 390
412#ifdef CONFIG_VT 391#ifdef CONFIG_VT
413#if defined(CONFIG_VGA_CONSOLE) 392#if defined(CONFIG_VGA_CONSOLE)
@@ -426,8 +405,13 @@ void __init setup_arch(char **cmdline_p)
426 405
427static int __init arm64_device_init(void) 406static int __init arm64_device_init(void)
428{ 407{
429 of_iommu_init(); 408 if (of_have_populated_dt()) {
430 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 409 of_iommu_init();
410 of_platform_populate(NULL, of_default_bus_match_table,
411 NULL, NULL);
412 } else if (acpi_disabled) {
413 pr_crit("Device tree not populated\n");
414 }
431 return 0; 415 return 0;
432} 416}
433arch_initcall_sync(arm64_device_init); 417arch_initcall_sync(arm64_device_init);
@@ -455,6 +439,7 @@ static const char *hwcap_str[] = {
455 "sha1", 439 "sha1",
456 "sha2", 440 "sha2",
457 "crc32", 441 "crc32",
442 "atomics",
458 NULL 443 NULL
459}; 444};
460 445
@@ -507,9 +492,7 @@ static int c_show(struct seq_file *m, void *v)
507 * online processors, looking for lines beginning with 492 * online processors, looking for lines beginning with
508 * "processor". Give glibc what it expects. 493 * "processor". Give glibc what it expects.
509 */ 494 */
510#ifdef CONFIG_SMP
511 seq_printf(m, "processor\t: %d\n", i); 495 seq_printf(m, "processor\t: %d\n", i);
512#endif
513 496
514 /* 497 /*
515 * Dump out the common processor features in a single line. 498 * Dump out the common processor features in a single line.