author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 13:02:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-24 13:02:15 -0400
commit     e3d8238d7f5c3f539a29f5ac596cd342d847e099 (patch)
tree       f545e6064155d0e943dcfbfabb3de60530a8e5de
parent     4e241557fc1cb560bd9e77ca1b4a9352732a5427 (diff)
parent     86dca36e6ba019650a94cadf922ea3d06dec0182 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "Mostly refactoring/clean-up:

   - CPU ops and PSCI (Power State Coordination Interface) refactoring
     following the merging of the arm64 ACPI support, together with
     handling of Trusted (secure) OS instances

   - Using fixmap for permanent FDT mapping, removing the initial dtb
     placement requirements (within 512MB from the start of the kernel
     image).  This required moving the FDT self reservation out of the
     memreserve processing

   - Idmap (1:1 mapping used for MMU on/off) handling clean-up

   - Removing flush_cache_all() - not safe on ARM unless the MMU is
     off.  Last stages of CPU power down/up are handled by firmware
     already

   - "Alternatives" (run-time code patching) refactoring and support
     for immediate branch patching, GICv3 CPU interface access

   - User faults handling clean-up

  And some fixes:

   - Fix for VDSO building with broken ELF toolchains

   - Fix another case of init_mm.pgd usage for user mappings (during
     ASID roll-over broadcasting)

   - Fix for FPSIMD reloading after CPU hotplug

   - Fix for missing syscall trace exit

   - Workaround for .inst asm bug

   - Compat fix for switching the user tls tpidr_el0 register"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (42 commits)
  arm64: use private ratelimit state along with show_unhandled_signals
  arm64: show unhandled SP/PC alignment faults
  arm64: vdso: work-around broken ELF toolchains in Makefile
  arm64: kernel: rename __cpu_suspend to keep it aligned with arm
  arm64: compat: print compat_sp instead of sp
  arm64: mm: Fix freeing of the wrong memmap entries with !SPARSEMEM_VMEMMAP
  arm64: entry: fix context tracking for el0_sp_pc
  arm64: defconfig: enable memtest
  arm64: mm: remove reference to tlb.S from comment block
  arm64: Do not attempt to use init_mm in reset_context()
  arm64: KVM: Switch vgic save/restore to alternative_insn
  arm64: alternative: Introduce feature for GICv3 CPU interface
  arm64: psci: fix !CONFIG_HOTPLUG_CPU build warning
  arm64: fix bug for reloading FPSIMD state after CPU hotplug.
  arm64: kernel thread don't need to save fpsimd context.
  arm64: fix missing syscall trace exit
  arm64: alternative: Work around .inst assembler bugs
  arm64: alternative: Merge alternative-asm.h into alternative.h
  arm64: alternative: Allow immediate branch as alternative instruction
  arm64: Rework alternate sequence for ARM erratum 845719
  ...
-rw-r--r--  Documentation/arm64/booting.txt  10
-rw-r--r--  arch/arm/include/asm/kvm_host.h  5
-rw-r--r--  arch/arm/kvm/psci.c  2
-rw-r--r--  arch/arm/mm/init.c  1
-rw-r--r--  arch/arm64/Kconfig  1
-rw-r--r--  arch/arm64/configs/defconfig  1
-rw-r--r--  arch/arm64/include/asm/acpi.h  19
-rw-r--r--  arch/arm64/include/asm/alternative-asm.h  29
-rw-r--r--  arch/arm64/include/asm/alternative.h  46
-rw-r--r--  arch/arm64/include/asm/boot.h  14
-rw-r--r--  arch/arm64/include/asm/cacheflush.h  5
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h  27
-rw-r--r--  arch/arm64/include/asm/cpufeature.h  8
-rw-r--r--  arch/arm64/include/asm/cpuidle.h  8
-rw-r--r--  arch/arm64/include/asm/fixmap.h  15
-rw-r--r--  arch/arm64/include/asm/insn.h  3
-rw-r--r--  arch/arm64/include/asm/io.h  8
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h  5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  23
-rw-r--r--  arch/arm64/include/asm/mmu.h  1
-rw-r--r--  arch/arm64/include/asm/perf_event.h  7
-rw-r--r--  arch/arm64/include/asm/proc-fns.h  4
-rw-r--r--  arch/arm64/include/asm/processor.h  19
-rw-r--r--  arch/arm64/include/asm/psci.h  12
-rw-r--r--  arch/arm64/include/asm/smp.h  2
-rw-r--r--  arch/arm64/include/asm/smp_plat.h  16
-rw-r--r--  arch/arm64/include/asm/suspend.h  2
-rw-r--r--  arch/arm64/include/asm/system_misc.h  14
-rw-r--r--  arch/arm64/include/asm/tlbflush.h  2
-rw-r--r--  arch/arm64/kernel/acpi.c  123
-rw-r--r--  arch/arm64/kernel/alternative.c  71
-rw-r--r--  arch/arm64/kernel/asm-offsets.c  1
-rw-r--r--  arch/arm64/kernel/cpu_ops.c  72
-rw-r--r--  arch/arm64/kernel/cpufeature.c  16
-rw-r--r--  arch/arm64/kernel/cpuidle.c  11
-rw-r--r--  arch/arm64/kernel/entry.S  37
-rw-r--r--  arch/arm64/kernel/fpsimd.c  31
-rw-r--r--  arch/arm64/kernel/head.S  52
-rw-r--r--  arch/arm64/kernel/insn.c  60
-rw-r--r--  arch/arm64/kernel/perf_event.c  2
-rw-r--r--  arch/arm64/kernel/process.c  62
-rw-r--r--  arch/arm64/kernel/psci.c  244
-rw-r--r--  arch/arm64/kernel/setup.c  37
-rw-r--r--  arch/arm64/kernel/signal32.c  4
-rw-r--r--  arch/arm64/kernel/sleep.S  9
-rw-r--r--  arch/arm64/kernel/smp.c  246
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c  8
-rw-r--r--  arch/arm64/kernel/suspend.c  9
-rw-r--r--  arch/arm64/kernel/traps.c  5
-rw-r--r--  arch/arm64/kernel/vdso/Makefile  4
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S  11
-rw-r--r--  arch/arm64/kvm/hyp.S  18
-rw-r--r--  arch/arm64/mm/Makefile  2
-rw-r--r--  arch/arm64/mm/cache.S  75
-rw-r--r--  arch/arm64/mm/context.c  8
-rw-r--r--  arch/arm64/mm/fault.c  12
-rw-r--r--  arch/arm64/mm/flush.c  1
-rw-r--r--  arch/arm64/mm/init.c  2
-rw-r--r--  arch/arm64/mm/mmu.c  66
-rw-r--r--  arch/arm64/mm/proc.S  46
-rw-r--r--  arch/powerpc/kernel/prom.c  1
-rw-r--r--  drivers/of/fdt.c  19
-rw-r--r--  include/linux/of_fdt.h  2
-rw-r--r--  virt/kvm/arm/vgic.c  3
64 files changed, 938 insertions, 741 deletions
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index f3c05b5f9f08..1690350f16e7 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -45,11 +45,13 @@ sees fit.)
45 45
46Requirement: MANDATORY 46Requirement: MANDATORY
47 47
48The device tree blob (dtb) must be placed on an 8-byte boundary within 48The device tree blob (dtb) must be placed on an 8-byte boundary and must
49the first 512 megabytes from the start of the kernel image and must not 49not exceed 2 megabytes in size. Since the dtb will be mapped cacheable
50cross a 2-megabyte boundary. This is to allow the kernel to map the 50using blocks of up to 2 megabytes in size, it must not be placed within
51blob using a single section mapping in the initial page tables. 51any 2M region which must be mapped with any specific attributes.
52 52
53NOTE: versions prior to v4.2 also require that the DTB be placed within
54the 512 MB region starting at text_offset bytes below the kernel Image.
53 55
543. Decompress the kernel image 563. Decompress the kernel image
55------------------------------ 57------------------------------
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index d71607c16601..e896d2c196e6 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -218,11 +218,6 @@ static inline int kvm_arch_dev_ioctl_check_extension(long ext)
218 return 0; 218 return 0;
219} 219}
220 220
221static inline void vgic_arch_setup(const struct vgic_params *vgic)
222{
223 BUG_ON(vgic->type != VGIC_V2);
224}
225
226int kvm_perf_init(void); 221int kvm_perf_init(void);
227int kvm_perf_teardown(void); 222int kvm_perf_teardown(void);
228 223
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 531e922486b2..4b94b513168d 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -24,6 +24,8 @@
24#include <asm/kvm_psci.h> 24#include <asm/kvm_psci.h>
25#include <asm/kvm_host.h> 25#include <asm/kvm_host.h>
26 26
27#include <uapi/linux/psci.h>
28
27/* 29/*
28 * This is an implementation of the Power State Coordination Interface 30 * This is an implementation of the Power State Coordination Interface
29 * as described in ARM document number ARM DEN 0022A. 31 * as described in ARM document number ARM DEN 0022A.
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index be92fa0f2f35..8a63b4cdc0f2 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -268,6 +268,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
268 if (mdesc->reserve) 268 if (mdesc->reserve)
269 mdesc->reserve(); 269 mdesc->reserve();
270 270
271 early_init_fdt_reserve_self();
271 early_init_fdt_scan_reserved_mem(); 272 early_init_fdt_scan_reserved_mem();
272 273
273 /* reserve memory for DMA contiguous allocations */ 274 /* reserve memory for DMA contiguous allocations */
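For context: the early_init_fdt_reserve_self() call added above comes from the drivers/of/fdt.c changes in this series (listed in the diffstat but not shown in full here). It splits the reservation of the FDT blob itself out of the /memreserve/ processing. A rough sketch of what the helper does, in terms of the generic initial_boot_params pointer and the existing memblock-backed reservation hook:

void __init early_init_fdt_reserve_self(void)
{
	if (!initial_boot_params)
		return;

	/* Reserve the memory occupied by the device tree blob itself. */
	early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
					  fdt_totalsize(initial_boot_params),
					  0);
}
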
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6be1a6efcdd6..802400f2a69e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -72,6 +72,7 @@ config ARM64
72 select HAVE_RCU_TABLE_FREE 72 select HAVE_RCU_TABLE_FREE
73 select HAVE_SYSCALL_TRACEPOINTS 73 select HAVE_SYSCALL_TRACEPOINTS
74 select IRQ_DOMAIN 74 select IRQ_DOMAIN
75 select IRQ_FORCED_THREADING
75 select MODULES_USE_ELF_RELA 76 select MODULES_USE_ELF_RELA
76 select NO_BOOTMEM 77 select NO_BOOTMEM
77 select OF 78 select OF
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2ed7449d9273..daefbf0329a6 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -180,6 +180,7 @@ CONFIG_LOCKUP_DETECTOR=y
180# CONFIG_SCHED_DEBUG is not set 180# CONFIG_SCHED_DEBUG is not set
181# CONFIG_DEBUG_PREEMPT is not set 181# CONFIG_DEBUG_PREEMPT is not set
182# CONFIG_FTRACE is not set 182# CONFIG_FTRACE is not set
183CONFIG_MEMTEST=y
183CONFIG_SECURITY=y 184CONFIG_SECURITY=y
184CONFIG_CRYPTO_ANSI_CPRNG=y 185CONFIG_CRYPTO_ANSI_CPRNG=y
185CONFIG_ARM64_CRYPTO=y 186CONFIG_ARM64_CRYPTO=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 59c05d8ea4a0..39248d3adf5d 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -16,6 +16,7 @@
16#include <linux/irqchip/arm-gic-acpi.h> 16#include <linux/irqchip/arm-gic-acpi.h>
17 17
18#include <asm/cputype.h> 18#include <asm/cputype.h>
19#include <asm/psci.h>
19#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
20 21
21/* Basic configuration for ACPI */ 22/* Basic configuration for ACPI */
@@ -39,18 +40,6 @@ extern int acpi_disabled;
39extern int acpi_noirq; 40extern int acpi_noirq;
40extern int acpi_pci_disabled; 41extern int acpi_pci_disabled;
41 42
42/* 1 to indicate PSCI 0.2+ is implemented */
43static inline bool acpi_psci_present(void)
44{
45 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
46}
47
48/* 1 to indicate HVC must be used instead of SMC as the PSCI conduit */
49static inline bool acpi_psci_use_hvc(void)
50{
51 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
52}
53
54static inline void disable_acpi(void) 43static inline void disable_acpi(void)
55{ 44{
56 acpi_disabled = 1; 45 acpi_disabled = 1;
@@ -88,9 +77,11 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { }
88void __init acpi_init_cpus(void); 77void __init acpi_init_cpus(void);
89 78
90#else 79#else
91static inline bool acpi_psci_present(void) { return false; }
92static inline bool acpi_psci_use_hvc(void) { return false; }
93static inline void acpi_init_cpus(void) { } 80static inline void acpi_init_cpus(void) { }
94#endif /* CONFIG_ACPI */ 81#endif /* CONFIG_ACPI */
95 82
83static inline const char *acpi_get_enable_method(int cpu)
84{
85 return acpi_psci_present() ? "psci" : NULL;
86}
96#endif /*_ASM_ACPI_H*/ 87#endif /*_ASM_ACPI_H*/
diff --git a/arch/arm64/include/asm/alternative-asm.h b/arch/arm64/include/asm/alternative-asm.h
deleted file mode 100644
index 919a67855b63..000000000000
--- a/arch/arm64/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __ASM_ALTERNATIVE_ASM_H
2#define __ASM_ALTERNATIVE_ASM_H
3
4#ifdef __ASSEMBLY__
5
6.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
7 .word \orig_offset - .
8 .word \alt_offset - .
9 .hword \feature
10 .byte \orig_len
11 .byte \alt_len
12.endm
13
14.macro alternative_insn insn1 insn2 cap
15661: \insn1
16662: .pushsection .altinstructions, "a"
17 altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
18 .popsection
19 .pushsection .altinstr_replacement, "ax"
20663: \insn2
21664: .popsection
22 .if ((664b-663b) != (662b-661b))
23 .error "Alternatives instruction length mismatch"
24 .endif
25.endm
26
27#endif /* __ASSEMBLY__ */
28
29#endif /* __ASM_ALTERNATIVE_ASM_H */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index d261f01e2bae..c385a0c4057f 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_ALTERNATIVE_H 1#ifndef __ASM_ALTERNATIVE_H
2#define __ASM_ALTERNATIVE_H 2#define __ASM_ALTERNATIVE_H
3 3
4#ifndef __ASSEMBLY__
5
4#include <linux/types.h> 6#include <linux/types.h>
5#include <linux/stddef.h> 7#include <linux/stddef.h>
6#include <linux/stringify.h> 8#include <linux/stringify.h>
@@ -24,7 +26,20 @@ void free_alternatives_memory(void);
24 " .byte 662b-661b\n" /* source len */ \ 26 " .byte 662b-661b\n" /* source len */ \
25 " .byte 664f-663f\n" /* replacement len */ 27 " .byte 664f-663f\n" /* replacement len */
26 28
27/* alternative assembly primitive: */ 29/*
30 * alternative assembly primitive:
31 *
32 * If any of these .org directive fail, it means that insn1 and insn2
33 * don't have the same length. This used to be written as
34 *
35 * .if ((664b-663b) != (662b-661b))
36 * .error "Alternatives instruction length mismatch"
37 * .endif
38 *
39 * but most assemblers die if insn1 or insn2 have a .inst. This should
40 * be fixed in a binutils release posterior to 2.25.51.0.2 (anything
41 * containing commit 4e4d08cf7399b606 or c1baaddf8861).
42 */
28#define ALTERNATIVE(oldinstr, newinstr, feature) \ 43#define ALTERNATIVE(oldinstr, newinstr, feature) \
29 "661:\n\t" \ 44 "661:\n\t" \
30 oldinstr "\n" \ 45 oldinstr "\n" \
@@ -37,8 +52,31 @@ void free_alternatives_memory(void);
37 newinstr "\n" \ 52 newinstr "\n" \
38 "664:\n\t" \ 53 "664:\n\t" \
39 ".popsection\n\t" \ 54 ".popsection\n\t" \
40 ".if ((664b-663b) != (662b-661b))\n\t" \ 55 ".org . - (664b-663b) + (662b-661b)\n\t" \
41 " .error \"Alternatives instruction length mismatch\"\n\t"\ 56 ".org . - (662b-661b) + (664b-663b)\n"
42 ".endif\n" 57
58#else
59
60.macro altinstruction_entry orig_offset alt_offset feature orig_len alt_len
61 .word \orig_offset - .
62 .word \alt_offset - .
63 .hword \feature
64 .byte \orig_len
65 .byte \alt_len
66.endm
67
68.macro alternative_insn insn1 insn2 cap
69661: \insn1
70662: .pushsection .altinstructions, "a"
71 altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
72 .popsection
73 .pushsection .altinstr_replacement, "ax"
74663: \insn2
75664: .popsection
76 .org . - (664b-663b) + (662b-661b)
77 .org . - (662b-661b) + (664b-663b)
78.endm
79
80#endif /* __ASSEMBLY__ */
43 81
44#endif /* __ASM_ALTERNATIVE_H */ 82#endif /* __ASM_ALTERNATIVE_H */
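The reworked ALTERNATIVE() macro is used from C inside inline assembly; the original and replacement sequences must have the same length, which the paired .org directives now assert at assembly time even when a sequence contains a .inst. A minimal, purely illustrative sketch (the function name and the choice of instructions are hypothetical; only the macro and the ARM64_HAS_SYSREG_GIC_CPUIF capability come from this series):

static inline void example_patched_insn(void)
{
	/*
	 * Emitted as a nop at build time; if the boot CPU reports the GICv3
	 * system-register interface, the alternatives patching code rewrites
	 * it to an sev. Both encodings are 4 bytes, so the length check holds.
	 */
	asm volatile(ALTERNATIVE("nop", "sev", ARM64_HAS_SYSREG_GIC_CPUIF));
}
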
diff --git a/arch/arm64/include/asm/boot.h b/arch/arm64/include/asm/boot.h
new file mode 100644
index 000000000000..81151b67b26b
--- /dev/null
+++ b/arch/arm64/include/asm/boot.h
@@ -0,0 +1,14 @@
1
2#ifndef __ASM_BOOT_H
3#define __ASM_BOOT_H
4
5#include <asm/sizes.h>
6
7/*
8 * arm64 requires the DTB to be 8 byte aligned and
9 * not exceed 2MB in size.
10 */
11#define MIN_FDT_ALIGN 8
12#define MAX_FDT_SIZE SZ_2M
13
14#endif
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 67d309cc3b6b..c75b8d027eb1 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -40,10 +40,6 @@
40 * the implementation assumes non-aliasing VIPT D-cache and (aliasing) 40 * the implementation assumes non-aliasing VIPT D-cache and (aliasing)
41 * VIPT or ASID-tagged VIVT I-cache. 41 * VIPT or ASID-tagged VIVT I-cache.
42 * 42 *
43 * flush_cache_all()
44 *
45 * Unconditionally clean and invalidate the entire cache.
46 *
47 * flush_cache_mm(mm) 43 * flush_cache_mm(mm)
48 * 44 *
49 * Clean and invalidate all user space cache entries 45 * Clean and invalidate all user space cache entries
@@ -69,7 +65,6 @@
69 * - kaddr - page address 65 * - kaddr - page address
70 * - size - region size 66 * - size - region size
71 */ 67 */
72extern void flush_cache_all(void);
73extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); 68extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
74extern void flush_icache_range(unsigned long start, unsigned long end); 69extern void flush_icache_range(unsigned long start, unsigned long end);
75extern void __flush_dcache_area(void *addr, size_t len); 70extern void __flush_dcache_area(void *addr, size_t len);
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 5a31d6716914..8f03446cf89f 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -19,15 +19,15 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/threads.h> 20#include <linux/threads.h>
21 21
22struct device_node;
23
24/** 22/**
25 * struct cpu_operations - Callback operations for hotplugging CPUs. 23 * struct cpu_operations - Callback operations for hotplugging CPUs.
26 * 24 *
27 * @name: Name of the property as appears in a devicetree cpu node's 25 * @name: Name of the property as appears in a devicetree cpu node's
28 * enable-method property. 26 * enable-method property. On systems booting with ACPI, @name
29 * @cpu_init: Reads any data necessary for a specific enable-method from the 27 * identifies the struct cpu_operations entry corresponding to
30 * devicetree, for a given cpu node and proposed logical id. 28 * the boot protocol specified in the ACPI MADT table.
29 * @cpu_init: Reads any data necessary for a specific enable-method for a
30 * proposed logical id.
31 * @cpu_prepare: Early one-time preparation step for a cpu. If there is a 31 * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
32 * mechanism for doing so, tests whether it is possible to boot 32 * mechanism for doing so, tests whether it is possible to boot
33 * the given CPU. 33 * the given CPU.
@@ -40,15 +40,15 @@ struct device_node;
40 * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the 40 * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
41 * cpu being killed. 41 * cpu being killed.
42 * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu. 42 * @cpu_kill: Ensures a cpu has left the kernel. Called from another cpu.
43 * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from 43 * @cpu_init_idle: Reads any data necessary to initialize CPU idle states for
44 * devicetree, for a given cpu node and proposed logical id. 44 * a proposed logical id.
45 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing 45 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
46 * to wrong parameters or error conditions. Called from the 46 * to wrong parameters or error conditions. Called from the
47 * CPU being suspended. Must be called with IRQs disabled. 47 * CPU being suspended. Must be called with IRQs disabled.
48 */ 48 */
49struct cpu_operations { 49struct cpu_operations {
50 const char *name; 50 const char *name;
51 int (*cpu_init)(struct device_node *, unsigned int); 51 int (*cpu_init)(unsigned int);
52 int (*cpu_prepare)(unsigned int); 52 int (*cpu_prepare)(unsigned int);
53 int (*cpu_boot)(unsigned int); 53 int (*cpu_boot)(unsigned int);
54 void (*cpu_postboot)(void); 54 void (*cpu_postboot)(void);
@@ -58,14 +58,17 @@ struct cpu_operations {
58 int (*cpu_kill)(unsigned int cpu); 58 int (*cpu_kill)(unsigned int cpu);
59#endif 59#endif
60#ifdef CONFIG_CPU_IDLE 60#ifdef CONFIG_CPU_IDLE
61 int (*cpu_init_idle)(struct device_node *, unsigned int); 61 int (*cpu_init_idle)(unsigned int);
62 int (*cpu_suspend)(unsigned long); 62 int (*cpu_suspend)(unsigned long);
63#endif 63#endif
64}; 64};
65 65
66extern const struct cpu_operations *cpu_ops[NR_CPUS]; 66extern const struct cpu_operations *cpu_ops[NR_CPUS];
67int __init cpu_read_ops(struct device_node *dn, int cpu); 67int __init cpu_read_ops(int cpu);
68void __init cpu_read_bootcpu_ops(void); 68
69const struct cpu_operations *cpu_get_ops(const char *name); 69static inline void __init cpu_read_bootcpu_ops(void)
70{
71 cpu_read_ops(0);
72}
70 73
71#endif /* ifndef __ASM_CPU_OPS_H */ 74#endif /* ifndef __ASM_CPU_OPS_H */
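For reference, an enable-method back-end fills in one struct cpu_operations and is looked up by name through cpu_get_ops(); after the signature changes above, the PSCI back-end looks roughly like this (an abridged sketch, not the full psci.c diff from this series):

const struct cpu_operations cpu_psci_ops = {
	.name		= "psci",
	.cpu_init	= cpu_psci_cpu_init,	/* now takes only the logical cpu id */
	.cpu_prepare	= cpu_psci_cpu_prepare,
	.cpu_boot	= cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= cpu_psci_cpu_disable,
	.cpu_die	= cpu_psci_cpu_die,
	.cpu_kill	= cpu_psci_cpu_kill,
#endif
};
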
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 82cb9f98ba1a..c1044218a63a 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -24,8 +24,9 @@
24#define ARM64_WORKAROUND_CLEAN_CACHE 0 24#define ARM64_WORKAROUND_CLEAN_CACHE 0
25#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 25#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
26#define ARM64_WORKAROUND_845719 2 26#define ARM64_WORKAROUND_845719 2
27#define ARM64_HAS_SYSREG_GIC_CPUIF 3
27 28
28#define ARM64_NCAPS 3 29#define ARM64_NCAPS 4
29 30
30#ifndef __ASSEMBLY__ 31#ifndef __ASSEMBLY__
31 32
@@ -38,6 +39,11 @@ struct arm64_cpu_capabilities {
38 u32 midr_model; 39 u32 midr_model;
39 u32 midr_range_min, midr_range_max; 40 u32 midr_range_min, midr_range_max;
40 }; 41 };
42
43 struct { /* Feature register checking */
44 u64 register_mask;
45 u64 register_value;
46 };
41 }; 47 };
42}; 48};
43 49
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 141b2fcabaa6..0f74f05d662a 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -5,20 +5,16 @@
5 5
6#ifdef CONFIG_CPU_IDLE 6#ifdef CONFIG_CPU_IDLE
7extern int arm_cpuidle_init(unsigned int cpu); 7extern int arm_cpuidle_init(unsigned int cpu);
8extern int cpu_suspend(unsigned long arg); 8extern int arm_cpuidle_suspend(int index);
9#else 9#else
10static inline int arm_cpuidle_init(unsigned int cpu) 10static inline int arm_cpuidle_init(unsigned int cpu)
11{ 11{
12 return -EOPNOTSUPP; 12 return -EOPNOTSUPP;
13} 13}
14 14
15static inline int cpu_suspend(unsigned long arg) 15static inline int arm_cpuidle_suspend(int index)
16{ 16{
17 return -EOPNOTSUPP; 17 return -EOPNOTSUPP;
18} 18}
19#endif 19#endif
20static inline int arm_cpuidle_suspend(int index)
21{
22 return cpu_suspend(index);
23}
24#endif 20#endif
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index 95e6b6dcbe37..c0739187a920 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -17,6 +17,7 @@
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <asm/boot.h>
20#include <asm/page.h> 21#include <asm/page.h>
21 22
22/* 23/*
@@ -32,6 +33,20 @@
32 */ 33 */
33enum fixed_addresses { 34enum fixed_addresses {
34 FIX_HOLE, 35 FIX_HOLE,
36
37 /*
38 * Reserve a virtual window for the FDT that is 2 MB larger than the
39 * maximum supported size, and put it at the top of the fixmap region.
40 * The additional space ensures that any FDT that does not exceed
41 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
42 * 2 MB alignment boundaries.
43 *
44 * Keep this at the top so it remains 2 MB aligned.
45 */
46#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
47 FIX_FDT_END,
48 FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
49
35 FIX_EARLYCON_MEM_BASE, 50 FIX_EARLYCON_MEM_BASE,
36 FIX_TEXT_POKE0, 51 FIX_TEXT_POKE0,
37 __end_of_permanent_fixed_addresses, 52 __end_of_permanent_fixed_addresses,
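To make the window sizing concrete (assuming the common 4 KB page size): FIX_FDT_SIZE = MAX_FDT_SIZE + SZ_2M = 2 MB + 2 MB = 4 MB, so FIX_FDT reserves 4 MB / 4 KB = 1024 fixmap slots. A maximally sized 2 MB FDT whose start is not 2 MB aligned straddles two 2 MB blocks; the extra 2 MB guarantees that both blocks still fit inside the window, which is why it is larger than MAX_FDT_SIZE itself.
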
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index f81b328d9cf4..30e50eb54a67 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -281,6 +281,7 @@ __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000)
281#undef __AARCH64_INSN_FUNCS 281#undef __AARCH64_INSN_FUNCS
282 282
283bool aarch64_insn_is_nop(u32 insn); 283bool aarch64_insn_is_nop(u32 insn);
284bool aarch64_insn_is_branch_imm(u32 insn);
284 285
285int aarch64_insn_read(void *addr, u32 *insnp); 286int aarch64_insn_read(void *addr, u32 *insnp);
286int aarch64_insn_write(void *addr, u32 insn); 287int aarch64_insn_write(void *addr, u32 insn);
@@ -351,6 +352,8 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
351 int shift, 352 int shift,
352 enum aarch64_insn_variant variant, 353 enum aarch64_insn_variant variant,
353 enum aarch64_insn_logic_type type); 354 enum aarch64_insn_logic_type type);
355s32 aarch64_get_branch_offset(u32 insn);
356u32 aarch64_set_branch_offset(u32 insn, s32 offset);
354 357
355bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn); 358bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
356 359
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7116d3973058..44be1e03ed65 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -117,10 +117,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
117 * ordering rules but do not guarantee any ordering relative to Normal memory 117 * ordering rules but do not guarantee any ordering relative to Normal memory
118 * accesses. 118 * accesses.
119 */ 119 */
120#define readb_relaxed(c) ({ u8 __v = __raw_readb(c); __v; }) 120#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
121#define readw_relaxed(c) ({ u16 __v = le16_to_cpu((__force __le16)__raw_readw(c)); __v; }) 121#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
122#define readl_relaxed(c) ({ u32 __v = le32_to_cpu((__force __le32)__raw_readl(c)); __v; }) 122#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
123#define readq_relaxed(c) ({ u64 __v = le64_to_cpu((__force __le64)__raw_readq(c)); __v; }) 123#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
124 124
125#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c))) 125#define writeb_relaxed(v,c) ((void)__raw_writeb((v),(c)))
126#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c))) 126#define writew_relaxed(v,c) ((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c4c11d20ca17..3c5fe685a2d6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -132,11 +132,6 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
132 132
133extern u64 __vgic_v3_get_ich_vtr_el2(void); 133extern u64 __vgic_v3_get_ich_vtr_el2(void);
134 134
135extern char __save_vgic_v2_state[];
136extern char __restore_vgic_v2_state[];
137extern char __save_vgic_v3_state[];
138extern char __restore_vgic_v3_state[];
139
140#endif 135#endif
141 136
142#endif /* __ARM_KVM_ASM_H__ */ 137#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f0f58c9beec0..2709db2a7eac 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -221,29 +221,6 @@ struct vgic_sr_vectors {
221 void *restore_vgic; 221 void *restore_vgic;
222}; 222};
223 223
224static inline void vgic_arch_setup(const struct vgic_params *vgic)
225{
226 extern struct vgic_sr_vectors __vgic_sr_vectors;
227
228 switch(vgic->type)
229 {
230 case VGIC_V2:
231 __vgic_sr_vectors.save_vgic = __save_vgic_v2_state;
232 __vgic_sr_vectors.restore_vgic = __restore_vgic_v2_state;
233 break;
234
235#ifdef CONFIG_ARM_GIC_V3
236 case VGIC_V3:
237 __vgic_sr_vectors.save_vgic = __save_vgic_v3_state;
238 __vgic_sr_vectors.restore_vgic = __restore_vgic_v3_state;
239 break;
240#endif
241
242 default:
243 BUG();
244 }
245}
246
247static inline void kvm_arch_hardware_disable(void) {} 224static inline void kvm_arch_hardware_disable(void) {}
248static inline void kvm_arch_hardware_unsetup(void) {} 225static inline void kvm_arch_hardware_unsetup(void) {}
249static inline void kvm_arch_sync_events(struct kvm *kvm) {} 226static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3d311761e3c2..79fcfb048884 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -34,5 +34,6 @@ extern void init_mem_pgprot(void);
34extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 34extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
35 unsigned long virt, phys_addr_t size, 35 unsigned long virt, phys_addr_t size,
36 pgprot_t prot); 36 pgprot_t prot);
37extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
37 38
38#endif 39#endif
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index d26d1d53c0d7..6471773db6fd 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -24,4 +24,11 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
24#define perf_misc_flags(regs) perf_misc_flags(regs) 24#define perf_misc_flags(regs) perf_misc_flags(regs)
25#endif 25#endif
26 26
27#define perf_arch_fetch_caller_regs(regs, __ip) { \
28 (regs)->pc = (__ip); \
29 (regs)->regs[29] = (unsigned long) __builtin_frame_address(0); \
30 (regs)->sp = current_stack_pointer; \
31 (regs)->pstate = PSR_MODE_EL1h; \
32}
33
27#endif 34#endif
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 220633b791b8..14ad6e4e87d1 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -28,12 +28,8 @@
28struct mm_struct; 28struct mm_struct;
29struct cpu_suspend_ctx; 29struct cpu_suspend_ctx;
30 30
31extern void cpu_cache_off(void);
32extern void cpu_do_idle(void); 31extern void cpu_do_idle(void);
33extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); 32extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
34extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
35void cpu_soft_restart(phys_addr_t cpu_reset,
36 unsigned long addr) __attribute__((noreturn));
37extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr); 33extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
38extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); 34extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
39 35
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d2c37a1df0eb..e4c893e54f01 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -78,13 +78,30 @@ struct cpu_context {
78 78
79struct thread_struct { 79struct thread_struct {
80 struct cpu_context cpu_context; /* cpu context */ 80 struct cpu_context cpu_context; /* cpu context */
81 unsigned long tp_value; 81 unsigned long tp_value; /* TLS register */
82#ifdef CONFIG_COMPAT
83 unsigned long tp2_value;
84#endif
82 struct fpsimd_state fpsimd_state; 85 struct fpsimd_state fpsimd_state;
83 unsigned long fault_address; /* fault info */ 86 unsigned long fault_address; /* fault info */
84 unsigned long fault_code; /* ESR_EL1 value */ 87 unsigned long fault_code; /* ESR_EL1 value */
85 struct debug_info debug; /* debugging */ 88 struct debug_info debug; /* debugging */
86}; 89};
87 90
91#ifdef CONFIG_COMPAT
92#define task_user_tls(t) \
93({ \
94 unsigned long *__tls; \
95 if (is_compat_thread(task_thread_info(t))) \
96 __tls = &(t)->thread.tp2_value; \
97 else \
98 __tls = &(t)->thread.tp_value; \
99 __tls; \
100 })
101#else
102#define task_user_tls(t) (&(t)->thread.tp_value)
103#endif
104
88#define INIT_THREAD { } 105#define INIT_THREAD { }
89 106
90static inline void start_thread_common(struct pt_regs *regs, unsigned long pc) 107static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
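The task_user_tls() helper above is what the compat tpidr_el0 fix (the last item in the pull request summary) builds on: on a context switch, the hardware tpidr_el0 value is saved into whichever slot belongs to the outgoing task. A simplified sketch of the switch path using it (close to, but not a verbatim copy of, the process.c change in this series):

static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	/* Save the outgoing task's user-writable TLS register. */
	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
	*task_user_tls(current) = tpidr;

	/* Compat tasks see their TLS value through the read-only tpidrro_el0. */
	tpidr = *task_user_tls(next);
	tpidrro = is_compat_thread(task_thread_info(next)) ?
		  next->thread.tp_value : 0;

	asm("msr tpidr_el0, %0\n\tmsr tpidrro_el0, %1"
	    : : "r" (tpidr), "r" (tpidrro));
}
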
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 2454bc59c916..49d7e1aaebdc 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,7 +14,15 @@
14#ifndef __ASM_PSCI_H 14#ifndef __ASM_PSCI_H
15#define __ASM_PSCI_H 15#define __ASM_PSCI_H
16 16
17int psci_dt_init(void); 17int __init psci_dt_init(void);
18int psci_acpi_init(void); 18
19#ifdef CONFIG_ACPI
20int __init psci_acpi_init(void);
21bool __init acpi_psci_present(void);
22bool __init acpi_psci_use_hvc(void);
23#else
24static inline int psci_acpi_init(void) { return 0; }
25static inline bool acpi_psci_present(void) { return false; }
26#endif
19 27
20#endif /* __ASM_PSCI_H */ 28#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index bf22650b1a78..db02be81b90a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -42,7 +42,7 @@ extern void handle_IPI(int ipinr, struct pt_regs *regs);
42 * Discover the set of possible CPUs and determine their 42 * Discover the set of possible CPUs and determine their
43 * SMP operations. 43 * SMP operations.
44 */ 44 */
45extern void of_smp_init_cpus(void); 45extern void smp_init_cpus(void);
46 46
47/* 47/*
48 * Provide a function to raise an IPI cross call on CPUs in callmap. 48 * Provide a function to raise an IPI cross call on CPUs in callmap.
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 8dcd61e32176..7abf7570c00f 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -19,6 +19,8 @@
19#ifndef __ASM_SMP_PLAT_H 19#ifndef __ASM_SMP_PLAT_H
20#define __ASM_SMP_PLAT_H 20#define __ASM_SMP_PLAT_H
21 21
22#include <linux/cpumask.h>
23
22#include <asm/types.h> 24#include <asm/types.h>
23 25
24struct mpidr_hash { 26struct mpidr_hash {
@@ -39,6 +41,20 @@ static inline u32 mpidr_hash_size(void)
39 */ 41 */
40extern u64 __cpu_logical_map[NR_CPUS]; 42extern u64 __cpu_logical_map[NR_CPUS];
41#define cpu_logical_map(cpu) __cpu_logical_map[cpu] 43#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
44/*
45 * Retrieve logical cpu index corresponding to a given MPIDR.Aff*
46 * - mpidr: MPIDR.Aff* bits to be used for the look-up
47 *
48 * Returns the cpu logical index or -EINVAL on look-up error
49 */
50static inline int get_logical_index(u64 mpidr)
51{
52 int cpu;
53 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
54 if (cpu_logical_map(cpu) == mpidr)
55 return cpu;
56 return -EINVAL;
57}
42 58
43void __init do_post_cpus_up_work(void); 59void __init do_post_cpus_up_work(void);
44 60
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 003802f58963..59a5b0f1e81c 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -21,6 +21,6 @@ struct sleep_save_sp {
21 phys_addr_t save_ptr_stash_phys; 21 phys_addr_t save_ptr_stash_phys;
22}; 22};
23 23
24extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)); 24extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
25extern void cpu_resume(void); 25extern void cpu_resume(void);
26#endif 26#endif
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 7a18fabbe0f6..57f110bea6a8 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,8 @@
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/linkage.h> 24#include <linux/linkage.h>
25#include <linux/irqflags.h> 25#include <linux/irqflags.h>
26#include <linux/signal.h>
27#include <linux/ratelimit.h>
26#include <linux/reboot.h> 28#include <linux/reboot.h>
27 29
28struct pt_regs; 30struct pt_regs;
@@ -41,9 +43,19 @@ struct mm_struct;
41extern void show_pte(struct mm_struct *mm, unsigned long addr); 43extern void show_pte(struct mm_struct *mm, unsigned long addr);
42extern void __show_regs(struct pt_regs *); 44extern void __show_regs(struct pt_regs *);
43 45
44void soft_restart(unsigned long);
45extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 46extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
46 47
48#define show_unhandled_signals_ratelimited() \
49({ \
50 static DEFINE_RATELIMIT_STATE(_rs, \
51 DEFAULT_RATELIMIT_INTERVAL, \
52 DEFAULT_RATELIMIT_BURST); \
53 bool __show_ratelimited = false; \
54 if (show_unhandled_signals && __ratelimit(&_rs)) \
55 __show_ratelimited = true; \
56 __show_ratelimited; \
57})
58
47#define UDBG_UNDEFINED (1 << 0) 59#define UDBG_UNDEFINED (1 << 0)
48#define UDBG_SYSCALL (1 << 1) 60#define UDBG_SYSCALL (1 << 1)
49#define UDBG_BADABORT (1 << 2) 61#define UDBG_BADABORT (1 << 2)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3bb05b98616..934815d45eda 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -28,8 +28,6 @@
28 * TLB Management 28 * TLB Management
29 * ============== 29 * ==============
30 * 30 *
31 * The arch/arm64/mm/tlb.S files implement these methods.
32 *
33 * The TLB specific code is expected to perform whatever tests it needs 31 * The TLB specific code is expected to perform whatever tests it needs
34 * to determine if it should invalidate the TLB for each call. Start 32 * to determine if it should invalidate the TLB for each call. Start
35 * addresses are inclusive and end addresses are exclusive; it is safe to 33 * addresses are inclusive and end addresses are exclusive; it is safe to
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 8b839558838e..19de7537e7d3 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -36,12 +36,6 @@ EXPORT_SYMBOL(acpi_disabled);
36int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */ 36int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
37EXPORT_SYMBOL(acpi_pci_disabled); 37EXPORT_SYMBOL(acpi_pci_disabled);
38 38
39/* Processors with enabled flag and sane MPIDR */
40static int enabled_cpus;
41
42/* Boot CPU is valid or not in MADT */
43static bool bootcpu_valid __initdata;
44
45static bool param_acpi_off __initdata; 39static bool param_acpi_off __initdata;
46static bool param_acpi_force __initdata; 40static bool param_acpi_force __initdata;
47 41
@@ -95,122 +89,15 @@ void __init __acpi_unmap_table(char *map, unsigned long size)
95 early_memunmap(map, size); 89 early_memunmap(map, size);
96} 90}
97 91
98/** 92bool __init acpi_psci_present(void)
99 * acpi_map_gic_cpu_interface - generates a logical cpu number
100 * and map to MPIDR represented by GICC structure
101 */
102static void __init
103acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
104{ 93{
105 int i; 94 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_COMPLIANT;
106 u64 mpidr = processor->arm_mpidr & MPIDR_HWID_BITMASK;
107 bool enabled = !!(processor->flags & ACPI_MADT_ENABLED);
108
109 if (mpidr == INVALID_HWID) {
110 pr_info("Skip MADT cpu entry with invalid MPIDR\n");
111 return;
112 }
113
114 total_cpus++;
115 if (!enabled)
116 return;
117
118 if (enabled_cpus >= NR_CPUS) {
119 pr_warn("NR_CPUS limit of %d reached, Processor %d/0x%llx ignored.\n",
120 NR_CPUS, total_cpus, mpidr);
121 return;
122 }
123
124 /* Check if GICC structure of boot CPU is available in the MADT */
125 if (cpu_logical_map(0) == mpidr) {
126 if (bootcpu_valid) {
127 pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
128 mpidr);
129 return;
130 }
131
132 bootcpu_valid = true;
133 }
134
135 /*
136 * Duplicate MPIDRs are a recipe for disaster. Scan
137 * all initialized entries and check for
138 * duplicates. If any is found just ignore the CPU.
139 */
140 for (i = 1; i < enabled_cpus; i++) {
141 if (cpu_logical_map(i) == mpidr) {
142 pr_err("Firmware bug, duplicate CPU MPIDR: 0x%llx in MADT\n",
143 mpidr);
144 return;
145 }
146 }
147
148 if (!acpi_psci_present())
149 return;
150
151 cpu_ops[enabled_cpus] = cpu_get_ops("psci");
152 /* CPU 0 was already initialized */
153 if (enabled_cpus) {
154 if (!cpu_ops[enabled_cpus])
155 return;
156
157 if (cpu_ops[enabled_cpus]->cpu_init(NULL, enabled_cpus))
158 return;
159
160 /* map the logical cpu id to cpu MPIDR */
161 cpu_logical_map(enabled_cpus) = mpidr;
162 }
163
164 enabled_cpus++;
165} 95}
166 96
167static int __init 97/* Whether HVC must be used instead of SMC as the PSCI conduit */
168acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header, 98bool __init acpi_psci_use_hvc(void)
169 const unsigned long end)
170{ 99{
171 struct acpi_madt_generic_interrupt *processor; 100 return acpi_gbl_FADT.arm_boot_flags & ACPI_FADT_PSCI_USE_HVC;
172
173 processor = (struct acpi_madt_generic_interrupt *)header;
174
175 if (BAD_MADT_ENTRY(processor, end))
176 return -EINVAL;
177
178 acpi_table_print_madt_entry(header);
179 acpi_map_gic_cpu_interface(processor);
180 return 0;
181}
182
183/* Parse GIC cpu interface entries in MADT for SMP init */
184void __init acpi_init_cpus(void)
185{
186 int count, i;
187
188 /*
189 * do a partial walk of MADT to determine how many CPUs
190 * we have including disabled CPUs, and get information
191 * we need for SMP init
192 */
193 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
194 acpi_parse_gic_cpu_interface, 0);
195
196 if (!count) {
197 pr_err("No GIC CPU interface entries present\n");
198 return;
199 } else if (count < 0) {
200 pr_err("Error parsing GIC CPU interface entry\n");
201 return;
202 }
203
204 if (!bootcpu_valid) {
205 pr_err("MADT missing boot CPU MPIDR, not enabling secondaries\n");
206 return;
207 }
208
209 for (i = 0; i < enabled_cpus; i++)
210 set_cpu_possible(i, true);
211
212 /* Make boot-up look pretty */
213 pr_info("%d CPUs enabled, %d CPUs total\n", enabled_cpus, total_cpus);
214} 101}
215 102
216/* 103/*
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 28f8365edc4c..221b98312f0c 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -24,8 +24,13 @@
24#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
25#include <asm/alternative.h> 25#include <asm/alternative.h>
26#include <asm/cpufeature.h> 26#include <asm/cpufeature.h>
27#include <asm/insn.h>
27#include <linux/stop_machine.h> 28#include <linux/stop_machine.h>
28 29
30#define __ALT_PTR(a,f) (u32 *)((void *)&(a)->f + (a)->f)
31#define ALT_ORIG_PTR(a) __ALT_PTR(a, orig_offset)
32#define ALT_REPL_PTR(a) __ALT_PTR(a, alt_offset)
33
29extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; 34extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
30 35
31struct alt_region { 36struct alt_region {
@@ -33,13 +38,63 @@ struct alt_region {
33 struct alt_instr *end; 38 struct alt_instr *end;
34}; 39};
35 40
41/*
42 * Check if the target PC is within an alternative block.
43 */
44static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
45{
46 unsigned long replptr;
47
48 if (kernel_text_address(pc))
49 return 1;
50
51 replptr = (unsigned long)ALT_REPL_PTR(alt);
52 if (pc >= replptr && pc <= (replptr + alt->alt_len))
53 return 0;
54
55 /*
56 * Branching into *another* alternate sequence is doomed, and
57 * we're not even trying to fix it up.
58 */
59 BUG();
60}
61
62static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
63{
64 u32 insn;
65
66 insn = le32_to_cpu(*altinsnptr);
67
68 if (aarch64_insn_is_branch_imm(insn)) {
69 s32 offset = aarch64_get_branch_offset(insn);
70 unsigned long target;
71
72 target = (unsigned long)altinsnptr + offset;
73
74 /*
75 * If we're branching inside the alternate sequence,
76 * do not rewrite the instruction, as it is already
77 * correct. Otherwise, generate the new instruction.
78 */
79 if (branch_insn_requires_update(alt, target)) {
80 offset = target - (unsigned long)insnptr;
81 insn = aarch64_set_branch_offset(insn, offset);
82 }
83 }
84
85 return insn;
86}
87
36static int __apply_alternatives(void *alt_region) 88static int __apply_alternatives(void *alt_region)
37{ 89{
38 struct alt_instr *alt; 90 struct alt_instr *alt;
39 struct alt_region *region = alt_region; 91 struct alt_region *region = alt_region;
40 u8 *origptr, *replptr; 92 u32 *origptr, *replptr;
41 93
42 for (alt = region->begin; alt < region->end; alt++) { 94 for (alt = region->begin; alt < region->end; alt++) {
95 u32 insn;
96 int i, nr_inst;
97
43 if (!cpus_have_cap(alt->cpufeature)) 98 if (!cpus_have_cap(alt->cpufeature))
44 continue; 99 continue;
45 100
@@ -47,11 +102,17 @@ static int __apply_alternatives(void *alt_region)
47 102
48 pr_info_once("patching kernel code\n"); 103 pr_info_once("patching kernel code\n");
49 104
50 origptr = (u8 *)&alt->orig_offset + alt->orig_offset; 105 origptr = ALT_ORIG_PTR(alt);
51 replptr = (u8 *)&alt->alt_offset + alt->alt_offset; 106 replptr = ALT_REPL_PTR(alt);
52 memcpy(origptr, replptr, alt->alt_len); 107 nr_inst = alt->alt_len / sizeof(insn);
108
109 for (i = 0; i < nr_inst; i++) {
110 insn = get_alt_insn(alt, origptr + i, replptr + i);
111 *(origptr + i) = cpu_to_le32(insn);
112 }
113
53 flush_icache_range((uintptr_t)origptr, 114 flush_icache_range((uintptr_t)origptr,
54 (uintptr_t)(origptr + alt->alt_len)); 115 (uintptr_t)(origptr + nr_inst));
55 } 116 }
56 117
57 return 0; 118 return 0;
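get_alt_insn() above relies on the two insn.c helpers declared earlier (aarch64_get_branch_offset / aarch64_set_branch_offset, added by this series but not shown). For an unconditional immediate branch (B/BL) the offset is just the sign-extended 26-bit immediate scaled by the 4-byte instruction size; the real helpers also handle conditional branches and CBZ/CBNZ/TBZ/TBNZ forms. A simplified sketch for the B/BL case only:

/* Decode the byte offset encoded in a B/BL instruction (imm26 * 4). */
static s32 example_get_b_offset(u32 insn)
{
	u32 imm26 = insn & 0x03ffffff;

	/* Sign-extend the 26-bit immediate, then scale by 4 bytes per insn. */
	return ((s32)(imm26 << 6)) >> 4;
}

/* Re-encode a byte offset into an existing B/BL instruction. */
static u32 example_set_b_offset(u32 insn, s32 offset)
{
	u32 imm26 = ((u32)offset >> 2) & 0x03ffffff;

	return (insn & ~0x03ffffffU) | imm26;
}
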
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index da675cc5dfae..c99701a34d7b 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -127,7 +127,6 @@ int main(void)
127 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); 127 DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
128 DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic)); 128 DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
129 DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic)); 129 DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
130 DEFINE(VGIC_SR_VECTOR_SZ, sizeof(struct vgic_sr_vectors));
131 DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); 130 DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
132 DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); 131 DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
133 DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); 132 DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index fb8ff9ba467a..5ea337dd2f15 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -16,11 +16,13 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#include <asm/cpu_ops.h> 19#include <linux/acpi.h>
20#include <asm/smp_plat.h>
21#include <linux/errno.h> 20#include <linux/errno.h>
22#include <linux/of.h> 21#include <linux/of.h>
23#include <linux/string.h> 22#include <linux/string.h>
23#include <asm/acpi.h>
24#include <asm/cpu_ops.h>
25#include <asm/smp_plat.h>
24 26
25extern const struct cpu_operations smp_spin_table_ops; 27extern const struct cpu_operations smp_spin_table_ops;
26extern const struct cpu_operations cpu_psci_ops; 28extern const struct cpu_operations cpu_psci_ops;
@@ -35,7 +37,7 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
35 NULL, 37 NULL,
36}; 38};
37 39
38const struct cpu_operations * __init cpu_get_ops(const char *name) 40static const struct cpu_operations * __init cpu_get_ops(const char *name)
39{ 41{
40 const struct cpu_operations **ops = supported_cpu_ops; 42 const struct cpu_operations **ops = supported_cpu_ops;
41 43
@@ -49,39 +51,53 @@ const struct cpu_operations * __init cpu_get_ops(const char *name)
49 return NULL; 51 return NULL;
50} 52}
51 53
54static const char *__init cpu_read_enable_method(int cpu)
55{
56 const char *enable_method;
57
58 if (acpi_disabled) {
59 struct device_node *dn = of_get_cpu_node(cpu, NULL);
60
61 if (!dn) {
62 if (!cpu)
63 pr_err("Failed to find device node for boot cpu\n");
64 return NULL;
65 }
66
67 enable_method = of_get_property(dn, "enable-method", NULL);
68 if (!enable_method) {
69 /*
70 * The boot CPU may not have an enable method (e.g.
71 * when spin-table is used for secondaries).
72 * Don't warn spuriously.
73 */
74 if (cpu != 0)
75 pr_err("%s: missing enable-method property\n",
76 dn->full_name);
77 }
78 } else {
79 enable_method = acpi_get_enable_method(cpu);
80 if (!enable_method)
81 pr_err("Unsupported ACPI enable-method\n");
82 }
83
84 return enable_method;
85}
52/* 86/*
53 * Read a cpu's enable method from the device tree and record it in cpu_ops. 87 * Read a cpu's enable method and record it in cpu_ops.
54 */ 88 */
55int __init cpu_read_ops(struct device_node *dn, int cpu) 89int __init cpu_read_ops(int cpu)
56{ 90{
57 const char *enable_method = of_get_property(dn, "enable-method", NULL); 91 const char *enable_method = cpu_read_enable_method(cpu);
58 if (!enable_method) { 92
59 /* 93 if (!enable_method)
60 * The boot CPU may not have an enable method (e.g. when 94 return -ENODEV;
61 * spin-table is used for secondaries). Don't warn spuriously.
62 */
63 if (cpu != 0)
64 pr_err("%s: missing enable-method property\n",
65 dn->full_name);
66 return -ENOENT;
67 }
68 95
69 cpu_ops[cpu] = cpu_get_ops(enable_method); 96 cpu_ops[cpu] = cpu_get_ops(enable_method);
70 if (!cpu_ops[cpu]) { 97 if (!cpu_ops[cpu]) {
71 pr_warn("%s: unsupported enable-method property: %s\n", 98 pr_warn("Unsupported enable-method: %s\n", enable_method);
72 dn->full_name, enable_method);
73 return -EOPNOTSUPP; 99 return -EOPNOTSUPP;
74 } 100 }
75 101
76 return 0; 102 return 0;
77} 103}
78
79void __init cpu_read_bootcpu_ops(void)
80{
81 struct device_node *dn = of_get_cpu_node(0, NULL);
82 if (!dn) {
83 pr_err("Failed to find device node for boot cpu\n");
84 return;
85 }
86 cpu_read_ops(dn, 0);
87}
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3d9967e43d89..5ad86ceac010 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -22,7 +22,23 @@
22#include <asm/cpu.h> 22#include <asm/cpu.h>
23#include <asm/cpufeature.h> 23#include <asm/cpufeature.h>
24 24
25static bool
26has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
27{
28 u64 val;
29
30 val = read_cpuid(id_aa64pfr0_el1);
31 return (val & entry->register_mask) == entry->register_value;
32}
33
25static const struct arm64_cpu_capabilities arm64_features[] = { 34static const struct arm64_cpu_capabilities arm64_features[] = {
35 {
36 .desc = "GIC system register CPU interface",
37 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
38 .matches = has_id_aa64pfr0_feature,
39 .register_mask = (0xf << 24),
40 .register_value = (1 << 24),
41 },
26 {}, 42 {},
27}; 43};
28 44
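Once the table entry above matches, the capability bit is set during boot-time feature detection and any code path (including the patched alternatives) can test it. A minimal runtime check looks like this (the wrapper function is hypothetical; cpus_have_cap() is the real accessor, as used in alternative.c above):

static bool have_gicv3_cpuif(void)
{
	return cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF);
}
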
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index a78143a5c99f..7ce589ca54a4 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -18,15 +18,10 @@
18int arm_cpuidle_init(unsigned int cpu) 18int arm_cpuidle_init(unsigned int cpu)
19{ 19{
20 int ret = -EOPNOTSUPP; 20 int ret = -EOPNOTSUPP;
21 struct device_node *cpu_node = of_cpu_device_node_get(cpu);
22
23 if (!cpu_node)
24 return -ENODEV;
25 21
26 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle) 22 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
27 ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu); 23 ret = cpu_ops[cpu]->cpu_init_idle(cpu);
28 24
29 of_node_put(cpu_node);
30 return ret; 25 return ret;
31} 26}
32 27
@@ -37,7 +32,7 @@ int arm_cpuidle_init(unsigned int cpu)
37 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU 32 * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
38 * operations back-end error code otherwise. 33 * operations back-end error code otherwise.
39 */ 34 */
40int cpu_suspend(unsigned long arg) 35int arm_cpuidle_suspend(int index)
41{ 36{
42 int cpu = smp_processor_id(); 37 int cpu = smp_processor_id();
43 38
@@ -47,5 +42,5 @@ int cpu_suspend(unsigned long arg)
47 */ 42 */
48 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) 43 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
49 return -EOPNOTSUPP; 44 return -EOPNOTSUPP;
50 return cpu_ops[cpu]->cpu_suspend(arg); 45 return cpu_ops[cpu]->cpu_suspend(index);
51} 46}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 959fe8733560..a7691a378668 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,7 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/linkage.h> 22#include <linux/linkage.h>
23 23
24#include <asm/alternative-asm.h> 24#include <asm/alternative.h>
25#include <asm/assembler.h> 25#include <asm/assembler.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/cpufeature.h> 27#include <asm/cpufeature.h>
@@ -124,21 +124,24 @@
124 msr sp_el0, x23 124 msr sp_el0, x23
125 125
126#ifdef CONFIG_ARM64_ERRATUM_845719 126#ifdef CONFIG_ARM64_ERRATUM_845719
127 alternative_insn \ 127
128 "nop", \ 128#undef SEQUENCE_ORG
129 "tbz x22, #4, 1f", \ 129#undef SEQUENCE_ALT
130 ARM64_WORKAROUND_845719 130
131#ifdef CONFIG_PID_IN_CONTEXTIDR 131#ifdef CONFIG_PID_IN_CONTEXTIDR
132 alternative_insn \ 132
133 "nop; nop", \ 133#define SEQUENCE_ORG "nop ; nop ; nop"
134 "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \ 134#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
135 ARM64_WORKAROUND_845719 135
136#else 136#else
137 alternative_insn \ 137
138 "nop", \ 138#define SEQUENCE_ORG "nop ; nop"
139 "msr contextidr_el1, xzr; 1:", \ 139#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
140 ARM64_WORKAROUND_845719 140
141#endif 141#endif
142
143 alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
144
142#endif 145#endif
143 .endif 146 .endif
144 msr elr_el1, x21 // set up the return data 147 msr elr_el1, x21 // set up the return data
@@ -517,6 +520,7 @@ el0_sp_pc:
517 mrs x26, far_el1 520 mrs x26, far_el1
518 // enable interrupts before calling the main handler 521 // enable interrupts before calling the main handler
519 enable_dbg_and_irq 522 enable_dbg_and_irq
523 ct_user_exit
520 mov x0, x26 524 mov x0, x26
521 mov x1, x25 525 mov x1, x25
522 mov x2, sp 526 mov x2, sp
@@ -608,11 +612,16 @@ ENDPROC(cpu_switch_to)
608 */ 612 */
609ret_fast_syscall: 613ret_fast_syscall:
610 disable_irq // disable interrupts 614 disable_irq // disable interrupts
611 ldr x1, [tsk, #TI_FLAGS] 615 ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
616 and x2, x1, #_TIF_SYSCALL_WORK
617 cbnz x2, ret_fast_syscall_trace
612 and x2, x1, #_TIF_WORK_MASK 618 and x2, x1, #_TIF_WORK_MASK
613 cbnz x2, fast_work_pending 619 cbnz x2, fast_work_pending
614 enable_step_tsk x1, x2 620 enable_step_tsk x1, x2
615 kernel_exit 0, ret = 1 621 kernel_exit 0, ret = 1
622ret_fast_syscall_trace:
623 enable_irq // enable interrupts
624 b __sys_trace_return
616 625
617/* 626/*
618 * Ok, we need to do extra processing, enter the slow path. 627 * Ok, we need to do extra processing, enter the slow path.
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 3dca15634e69..44d6f7545505 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/cpu.h>
20#include <linux/cpu_pm.h> 21#include <linux/cpu_pm.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
22#include <linux/init.h> 23#include <linux/init.h>
@@ -296,6 +297,35 @@ static void fpsimd_pm_init(void)
296static inline void fpsimd_pm_init(void) { } 297static inline void fpsimd_pm_init(void) { }
297#endif /* CONFIG_CPU_PM */ 298#endif /* CONFIG_CPU_PM */
298 299
300#ifdef CONFIG_HOTPLUG_CPU
301static int fpsimd_cpu_hotplug_notifier(struct notifier_block *nfb,
302 unsigned long action,
303 void *hcpu)
304{
305 unsigned int cpu = (long)hcpu;
306
307 switch (action) {
308 case CPU_DEAD:
309 case CPU_DEAD_FROZEN:
310 per_cpu(fpsimd_last_state, cpu) = NULL;
311 break;
312 }
313 return NOTIFY_OK;
314}
315
316static struct notifier_block fpsimd_cpu_hotplug_notifier_block = {
317 .notifier_call = fpsimd_cpu_hotplug_notifier,
318};
319
320static inline void fpsimd_hotplug_init(void)
321{
322 register_cpu_notifier(&fpsimd_cpu_hotplug_notifier_block);
323}
324
325#else
326static inline void fpsimd_hotplug_init(void) { }
327#endif
328
299/* 329/*
300 * FP/SIMD support code initialisation. 330 * FP/SIMD support code initialisation.
301 */ 331 */
@@ -315,6 +345,7 @@ static int __init fpsimd_init(void)
315 elf_hwcap |= HWCAP_ASIMD; 345 elf_hwcap |= HWCAP_ASIMD;
316 346
317 fpsimd_pm_init(); 347 fpsimd_pm_init();
348 fpsimd_hotplug_init();
318 349
319 return 0; 350 return 0;
320} 351}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 19f915e8f6e0..c0ff3ce4299e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -237,8 +237,6 @@ ENTRY(stext)
237 bl el2_setup // Drop to EL1, w20=cpu_boot_mode 237 bl el2_setup // Drop to EL1, w20=cpu_boot_mode
238 adrp x24, __PHYS_OFFSET 238 adrp x24, __PHYS_OFFSET
239 bl set_cpu_boot_mode_flag 239 bl set_cpu_boot_mode_flag
240
241 bl __vet_fdt
242 bl __create_page_tables // x25=TTBR0, x26=TTBR1 240 bl __create_page_tables // x25=TTBR0, x26=TTBR1
243 /* 241 /*
244 * The following calls CPU setup code, see arch/arm64/mm/proc.S for 242 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -270,24 +268,6 @@ preserve_boot_args:
270ENDPROC(preserve_boot_args) 268ENDPROC(preserve_boot_args)
271 269
272/* 270/*
273 * Determine validity of the x21 FDT pointer.
274 * The dtb must be 8-byte aligned and live in the first 512M of memory.
275 */
276__vet_fdt:
277 tst x21, #0x7
278 b.ne 1f
279 cmp x21, x24
280 b.lt 1f
281 mov x0, #(1 << 29)
282 add x0, x0, x24
283 cmp x21, x0
284 b.ge 1f
285 ret
2861:
287 mov x21, #0
288 ret
289ENDPROC(__vet_fdt)
290/*
291 * Macro to create a table entry to the next page. 271 * Macro to create a table entry to the next page.
292 * 272 *
293 * tbl: page table address 273 * tbl: page table address
@@ -348,8 +328,7 @@ ENDPROC(__vet_fdt)
348 * required to get the kernel running. The following sections are required: 328 * required to get the kernel running. The following sections are required:
349 * - identity mapping to enable the MMU (low address, TTBR0) 329 * - identity mapping to enable the MMU (low address, TTBR0)
350 * - first few MB of the kernel linear mapping to jump to once the MMU has 330 * - first few MB of the kernel linear mapping to jump to once the MMU has
351 * been enabled, including the FDT blob (TTBR1) 331 * been enabled
352 * - pgd entry for fixed mappings (TTBR1)
353 */ 332 */
354__create_page_tables: 333__create_page_tables:
355 adrp x25, idmap_pg_dir 334 adrp x25, idmap_pg_dir
@@ -382,7 +361,7 @@ __create_page_tables:
382 * Create the identity mapping. 361 * Create the identity mapping.
383 */ 362 */
384 mov x0, x25 // idmap_pg_dir 363 mov x0, x25 // idmap_pg_dir
385 adrp x3, KERNEL_START // __pa(KERNEL_START) 364 adrp x3, __idmap_text_start // __pa(__idmap_text_start)
386 365
387#ifndef CONFIG_ARM64_VA_BITS_48 366#ifndef CONFIG_ARM64_VA_BITS_48
388#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) 367#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -405,11 +384,11 @@ __create_page_tables:
405 384
406 /* 385 /*
407 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the 386 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
408 * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), 387 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
409 * this number conveniently equals the number of leading zeroes in 388 * this number conveniently equals the number of leading zeroes in
410 * the physical address of KERNEL_END. 389 * the physical address of __idmap_text_end.
411 */ 390 */
412 adrp x5, KERNEL_END 391 adrp x5, __idmap_text_end
413 clz x5, x5 392 clz x5, x5
414 cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? 393 cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
415 b.ge 1f // .. then skip additional level 394 b.ge 1f // .. then skip additional level
@@ -424,8 +403,8 @@ __create_page_tables:
424#endif 403#endif
425 404
426 create_pgd_entry x0, x3, x5, x6 405 create_pgd_entry x0, x3, x5, x6
427 mov x5, x3 // __pa(KERNEL_START) 406 mov x5, x3 // __pa(__idmap_text_start)
428 adr_l x6, KERNEL_END // __pa(KERNEL_END) 407 adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
429 create_block_map x0, x7, x3, x5, x6 408 create_block_map x0, x7, x3, x5, x6
430 409
431 /* 410 /*
@@ -439,22 +418,6 @@ __create_page_tables:
439 create_block_map x0, x7, x3, x5, x6 418 create_block_map x0, x7, x3, x5, x6
440 419
441 /* 420 /*
442 * Map the FDT blob (maximum 2MB; must be within 512MB of
443 * PHYS_OFFSET).
444 */
445 mov x3, x21 // FDT phys address
446 and x3, x3, #~((1 << 21) - 1) // 2MB aligned
447 mov x6, #PAGE_OFFSET
448 sub x5, x3, x24 // subtract PHYS_OFFSET
449 tst x5, #~((1 << 29) - 1) // within 512MB?
450 csel x21, xzr, x21, ne // zero the FDT pointer
451 b.ne 1f
452 add x5, x5, x6 // __va(FDT blob)
453 add x6, x5, #1 << 21 // 2MB for the FDT blob
454 sub x6, x6, #1 // inclusive range
455 create_block_map x0, x7, x3, x5, x6
4561:
457 /*
458 * Since the page tables have been populated with non-cacheable 421 * Since the page tables have been populated with non-cacheable
459 * accesses (MMU disabled), invalidate the idmap and swapper page 422 * accesses (MMU disabled), invalidate the idmap and swapper page
460 * tables again to remove any speculatively loaded cache lines. 423 * tables again to remove any speculatively loaded cache lines.
@@ -669,6 +632,7 @@ ENDPROC(__secondary_switched)
669 * 632 *
670 * other registers depend on the function called upon completion 633 * other registers depend on the function called upon completion
671 */ 634 */
635 .section ".idmap.text", "ax"
672__enable_mmu: 636__enable_mmu:
673 ldr x5, =vectors 637 ldr x5, =vectors
674 msr vbar_el1, x5 638 msr vbar_el1, x5
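In the __create_page_tables hunk above, the largest T0SZ that still covers the idmap is exactly the number of leading zero bits in __pa(__idmap_text_end), since T0SZ = 64 - (address bits needed). A quick stand-alone check of that arithmetic with a hypothetical end address (the real value depends on where the kernel image is loaded):

/*
 * Illustration of the T0SZ calculation above. The address is made up;
 * the comparison mirrors the clz/cmp sequence in __create_page_tables.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t idmap_end = 0x8001200000ull;   /* hypothetical __pa(__idmap_text_end) */
        int max_t0sz = __builtin_clzll(idmap_end);
        int default_t0sz = 64 - 39;             /* VA_BITS = 39 default config */

        printf("max T0SZ %d, default %d -> %s\n", max_t0sz, default_t0sz,
               max_t0sz >= default_t0sz ? "default idmap is enough"
                                        : "need an extended idmap level");
        return 0;
}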
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 924902083e47..dd9671cd0bb2 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -77,6 +77,14 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
77 } 77 }
78} 78}
79 79
80bool aarch64_insn_is_branch_imm(u32 insn)
81{
82 return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
83 aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
84 aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
85 aarch64_insn_is_bcond(insn));
86}
87
80static DEFINE_SPINLOCK(patch_lock); 88static DEFINE_SPINLOCK(patch_lock);
81 89
82static void __kprobes *patch_map(void *addr, int fixmap) 90static void __kprobes *patch_map(void *addr, int fixmap)
@@ -1057,6 +1065,58 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1057 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift); 1065 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1058} 1066}
1059 1067
1068/*
1069 * Decode the imm field of a branch, and return the byte offset as a
1070 * signed value (so it can be used when computing a new branch
1071 * target).
1072 */
1073s32 aarch64_get_branch_offset(u32 insn)
1074{
1075 s32 imm;
1076
1077 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1078 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1079 return (imm << 6) >> 4;
1080 }
1081
1082 if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1083 aarch64_insn_is_bcond(insn)) {
1084 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1085 return (imm << 13) >> 11;
1086 }
1087
1088 if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1089 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1090 return (imm << 18) >> 16;
1091 }
1092
1093 /* Unhandled instruction */
1094 BUG();
1095}
1096
1097/*
1098 * Encode the displacement of a branch in the imm field and return the
1099 * updated instruction.
1100 */
1101u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1102{
1103 if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1104 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1105 offset >> 2);
1106
1107 if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1108 aarch64_insn_is_bcond(insn))
1109 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1110 offset >> 2);
1111
1112 if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1113 return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1114 offset >> 2);
1115
1116 /* Unhandled instruction */
1117 BUG();
1118}
1119
1060bool aarch32_insn_is_wide(u32 insn) 1120bool aarch32_insn_is_wide(u32 insn)
1061{ 1121{
1062 return insn >= 0xe800; 1122 return insn >= 0xe800;
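The aarch64_get_branch_offset() shifts above ((imm << 6) >> 4 for B/BL, and the analogous pairs for the 19- and 14-bit forms) are a compact way of sign-extending the immediate field and scaling it to a byte offset. The same result for B/BL, written out explicitly in a stand-alone sketch (b_imm26_to_offset() is a hypothetical helper, not the kernel API):

/*
 * Explicit version of the B/BL offset decode: sign-extend the 26-bit
 * immediate from bit 25, then scale words to bytes.
 */
#include <stdint.h>
#include <stdio.h>

static int32_t b_imm26_to_offset(uint32_t insn)
{
        uint32_t imm = insn & 0x03ffffffu;      /* raw imm26 field */

        if (imm & 0x02000000u)                  /* sign bit (bit 25) set? */
                imm |= 0xfc000000u;             /* sign-extend to 32 bits */
        return (int32_t)imm * 4;                /* word offset -> byte offset */
}

int main(void)
{
        printf("%d\n", b_imm26_to_offset(0x17ffffffu)); /* b .-4  -> -4 */
        printf("%d\n", b_imm26_to_offset(0x14000002u)); /* b .+8  ->  8 */
        return 0;
}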
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cce18c85d2e8..702591f6180a 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
488 } 488 }
489 489
490 err = request_irq(irq, armpmu->handle_irq, 490 err = request_irq(irq, armpmu->handle_irq,
491 IRQF_NOBALANCING, 491 IRQF_NOBALANCING | IRQF_NO_THREAD,
492 "arm-pmu", armpmu); 492 "arm-pmu", armpmu);
493 if (err) { 493 if (err) {
494 pr_err("unable to request IRQ%d for ARM PMU counters\n", 494 pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c6b1f3b96f45..223b093c9440 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -58,14 +58,6 @@ unsigned long __stack_chk_guard __read_mostly;
58EXPORT_SYMBOL(__stack_chk_guard); 58EXPORT_SYMBOL(__stack_chk_guard);
59#endif 59#endif
60 60
61void soft_restart(unsigned long addr)
62{
63 setup_mm_for_reboot();
64 cpu_soft_restart(virt_to_phys(cpu_reset), addr);
65 /* Should never get here */
66 BUG();
67}
68
69/* 61/*
70 * Function pointers to optional machine specific functions 62 * Function pointers to optional machine specific functions
71 */ 63 */
@@ -136,9 +128,7 @@ void machine_power_off(void)
136 128
137/* 129/*
138 * Restart requires that the secondary CPUs stop performing any activity 130 * Restart requires that the secondary CPUs stop performing any activity
139 * while the primary CPU resets the system. Systems with a single CPU can 131 * while the primary CPU resets the system. Systems with multiple CPUs must
140 * use soft_restart() as their machine descriptor's .restart hook, since that
141 * will cause the only available CPU to reset. Systems with multiple CPUs must
142 * provide a HW restart implementation, to ensure that all CPUs reset at once. 132 * provide a HW restart implementation, to ensure that all CPUs reset at once.
143 * This is required so that any code running after reset on the primary CPU 133 * This is required so that any code running after reset on the primary CPU
144 * doesn't have to co-ordinate with other CPUs to ensure they aren't still 134 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
@@ -243,7 +233,8 @@ void release_thread(struct task_struct *dead_task)
243 233
244int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) 234int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
245{ 235{
246 fpsimd_preserve_current_state(); 236 if (current->mm)
237 fpsimd_preserve_current_state();
247 *dst = *src; 238 *dst = *src;
248 return 0; 239 return 0;
249} 240}
@@ -254,35 +245,35 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
254 unsigned long stk_sz, struct task_struct *p) 245 unsigned long stk_sz, struct task_struct *p)
255{ 246{
256 struct pt_regs *childregs = task_pt_regs(p); 247 struct pt_regs *childregs = task_pt_regs(p);
257 unsigned long tls = p->thread.tp_value;
258 248
259 memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); 249 memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
260 250
261 if (likely(!(p->flags & PF_KTHREAD))) { 251 if (likely(!(p->flags & PF_KTHREAD))) {
262 *childregs = *current_pt_regs(); 252 *childregs = *current_pt_regs();
263 childregs->regs[0] = 0; 253 childregs->regs[0] = 0;
264 if (is_compat_thread(task_thread_info(p))) { 254
265 if (stack_start) 255 /*
256 * Read the current TLS pointer from tpidr_el0 as it may be
257 * out-of-sync with the saved value.
258 */
259 asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p)));
260
261 if (stack_start) {
262 if (is_compat_thread(task_thread_info(p)))
266 childregs->compat_sp = stack_start; 263 childregs->compat_sp = stack_start;
267 } else { 264 /* 16-byte aligned stack mandatory on AArch64 */
268 /* 265 else if (stack_start & 15)
269 * Read the current TLS pointer from tpidr_el0 as it may be 266 return -EINVAL;
270 * out-of-sync with the saved value. 267 else
271 */
272 asm("mrs %0, tpidr_el0" : "=r" (tls));
273 if (stack_start) {
274 /* 16-byte aligned stack mandatory on AArch64 */
275 if (stack_start & 15)
276 return -EINVAL;
277 childregs->sp = stack_start; 268 childregs->sp = stack_start;
278 }
279 } 269 }
270
280 /* 271 /*
281 * If a TLS pointer was passed to clone (4th argument), use it 272 * If a TLS pointer was passed to clone (4th argument), use it
282 * for the new thread. 273 * for the new thread.
283 */ 274 */
284 if (clone_flags & CLONE_SETTLS) 275 if (clone_flags & CLONE_SETTLS)
285 tls = childregs->regs[3]; 276 p->thread.tp_value = childregs->regs[3];
286 } else { 277 } else {
287 memset(childregs, 0, sizeof(struct pt_regs)); 278 memset(childregs, 0, sizeof(struct pt_regs));
288 childregs->pstate = PSR_MODE_EL1h; 279 childregs->pstate = PSR_MODE_EL1h;
@@ -291,7 +282,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
291 } 282 }
292 p->thread.cpu_context.pc = (unsigned long)ret_from_fork; 283 p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
293 p->thread.cpu_context.sp = (unsigned long)childregs; 284 p->thread.cpu_context.sp = (unsigned long)childregs;
294 p->thread.tp_value = tls;
295 285
296 ptrace_hw_copy_thread(p); 286 ptrace_hw_copy_thread(p);
297 287
@@ -302,18 +292,12 @@ static void tls_thread_switch(struct task_struct *next)
302{ 292{
303 unsigned long tpidr, tpidrro; 293 unsigned long tpidr, tpidrro;
304 294
305 if (!is_compat_task()) { 295 asm("mrs %0, tpidr_el0" : "=r" (tpidr));
306 asm("mrs %0, tpidr_el0" : "=r" (tpidr)); 296 *task_user_tls(current) = tpidr;
307 current->thread.tp_value = tpidr;
308 }
309 297
310 if (is_compat_thread(task_thread_info(next))) { 298 tpidr = *task_user_tls(next);
311 tpidr = 0; 299 tpidrro = is_compat_thread(task_thread_info(next)) ?
312 tpidrro = next->thread.tp_value; 300 next->thread.tp_value : 0;
313 } else {
314 tpidr = next->thread.tp_value;
315 tpidrro = 0;
316 }
317 301
318 asm( 302 asm(
319 " msr tpidr_el0, %0\n" 303 " msr tpidr_el0, %0\n"
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index ea18cb53921e..869f202748e8 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -15,7 +15,6 @@
15 15
16#define pr_fmt(fmt) "psci: " fmt 16#define pr_fmt(fmt) "psci: " fmt
17 17
18#include <linux/acpi.h>
19#include <linux/init.h> 18#include <linux/init.h>
20#include <linux/of.h> 19#include <linux/of.h>
21#include <linux/smp.h> 20#include <linux/smp.h>
@@ -25,8 +24,8 @@
25#include <linux/slab.h> 24#include <linux/slab.h>
26#include <uapi/linux/psci.h> 25#include <uapi/linux/psci.h>
27 26
28#include <asm/acpi.h>
29#include <asm/compiler.h> 27#include <asm/compiler.h>
28#include <asm/cputype.h>
30#include <asm/cpu_ops.h> 29#include <asm/cpu_ops.h>
31#include <asm/errno.h> 30#include <asm/errno.h>
32#include <asm/psci.h> 31#include <asm/psci.h>
@@ -37,16 +36,31 @@
37#define PSCI_POWER_STATE_TYPE_STANDBY 0 36#define PSCI_POWER_STATE_TYPE_STANDBY 0
38#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 37#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
39 38
40struct psci_power_state { 39static bool psci_power_state_loses_context(u32 state)
41 u16 id; 40{
42 u8 type; 41 return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
43 u8 affinity_level; 42}
44}; 43
44static bool psci_power_state_is_valid(u32 state)
45{
46 const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
47 PSCI_0_2_POWER_STATE_TYPE_MASK |
48 PSCI_0_2_POWER_STATE_AFFL_MASK;
49
50 return !(state & ~valid_mask);
51}
52
53/*
54 * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
55 * calls to its resident CPU, so we must avoid issuing those. We never migrate
56 * a Trusted OS even if it claims to be capable of migration -- doing so will
57 * require cooperation with a Trusted OS driver.
58 */
59static int resident_cpu = -1;
45 60
46struct psci_operations { 61struct psci_operations {
47 int (*cpu_suspend)(struct psci_power_state state, 62 int (*cpu_suspend)(u32 state, unsigned long entry_point);
48 unsigned long entry_point); 63 int (*cpu_off)(u32 state);
49 int (*cpu_off)(struct psci_power_state state);
50 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); 64 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
51 int (*migrate)(unsigned long cpuid); 65 int (*migrate)(unsigned long cpuid);
52 int (*affinity_info)(unsigned long target_affinity, 66 int (*affinity_info)(unsigned long target_affinity,
@@ -56,23 +70,21 @@ struct psci_operations {
56 70
57static struct psci_operations psci_ops; 71static struct psci_operations psci_ops;
58 72
59static int (*invoke_psci_fn)(u64, u64, u64, u64); 73typedef unsigned long (psci_fn)(unsigned long, unsigned long,
60typedef int (*psci_initcall_t)(const struct device_node *); 74 unsigned long, unsigned long);
61 75asmlinkage psci_fn __invoke_psci_fn_hvc;
62asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64); 76asmlinkage psci_fn __invoke_psci_fn_smc;
63asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); 77static psci_fn *invoke_psci_fn;
64 78
65enum psci_function { 79enum psci_function {
66 PSCI_FN_CPU_SUSPEND, 80 PSCI_FN_CPU_SUSPEND,
67 PSCI_FN_CPU_ON, 81 PSCI_FN_CPU_ON,
68 PSCI_FN_CPU_OFF, 82 PSCI_FN_CPU_OFF,
69 PSCI_FN_MIGRATE, 83 PSCI_FN_MIGRATE,
70 PSCI_FN_AFFINITY_INFO,
71 PSCI_FN_MIGRATE_INFO_TYPE,
72 PSCI_FN_MAX, 84 PSCI_FN_MAX,
73}; 85};
74 86
75static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state); 87static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
76 88
77static u32 psci_function_id[PSCI_FN_MAX]; 89static u32 psci_function_id[PSCI_FN_MAX];
78 90
@@ -92,56 +104,28 @@ static int psci_to_linux_errno(int errno)
92 return -EINVAL; 104 return -EINVAL;
93} 105}
94 106
95static u32 psci_power_state_pack(struct psci_power_state state) 107static u32 psci_get_version(void)
96{
97 return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
98 & PSCI_0_2_POWER_STATE_ID_MASK) |
99 ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
100 & PSCI_0_2_POWER_STATE_TYPE_MASK) |
101 ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
102 & PSCI_0_2_POWER_STATE_AFFL_MASK);
103}
104
105static void psci_power_state_unpack(u32 power_state,
106 struct psci_power_state *state)
107{
108 state->id = (power_state & PSCI_0_2_POWER_STATE_ID_MASK) >>
109 PSCI_0_2_POWER_STATE_ID_SHIFT;
110 state->type = (power_state & PSCI_0_2_POWER_STATE_TYPE_MASK) >>
111 PSCI_0_2_POWER_STATE_TYPE_SHIFT;
112 state->affinity_level =
113 (power_state & PSCI_0_2_POWER_STATE_AFFL_MASK) >>
114 PSCI_0_2_POWER_STATE_AFFL_SHIFT;
115}
116
117static int psci_get_version(void)
118{ 108{
119 int err; 109 return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
120
121 err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
122 return err;
123} 110}
124 111
125static int psci_cpu_suspend(struct psci_power_state state, 112static int psci_cpu_suspend(u32 state, unsigned long entry_point)
126 unsigned long entry_point)
127{ 113{
128 int err; 114 int err;
129 u32 fn, power_state; 115 u32 fn;
130 116
131 fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; 117 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
132 power_state = psci_power_state_pack(state); 118 err = invoke_psci_fn(fn, state, entry_point, 0);
133 err = invoke_psci_fn(fn, power_state, entry_point, 0);
134 return psci_to_linux_errno(err); 119 return psci_to_linux_errno(err);
135} 120}
136 121
137static int psci_cpu_off(struct psci_power_state state) 122static int psci_cpu_off(u32 state)
138{ 123{
139 int err; 124 int err;
140 u32 fn, power_state; 125 u32 fn;
141 126
142 fn = psci_function_id[PSCI_FN_CPU_OFF]; 127 fn = psci_function_id[PSCI_FN_CPU_OFF];
143 power_state = psci_power_state_pack(state); 128 err = invoke_psci_fn(fn, state, 0, 0);
144 err = invoke_psci_fn(fn, power_state, 0, 0);
145 return psci_to_linux_errno(err); 129 return psci_to_linux_errno(err);
146} 130}
147 131
@@ -168,30 +152,29 @@ static int psci_migrate(unsigned long cpuid)
168static int psci_affinity_info(unsigned long target_affinity, 152static int psci_affinity_info(unsigned long target_affinity,
169 unsigned long lowest_affinity_level) 153 unsigned long lowest_affinity_level)
170{ 154{
171 int err; 155 return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
172 u32 fn; 156 lowest_affinity_level, 0);
173
174 fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
175 err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
176 return err;
177} 157}
178 158
179static int psci_migrate_info_type(void) 159static int psci_migrate_info_type(void)
180{ 160{
181 int err; 161 return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
182 u32 fn; 162}
183 163
184 fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE]; 164static unsigned long psci_migrate_info_up_cpu(void)
185 err = invoke_psci_fn(fn, 0, 0, 0); 165{
186 return err; 166 return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
187} 167}
188 168
189static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node, 169static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
190 unsigned int cpu)
191{ 170{
192 int i, ret, count = 0; 171 int i, ret, count = 0;
193 struct psci_power_state *psci_states; 172 u32 *psci_states;
194 struct device_node *state_node; 173 struct device_node *state_node, *cpu_node;
174
175 cpu_node = of_get_cpu_node(cpu, NULL);
176 if (!cpu_node)
177 return -ENODEV;
195 178
196 /* 179 /*
197 * If the PSCI cpu_suspend function hook has not been initialized 180 * If the PSCI cpu_suspend function hook has not been initialized
@@ -215,13 +198,13 @@ static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
215 return -ENOMEM; 198 return -ENOMEM;
216 199
217 for (i = 0; i < count; i++) { 200 for (i = 0; i < count; i++) {
218 u32 psci_power_state; 201 u32 state;
219 202
220 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); 203 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
221 204
222 ret = of_property_read_u32(state_node, 205 ret = of_property_read_u32(state_node,
223 "arm,psci-suspend-param", 206 "arm,psci-suspend-param",
224 &psci_power_state); 207 &state);
225 if (ret) { 208 if (ret) {
226 pr_warn(" * %s missing arm,psci-suspend-param property\n", 209 pr_warn(" * %s missing arm,psci-suspend-param property\n",
227 state_node->full_name); 210 state_node->full_name);
@@ -230,9 +213,13 @@ static int __maybe_unused cpu_psci_cpu_init_idle(struct device_node *cpu_node,
230 } 213 }
231 214
232 of_node_put(state_node); 215 of_node_put(state_node);
233 pr_debug("psci-power-state %#x index %d\n", psci_power_state, 216 pr_debug("psci-power-state %#x index %d\n", state, i);
234 i); 217 if (!psci_power_state_is_valid(state)) {
235 psci_power_state_unpack(psci_power_state, &psci_states[i]); 218 pr_warn("Invalid PSCI power state %#x\n", state);
219 ret = -EINVAL;
220 goto free_mem;
221 }
222 psci_states[i] = state;
236 } 223 }
237 /* Idle states parsed correctly, initialize per-cpu pointer */ 224 /* Idle states parsed correctly, initialize per-cpu pointer */
238 per_cpu(psci_power_state, cpu) = psci_states; 225 per_cpu(psci_power_state, cpu) = psci_states;
@@ -275,6 +262,46 @@ static void psci_sys_poweroff(void)
275 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0); 262 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
276} 263}
277 264
265/*
266 * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
267 * return DENIED (which would be fatal).
268 */
269static void __init psci_init_migrate(void)
270{
271 unsigned long cpuid;
272 int type, cpu;
273
274 type = psci_ops.migrate_info_type();
275
276 if (type == PSCI_0_2_TOS_MP) {
277 pr_info("Trusted OS migration not required\n");
278 return;
279 }
280
281 if (type == PSCI_RET_NOT_SUPPORTED) {
282 pr_info("MIGRATE_INFO_TYPE not supported.\n");
283 return;
284 }
285
286 if (type != PSCI_0_2_TOS_UP_MIGRATE &&
287 type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
288 pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
289 return;
290 }
291
292 cpuid = psci_migrate_info_up_cpu();
293 if (cpuid & ~MPIDR_HWID_BITMASK) {
294 pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
295 cpuid);
296 return;
297 }
298
299 cpu = get_logical_index(cpuid);
300 resident_cpu = cpu >= 0 ? cpu : -1;
301
302 pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
303}
304
278static void __init psci_0_2_set_functions(void) 305static void __init psci_0_2_set_functions(void)
279{ 306{
280 pr_info("Using standard PSCI v0.2 function IDs\n"); 307 pr_info("Using standard PSCI v0.2 function IDs\n");
@@ -290,11 +317,8 @@ static void __init psci_0_2_set_functions(void)
290 psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE; 317 psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
291 psci_ops.migrate = psci_migrate; 318 psci_ops.migrate = psci_migrate;
292 319
293 psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
294 psci_ops.affinity_info = psci_affinity_info; 320 psci_ops.affinity_info = psci_affinity_info;
295 321
296 psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
297 PSCI_0_2_FN_MIGRATE_INFO_TYPE;
298 psci_ops.migrate_info_type = psci_migrate_info_type; 322 psci_ops.migrate_info_type = psci_migrate_info_type;
299 323
300 arm_pm_restart = psci_sys_reset; 324 arm_pm_restart = psci_sys_reset;
@@ -307,32 +331,26 @@ static void __init psci_0_2_set_functions(void)
307 */ 331 */
308static int __init psci_probe(void) 332static int __init psci_probe(void)
309{ 333{
310 int ver = psci_get_version(); 334 u32 ver = psci_get_version();
311 335
312 if (ver == PSCI_RET_NOT_SUPPORTED) { 336 pr_info("PSCIv%d.%d detected in firmware.\n",
313 /* 337 PSCI_VERSION_MAJOR(ver),
314 * PSCI versions >=0.2 mandates implementation of 338 PSCI_VERSION_MINOR(ver));
315 * PSCI_VERSION. 339
316 */ 340 if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
317 pr_err("PSCI firmware does not comply with the v0.2 spec.\n"); 341 pr_err("Conflicting PSCI version detected.\n");
318 return -EOPNOTSUPP; 342 return -EINVAL;
319 } else {
320 pr_info("PSCIv%d.%d detected in firmware.\n",
321 PSCI_VERSION_MAJOR(ver),
322 PSCI_VERSION_MINOR(ver));
323
324 if (PSCI_VERSION_MAJOR(ver) == 0 &&
325 PSCI_VERSION_MINOR(ver) < 2) {
326 pr_err("Conflicting PSCI version detected.\n");
327 return -EINVAL;
328 }
329 } 343 }
330 344
331 psci_0_2_set_functions(); 345 psci_0_2_set_functions();
332 346
347 psci_init_migrate();
348
333 return 0; 349 return 0;
334} 350}
335 351
352typedef int (*psci_initcall_t)(const struct device_node *);
353
336/* 354/*
337 * PSCI init function for PSCI versions >=0.2 355 * PSCI init function for PSCI versions >=0.2
338 * 356 *
@@ -421,6 +439,7 @@ int __init psci_dt_init(void)
421 return init_fn(np); 439 return init_fn(np);
422} 440}
423 441
442#ifdef CONFIG_ACPI
424/* 443/*
425 * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's 444 * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
426 * explicitly clarified in SBBR 445 * explicitly clarified in SBBR
@@ -441,10 +460,11 @@ int __init psci_acpi_init(void)
441 460
442 return psci_probe(); 461 return psci_probe();
443} 462}
463#endif
444 464
445#ifdef CONFIG_SMP 465#ifdef CONFIG_SMP
446 466
447static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu) 467static int __init cpu_psci_cpu_init(unsigned int cpu)
448{ 468{
449 return 0; 469 return 0;
450} 470}
@@ -469,11 +489,21 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
469} 489}
470 490
471#ifdef CONFIG_HOTPLUG_CPU 491#ifdef CONFIG_HOTPLUG_CPU
492static bool psci_tos_resident_on(int cpu)
493{
494 return cpu == resident_cpu;
495}
496
472static int cpu_psci_cpu_disable(unsigned int cpu) 497static int cpu_psci_cpu_disable(unsigned int cpu)
473{ 498{
474 /* Fail early if we don't have CPU_OFF support */ 499 /* Fail early if we don't have CPU_OFF support */
475 if (!psci_ops.cpu_off) 500 if (!psci_ops.cpu_off)
476 return -EOPNOTSUPP; 501 return -EOPNOTSUPP;
502
503 /* Trusted OS will deny CPU_OFF */
504 if (psci_tos_resident_on(cpu))
505 return -EPERM;
506
477 return 0; 507 return 0;
478} 508}
479 509
@@ -484,9 +514,8 @@ static void cpu_psci_cpu_die(unsigned int cpu)
484 * There are no known implementations of PSCI actually using the 514 * There are no known implementations of PSCI actually using the
485 * power state field, pass a sensible default for now. 515 * power state field, pass a sensible default for now.
486 */ 516 */
487 struct psci_power_state state = { 517 u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
488 .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, 518 PSCI_0_2_POWER_STATE_TYPE_SHIFT;
489 };
490 519
491 ret = psci_ops.cpu_off(state); 520 ret = psci_ops.cpu_off(state);
492 521
@@ -498,7 +527,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
498 int err, i; 527 int err, i;
499 528
500 if (!psci_ops.affinity_info) 529 if (!psci_ops.affinity_info)
501 return 1; 530 return 0;
502 /* 531 /*
503 * cpu_kill could race with cpu_die and we can 532 * cpu_kill could race with cpu_die and we can
504 * potentially end up declaring this cpu undead 533 * potentially end up declaring this cpu undead
@@ -509,7 +538,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
509 err = psci_ops.affinity_info(cpu_logical_map(cpu), 0); 538 err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
510 if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) { 539 if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
511 pr_info("CPU%d killed.\n", cpu); 540 pr_info("CPU%d killed.\n", cpu);
512 return 1; 541 return 0;
513 } 542 }
514 543
515 msleep(10); 544 msleep(10);
@@ -518,15 +547,14 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
518 547
519 pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n", 548 pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
520 cpu, err); 549 cpu, err);
521 /* Make op_cpu_kill() fail. */ 550 return -ETIMEDOUT;
522 return 0;
523} 551}
524#endif 552#endif
525#endif 553#endif
526 554
527static int psci_suspend_finisher(unsigned long index) 555static int psci_suspend_finisher(unsigned long index)
528{ 556{
529 struct psci_power_state *state = __this_cpu_read(psci_power_state); 557 u32 *state = __this_cpu_read(psci_power_state);
530 558
531 return psci_ops.cpu_suspend(state[index - 1], 559 return psci_ops.cpu_suspend(state[index - 1],
532 virt_to_phys(cpu_resume)); 560 virt_to_phys(cpu_resume));
@@ -535,7 +563,7 @@ static int psci_suspend_finisher(unsigned long index)
535static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) 563static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
536{ 564{
537 int ret; 565 int ret;
538 struct psci_power_state *state = __this_cpu_read(psci_power_state); 566 u32 *state = __this_cpu_read(psci_power_state);
539 /* 567 /*
540 * idle state index 0 corresponds to wfi, should never be called 568 * idle state index 0 corresponds to wfi, should never be called
541 * from the cpu_suspend operations 569 * from the cpu_suspend operations
@@ -543,10 +571,10 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
543 if (WARN_ON_ONCE(!index)) 571 if (WARN_ON_ONCE(!index))
544 return -EINVAL; 572 return -EINVAL;
545 573
546 if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY) 574 if (!psci_power_state_loses_context(state[index - 1]))
547 ret = psci_ops.cpu_suspend(state[index - 1], 0); 575 ret = psci_ops.cpu_suspend(state[index - 1], 0);
548 else 576 else
549 ret = __cpu_suspend(index, psci_suspend_finisher); 577 ret = cpu_suspend(index, psci_suspend_finisher);
550 578
551 return ret; 579 return ret;
552} 580}
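The psci.c rework above drops struct psci_power_state and its pack/unpack helpers in favour of passing the PSCI 0.2 power_state value around as a raw u32, validated once at parse time. The layout that psci_power_state_is_valid() and psci_power_state_loses_context() rely on, in a stand-alone sketch (mask values as in include/uapi/linux/psci.h; the example state is made up):

/*
 * PSCI 0.2 power_state layout: StateID [15:0], StateType [16],
 * AffinityLevel [25:24]. Everything else must be zero.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSCI_0_2_POWER_STATE_ID_MASK    0x0000ffffu     /* StateID,        bits [15:0]  */
#define PSCI_0_2_POWER_STATE_TYPE_MASK  (0x1u << 16)    /* StateType,      bit  [16]    */
#define PSCI_0_2_POWER_STATE_AFFL_MASK  (0x3u << 24)    /* AffinityLevel,  bits [25:24] */

static bool state_is_valid(uint32_t state)
{
        const uint32_t valid = PSCI_0_2_POWER_STATE_ID_MASK |
                               PSCI_0_2_POWER_STATE_TYPE_MASK |
                               PSCI_0_2_POWER_STATE_AFFL_MASK;

        return !(state & ~valid);
}

static bool state_loses_context(uint32_t state)
{
        return state & PSCI_0_2_POWER_STATE_TYPE_MASK;  /* power-down vs standby */
}

int main(void)
{
        uint32_t state = (1u << 16) | (1u << 24);       /* power-down, affinity level 1 */

        printf("valid=%d loses_context=%d\n",
               state_is_valid(state), state_loses_context(state));
        return 0;
}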
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 74753132c3ac..ffd3970721bf 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -105,18 +105,6 @@ static struct resource mem_res[] = {
105#define kernel_code mem_res[0] 105#define kernel_code mem_res[0]
106#define kernel_data mem_res[1] 106#define kernel_data mem_res[1]
107 107
108void __init early_print(const char *str, ...)
109{
110 char buf[256];
111 va_list ap;
112
113 va_start(ap, str);
114 vsnprintf(buf, sizeof(buf), str, ap);
115 va_end(ap);
116
117 printk("%s", buf);
118}
119
120/* 108/*
121 * The recorded values of x0 .. x3 upon kernel entry. 109 * The recorded values of x0 .. x3 upon kernel entry.
122 */ 110 */
@@ -326,12 +314,14 @@ static void __init setup_processor(void)
326 314
327static void __init setup_machine_fdt(phys_addr_t dt_phys) 315static void __init setup_machine_fdt(phys_addr_t dt_phys)
328{ 316{
329 if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) { 317 void *dt_virt = fixmap_remap_fdt(dt_phys);
330 early_print("\n" 318
331 "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" 319 if (!dt_virt || !early_init_dt_scan(dt_virt)) {
332 "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" 320 pr_crit("\n"
333 "\nPlease check your bootloader.\n", 321 "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
334 dt_phys, phys_to_virt(dt_phys)); 322 "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
323 "\nPlease check your bootloader.",
324 &dt_phys, dt_virt);
335 325
336 while (true) 326 while (true)
337 cpu_relax(); 327 cpu_relax();
@@ -374,8 +364,6 @@ void __init setup_arch(char **cmdline_p)
374{ 364{
375 setup_processor(); 365 setup_processor();
376 366
377 setup_machine_fdt(__fdt_pointer);
378
379 init_mm.start_code = (unsigned long) _text; 367 init_mm.start_code = (unsigned long) _text;
380 init_mm.end_code = (unsigned long) _etext; 368 init_mm.end_code = (unsigned long) _etext;
381 init_mm.end_data = (unsigned long) _edata; 369 init_mm.end_data = (unsigned long) _edata;
@@ -386,6 +374,8 @@ void __init setup_arch(char **cmdline_p)
386 early_fixmap_init(); 374 early_fixmap_init();
387 early_ioremap_init(); 375 early_ioremap_init();
388 376
377 setup_machine_fdt(__fdt_pointer);
378
389 parse_early_param(); 379 parse_early_param();
390 380
391 /* 381 /*
@@ -408,16 +398,13 @@ void __init setup_arch(char **cmdline_p)
408 if (acpi_disabled) { 398 if (acpi_disabled) {
409 unflatten_device_tree(); 399 unflatten_device_tree();
410 psci_dt_init(); 400 psci_dt_init();
411 cpu_read_bootcpu_ops();
412#ifdef CONFIG_SMP
413 of_smp_init_cpus();
414#endif
415 } else { 401 } else {
416 psci_acpi_init(); 402 psci_acpi_init();
417 acpi_init_cpus();
418 } 403 }
419 404
405 cpu_read_bootcpu_ops();
420#ifdef CONFIG_SMP 406#ifdef CONFIG_SMP
407 smp_init_cpus();
421 smp_build_mpidr_hash(); 408 smp_build_mpidr_hash();
422#endif 409#endif
423 410
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index d26fcd4cd6e6..1670f15ef69e 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -370,7 +370,7 @@ badframe:
370 if (show_unhandled_signals) 370 if (show_unhandled_signals)
371 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", 371 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
372 current->comm, task_pid_nr(current), __func__, 372 current->comm, task_pid_nr(current), __func__,
373 regs->pc, regs->sp); 373 regs->pc, regs->compat_sp);
374 force_sig(SIGSEGV, current); 374 force_sig(SIGSEGV, current);
375 return 0; 375 return 0;
376} 376}
@@ -407,7 +407,7 @@ badframe:
407 if (show_unhandled_signals) 407 if (show_unhandled_signals)
408 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", 408 pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
409 current->comm, task_pid_nr(current), __func__, 409 current->comm, task_pid_nr(current), __func__,
410 regs->pc, regs->sp); 410 regs->pc, regs->compat_sp);
411 force_sig(SIGSEGV, current); 411 force_sig(SIGSEGV, current);
412 return 0; 412 return 0;
413} 413}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ede186cdd452..803cfea41962 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
130/* 130/*
131 * x0 must contain the sctlr value retrieved from restored context 131 * x0 must contain the sctlr value retrieved from restored context
132 */ 132 */
133 .pushsection ".idmap.text", "ax"
133ENTRY(cpu_resume_mmu) 134ENTRY(cpu_resume_mmu)
134 ldr x3, =cpu_resume_after_mmu 135 ldr x3, =cpu_resume_after_mmu
135 msr sctlr_el1, x0 // restore sctlr_el1 136 msr sctlr_el1, x0 // restore sctlr_el1
136 isb 137 isb
137 br x3 // global jump to virtual address 138 br x3 // global jump to virtual address
138ENDPROC(cpu_resume_mmu) 139ENDPROC(cpu_resume_mmu)
140 .popsection
139cpu_resume_after_mmu: 141cpu_resume_after_mmu:
140 mov x0, #0 // return zero on success 142 mov x0, #0 // return zero on success
141 ldp x19, x20, [sp, #16] 143 ldp x19, x20, [sp, #16]
@@ -162,15 +164,12 @@ ENTRY(cpu_resume)
162#else 164#else
163 mov x7, xzr 165 mov x7, xzr
164#endif 166#endif
165 adrp x0, sleep_save_sp 167 ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
166 add x0, x0, #:lo12:sleep_save_sp
167 ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
168 ldr x0, [x0, x7, lsl #3] 168 ldr x0, [x0, x7, lsl #3]
169 /* load sp from context */ 169 /* load sp from context */
170 ldr x2, [x0, #CPU_CTX_SP] 170 ldr x2, [x0, #CPU_CTX_SP]
171 adrp x1, sleep_idmap_phys
172 /* load physical address of identity map page table in x1 */ 171 /* load physical address of identity map page table in x1 */
173 ldr x1, [x1, #:lo12:sleep_idmap_phys] 172 adrp x1, idmap_pg_dir
174 mov sp, x2 173 mov sp, x2
175 /* 174 /*
176 * cpu_do_resume expects x0 to contain context physical address 175 * cpu_do_resume expects x0 to contain context physical address
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2cb008177252..4b2121bd7f9c 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -17,6 +17,7 @@
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/acpi.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/init.h> 22#include <linux/init.h>
22#include <linux/spinlock.h> 23#include <linux/spinlock.h>
@@ -248,20 +249,20 @@ static int op_cpu_kill(unsigned int cpu)
248 * time and hope that it's dead, so let's skip the wait and just hope. 249 * time and hope that it's dead, so let's skip the wait and just hope.
249 */ 250 */
250 if (!cpu_ops[cpu]->cpu_kill) 251 if (!cpu_ops[cpu]->cpu_kill)
251 return 1; 252 return 0;
252 253
253 return cpu_ops[cpu]->cpu_kill(cpu); 254 return cpu_ops[cpu]->cpu_kill(cpu);
254} 255}
255 256
256static DECLARE_COMPLETION(cpu_died);
257
258/* 257/*
259 * called on the thread which is asking for a CPU to be shutdown - 258 * called on the thread which is asking for a CPU to be shutdown -
260 * waits until shutdown has completed, or it is timed out. 259 * waits until shutdown has completed, or it is timed out.
261 */ 260 */
262void __cpu_die(unsigned int cpu) 261void __cpu_die(unsigned int cpu)
263{ 262{
264 if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { 263 int err;
264
265 if (!cpu_wait_death(cpu, 5)) {
265 pr_crit("CPU%u: cpu didn't die\n", cpu); 266 pr_crit("CPU%u: cpu didn't die\n", cpu);
266 return; 267 return;
267 } 268 }
@@ -273,8 +274,10 @@ void __cpu_die(unsigned int cpu)
273 * verify that it has really left the kernel before we consider 274 * verify that it has really left the kernel before we consider
274 * clobbering anything it might still be using. 275 * clobbering anything it might still be using.
275 */ 276 */
276 if (!op_cpu_kill(cpu)) 277 err = op_cpu_kill(cpu);
277 pr_warn("CPU%d may not have shut down cleanly\n", cpu); 278 if (err)
279 pr_warn("CPU%d may not have shut down cleanly: %d\n",
280 cpu, err);
278} 281}
279 282
280/* 283/*
@@ -294,7 +297,7 @@ void cpu_die(void)
294 local_irq_disable(); 297 local_irq_disable();
295 298
296 /* Tell __cpu_die() that this CPU is now safe to dispose of */ 299 /* Tell __cpu_die() that this CPU is now safe to dispose of */
297 complete(&cpu_died); 300 (void)cpu_report_death();
298 301
299 /* 302 /*
300 * Actually shutdown the CPU. This must never fail. The specific hotplug 303 * Actually shutdown the CPU. This must never fail. The specific hotplug
@@ -318,57 +321,158 @@ void __init smp_prepare_boot_cpu(void)
318 set_my_cpu_offset(per_cpu_offset(smp_processor_id())); 321 set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
319} 322}
320 323
324static u64 __init of_get_cpu_mpidr(struct device_node *dn)
325{
326 const __be32 *cell;
327 u64 hwid;
328
329 /*
330 * A cpu node with missing "reg" property is
331 * considered invalid to build a cpu_logical_map
332 * entry.
333 */
334 cell = of_get_property(dn, "reg", NULL);
335 if (!cell) {
336 pr_err("%s: missing reg property\n", dn->full_name);
337 return INVALID_HWID;
338 }
339
340 hwid = of_read_number(cell, of_n_addr_cells(dn));
341 /*
342 * Non affinity bits must be set to 0 in the DT
343 */
344 if (hwid & ~MPIDR_HWID_BITMASK) {
345 pr_err("%s: invalid reg property\n", dn->full_name);
346 return INVALID_HWID;
347 }
348 return hwid;
349}
350
351/*
352 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
353 * entries and check for duplicates. If any is found just ignore the
354 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
355 * matching valid MPIDR values.
356 */
357static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
358{
359 unsigned int i;
360
361 for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
362 if (cpu_logical_map(i) == hwid)
363 return true;
364 return false;
365}
366
367/*
368 * Initialize cpu operations for a logical cpu and
369 * set it in the possible mask on success
370 */
371static int __init smp_cpu_setup(int cpu)
372{
373 if (cpu_read_ops(cpu))
374 return -ENODEV;
375
376 if (cpu_ops[cpu]->cpu_init(cpu))
377 return -ENODEV;
378
379 set_cpu_possible(cpu, true);
380
381 return 0;
382}
383
384static bool bootcpu_valid __initdata;
385static unsigned int cpu_count = 1;
386
387#ifdef CONFIG_ACPI
388/*
389 * acpi_map_gic_cpu_interface - parse processor MADT entry
390 *
391 * Carry out sanity checks on MADT processor entry and initialize
392 * cpu_logical_map on success
393 */
394static void __init
395acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
396{
397 u64 hwid = processor->arm_mpidr;
398
399 if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
400 pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
401 return;
402 }
403
404 if (!(processor->flags & ACPI_MADT_ENABLED)) {
405 pr_err("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
406 return;
407 }
408
409 if (is_mpidr_duplicate(cpu_count, hwid)) {
410 pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
411 return;
412 }
413
414 /* Check if GICC structure of boot CPU is available in the MADT */
415 if (cpu_logical_map(0) == hwid) {
416 if (bootcpu_valid) {
417 pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
418 hwid);
419 return;
420 }
421 bootcpu_valid = true;
422 return;
423 }
424
425 if (cpu_count >= NR_CPUS)
426 return;
427
428 /* map the logical cpu id to cpu MPIDR */
429 cpu_logical_map(cpu_count) = hwid;
430
431 cpu_count++;
432}
433
434static int __init
435acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
436 const unsigned long end)
437{
438 struct acpi_madt_generic_interrupt *processor;
439
440 processor = (struct acpi_madt_generic_interrupt *)header;
441 if (BAD_MADT_ENTRY(processor, end))
442 return -EINVAL;
443
444 acpi_table_print_madt_entry(header);
445
446 acpi_map_gic_cpu_interface(processor);
447
448 return 0;
449}
450#else
451#define acpi_table_parse_madt(...) do { } while (0)
452#endif
453
321/* 454/*
322 * Enumerate the possible CPU set from the device tree and build the 455 * Enumerate the possible CPU set from the device tree and build the
323 * cpu logical map array containing MPIDR values related to logical 456 * cpu logical map array containing MPIDR values related to logical
324 * cpus. Assumes that cpu_logical_map(0) has already been initialized. 457 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
325 */ 458 */
326void __init of_smp_init_cpus(void) 459void __init of_parse_and_init_cpus(void)
327{ 460{
328 struct device_node *dn = NULL; 461 struct device_node *dn = NULL;
329 unsigned int i, cpu = 1;
330 bool bootcpu_valid = false;
331 462
332 while ((dn = of_find_node_by_type(dn, "cpu"))) { 463 while ((dn = of_find_node_by_type(dn, "cpu"))) {
333 const u32 *cell; 464 u64 hwid = of_get_cpu_mpidr(dn);
334 u64 hwid;
335 465
336 /* 466 if (hwid == INVALID_HWID)
337 * A cpu node with missing "reg" property is
338 * considered invalid to build a cpu_logical_map
339 * entry.
340 */
341 cell = of_get_property(dn, "reg", NULL);
342 if (!cell) {
343 pr_err("%s: missing reg property\n", dn->full_name);
344 goto next; 467 goto next;
345 }
346 hwid = of_read_number(cell, of_n_addr_cells(dn));
347 468
348 /* 469 if (is_mpidr_duplicate(cpu_count, hwid)) {
349 * Non affinity bits must be set to 0 in the DT 470 pr_err("%s: duplicate cpu reg properties in the DT\n",
350 */ 471 dn->full_name);
351 if (hwid & ~MPIDR_HWID_BITMASK) {
352 pr_err("%s: invalid reg property\n", dn->full_name);
353 goto next; 472 goto next;
354 } 473 }
355 474
356 /* 475 /*
357 * Duplicate MPIDRs are a recipe for disaster. Scan
358 * all initialized entries and check for
359 * duplicates. If any is found just ignore the cpu.
360 * cpu_logical_map was initialized to INVALID_HWID to
361 * avoid matching valid MPIDR values.
362 */
363 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) {
364 if (cpu_logical_map(i) == hwid) {
365 pr_err("%s: duplicate cpu reg properties in the DT\n",
366 dn->full_name);
367 goto next;
368 }
369 }
370
371 /*
372 * The numbering scheme requires that the boot CPU 476 * The numbering scheme requires that the boot CPU
373 * must be assigned logical id 0. Record it so that 477 * must be assigned logical id 0. Record it so that
374 * the logical map built from DT is validated and can 478 * the logical map built from DT is validated and can
@@ -392,38 +496,58 @@ void __init of_smp_init_cpus(void)
392 continue; 496 continue;
393 } 497 }
394 498
395 if (cpu >= NR_CPUS) 499 if (cpu_count >= NR_CPUS)
396 goto next;
397
398 if (cpu_read_ops(dn, cpu) != 0)
399 goto next;
400
401 if (cpu_ops[cpu]->cpu_init(dn, cpu))
402 goto next; 500 goto next;
403 501
404 pr_debug("cpu logical map 0x%llx\n", hwid); 502 pr_debug("cpu logical map 0x%llx\n", hwid);
405 cpu_logical_map(cpu) = hwid; 503 cpu_logical_map(cpu_count) = hwid;
406next: 504next:
407 cpu++; 505 cpu_count++;
408 } 506 }
507}
508
509/*
510 * Enumerate the possible CPU set from the device tree or ACPI and build the
511 * cpu logical map array containing MPIDR values related to logical
512 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
513 */
514void __init smp_init_cpus(void)
515{
516 int i;
409 517
410 /* sanity check */ 518 if (acpi_disabled)
411 if (cpu > NR_CPUS) 519 of_parse_and_init_cpus();
412 pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n", 520 else
413 cpu, NR_CPUS); 521 /*
522 * do a walk of MADT to determine how many CPUs
523 * we have including disabled CPUs, and get information
524 * we need for SMP init
525 */
526 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
527 acpi_parse_gic_cpu_interface, 0);
528
529 if (cpu_count > NR_CPUS)
530 pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
531 cpu_count, NR_CPUS);
414 532
415 if (!bootcpu_valid) { 533 if (!bootcpu_valid) {
416 pr_err("DT missing boot CPU MPIDR, not enabling secondaries\n"); 534 pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
417 return; 535 return;
418 } 536 }
419 537
420 /* 538 /*
421 * All the cpus that made it to the cpu_logical_map have been 539 * We need to set the cpu_logical_map entries before enabling
422 * validated so set them as possible cpus. 540 * the cpus so that cpu processor description entries (DT cpu nodes
541 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
542 * with entries in cpu_logical_map while initializing the cpus.
543 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
423 */ 544 */
424 for (i = 0; i < NR_CPUS; i++) 545 for (i = 1; i < NR_CPUS; i++) {
425 if (cpu_logical_map(i) != INVALID_HWID) 546 if (cpu_logical_map(i) != INVALID_HWID) {
426 set_cpu_possible(i, true); 547 if (smp_cpu_setup(i))
548 cpu_logical_map(i) = INVALID_HWID;
549 }
550 }
427} 551}
428 552
429void __init smp_prepare_cpus(unsigned int max_cpus) 553void __init smp_prepare_cpus(unsigned int max_cpus)
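Both the DT and ACPI enumeration paths added above apply the same MPIDR sanity rule before populating cpu_logical_map: only the affinity fields may be set in the hardware ID. A stand-alone illustration of that check (HWID_BITMASK here is a stand-in mirroring arm64's MPIDR_HWID_BITMASK, Aff3 in bits [39:32] and Aff2..Aff0 in bits [23:0]):

/*
 * Illustrative MPIDR validation: reject any hwid with non-affinity bits set,
 * as of_get_cpu_mpidr() and acpi_map_gic_cpu_interface() do above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HWID_BITMASK    0xff00ffffffull         /* stand-in for MPIDR_HWID_BITMASK */

static bool hwid_is_valid(uint64_t hwid)
{
        return !(hwid & ~HWID_BITMASK);
}

int main(void)
{
        printf("%d\n", hwid_is_valid(0x0000000100ull)); /* Aff1 = 1: accepted */
        printf("%d\n", hwid_is_valid(0x0040000000ull)); /* bit 30 set: rejected */
        return 0;
}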
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 14944e5b28da..aef3605a8c47 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -49,8 +49,14 @@ static void write_pen_release(u64 val)
49} 49}
50 50
51 51
52static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu) 52static int smp_spin_table_cpu_init(unsigned int cpu)
53{ 53{
54 struct device_node *dn;
55
56 dn = of_get_cpu_node(cpu, NULL);
57 if (!dn)
58 return -ENODEV;
59
54 /* 60 /*
55 * Determine the address from which the CPU is polling. 61 * Determine the address from which the CPU is polling.
56 */ 62 */
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7daf45ae7a2..8297d502217e 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
51} 51}
52 52
53/* 53/*
54 * __cpu_suspend 54 * cpu_suspend
55 * 55 *
56 * arg: argument to pass to the finisher function 56 * arg: argument to pass to the finisher function
57 * fn: finisher function pointer 57 * fn: finisher function pointer
58 * 58 *
59 */ 59 */
60int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) 60int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
61{ 61{
62 struct mm_struct *mm = current->active_mm; 62 struct mm_struct *mm = current->active_mm;
63 int ret; 63 int ret;
@@ -82,7 +82,7 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
82 * We are resuming from reset with TTBR0_EL1 set to the 82 * We are resuming from reset with TTBR0_EL1 set to the
83 * idmap to enable the MMU; restore the active_mm mappings in 83 * idmap to enable the MMU; restore the active_mm mappings in
84 * TTBR0_EL1 unless the active_mm == &init_mm, in which case 84 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
85 * the thread entered __cpu_suspend with TTBR0_EL1 set to 85 * the thread entered cpu_suspend with TTBR0_EL1 set to
86 * reserved TTBR0 page tables and should be restored as such. 86 * reserved TTBR0 page tables and should be restored as such.
87 */ 87 */
88 if (mm == &init_mm) 88 if (mm == &init_mm)
@@ -118,7 +118,6 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
118} 118}
119 119
120struct sleep_save_sp sleep_save_sp; 120struct sleep_save_sp sleep_save_sp;
121phys_addr_t sleep_idmap_phys;
122 121
123static int __init cpu_suspend_init(void) 122static int __init cpu_suspend_init(void)
124{ 123{
@@ -132,9 +131,7 @@ static int __init cpu_suspend_init(void)
132 131
133 sleep_save_sp.save_ptr_stash = ctx_ptr; 132 sleep_save_sp.save_ptr_stash = ctx_ptr;
134 sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); 133 sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
135 sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
136 __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); 134 __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
137 __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
138 135
139 return 0; 136 return 0;
140} 137}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 1ef2940df13c..a12251c074a8 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -335,8 +335,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
335 if (call_undef_hook(regs) == 0) 335 if (call_undef_hook(regs) == 0)
336 return; 336 return;
337 337
338 if (show_unhandled_signals && unhandled_signal(current, SIGILL) && 338 if (show_unhandled_signals_ratelimited() && unhandled_signal(current, SIGILL)) {
339 printk_ratelimit()) {
340 pr_info("%s[%d]: undefined instruction: pc=%p\n", 339 pr_info("%s[%d]: undefined instruction: pc=%p\n",
341 current->comm, task_pid_nr(current), pc); 340 current->comm, task_pid_nr(current), pc);
342 dump_instr(KERN_INFO, regs); 341 dump_instr(KERN_INFO, regs);
@@ -363,7 +362,7 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
363 } 362 }
364#endif 363#endif
365 364
366 if (show_unhandled_signals && printk_ratelimit()) { 365 if (show_unhandled_signals_ratelimited()) {
367 pr_info("%s[%d]: syscall %d\n", current->comm, 366 pr_info("%s[%d]: syscall %d\n", current->comm,
368 task_pid_nr(current), (int)regs->syscallno); 367 task_pid_nr(current), (int)regs->syscallno);
369 dump_instr("", regs); 368 dump_instr("", regs);
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index ff3bddea482d..f6fe17d88da5 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
15ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ 15ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
16 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) 16 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
17 17
18# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
19# down to collect2, resulting in silent corruption of the vDSO image.
20ccflags-y += -Wl,-shared
21
18obj-y += vdso.o 22obj-y += vdso.o
19extra-y += vdso.lds vdso-offsets.h 23extra-y += vdso.lds vdso-offsets.h
20CPPFLAGS_vdso.lds += -P -C -U$(ARCH) 24CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c29865c3fe..98073332e2d0 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -38,6 +38,12 @@ jiffies = jiffies_64;
38 *(.hyp.text) \ 38 *(.hyp.text) \
39 VMLINUX_SYMBOL(__hyp_text_end) = .; 39 VMLINUX_SYMBOL(__hyp_text_end) = .;
40 40
41#define IDMAP_TEXT \
42 . = ALIGN(SZ_4K); \
43 VMLINUX_SYMBOL(__idmap_text_start) = .; \
44 *(.idmap.text) \
45 VMLINUX_SYMBOL(__idmap_text_end) = .;
46
41/* 47/*
42 * The size of the PE/COFF section that covers the kernel image, which 48 * The size of the PE/COFF section that covers the kernel image, which
43 * runs from stext to _edata, must be a round multiple of the PE/COFF 49 * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
95 SCHED_TEXT 101 SCHED_TEXT
96 LOCK_TEXT 102 LOCK_TEXT
97 HYPERVISOR_TEXT 103 HYPERVISOR_TEXT
104 IDMAP_TEXT
98 *(.fixup) 105 *(.fixup)
99 *(.gnu.warning) 106 *(.gnu.warning)
100 . = ALIGN(16); 107 . = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
167} 174}
168 175
169/* 176/*
170 * The HYP init code can't be more than a page long, 177 * The HYP init code and ID map text can't be longer than a page each,
171 * and should not cross a page boundary. 178 * and should not cross a page boundary.
172 */ 179 */
173ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, 180ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
174 "HYP init code too big or misaligned") 181 "HYP init code too big or misaligned")
182ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
183 "ID map text too big or misaligned")
175 184
176/* 185/*
177 * If padding is applied before .head.text, virt<->phys conversions will fail. 186 * If padding is applied before .head.text, virt<->phys conversions will fail.
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 519805f71876..17a8fb14f428 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,8 +17,10 @@
17 17
18#include <linux/linkage.h> 18#include <linux/linkage.h>
19 19
20#include <asm/alternative.h>
20#include <asm/asm-offsets.h> 21#include <asm/asm-offsets.h>
21#include <asm/assembler.h> 22#include <asm/assembler.h>
23#include <asm/cpufeature.h>
22#include <asm/debug-monitors.h> 24#include <asm/debug-monitors.h>
23#include <asm/esr.h> 25#include <asm/esr.h>
24#include <asm/fpsimdmacros.h> 26#include <asm/fpsimdmacros.h>
@@ -808,10 +810,7 @@
808 * Call into the vgic backend for state saving 810 * Call into the vgic backend for state saving
809 */ 811 */
810.macro save_vgic_state 812.macro save_vgic_state
811 adr x24, __vgic_sr_vectors 813 alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
812 ldr x24, [x24, VGIC_SAVE_FN]
813 kern_hyp_va x24
814 blr x24
815 mrs x24, hcr_el2 814 mrs x24, hcr_el2
816 mov x25, #HCR_INT_OVERRIDE 815 mov x25, #HCR_INT_OVERRIDE
817 neg x25, x25 816 neg x25, x25
@@ -828,10 +827,7 @@
 	orr x24, x24, #HCR_INT_OVERRIDE
 	orr x24, x24, x25
 	msr hcr_el2, x24
-	adr x24, __vgic_sr_vectors
-	ldr x24, [x24, #VGIC_RESTORE_FN]
-	kern_hyp_va x24
-	blr x24
+	alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
 .endm
 
 .macro save_timer_state
@@ -1062,12 +1058,6 @@ ENTRY(__kvm_flush_vm_context)
 	ret
 ENDPROC(__kvm_flush_vm_context)
 
-	// struct vgic_sr_vectors __vgi_sr_vectors;
-	.align 3
-ENTRY(__vgic_sr_vectors)
-	.skip VGIC_SR_VECTOR_SZ
-ENDPROC(__vgic_sr_vectors)
-
 __kvm_hyp_panic:
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
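
The vgic save/restore paths above no longer bounce through the __vgic_sr_vectors function-pointer table: alternative_insn emits the GICv2 call by default and lets the boot-time alternatives pass patch in the GICv3 call on CPUs that expose ARM64_HAS_SYSREG_GIC_CPUIF. A self-contained user-space model of that idea (nothing below is kernel code; all names are illustrative) resolves each patch site once, so the hot path afterwards is a direct call:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* One patch site: a default sequence and an alternative gated on a feature. */
struct alt_entry {
	void (**site)(void);   /* pointer the hot path calls through        */
	void (*dflt)(void);    /* default, e.g. the GICv2 save sequence     */
	void (*alt)(void);     /* alternative, e.g. the GICv3 save sequence */
	int feature;           /* capability index, cf. ARM64_HAS_...       */
};

static void save_vgic_v2(void) { puts("saving GICv2 state"); }
static void save_vgic_v3(void) { puts("saving GICv3 state"); }

static void (*save_vgic)(void);

static struct alt_entry alt_table[] = {
	{ &save_vgic, save_vgic_v2, save_vgic_v3, 1 /* "GIC CPUIF" */ },
};

/* Run once at "boot": resolve every site according to the detected features. */
static void apply_alternatives(const bool *cpu_has)
{
	for (size_t i = 0; i < sizeof(alt_table) / sizeof(alt_table[0]); i++)
		*alt_table[i].site = cpu_has[alt_table[i].feature]
				   ? alt_table[i].alt : alt_table[i].dflt;
}

int main(void)
{
	bool cpu_has[4] = { [1] = true };  /* pretend the GICv3 CPU interface exists */
	apply_alternatives(cpu_has);
	save_vgic();                       /* prints the GICv3 variant */
	return 0;
}

The kernel goes one step further and rewrites the branch instruction itself rather than a pointer, which is why the __vgic_sr_vectors table above and the vgic_arch_setup() callback removed from virt/kvm/arm/vgic.c further down are no longer needed.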
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 773d37a14039..9d84feb41a16 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,3 +4,5 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_ARM64_PTDUMP) += dump.o
+
+CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2560e1e1562e..bdeb5d38c2dd 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -22,84 +22,11 @@
 #include <linux/init.h>
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
 
 #include "proc-macros.S"
 
 /*
- * __flush_dcache_all()
- *
- * Flush the whole D-cache.
- *
- * Corrupted registers: x0-x7, x9-x11
- */
-__flush_dcache_all:
-	dmb sy // ensure ordering with previous memory accesses
-	mrs x0, clidr_el1 // read clidr
-	and x3, x0, #0x7000000 // extract loc from clidr
-	lsr x3, x3, #23 // left align loc bit field
-	cbz x3, finished // if loc is 0, then no need to clean
-	mov x10, #0 // start clean at cache level 0
-loop1:
-	add x2, x10, x10, lsr #1 // work out 3x current cache level
-	lsr x1, x0, x2 // extract cache type bits from clidr
-	and x1, x1, #7 // mask of the bits for current cache only
-	cmp x1, #2 // see what cache we have at this level
-	b.lt skip // skip if no cache, or just i-cache
-	save_and_disable_irqs x9 // make CSSELR and CCSIDR access atomic
-	msr csselr_el1, x10 // select current cache level in csselr
-	isb // isb to sych the new cssr&csidr
-	mrs x1, ccsidr_el1 // read the new ccsidr
-	restore_irqs x9
-	and x2, x1, #7 // extract the length of the cache lines
-	add x2, x2, #4 // add 4 (line length offset)
-	mov x4, #0x3ff
-	and x4, x4, x1, lsr #3 // find maximum number on the way size
-	clz w5, w4 // find bit position of way size increment
-	mov x7, #0x7fff
-	and x7, x7, x1, lsr #13 // extract max number of the index size
-loop2:
-	mov x9, x4 // create working copy of max way size
-loop3:
-	lsl x6, x9, x5
-	orr x11, x10, x6 // factor way and cache number into x11
-	lsl x6, x7, x2
-	orr x11, x11, x6 // factor index number into x11
-	dc cisw, x11 // clean & invalidate by set/way
-	subs x9, x9, #1 // decrement the way
-	b.ge loop3
-	subs x7, x7, #1 // decrement the index
-	b.ge loop2
-skip:
-	add x10, x10, #2 // increment cache number
-	cmp x3, x10
-	b.gt loop1
-finished:
-	mov x10, #0 // swith back to cache level 0
-	msr csselr_el1, x10 // select current cache level in csselr
-	dsb sy
-	isb
-	ret
-ENDPROC(__flush_dcache_all)
-
-/*
- * flush_cache_all()
- *
- * Flush the entire cache system. The data cache flush is now achieved
- * using atomic clean / invalidates working outwards from L1 cache. This
- * is done using Set/Way based cache maintainance instructions. The
- * instruction cache can still be invalidated back to the point of
- * unification in a single instruction.
- */
-ENTRY(flush_cache_all)
-	mov x12, lr
-	bl __flush_dcache_all
-	mov x0, #0
-	ic ialluis // I+BTB cache invalidate
-	ret x12
-ENDPROC(flush_cache_all)
-
-/*
  * flush_icache_range(start,end)
  *
  * Ensure that the I and D caches are coherent within specified region.
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index baa758d37021..76c1e6cd36fc 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -92,6 +92,14 @@ static void reset_context(void *info)
 	unsigned int cpu = smp_processor_id();
 	struct mm_struct *mm = current->active_mm;
 
+	/*
+	 * current->active_mm could be init_mm for the idle thread immediately
+	 * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
+	 * the reserved value, so no need to reset any context.
+	 */
+	if (mm == &init_mm)
+		return;
+
 	smp_rmb();
 	asid = cpu_last_asid + cpu;
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0948d327d013..b1fc69cd1499 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -115,8 +115,7 @@ static void __do_user_fault(struct task_struct *tsk, unsigned long addr,
 {
 	struct siginfo si;
 
-	if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
-	    printk_ratelimit()) {
+	if (show_unhandled_signals_ratelimited() && unhandled_signal(tsk, sig)) {
 		pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
 			tsk->comm, task_pid_nr(tsk), fault_name(esr), sig,
 			addr, esr);
@@ -478,12 +477,19 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 				 struct pt_regs *regs)
 {
 	struct siginfo info;
+	struct task_struct *tsk = current;
+
+	if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS))
+		pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n",
+				    tsk->comm, task_pid_nr(tsk),
+				    esr_get_class_string(esr), (void *)regs->pc,
+				    (void *)regs->sp);
 
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
 	info.si_code = BUS_ADRALN;
 	info.si_addr = (void __user *)addr;
-	arm64_notify_die("", regs, &info, esr);
+	arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
 }
 
 static struct fault_info debug_fault_info[] = {
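
Both fault.c hunks are about logging unhandled user faults without flooding the console: __do_user_fault() swaps the global printk_ratelimit() (one state shared by every caller in the kernel) for a show_unhandled_signals_ratelimited() helper with private state, and do_sp_pc_abort() gains a rate-limited report before raising SIGBUS. The helper itself is defined outside this excerpt; a plausible shape, assuming only the generic linux/ratelimit.h API (this is a sketch, not necessarily the series' exact definition), would be:

#include <linux/ratelimit.h>

extern int show_unhandled_signals;	/* existing debug sysctl flag */

/* Sketch only: one private ratelimit state per call site, consulted only
 * when the sysctl is enabled so tokens are not wasted otherwise. */
#define show_unhandled_signals_ratelimited()				\
({									\
	static DEFINE_RATELIMIT_STATE(_rs,				\
				      DEFAULT_RATELIMIT_INTERVAL,	\
				      DEFAULT_RATELIMIT_BURST);		\
	show_unhandled_signals && __ratelimit(&_rs);			\
})

With a private state, a user task hammering one fault path can no longer starve unrelated printk_ratelimit() users elsewhere in the kernel.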
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index b6f14e8d2121..4dfa3975ce5b 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -102,7 +102,6 @@ EXPORT_SYMBOL(flush_dcache_page);
 /*
  * Additional functions defined in assembly.
  */
-EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 597831bdddf3..ad87ce826cce 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -262,7 +262,7 @@ static void __init free_unused_memmap(void)
 		 * memmap entries are valid from the bank end aligned to
 		 * MAX_ORDER_NR_PAGES.
 		 */
-		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
+		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
 				 MAX_ORDER_NR_PAGES);
 	}
 
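
The one-liner changes how the end pfn of a memblock bank is computed: instead of adding a pfn-converted size to the previously computed start pfn, it converts the bank's physical end address directly. The two expressions are not interchangeable, because the pfn conversion is a right shift and the shift does not distribute over addition (and it also removes any dependence on start, which the surrounding loop may have adjusted). A tiny stand-alone illustration of the arithmetic, with made-up numbers:

#include <stdio.h>

#define PAGE_SHIFT 12
#define phys_to_pfn(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long base = 0x80000800UL;	/* not page aligned */
	unsigned long size = 0x00000800UL;

	/* old style: pfn(base) + pfn(size) -> 0x80000, one page short */
	printf("%#lx\n", phys_to_pfn(base) + phys_to_pfn(size));
	/* new style: pfn(base + size)      -> 0x80001 */
	printf("%#lx\n", phys_to_pfn(base + size));
	return 0;
}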
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5b8b664422d3..82d3435bf14f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/libfdt.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <linux/memblock.h>
@@ -643,3 +644,68 @@ void __set_fixmap(enum fixed_addresses idx,
 		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
 	}
 }
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+	pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+	int granularity, size, offset;
+	void *dt_virt;
+
+	/*
+	 * Check whether the physical FDT address is set and meets the minimum
+	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
+	 * at least 8 bytes so that we can always access the size field of the
+	 * FDT header after mapping the first chunk, double check here if that
+	 * is indeed the case.
+	 */
+	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
+	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
+		return NULL;
+
+	/*
+	 * Make sure that the FDT region can be mapped without the need to
+	 * allocate additional translation table pages, so that it is safe
+	 * to call create_mapping() this early.
+	 *
+	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
+	 * be in the same PMD as the rest of the fixmap.
+	 * On 4k pages, we'll use section mappings for the FDT so we only
+	 * have to be in the same PUD.
+	 */
+	BUILD_BUG_ON(dt_virt_base % SZ_2M);
+
+	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
+		BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
+			     __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);
+
+		granularity = PAGE_SIZE;
+	} else {
+		BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
+			     __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);
+
+		granularity = PMD_SIZE;
+	}
+
+	offset = dt_phys % granularity;
+	dt_virt = (void *)dt_virt_base + offset;
+
+	/* map the first chunk so we can read the size from the header */
+	create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+		       granularity, prot);
+
+	if (fdt_check_header(dt_virt) != 0)
+		return NULL;
+
+	size = fdt_totalsize(dt_virt);
+	if (size > MAX_FDT_SIZE)
+		return NULL;
+
+	if (offset + size > granularity)
+		create_mapping(round_down(dt_phys, granularity), dt_virt_base,
+			       round_up(offset + size, granularity), prot);
+
+	memblock_reserve(dt_phys, size);
+
+	return dt_virt;
+}
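
fixmap_remap_fdt() works in two steps: it first maps a minimum-aligned chunk just large enough to read the FDT header, then, if fdt_totalsize() says the blob spills past that chunk, extends the mapping to the full size, and finally reserves the region in memblock so the early allocator leaves it alone. A hedged sketch of how an early-boot caller could use it, assuming early_init_dt_scan() from the generic FDT code (the surrounding function name is illustrative; the real call site is in arch/arm64/kernel/setup.c, outside this excerpt):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_fdt.h>

extern void *fixmap_remap_fdt(phys_addr_t dt_phys);

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	/* NULL means a bad address, bad alignment, a corrupt header,
	 * or an oversized blob. */
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt))
		panic("Invalid device tree blob at physical address %pa\n",
		      &dt_phys);
}

Because the blob is now reached through the fixmap rather than the linear map, the old requirement that the dtb sit within 512MB of the kernel image goes away.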
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e19b9b..39139a3aa16d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,52 +46,6 @@
 #define MAIR(attr, mt) ((attr) << ((mt) * 8))
 
 /*
- * cpu_cache_off()
- *
- * Turn the CPU D-cache off.
- */
-ENTRY(cpu_cache_off)
-	mrs x0, sctlr_el1
-	bic x0, x0, #1 << 2 // clear SCTLR.C
-	msr sctlr_el1, x0
-	isb
-	ret
-ENDPROC(cpu_cache_off)
-
-/*
- * cpu_reset(loc)
- *
- * Perform a soft reset of the system. Put the CPU into the same state
- * as it would be if it had been reset, and branch to what would be the
- * reset vector. It must be executed with the flat identity mapping.
- *
- *  - loc - location to jump to for soft reset
- */
-	.align 5
-ENTRY(cpu_reset)
-	mrs x1, sctlr_el1
-	bic x1, x1, #1
-	msr sctlr_el1, x1 // disable the MMU
-	isb
-	ret x0
-ENDPROC(cpu_reset)
-
-ENTRY(cpu_soft_restart)
-	/* Save address of cpu_reset() and reset address */
-	mov x19, x0
-	mov x20, x1
-
-	/* Turn D-cache off */
-	bl cpu_cache_off
-
-	/* Push out all dirty data, and ensure cache is empty */
-	bl flush_cache_all
-
-	mov x0, x20
-	ret x19
-ENDPROC(cpu_soft_restart)
-
-/*
  * cpu_do_idle()
  *
  * Idle the processor (wait for interrupt).
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 50a508714f87..8b888b12a475 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -572,6 +572,7 @@ static void __init early_reserve_mem_dt(void)
 	int len;
 	const __be32 *prop;
 
+	early_init_fdt_reserve_self();
 	early_init_fdt_scan_reserved_mem();
 
 	dt_root = of_get_flat_dt_root();
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index cde35c5d0191..f2dd23a32267 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -580,11 +580,6 @@ void __init early_init_fdt_scan_reserved_mem(void)
 	if (!initial_boot_params)
 		return;
 
-	/* Reserve the dtb region */
-	early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
-					  fdt_totalsize(initial_boot_params),
-					  0);
-
 	/* Process header /memreserve/ fields */
 	for (n = 0; ; n++) {
 		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
@@ -598,6 +593,20 @@ void __init early_init_fdt_scan_reserved_mem(void)
 }
 
 /**
+ * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
+ */
+void __init early_init_fdt_reserve_self(void)
+{
+	if (!initial_boot_params)
+		return;
+
+	/* Reserve the dtb region */
+	early_init_dt_reserve_memory_arch(__pa(initial_boot_params),
+					  fdt_totalsize(initial_boot_params),
+					  0);
+}
+
+/**
  * of_scan_flat_dt - scan flattened tree blob and call callback on each.
  * @it: callback function
  * @data: context data pointer
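
The dtb self-reservation is split out of early_init_fdt_scan_reserved_mem() into its own helper so architectures that place or reserve the blob themselves (arm64 now reserves it from fixmap_remap_fdt(), above, where __pa(initial_boot_params) would no longer be meaningful) are not forced into a second reservation; callers that still want the old behaviour, like the powerpc hunk earlier, simply call both. A minimal sketch of that calling pattern, assuming only the two helpers exported here (the wrapper name is made up for illustration):

#include <linux/init.h>
#include <linux/of_fdt.h>

/* Illustrative arch hook: keep the blob itself, then its /memreserve/
 * entries and reserved-memory nodes, away from the page allocator. */
static void __init arch_reserve_fdt_regions(void)
{
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();
}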
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 587ee507965d..fd627a58068f 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -64,6 +64,7 @@ extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern void early_init_fdt_scan_reserved_mem(void);
+extern void early_init_fdt_reserve_self(void);
 extern void early_init_dt_add_memory_arch(u64 base, u64 size);
 extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
 					     bool no_map);
@@ -91,6 +92,7 @@ extern u64 fdt_translate_address(const void *blob, int node_offset);
 extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
+static inline void early_init_fdt_reserve_self(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
 static inline void unflatten_device_tree(void) {}
 static inline void unflatten_and_copy_device_tree(void) {}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index f94d887d20e6..bc40137a022d 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -2126,9 +2126,6 @@ int kvm_vgic_hyp_init(void)
 		goto out_free_irq;
 	}
 
-	/* Callback into for arch code for setup */
-	vgic_arch_setup(vgic);
-
 	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
 
 	return 0;