-rw-r--r--Documentation/admin-guide/perf/imx-ddr.rst52
-rw-r--r--Documentation/arm64/index.rst1
-rw-r--r--Documentation/arm64/kasan-offsets.sh27
-rw-r--r--Documentation/arm64/memory.rst123
-rw-r--r--Documentation/arm64/tagged-address-abi.rst156
-rw-r--r--Documentation/arm64/tagged-pointers.rst21
-rw-r--r--Documentation/devicetree/bindings/cpu/cpu-topology.txt (renamed from Documentation/devicetree/bindings/arm/topology.txt)256
-rw-r--r--MAINTAINERS16
-rw-r--r--Makefile4
-rw-r--r--arch/Kconfig14
-rw-r--r--arch/arm/include/asm/topology.h20
-rw-r--r--arch/arm/kernel/topology.c60
-rw-r--r--arch/arm64/Kbuild6
-rw-r--r--arch/arm64/Kconfig42
-rw-r--r--arch/arm64/Makefile14
-rw-r--r--arch/arm64/include/asm/assembler.h28
-rw-r--r--arch/arm64/include/asm/atomic.h5
-rw-r--r--arch/arm64/include/asm/cache.h2
-rw-r--r--arch/arm64/include/asm/compat.h2
-rw-r--r--arch/arm64/include/asm/cpu_ops.h3
-rw-r--r--arch/arm64/include/asm/cpufeature.h28
-rw-r--r--arch/arm64/include/asm/cputype.h21
-rw-r--r--arch/arm64/include/asm/debug-monitors.h3
-rw-r--r--arch/arm64/include/asm/dma-mapping.h3
-rw-r--r--arch/arm64/include/asm/efi.h4
-rw-r--r--arch/arm64/include/asm/esr.h3
-rw-r--r--arch/arm64/include/asm/exception.h2
-rw-r--r--arch/arm64/include/asm/fpsimd.h2
-rw-r--r--arch/arm64/include/asm/futex.h3
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h3
-rw-r--r--arch/arm64/include/asm/io.h7
-rw-r--r--arch/arm64/include/asm/irqflags.h5
-rw-r--r--arch/arm64/include/asm/kasan.h11
-rw-r--r--arch/arm64/include/asm/memory.h141
-rw-r--r--arch/arm64/include/asm/mmu.h2
-rw-r--r--arch/arm64/include/asm/mmu_context.h4
-rw-r--r--arch/arm64/include/asm/pci.h2
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h23
-rw-r--r--arch/arm64/include/asm/pointer_auth.h2
-rw-r--r--arch/arm64/include/asm/proc-fns.h2
-rw-r--r--arch/arm64/include/asm/processor.h15
-rw-r--r--arch/arm64/include/asm/ptrace.h5
-rw-r--r--arch/arm64/include/asm/signal32.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h32
-rw-r--r--arch/arm64/include/asm/thread_info.h29
-rw-r--r--arch/arm64/include/asm/tlbflush.h1
-rw-r--r--arch/arm64/include/asm/topology.h23
-rw-r--r--arch/arm64/include/asm/uaccess.h12
-rw-r--r--arch/arm64/include/asm/vdso.h4
-rw-r--r--arch/arm64/include/asm/vdso_datapage.h4
-rw-r--r--arch/arm64/include/uapi/asm/stat.h17
-rw-r--r--arch/arm64/kernel/cpufeature.c2
-rw-r--r--arch/arm64/kernel/cpuidle.c50
-rw-r--r--arch/arm64/kernel/cpuinfo.c2
-rw-r--r--arch/arm64/kernel/entry.S36
-rw-r--r--arch/arm64/kernel/head.S108
-rw-r--r--arch/arm64/kernel/hibernate-asm.S8
-rw-r--r--arch/arm64/kernel/hibernate.c2
-rw-r--r--arch/arm64/kernel/image-vars.h51
-rw-r--r--arch/arm64/kernel/image.h42
-rw-r--r--arch/arm64/kernel/insn.c2
-rw-r--r--arch/arm64/kernel/kaslr.c11
-rw-r--r--arch/arm64/kernel/kexec_image.c2
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c22
-rw-r--r--arch/arm64/kernel/module-plts.c2
-rw-r--r--arch/arm64/kernel/perf_event.c2
-rw-r--r--arch/arm64/kernel/process.c76
-rw-r--r--arch/arm64/kernel/psci.c10
-rw-r--r--arch/arm64/kernel/ptrace.c2
-rw-r--r--arch/arm64/kernel/setup.c20
-rw-r--r--arch/arm64/kernel/smp_spin_table.c2
-rw-r--r--arch/arm64/kernel/topology.c312
-rw-r--r--arch/arm64/kernel/traps.c20
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S11
-rw-r--r--arch/arm64/kvm/hyp/switch.c2
-rw-r--r--arch/arm64/kvm/va_layout.c14
-rw-r--r--arch/arm64/lib/Makefile2
-rw-r--r--arch/arm64/lib/error-inject.c18
-rw-r--r--arch/arm64/mm/dump.c24
-rw-r--r--arch/arm64/mm/fault.c44
-rw-r--r--arch/arm64/mm/init.c35
-rw-r--r--arch/arm64/mm/kasan_init.c9
-rw-r--r--arch/arm64/mm/mmu.c32
-rw-r--r--arch/arm64/mm/numa.c2
-rw-r--r--arch/arm64/mm/pageattr.c2
-rw-r--r--arch/arm64/mm/proc.S20
-rw-r--r--arch/powerpc/Makefile2
-rw-r--r--arch/powerpc/include/asm/error-injection.h13
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/kernel/smpboot.c3
-rw-r--r--arch/x86/include/asm/error-injection.h13
-rw-r--r--drivers/acpi/arm64/iort.c6
-rw-r--r--drivers/acpi/pptt.c53
-rw-r--r--drivers/base/Kconfig2
-rw-r--r--drivers/base/arch_topology.c298
-rw-r--r--drivers/char/Kconfig9
-rw-r--r--drivers/char/random.c14
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/Makefile1
-rw-r--r--drivers/cpuidle/cpuidle-arm.c13
-rw-r--r--drivers/cpuidle/cpuidle-psci.c236
-rw-r--r--drivers/firmware/psci/psci.c167
-rw-r--r--drivers/firmware/psci/psci_checker.c16
-rw-r--r--drivers/of/fdt.c14
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c65
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c75
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c4
-rw-r--r--drivers/perf/qcom_l2_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c4
-rw-r--r--include/asm-generic/error-injection.h6
-rw-r--r--include/linux/acpi.h5
-rw-r--r--include/linux/arch_topology.h26
-rw-r--r--include/linux/cpuidle.h17
-rw-r--r--include/linux/error-injection.h6
-rw-r--r--include/linux/psci.h4
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/topology.h1
-rw-r--r--include/uapi/linux/prctl.h5
-rw-r--r--init/Kconfig3
-rw-r--r--kernel/sys.c16
-rw-r--r--scripts/Makefile.kasan11
-rwxr-xr-xscripts/tools-support-relr.sh16
-rw-r--r--tools/testing/selftests/arm64/.gitignore1
-rw-r--r--tools/testing/selftests/arm64/Makefile11
-rwxr-xr-xtools/testing/selftests/arm64/run_tags_test.sh12
-rw-r--r--tools/testing/selftests/arm64/tags_test.c31
129 files changed, 2258 insertions, 1215 deletions
diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst
new file mode 100644
index 000000000000..517a205abad6
--- /dev/null
+++ b/Documentation/admin-guide/perf/imx-ddr.rst
@@ -0,0 +1,52 @@
1=====================================================
2Freescale i.MX8 DDR Performance Monitoring Unit (PMU)
3=====================================================
4
5There are no performance counters inside the DRAM controller, so performance
6signals are brought out to the edge of the controller where a set of 4 x 32 bit
7counters is implemented. This is controlled by the CSV modes programmed in the
8counter control register, which causes a large number of PERF signals to be generated.
9
10Selection of the value for each counter is done via the config registers. There
11is one register for each counter. Counter 0 is special in that it always counts
12"time"; when it expires, it locks itself and the other counters, and an
13interrupt is raised. If any other counter overflows, it continues counting, and
14no interrupt is raised.
15
16The "format" directory describes format of the config (event ID) and config1
17(AXI filtering) fields of the perf_event_attr structure, see /sys/bus/event_source/
18devices/imx8_ddr0/format/. The "events" directory describes the events types
19hardware supported that can be used with perf tool, see /sys/bus/event_source/
20devices/imx8_ddr0/events/.
21 e.g.::
22 perf stat -a -e imx8_ddr0/cycles/ cmd
23 perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd
24
25AXI filtering is only used by CSV modes 0x41 (axid-read) and 0x42 (axid-write)
26to count reads or writes that match the filter setting. The filter setting varies
27across DRAM controller implementations and is distinguished by quirks
28in the driver.
29
30* With DDR_CAP_AXI_ID_FILTER quirk.
31 Filter is defined with two configuration parts:
 32 --AXI_ID defines the AxID matching value.
 33 --AXI_MASKING defines which bits of AxID are meaningful for the matching.
 34 0: corresponding bit is masked.
 35 1: corresponding bit is not masked, i.e. used to do the matching.
36
 37 AXI_ID and AXI_MASKING are mapped onto the DPCR1 register of the performance
 38 counter. When the non-masked bits match the corresponding AXI_ID bits, the
 39 counter is incremented. The perf counter is incremented if
 40 (AxID & AXI_MASKING) == (AXI_ID & AXI_MASKING)
41
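 For illustration, the same rule as a C predicate (a sketch only; the
 names mirror the register fields described above, not a driver API)::

	/* A transaction matches when all non-masked AxID bits equal AXI_ID. */
	static int axid_match(unsigned int axid, unsigned int axi_id,
			      unsigned int axi_masking)
	{
		return (axid & axi_masking) == (axi_id & axi_masking);
	}
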
 42 This filter doesn't support filtering on different AXI IDs for the axid-read
 43 and axid-write events at the same time, as the filter is shared between counters.
44 e.g.::
45 perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
46 perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd
47
 48 NOTE: axi_mask is inverted in userspace (i.e. set bits are bits to mask), and
 49 it is inverted back automatically by the driver, so that the user can just
 50 specify axi_id to monitor a specific ID, rather than having to specify axi_mask.
51 e.g.::
 52 perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd (monitors ARID=0x12)
diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst
index 96b696ba4e6c..5c0c69dc58aa 100644
--- a/Documentation/arm64/index.rst
+++ b/Documentation/arm64/index.rst
@@ -16,6 +16,7 @@ ARM64 Architecture
16 pointer-authentication 16 pointer-authentication
17 silicon-errata 17 silicon-errata
18 sve 18 sve
19 tagged-address-abi
19 tagged-pointers 20 tagged-pointers
20 21
21.. only:: subproject and html 22.. only:: subproject and html
diff --git a/Documentation/arm64/kasan-offsets.sh b/Documentation/arm64/kasan-offsets.sh
new file mode 100644
index 000000000000..2b7a021db363
--- /dev/null
+++ b/Documentation/arm64/kasan-offsets.sh
@@ -0,0 +1,27 @@
1#!/bin/sh
2
3# Print out the KASAN_SHADOW_OFFSETS required to place the KASAN SHADOW
4# start address at the mid-point of the kernel VA space
5
6print_kasan_offset () {
7 printf "%02d\t" $1
8 printf "0x%08x00000000\n" $(( (0xffffffff & (-1 << ($1 - 1 - 32))) \
9 + (1 << ($1 - 32 - $2)) \
10 - (1 << (64 - 32 - $2)) ))
11}
12
13echo KASAN_SHADOW_SCALE_SHIFT = 3
14printf "VABITS\tKASAN_SHADOW_OFFSET\n"
15print_kasan_offset 48 3
16print_kasan_offset 47 3
17print_kasan_offset 42 3
18print_kasan_offset 39 3
19print_kasan_offset 36 3
20echo
21echo KASAN_SHADOW_SCALE_SHIFT = 4
22printf "VABITS\tKASAN_SHADOW_OFFSET\n"
23print_kasan_offset 48 4
24print_kasan_offset 47 4
25print_kasan_offset 42 4
26print_kasan_offset 39 4
27print_kasan_offset 36 4
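# For reference, the first table above prints the following values, which
# match the generic-KASAN KASAN_SHADOW_OFFSET defaults added to
# arch/arm64/Kconfig by this series (KASAN_SHADOW_SCALE_SHIFT = 3):
#
#   48  0xdfffa00000000000
#   47  0xdfffd00000000000
#   42  0xdffffe8000000000
#   39  0xdfffffd000000000
#   36  0xdffffffa00000000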
diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index 464b880fc4b7..b040909e45f8 100644
--- a/Documentation/arm64/memory.rst
+++ b/Documentation/arm64/memory.rst
@@ -14,6 +14,10 @@ with the 4KB page configuration, allowing 39-bit (512GB) or 48-bit
1464KB pages, only 2 levels of translation tables, allowing 42-bit (4TB) 1464KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
15virtual address, are used but the memory layout is the same. 15virtual address, are used but the memory layout is the same.
16 16
17ARMv8.2 adds optional support for Large Virtual Address space. This is
18only available when running with a 64KB page size and expands the
19number of descriptors in the first level of translation.
20
17User addresses have bits 63:48 set to 0 while the kernel addresses have 21User addresses have bits 63:48 set to 0 while the kernel addresses have
18the same bits set to 1. TTBRx selection is given by bit 63 of the 22the same bits set to 1. TTBRx selection is given by bit 63 of the
19virtual address. The swapper_pg_dir contains only kernel (global) 23virtual address. The swapper_pg_dir contains only kernel (global)
@@ -22,40 +26,43 @@ The swapper_pg_dir address is written to TTBR1 and never written to
22TTBR0. 26TTBR0.
23 27
24 28
25AArch64 Linux memory layout with 4KB pages + 3 levels:: 29AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit)::
26
27 Start End Size Use
28 -----------------------------------------------------------------------
29 0000000000000000 0000007fffffffff 512GB user
30 ffffff8000000000 ffffffffffffffff 512GB kernel
31
32
33AArch64 Linux memory layout with 4KB pages + 4 levels::
34 30
35 Start End Size Use 31 Start End Size Use
36 ----------------------------------------------------------------------- 32 -----------------------------------------------------------------------
37 0000000000000000 0000ffffffffffff 256TB user 33 0000000000000000 0000ffffffffffff 256TB user
38 ffff000000000000 ffffffffffffffff 256TB kernel 34 ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map
39 35 ffff800000000000 ffff9fffffffffff 32TB kasan shadow region
40 36 ffffa00000000000 ffffa00007ffffff 128MB bpf jit region
41AArch64 Linux memory layout with 64KB pages + 2 levels:: 37 ffffa00008000000 ffffa0000fffffff 128MB modules
38 ffffa00010000000 fffffdffbffeffff ~93TB vmalloc
39 fffffdffbfff0000 fffffdfffe5f8fff ~998MB [guard region]
40 fffffdfffe5f9000 fffffdfffe9fffff 4124KB fixed mappings
41 fffffdfffea00000 fffffdfffebfffff 2MB [guard region]
42 fffffdfffec00000 fffffdffffbfffff 16MB PCI I/O space
43 fffffdffffc00000 fffffdffffdfffff 2MB [guard region]
44 fffffdffffe00000 ffffffffffdfffff 2TB vmemmap
45 ffffffffffe00000 ffffffffffffffff 2MB [guard region]
46
47
48AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support)::
42 49
43 Start End Size Use 50 Start End Size Use
44 ----------------------------------------------------------------------- 51 -----------------------------------------------------------------------
45 0000000000000000 000003ffffffffff 4TB user 52 0000000000000000 000fffffffffffff 4PB user
46 fffffc0000000000 ffffffffffffffff 4TB kernel 53 fff0000000000000 fff7ffffffffffff 2PB kernel logical memory map
47 54 fff8000000000000 fffd9fffffffffff 1440TB [gap]
48 55 fffda00000000000 ffff9fffffffffff 512TB kasan shadow region
49AArch64 Linux memory layout with 64KB pages + 3 levels:: 56 ffffa00000000000 ffffa00007ffffff 128MB bpf jit region
50 57 ffffa00008000000 ffffa0000fffffff 128MB modules
51 Start End Size Use 58 ffffa00010000000 fffff81ffffeffff ~88TB vmalloc
52 ----------------------------------------------------------------------- 59 fffff81fffff0000 fffffc1ffe58ffff ~3TB [guard region]
53 0000000000000000 0000ffffffffffff 256TB user 60 fffffc1ffe590000 fffffc1ffe9fffff 4544KB fixed mappings
54 ffff000000000000 ffffffffffffffff 256TB kernel 61 fffffc1ffea00000 fffffc1ffebfffff 2MB [guard region]
55 62 fffffc1ffec00000 fffffc1fffbfffff 16MB PCI I/O space
56 63 fffffc1fffc00000 fffffc1fffdfffff 2MB [guard region]
57For details of the virtual kernel memory layout please see the kernel 64 fffffc1fffe00000 ffffffffffdfffff 3968GB vmemmap
58booting log. 65 ffffffffffe00000 ffffffffffffffff 2MB [guard region]
59 66
60 67
61Translation table lookup with 4KB pages:: 68Translation table lookup with 4KB pages::
@@ -83,7 +90,8 @@ Translation table lookup with 64KB pages::
83 | | | | [15:0] in-page offset 90 | | | | [15:0] in-page offset
84 | | | +----------> [28:16] L3 index 91 | | | +----------> [28:16] L3 index
85 | | +--------------------------> [41:29] L2 index 92 | | +--------------------------> [41:29] L2 index
86 | +-------------------------------> [47:42] L1 index 93 | +-------------------------------> [47:42] L1 index (48-bit)
94 | [51:42] L1 index (52-bit)
87 +-------------------------------------------------> [63] TTBR0/1 95 +-------------------------------------------------> [63] TTBR0/1
88 96
89 97
@@ -96,3 +104,62 @@ ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
96 104
97When using KVM with the Virtualization Host Extensions, no additional 105When using KVM with the Virtualization Host Extensions, no additional
98mappings are created, since the host kernel runs directly in EL2. 106mappings are created, since the host kernel runs directly in EL2.
107
10852-bit VA support in the kernel
109-------------------------------
110If the ARMv8.2-LVA optional feature is present, and we are running
111with a 64KB page size, then it is possible to use 52 bits of address
112space for both userspace and kernel addresses. However, any kernel
113binary that supports 52-bit must also be able to fall back to 48-bit
114at early boot time if the hardware feature is not present.
115
116This fallback mechanism requires the kernel .text to be in the
117higher addresses such that it is invariant to 48/52-bit VAs. Due
118to the kasan shadow being a fraction of the entire kernel VA space,
119the end of the kasan shadow must also be in the higher half of the
120kernel VA space for both 48/52-bit. (Switching from 48-bit to 52-bit,
121the end of the kasan shadow is invariant and dependent on ~0UL,
122whilst the start address will "grow" towards the lower addresses).
123
124In order to optimise phys_to_virt and virt_to_phys, the PAGE_OFFSET
125is kept constant at 0xFFF0000000000000 (corresponding to 52-bit);
126this obviates the need for an extra variable read. The physvirt
127offset and vmemmap offsets are computed at early boot to enable
128this logic.
129
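As a sketch (assuming the physvirt offset variable described above; the real
helpers live in arch/arm64/include/asm/memory.h and handle further cases,
e.g. kernel image addresses), the linear-map translation then reduces to::

    /* Computed once at early boot: PHYS_OFFSET - PAGE_OFFSET. */
    extern s64 physvirt_offset;

    #define __phys_to_virt(pa)  ((unsigned long)((pa) - physvirt_offset))
    #define __virt_to_phys(va)  ((phys_addr_t)(va) + physvirt_offset)
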
130As a single binary will need to support both 48-bit and 52-bit VA
131spaces, the VMEMMAP must be sized large enough for 52-bit VAs and
132also must be sized large enough to accommodate a fixed PAGE_OFFSET.
133
134Most code in the kernel should not need to consider the VA_BITS. For
135code that does need to know the VA size, the variables are
136defined as follows:
137
138VA_BITS constant the *maximum* VA space size
139
140VA_BITS_MIN constant the *minimum* VA space size
141
142vabits_actual variable the *actual* VA space size
143
144
145Maximum and minimum sizes can be useful to ensure that buffers are
146sized large enough or that addresses are positioned close enough for
147the "worst" case.
148
14952-bit userspace VAs
150--------------------
151To maintain compatibility with software that relies on the ARMv8.0
152VA space maximum size of 48-bits, the kernel will, by default,
153return virtual addresses to userspace from a 48-bit range.
154
155Software can "opt-in" to receiving VAs from a 52-bit space by
156specifying an mmap hint parameter that is larger than 48-bit.
157For example:
158 maybe_high_address = mmap(~0UL, size, prot, flags,...);
159
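A slightly fuller sketch of the opt-in (illustrative only; the helper name
is made up and error handling is trimmed)::

    #include <sys/mman.h>

    /* A hint above the 48-bit range opts this mapping in to 52-bit VAs;
     * without hardware support the kernel falls back to the 48-bit range. */
    void *alloc_maybe_high(unsigned long size)
    {
            return mmap((void *)~0UL, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    }
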
160It is also possible to build a debug kernel that returns addresses
161from a 52-bit space by enabling the following kernel config options:
162 CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y
163
164Note that this option is only intended for debugging applications
165and should not be used in production.
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
new file mode 100644
index 000000000000..d4a85d535bf9
--- /dev/null
+++ b/Documentation/arm64/tagged-address-abi.rst
@@ -0,0 +1,156 @@
1==========================
2AArch64 TAGGED ADDRESS ABI
3==========================
4
5Authors: Vincenzo Frascino <vincenzo.frascino@arm.com>
6 Catalin Marinas <catalin.marinas@arm.com>
7
8Date: 21 August 2019
9
10This document describes the usage and semantics of the Tagged Address
11ABI on AArch64 Linux.
12
131. Introduction
14---------------
15
16On AArch64 the ``TCR_EL1.TBI0`` bit is set by default, allowing
17userspace (EL0) to perform memory accesses through 64-bit pointers with
18a non-zero top byte. This document describes the relaxation of the
19syscall ABI that allows userspace to pass certain tagged pointers to
20kernel syscalls.
21
222. AArch64 Tagged Address ABI
23-----------------------------
24
25From the kernel syscall interface perspective and for the purposes of
26this document, a "valid tagged pointer" is a pointer with a potentially
27non-zero top-byte that references an address in the user process address
28space obtained in one of the following ways:
29
30- ``mmap()`` syscall where either:
31
32 - flags have the ``MAP_ANONYMOUS`` bit set or
33 - the file descriptor refers to a regular file (including those
34 returned by ``memfd_create()``) or ``/dev/zero``
35
36- ``brk()`` syscall (i.e. the heap area between the initial location of
37 the program break at process creation and its current location).
38
39- any memory mapped by the kernel in the address space of the process
40 during creation and with the same restrictions as for ``mmap()`` above
41 (e.g. data, bss, stack).
42
43The AArch64 Tagged Address ABI has two stages of relaxation depending on
44how the user addresses are used by the kernel:
45
461. User addresses not accessed by the kernel but used for address space
47 management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
48 of valid tagged pointers in this context is always allowed.
49
502. User addresses accessed by the kernel (e.g. ``write()``). This ABI
51 relaxation is disabled by default and the application thread needs to
52 explicitly enable it via ``prctl()`` as follows:
53
54 - ``PR_SET_TAGGED_ADDR_CTRL``: enable or disable the AArch64 Tagged
55 Address ABI for the calling thread.
56
57 The ``(unsigned int) arg2`` argument is a bit mask describing the
58 control mode used:
59
60 - ``PR_TAGGED_ADDR_ENABLE``: enable AArch64 Tagged Address ABI.
61 Default status is disabled.
62
63 Arguments ``arg3``, ``arg4``, and ``arg5`` must be 0.
64
65 - ``PR_GET_TAGGED_ADDR_CTRL``: get the status of the AArch64 Tagged
66 Address ABI for the calling thread.
67
68 Arguments ``arg2``, ``arg3``, ``arg4``, and ``arg5`` must be 0.
69
70 The ABI properties described above are thread-scoped, inherited on
 71 ``clone()`` and ``fork()`` and cleared on ``exec()``.
72
73 Calling ``prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)``
74 returns ``-EINVAL`` if the AArch64 Tagged Address ABI is globally
75 disabled by ``sysctl abi.tagged_addr_disabled=1``. The default
76 ``sysctl abi.tagged_addr_disabled`` configuration is 0.
77
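 For example, a thread can check whether the relaxation is currently
 enabled via ``PR_GET_TAGGED_ADDR_CTRL`` (a minimal sketch; the prctl
 constants are assumed from the kernel uapi headers):

 .. code-block:: c

    #include <sys/prctl.h>

    #define PR_GET_TAGGED_ADDR_CTRL	56
    #define PR_TAGGED_ADDR_ENABLE	(1UL << 0)

    /* Returns non-zero if the tagged address ABI is enabled. */
    static int tagged_addr_abi_enabled(void)
    {
            long ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

            return ctrl >= 0 && (ctrl & PR_TAGGED_ADDR_ENABLE);
    }
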
78When the AArch64 Tagged Address ABI is enabled for a thread, the
79following behaviours are guaranteed:
80
81- All syscalls except the cases mentioned in section 3 can accept any
82 valid tagged pointer.
83
84- The syscall behaviour is undefined for invalid tagged pointers: it may
85 result in an error code being returned, a (fatal) signal being raised,
86 or other modes of failure.
87
88- The syscall behaviour for a valid tagged pointer is the same as for
89 the corresponding untagged pointer.
90
91
92A definition of the meaning of tagged pointers on AArch64 can be found
93in Documentation/arm64/tagged-pointers.rst.
94
953. AArch64 Tagged Address ABI Exceptions
96-----------------------------------------
97
98The following system call parameters must be untagged regardless of the
99ABI relaxation:
100
101- ``prctl()``, other than pointers to user data either passed directly or
102 indirectly as arguments to be accessed by the kernel.
103
104- ``ioctl()``, other than pointers to user data either passed directly or
105 indirectly as arguments to be accessed by the kernel.
106
107- ``shmat()`` and ``shmdt()``.
108
109Any attempt to use non-zero tagged pointers may result in an error code
110being returned, a (fatal) signal being raised, or other modes of
111failure.
112
1134. Example of correct usage
114---------------------------
115.. code-block:: c
116
117 #include <stdlib.h>
118 #include <string.h>
119 #include <unistd.h>
120 #include <sys/mman.h>
121 #include <sys/prctl.h>
122
123 #define PR_SET_TAGGED_ADDR_CTRL 55
124 #define PR_TAGGED_ADDR_ENABLE (1UL << 0)
125
126 #define TAG_SHIFT 56
127
128 int main(void)
129 {
130 int tbi_enabled = 0;
131 unsigned long tag = 0;
132 char *ptr;
133
134 /* check/enable the tagged address ABI */
135 if (!prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
136 tbi_enabled = 1;
137
138 /* memory allocation */
139 ptr = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ | PROT_WRITE,
140 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
141 if (ptr == MAP_FAILED)
142 return 1;
143
144 /* set a non-zero tag if the ABI is available */
145 if (tbi_enabled)
146 tag = rand() & 0xff;
147 ptr = (char *)((unsigned long)ptr | (tag << TAG_SHIFT));
148
149 /* memory access to a tagged address */
150 strcpy(ptr, "tagged pointer\n");
151
152 /* syscall with a tagged pointer */
153 write(1, ptr, strlen(ptr));
154
155 return 0;
156 }
diff --git a/Documentation/arm64/tagged-pointers.rst b/Documentation/arm64/tagged-pointers.rst
index 2acdec3ebbeb..eab4323609b9 100644
--- a/Documentation/arm64/tagged-pointers.rst
+++ b/Documentation/arm64/tagged-pointers.rst
@@ -20,7 +20,9 @@ Passing tagged addresses to the kernel
20-------------------------------------- 20--------------------------------------
21 21
22All interpretation of userspace memory addresses by the kernel assumes 22All interpretation of userspace memory addresses by the kernel assumes
23an address tag of 0x00. 23an address tag of 0x00, unless the application enables the AArch64
24Tagged Address ABI explicitly
25(Documentation/arm64/tagged-address-abi.rst).
24 26
25This includes, but is not limited to, addresses found in: 27This includes, but is not limited to, addresses found in:
26 28
@@ -33,13 +35,15 @@ This includes, but is not limited to, addresses found in:
33 - the frame pointer (x29) and frame records, e.g. when interpreting 35 - the frame pointer (x29) and frame records, e.g. when interpreting
34 them to generate a backtrace or call graph. 36 them to generate a backtrace or call graph.
35 37
36Using non-zero address tags in any of these locations may result in an 38Using non-zero address tags in any of these locations when the
37error code being returned, a (fatal) signal being raised, or other modes 39userspace application did not enable the AArch64 Tagged Address ABI may
38of failure. 40result in an error code being returned, a (fatal) signal being raised,
41or other modes of failure.
39 42
40For these reasons, passing non-zero address tags to the kernel via 43For these reasons, when the AArch64 Tagged Address ABI is disabled,
41system calls is forbidden, and using a non-zero address tag for sp is 44passing non-zero address tags to the kernel via system calls is
42strongly discouraged. 45forbidden, and using a non-zero address tag for sp is strongly
46discouraged.
43 47
44Programs maintaining a frame pointer and frame records that use non-zero 48Programs maintaining a frame pointer and frame records that use non-zero
45address tags may suffer impaired or inaccurate debug and profiling 49address tags may suffer impaired or inaccurate debug and profiling
@@ -59,6 +63,9 @@ be preserved.
59The architecture prevents the use of a tagged PC, so the upper byte will 63The architecture prevents the use of a tagged PC, so the upper byte will
60be set to a sign-extension of bit 55 on exception return. 64be set to a sign-extension of bit 55 on exception return.
61 65
66This behaviour is maintained when the AArch64 Tagged Address ABI is
67enabled.
68
62 69
63Other considerations 70Other considerations
64-------------------- 71--------------------
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/cpu/cpu-topology.txt
index b0d80c0fb265..99918189403c 100644
--- a/Documentation/devicetree/bindings/arm/topology.txt
+++ b/Documentation/devicetree/bindings/cpu/cpu-topology.txt
@@ -1,21 +1,19 @@
1=========================================== 1===========================================
2ARM topology binding description 2CPU topology binding description
3=========================================== 3===========================================
4 4
5=========================================== 5===========================================
61 - Introduction 61 - Introduction
7=========================================== 7===========================================
8 8
9In an ARM system, the hierarchy of CPUs is defined through three entities that 9In an SMP system, the hierarchy of CPUs is defined through four entities that
10are used to describe the layout of physical CPUs in the system: 10are used to describe the layout of physical CPUs in the system:
11 11
12- socket
12- cluster 13- cluster
13- core 14- core
14- thread 15- thread
15 16
16The cpu nodes (bindings defined in [1]) represent the devices that
17correspond to physical CPUs and are to be mapped to the hierarchy levels.
18
19The bottom hierarchy level sits at core or thread level depending on whether 17The bottom hierarchy level sits at core or thread level depending on whether
20symmetric multi-threading (SMT) is supported or not. 18symmetric multi-threading (SMT) is supported or not.
21 19
@@ -24,33 +22,31 @@ threads existing in the system and map to the hierarchy level "thread" above.
24In systems where SMT is not supported "cpu" nodes represent all cores present 22In systems where SMT is not supported "cpu" nodes represent all cores present
25in the system and map to the hierarchy level "core" above. 23in the system and map to the hierarchy level "core" above.
26 24
27ARM topology bindings allow one to associate cpu nodes with hierarchical groups 25CPU topology bindings allow one to associate cpu nodes with hierarchical groups
28corresponding to the system hierarchy; syntactically they are defined as device 26corresponding to the system hierarchy; syntactically they are defined as device
29tree nodes. 27tree nodes.
30 28
31The remainder of this document provides the topology bindings for ARM, based 29Currently, only ARM and RISC-V intend to use this CPU topology binding, but it
32on the Devicetree Specification, available from: 30may be used by any other architecture as well.
33 31
34https://www.devicetree.org/specifications/ 32The cpu nodes, as per bindings defined in [4], represent the devices that
33correspond to physical CPUs and are to be mapped to the hierarchy levels.
35 34
36If not stated otherwise, whenever a reference to a cpu node phandle is made its
37value must point to a cpu node compliant with the cpu node bindings as
38documented in [1].
39A topology description containing phandles to cpu nodes that are not compliant 35A topology description containing phandles to cpu nodes that are not compliant
40with bindings standardized in [1] is therefore considered invalid. 36with bindings standardized in [4] is therefore considered invalid.
41 37
42=========================================== 38===========================================
432 - cpu-map node 392 - cpu-map node
44=========================================== 40===========================================
45 41
46The ARM CPU topology is defined within the cpu-map node, which is a direct 42The ARM/RISC-V CPU topology is defined within the cpu-map node, which is a direct
47child of the cpus node and provides a container where the actual topology 43child of the cpus node and provides a container where the actual topology
48nodes are listed. 44nodes are listed.
49 45
50- cpu-map node 46- cpu-map node
51 47
52 Usage: Optional - On ARM SMP systems provide CPUs topology to the OS. 48 Usage: Optional - On SMP systems provide CPUs topology to the OS.
53 ARM uniprocessor systems do not require a topology 49 Uniprocessor systems do not require a topology
54 description and therefore should not define a 50 description and therefore should not define a
55 cpu-map node. 51 cpu-map node.
56 52
@@ -63,21 +59,23 @@ nodes are listed.
63 59
64 The cpu-map node's child nodes can be: 60 The cpu-map node's child nodes can be:
65 61
66 - one or more cluster nodes 62 - one or more cluster nodes or
63 - one or more socket nodes in a multi-socket system
67 64
68 Any other configuration is considered invalid. 65 Any other configuration is considered invalid.
69 66
70The cpu-map node can only contain three types of child nodes: 67The cpu-map node can only contain four types of child nodes:
71 68
69- socket node
72- cluster node 70- cluster node
73- core node 71- core node
74- thread node 72- thread node
75 73
76whose bindings are described in paragraph 3. 74whose bindings are described in paragraph 3.
77 75
78The nodes describing the CPU topology (cluster/core/thread) can only 76The nodes describing the CPU topology (socket/cluster/core/thread) can
79be defined within the cpu-map node and every core/thread in the system 77only be defined within the cpu-map node and every core/thread in the
80must be defined within the topology. Any other configuration is 78system must be defined within the topology. Any other configuration is
81invalid and therefore must be ignored. 79invalid and therefore must be ignored.
82 80
83=========================================== 81===========================================
@@ -85,26 +83,44 @@ invalid and therefore must be ignored.
85=========================================== 83===========================================
86 84
87cpu-map child nodes must follow a naming convention where the node name 85cpu-map child nodes must follow a naming convention where the node name
88must be "clusterN", "coreN", "threadN" depending on the node type (ie 86must be "socketN", "clusterN", "coreN", "threadN" depending on the node type
89cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes which 87(ie socket/cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes
90are siblings within a single common parent node must be given a unique and 88which are siblings within a single common parent node must be given a unique and
91sequential N value, starting from 0). 89sequential N value, starting from 0).
92cpu-map child nodes which do not share a common parent node can have the same 90cpu-map child nodes which do not share a common parent node can have the same
93name (ie same number N as other cpu-map child nodes at different device tree 91name (ie same number N as other cpu-map child nodes at different device tree
94levels) since name uniqueness will be guaranteed by the device tree hierarchy. 92levels) since name uniqueness will be guaranteed by the device tree hierarchy.
95 93
96=========================================== 94===========================================
973 - cluster/core/thread node bindings 953 - socket/cluster/core/thread node bindings
98=========================================== 96===========================================
99 97
100Bindings for cluster/cpu/thread nodes are defined as follows: 98Bindings for socket/cluster/core/thread nodes are defined as follows:
99
100- socket node
101
102 Description: must be declared within a cpu-map node, one node
103 per physical socket in the system. A system can
104 contain single or multiple physical socket.
105 The association of sockets and NUMA nodes is beyond
106 the scope of this bindings, please refer [2] for
107 NUMA bindings.
108
109 This node is optional for a single socket system.
110
111 The socket node name must be "socketN" as described in 2.1 above.
112 A socket node can not be a leaf node.
113
114 A socket node's child nodes must be one or more cluster nodes.
115
116 Any other configuration is considered invalid.
101 117
102- cluster node 118- cluster node
103 119
104 Description: must be declared within a cpu-map node, one node 120 Description: must be declared within a cpu-map node, one node
105 per cluster. A system can contain several layers of 121 per cluster. A system can contain several layers of
106 clustering and cluster nodes can be contained in parent 122 clustering within a single physical socket and cluster
107 cluster nodes. 123 nodes can be contained in parent cluster nodes.
108 124
109 The cluster node name must be "clusterN" as described in 2.1 above. 125 The cluster node name must be "clusterN" as described in 2.1 above.
110 A cluster node can not be a leaf node. 126 A cluster node can not be a leaf node.
@@ -164,90 +180,93 @@ Bindings for cluster/cpu/thread nodes are defined as follows:
1644 - Example dts 1804 - Example dts
165=========================================== 181===========================================
166 182
167Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters): 183Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters in a single
184physical socket):
168 185
169cpus { 186cpus {
170 #size-cells = <0>; 187 #size-cells = <0>;
171 #address-cells = <2>; 188 #address-cells = <2>;
172 189
173 cpu-map { 190 cpu-map {
174 cluster0 { 191 socket0 {
175 cluster0 { 192 cluster0 {
176 core0 { 193 cluster0 {
177 thread0 { 194 core0 {
178 cpu = <&CPU0>; 195 thread0 {
196 cpu = <&CPU0>;
197 };
198 thread1 {
199 cpu = <&CPU1>;
200 };
179 }; 201 };
180 thread1 {
181 cpu = <&CPU1>;
182 };
183 };
184 202
185 core1 { 203 core1 {
186 thread0 { 204 thread0 {
187 cpu = <&CPU2>; 205 cpu = <&CPU2>;
188 }; 206 };
189 thread1 { 207 thread1 {
190 cpu = <&CPU3>; 208 cpu = <&CPU3>;
209 };
191 }; 210 };
192 }; 211 };
193 };
194 212
195 cluster1 { 213 cluster1 {
196 core0 { 214 core0 {
197 thread0 { 215 thread0 {
198 cpu = <&CPU4>; 216 cpu = <&CPU4>;
199 }; 217 };
200 thread1 { 218 thread1 {
201 cpu = <&CPU5>; 219 cpu = <&CPU5>;
220 };
202 }; 221 };
203 };
204 222
205 core1 { 223 core1 {
206 thread0 { 224 thread0 {
207 cpu = <&CPU6>; 225 cpu = <&CPU6>;
208 }; 226 };
209 thread1 { 227 thread1 {
210 cpu = <&CPU7>; 228 cpu = <&CPU7>;
211 }; 229 };
212 };
213 };
214 };
215
216 cluster1 {
217 cluster0 {
218 core0 {
219 thread0 {
220 cpu = <&CPU8>;
221 };
222 thread1 {
223 cpu = <&CPU9>;
224 };
225 };
226 core1 {
227 thread0 {
228 cpu = <&CPU10>;
229 };
230 thread1 {
231 cpu = <&CPU11>;
232 }; 230 };
233 }; 231 };
234 }; 232 };
235 233
236 cluster1 { 234 cluster1 {
237 core0 { 235 cluster0 {
238 thread0 { 236 core0 {
239 cpu = <&CPU12>; 237 thread0 {
238 cpu = <&CPU8>;
239 };
240 thread1 {
241 cpu = <&CPU9>;
242 };
240 }; 243 };
241 thread1 { 244 core1 {
242 cpu = <&CPU13>; 245 thread0 {
246 cpu = <&CPU10>;
247 };
248 thread1 {
249 cpu = <&CPU11>;
250 };
243 }; 251 };
244 }; 252 };
245 core1 { 253
246 thread0 { 254 cluster1 {
247 cpu = <&CPU14>; 255 core0 {
256 thread0 {
257 cpu = <&CPU12>;
258 };
259 thread1 {
260 cpu = <&CPU13>;
261 };
248 }; 262 };
249 thread1 { 263 core1 {
250 cpu = <&CPU15>; 264 thread0 {
265 cpu = <&CPU14>;
266 };
267 thread1 {
268 cpu = <&CPU15>;
269 };
251 }; 270 };
252 }; 271 };
253 }; 272 };
@@ -470,6 +489,65 @@ cpus {
470 }; 489 };
471}; 490};
472 491
492Example 3: HiFive Unleashed (RISC-V 64 bit, 4 core system)
493
494{
495 #address-cells = <2>;
496 #size-cells = <2>;
497 compatible = "sifive,fu540g", "sifive,fu500";
498 model = "sifive,hifive-unleashed-a00";
499
500 ...
501 cpus {
502 #address-cells = <1>;
503 #size-cells = <0>;
504 cpu-map {
505 socket0 {
506 cluster0 {
507 core0 {
508 cpu = <&CPU1>;
509 };
510 core1 {
511 cpu = <&CPU2>;
512 };
 513 core2 {
 514 cpu = <&CPU3>;
 515 };
 516 core3 {
 517 cpu = <&CPU4>;
 518 };
519 };
520 };
521 };
522
 523 CPU1: cpu@1 {
 524 device_type = "cpu";
 525 compatible = "sifive,rocket0", "riscv";
 526 reg = <0x1>;
 527 };
 528
 529 CPU2: cpu@2 {
 530 device_type = "cpu";
 531 compatible = "sifive,rocket0", "riscv";
 532 reg = <0x2>;
 533 };
 534 CPU3: cpu@3 {
 535 device_type = "cpu";
 536 compatible = "sifive,rocket0", "riscv";
 537 reg = <0x3>;
 538 };
 539 CPU4: cpu@4 {
 540 device_type = "cpu";
 541 compatible = "sifive,rocket0", "riscv";
 542 reg = <0x4>;
 543 };
 544 };
545};
473=============================================================================== 546===============================================================================
474[1] ARM Linux kernel documentation 547[1] ARM Linux kernel documentation
475 Documentation/devicetree/bindings/arm/cpus.yaml 548 Documentation/devicetree/bindings/arm/cpus.yaml
549[2] Devicetree NUMA binding description
550 Documentation/devicetree/bindings/numa.txt
551[3] RISC-V Linux kernel documentation
552 Documentation/devicetree/bindings/riscv/cpus.txt
553[4] https://www.devicetree.org/specifications/
diff --git a/MAINTAINERS b/MAINTAINERS
index a2c343ee3b2c..f29cfc59d51c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4290,6 +4290,14 @@ S: Supported
4290F: drivers/cpuidle/cpuidle-exynos.c 4290F: drivers/cpuidle/cpuidle-exynos.c
4291F: arch/arm/mach-exynos/pm.c 4291F: arch/arm/mach-exynos/pm.c
4292 4292
4293CPUIDLE DRIVER - ARM PSCI
4294M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
4295M: Sudeep Holla <sudeep.holla@arm.com>
4296L: linux-pm@vger.kernel.org
4297L: linux-arm-kernel@lists.infradead.org
4298S: Supported
4299F: drivers/cpuidle/cpuidle-psci.c
4300
4293CPU IDLE TIME MANAGEMENT FRAMEWORK 4301CPU IDLE TIME MANAGEMENT FRAMEWORK
4294M: "Rafael J. Wysocki" <rjw@rjwysocki.net> 4302M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
4295M: Daniel Lezcano <daniel.lezcano@linaro.org> 4303M: Daniel Lezcano <daniel.lezcano@linaro.org>
@@ -6439,6 +6447,7 @@ M: Frank Li <Frank.li@nxp.com>
6439L: linux-arm-kernel@lists.infradead.org 6447L: linux-arm-kernel@lists.infradead.org
6440S: Maintained 6448S: Maintained
6441F: drivers/perf/fsl_imx8_ddr_perf.c 6449F: drivers/perf/fsl_imx8_ddr_perf.c
6450F: Documentation/admin-guide/perf/imx-ddr.rst
6442F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt 6451F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt
6443 6452
6444FREESCALE IMX LPI2C DRIVER 6453FREESCALE IMX LPI2C DRIVER
@@ -6724,6 +6733,13 @@ W: https://linuxtv.org
6724S: Maintained 6733S: Maintained
6725F: drivers/media/radio/radio-gemtek* 6734F: drivers/media/radio/radio-gemtek*
6726 6735
6736GENERIC ARCHITECTURE TOPOLOGY
6737M: Sudeep Holla <sudeep.holla@arm.com>
6738L: linux-kernel@vger.kernel.org
6739S: Maintained
6740F: drivers/base/arch_topology.c
6741F: include/linux/arch_topology.h
6742
6727GENERIC GPIO I2C DRIVER 6743GENERIC GPIO I2C DRIVER
6728M: Wolfram Sang <wsa+renesas@sang-engineering.com> 6744M: Wolfram Sang <wsa+renesas@sang-engineering.com>
6729S: Supported 6745S: Supported
diff --git a/Makefile b/Makefile
index 23cdf1f41364..9e6ec0c9962c 100644
--- a/Makefile
+++ b/Makefile
@@ -912,6 +912,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y)
912LDFLAGS_vmlinux += $(call ld-option, -X,) 912LDFLAGS_vmlinux += $(call ld-option, -X,)
913endif 913endif
914 914
915ifeq ($(CONFIG_RELR),y)
916LDFLAGS_vmlinux += --pack-dyn-relocs=relr
917endif
918
915# ensure the checker runs with the right endianness 919# ensure the checker runs with the right endianness
916CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian) 920CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
917 921
diff --git a/arch/Kconfig b/arch/Kconfig
index a7b57dd42c26..aa6bdb3df5c1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -925,6 +925,20 @@ config LOCK_EVENT_COUNTS
925 the chance of application behavior change because of timing 925 the chance of application behavior change because of timing
926 differences. The counts are reported via debugfs. 926 differences. The counts are reported via debugfs.
927 927
928# Select if the architecture has support for applying RELR relocations.
929config ARCH_HAS_RELR
930 bool
931
932config RELR
933 bool "Use RELR relocation packing"
934 depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR
935 default y
936 help
937 Store the kernel's dynamic relocations in the RELR relocation packing
938 format. Requires a compatible linker (LLD supports this feature), as
939 well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy
940 are compatible).
941
928source "kernel/gcov/Kconfig" 942source "kernel/gcov/Kconfig"
929 943
930source "scripts/gcc-plugins/Kconfig" 944source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 2a786f54d8b8..8a0fae94d45e 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -5,26 +5,6 @@
5#ifdef CONFIG_ARM_CPU_TOPOLOGY 5#ifdef CONFIG_ARM_CPU_TOPOLOGY
6 6
7#include <linux/cpumask.h> 7#include <linux/cpumask.h>
8
9struct cputopo_arm {
10 int thread_id;
11 int core_id;
12 int socket_id;
13 cpumask_t thread_sibling;
14 cpumask_t core_sibling;
15};
16
17extern struct cputopo_arm cpu_topology[NR_CPUS];
18
19#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
20#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
21#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
22#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
23
24void init_cpu_topology(void);
25void store_cpu_topology(unsigned int cpuid);
26const struct cpumask *cpu_coregroup_mask(int cpu);
27
28#include <linux/arch_topology.h> 8#include <linux/arch_topology.h>
29 9
30/* Replace task scheduler's default frequency-invariant accounting */ 10/* Replace task scheduler's default frequency-invariant accounting */
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index d17cb1e6d679..5b9faba03afb 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -177,17 +177,6 @@ static inline void parse_dt_topology(void) {}
177static inline void update_cpu_capacity(unsigned int cpuid) {} 177static inline void update_cpu_capacity(unsigned int cpuid) {}
178#endif 178#endif
179 179
180 /*
181 * cpu topology table
182 */
183struct cputopo_arm cpu_topology[NR_CPUS];
184EXPORT_SYMBOL_GPL(cpu_topology);
185
186const struct cpumask *cpu_coregroup_mask(int cpu)
187{
188 return &cpu_topology[cpu].core_sibling;
189}
190
191/* 180/*
192 * The current assumption is that we can power gate each core independently. 181 * The current assumption is that we can power gate each core independently.
193 * This will be superseded by DT binding once available. 182 * This will be superseded by DT binding once available.
@@ -197,32 +186,6 @@ const struct cpumask *cpu_corepower_mask(int cpu)
197 return &cpu_topology[cpu].thread_sibling; 186 return &cpu_topology[cpu].thread_sibling;
198} 187}
199 188
200static void update_siblings_masks(unsigned int cpuid)
201{
202 struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
203 int cpu;
204
205 /* update core and thread sibling masks */
206 for_each_possible_cpu(cpu) {
207 cpu_topo = &cpu_topology[cpu];
208
209 if (cpuid_topo->socket_id != cpu_topo->socket_id)
210 continue;
211
212 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
213 if (cpu != cpuid)
214 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
215
216 if (cpuid_topo->core_id != cpu_topo->core_id)
217 continue;
218
219 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
220 if (cpu != cpuid)
221 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
222 }
223 smp_wmb();
224}
225
226/* 189/*
227 * store_cpu_topology is called at boot when only one cpu is running 190 * store_cpu_topology is called at boot when only one cpu is running
228 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, 191 * and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
@@ -230,7 +193,7 @@ static void update_siblings_masks(unsigned int cpuid)
230 */ 193 */
231void store_cpu_topology(unsigned int cpuid) 194void store_cpu_topology(unsigned int cpuid)
232{ 195{
233 struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; 196 struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
234 unsigned int mpidr; 197 unsigned int mpidr;
235 198
236 /* If the cpu topology has been already set, just return */ 199 /* If the cpu topology has been already set, just return */
@@ -250,12 +213,12 @@ void store_cpu_topology(unsigned int cpuid)
250 /* core performance interdependency */ 213 /* core performance interdependency */
251 cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 214 cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
252 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); 215 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
253 cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); 216 cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
254 } else { 217 } else {
255 /* largely independent cores */ 218 /* largely independent cores */
256 cpuid_topo->thread_id = -1; 219 cpuid_topo->thread_id = -1;
257 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 220 cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
258 cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); 221 cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
259 } 222 }
260 } else { 223 } else {
261 /* 224 /*
@@ -265,7 +228,7 @@ void store_cpu_topology(unsigned int cpuid)
265 */ 228 */
266 cpuid_topo->thread_id = -1; 229 cpuid_topo->thread_id = -1;
267 cpuid_topo->core_id = 0; 230 cpuid_topo->core_id = 0;
268 cpuid_topo->socket_id = -1; 231 cpuid_topo->package_id = -1;
269 } 232 }
270 233
271 update_siblings_masks(cpuid); 234 update_siblings_masks(cpuid);
@@ -275,7 +238,7 @@ void store_cpu_topology(unsigned int cpuid)
275 pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", 238 pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
276 cpuid, cpu_topology[cpuid].thread_id, 239 cpuid, cpu_topology[cpuid].thread_id,
277 cpu_topology[cpuid].core_id, 240 cpu_topology[cpuid].core_id,
278 cpu_topology[cpuid].socket_id, mpidr); 241 cpu_topology[cpuid].package_id, mpidr);
279} 242}
280 243
281static inline int cpu_corepower_flags(void) 244static inline int cpu_corepower_flags(void)
@@ -298,18 +261,7 @@ static struct sched_domain_topology_level arm_topology[] = {
298 */ 261 */
299void __init init_cpu_topology(void) 262void __init init_cpu_topology(void)
300{ 263{
301 unsigned int cpu; 264 reset_cpu_topology();
302
303 /* init core mask and capacity */
304 for_each_possible_cpu(cpu) {
305 struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
306
307 cpu_topo->thread_id = -1;
308 cpu_topo->core_id = -1;
309 cpu_topo->socket_id = -1;
310 cpumask_clear(&cpu_topo->core_sibling);
311 cpumask_clear(&cpu_topo->thread_sibling);
312 }
313 smp_wmb(); 265 smp_wmb();
314 266
315 parse_dt_topology(); 267 parse_dt_topology();
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
new file mode 100644
index 000000000000..d6465823b281
--- /dev/null
+++ b/arch/arm64/Kbuild
@@ -0,0 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0-only
2obj-y += kernel/ mm/
3obj-$(CONFIG_NET) += net/
4obj-$(CONFIG_KVM) += kvm/
5obj-$(CONFIG_XEN) += xen/
6obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3adcec05b1f6..6481964b6425 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -148,6 +148,7 @@ config ARM64
148 select HAVE_FAST_GUP 148 select HAVE_FAST_GUP
149 select HAVE_FTRACE_MCOUNT_RECORD 149 select HAVE_FTRACE_MCOUNT_RECORD
150 select HAVE_FUNCTION_TRACER 150 select HAVE_FUNCTION_TRACER
151 select HAVE_FUNCTION_ERROR_INJECTION
151 select HAVE_FUNCTION_GRAPH_TRACER 152 select HAVE_FUNCTION_GRAPH_TRACER
152 select HAVE_GCC_PLUGINS 153 select HAVE_GCC_PLUGINS
153 select HAVE_HW_BREAKPOINT if PERF_EVENTS 154 select HAVE_HW_BREAKPOINT if PERF_EVENTS
@@ -286,7 +287,7 @@ config PGTABLE_LEVELS
286 int 287 int
287 default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36 288 default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
288 default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 289 default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
289 default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) 290 default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52)
290 default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 291 default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
291 default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 292 default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
292 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 293 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
@@ -297,6 +298,21 @@ config ARCH_SUPPORTS_UPROBES
297config ARCH_PROC_KCORE_TEXT 298config ARCH_PROC_KCORE_TEXT
298 def_bool y 299 def_bool y
299 300
301config KASAN_SHADOW_OFFSET
302 hex
303 depends on KASAN
304 default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
305 default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
306 default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
307 default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS
308 default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS
309 default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS
310 default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS
311 default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS
312 default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS
313 default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS
314 default 0xffffffffffffffff
315
300source "arch/arm64/Kconfig.platforms" 316source "arch/arm64/Kconfig.platforms"
301 317
302menu "Kernel Features" 318menu "Kernel Features"
@@ -744,13 +760,14 @@ config ARM64_VA_BITS_47
744config ARM64_VA_BITS_48 760config ARM64_VA_BITS_48
745 bool "48-bit" 761 bool "48-bit"
746 762
747config ARM64_USER_VA_BITS_52 763config ARM64_VA_BITS_52
748 bool "52-bit (user)" 764 bool "52-bit"
749 depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN) 765 depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
750 help 766 help
751 Enable 52-bit virtual addressing for userspace when explicitly 767 Enable 52-bit virtual addressing for userspace when explicitly
752 requested via a hint to mmap(). The kernel will continue to 768 requested via a hint to mmap(). The kernel will also use 52-bit
753 use 48-bit virtual addresses for its own mappings. 769 virtual addresses for its own mappings (provided HW support for
770 this feature is available, otherwise it reverts to 48-bit).
754 771
755 NOTE: Enabling 52-bit virtual addressing in conjunction with 772 NOTE: Enabling 52-bit virtual addressing in conjunction with
756 ARMv8.3 Pointer Authentication will result in the PAC being 773 ARMv8.3 Pointer Authentication will result in the PAC being
@@ -763,7 +780,7 @@ endchoice
763 780
764config ARM64_FORCE_52BIT 781config ARM64_FORCE_52BIT
765 bool "Force 52-bit virtual addresses for userspace" 782 bool "Force 52-bit virtual addresses for userspace"
766 depends on ARM64_USER_VA_BITS_52 && EXPERT 783 depends on ARM64_VA_BITS_52 && EXPERT
767 help 784 help
768 For systems with 52-bit userspace VAs enabled, the kernel will attempt 785 For systems with 52-bit userspace VAs enabled, the kernel will attempt
769 to maintain compatibility with older software by providing 48-bit VAs 786 to maintain compatibility with older software by providing 48-bit VAs
@@ -780,7 +797,8 @@ config ARM64_VA_BITS
780 default 39 if ARM64_VA_BITS_39 797 default 39 if ARM64_VA_BITS_39
781 default 42 if ARM64_VA_BITS_42 798 default 42 if ARM64_VA_BITS_42
782 default 47 if ARM64_VA_BITS_47 799 default 47 if ARM64_VA_BITS_47
783 default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52 800 default 48 if ARM64_VA_BITS_48
801 default 52 if ARM64_VA_BITS_52
784 802
785choice 803choice
786 prompt "Physical address space size" 804 prompt "Physical address space size"
@@ -1110,6 +1128,15 @@ config ARM64_SW_TTBR0_PAN
1110 zeroed area and reserved ASID. The user access routines 1128 zeroed area and reserved ASID. The user access routines
1111 restore the valid TTBR0_EL1 temporarily. 1129 restore the valid TTBR0_EL1 temporarily.
1112 1130
1131config ARM64_TAGGED_ADDR_ABI
1132 bool "Enable the tagged user addresses syscall ABI"
1133 default y
1134 help
1135 When this option is enabled, user applications can opt in to a
1136 relaxed ABI via prctl() allowing tagged addresses to be passed
1137 to system calls as pointer arguments. For details, see
1138 Documentation/arm64/tagged-address-abi.rst.
1139
1113menuconfig COMPAT 1140menuconfig COMPAT
1114 bool "Kernel support for 32-bit EL0" 1141 bool "Kernel support for 32-bit EL0"
1115 depends on ARM64_4K_PAGES || EXPERT 1142 depends on ARM64_4K_PAGES || EXPERT
@@ -1467,6 +1494,7 @@ endif
1467 1494
1468config RELOCATABLE 1495config RELOCATABLE
1469 bool 1496 bool
1497 select ARCH_HAS_RELR
1470 help 1498 help
1471 This builds the kernel as a Position Independent Executable (PIE), 1499 This builds the kernel as a Position Independent Executable (PIE),
1472 which retains all relocation metadata required to relocate the 1500 which retains all relocation metadata required to relocate the
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 61de992bbea3..2847b36f72ed 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -126,21 +126,9 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
126KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) 126KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
127KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) 127KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
128 128
129# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
130# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
131# in 32-bit arithmetic
132KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
133 (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
134 + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
135 - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) )
136
137export TEXT_OFFSET GZFLAGS 129export TEXT_OFFSET GZFLAGS
138 130
139core-y += arch/arm64/kernel/ arch/arm64/mm/ 131core-y += arch/arm64/
140core-$(CONFIG_NET) += arch/arm64/net/
141core-$(CONFIG_KVM) += arch/arm64/kvm/
142core-$(CONFIG_XEN) += arch/arm64/xen/
143core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
144libs-y := arch/arm64/lib/ $(libs-y) 132libs-y := arch/arm64/lib/ $(libs-y)
145core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a 133core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
146 134
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index e3a15c751b13..b8cf7c85ffa2 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -124,17 +124,6 @@ alternative_endif
124 .endm 124 .endm
125 125
126/* 126/*
127 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
128 * of bounds.
129 */
130 .macro mask_nospec64, idx, limit, tmp
131 sub \tmp, \idx, \limit
132 bic \tmp, \tmp, \idx
133 and \idx, \idx, \tmp, asr #63
134 csdb
135 .endm
136
137/*
138 * NOP sequence 127 * NOP sequence
139 */ 128 */
140 .macro nops, num 129 .macro nops, num
@@ -350,6 +339,13 @@ alternative_endif
350 .endm 339 .endm
351 340
352/* 341/*
342 * tcr_set_t1sz - update TCR.T1SZ
343 */
344 .macro tcr_set_t1sz, valreg, t1sz
345 bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
346 .endm
347
348/*
353 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported 349 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
354 * ID_AA64MMFR0_EL1.PARange value 350 * ID_AA64MMFR0_EL1.PARange value
355 * 351 *
@@ -538,9 +534,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
538 * In future this may be nop'ed out when dealing with 52-bit kernel VAs. 534 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
539 * ttbr: Value of ttbr to set, modified. 535 * ttbr: Value of ttbr to set, modified.
540 */ 536 */
541 .macro offset_ttbr1, ttbr 537 .macro offset_ttbr1, ttbr, tmp
542#ifdef CONFIG_ARM64_USER_VA_BITS_52 538#ifdef CONFIG_ARM64_VA_BITS_52
539 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1
540 and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
541 cbnz \tmp, .Lskipoffs_\@
543 orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET 542 orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
543.Lskipoffs_\@ :
544#endif 544#endif
545 .endm 545 .endm
546 546
@@ -550,7 +550,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
550 * to be nop'ed out when dealing with 52-bit kernel VAs. 550 * to be nop'ed out when dealing with 52-bit kernel VAs.
551 */ 551 */
552 .macro restore_ttbr1, ttbr 552 .macro restore_ttbr1, ttbr
553#ifdef CONFIG_ARM64_USER_VA_BITS_52 553#ifdef CONFIG_ARM64_VA_BITS_52
554 bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET 554 bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
555#endif 555#endif
556 .endm 556 .endm
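Restated in C purely for illustration (the helpers are the kernel's own, but this function does not exist in the tree), the reworked offset_ttbr1 applies the 48/52 fallback offset only when the CPU lacks ARMv8.2-LVA:

static u64 offset_ttbr1_equivalent(u64 ttbr)
{
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52)) {
		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

		/* LVA field is zero: no hardware 52-bit VA support */
		if (!((mmfr2 >> ID_AA64MMFR2_LVA_SHIFT) & 0xf))
			ttbr |= TTBR1_BADDR_4852_OFFSET;
	}
	return ttbr;
}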
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 657b0457d83c..a5ca23950cfd 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -15,8 +15,6 @@
15#include <asm/barrier.h> 15#include <asm/barrier.h>
16#include <asm/lse.h> 16#include <asm/lse.h>
17 17
18#ifdef __KERNEL__
19
20#define __ARM64_IN_ATOMIC_IMPL 18#define __ARM64_IN_ATOMIC_IMPL
21 19
22#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) 20#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
@@ -157,5 +155,4 @@
157 155
158#include <asm-generic/atomic-instrumented.h> 156#include <asm-generic/atomic-instrumented.h>
159 157
160#endif 158#endif /* __ASM_ATOMIC_H */
161#endif
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 64eeaa41e7ca..43da6dd29592 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -78,7 +78,7 @@ static inline u32 cache_type_cwg(void)
78 return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; 78 return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK;
79} 79}
80 80
81#define __read_mostly __attribute__((__section__(".data..read_mostly"))) 81#define __read_mostly __section(.data..read_mostly)
82 82
83static inline int cache_line_size_of_cpu(void) 83static inline int cache_line_size_of_cpu(void)
84{ 84{
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index fb8ad4616b3b..b0d53a265f1d 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -4,7 +4,6 @@
4 */ 4 */
5#ifndef __ASM_COMPAT_H 5#ifndef __ASM_COMPAT_H
6#define __ASM_COMPAT_H 6#define __ASM_COMPAT_H
7#ifdef __KERNEL__
8#ifdef CONFIG_COMPAT 7#ifdef CONFIG_COMPAT
9 8
10/* 9/*
@@ -215,5 +214,4 @@ static inline int is_compat_thread(struct thread_info *thread)
215} 214}
216 215
217#endif /* CONFIG_COMPAT */ 216#endif /* CONFIG_COMPAT */
218#endif /* __KERNEL__ */
219#endif /* __ASM_COMPAT_H */ 217#endif /* __ASM_COMPAT_H */
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index c09d633c3109..86aabf1e0199 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -23,6 +23,8 @@
23 * @cpu_boot: Boots a cpu into the kernel. 23 * @cpu_boot: Boots a cpu into the kernel.
 24 * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary  24 * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
25 * synchronisation. Called from the cpu being booted. 25 * synchronisation. Called from the cpu being booted.
26 * @cpu_can_disable: Determines whether a CPU can be disabled based on
27 * mechanism-specific information.
26 * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific 28 * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
27 * reason, which will cause the hot unplug to be aborted. Called 29 * reason, which will cause the hot unplug to be aborted. Called
28 * from the cpu to be killed. 30 * from the cpu to be killed.
@@ -42,6 +44,7 @@ struct cpu_operations {
42 int (*cpu_boot)(unsigned int); 44 int (*cpu_boot)(unsigned int);
43 void (*cpu_postboot)(void); 45 void (*cpu_postboot)(void);
44#ifdef CONFIG_HOTPLUG_CPU 46#ifdef CONFIG_HOTPLUG_CPU
47 bool (*cpu_can_disable)(unsigned int cpu);
45 int (*cpu_disable)(unsigned int cpu); 48 int (*cpu_disable)(unsigned int cpu);
46 void (*cpu_die)(unsigned int cpu); 49 void (*cpu_die)(unsigned int cpu);
47 int (*cpu_kill)(unsigned int cpu); 50 int (*cpu_kill)(unsigned int cpu);
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c96ffa4722d3..9cde5d2e768f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -289,9 +289,16 @@ struct arm64_cpu_capabilities {
289 u16 type; 289 u16 type;
290 bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); 290 bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
291 /* 291 /*
292 * Take the appropriate actions to enable this capability for this CPU. 292 * Take the appropriate actions to configure this capability
293 * For each successfully booted CPU, this method is called for each 293 * for this CPU. If the capability is detected by the kernel
294 * globally detected capability. 294 * this will be called on all the CPUs in the system,
295 * including the hotplugged CPUs, regardless of whether the
296 * capability is available on that specific CPU. This is
 297 * useful for some capabilities (e.g., working around CPU
 298 * errata), where all the CPUs must take some action (e.g.,
299 * changing system control/configuration). Thus, if an action
300 * is required only if the CPU has the capability, then the
301 * routine must check it before taking any action.
295 */ 302 */
296 void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); 303 void (*cpu_enable)(const struct arm64_cpu_capabilities *cap);
297 union { 304 union {
@@ -363,21 +370,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
363 return false; 370 return false;
364} 371}
365 372
366/*
367 * Take appropriate action for all matching entries in the shared capability
368 * entry.
369 */
370static inline void
371cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
372{
373 const struct arm64_cpu_capabilities *caps;
374
375 for (caps = entry->match_list; caps->matches; caps++)
376 if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
377 caps->cpu_enable)
378 caps->cpu_enable(caps);
379}
380
381extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); 373extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
382extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; 374extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
383extern struct static_key_false arm64_const_caps_ready; 375extern struct static_key_false arm64_const_caps_ready;
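A hypothetical cpu_enable callback following the contract spelled out above (the name and the SCTLR tweak are illustrative, not from this series): the routine runs on every CPU once the capability is detected, so a CPU-local action must re-check the match first:

static void frobnicate_cpu_enable(const struct arm64_cpu_capabilities *cap)
{
	/* Called on all CPUs; act only where the capability matches. */
	if (cap->matches(cap, SCOPE_LOCAL_CPU))
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_SA);
}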
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index e7d46631cc42..b1454d117cd2 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -51,14 +51,6 @@
51#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ 51#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
52 MIDR_ARCHITECTURE_MASK) 52 MIDR_ARCHITECTURE_MASK)
53 53
54#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
55({ \
56 u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
57 u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
58 \
59 _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
60 })
61
62#define ARM_CPU_IMP_ARM 0x41 54#define ARM_CPU_IMP_ARM 0x41
63#define ARM_CPU_IMP_APM 0x50 55#define ARM_CPU_IMP_APM 0x50
64#define ARM_CPU_IMP_CAVIUM 0x43 56#define ARM_CPU_IMP_CAVIUM 0x43
@@ -159,10 +151,19 @@ struct midr_range {
159#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) 151#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
160#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) 152#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
161 153
154static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
155 u32 rv_max)
156{
157 u32 _model = midr & MIDR_CPU_MODEL_MASK;
158 u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
159
160 return _model == model && rv >= rv_min && rv <= rv_max;
161}
162
162static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) 163static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
163{ 164{
164 return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, 165 return midr_is_cpu_model_range(midr, range->model,
165 range->rv_min, range->rv_max); 166 range->rv_min, range->rv_max);
166} 167}
167 168
168static inline bool 169static inline bool
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index d8ec5bb881c2..7619f473155f 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -5,8 +5,6 @@
5#ifndef __ASM_DEBUG_MONITORS_H 5#ifndef __ASM_DEBUG_MONITORS_H
6#define __ASM_DEBUG_MONITORS_H 6#define __ASM_DEBUG_MONITORS_H
7 7
8#ifdef __KERNEL__
9
10#include <linux/errno.h> 8#include <linux/errno.h>
11#include <linux/types.h> 9#include <linux/types.h>
12#include <asm/brk-imm.h> 10#include <asm/brk-imm.h>
@@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
128int aarch32_break_handler(struct pt_regs *regs); 126int aarch32_break_handler(struct pt_regs *regs);
129 127
130#endif /* __ASSEMBLY */ 128#endif /* __ASSEMBLY */
131#endif /* __KERNEL__ */
132#endif /* __ASM_DEBUG_MONITORS_H */ 129#endif /* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index bdcb0922a40c..fb3e5044f473 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -5,8 +5,6 @@
5#ifndef __ASM_DMA_MAPPING_H 5#ifndef __ASM_DMA_MAPPING_H
6#define __ASM_DMA_MAPPING_H 6#define __ASM_DMA_MAPPING_H
7 7
8#ifdef __KERNEL__
9
10#include <linux/types.h> 8#include <linux/types.h>
11#include <linux/vmalloc.h> 9#include <linux/vmalloc.h>
12 10
@@ -27,5 +25,4 @@ static inline bool is_device_dma_coherent(struct device *dev)
27 return dev->dma_coherent; 25 return dev->dma_coherent;
28} 26}
29 27
30#endif /* __KERNEL__ */
31#endif /* __ASM_DMA_MAPPING_H */ 28#endif /* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 76a144702586..b54d3a86c444 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
79 79
80/* 80/*
81 * On arm64, we have to ensure that the initrd ends up in the linear region, 81 * On arm64, we have to ensure that the initrd ends up in the linear region,
82 * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is 82 * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is
83 * guaranteed to cover the kernel Image. 83 * guaranteed to cover the kernel Image.
84 * 84 *
85 * Since the EFI stub is part of the kernel Image, we can relax the 85 * Since the EFI stub is part of the kernel Image, we can relax the
@@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base)
90static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, 90static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
91 unsigned long image_addr) 91 unsigned long image_addr)
92{ 92{
93 return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1)); 93 return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1));
94} 94}
95 95
96#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) 96#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 65ac18400979..cb29253ae86b 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -34,7 +34,8 @@
34#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ 34#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
35#define ESR_ELx_EC_SYS64 (0x18) 35#define ESR_ELx_EC_SYS64 (0x18)
36#define ESR_ELx_EC_SVE (0x19) 36#define ESR_ELx_EC_SVE (0x19)
37/* Unallocated EC: 0x1A - 0x1E */ 37#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
38/* Unallocated EC: 0x1b - 0x1E */
38#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ 39#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
39#define ESR_ELx_EC_IABT_LOW (0x20) 40#define ESR_ELx_EC_IABT_LOW (0x20)
40#define ESR_ELx_EC_IABT_CUR (0x21) 41#define ESR_ELx_EC_IABT_CUR (0x21)
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ed57b760f38c..a17393ff6677 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr)
30 return esr; 30 return esr;
31} 31}
32 32
33asmlinkage void enter_from_user_mode(void);
34
33#endif /* __ASM_EXCEPTION_H */ 35#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index b6a2c352f4c3..59f10dd13f12 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -21,7 +21,7 @@
21#include <linux/stddef.h> 21#include <linux/stddef.h>
22#include <linux/types.h> 22#include <linux/types.h>
23 23
24#if defined(__KERNEL__) && defined(CONFIG_COMPAT) 24#ifdef CONFIG_COMPAT
25/* Masks for extracting the FPSR and FPCR from the FPSCR */ 25/* Masks for extracting the FPSR and FPCR from the FPSCR */
26#define VFP_FPSCR_STAT_MASK 0xf800009f 26#define VFP_FPSCR_STAT_MASK 0xf800009f
27#define VFP_FPSCR_CTRL_MASK 0x07f79f00 27#define VFP_FPSCR_CTRL_MASK 0x07f79f00
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 6211e3105491..6cc26a127819 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -5,8 +5,6 @@
5#ifndef __ASM_FUTEX_H 5#ifndef __ASM_FUTEX_H
6#define __ASM_FUTEX_H 6#define __ASM_FUTEX_H
7 7
8#ifdef __KERNEL__
9
10#include <linux/futex.h> 8#include <linux/futex.h>
11#include <linux/uaccess.h> 9#include <linux/uaccess.h>
12 10
@@ -129,5 +127,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
129 return ret; 127 return ret;
130} 128}
131 129
132#endif /* __KERNEL__ */
133#endif /* __ASM_FUTEX_H */ 130#endif /* __ASM_FUTEX_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index db9ab760e6fd..bc7aaed4b34e 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -10,8 +10,6 @@
10#include <asm/sysreg.h> 10#include <asm/sysreg.h>
11#include <asm/virt.h> 11#include <asm/virt.h>
12 12
13#ifdef __KERNEL__
14
15struct arch_hw_breakpoint_ctrl { 13struct arch_hw_breakpoint_ctrl {
16 u32 __reserved : 19, 14 u32 __reserved : 19,
17 len : 8, 15 len : 8,
@@ -156,5 +154,4 @@ static inline int get_num_wrps(void)
156 ID_AA64DFR0_WRPS_SHIFT); 154 ID_AA64DFR0_WRPS_SHIFT);
157} 155}
158 156
159#endif /* __KERNEL__ */
160#endif /* __ASM_BREAKPOINT_H */ 157#endif /* __ASM_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 7ed92626949d..e9763831186a 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -8,8 +8,6 @@
8#ifndef __ASM_IO_H 8#ifndef __ASM_IO_H
9#define __ASM_IO_H 9#define __ASM_IO_H
10 10
11#ifdef __KERNEL__
12
13#include <linux/types.h> 11#include <linux/types.h>
14 12
15#include <asm/byteorder.h> 13#include <asm/byteorder.h>
@@ -97,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
97({ \ 95({ \
98 unsigned long tmp; \ 96 unsigned long tmp; \
99 \ 97 \
100 rmb(); \ 98 dma_rmb(); \
101 \ 99 \
102 /* \ 100 /* \
103 * Create a dummy control dependency from the IO read to any \ 101 * Create a dummy control dependency from the IO read to any \
@@ -111,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
111}) 109})
112 110
113#define __io_par(v) __iormb(v) 111#define __io_par(v) __iormb(v)
114#define __iowmb() wmb() 112#define __iowmb() dma_wmb()
115 113
116/* 114/*
117 * Relaxed I/O memory access primitives. These follow the Device memory 115 * Relaxed I/O memory access primitives. These follow the Device memory
@@ -207,5 +205,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
207 205
208extern int devmem_is_allowed(unsigned long pfn); 206extern int devmem_is_allowed(unsigned long pfn);
209 207
210#endif /* __KERNEL__ */
211#endif /* __ASM_IO_H */ 208#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 7872f260c9ee..1a59f0ed1ae3 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -5,8 +5,6 @@
5#ifndef __ASM_IRQFLAGS_H 5#ifndef __ASM_IRQFLAGS_H
6#define __ASM_IRQFLAGS_H 6#define __ASM_IRQFLAGS_H
7 7
8#ifdef __KERNEL__
9
10#include <asm/alternative.h> 8#include <asm/alternative.h>
11#include <asm/ptrace.h> 9#include <asm/ptrace.h>
12#include <asm/sysreg.h> 10#include <asm/sysreg.h>
@@ -128,5 +126,4 @@ static inline void arch_local_irq_restore(unsigned long flags)
128 : "memory"); 126 : "memory");
129} 127}
130 128
131#endif 129#endif /* __ASM_IRQFLAGS_H */
132#endif
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index b52aacd2c526..b0dc4abc3589 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -18,11 +18,8 @@
18 * KASAN_SHADOW_START: beginning of the kernel virtual addresses. 18 * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
19 * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, 19 * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
20 * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). 20 * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
21 */ 21 *
22#define KASAN_SHADOW_START (VA_START) 22 * KASAN_SHADOW_OFFSET:
23#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
24
25/*
26 * This value is used to map an address to the corresponding shadow 23 * This value is used to map an address to the corresponding shadow
27 * address by the following formula: 24 * address by the following formula:
28 * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET 25 * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
@@ -33,8 +30,8 @@
33 * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - 30 * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
34 * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) 31 * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
35 */ 32 */
36#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \ 33#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
37 (64 - KASAN_SHADOW_SCALE_SHIFT))) 34#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual)
38 35
39void kasan_init(void); 36void kasan_init(void);
40void kasan_copy_shadow(pgd_t *pgdir); 37void kasan_copy_shadow(pgd_t *pgdir);
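A standalone worked example of the shadow formula quoted above, with the 48-bit generic-KASAN offset plugged in (the address is arbitrary):

#include <stdio.h>

int main(void)
{
	const unsigned long long offset = 0xdfffa00000000000ULL; /* KASAN_SHADOW_OFFSET */
	unsigned long long addr = 0xffff000012345678ULL;         /* a linear-map address */

	/* shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET */
	printf("shadow = 0x%llx\n", (addr >> 3) + offset);
	return 0;
}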
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index fb04f10a78ab..b61b50bf68b1 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -12,10 +12,10 @@
12 12
13#include <linux/compiler.h> 13#include <linux/compiler.h>
14#include <linux/const.h> 14#include <linux/const.h>
15#include <linux/sizes.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <asm/bug.h> 17#include <asm/bug.h>
17#include <asm/page-def.h> 18#include <asm/page-def.h>
18#include <linux/sizes.h>
19 19
20/* 20/*
21 * Size of the PCI I/O space. This must remain a power of two so that 21 * Size of the PCI I/O space. This must remain a power of two so that
@@ -26,37 +26,50 @@
26/* 26/*
27 * VMEMMAP_SIZE - allows the whole linear region to be covered by 27 * VMEMMAP_SIZE - allows the whole linear region to be covered by
28 * a struct page array 28 * a struct page array
29 *
30 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
31 * needs to cover the memory region from the beginning of the 52-bit
32 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
33 * keep a constant PAGE_OFFSET and "fallback" to using the higher end
34 * of the VMEMMAP where 52-bit support is not available in hardware.
29 */ 35 */
30#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) 36#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \
37 >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT))
31 38
32/* 39/*
33 * PAGE_OFFSET - the virtual address of the start of the linear map (top 40 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
34 * (VA_BITS - 1)) 41 * start of the TTBR1 address space.
35 * KIMAGE_VADDR - the virtual address of the start of the kernel image 42 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
43 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
36 * VA_BITS - the maximum number of bits for virtual addresses. 44 * VA_BITS - the maximum number of bits for virtual addresses.
37 * VA_START - the first kernel virtual address.
38 */ 45 */
39#define VA_BITS (CONFIG_ARM64_VA_BITS) 46#define VA_BITS (CONFIG_ARM64_VA_BITS)
40#define VA_START (UL(0xffffffffffffffff) - \ 47#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
41 (UL(1) << VA_BITS) + 1) 48#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
42#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
43 (UL(1) << (VA_BITS - 1)) + 1)
44#define KIMAGE_VADDR (MODULES_END) 49#define KIMAGE_VADDR (MODULES_END)
45#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE) 50#define BPF_JIT_REGION_START (KASAN_SHADOW_END)
46#define BPF_JIT_REGION_SIZE (SZ_128M) 51#define BPF_JIT_REGION_SIZE (SZ_128M)
47#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) 52#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
48#define MODULES_END (MODULES_VADDR + MODULES_VSIZE) 53#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
49#define MODULES_VADDR (BPF_JIT_REGION_END) 54#define MODULES_VADDR (BPF_JIT_REGION_END)
50#define MODULES_VSIZE (SZ_128M) 55#define MODULES_VSIZE (SZ_128M)
51#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE) 56#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M)
52#define PCI_IO_END (VMEMMAP_START - SZ_2M) 57#define PCI_IO_END (VMEMMAP_START - SZ_2M)
53#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) 58#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
54#define FIXADDR_TOP (PCI_IO_START - SZ_2M) 59#define FIXADDR_TOP (PCI_IO_START - SZ_2M)
55 60
56#define KERNEL_START _text 61#if VA_BITS > 48
57#define KERNEL_END _end 62#define VA_BITS_MIN (48)
63#else
64#define VA_BITS_MIN (VA_BITS)
65#endif
66
67#define _PAGE_END(va) (-(UL(1) << ((va) - 1)))
58 68
59#ifdef CONFIG_ARM64_USER_VA_BITS_52 69#define KERNEL_START _text
70#define KERNEL_END _end
71
72#ifdef CONFIG_ARM64_VA_BITS_52
60#define MAX_USER_VA_BITS 52 73#define MAX_USER_VA_BITS 52
61#else 74#else
62#define MAX_USER_VA_BITS VA_BITS 75#define MAX_USER_VA_BITS VA_BITS
@@ -68,12 +81,14 @@
68 * significantly, so double the (minimum) stack size when they are in use. 81 * significantly, so double the (minimum) stack size when they are in use.
69 */ 82 */
70#ifdef CONFIG_KASAN 83#ifdef CONFIG_KASAN
71#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) 84#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
85#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
86 + KASAN_SHADOW_OFFSET)
72#define KASAN_THREAD_SHIFT 1 87#define KASAN_THREAD_SHIFT 1
73#else 88#else
74#define KASAN_SHADOW_SIZE (0)
75#define KASAN_THREAD_SHIFT 0 89#define KASAN_THREAD_SHIFT 0
76#endif 90#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN))
91#endif /* CONFIG_KASAN */
77 92
78#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) 93#define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT)
79 94
@@ -117,14 +132,14 @@
117 * 16 KB granule: 128 level 3 entries, with contiguous bit 132 * 16 KB granule: 128 level 3 entries, with contiguous bit
118 * 64 KB granule: 32 level 3 entries, with contiguous bit 133 * 64 KB granule: 32 level 3 entries, with contiguous bit
119 */ 134 */
120#define SEGMENT_ALIGN SZ_2M 135#define SEGMENT_ALIGN SZ_2M
121#else 136#else
122/* 137/*
123 * 4 KB granule: 16 level 3 entries, with contiguous bit 138 * 4 KB granule: 16 level 3 entries, with contiguous bit
124 * 16 KB granule: 4 level 3 entries, without contiguous bit 139 * 16 KB granule: 4 level 3 entries, without contiguous bit
125 * 64 KB granule: 1 level 3 entry 140 * 64 KB granule: 1 level 3 entry
126 */ 141 */
127#define SEGMENT_ALIGN SZ_64K 142#define SEGMENT_ALIGN SZ_64K
128#endif 143#endif
129 144
130/* 145/*
@@ -157,10 +172,13 @@
157#endif 172#endif
158 173
159#ifndef __ASSEMBLY__ 174#ifndef __ASSEMBLY__
175extern u64 vabits_actual;
176#define PAGE_END (_PAGE_END(vabits_actual))
160 177
161#include <linux/bitops.h> 178#include <linux/bitops.h>
162#include <linux/mmdebug.h> 179#include <linux/mmdebug.h>
163 180
181extern s64 physvirt_offset;
164extern s64 memstart_addr; 182extern s64 memstart_addr;
165/* PHYS_OFFSET - the physical address of the start of memory. */ 183/* PHYS_OFFSET - the physical address of the start of memory. */
166#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) 184#define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
@@ -176,9 +194,6 @@ static inline unsigned long kaslr_offset(void)
176 return kimage_vaddr - KIMAGE_VADDR; 194 return kimage_vaddr - KIMAGE_VADDR;
177} 195}
178 196
179/* the actual size of a user virtual address */
180extern u64 vabits_user;
181
182/* 197/*
183 * Allow all memory at the discovery stage. We will clip it later. 198 * Allow all memory at the discovery stage. We will clip it later.
184 */ 199 */
@@ -201,24 +216,24 @@ extern u64 vabits_user;
201 * pass on to access_ok(), for instance. 216 * pass on to access_ok(), for instance.
202 */ 217 */
203#define untagged_addr(addr) \ 218#define untagged_addr(addr) \
204 ((__typeof__(addr))sign_extend64((u64)(addr), 55)) 219 ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
205 220
206#ifdef CONFIG_KASAN_SW_TAGS 221#ifdef CONFIG_KASAN_SW_TAGS
207#define __tag_shifted(tag) ((u64)(tag) << 56) 222#define __tag_shifted(tag) ((u64)(tag) << 56)
208#define __tag_set(addr, tag) (__typeof__(addr))( \
209 ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
210#define __tag_reset(addr) untagged_addr(addr) 223#define __tag_reset(addr) untagged_addr(addr)
211#define __tag_get(addr) (__u8)((u64)(addr) >> 56) 224#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
212#else 225#else
226#define __tag_shifted(tag) 0UL
227#define __tag_reset(addr) (addr)
228#define __tag_get(addr) 0
229#endif /* CONFIG_KASAN_SW_TAGS */
230
213static inline const void *__tag_set(const void *addr, u8 tag) 231static inline const void *__tag_set(const void *addr, u8 tag)
214{ 232{
215 return addr; 233 u64 __addr = (u64)addr & ~__tag_shifted(0xff);
234 return (const void *)(__addr | __tag_shifted(tag));
216} 235}
217 236
218#define __tag_reset(addr) (addr)
219#define __tag_get(addr) 0
220#endif
221
222/* 237/*
223 * Physical vs virtual RAM address space conversion. These are 238 * Physical vs virtual RAM address space conversion. These are
224 * private definitions which should NOT be used outside memory.h 239 * private definitions which should NOT be used outside memory.h
@@ -227,19 +242,18 @@ static inline const void *__tag_set(const void *addr, u8 tag)
227 242
228 243
229/* 244/*
 230 * The linear kernel range starts in the middle of the virtual address  245 * The linear kernel range starts at the bottom of the virtual address
231 * space. Testing the top bit for the start of the region is a 246 * space. Testing the top bit for the start of the region is a
232 * sufficient check. 247 * sufficient check and avoids having to worry about the tag.
233 */ 248 */
234#define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1))) 249#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
235 250
236#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) 251#define __lm_to_phys(addr) (((addr) + physvirt_offset))
237#define __kimg_to_phys(addr) ((addr) - kimage_voffset) 252#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
238 253
239#define __virt_to_phys_nodebug(x) ({ \ 254#define __virt_to_phys_nodebug(x) ({ \
240 phys_addr_t __x = (phys_addr_t)(x); \ 255 phys_addr_t __x = (phys_addr_t)(__tag_reset(x)); \
241 __is_lm_address(__x) ? __lm_to_phys(__x) : \ 256 __is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x); \
242 __kimg_to_phys(__x); \
243}) 257})
244 258
245#define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x)) 259#define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x))
@@ -250,9 +264,9 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
250#else 264#else
251#define __virt_to_phys(x) __virt_to_phys_nodebug(x) 265#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
252#define __phys_addr_symbol(x) __pa_symbol_nodebug(x) 266#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
253#endif 267#endif /* CONFIG_DEBUG_VIRTUAL */
254 268
255#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET) 269#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset))
256#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) 270#define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset))
257 271
258/* 272/*
@@ -286,41 +300,38 @@ static inline void *phys_to_virt(phys_addr_t x)
286#define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x)) 300#define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x))
287#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 301#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
288#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 302#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
289#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) 303#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x)))
290#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) 304#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
291 305
292/* 306/*
293 * virt_to_page(k) convert a _valid_ virtual address to struct page * 307 * virt_to_page(x) convert a _valid_ virtual address to struct page *
294 * virt_addr_valid(k) indicates whether a virtual address is valid 308 * virt_addr_valid(x) indicates whether a virtual address is valid
295 */ 309 */
296#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) 310#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
297 311
298#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) 312#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
299#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 313#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
300#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
301#else 314#else
302#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) 315#define page_to_virt(x) ({ \
303#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) 316 __typeof__(x) __page = x; \
304 317 u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\
305#define page_to_virt(page) ({ \ 318 u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \
306 unsigned long __addr = \ 319 (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
307 ((__page_to_voff(page)) | PAGE_OFFSET); \
308 const void *__addr_tag = \
309 __tag_set((void *)__addr, page_kasan_tag(page)); \
310 ((void *)__addr_tag); \
311}) 320})
312 321
313#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) 322#define virt_to_page(x) ({ \
323 u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \
324 u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \
325 (struct page *)__addr; \
326})
327#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
314 328
315#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ 329#define virt_addr_valid(addr) ({ \
316 + PHYS_OFFSET) >> PAGE_SHIFT) 330 __typeof__(addr) __addr = addr; \
317#endif 331 __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
318#endif 332})
319 333
320#define _virt_addr_is_linear(kaddr) \ 334#endif /* !ASSEMBLY */
321 (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
322#define virt_addr_valid(kaddr) \
323 (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
324 335
325/* 336/*
326 * Given that the GIC architecture permits ITS implementations that can only be 337 * Given that the GIC architecture permits ITS implementations that can only be
@@ -335,4 +346,4 @@ static inline void *phys_to_virt(phys_addr_t x)
335 346
336#include <asm-generic/memory_model.h> 347#include <asm-generic/memory_model.h>
337 348
338#endif 349#endif /* __ASM_MEMORY_H */
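The tag arithmetic above, modelled standalone and purely for illustration: __tag_set() splices an 8-bit tag into bits 63:56, while untagged_addr() sign-extends from bit 55, which is why the same helper now untags both user pointers and TTBR1 kernel pointers:

#include <stdint.h>
#include <stdio.h>

static uint64_t tag_set(uint64_t addr, uint8_t tag)
{
	return (addr & ~(0xffULL << 56)) | ((uint64_t)tag << 56);
}

static uint64_t untag(uint64_t addr)
{
	/* sign_extend64(addr, 55): replicate bit 55 into bits 63:56 */
	return (uint64_t)(((int64_t)addr << 8) >> 8);
}

int main(void)
{
	uint64_t kaddr = 0xffff000012345678ULL;	/* bit 55 is set */
	uint64_t tagged = tag_set(kaddr, 0x2a);

	printf("tagged   = 0x%016llx\n", (unsigned long long)tagged);
	printf("untagged = 0x%016llx\n", (unsigned long long)untag(tagged)); /* == kaddr */
	return 0;
}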
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index fd6161336653..f217e3292919 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -126,7 +126,7 @@ extern void init_mem_pgprot(void);
126extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, 126extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
127 unsigned long virt, phys_addr_t size, 127 unsigned long virt, phys_addr_t size,
128 pgprot_t prot, bool page_mappings_only); 128 pgprot_t prot, bool page_mappings_only);
129extern void *fixmap_remap_fdt(phys_addr_t dt_phys); 129extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
130extern void mark_linear_text_alias_ro(void); 130extern void mark_linear_text_alias_ro(void);
131 131
132#define INIT_MM_CONTEXT(name) \ 132#define INIT_MM_CONTEXT(name) \
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 7ed0adb187a8..3827ff4040a3 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd;
63 63
64static inline bool __cpu_uses_extended_idmap(void) 64static inline bool __cpu_uses_extended_idmap(void)
65{ 65{
66 if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52)) 66 if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
67 return false; 67 return false;
68 68
69 return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); 69 return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
@@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
95 isb(); 95 isb();
96} 96}
97 97
98#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)) 98#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
99#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz) 99#define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz)
100 100
101/* 101/*
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h
index 9e690686e8aa..70b323cf8300 100644
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -1,7 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */ 1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __ASM_PCI_H 2#ifndef __ASM_PCI_H
3#define __ASM_PCI_H 3#define __ASM_PCI_H
4#ifdef __KERNEL__
5 4
6#include <linux/types.h> 5#include <linux/types.h>
7#include <linux/slab.h> 6#include <linux/slab.h>
@@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
35} 34}
36#endif /* CONFIG_PCI */ 35#endif /* CONFIG_PCI */
37 36
38#endif /* __KERNEL__ */
39#endif /* __ASM_PCI_H */ 37#endif /* __ASM_PCI_H */
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index db92950bb1a0..3df60f97da1f 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -304,7 +304,7 @@
304#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2) 304#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
305#endif 305#endif
306 306
307#ifdef CONFIG_ARM64_USER_VA_BITS_52 307#ifdef CONFIG_ARM64_VA_BITS_52
308/* Must be at least 64-byte aligned to prevent corruption of the TTBR */ 308/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
309#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \ 309#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \
310 (UL(1) << (48 - PGDIR_SHIFT))) * 8) 310 (UL(1) << (48 - PGDIR_SHIFT))) * 8)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 5fdcfe237338..9a8f7e51c2b1 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -21,9 +21,7 @@
21 * and fixed mappings 21 * and fixed mappings
22 */ 22 */
23#define VMALLOC_START (MODULES_END) 23#define VMALLOC_START (MODULES_END)
24#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) 24#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
25
26#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
27 25
28#define FIRST_USER_ADDRESS 0UL 26#define FIRST_USER_ADDRESS 0UL
29 27
@@ -35,6 +33,8 @@
35#include <linux/mm_types.h> 33#include <linux/mm_types.h>
36#include <linux/sched.h> 34#include <linux/sched.h>
37 35
36extern struct page *vmemmap;
37
38extern void __pte_error(const char *file, int line, unsigned long val); 38extern void __pte_error(const char *file, int line, unsigned long val);
39extern void __pmd_error(const char *file, int line, unsigned long val); 39extern void __pmd_error(const char *file, int line, unsigned long val);
40extern void __pud_error(const char *file, int line, unsigned long val); 40extern void __pud_error(const char *file, int line, unsigned long val);
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
220 * Only if the new pte is valid and kernel, otherwise TLB maintenance 220 * Only if the new pte is valid and kernel, otherwise TLB maintenance
221 * or update_mmu_cache() have the necessary barriers. 221 * or update_mmu_cache() have the necessary barriers.
222 */ 222 */
223 if (pte_valid_not_user(pte)) 223 if (pte_valid_not_user(pte)) {
224 dsb(ishst); 224 dsb(ishst);
225 isb();
226 }
225} 227}
226 228
227extern void __sync_icache_dcache(pte_t pteval); 229extern void __sync_icache_dcache(pte_t pteval);
@@ -481,8 +483,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
481 483
482 WRITE_ONCE(*pmdp, pmd); 484 WRITE_ONCE(*pmdp, pmd);
483 485
484 if (pmd_valid(pmd)) 486 if (pmd_valid(pmd)) {
485 dsb(ishst); 487 dsb(ishst);
488 isb();
489 }
486} 490}
487 491
488static inline void pmd_clear(pmd_t *pmdp) 492static inline void pmd_clear(pmd_t *pmdp)
@@ -540,8 +544,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
540 544
541 WRITE_ONCE(*pudp, pud); 545 WRITE_ONCE(*pudp, pud);
542 546
543 if (pud_valid(pud)) 547 if (pud_valid(pud)) {
544 dsb(ishst); 548 dsb(ishst);
549 isb();
550 }
545} 551}
546 552
547static inline void pud_clear(pud_t *pudp) 553static inline void pud_clear(pud_t *pudp)
@@ -599,6 +605,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
599 605
600 WRITE_ONCE(*pgdp, pgd); 606 WRITE_ONCE(*pgdp, pgd);
601 dsb(ishst); 607 dsb(ishst);
608 isb();
602} 609}
603 610
604static inline void pgd_clear(pgd_t *pgdp) 611static inline void pgd_clear(pgd_t *pgdp)
@@ -856,8 +863,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
856 863
857#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) 864#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
858 865
859#define kc_vaddr_to_offset(v) ((v) & ~VA_START) 866#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END)
860#define kc_offset_to_vaddr(o) ((o) | VA_START) 867#define kc_offset_to_vaddr(o) ((o) | PAGE_END)
861 868
862#ifdef CONFIG_ARM64_PA_BITS_52 869#ifdef CONFIG_ARM64_PA_BITS_52
863#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) 870#define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index d328540cb85e..7a24bad1a58b 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
69 * The EL0 pointer bits used by a pointer authentication code. 69 * The EL0 pointer bits used by a pointer authentication code.
70 * This is dependent on TBI0 being enabled, or bits 63:56 would also apply. 70 * This is dependent on TBI0 being enabled, or bits 63:56 would also apply.
71 */ 71 */
72#define ptrauth_user_pac_mask() GENMASK(54, vabits_user) 72#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual)
73 73
74/* Only valid for EL0 TTBR0 instruction pointers */ 74/* Only valid for EL0 TTBR0 instruction pointers */
75static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) 75static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 368d90a9d0e5..a2ce65a0c1fa 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -9,7 +9,6 @@
9#ifndef __ASM_PROCFNS_H 9#ifndef __ASM_PROCFNS_H
10#define __ASM_PROCFNS_H 10#define __ASM_PROCFNS_H
11 11
12#ifdef __KERNEL__
13#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
14 13
15#include <asm/page.h> 14#include <asm/page.h>
@@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
25#include <asm/memory.h> 24#include <asm/memory.h>
26 25
27#endif /* __ASSEMBLY__ */ 26#endif /* __ASSEMBLY__ */
28#endif /* __KERNEL__ */
29#endif /* __ASM_PROCFNS_H */ 27#endif /* __ASM_PROCFNS_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 844e2964b0f5..c67848c55009 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -20,7 +20,6 @@
20#define NET_IP_ALIGN 0 20#define NET_IP_ALIGN 0
21 21
22#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
23#ifdef __KERNEL__
24 23
25#include <linux/build_bug.h> 24#include <linux/build_bug.h>
26#include <linux/cache.h> 25#include <linux/cache.h>
@@ -42,8 +41,8 @@
42 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. 41 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
43 */ 42 */
44 43
45#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS) 44#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN)
46#define TASK_SIZE_64 (UL(1) << vabits_user) 45#define TASK_SIZE_64 (UL(1) << vabits_actual)
47 46
48#ifdef CONFIG_COMPAT 47#ifdef CONFIG_COMPAT
49#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) 48#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
@@ -283,8 +282,6 @@ static inline void spin_lock_prefetch(const void *ptr)
283 282
284#define HAVE_ARCH_PICK_MMAP_LAYOUT 283#define HAVE_ARCH_PICK_MMAP_LAYOUT
285 284
286#endif
287
288extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ 285extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
289extern void __init minsigstksz_setup(void); 286extern void __init minsigstksz_setup(void);
290 287
@@ -306,6 +303,14 @@ extern void __init minsigstksz_setup(void);
306/* PR_PAC_RESET_KEYS prctl */ 303/* PR_PAC_RESET_KEYS prctl */
307#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) 304#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
308 305
306#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
307/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
308long set_tagged_addr_ctrl(unsigned long arg);
309long get_tagged_addr_ctrl(void);
310#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg)
311#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl()
312#endif
313
309/* 314/*
310 * For CONFIG_GCC_PLUGIN_STACKLEAK 315 * For CONFIG_GCC_PLUGIN_STACKLEAK
311 * 316 *
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 1dcf63a9ac1f..fbebb411ae20 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
301 return regs->regs[0]; 301 return regs->regs[0];
302} 302}
303 303
304static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
305{
306 regs->regs[0] = rc;
307}
308
304/** 309/**
305 * regs_get_kernel_argument() - get Nth function argument in kernel 310 * regs_get_kernel_argument() - get Nth function argument in kernel
306 * @regs: pt_regs of that context 311 * @regs: pt_regs of that context
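regs_set_return_value() is the hook the generic function-error-injection code expects; the new arch/arm64/lib/error-inject.c in this series pairs it with a helper along these lines (a sketch, not verbatim):

void override_function_with_return(struct pt_regs *regs)
{
	/*
	 * Make the kprobed function "return" to its caller immediately;
	 * the injection core then plants the fake return value via
	 * regs_set_return_value().
	 */
	instruction_pointer_set(regs, procedure_link_pointer(regs));
}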
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index bd43d1cf724b..7e9f163d02ec 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -5,7 +5,6 @@
5#ifndef __ASM_SIGNAL32_H 5#ifndef __ASM_SIGNAL32_H
6#define __ASM_SIGNAL32_H 6#define __ASM_SIGNAL32_H
7 7
8#ifdef __KERNEL__
9#ifdef CONFIG_COMPAT 8#ifdef CONFIG_COMPAT
10#include <linux/compat.h> 9#include <linux/compat.h>
11 10
@@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs)
79{ 78{
80} 79}
81#endif /* CONFIG_COMPAT */ 80#endif /* CONFIG_COMPAT */
82#endif /* __KERNEL__ */
83#endif /* __ASM_SIGNAL32_H */ 81#endif /* __ASM_SIGNAL32_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 06ebcfef73df..972d196c7714 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -212,6 +212,9 @@
212#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) 212#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
213#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) 213#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
214 214
 215#define SYS_PAR_EL1_F			BIT(0)
216#define SYS_PAR_EL1_FST GENMASK(6, 1)
217
215/*** Statistical Profiling Extension ***/ 218/*** Statistical Profiling Extension ***/
216/* ID registers */ 219/* ID registers */
217#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) 220#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
@@ -499,28 +502,11 @@
499#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ 502#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
500 (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ 503 (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
501 (BIT(29))) 504 (BIT(29)))
502#define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \
503 (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \
504 (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \
505 (BIT(27)) | (BIT(30)) | (BIT(31)) | \
506 (0xffffefffUL << 32))
507 505
508#ifdef CONFIG_CPU_BIG_ENDIAN 506#ifdef CONFIG_CPU_BIG_ENDIAN
509#define ENDIAN_SET_EL2 SCTLR_ELx_EE 507#define ENDIAN_SET_EL2 SCTLR_ELx_EE
510#define ENDIAN_CLEAR_EL2 0
511#else 508#else
512#define ENDIAN_SET_EL2 0 509#define ENDIAN_SET_EL2 0
513#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE
514#endif
515
516/* SCTLR_EL2 value used for the hyp-stub */
517#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
518#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
519 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
520 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
521
522#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
523#error "Inconsistent SCTLR_EL2 set/clear bits"
524#endif 510#endif
525 511
526/* SCTLR_EL1 specific flags. */ 512/* SCTLR_EL1 specific flags. */
@@ -539,16 +525,11 @@
539 525
540#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ 526#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
541 (BIT(29))) 527 (BIT(29)))
542#define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \
543 (BIT(27)) | (BIT(30)) | (BIT(31)) | \
544 (0xffffefffUL << 32))
545 528
546#ifdef CONFIG_CPU_BIG_ENDIAN 529#ifdef CONFIG_CPU_BIG_ENDIAN
547#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) 530#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
548#define ENDIAN_CLEAR_EL1 0
549#else 531#else
550#define ENDIAN_SET_EL1 0 532#define ENDIAN_SET_EL1 0
551#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
552#endif 533#endif
553 534
554#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ 535#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
@@ -556,13 +537,6 @@
556 SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ 537 SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
557 SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ 538 SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
558 ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) 539 ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
559#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
560 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
561 SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
562
563#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
564#error "Inconsistent SCTLR_EL1 set/clear bits"
565#endif
566 540
567/* id_aa64isar0 */ 541/* id_aa64isar0 */
568#define ID_AA64ISAR0_TS_SHIFT 52 542#define ID_AA64ISAR0_TS_SHIFT 52
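The new PAR_EL1 field definitions are consumed when probing a stage 1 translation with an AT instruction, roughly as below (a sketch in the style of this series' fault-handling code; FIELD_GET is from <linux/bitfield.h>, and the real code also disables interrupts around the probe):

static u64 stage1_fault_status(unsigned long addr)
{
	u64 par;

	asm volatile("at s1e1r, %0" :: "r" (addr));
	isb();
	par = read_sysreg(par_el1);

	/* PAR_EL1.F clear means the translation succeeded */
	if (!(par & SYS_PAR_EL1_F))
		return 0;

	/* otherwise the fault status code lives in PAR_EL1.FST */
	return FIELD_GET(SYS_PAR_EL1_FST, par);
}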
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 180b34ec5965..f0cec4160136 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -8,8 +8,6 @@
8#ifndef __ASM_THREAD_INFO_H 8#ifndef __ASM_THREAD_INFO_H
9#define __ASM_THREAD_INFO_H 9#define __ASM_THREAD_INFO_H
10 10
11#ifdef __KERNEL__
12
13#include <linux/compiler.h> 11#include <linux/compiler.h>
14 12
15#ifndef __ASSEMBLY__ 13#ifndef __ASSEMBLY__
@@ -59,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk);
59 57
60#endif 58#endif
61 59
62/* 60#define TIF_SIGPENDING 0 /* signal pending */
63 * thread information flags: 61#define TIF_NEED_RESCHED 1 /* rescheduling necessary */
64 * TIF_SYSCALL_TRACE - syscall trace active
65 * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
66 * TIF_SYSCALL_AUDIT - syscall auditing
67 * TIF_SECCOMP - syscall secure computing
68 * TIF_SYSCALL_EMU - syscall emulation active
69 * TIF_SIGPENDING - signal pending
70 * TIF_NEED_RESCHED - rescheduling necessary
71 * TIF_NOTIFY_RESUME - callback before returning to user
72 */
73#define TIF_SIGPENDING 0
74#define TIF_NEED_RESCHED 1
75#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ 62#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
76#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ 63#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
77#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ 64#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
78#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ 65#define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
79#define TIF_NOHZ 7 66#define TIF_NOHZ 7
80#define TIF_SYSCALL_TRACE 8 67#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
81#define TIF_SYSCALL_AUDIT 9 68#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */
82#define TIF_SYSCALL_TRACEPOINT 10 69#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */
83#define TIF_SECCOMP 11 70#define TIF_SECCOMP 11 /* syscall secure computing */
84#define TIF_SYSCALL_EMU 12 71#define TIF_SYSCALL_EMU 12 /* syscall emulation active */
85#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 72#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
86#define TIF_FREEZE 19 73#define TIF_FREEZE 19
87#define TIF_RESTORE_SIGMASK 20 74#define TIF_RESTORE_SIGMASK 20
@@ -90,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk);
90#define TIF_SVE 23 /* Scalable Vector Extension in use */ 77#define TIF_SVE 23 /* Scalable Vector Extension in use */
91#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ 78#define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */
92#define TIF_SSBD 25 /* Wants SSB mitigation */ 79#define TIF_SSBD 25 /* Wants SSB mitigation */
80#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */
93 81
94#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) 82#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
95#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) 83#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -121,5 +109,4 @@ void arch_release_task_struct(struct task_struct *tsk);
121 .addr_limit = KERNEL_DS, \ 109 .addr_limit = KERNEL_DS, \
122} 110}
123 111
124#endif /* __KERNEL__ */
125#endif /* __ASM_THREAD_INFO_H */ 112#endif /* __ASM_THREAD_INFO_H */
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 8af7a85f76bd..bc3949064725 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
251 dsb(ishst); 251 dsb(ishst);
252 __tlbi(vaae1is, addr); 252 __tlbi(vaae1is, addr);
253 dsb(ish); 253 dsb(ish);
254 isb();
254} 255}
255#endif 256#endif
256 257
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 0524f2438649..a4d945db95a2 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -4,29 +4,6 @@
4 4
5#include <linux/cpumask.h> 5#include <linux/cpumask.h>
6 6
7struct cpu_topology {
8 int thread_id;
9 int core_id;
10 int package_id;
11 int llc_id;
12 cpumask_t thread_sibling;
13 cpumask_t core_sibling;
14 cpumask_t llc_sibling;
15};
16
17extern struct cpu_topology cpu_topology[NR_CPUS];
18
19#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
20#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
21#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
22#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
23#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
24
25void init_cpu_topology(void);
26void store_cpu_topology(unsigned int cpuid);
27void remove_cpu_topology(unsigned int cpuid);
28const struct cpumask *cpu_coregroup_mask(int cpu);
29
30#ifdef CONFIG_NUMA 7#ifdef CONFIG_NUMA
31 8
32struct pci_bus; 9struct pci_bus;
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 5a1c32260c1f..097d6bfac0b7 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -62,6 +62,10 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
62{ 62{
63 unsigned long ret, limit = current_thread_info()->addr_limit; 63 unsigned long ret, limit = current_thread_info()->addr_limit;
64 64
65 if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
66 test_thread_flag(TIF_TAGGED_ADDR))
67 addr = untagged_addr(addr);
68
65 __chk_user_ptr(addr); 69 __chk_user_ptr(addr);
66 asm volatile( 70 asm volatile(
67 // A + B <= C + 1 for all A,B,C, in four easy steps: 71 // A + B <= C + 1 for all A,B,C, in four easy steps:
@@ -215,7 +219,8 @@ static inline void uaccess_enable_not_uao(void)
215 219
216/* 220/*
217 * Sanitise a uaccess pointer such that it becomes NULL if above the 221 * Sanitise a uaccess pointer such that it becomes NULL if above the
218 * current addr_limit. 222 * current addr_limit. In case the pointer is tagged (has the top byte set),
223 * untag the pointer before checking.
219 */ 224 */
220#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) 225#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
221static inline void __user *__uaccess_mask_ptr(const void __user *ptr) 226static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -223,10 +228,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
223 void __user *safe_ptr; 228 void __user *safe_ptr;
224 229
225 asm volatile( 230 asm volatile(
226 " bics xzr, %1, %2\n" 231 " bics xzr, %3, %2\n"
227 " csel %0, %1, xzr, eq\n" 232 " csel %0, %1, xzr, eq\n"
228 : "=&r" (safe_ptr) 233 : "=&r" (safe_ptr)
229 : "r" (ptr), "r" (current_thread_info()->addr_limit) 234 : "r" (ptr), "r" (current_thread_info()->addr_limit),
235 "r" (untagged_addr(ptr))
230 : "cc"); 236 : "cc");
231 237
232 csdb(); 238 csdb();
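
Note: both hunks lean on untagged_addr() to strip a non-zero top byte before the addr_limit check. On arm64 this is a sign extension from bit 55, so kernel pointers (bit 55 set) keep their all-ones top byte while tagged user pointers get bits 63:56 cleared. A minimal sketch of the idea (the kernel's actual definition lives in asm/memory.h):

```c
/* sketch: replicate bit 55 into bits 63:56, clearing any user tag */
static inline unsigned long untag_user_addr(unsigned long addr)
{
	return (unsigned long)(((long)(addr << 8)) >> 8);
}
```

For example, a user pointer 0x5b00ffffdeadbeef untags to 0x0000ffffdeadbeef, while a kernel address such as 0xffff000012345678 passes through unchanged.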
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h
index 9c15e0a06301..07468428fd29 100644
--- a/arch/arm64/include/asm/vdso.h
+++ b/arch/arm64/include/asm/vdso.h
@@ -5,8 +5,6 @@
5#ifndef __ASM_VDSO_H 5#ifndef __ASM_VDSO_H
6#define __ASM_VDSO_H 6#define __ASM_VDSO_H
7 7
8#ifdef __KERNEL__
9
10/* 8/*
11 * Default link address for the vDSO. 9 * Default link address for the vDSO.
12 * Since we randomise the VDSO mapping, there's little point in trying 10 * Since we randomise the VDSO mapping, there's little point in trying
@@ -28,6 +26,4 @@
28 26
29#endif /* !__ASSEMBLY__ */ 27#endif /* !__ASSEMBLY__ */
30 28
31#endif /* __KERNEL__ */
32
33#endif /* __ASM_VDSO_H */ 29#endif /* __ASM_VDSO_H */
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
index ba6dbc3de864..1f38bf330a6e 100644
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ b/arch/arm64/include/asm/vdso_datapage.h
@@ -5,8 +5,6 @@
5#ifndef __ASM_VDSO_DATAPAGE_H 5#ifndef __ASM_VDSO_DATAPAGE_H
6#define __ASM_VDSO_DATAPAGE_H 6#define __ASM_VDSO_DATAPAGE_H
7 7
8#ifdef __KERNEL__
9
10#ifndef __ASSEMBLY__ 8#ifndef __ASSEMBLY__
11 9
12struct vdso_data { 10struct vdso_data {
@@ -32,6 +30,4 @@ struct vdso_data {
32 30
33#endif /* !__ASSEMBLY__ */ 31#endif /* !__ASSEMBLY__ */
34 32
35#endif /* __KERNEL__ */
36
37#endif /* __ASM_VDSO_DATAPAGE_H */ 33#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h
deleted file mode 100644
index 313325fa22fa..000000000000
--- a/arch/arm64/include/uapi/asm/stat.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2/*
3 * Copyright (C) 2012 ARM Ltd.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17#include <asm-generic/stat.h>
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d19d14ba9ae4..95201e5ff5e1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -886,7 +886,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
886 u32 midr = read_cpuid_id(); 886 u32 midr = read_cpuid_id();
887 887
888 /* Cavium ThunderX pass 1.x and 2.x */ 888 /* Cavium ThunderX pass 1.x and 2.x */
889 return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, 889 return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
890 MIDR_CPU_VAR_REV(0, 0), 890 MIDR_CPU_VAR_REV(0, 0),
891 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); 891 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
892} 892}
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index d1048173fd8a..e4d6af2fdec7 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -11,6 +11,7 @@
11#include <linux/cpu_pm.h> 11#include <linux/cpu_pm.h>
12#include <linux/of.h> 12#include <linux/of.h>
13#include <linux/of_device.h> 13#include <linux/of_device.h>
14#include <linux/psci.h>
14 15
15#include <asm/cpuidle.h> 16#include <asm/cpuidle.h>
16#include <asm/cpu_ops.h> 17#include <asm/cpu_ops.h>
@@ -46,17 +47,58 @@ int arm_cpuidle_suspend(int index)
46 47
47#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) 48#define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags))
48 49
50static int psci_acpi_cpu_init_idle(unsigned int cpu)
51{
52 int i, count;
53 struct acpi_lpi_state *lpi;
54 struct acpi_processor *pr = per_cpu(processors, cpu);
55
56 /*
57 * If the PSCI cpu_suspend function hook has not been initialized
58 * idle states must not be enabled, so bail out
59 */
60 if (!psci_ops.cpu_suspend)
61 return -EOPNOTSUPP;
62
63 if (unlikely(!pr || !pr->flags.has_lpi))
64 return -EINVAL;
65
66 count = pr->power.count - 1;
67 if (count <= 0)
68 return -ENODEV;
69
70 for (i = 0; i < count; i++) {
71 u32 state;
72
73 lpi = &pr->power.lpi_states[i + 1];
74 /*
75 * Only bits[31:0] represent a PSCI power_state while
76 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
77 */
78 state = lpi->address;
79 if (!psci_power_state_is_valid(state)) {
80 pr_warn("Invalid PSCI power state %#x\n", state);
81 return -EINVAL;
82 }
83 }
84
85 return 0;
86}
87
49int acpi_processor_ffh_lpi_probe(unsigned int cpu) 88int acpi_processor_ffh_lpi_probe(unsigned int cpu)
50{ 89{
51 return arm_cpuidle_init(cpu); 90 return psci_acpi_cpu_init_idle(cpu);
52} 91}
53 92
54int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) 93int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
55{ 94{
95 u32 state = lpi->address;
96
56 if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) 97 if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
57 return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend, 98 return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
58 lpi->index); 99 lpi->index, state);
59 else 100 else
60 return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index); 101 return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
102 lpi->index, state);
61} 103}
62#endif 104#endif
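
Note: psci_acpi_cpu_init_idle() validates each ACPI LPI descriptor's address field as a PSCI power_state before idle states are enabled, and acpi_processor_ffh_lpi_enter() now passes that value straight to psci_cpu_suspend_enter(). For PSCI 0.2's original (non-extended) StateID format, validity roughly means no bits outside the defined fields are set; a hedged sketch of that layout (field positions per the PSCI spec, macro names hypothetical):

```c
#include <linux/bits.h>

#define PSCI_STATE_ID	GENMASK(15, 0)	/* platform-specific state ID */
#define PSCI_STATE_TYPE	BIT(16)		/* 0: standby, 1: powerdown */
#define PSCI_STATE_AFFL	GENMASK(25, 24)	/* deepest affinity level affected */

static bool power_state_is_valid(u32 state)
{
	/* sketch: anything outside the defined fields must be zero */
	return !(state & ~(PSCI_STATE_ID | PSCI_STATE_TYPE | PSCI_STATE_AFFL));
}
```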
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 876055e37352..05933c065732 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -33,7 +33,7 @@
33DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); 33DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
34static struct cpuinfo_arm64 boot_cpu_data; 34static struct cpuinfo_arm64 boot_cpu_data;
35 35
36static char *icache_policy_str[] = { 36static const char *icache_policy_str[] = {
37 [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN", 37 [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN",
38 [ICACHE_POLICY_VIPT] = "VIPT", 38 [ICACHE_POLICY_VIPT] = "VIPT",
39 [ICACHE_POLICY_PIPT] = "PIPT", 39 [ICACHE_POLICY_PIPT] = "PIPT",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 320a30dbe35e..84a822748c84 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -30,9 +30,9 @@
30 * Context tracking subsystem. Used to instrument transitions 30 * Context tracking subsystem. Used to instrument transitions
31 * between user and kernel mode. 31 * between user and kernel mode.
32 */ 32 */
33 .macro ct_user_exit 33 .macro ct_user_exit_irqoff
34#ifdef CONFIG_CONTEXT_TRACKING 34#ifdef CONFIG_CONTEXT_TRACKING
35 bl context_tracking_user_exit 35 bl enter_from_user_mode
36#endif 36#endif
37 .endm 37 .endm
38 38
@@ -792,8 +792,8 @@ el0_cp15:
792 /* 792 /*
793 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions 793 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
794 */ 794 */
795 ct_user_exit_irqoff
795 enable_daif 796 enable_daif
796 ct_user_exit
797 mov x0, x25 797 mov x0, x25
798 mov x1, sp 798 mov x1, sp
799 bl do_cp15instr 799 bl do_cp15instr
@@ -805,8 +805,8 @@ el0_da:
805 * Data abort handling 805 * Data abort handling
806 */ 806 */
807 mrs x26, far_el1 807 mrs x26, far_el1
808 ct_user_exit_irqoff
808 enable_daif 809 enable_daif
809 ct_user_exit
810 clear_address_tag x0, x26 810 clear_address_tag x0, x26
811 mov x1, x25 811 mov x1, x25
812 mov x2, sp 812 mov x2, sp
@@ -818,11 +818,11 @@ el0_ia:
818 */ 818 */
819 mrs x26, far_el1 819 mrs x26, far_el1
820 gic_prio_kentry_setup tmp=x0 820 gic_prio_kentry_setup tmp=x0
821 ct_user_exit_irqoff
821 enable_da_f 822 enable_da_f
822#ifdef CONFIG_TRACE_IRQFLAGS 823#ifdef CONFIG_TRACE_IRQFLAGS
823 bl trace_hardirqs_off 824 bl trace_hardirqs_off
824#endif 825#endif
825 ct_user_exit
826 mov x0, x26 826 mov x0, x26
827 mov x1, x25 827 mov x1, x25
828 mov x2, sp 828 mov x2, sp
@@ -832,8 +832,8 @@ el0_fpsimd_acc:
832 /* 832 /*
833 * Floating Point or Advanced SIMD access 833 * Floating Point or Advanced SIMD access
834 */ 834 */
835 ct_user_exit_irqoff
835 enable_daif 836 enable_daif
836 ct_user_exit
837 mov x0, x25 837 mov x0, x25
838 mov x1, sp 838 mov x1, sp
839 bl do_fpsimd_acc 839 bl do_fpsimd_acc
@@ -842,8 +842,8 @@ el0_sve_acc:
842 /* 842 /*
843 * Scalable Vector Extension access 843 * Scalable Vector Extension access
844 */ 844 */
845 ct_user_exit_irqoff
845 enable_daif 846 enable_daif
846 ct_user_exit
847 mov x0, x25 847 mov x0, x25
848 mov x1, sp 848 mov x1, sp
849 bl do_sve_acc 849 bl do_sve_acc
@@ -852,8 +852,8 @@ el0_fpsimd_exc:
852 /* 852 /*
853 * Floating Point, Advanced SIMD or SVE exception 853 * Floating Point, Advanced SIMD or SVE exception
854 */ 854 */
855 ct_user_exit_irqoff
855 enable_daif 856 enable_daif
856 ct_user_exit
857 mov x0, x25 857 mov x0, x25
858 mov x1, sp 858 mov x1, sp
859 bl do_fpsimd_exc 859 bl do_fpsimd_exc
@@ -868,11 +868,11 @@ el0_sp_pc:
868 * Stack or PC alignment exception handling 868 * Stack or PC alignment exception handling
869 */ 869 */
870 gic_prio_kentry_setup tmp=x0 870 gic_prio_kentry_setup tmp=x0
871 ct_user_exit_irqoff
871 enable_da_f 872 enable_da_f
872#ifdef CONFIG_TRACE_IRQFLAGS 873#ifdef CONFIG_TRACE_IRQFLAGS
873 bl trace_hardirqs_off 874 bl trace_hardirqs_off
874#endif 875#endif
875 ct_user_exit
876 mov x0, x26 876 mov x0, x26
877 mov x1, x25 877 mov x1, x25
878 mov x2, sp 878 mov x2, sp
@@ -882,8 +882,8 @@ el0_undef:
882 /* 882 /*
883 * Undefined instruction 883 * Undefined instruction
884 */ 884 */
885 ct_user_exit_irqoff
885 enable_daif 886 enable_daif
886 ct_user_exit
887 mov x0, sp 887 mov x0, sp
888 bl do_undefinstr 888 bl do_undefinstr
889 b ret_to_user 889 b ret_to_user
@@ -891,8 +891,8 @@ el0_sys:
891 /* 891 /*
892 * System instructions, for trapped cache maintenance instructions 892 * System instructions, for trapped cache maintenance instructions
893 */ 893 */
894 ct_user_exit_irqoff
894 enable_daif 895 enable_daif
895 ct_user_exit
896 mov x0, x25 896 mov x0, x25
897 mov x1, sp 897 mov x1, sp
898 bl do_sysinstr 898 bl do_sysinstr
@@ -902,17 +902,18 @@ el0_dbg:
902 * Debug exception handling 902 * Debug exception handling
903 */ 903 */
904 tbnz x24, #0, el0_inv // EL0 only 904 tbnz x24, #0, el0_inv // EL0 only
905 mrs x24, far_el1
905 gic_prio_kentry_setup tmp=x3 906 gic_prio_kentry_setup tmp=x3
906 mrs x0, far_el1 907 ct_user_exit_irqoff
908 mov x0, x24
907 mov x1, x25 909 mov x1, x25
908 mov x2, sp 910 mov x2, sp
909 bl do_debug_exception 911 bl do_debug_exception
910 enable_da_f 912 enable_da_f
911 ct_user_exit
912 b ret_to_user 913 b ret_to_user
913el0_inv: 914el0_inv:
915 ct_user_exit_irqoff
914 enable_daif 916 enable_daif
915 ct_user_exit
916 mov x0, sp 917 mov x0, sp
917 mov x1, #BAD_SYNC 918 mov x1, #BAD_SYNC
918 mov x2, x25 919 mov x2, x25
@@ -925,13 +926,13 @@ el0_irq:
925 kernel_entry 0 926 kernel_entry 0
926el0_irq_naked: 927el0_irq_naked:
927 gic_prio_irq_setup pmr=x20, tmp=x0 928 gic_prio_irq_setup pmr=x20, tmp=x0
929 ct_user_exit_irqoff
928 enable_da_f 930 enable_da_f
929 931
930#ifdef CONFIG_TRACE_IRQFLAGS 932#ifdef CONFIG_TRACE_IRQFLAGS
931 bl trace_hardirqs_off 933 bl trace_hardirqs_off
932#endif 934#endif
933 935
934 ct_user_exit
935#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR 936#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
936 tbz x22, #55, 1f 937 tbz x22, #55, 1f
937 bl do_el0_irq_bp_hardening 938 bl do_el0_irq_bp_hardening
@@ -958,13 +959,14 @@ ENDPROC(el1_error)
958el0_error: 959el0_error:
959 kernel_entry 0 960 kernel_entry 0
960el0_error_naked: 961el0_error_naked:
961 mrs x1, esr_el1 962 mrs x25, esr_el1
962 gic_prio_kentry_setup tmp=x2 963 gic_prio_kentry_setup tmp=x2
964 ct_user_exit_irqoff
963 enable_dbg 965 enable_dbg
964 mov x0, sp 966 mov x0, sp
967 mov x1, x25
965 bl do_serror 968 bl do_serror
966 enable_da_f 969 enable_da_f
967 ct_user_exit
968 b ret_to_user 970 b ret_to_user
969ENDPROC(el0_error) 971ENDPROC(el0_error)
970 972
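
Note: the EL0 handlers in these hunks now call the context-tracking hook before unmasking interrupts (enable_daif/enable_da_f), where previously it ran after. The old ordering left a window in which an interrupt could run kernel code while context tracking, and hence RCU, still believed the CPU was in user mode. Sketched in C for illustration only (local_daif_restore() is the C counterpart of enable_daif; enter_from_user_mode() is the C body added in the traps.c hunk below):

```c
/* old (broken) ordering, sketched */
local_daif_restore(DAIF_PROCCTX);	/* IRQs unmasked here ... */
/* <-- an IRQ landing here runs handlers with ct_state() == CONTEXT_USER */
context_tracking_user_exit();		/* ... context tracking told too late */

/* new ordering: tell context tracking first, with IRQs still masked */
enter_from_user_mode();
local_daif_restore(DAIF_PROCCTX);
```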
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0baadf335172..989b1944cb71 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -102,6 +102,8 @@ pe_header:
102 * x23 stext() .. start_kernel() physical misalignment/KASLR offset 102 * x23 stext() .. start_kernel() physical misalignment/KASLR offset
103 * x28 __create_page_tables() callee preserved temp register 103 * x28 __create_page_tables() callee preserved temp register
104 * x19/x20 __primary_switch() callee preserved temp registers 104 * x19/x20 __primary_switch() callee preserved temp registers
105 * x24 __primary_switch() .. relocate_kernel()
106 * current RELR displacement
105 */ 107 */
106ENTRY(stext) 108ENTRY(stext)
107 bl preserve_boot_args 109 bl preserve_boot_args
@@ -308,15 +310,15 @@ __create_page_tables:
308 adrp x0, idmap_pg_dir 310 adrp x0, idmap_pg_dir
309 adrp x3, __idmap_text_start // __pa(__idmap_text_start) 311 adrp x3, __idmap_text_start // __pa(__idmap_text_start)
310 312
311#ifdef CONFIG_ARM64_USER_VA_BITS_52 313#ifdef CONFIG_ARM64_VA_BITS_52
312 mrs_s x6, SYS_ID_AA64MMFR2_EL1 314 mrs_s x6, SYS_ID_AA64MMFR2_EL1
313 and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT) 315 and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
314 mov x5, #52 316 mov x5, #52
315 cbnz x6, 1f 317 cbnz x6, 1f
316#endif 318#endif
317 mov x5, #VA_BITS 319 mov x5, #VA_BITS_MIN
3181: 3201:
319 adr_l x6, vabits_user 321 adr_l x6, vabits_actual
320 str x5, [x6] 322 str x5, [x6]
321 dmb sy 323 dmb sy
322 dc ivac, x6 // Invalidate potentially stale cache line 324 dc ivac, x6 // Invalidate potentially stale cache line
@@ -780,7 +782,7 @@ ENTRY(__enable_mmu)
780 phys_to_ttbr x1, x1 782 phys_to_ttbr x1, x1
781 phys_to_ttbr x2, x2 783 phys_to_ttbr x2, x2
782 msr ttbr0_el1, x2 // load TTBR0 784 msr ttbr0_el1, x2 // load TTBR0
783 offset_ttbr1 x1 785 offset_ttbr1 x1, x3
784 msr ttbr1_el1, x1 // load TTBR1 786 msr ttbr1_el1, x1 // load TTBR1
785 isb 787 isb
786 msr sctlr_el1, x0 788 msr sctlr_el1, x0
@@ -797,8 +799,8 @@ ENTRY(__enable_mmu)
797ENDPROC(__enable_mmu) 799ENDPROC(__enable_mmu)
798 800
799ENTRY(__cpu_secondary_check52bitva) 801ENTRY(__cpu_secondary_check52bitva)
800#ifdef CONFIG_ARM64_USER_VA_BITS_52 802#ifdef CONFIG_ARM64_VA_BITS_52
801 ldr_l x0, vabits_user 803 ldr_l x0, vabits_actual
802 cmp x0, #52 804 cmp x0, #52
803 b.ne 2f 805 b.ne 2f
804 806
@@ -842,14 +844,93 @@ __relocate_kernel:
842 844
8430: cmp x9, x10 8450: cmp x9, x10
844 b.hs 1f 846 b.hs 1f
845 ldp x11, x12, [x9], #24 847 ldp x12, x13, [x9], #24
846 ldr x13, [x9, #-8] 848 ldr x14, [x9, #-8]
847 cmp w12, #R_AARCH64_RELATIVE 849 cmp w13, #R_AARCH64_RELATIVE
848 b.ne 0b 850 b.ne 0b
849 add x13, x13, x23 // relocate 851 add x14, x14, x23 // relocate
850 str x13, [x11, x23] 852 str x14, [x12, x23]
851 b 0b 853 b 0b
8521: ret 854
8551:
856#ifdef CONFIG_RELR
857 /*
858 * Apply RELR relocations.
859 *
860 * RELR is a compressed format for storing relative relocations. The
861 * encoded sequence of entries looks like:
862 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
863 *
864 * i.e. start with an address, followed by any number of bitmaps. The
865 * address entry encodes 1 relocation. The subsequent bitmap entries
866 * encode up to 63 relocations each, at subsequent offsets following
867 * the last address entry.
868 *
869 * The bitmap entries must have 1 in the least significant bit. The
870 * assumption here is that an address cannot have 1 in lsb. Odd
871 * addresses are not supported. Any odd addresses are stored in the RELA
872 * section, which is handled above.
873 *
874 * Excluding the least significant bit in the bitmap, each non-zero
875 * bit in the bitmap represents a relocation to be applied to
876 * a corresponding machine word that follows the base address
877 * word. The second least significant bit represents the machine
878 * word immediately following the initial address, and each bit
879 * that follows represents the next word, in linear order. As such,
880 * a single bitmap can encode up to 63 relocations in a 64-bit object.
881 *
882 * In this implementation we store the address of the next RELR table
883 * entry in x9, the address being relocated by the current address or
884 * bitmap entry in x13 and the address being relocated by the current
885 * bit in x14.
886 *
887 * Because addends are stored in place in the binary, RELR relocations
888 * cannot be applied idempotently. We use x24 to keep track of the
889 * currently applied displacement so that we can correctly relocate if
890 * __relocate_kernel is called twice with non-zero displacements (i.e.
891 * if there is both a physical misalignment and a KASLR displacement).
892 */
893 ldr w9, =__relr_offset // offset to reloc table
894 ldr w10, =__relr_size // size of reloc table
895 add x9, x9, x11 // __va(.relr)
896 add x10, x9, x10 // __va(.relr) + sizeof(.relr)
897
898 sub x15, x23, x24 // delta from previous offset
899 cbz x15, 7f // nothing to do if unchanged
900 mov x24, x23 // save new offset
901
9022: cmp x9, x10
903 b.hs 7f
904 ldr x11, [x9], #8
905 tbnz x11, #0, 3f // branch to handle bitmaps
906 add x13, x11, x23
907 ldr x12, [x13] // relocate address entry
908 add x12, x12, x15
909 str x12, [x13], #8 // adjust to start of bitmap
910 b 2b
911
9123: mov x14, x13
9134: lsr x11, x11, #1
914 cbz x11, 6f
915 tbz x11, #0, 5f // skip bit if not set
916 ldr x12, [x14] // relocate bit
917 add x12, x12, x15
918 str x12, [x14]
919
9205: add x14, x14, #8 // move to next bit's address
921 b 4b
922
9236: /*
924 * Move to the next bitmap's address. 8 is the word size, and 63 is the
925 * number of significant bits in a bitmap entry.
926 */
927 add x13, x13, #(8 * 63)
928 b 2b
929
9307:
931#endif
932 ret
933
853ENDPROC(__relocate_kernel) 934ENDPROC(__relocate_kernel)
854#endif 935#endif
855 936
@@ -862,6 +943,9 @@ __primary_switch:
862 adrp x1, init_pg_dir 943 adrp x1, init_pg_dir
863 bl __enable_mmu 944 bl __enable_mmu
864#ifdef CONFIG_RELOCATABLE 945#ifdef CONFIG_RELOCATABLE
946#ifdef CONFIG_RELR
947 mov x24, #0 // no RELR displacement yet
948#endif
865 bl __relocate_kernel 949 bl __relocate_kernel
866#ifdef CONFIG_RANDOMIZE_BASE 950#ifdef CONFIG_RANDOMIZE_BASE
867 ldr x8, =__primary_switched 951 ldr x8, =__primary_switched
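
Note: the RELR decoding loop above is easier to follow in C. A hedged sketch of the same algorithm, assuming base is the run-time address the table's offsets are relative to and delta the displacement to apply (the kernel additionally subtracts the previously applied displacement, tracked in x24, as the comment in the patch explains):

```c
#include <stdint.h>

static void apply_relr(const uint64_t *relr, const uint64_t *end,
		       uint64_t base, uint64_t delta)
{
	uint64_t *where = 0;

	for (; relr < end; relr++) {
		uint64_t entry = *relr;

		if ((entry & 1) == 0) {
			/* address entry: relocate one word ... */
			where = (uint64_t *)(base + entry);
			*where++ += delta;	/* ... then aim at the bitmap run */
		} else {
			/* bitmap entry: bit i (i >= 1) covers where[i - 1] */
			for (unsigned int i = 0; (entry >>= 1) != 0; i++)
				if (entry & 1)
					where[i] += delta;
			/* each bitmap word advances the window by 63 words */
			where += 63;
		}
	}
}
```

RELR typically shrinks .rela.dyn substantially; producing it also requires asking the linker to emit the section (e.g. lld's --pack-dyn-relocs=relr, configured elsewhere in this series and not shown here).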
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 2f4a2ce7264b..38bcd4d4e43b 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -22,14 +22,14 @@
22 * Even switching to our copied tables will cause a changed output address at 22 * Even switching to our copied tables will cause a changed output address at
23 * each stage of the walk. 23 * each stage of the walk.
24 */ 24 */
25.macro break_before_make_ttbr_switch zero_page, page_table, tmp 25.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
26 phys_to_ttbr \tmp, \zero_page 26 phys_to_ttbr \tmp, \zero_page
27 msr ttbr1_el1, \tmp 27 msr ttbr1_el1, \tmp
28 isb 28 isb
29 tlbi vmalle1 29 tlbi vmalle1
30 dsb nsh 30 dsb nsh
31 phys_to_ttbr \tmp, \page_table 31 phys_to_ttbr \tmp, \page_table
32 offset_ttbr1 \tmp 32 offset_ttbr1 \tmp, \tmp2
33 msr ttbr1_el1, \tmp 33 msr ttbr1_el1, \tmp
34 isb 34 isb
35.endm 35.endm
@@ -70,7 +70,7 @@ ENTRY(swsusp_arch_suspend_exit)
70 * We execute from ttbr0, change ttbr1 to our copied linear map tables 70 * We execute from ttbr0, change ttbr1 to our copied linear map tables
71 * with a break-before-make via the zero page 71 * with a break-before-make via the zero page
72 */ 72 */
73 break_before_make_ttbr_switch x5, x0, x6 73 break_before_make_ttbr_switch x5, x0, x6, x8
74 74
75 mov x21, x1 75 mov x21, x1
76 mov x30, x2 76 mov x30, x2
@@ -101,7 +101,7 @@ ENTRY(swsusp_arch_suspend_exit)
101 dsb ish /* wait for PoU cleaning to finish */ 101 dsb ish /* wait for PoU cleaning to finish */
102 102
103 /* switch to the restored kernels page tables */ 103 /* switch to the restored kernels page tables */
104 break_before_make_ttbr_switch x25, x21, x6 104 break_before_make_ttbr_switch x25, x21, x6, x8
105 105
106 ic ialluis 106 ic ialluis
107 dsb ish 107 dsb ish
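
Note: break_before_make_ttbr_switch gains a second scratch register because offset_ttbr1 may now need a temporary to check at runtime whether the CPU implements 52-bit VAs. The macro itself implements the architecture's break-before-make requirement for swapping live translation tables; its steps, sketched as C-like pseudocode rather than literal kernel code:

```c
/* sketch of the macro's steps, not literal kernel code */
write_ttbr1(phys_to_ttbr(zero_page));	/* 1. "break": install an empty table */
isb();					/*    make the write take effect */
local_flush_tlb_all();			/* 2. tlbi vmalle1 + dsb nsh: drop stale walks */
write_ttbr1(offset_ttbr1(phys_to_ttbr(new_table)));	/* 3. "make" */
isb();
```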
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 9341fcc6e809..e0a7fce0e01c 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -496,7 +496,7 @@ int swsusp_arch_resume(void)
496 rc = -ENOMEM; 496 rc = -ENOMEM;
497 goto out; 497 goto out;
498 } 498 }
499 rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); 499 rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
500 if (rc) 500 if (rc)
501 goto out; 501 goto out;
502 502
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
new file mode 100644
index 000000000000..25a2a9b479c2
--- /dev/null
+++ b/arch/arm64/kernel/image-vars.h
@@ -0,0 +1,51 @@
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Linker script variables to be set after section resolution, as
4 * ld.lld does not like variables assigned before SECTIONS is processed.
5 */
6#ifndef __ARM64_KERNEL_IMAGE_VARS_H
7#define __ARM64_KERNEL_IMAGE_VARS_H
8
9#ifndef LINKER_SCRIPT
10#error This file should only be included in vmlinux.lds.S
11#endif
12
13#ifdef CONFIG_EFI
14
15__efistub_stext_offset = stext - _text;
16
17/*
18 * The EFI stub has its own symbol namespace prefixed by __efistub_, to
19 * isolate it from the kernel proper. The following symbols are legally
20 * accessed by the stub, so provide some aliases to make them accessible.
21 * Only include data symbols here, or text symbols of functions that are
22 * guaranteed to be safe when executed at another offset than they were
23 * linked at. The routines below are all implemented in assembler in a
24 * position independent manner
25 */
26__efistub_memcmp = __pi_memcmp;
27__efistub_memchr = __pi_memchr;
28__efistub_memcpy = __pi_memcpy;
29__efistub_memmove = __pi_memmove;
30__efistub_memset = __pi_memset;
31__efistub_strlen = __pi_strlen;
32__efistub_strnlen = __pi_strnlen;
33__efistub_strcmp = __pi_strcmp;
34__efistub_strncmp = __pi_strncmp;
35__efistub_strrchr = __pi_strrchr;
36__efistub___flush_dcache_area = __pi___flush_dcache_area;
37
38#ifdef CONFIG_KASAN
39__efistub___memcpy = __pi_memcpy;
40__efistub___memmove = __pi_memmove;
41__efistub___memset = __pi_memset;
42#endif
43
44__efistub__text = _text;
45__efistub__end = _end;
46__efistub__edata = _edata;
47__efistub_screen_info = screen_info;
48
49#endif
50
51#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 2b85c0d6fa3d..c7d38c660372 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -65,46 +65,4 @@
65 DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ 65 DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \
66 DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); 66 DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS);
67 67
68#ifdef CONFIG_EFI
69
70/*
71 * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
72 * https://github.com/ClangBuiltLinux/linux/issues/561
73 */
74__efistub_stext_offset = ABSOLUTE(stext - _text);
75
76/*
77 * The EFI stub has its own symbol namespace prefixed by __efistub_, to
78 * isolate it from the kernel proper. The following symbols are legally
79 * accessed by the stub, so provide some aliases to make them accessible.
80 * Only include data symbols here, or text symbols of functions that are
81 * guaranteed to be safe when executed at another offset than they were
82 * linked at. The routines below are all implemented in assembler in a
83 * position independent manner
84 */
85__efistub_memcmp = __pi_memcmp;
86__efistub_memchr = __pi_memchr;
87__efistub_memcpy = __pi_memcpy;
88__efistub_memmove = __pi_memmove;
89__efistub_memset = __pi_memset;
90__efistub_strlen = __pi_strlen;
91__efistub_strnlen = __pi_strnlen;
92__efistub_strcmp = __pi_strcmp;
93__efistub_strncmp = __pi_strncmp;
94__efistub_strrchr = __pi_strrchr;
95__efistub___flush_dcache_area = __pi___flush_dcache_area;
96
97#ifdef CONFIG_KASAN
98__efistub___memcpy = __pi_memcpy;
99__efistub___memmove = __pi_memmove;
100__efistub___memset = __pi_memset;
101#endif
102
103__efistub__text = _text;
104__efistub__end = _end;
105__efistub__edata = _edata;
106__efistub_screen_info = screen_info;
107
108#endif
109
110#endif /* __ARM64_KERNEL_IMAGE_H */ 68#endif /* __ARM64_KERNEL_IMAGE_H */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 84b059ed04fc..d801a7094076 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -26,7 +26,7 @@
26#define AARCH64_INSN_N_BIT BIT(22) 26#define AARCH64_INSN_N_BIT BIT(22)
27#define AARCH64_INSN_LSL_12 BIT(22) 27#define AARCH64_INSN_LSL_12 BIT(22)
28 28
29static int aarch64_insn_encoding_class[] = { 29static const int aarch64_insn_encoding_class[] = {
30 AARCH64_INSN_CLS_UNKNOWN, 30 AARCH64_INSN_CLS_UNKNOWN,
31 AARCH64_INSN_CLS_UNKNOWN, 31 AARCH64_INSN_CLS_UNKNOWN,
32 AARCH64_INSN_CLS_UNKNOWN, 32 AARCH64_INSN_CLS_UNKNOWN,
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 708051655ad9..416f537bf614 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -62,9 +62,6 @@ out:
62 return default_cmdline; 62 return default_cmdline;
63} 63}
64 64
65extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
66 pgprot_t prot);
67
68/* 65/*
69 * This routine will be executed with the kernel mapped at its default virtual 66 * This routine will be executed with the kernel mapped at its default virtual
70 * address, and if it returns successfully, the kernel will be remapped, and 67 * address, and if it returns successfully, the kernel will be remapped, and
@@ -93,7 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
93 * attempt at mapping the FDT in setup_machine() 90 * attempt at mapping the FDT in setup_machine()
94 */ 91 */
95 early_fixmap_init(); 92 early_fixmap_init();
96 fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); 93 fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
97 if (!fdt) 94 if (!fdt)
98 return 0; 95 return 0;
99 96
@@ -116,15 +113,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
116 /* 113 /*
117 * OK, so we are proceeding with KASLR enabled. Calculate a suitable 114 * OK, so we are proceeding with KASLR enabled. Calculate a suitable
118 * kernel image offset from the seed. Let's place the kernel in the 115 * kernel image offset from the seed. Let's place the kernel in the
119 * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of 116 * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of
120 * the lower and upper quarters to avoid colliding with other 117 * the lower and upper quarters to avoid colliding with other
121 * allocations. 118 * allocations.
122 * Even if we could randomize at page granularity for 16k and 64k pages, 119 * Even if we could randomize at page granularity for 16k and 64k pages,
123 * let's always round to 2 MB so we don't interfere with the ability to 120 * let's always round to 2 MB so we don't interfere with the ability to
124 * map using contiguous PTEs 121 * map using contiguous PTEs
125 */ 122 */
126 mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1); 123 mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1);
127 offset = BIT(VA_BITS - 3) + (seed & mask); 124 offset = BIT(VA_BITS_MIN - 3) + (seed & mask);
128 125
129 /* use the top 16 bits to randomize the linear region */ 126 /* use the top 16 bits to randomize the linear region */
130 memstart_offset_seed = seed >> 48; 127 memstart_offset_seed = seed >> 48;
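
Note: the VA_BITS_MIN rename leaves the shape of the randomization window unchanged. A worked example, assuming VA_BITS_MIN == 48:

```c
/* worked example for VA_BITS_MIN == 48; derived values in comments */
static u64 kaslr_offset_from_seed(u64 seed)
{
	u64 mask = ((1UL << 46) - 1) & ~(SZ_2M - 1);	/* seed bits [45:21] */

	/* result lies in [2^45, 2^45 + 2^46): the middle half of the
	 * 2^47-byte window, and is always a 2 MiB multiple so the kernel
	 * can still be mapped with contiguous PTEs */
	return (1UL << 45) + (seed & mask);
}
```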
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
index 2514fd6f12cb..29a9428486a5 100644
--- a/arch/arm64/kernel/kexec_image.c
+++ b/arch/arm64/kernel/kexec_image.c
@@ -84,7 +84,7 @@ static void *image_load(struct kimage *image,
84 84
85 kbuf.buffer = kernel; 85 kbuf.buffer = kernel;
86 kbuf.bufsz = kernel_len; 86 kbuf.bufsz = kernel_len;
87 kbuf.mem = 0; 87 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
88 kbuf.memsz = le64_to_cpu(h->image_size); 88 kbuf.memsz = le64_to_cpu(h->image_size);
89 text_offset = le64_to_cpu(h->text_offset); 89 text_offset = le64_to_cpu(h->text_offset);
90 kbuf.buf_align = MIN_KIMG_ALIGN; 90 kbuf.buf_align = MIN_KIMG_ALIGN;
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 58871333737a..7b08bf9499b6 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -27,6 +27,8 @@
27#define FDT_PROP_INITRD_END "linux,initrd-end" 27#define FDT_PROP_INITRD_END "linux,initrd-end"
28#define FDT_PROP_BOOTARGS "bootargs" 28#define FDT_PROP_BOOTARGS "bootargs"
29#define FDT_PROP_KASLR_SEED "kaslr-seed" 29#define FDT_PROP_KASLR_SEED "kaslr-seed"
30#define FDT_PROP_RNG_SEED "rng-seed"
31#define RNG_SEED_SIZE 128
30 32
31const struct kexec_file_ops * const kexec_file_loaders[] = { 33const struct kexec_file_ops * const kexec_file_loaders[] = {
32 &kexec_image_ops, 34 &kexec_image_ops,
@@ -102,6 +104,19 @@ static int setup_dtb(struct kimage *image,
102 FDT_PROP_KASLR_SEED); 104 FDT_PROP_KASLR_SEED);
103 } 105 }
104 106
107 /* add rng-seed */
108 if (rng_is_initialized()) {
109 u8 rng_seed[RNG_SEED_SIZE];
110 get_random_bytes(rng_seed, RNG_SEED_SIZE);
111 ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed,
112 RNG_SEED_SIZE);
113 if (ret)
114 goto out;
115 } else {
116 pr_notice("RNG is not initialised: omitting \"%s\" property\n",
117 FDT_PROP_RNG_SEED);
118 }
119
105out: 120out:
106 if (ret) 121 if (ret)
107 return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL; 122 return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
@@ -110,7 +125,8 @@ out:
110} 125}
111 126
112/* 127/*
113 * More space needed so that we can add initrd, bootargs and kaslr-seed. 128 * More space needed so that we can add initrd, bootargs, kaslr-seed, and
129 * rng-seed.
114 */ 130 */
115#define DTB_EXTRA_SPACE 0x1000 131#define DTB_EXTRA_SPACE 0x1000
116 132
@@ -177,7 +193,7 @@ int load_other_segments(struct kimage *image,
177 if (initrd) { 193 if (initrd) {
178 kbuf.buffer = initrd; 194 kbuf.buffer = initrd;
179 kbuf.bufsz = initrd_len; 195 kbuf.bufsz = initrd_len;
180 kbuf.mem = 0; 196 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
181 kbuf.memsz = initrd_len; 197 kbuf.memsz = initrd_len;
182 kbuf.buf_align = 0; 198 kbuf.buf_align = 0;
183 /* within 1GB-aligned window of up to 32GB in size */ 199 /* within 1GB-aligned window of up to 32GB in size */
@@ -204,7 +220,7 @@ int load_other_segments(struct kimage *image,
204 dtb_len = fdt_totalsize(dtb); 220 dtb_len = fdt_totalsize(dtb);
205 kbuf.buffer = dtb; 221 kbuf.buffer = dtb;
206 kbuf.bufsz = dtb_len; 222 kbuf.bufsz = dtb_len;
207 kbuf.mem = 0; 223 kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
208 kbuf.memsz = dtb_len; 224 kbuf.memsz = dtb_len;
209 /* not across 2MB boundary */ 225 /* not across 2MB boundary */
210 kbuf.buf_align = SZ_2M; 226 kbuf.buf_align = SZ_2M;
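
Note: the new rng-seed property mirrors what bootloaders can already do for cold boot; the kexec'd kernel's early FDT code consumes /chosen/rng-seed and credits it to the entropy pool. A hedged sketch of the consuming side, which lives in the generic FDT code added by a companion patch (function names assumed from that series):

```c
/* sketch: consumption in early_init_dt_scan_chosen() (assumption) */
int l;
const void *rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);

if (rng_seed && l > 0) {
	/* mix the seed in and credit the entropy; the real code also
	 * wipes the property so a further kexec cannot reuse it */
	add_bootloader_randomness(rng_seed, l);
}
```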
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 044c0ae4d6c8..b182442b87a3 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -302,7 +302,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
302 /* sort by type, symbol index and addend */ 302 /* sort by type, symbol index and addend */
303 sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL); 303 sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL);
304 304
305 if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) 305 if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
306 core_plts += count_plts(syms, rels, numrels, 306 core_plts += count_plts(syms, rels, numrels,
307 sechdrs[i].sh_info, dstsec); 307 sechdrs[i].sh_info, dstsec);
308 else 308 else
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 96e90e270042..a0b4f1bca491 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -19,6 +19,7 @@
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/perf/arm_pmu.h> 20#include <linux/perf/arm_pmu.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/smp.h>
22 23
23/* ARMv8 Cortex-A53 specific event types. */ 24/* ARMv8 Cortex-A53 specific event types. */
24#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 25#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -157,7 +158,6 @@ armv8pmu_events_sysfs_show(struct device *dev,
157 return sprintf(page, "event=0x%03llx\n", pmu_attr->id); 158 return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
158} 159}
159 160
160#define ARMV8_EVENT_ATTR_RESOLVE(m) #m
161#define ARMV8_EVENT_ATTR(name, config) \ 161#define ARMV8_EVENT_ATTR(name, config) \
162 PMU_EVENT_ATTR(name, armv8_event_attr_##name, \ 162 PMU_EVENT_ATTR(name, armv8_event_attr_##name, \
163 config, armv8pmu_events_sysfs_show) 163 config, armv8pmu_events_sysfs_show)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f674f28df663..03689c0beb34 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -19,6 +19,7 @@
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/stddef.h> 21#include <linux/stddef.h>
22#include <linux/sysctl.h>
22#include <linux/unistd.h> 23#include <linux/unistd.h>
23#include <linux/user.h> 24#include <linux/user.h>
24#include <linux/delay.h> 25#include <linux/delay.h>
@@ -38,6 +39,7 @@
38#include <trace/events/power.h> 39#include <trace/events/power.h>
39#include <linux/percpu.h> 40#include <linux/percpu.h>
40#include <linux/thread_info.h> 41#include <linux/thread_info.h>
42#include <linux/prctl.h>
41 43
42#include <asm/alternative.h> 44#include <asm/alternative.h>
43#include <asm/arch_gicv3.h> 45#include <asm/arch_gicv3.h>
@@ -307,11 +309,18 @@ static void tls_thread_flush(void)
307 } 309 }
308} 310}
309 311
312static void flush_tagged_addr_state(void)
313{
314 if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
315 clear_thread_flag(TIF_TAGGED_ADDR);
316}
317
310void flush_thread(void) 318void flush_thread(void)
311{ 319{
312 fpsimd_flush_thread(); 320 fpsimd_flush_thread();
313 tls_thread_flush(); 321 tls_thread_flush();
314 flush_ptrace_hw_breakpoint(current); 322 flush_ptrace_hw_breakpoint(current);
323 flush_tagged_addr_state();
315} 324}
316 325
317void release_thread(struct task_struct *dead_task) 326void release_thread(struct task_struct *dead_task)
@@ -565,3 +574,70 @@ void arch_setup_new_exec(void)
565 574
566 ptrauth_thread_init_user(current); 575 ptrauth_thread_init_user(current);
567} 576}
577
578#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
579/*
580 * Control the relaxed ABI allowing tagged user addresses into the kernel.
581 */
582static unsigned int tagged_addr_disabled;
583
584long set_tagged_addr_ctrl(unsigned long arg)
585{
586 if (is_compat_task())
587 return -EINVAL;
588 if (arg & ~PR_TAGGED_ADDR_ENABLE)
589 return -EINVAL;
590
591 /*
592 * Do not allow the enabling of the tagged address ABI if globally
593 * disabled via sysctl abi.tagged_addr_disabled.
594 */
595 if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
596 return -EINVAL;
597
598 update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
599
600 return 0;
601}
602
603long get_tagged_addr_ctrl(void)
604{
605 if (is_compat_task())
606 return -EINVAL;
607
608 if (test_thread_flag(TIF_TAGGED_ADDR))
609 return PR_TAGGED_ADDR_ENABLE;
610
611 return 0;
612}
613
614/*
615 * Global sysctl to disable the tagged user addresses support. This control
616 * only prevents the tagged address ABI enabling via prctl() and does not
617 * disable it for tasks that already opted in to the relaxed ABI.
618 */
619static int zero;
620static int one = 1;
621
622static struct ctl_table tagged_addr_sysctl_table[] = {
623 {
624 .procname = "tagged_addr_disabled",
625 .mode = 0644,
626 .data = &tagged_addr_disabled,
627 .maxlen = sizeof(int),
628 .proc_handler = proc_dointvec_minmax,
629 .extra1 = &zero,
630 .extra2 = &one,
631 },
632 { }
633};
634
635static int __init tagged_addr_init(void)
636{
637 if (!register_sysctl("abi", tagged_addr_sysctl_table))
638 return -EINVAL;
639 return 0;
640}
641
642core_initcall(tagged_addr_init);
643#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
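
Note: user space opts in per thread through prctl(); once enabled, syscalls accept pointers with an arbitrary top byte because __range_ok() (see the uaccess.h hunk above) untags them. A minimal usage sketch; the PR_* values are taken from the matching uapi patch in this series:

```c
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55		/* from the uapi patch */
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		return 1;	/* old kernel, compat task, or sysctl-disabled */

	char *p = malloc(64);
	/* place an arbitrary tag in bits 63:56; the kernel strips it */
	char *tagged = (char *)((uintptr_t)p | (0x2aUL << 56));

	return read(0, tagged, 64) < 0;
}
```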
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 85ee7d07889e..c9f72b2665f1 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -46,6 +46,11 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
46} 46}
47 47
48#ifdef CONFIG_HOTPLUG_CPU 48#ifdef CONFIG_HOTPLUG_CPU
49static bool cpu_psci_cpu_can_disable(unsigned int cpu)
50{
51 return !psci_tos_resident_on(cpu);
52}
53
49static int cpu_psci_cpu_disable(unsigned int cpu) 54static int cpu_psci_cpu_disable(unsigned int cpu)
50{ 55{
51 /* Fail early if we don't have CPU_OFF support */ 56 /* Fail early if we don't have CPU_OFF support */
@@ -105,14 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
105 110
106const struct cpu_operations cpu_psci_ops = { 111const struct cpu_operations cpu_psci_ops = {
107 .name = "psci", 112 .name = "psci",
108#ifdef CONFIG_CPU_IDLE
109 .cpu_init_idle = psci_cpu_init_idle,
110 .cpu_suspend = psci_cpu_suspend_enter,
111#endif
112 .cpu_init = cpu_psci_cpu_init, 113 .cpu_init = cpu_psci_cpu_init,
113 .cpu_prepare = cpu_psci_cpu_prepare, 114 .cpu_prepare = cpu_psci_cpu_prepare,
114 .cpu_boot = cpu_psci_cpu_boot, 115 .cpu_boot = cpu_psci_cpu_boot,
115#ifdef CONFIG_HOTPLUG_CPU 116#ifdef CONFIG_HOTPLUG_CPU
117 .cpu_can_disable = cpu_psci_cpu_can_disable,
116 .cpu_disable = cpu_psci_cpu_disable, 118 .cpu_disable = cpu_psci_cpu_disable,
117 .cpu_die = cpu_psci_cpu_die, 119 .cpu_die = cpu_psci_cpu_die,
118 .cpu_kill = cpu_psci_cpu_kill, 120 .cpu_kill = cpu_psci_cpu_kill,
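
Note: cpu_can_disable() lets the core code know up front whether a CPU may ever be offlined, instead of failing later in cpu_disable(). For PSCI the only veto is a Trusted OS resident on that CPU; psci_tos_resident_on() is roughly a comparison against the CPU the firmware reported (hedged sketch, per drivers/firmware/psci.c):

```c
/* sketch: resident_cpu is discovered once at boot via
 * PSCI MIGRATE_INFO_UP_CPU when the Trusted OS is not migrateable;
 * -1 means "no resident Trusted OS" */
static int resident_cpu = -1;

bool psci_tos_resident_on(int cpu)
{
	return cpu == resident_cpu;
}
```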
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 3cf3b135027e..21176d02e21a 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -870,7 +870,7 @@ static int sve_set(struct task_struct *target,
870 goto out; 870 goto out;
871 871
872 /* 872 /*
873 * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by 873 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
874 * sve_set_vector_length(), which will also validate them for us: 874 * sve_set_vector_length(), which will also validate them for us:
875 */ 875 */
876 ret = sve_set_vector_length(target, header.vl, 876 ret = sve_set_vector_length(target, header.vl,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 9c4bad7d7131..56f664561754 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -170,9 +170,13 @@ static void __init smp_build_mpidr_hash(void)
170 170
171static void __init setup_machine_fdt(phys_addr_t dt_phys) 171static void __init setup_machine_fdt(phys_addr_t dt_phys)
172{ 172{
173 void *dt_virt = fixmap_remap_fdt(dt_phys); 173 int size;
174 void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
174 const char *name; 175 const char *name;
175 176
177 if (dt_virt)
178 memblock_reserve(dt_phys, size);
179
176 if (!dt_virt || !early_init_dt_scan(dt_virt)) { 180 if (!dt_virt || !early_init_dt_scan(dt_virt)) {
177 pr_crit("\n" 181 pr_crit("\n"
178 "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n" 182 "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
@@ -184,6 +188,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
184 cpu_relax(); 188 cpu_relax();
185 } 189 }
186 190
191 /* Early fixups are done, map the FDT as read-only now */
192 fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
193
187 name = of_flat_dt_get_machine_name(); 194 name = of_flat_dt_get_machine_name();
188 if (!name) 195 if (!name)
189 return; 196 return;
@@ -357,6 +364,15 @@ void __init setup_arch(char **cmdline_p)
357 } 364 }
358} 365}
359 366
367static inline bool cpu_can_disable(unsigned int cpu)
368{
369#ifdef CONFIG_HOTPLUG_CPU
370 if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable)
371 return cpu_ops[cpu]->cpu_can_disable(cpu);
372#endif
373 return false;
374}
375
360static int __init topology_init(void) 376static int __init topology_init(void)
361{ 377{
362 int i; 378 int i;
@@ -366,7 +382,7 @@ static int __init topology_init(void)
366 382
367 for_each_possible_cpu(i) { 383 for_each_possible_cpu(i) {
368 struct cpu *cpu = &per_cpu(cpu_data.cpu, i); 384 struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
369 cpu->hotpluggable = 1; 385 cpu->hotpluggable = cpu_can_disable(i);
370 register_cpu(cpu, i); 386 register_cpu(cpu, i);
371 } 387 }
372 388
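
Note: combined with the psci.c hunk above, this changes what sysfs advertises. register_cpu() only creates the per-CPU online control when the CPU is flagged hotpluggable, so a CPU hosting a non-migrateable Trusted OS simply stops offering hotplug rather than failing at offline time. Roughly (simplified from drivers/base/cpu.c semantics):

```c
/* sketch: effect of the hotpluggable flag */
struct cpu *cpu = &per_cpu(cpu_data.cpu, i);

cpu->hotpluggable = cpu_can_disable(i);
register_cpu(cpu, i);	/* /sys/devices/system/cpu/cpuN/online is only
			 * created when hotpluggable is set */
```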
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 76c2739ba8a4..c8a3fee00c11 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -19,7 +19,7 @@
19#include <asm/smp_plat.h> 19#include <asm/smp_plat.h>
20 20
21extern void secondary_holding_pen(void); 21extern void secondary_holding_pen(void);
22volatile unsigned long __section(".mmuoff.data.read") 22volatile unsigned long __section(.mmuoff.data.read)
23secondary_holding_pen_release = INVALID_HWID; 23secondary_holding_pen_release = INVALID_HWID;
24 24
25static phys_addr_t cpu_release_addr[NR_CPUS]; 25static phys_addr_t cpu_release_addr[NR_CPUS];
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index 0825c4a856e3..fa9528dfd0ce 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -14,250 +14,13 @@
14#include <linux/acpi.h> 14#include <linux/acpi.h>
15#include <linux/arch_topology.h> 15#include <linux/arch_topology.h>
16#include <linux/cacheinfo.h> 16#include <linux/cacheinfo.h>
17#include <linux/cpu.h>
18#include <linux/cpumask.h>
19#include <linux/init.h> 17#include <linux/init.h>
20#include <linux/percpu.h> 18#include <linux/percpu.h>
21#include <linux/node.h>
22#include <linux/nodemask.h>
23#include <linux/of.h>
24#include <linux/sched.h>
25#include <linux/sched/topology.h>
26#include <linux/slab.h>
27#include <linux/smp.h>
28#include <linux/string.h>
29 19
30#include <asm/cpu.h> 20#include <asm/cpu.h>
31#include <asm/cputype.h> 21#include <asm/cputype.h>
32#include <asm/topology.h> 22#include <asm/topology.h>
33 23
34static int __init get_cpu_for_node(struct device_node *node)
35{
36 struct device_node *cpu_node;
37 int cpu;
38
39 cpu_node = of_parse_phandle(node, "cpu", 0);
40 if (!cpu_node)
41 return -1;
42
43 cpu = of_cpu_node_to_id(cpu_node);
44 if (cpu >= 0)
45 topology_parse_cpu_capacity(cpu_node, cpu);
46 else
47 pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
48
49 of_node_put(cpu_node);
50 return cpu;
51}
52
53static int __init parse_core(struct device_node *core, int package_id,
54 int core_id)
55{
56 char name[10];
57 bool leaf = true;
58 int i = 0;
59 int cpu;
60 struct device_node *t;
61
62 do {
63 snprintf(name, sizeof(name), "thread%d", i);
64 t = of_get_child_by_name(core, name);
65 if (t) {
66 leaf = false;
67 cpu = get_cpu_for_node(t);
68 if (cpu >= 0) {
69 cpu_topology[cpu].package_id = package_id;
70 cpu_topology[cpu].core_id = core_id;
71 cpu_topology[cpu].thread_id = i;
72 } else {
73 pr_err("%pOF: Can't get CPU for thread\n",
74 t);
75 of_node_put(t);
76 return -EINVAL;
77 }
78 of_node_put(t);
79 }
80 i++;
81 } while (t);
82
83 cpu = get_cpu_for_node(core);
84 if (cpu >= 0) {
85 if (!leaf) {
86 pr_err("%pOF: Core has both threads and CPU\n",
87 core);
88 return -EINVAL;
89 }
90
91 cpu_topology[cpu].package_id = package_id;
92 cpu_topology[cpu].core_id = core_id;
93 } else if (leaf) {
94 pr_err("%pOF: Can't get CPU for leaf core\n", core);
95 return -EINVAL;
96 }
97
98 return 0;
99}
100
101static int __init parse_cluster(struct device_node *cluster, int depth)
102{
103 char name[10];
104 bool leaf = true;
105 bool has_cores = false;
106 struct device_node *c;
107 static int package_id __initdata;
108 int core_id = 0;
109 int i, ret;
110
111 /*
112 * First check for child clusters; we currently ignore any
113 * information about the nesting of clusters and present the
114 * scheduler with a flat list of them.
115 */
116 i = 0;
117 do {
118 snprintf(name, sizeof(name), "cluster%d", i);
119 c = of_get_child_by_name(cluster, name);
120 if (c) {
121 leaf = false;
122 ret = parse_cluster(c, depth + 1);
123 of_node_put(c);
124 if (ret != 0)
125 return ret;
126 }
127 i++;
128 } while (c);
129
130 /* Now check for cores */
131 i = 0;
132 do {
133 snprintf(name, sizeof(name), "core%d", i);
134 c = of_get_child_by_name(cluster, name);
135 if (c) {
136 has_cores = true;
137
138 if (depth == 0) {
139 pr_err("%pOF: cpu-map children should be clusters\n",
140 c);
141 of_node_put(c);
142 return -EINVAL;
143 }
144
145 if (leaf) {
146 ret = parse_core(c, package_id, core_id++);
147 } else {
148 pr_err("%pOF: Non-leaf cluster with core %s\n",
149 cluster, name);
150 ret = -EINVAL;
151 }
152
153 of_node_put(c);
154 if (ret != 0)
155 return ret;
156 }
157 i++;
158 } while (c);
159
160 if (leaf && !has_cores)
161 pr_warn("%pOF: empty cluster\n", cluster);
162
163 if (leaf)
164 package_id++;
165
166 return 0;
167}
168
169static int __init parse_dt_topology(void)
170{
171 struct device_node *cn, *map;
172 int ret = 0;
173 int cpu;
174
175 cn = of_find_node_by_path("/cpus");
176 if (!cn) {
177 pr_err("No CPU information found in DT\n");
178 return 0;
179 }
180
181 /*
182 * When topology is provided cpu-map is essentially a root
183 * cluster with restricted subnodes.
184 */
185 map = of_get_child_by_name(cn, "cpu-map");
186 if (!map)
187 goto out;
188
189 ret = parse_cluster(map, 0);
190 if (ret != 0)
191 goto out_map;
192
193 topology_normalize_cpu_scale();
194
195 /*
196 * Check that all cores are in the topology; the SMP code will
197 * only mark cores described in the DT as possible.
198 */
199 for_each_possible_cpu(cpu)
200 if (cpu_topology[cpu].package_id == -1)
201 ret = -EINVAL;
202
203out_map:
204 of_node_put(map);
205out:
206 of_node_put(cn);
207 return ret;
208}
209
210/*
211 * cpu topology table
212 */
213struct cpu_topology cpu_topology[NR_CPUS];
214EXPORT_SYMBOL_GPL(cpu_topology);
215
216const struct cpumask *cpu_coregroup_mask(int cpu)
217{
218 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
219
220 /* Find the smaller of NUMA, core or LLC siblings */
221 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
222 /* not numa in package, lets use the package siblings */
223 core_mask = &cpu_topology[cpu].core_sibling;
224 }
225 if (cpu_topology[cpu].llc_id != -1) {
226 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
227 core_mask = &cpu_topology[cpu].llc_sibling;
228 }
229
230 return core_mask;
231}
232
233static void update_siblings_masks(unsigned int cpuid)
234{
235 struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
236 int cpu;
237
238 /* update core and thread sibling masks */
239 for_each_online_cpu(cpu) {
240 cpu_topo = &cpu_topology[cpu];
241
242 if (cpuid_topo->llc_id == cpu_topo->llc_id) {
243 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
244 cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
245 }
246
247 if (cpuid_topo->package_id != cpu_topo->package_id)
248 continue;
249
250 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
251 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
252
253 if (cpuid_topo->core_id != cpu_topo->core_id)
254 continue;
255
256 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
257 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
258 }
259}
260
261void store_cpu_topology(unsigned int cpuid) 24void store_cpu_topology(unsigned int cpuid)
262{ 25{
263 struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; 26 struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
@@ -296,60 +59,31 @@ topology_populated:
296 update_siblings_masks(cpuid); 59 update_siblings_masks(cpuid);
297} 60}
298 61
299static void clear_cpu_topology(int cpu) 62#ifdef CONFIG_ACPI
300{ 63static bool __init acpi_cpu_is_threaded(int cpu)
301 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
302
303 cpumask_clear(&cpu_topo->llc_sibling);
304 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
305
306 cpumask_clear(&cpu_topo->core_sibling);
307 cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
308 cpumask_clear(&cpu_topo->thread_sibling);
309 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
310}
311
312static void __init reset_cpu_topology(void)
313{
314 unsigned int cpu;
315
316 for_each_possible_cpu(cpu) {
317 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
318
319 cpu_topo->thread_id = -1;
320 cpu_topo->core_id = 0;
321 cpu_topo->package_id = -1;
322 cpu_topo->llc_id = -1;
323
324 clear_cpu_topology(cpu);
325 }
326}
327
328void remove_cpu_topology(unsigned int cpu)
329{ 64{
330 int sibling; 65 int is_threaded = acpi_pptt_cpu_is_thread(cpu);
331 66
332 for_each_cpu(sibling, topology_core_cpumask(cpu)) 67 /*
333 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); 68 * if the PPTT doesn't have thread information, assume a homogeneous
334 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) 69 * machine and return the current CPU's thread state.
335 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); 70 */
336 for_each_cpu(sibling, topology_llc_cpumask(cpu)) 71 if (is_threaded < 0)
337 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); 72 is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
338 73
339 clear_cpu_topology(cpu); 74 return !!is_threaded;
340} 75}
341 76
342#ifdef CONFIG_ACPI
343/* 77/*
344 * Propagate the topology information of the processor_topology_node tree to the 78 * Propagate the topology information of the processor_topology_node tree to the
345 * cpu_topology array. 79 * cpu_topology array.
346 */ 80 */
347static int __init parse_acpi_topology(void) 81int __init parse_acpi_topology(void)
348{ 82{
349 bool is_threaded;
350 int cpu, topology_id; 83 int cpu, topology_id;
351 84
352 is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; 85 if (acpi_disabled)
86 return 0;
353 87
354 for_each_possible_cpu(cpu) { 88 for_each_possible_cpu(cpu) {
355 int i, cache_id; 89 int i, cache_id;
@@ -358,7 +92,7 @@ static int __init parse_acpi_topology(void)
358 if (topology_id < 0) 92 if (topology_id < 0)
359 return topology_id; 93 return topology_id;
360 94
361 if (is_threaded) { 95 if (acpi_cpu_is_threaded(cpu)) {
362 cpu_topology[cpu].thread_id = topology_id; 96 cpu_topology[cpu].thread_id = topology_id;
363 topology_id = find_acpi_cpu_topology(cpu, 1); 97 topology_id = find_acpi_cpu_topology(cpu, 1);
364 cpu_topology[cpu].core_id = topology_id; 98 cpu_topology[cpu].core_id = topology_id;
@@ -384,24 +118,6 @@ static int __init parse_acpi_topology(void)
384 118
385 return 0; 119 return 0;
386} 120}
387
388#else
389static inline int __init parse_acpi_topology(void)
390{
391 return -EINVAL;
392}
393#endif 121#endif
394 122
395void __init init_cpu_topology(void)
396{
397 reset_cpu_topology();
398 123
399 /*
400 * Discard anything that was parsed if we hit an error so we
401 * don't use partial information.
402 */
403 if (!acpi_disabled && parse_acpi_topology())
404 reset_cpu_topology();
405 else if (of_have_populated_dt() && parse_dt_topology())
406 reset_cpu_topology();
407}
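
Note: the ACPI path previously decided "threaded or not" once, from the boot CPU's MPIDR_EL1.MT bit, and applied that answer to every CPU; with the PPTT it can now differ per CPU, which matters on asymmetric parts. The change in a nutshell, condensed from the hunks above:

```c
/* old: one global answer derived from the boot CPU */
bool is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

/* new: ask the PPTT for each CPU, with MPIDR only as a fallback
 * when the table carries no thread flag (negative return value) */
int t = acpi_pptt_cpu_is_thread(cpu);
if (t < 0)
	t = read_cpuid_mpidr() & MPIDR_MT_BITMASK;
```

The DT path (parse_dt_topology() and friends, removed above) likewise continues to exist, now in the shared arch_topology code.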
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index d3313797cca9..6e950908eb97 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -7,9 +7,11 @@
7 */ 7 */
8 8
9#include <linux/bug.h> 9#include <linux/bug.h>
10#include <linux/context_tracking.h>
10#include <linux/signal.h> 11#include <linux/signal.h>
11#include <linux/personality.h> 12#include <linux/personality.h>
12#include <linux/kallsyms.h> 13#include <linux/kallsyms.h>
14#include <linux/kprobes.h>
13#include <linux/spinlock.h> 15#include <linux/spinlock.h>
14#include <linux/uaccess.h> 16#include <linux/uaccess.h>
15#include <linux/hardirq.h> 17#include <linux/hardirq.h>
@@ -511,7 +513,7 @@ struct sys64_hook {
511 void (*handler)(unsigned int esr, struct pt_regs *regs); 513 void (*handler)(unsigned int esr, struct pt_regs *regs);
512}; 514};
513 515
514static struct sys64_hook sys64_hooks[] = { 516static const struct sys64_hook sys64_hooks[] = {
515 { 517 {
516 .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK, 518 .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
517 .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL, 519 .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
@@ -636,7 +638,7 @@ static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
636 arm64_compat_skip_faulting_instruction(regs, 4); 638 arm64_compat_skip_faulting_instruction(regs, 4);
637} 639}
638 640
639static struct sys64_hook cp15_32_hooks[] = { 641static const struct sys64_hook cp15_32_hooks[] = {
640 { 642 {
641 .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK, 643 .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
642 .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ, 644 .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
@@ -656,7 +658,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
656 arm64_compat_skip_faulting_instruction(regs, 4); 658 arm64_compat_skip_faulting_instruction(regs, 4);
657} 659}
658 660
659static struct sys64_hook cp15_64_hooks[] = { 661static const struct sys64_hook cp15_64_hooks[] = {
660 { 662 {
661 .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK, 663 .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
662 .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT, 664 .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
@@ -667,7 +669,7 @@ static struct sys64_hook cp15_64_hooks[] = {
667 669
668asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) 670asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
669{ 671{
670 struct sys64_hook *hook, *hook_base; 672 const struct sys64_hook *hook, *hook_base;
671 673
672 if (!cp15_cond_valid(esr, regs)) { 674 if (!cp15_cond_valid(esr, regs)) {
673 /* 675 /*
@@ -707,7 +709,7 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
707 709
708asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) 710asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
709{ 711{
710 struct sys64_hook *hook; 712 const struct sys64_hook *hook;
711 713
712 for (hook = sys64_hooks; hook->handler; hook++) 714 for (hook = sys64_hooks; hook->handler; hook++)
713 if ((hook->esr_mask & esr) == hook->esr_val) { 715 if ((hook->esr_mask & esr) == hook->esr_val) {
@@ -743,6 +745,7 @@ static const char *esr_class_str[] = {
743 [ESR_ELx_EC_SMC64] = "SMC (AArch64)", 745 [ESR_ELx_EC_SMC64] = "SMC (AArch64)",
744 [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", 746 [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)",
745 [ESR_ELx_EC_SVE] = "SVE", 747 [ESR_ELx_EC_SVE] = "SVE",
748 [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB",
746 [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", 749 [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF",
747 [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", 750 [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)",
748 [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", 751 [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)",
@@ -899,6 +902,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
899 nmi_exit(); 902 nmi_exit();
900} 903}
901 904
905asmlinkage void enter_from_user_mode(void)
906{
907 CT_WARN_ON(ct_state() != CONTEXT_USER);
908 user_exit_irqoff();
909}
910NOKPROBE_SYMBOL(enter_from_user_mode);
911
902void __pte_error(const char *file, int line, unsigned long val) 912void __pte_error(const char *file, int line, unsigned long val)
903{ 913{
904 pr_err("%s:%d: bad pte %016lx.\n", file, line, val); 914 pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7fa008374907..aa76f7259668 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -200,6 +200,15 @@ SECTIONS
200 __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); 200 __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
201 __rela_size = SIZEOF(.rela.dyn); 201 __rela_size = SIZEOF(.rela.dyn);
202 202
203#ifdef CONFIG_RELR
204 .relr.dyn : ALIGN(8) {
205 *(.relr.dyn)
206 }
207
208 __relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR);
209 __relr_size = SIZEOF(.relr.dyn);
210#endif
211
203 . = ALIGN(SEGMENT_ALIGN); 212 . = ALIGN(SEGMENT_ALIGN);
204 __initdata_end = .; 213 __initdata_end = .;
205 __init_end = .; 214 __init_end = .;
@@ -245,6 +254,8 @@ SECTIONS
245 HEAD_SYMBOLS 254 HEAD_SYMBOLS
246} 255}
247 256
257#include "image-vars.h"
258
248/* 259/*
249 * The HYP init code and ID map text can't be longer than a page each, 260 * The HYP init code and ID map text can't be longer than a page each,
250 * and should not cross a page boundary. 261 * and should not cross a page boundary.
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index adaf266d8de8..bd978ad71936 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -264,7 +264,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
264 tmp = read_sysreg(par_el1); 264 tmp = read_sysreg(par_el1);
265 write_sysreg(par, par_el1); 265 write_sysreg(par, par_el1);
266 266
267 if (unlikely(tmp & 1)) 267 if (unlikely(tmp & SYS_PAR_EL1_F))
268 return false; /* Translation failed, back to guest */ 268 return false; /* Translation failed, back to guest */
269 269
270 /* Convert PAR to HPFAR format */ 270 /* Convert PAR to HPFAR format */
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index acd8084f1f2c..2cf7d4b606c3 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -29,25 +29,25 @@ static void compute_layout(void)
29 int kva_msb; 29 int kva_msb;
30 30
31 /* Where is my RAM region? */ 31 /* Where is my RAM region? */
32 hyp_va_msb = idmap_addr & BIT(VA_BITS - 1); 32 hyp_va_msb = idmap_addr & BIT(vabits_actual - 1);
33 hyp_va_msb ^= BIT(VA_BITS - 1); 33 hyp_va_msb ^= BIT(vabits_actual - 1);
34 34
35 kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^ 35 kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^
36 (u64)(high_memory - 1)); 36 (u64)(high_memory - 1));
37 37
38 if (kva_msb == (VA_BITS - 1)) { 38 if (kva_msb == (vabits_actual - 1)) {
39 /* 39 /*
40 * No space in the address, let's compute the mask so 40 * No space in the address, let's compute the mask so
41 * that it covers (VA_BITS - 1) bits, and the region 41 * that it covers (vabits_actual - 1) bits, and the region
42 * bit. The tag stays set to zero. 42 * bit. The tag stays set to zero.
43 */ 43 */
44 va_mask = BIT(VA_BITS - 1) - 1; 44 va_mask = BIT(vabits_actual - 1) - 1;
45 va_mask |= hyp_va_msb; 45 va_mask |= hyp_va_msb;
46 } else { 46 } else {
47 /* 47 /*
48 * We do have some free bits to insert a random tag. 48 * We do have some free bits to insert a random tag.
49 * Hyp VAs are now created from kernel linear map VAs 49 * Hyp VAs are now created from kernel linear map VAs
50 * using the following formula (with V == VA_BITS): 50 * using the following formula (with V == vabits_actual):
51 * 51 *
52 * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0 52 * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 0
53 * --------------------------------------------------------- 53 * ---------------------------------------------------------
@@ -55,7 +55,7 @@ static void compute_layout(void)
55 */ 55 */
56 tag_lsb = kva_msb; 56 tag_lsb = kva_msb;
57 va_mask = GENMASK_ULL(tag_lsb - 1, 0); 57 va_mask = GENMASK_ULL(tag_lsb - 1, 0);
58 tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb); 58 tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
59 tag_val |= hyp_va_msb; 59 tag_val |= hyp_va_msb;
60 tag_val >>= tag_lsb; 60 tag_val >>= tag_lsb;
61 } 61 }
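compute_layout() keeps the low kernel-VA bits, reserves the region-select bit, and fills the remaining free bits with a random tag. A standalone sketch of the same mask arithmetic, with illustrative values for vabits_actual and the first free bit; the kernel derives hyp_va_msb from the idmap address rather than setting the top VA bit directly as done here for brevity.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BIT(n)            (1ULL << (n))
#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
    unsigned int vabits = 48;   /* stand-in for vabits_actual */
    unsigned int tag_lsb = 44;  /* stand-in for kva_msb: first free bit */
    uint64_t va_mask, tag_val;

    /* keep the low bits of the kernel VA, randomize the free ones */
    va_mask = GENMASK_ULL(tag_lsb - 1, 0);
    tag_val = (((uint64_t)rand() << 32) | rand()) &
              GENMASK_ULL(vabits - 2, tag_lsb);
    tag_val |= BIT(vabits - 1);  /* region-select bit, as hyp_va_msb */
    tag_val >>= tag_lsb;

    printf("va_mask = %#llx, tag (shifted) = %#llx\n",
           (unsigned long long)va_mask, (unsigned long long)tag_val);
    return 0;
}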
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 33c2a4abda04..f182ccb0438e 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -33,3 +33,5 @@ UBSAN_SANITIZE_atomic_ll_sc.o := n
33lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o 33lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
34 34
35obj-$(CONFIG_CRC32) += crc32.o 35obj-$(CONFIG_CRC32) += crc32.o
36
37obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/lib/error-inject.c b/arch/arm64/lib/error-inject.c
new file mode 100644
index 000000000000..ed15021da3ed
--- /dev/null
+++ b/arch/arm64/lib/error-inject.c
@@ -0,0 +1,18 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/error-injection.h>
4#include <linux/kprobes.h>
5
6void override_function_with_return(struct pt_regs *regs)
7{
8 /*
 9 * 'regs' represents the state on entry of a predefined function in
10 * the kernel/module, as captured by a kprobe.
11 *
12 * When the kprobe returns from the exception it will skip the rest
13 * of the probed function and return directly to that
14 * function's caller.
15 */
16 instruction_pointer_set(regs, procedure_link_pointer(regs));
17}
18NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 82b3a7fdb4a6..93f9f77582ae 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -25,9 +25,20 @@
25#include <asm/pgtable-hwdef.h> 25#include <asm/pgtable-hwdef.h>
26#include <asm/ptdump.h> 26#include <asm/ptdump.h>
27 27
28static const struct addr_marker address_markers[] = { 28
29enum address_markers_idx {
30 PAGE_OFFSET_NR = 0,
31 PAGE_END_NR,
29#ifdef CONFIG_KASAN 32#ifdef CONFIG_KASAN
30 { KASAN_SHADOW_START, "Kasan shadow start" }, 33 KASAN_START_NR,
34#endif
35};
36
37static struct addr_marker address_markers[] = {
38 { PAGE_OFFSET, "Linear Mapping start" },
39 { 0 /* PAGE_END */, "Linear Mapping end" },
40#ifdef CONFIG_KASAN
41 { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
31 { KASAN_SHADOW_END, "Kasan shadow end" }, 42 { KASAN_SHADOW_END, "Kasan shadow end" },
32#endif 43#endif
33 { MODULES_VADDR, "Modules start" }, 44 { MODULES_VADDR, "Modules start" },
@@ -42,7 +53,6 @@ static const struct addr_marker address_markers[] = {
42 { VMEMMAP_START, "vmemmap start" }, 53 { VMEMMAP_START, "vmemmap start" },
43 { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, 54 { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
44#endif 55#endif
45 { PAGE_OFFSET, "Linear mapping" },
46 { -1, NULL }, 56 { -1, NULL },
47}; 57};
48 58
@@ -376,7 +386,7 @@ static void ptdump_initialize(void)
376static struct ptdump_info kernel_ptdump_info = { 386static struct ptdump_info kernel_ptdump_info = {
377 .mm = &init_mm, 387 .mm = &init_mm,
378 .markers = address_markers, 388 .markers = address_markers,
379 .base_addr = VA_START, 389 .base_addr = PAGE_OFFSET,
380}; 390};
381 391
382void ptdump_check_wx(void) 392void ptdump_check_wx(void)
@@ -390,7 +400,7 @@ void ptdump_check_wx(void)
390 .check_wx = true, 400 .check_wx = true,
391 }; 401 };
392 402
393 walk_pgd(&st, &init_mm, VA_START); 403 walk_pgd(&st, &init_mm, PAGE_OFFSET);
394 note_page(&st, 0, 0, 0); 404 note_page(&st, 0, 0, 0);
395 if (st.wx_pages || st.uxn_pages) 405 if (st.wx_pages || st.uxn_pages)
396 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", 406 pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
@@ -401,6 +411,10 @@ void ptdump_check_wx(void)
401 411
402static int ptdump_init(void) 412static int ptdump_init(void)
403{ 413{
414 address_markers[PAGE_END_NR].start_address = PAGE_END;
415#ifdef CONFIG_KASAN
416 address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
417#endif
404 ptdump_initialize(); 418 ptdump_initialize();
405 ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); 419 ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
406 return 0; 420 return 0;
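Because PAGE_END and KASAN_SHADOW_START now depend on the runtime vabits_actual, the marker table keeps zero placeholders indexed by the new enum and patches them in ptdump_init(). A generic sketch of that patch-at-init pattern, with invented addresses:

#include <stdio.h>

struct addr_marker {
    unsigned long start_address;
    const char *name;
};

enum { LINEAR_START_NR, LINEAR_END_NR, NR_MARKERS };

/* runtime-dependent entries start as 0 and are patched during init */
static struct addr_marker markers[NR_MARKERS + 1] = {
    [LINEAR_START_NR] = { 0xffff000000000000UL, "Linear Mapping start" },
    [LINEAR_END_NR]   = { 0 /* patched below */, "Linear Mapping end" },
    { -1UL, NULL },
};

static unsigned long compute_linear_end(void)
{
    return 0xffff800000000000UL;  /* stand-in for PAGE_END */
}

int main(void)
{
    markers[LINEAR_END_NR].start_address = compute_linear_end();

    for (struct addr_marker *m = markers; m->name; m++)
        printf("%016lx %s\n", m->start_address, m->name);
    return 0;
}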
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index cfd65b63f36f..115d7a0e4b08 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -8,6 +8,7 @@
8 */ 8 */
9 9
10#include <linux/acpi.h> 10#include <linux/acpi.h>
11#include <linux/bitfield.h>
11#include <linux/extable.h> 12#include <linux/extable.h>
12#include <linux/signal.h> 13#include <linux/signal.h>
13#include <linux/mm.h> 14#include <linux/mm.h>
@@ -86,8 +87,8 @@ static void mem_abort_decode(unsigned int esr)
86 pr_alert("Mem abort info:\n"); 87 pr_alert("Mem abort info:\n");
87 88
88 pr_alert(" ESR = 0x%08x\n", esr); 89 pr_alert(" ESR = 0x%08x\n", esr);
89 pr_alert(" Exception class = %s, IL = %u bits\n", 90 pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n",
90 esr_get_class_string(esr), 91 ESR_ELx_EC(esr), esr_get_class_string(esr),
91 (esr & ESR_ELx_IL) ? 32 : 16); 92 (esr & ESR_ELx_IL) ? 32 : 16);
92 pr_alert(" SET = %lu, FnV = %lu\n", 93 pr_alert(" SET = %lu, FnV = %lu\n",
93 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT, 94 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
@@ -109,7 +110,7 @@ static inline bool is_ttbr0_addr(unsigned long addr)
109static inline bool is_ttbr1_addr(unsigned long addr) 110static inline bool is_ttbr1_addr(unsigned long addr)
110{ 111{
111 /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ 112 /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
112 return arch_kasan_reset_tag(addr) >= VA_START; 113 return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
113} 114}
114 115
115/* 116/*
@@ -138,10 +139,9 @@ static void show_pte(unsigned long addr)
138 return; 139 return;
139 } 140 }
140 141
141 pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n", 142 pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
142 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K, 143 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
143 mm == &init_mm ? VA_BITS : (int)vabits_user, 144 vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
144 (unsigned long)virt_to_phys(mm->pgd));
145 pgdp = pgd_offset(mm, addr); 145 pgdp = pgd_offset(mm, addr);
146 pgd = READ_ONCE(*pgdp); 146 pgd = READ_ONCE(*pgdp);
147 pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); 147 pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -242,6 +242,34 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
242 return false; 242 return false;
243} 243}
244 244
245static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
246 unsigned int esr,
247 struct pt_regs *regs)
248{
249 unsigned long flags;
250 u64 par, dfsc;
251
252 if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
253 (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
254 return false;
255
256 local_irq_save(flags);
257 asm volatile("at s1e1r, %0" :: "r" (addr));
258 isb();
259 par = read_sysreg(par_el1);
260 local_irq_restore(flags);
261
262 if (!(par & SYS_PAR_EL1_F))
263 return true;
264
265 /*
266 * If we got a different type of fault from the AT instruction,
267 * treat the translation fault as spurious.
268 */
269 dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
270 return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
271}
272
245static void die_kernel_fault(const char *msg, unsigned long addr, 273static void die_kernel_fault(const char *msg, unsigned long addr,
246 unsigned int esr, struct pt_regs *regs) 274 unsigned int esr, struct pt_regs *regs)
247{ 275{
@@ -270,6 +298,10 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
270 if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) 298 if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
271 return; 299 return;
272 300
301 if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
302 "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
303 return;
304
273 if (is_el1_permission_fault(addr, esr, regs)) { 305 if (is_el1_permission_fault(addr, esr, regs)) {
274 if (esr & ESR_ELx_WNR) 306 if (esr & ESR_ELx_WNR)
275 msg = "write to read-only memory"; 307 msg = "write to read-only memory";
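is_spurious_el1_translation_fault() re-walks the faulting address with AT S1E1R: if the lookup now succeeds, or reports a different fault class, the original translation fault must have been stale and can be ignored. The fault status lives in a bit-field of PAR_EL1, which is why extraction uses FIELD_GET (FIELD_PREP goes the opposite direction, inserting a value into a field). A small sketch of mask-based field accessors in the style of linux/bitfield.h, with an illustrative PAR-like layout:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)       (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_SHIFT(mask)   (__builtin_ctzll(mask))
#define FIELD_GET(mask, v)  (((v) & (mask)) >> FIELD_SHIFT(mask))
#define FIELD_PREP(mask, v) (((uint64_t)(v) << FIELD_SHIFT(mask)) & (mask))

#define PAR_F   (1ULL << 0)     /* translation failed */
#define PAR_FST GENMASK(6, 1)   /* fault status code */

int main(void)
{
    /* build a failed-translation PAR with FST = 0x05 (illustrative) */
    uint64_t par = PAR_F | FIELD_PREP(PAR_FST, 0x05);

    if (par & PAR_F)
        printf("AT failed, FST = %#llx\n",
               (unsigned long long)FIELD_GET(PAR_FST, par));
    return 0;
}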
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3c795278def..45c00a54909c 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -50,6 +50,12 @@
50s64 memstart_addr __ro_after_init = -1; 50s64 memstart_addr __ro_after_init = -1;
51EXPORT_SYMBOL(memstart_addr); 51EXPORT_SYMBOL(memstart_addr);
52 52
53s64 physvirt_offset __ro_after_init;
54EXPORT_SYMBOL(physvirt_offset);
55
56struct page *vmemmap __ro_after_init;
57EXPORT_SYMBOL(vmemmap);
58
53phys_addr_t arm64_dma_phys_limit __ro_after_init; 59phys_addr_t arm64_dma_phys_limit __ro_after_init;
54 60
55#ifdef CONFIG_KEXEC_CORE 61#ifdef CONFIG_KEXEC_CORE
@@ -301,7 +307,7 @@ static void __init fdt_enforce_memory_region(void)
301 307
302void __init arm64_memblock_init(void) 308void __init arm64_memblock_init(void)
303{ 309{
304 const s64 linear_region_size = -(s64)PAGE_OFFSET; 310 const s64 linear_region_size = BIT(vabits_actual - 1);
305 311
306 /* Handle linux,usable-memory-range property */ 312 /* Handle linux,usable-memory-range property */
307 fdt_enforce_memory_region(); 313 fdt_enforce_memory_region();
@@ -310,18 +316,25 @@ void __init arm64_memblock_init(void)
310 memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX); 316 memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);
311 317
312 /* 318 /*
313 * Ensure that the linear region takes up exactly half of the kernel
314 * virtual address space. This way, we can distinguish a linear address
315 * from a kernel/module/vmalloc address by testing a single bit.
316 */
317 BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));
318
319 /*
320 * Select a suitable value for the base of physical memory. 319 * Select a suitable value for the base of physical memory.
321 */ 320 */
322 memstart_addr = round_down(memblock_start_of_DRAM(), 321 memstart_addr = round_down(memblock_start_of_DRAM(),
323 ARM64_MEMSTART_ALIGN); 322 ARM64_MEMSTART_ALIGN);
324 323
324 physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;
325
326 vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));
327
328 /*
329 * If we are running with a 52-bit kernel VA config on a system that
330 * does not support it, we have to offset our vmemmap and physvirt_offset
331 * s.t. we avoid the 52-bit portion of the direct linear map
332 */
333 if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
334 vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
335 physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
336 }
337
325 /* 338 /*
326 * Remove the memory that we will not be able to cover with the 339 * Remove the memory that we will not be able to cover with the
327 * linear mapping. Take care not to clip the kernel which may be 340 * linear mapping. Take care not to clip the kernel which may be
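With physvirt_offset defined as PHYS_OFFSET - PAGE_OFFSET, linear-map address translation reduces to adding or subtracting one signed offset. A toy model with invented constants:

#include <stdint.h>
#include <stdio.h>

/* illustrative values, not real platform constants */
#define PAGE_OFFSET 0xffff000000000000ULL  /* start of the linear map */
#define PHYS_OFFSET 0x0000000080000000ULL  /* base of DRAM */

static int64_t physvirt_offset;

static uint64_t virt_to_phys(uint64_t va) { return va + physvirt_offset; }
static uint64_t phys_to_virt(uint64_t pa) { return pa - physvirt_offset; }

int main(void)
{
    physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;

    uint64_t pa = PHYS_OFFSET + 0x1234;
    uint64_t va = phys_to_virt(pa);

    printf("pa %#llx -> va %#llx -> pa %#llx\n",
           (unsigned long long)pa, (unsigned long long)va,
           (unsigned long long)virt_to_phys(va));
    return 0;
}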
@@ -570,8 +583,12 @@ void free_initmem(void)
570#ifdef CONFIG_BLK_DEV_INITRD 583#ifdef CONFIG_BLK_DEV_INITRD
571void __init free_initrd_mem(unsigned long start, unsigned long end) 584void __init free_initrd_mem(unsigned long start, unsigned long end)
572{ 585{
586 unsigned long aligned_start, aligned_end;
587
588 aligned_start = __virt_to_phys(start) & PAGE_MASK;
589 aligned_end = PAGE_ALIGN(__virt_to_phys(end));
590 memblock_free(aligned_start, aligned_end - aligned_start);
573 free_reserved_area((void *)start, (void *)end, 0, "initrd"); 591 free_reserved_area((void *)start, (void *)end, 0, "initrd");
574 memblock_free(__virt_to_phys(start), end - start);
575} 592}
576#endif 593#endif
577 594
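The free_initrd_mem() fix rounds the freed physical range outward to whole pages, matching the page-aligned reservation. The alignment arithmetic, sketched with the usual PAGE_MASK/PAGE_ALIGN definitions and arbitrary addresses:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    unsigned long start = 0x80201330, end = 0x80a0cff0;
    unsigned long aligned_start = start & PAGE_MASK;  /* round down */
    unsigned long aligned_end   = PAGE_ALIGN(end);    /* round up */

    printf("free [%#lx, %#lx) -> [%#lx, %#lx), %lu bytes\n",
           start, end, aligned_start, aligned_end,
           aligned_end - aligned_start);
    return 0;
}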
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 6cf97b904ebb..f87a32484ea8 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -156,7 +156,8 @@ asmlinkage void __init kasan_early_init(void)
156{ 156{
157 BUILD_BUG_ON(KASAN_SHADOW_OFFSET != 157 BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
158 KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))); 158 KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
159 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); 159 BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
160 BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
160 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); 161 BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
161 kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, 162 kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
162 true); 163 true);
@@ -225,10 +226,10 @@ void __init kasan_init(void)
225 kasan_map_populate(kimg_shadow_start, kimg_shadow_end, 226 kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
226 early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); 227 early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
227 228
228 kasan_populate_early_shadow((void *)KASAN_SHADOW_START, 229 kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
229 (void *)mod_shadow_start); 230 (void *)mod_shadow_start);
230 kasan_populate_early_shadow((void *)kimg_shadow_end, 231 kasan_populate_early_shadow((void *)kimg_shadow_end,
231 kasan_mem_to_shadow((void *)PAGE_OFFSET)); 232 (void *)KASAN_SHADOW_END);
232 233
233 if (kimg_shadow_start > mod_shadow_end) 234 if (kimg_shadow_start > mod_shadow_end)
234 kasan_populate_early_shadow((void *)mod_shadow_end, 235 kasan_populate_early_shadow((void *)mod_shadow_end,
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 750a69dde39b..53dc6f24cfb7 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -40,8 +40,9 @@
40 40
41u64 idmap_t0sz = TCR_T0SZ(VA_BITS); 41u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
42u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; 42u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
43u64 vabits_user __ro_after_init; 43
44EXPORT_SYMBOL(vabits_user); 44u64 __section(".mmuoff.data.write") vabits_actual;
45EXPORT_SYMBOL(vabits_actual);
45 46
46u64 kimage_voffset __ro_after_init; 47u64 kimage_voffset __ro_after_init;
47EXPORT_SYMBOL(kimage_voffset); 48EXPORT_SYMBOL(kimage_voffset);
@@ -398,7 +399,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift)
398static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, 399static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
399 phys_addr_t size, pgprot_t prot) 400 phys_addr_t size, pgprot_t prot)
400{ 401{
401 if (virt < VMALLOC_START) { 402 if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
402 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", 403 pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
403 &phys, virt); 404 &phys, virt);
404 return; 405 return;
@@ -425,7 +426,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
425static void update_mapping_prot(phys_addr_t phys, unsigned long virt, 426static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
426 phys_addr_t size, pgprot_t prot) 427 phys_addr_t size, pgprot_t prot)
427{ 428{
428 if (virt < VMALLOC_START) { 429 if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
429 pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", 430 pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
430 &phys, virt); 431 &phys, virt);
431 return; 432 return;
@@ -646,6 +647,8 @@ static void __init map_kernel(pgd_t *pgdp)
646 set_pgd(pgd_offset_raw(pgdp, FIXADDR_START), 647 set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
647 READ_ONCE(*pgd_offset_k(FIXADDR_START))); 648 READ_ONCE(*pgd_offset_k(FIXADDR_START)));
648 } else if (CONFIG_PGTABLE_LEVELS > 3) { 649 } else if (CONFIG_PGTABLE_LEVELS > 3) {
650 pgd_t *bm_pgdp;
651 pud_t *bm_pudp;
649 /* 652 /*
650 * The fixmap shares its top level pgd entry with the kernel 653 * The fixmap shares its top level pgd entry with the kernel
651 * mapping. This can really only occur when we are running 654 * mapping. This can really only occur when we are running
@@ -653,9 +656,9 @@ static void __init map_kernel(pgd_t *pgdp)
653 * entry instead. 656 * entry instead.
654 */ 657 */
655 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 658 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
656 pud_populate(&init_mm, 659 bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
657 pud_set_fixmap_offset(pgdp, FIXADDR_START), 660 bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
658 lm_alias(bm_pmd)); 661 pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
659 pud_clear_fixmap(); 662 pud_clear_fixmap();
660 } else { 663 } else {
661 BUG(); 664 BUG();
@@ -876,7 +879,7 @@ void __set_fixmap(enum fixed_addresses idx,
876 } 879 }
877} 880}
878 881
879void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) 882void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
880{ 883{
881 const u64 dt_virt_base = __fix_to_virt(FIX_FDT); 884 const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
882 int offset; 885 int offset;
@@ -929,19 +932,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
929 return dt_virt; 932 return dt_virt;
930} 933}
931 934
932void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
933{
934 void *dt_virt;
935 int size;
936
937 dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
938 if (!dt_virt)
939 return NULL;
940
941 memblock_reserve(dt_phys, size);
942 return dt_virt;
943}
944
945int __init arch_ioremap_p4d_supported(void) 935int __init arch_ioremap_p4d_supported(void)
946{ 936{
947 return 0; 937 return 0;
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 4f241cc7cc3b..4decf1659700 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -29,7 +29,7 @@ static __init int numa_parse_early_param(char *opt)
29{ 29{
30 if (!opt) 30 if (!opt)
31 return -EINVAL; 31 return -EINVAL;
32 if (!strncmp(opt, "off", 3)) 32 if (str_has_prefix(opt, "off"))
33 numa_off = true; 33 numa_off = true;
34 34
35 return 0; 35 return 0;
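str_has_prefix() returns the prefix length on a match (and 0 otherwise), so callers avoid repeating a hand-counted length as strncmp() requires. A sketch with the same shape as the kernel helper:

#include <stdio.h>
#include <string.h>

/* length of prefix if str starts with it, else 0 */
static size_t str_has_prefix(const char *str, const char *prefix)
{
    size_t len = strlen(prefix);

    return strncmp(str, prefix, len) == 0 ? len : 0;
}

int main(void)
{
    const char *opt = "off,extra";
    size_t n = str_has_prefix(opt, "off");

    if (n)
        printf("numa off (rest: \"%s\")\n", opt + n);
    return 0;
}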
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 03c53f16ee77..9ce7bd9d4d9c 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -128,7 +128,6 @@ int set_memory_nx(unsigned long addr, int numpages)
128 __pgprot(PTE_PXN), 128 __pgprot(PTE_PXN),
129 __pgprot(0)); 129 __pgprot(0));
130} 130}
131EXPORT_SYMBOL_GPL(set_memory_nx);
132 131
133int set_memory_x(unsigned long addr, int numpages) 132int set_memory_x(unsigned long addr, int numpages)
134{ 133{
@@ -136,7 +135,6 @@ int set_memory_x(unsigned long addr, int numpages)
136 __pgprot(0), 135 __pgprot(0),
137 __pgprot(PTE_PXN)); 136 __pgprot(PTE_PXN));
138} 137}
139EXPORT_SYMBOL_GPL(set_memory_x);
140 138
141int set_memory_valid(unsigned long addr, int numpages, int enable) 139int set_memory_valid(unsigned long addr, int numpages, int enable)
142{ 140{
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 7dbf2be470f6..a1e0592d1fbc 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -168,7 +168,7 @@ ENDPROC(cpu_do_switch_mm)
168.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 168.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
169 adrp \tmp1, empty_zero_page 169 adrp \tmp1, empty_zero_page
170 phys_to_ttbr \tmp2, \tmp1 170 phys_to_ttbr \tmp2, \tmp1
171 offset_ttbr1 \tmp2 171 offset_ttbr1 \tmp2, \tmp1
172 msr ttbr1_el1, \tmp2 172 msr ttbr1_el1, \tmp2
173 isb 173 isb
174 tlbi vmalle1 174 tlbi vmalle1
@@ -187,7 +187,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
187 187
188 __idmap_cpu_set_reserved_ttbr1 x1, x3 188 __idmap_cpu_set_reserved_ttbr1 x1, x3
189 189
190 offset_ttbr1 x0 190 offset_ttbr1 x0, x3
191 msr ttbr1_el1, x0 191 msr ttbr1_el1, x0
192 isb 192 isb
193 193
@@ -286,6 +286,15 @@ skip_pgd:
286 msr sctlr_el1, x18 286 msr sctlr_el1, x18
287 isb 287 isb
288 288
289 /*
290 * Invalidate the local I-cache so that any instructions fetched
291 * speculatively from the PoC are discarded, since they may have
292 * been dynamically patched at the PoU.
293 */
294 ic iallu
295 dsb nsh
296 isb
297
289 /* Set the flag to zero to indicate that we're all done */ 298 /* Set the flag to zero to indicate that we're all done */
290 str wzr, [flag_ptr] 299 str wzr, [flag_ptr]
291 ret 300 ret
@@ -362,7 +371,7 @@ __idmap_kpti_secondary:
362 cbnz w18, 1b 371 cbnz w18, 1b
363 372
364 /* All done, act like nothing happened */ 373 /* All done, act like nothing happened */
365 offset_ttbr1 swapper_ttb 374 offset_ttbr1 swapper_ttb, x18
366 msr ttbr1_el1, swapper_ttb 375 msr ttbr1_el1, swapper_ttb
367 isb 376 isb
368 ret 377 ret
@@ -438,10 +447,11 @@ ENTRY(__cpu_setup)
438 TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS 447 TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
439 tcr_clear_errata_bits x10, x9, x5 448 tcr_clear_errata_bits x10, x9, x5
440 449
441#ifdef CONFIG_ARM64_USER_VA_BITS_52 450#ifdef CONFIG_ARM64_VA_BITS_52
442 ldr_l x9, vabits_user 451 ldr_l x9, vabits_actual
443 sub x9, xzr, x9 452 sub x9, xzr, x9
444 add x9, x9, #64 453 add x9, x9, #64
454 tcr_set_t1sz x10, x9
445#else 455#else
446 ldr_l x9, idmap_t0sz 456 ldr_l x9, idmap_t0sz
447#endif 457#endif
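The rewritten setup computes T1SZ as 64 - vabits_actual via a negate-then-add (sub x9, xzr, x9; add x9, x9, #64) before inserting it into TCR_EL1 with the new tcr_set_t1sz helper. The arithmetic, spelled out for the two VA sizes a 52-bit-capable kernel may run with:

#include <stdio.h>

int main(void)
{
    /* sub x9, xzr, x9 ; add x9, x9, #64  ==>  x9 = 64 - vabits */
    for (unsigned long vabits = 48; vabits <= 52; vabits += 4)
        printf("vabits_actual=%lu -> T1SZ=%lu\n", vabits, 64 - vabits);
    return 0;
}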
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index c345b79414a9..403f7e193833 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -39,13 +39,11 @@ endif
39uname := $(shell uname -m) 39uname := $(shell uname -m)
40KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig 40KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig
41 41
42ifdef CONFIG_PPC64
43new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) 42new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
44 43
45ifeq ($(new_nm),y) 44ifeq ($(new_nm),y)
46NM := $(NM) --synthetic 45NM := $(NM) --synthetic
47endif 46endif
48endif
49 47
50# BITS is used as extension for files which are available in a 32 bit 48# BITS is used as extension for files which are available in a 32 bit
51# and a 64 bit version to simplify shared Makefiles. 49# and a 64 bit version to simplify shared Makefiles.
diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h
deleted file mode 100644
index 62fd24739852..000000000000
--- a/arch/powerpc/include/asm/error-injection.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2
3#ifndef _ASM_ERROR_INJECTION_H
4#define _ASM_ERROR_INJECTION_H
5
6#include <linux/compiler.h>
7#include <linux/linkage.h>
8#include <asm/ptrace.h>
9#include <asm-generic/error-injection.h>
10
11void override_function_with_return(struct pt_regs *regs);
12
13#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 59a4727ecd6c..86ee362a1375 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -48,6 +48,7 @@ config RISCV
48 select PCI_MSI if PCI 48 select PCI_MSI if PCI
49 select RISCV_TIMER 49 select RISCV_TIMER
50 select GENERIC_IRQ_MULTI_HANDLER 50 select GENERIC_IRQ_MULTI_HANDLER
51 select GENERIC_ARCH_TOPOLOGY if SMP
51 select ARCH_HAS_PTE_SPECIAL 52 select ARCH_HAS_PTE_SPECIAL
52 select ARCH_HAS_MMIOWB 53 select ARCH_HAS_MMIOWB
53 select HAVE_EBPF_JIT if 64BIT 54 select HAVE_EBPF_JIT if 64BIT
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 7462a44304fe..18ae6da5115e 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -8,6 +8,7 @@
8 * Copyright (C) 2017 SiFive 8 * Copyright (C) 2017 SiFive
9 */ 9 */
10 10
11#include <linux/arch_topology.h>
11#include <linux/module.h> 12#include <linux/module.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
@@ -35,6 +36,7 @@ static DECLARE_COMPLETION(cpu_running);
35 36
36void __init smp_prepare_boot_cpu(void) 37void __init smp_prepare_boot_cpu(void)
37{ 38{
39 init_cpu_topology();
38} 40}
39 41
40void __init smp_prepare_cpus(unsigned int max_cpus) 42void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -138,6 +140,7 @@ asmlinkage void __init smp_callin(void)
138 140
139 trap_init(); 141 trap_init();
140 notify_cpu_starting(smp_processor_id()); 142 notify_cpu_starting(smp_processor_id());
143 update_siblings_masks(smp_processor_id());
141 set_cpu_online(smp_processor_id(), 1); 144 set_cpu_online(smp_processor_id(), 1);
142 /* 145 /*
143 * Remote TLB flushes are ignored while the CPU is offline, so emit 146 * Remote TLB flushes are ignored while the CPU is offline, so emit
diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h
deleted file mode 100644
index 47b7a1296245..000000000000
--- a/arch/x86/include/asm/error-injection.h
+++ /dev/null
@@ -1,13 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_ERROR_INJECTION_H
3#define _ASM_ERROR_INJECTION_H
4
5#include <linux/compiler.h>
6#include <linux/linkage.h>
7#include <asm/ptrace.h>
8#include <asm-generic/error-injection.h>
9
10asmlinkage void just_return_func(void);
11void override_function_with_return(struct pt_regs *regs);
12
13#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 8569b79e8b58..5a7551d060f2 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1256,12 +1256,12 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev,
1256 1256
1257 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; 1257 smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1258 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { 1258 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1259 int node = acpi_map_pxm_to_node(smmu->pxm); 1259 int dev_node = acpi_map_pxm_to_node(smmu->pxm);
1260 1260
1261 if (node != NUMA_NO_NODE && !node_online(node)) 1261 if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
1262 return -EINVAL; 1262 return -EINVAL;
1263 1263
1264 set_dev_node(dev, node); 1264 set_dev_node(dev, dev_node);
1265 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", 1265 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1266 smmu->base_address, 1266 smmu->base_address,
1267 smmu->pxm); 1267 smmu->pxm);
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 1e7ac0bd0d3a..f31544d3656e 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -541,6 +541,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag)
541} 541}
542 542
543/** 543/**
544 * check_acpi_cpu_flag() - Determine if CPU node has a flag set
545 * @cpu: Kernel logical CPU number
546 * @rev: The minimum PPTT revision defining the flag
547 * @flag: The flag itself
548 *
549 * Check the node representing a CPU for a given flag.
550 *
551 * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or
552 * the table revision isn't new enough.
553 * 1, any passed flag set
554 * 0, flag unset
555 */
556static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
557{
558 struct acpi_table_header *table;
559 acpi_status status;
560 u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
561 struct acpi_pptt_processor *cpu_node = NULL;
562 int ret = -ENOENT;
563
564 status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
565 if (ACPI_FAILURE(status)) {
566 acpi_pptt_warn_missing();
567 return ret;
568 }
569
570 if (table->revision >= rev)
571 cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
572
573 if (cpu_node)
574 ret = (cpu_node->flags & flag) != 0;
575
576 acpi_put_table(table);
577
578 return ret;
579}
580
581/**
544 * acpi_find_last_cache_level() - Determines the number of cache levels for a PE 582 * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
545 * @cpu: Kernel logical CPU number 583 * @cpu: Kernel logical CPU number
546 * 584 *
@@ -605,6 +643,20 @@ int cache_setup_acpi(unsigned int cpu)
605} 643}
606 644
607/** 645/**
646 * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread
647 * @cpu: Kernel logical CPU number
648 *
649 * Return: 1, a thread
650 * 0, not a thread
651 * -ENOENT, if the PPTT doesn't exist, the CPU cannot be found or
652 * the table revision isn't new enough.
653 */
654int acpi_pptt_cpu_is_thread(unsigned int cpu)
655{
656 return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD);
657}
658
659/**
608 * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU 660 * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU
609 * @cpu: Kernel logical CPU number 661 * @cpu: Kernel logical CPU number
610 * @level: The topological level for which we would like a unique ID 662 * @level: The topological level for which we would like a unique ID
@@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
664 return ret; 716 return ret;
665} 717}
666 718
667
668/** 719/**
669 * find_acpi_cpu_topology_package() - Determine a unique CPU package value 720 * find_acpi_cpu_topology_package() - Determine a unique CPU package value
670 * @cpu: Kernel logical CPU number 721 * @cpu: Kernel logical CPU number
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index dc404492381d..28b92e3cc570 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -202,7 +202,7 @@ config GENERIC_ARCH_TOPOLOGY
202 help 202 help
203 Enable support for architectures common topology code: e.g., parsing 203 Enable support for architectures common topology code: e.g., parsing
204 CPU capacity information from DT, usage of such information for 204 CPU capacity information from DT, usage of such information for
205 appropriate scaling, sysfs interface for changing capacity values at 205 appropriate scaling, sysfs interface for reading capacity values at
206 runtime. 206 runtime.
207 207
208endmenu 208endmenu
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 63c1e76739f1..b54d241a2ff5 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -15,6 +15,11 @@
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/sched/topology.h> 16#include <linux/sched/topology.h>
17#include <linux/cpuset.h> 17#include <linux/cpuset.h>
18#include <linux/cpumask.h>
19#include <linux/init.h>
20#include <linux/percpu.h>
21#include <linux/sched.h>
22#include <linux/smp.h>
18 23
19DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; 24DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
20 25
@@ -241,3 +246,296 @@ static void parsing_done_workfn(struct work_struct *work)
241#else 246#else
242core_initcall(free_raw_capacity); 247core_initcall(free_raw_capacity);
243#endif 248#endif
249
250#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
251static int __init get_cpu_for_node(struct device_node *node)
252{
253 struct device_node *cpu_node;
254 int cpu;
255
256 cpu_node = of_parse_phandle(node, "cpu", 0);
257 if (!cpu_node)
258 return -1;
259
260 cpu = of_cpu_node_to_id(cpu_node);
261 if (cpu >= 0)
262 topology_parse_cpu_capacity(cpu_node, cpu);
263 else
264 pr_crit("Unable to find CPU node for %pOF\n", cpu_node);
265
266 of_node_put(cpu_node);
267 return cpu;
268}
269
270static int __init parse_core(struct device_node *core, int package_id,
271 int core_id)
272{
273 char name[10];
274 bool leaf = true;
275 int i = 0;
276 int cpu;
277 struct device_node *t;
278
279 do {
280 snprintf(name, sizeof(name), "thread%d", i);
281 t = of_get_child_by_name(core, name);
282 if (t) {
283 leaf = false;
284 cpu = get_cpu_for_node(t);
285 if (cpu >= 0) {
286 cpu_topology[cpu].package_id = package_id;
287 cpu_topology[cpu].core_id = core_id;
288 cpu_topology[cpu].thread_id = i;
289 } else {
290 pr_err("%pOF: Can't get CPU for thread\n",
291 t);
292 of_node_put(t);
293 return -EINVAL;
294 }
295 of_node_put(t);
296 }
297 i++;
298 } while (t);
299
300 cpu = get_cpu_for_node(core);
301 if (cpu >= 0) {
302 if (!leaf) {
303 pr_err("%pOF: Core has both threads and CPU\n",
304 core);
305 return -EINVAL;
306 }
307
308 cpu_topology[cpu].package_id = package_id;
309 cpu_topology[cpu].core_id = core_id;
310 } else if (leaf) {
311 pr_err("%pOF: Can't get CPU for leaf core\n", core);
312 return -EINVAL;
313 }
314
315 return 0;
316}
317
318static int __init parse_cluster(struct device_node *cluster, int depth)
319{
320 char name[10];
321 bool leaf = true;
322 bool has_cores = false;
323 struct device_node *c;
324 static int package_id __initdata;
325 int core_id = 0;
326 int i, ret;
327
328 /*
329 * First check for child clusters; we currently ignore any
330 * information about the nesting of clusters and present the
331 * scheduler with a flat list of them.
332 */
333 i = 0;
334 do {
335 snprintf(name, sizeof(name), "cluster%d", i);
336 c = of_get_child_by_name(cluster, name);
337 if (c) {
338 leaf = false;
339 ret = parse_cluster(c, depth + 1);
340 of_node_put(c);
341 if (ret != 0)
342 return ret;
343 }
344 i++;
345 } while (c);
346
347 /* Now check for cores */
348 i = 0;
349 do {
350 snprintf(name, sizeof(name), "core%d", i);
351 c = of_get_child_by_name(cluster, name);
352 if (c) {
353 has_cores = true;
354
355 if (depth == 0) {
356 pr_err("%pOF: cpu-map children should be clusters\n",
357 c);
358 of_node_put(c);
359 return -EINVAL;
360 }
361
362 if (leaf) {
363 ret = parse_core(c, package_id, core_id++);
364 } else {
365 pr_err("%pOF: Non-leaf cluster with core %s\n",
366 cluster, name);
367 ret = -EINVAL;
368 }
369
370 of_node_put(c);
371 if (ret != 0)
372 return ret;
373 }
374 i++;
375 } while (c);
376
377 if (leaf && !has_cores)
378 pr_warn("%pOF: empty cluster\n", cluster);
379
380 if (leaf)
381 package_id++;
382
383 return 0;
384}
385
386static int __init parse_dt_topology(void)
387{
388 struct device_node *cn, *map;
389 int ret = 0;
390 int cpu;
391
392 cn = of_find_node_by_path("/cpus");
393 if (!cn) {
394 pr_err("No CPU information found in DT\n");
395 return 0;
396 }
397
398 /*
399 * When topology is provided, cpu-map is essentially a root
400 * cluster with restricted subnodes.
401 */
402 map = of_get_child_by_name(cn, "cpu-map");
403 if (!map)
404 goto out;
405
406 ret = parse_cluster(map, 0);
407 if (ret != 0)
408 goto out_map;
409
410 topology_normalize_cpu_scale();
411
412 /*
413 * Check that all cores are in the topology; the SMP code will
414 * only mark cores described in the DT as possible.
415 */
416 for_each_possible_cpu(cpu)
417 if (cpu_topology[cpu].package_id == -1)
418 ret = -EINVAL;
419
420out_map:
421 of_node_put(map);
422out:
423 of_node_put(cn);
424 return ret;
425}
426#endif
427
428/*
429 * cpu topology table
430 */
431struct cpu_topology cpu_topology[NR_CPUS];
432EXPORT_SYMBOL_GPL(cpu_topology);
433
434const struct cpumask *cpu_coregroup_mask(int cpu)
435{
436 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));
437
438 /* Find the smaller of NUMA, core or LLC siblings */
439 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
440 /* not NUMA in package, let's use the package siblings */
441 core_mask = &cpu_topology[cpu].core_sibling;
442 }
443 if (cpu_topology[cpu].llc_id != -1) {
444 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
445 core_mask = &cpu_topology[cpu].llc_sibling;
446 }
447
448 return core_mask;
449}
450
451void update_siblings_masks(unsigned int cpuid)
452{
453 struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
454 int cpu;
455
456 /* update core and thread sibling masks */
457 for_each_online_cpu(cpu) {
458 cpu_topo = &cpu_topology[cpu];
459
460 if (cpuid_topo->llc_id == cpu_topo->llc_id) {
461 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
462 cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
463 }
464
465 if (cpuid_topo->package_id != cpu_topo->package_id)
466 continue;
467
468 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
469 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
470
471 if (cpuid_topo->core_id != cpu_topo->core_id)
472 continue;
473
474 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
475 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
476 }
477}
478
479static void clear_cpu_topology(int cpu)
480{
481 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
482
483 cpumask_clear(&cpu_topo->llc_sibling);
484 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
485
486 cpumask_clear(&cpu_topo->core_sibling);
487 cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
488 cpumask_clear(&cpu_topo->thread_sibling);
489 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
490}
491
492void __init reset_cpu_topology(void)
493{
494 unsigned int cpu;
495
496 for_each_possible_cpu(cpu) {
497 struct cpu_topology *cpu_topo = &cpu_topology[cpu];
498
499 cpu_topo->thread_id = -1;
500 cpu_topo->core_id = -1;
501 cpu_topo->package_id = -1;
502 cpu_topo->llc_id = -1;
503
504 clear_cpu_topology(cpu);
505 }
506}
507
508void remove_cpu_topology(unsigned int cpu)
509{
510 int sibling;
511
512 for_each_cpu(sibling, topology_core_cpumask(cpu))
513 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
514 for_each_cpu(sibling, topology_sibling_cpumask(cpu))
515 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
516 for_each_cpu(sibling, topology_llc_cpumask(cpu))
517 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
518
519 clear_cpu_topology(cpu);
520}
521
522__weak int __init parse_acpi_topology(void)
523{
524 return 0;
525}
526
527#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
528void __init init_cpu_topology(void)
529{
530 reset_cpu_topology();
531
532 /*
533 * Discard anything that was parsed if we hit an error so we
534 * don't use partial information.
535 */
536 if (parse_acpi_topology())
537 reset_cpu_topology();
538 else if (of_have_populated_dt() && parse_dt_topology())
539 reset_cpu_topology();
540}
541#endif
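update_siblings_masks() cross-links the new CPU with every online CPU it shares an LLC, package or core with, keeping both sides' masks symmetric. A condensed sketch using 64-bit masks in place of cpumasks, with an invented two-package topology:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

struct cpu_topology {
    int package_id, core_id, llc_id;
    uint64_t core_sibling, thread_sibling, llc_sibling;
};

static struct cpu_topology cpu_topology[NR_CPUS];

static void update_siblings_masks(int cpuid, int nr_online)
{
    struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];

    for (int cpu = 0; cpu < nr_online; cpu++) {
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];

        if (cpuid_topo->llc_id == cpu_topo->llc_id) {
            cpuid_topo->llc_sibling |= 1ULL << cpu;
            cpu_topo->llc_sibling |= 1ULL << cpuid;
        }
        if (cpuid_topo->package_id != cpu_topo->package_id)
            continue;
        cpuid_topo->core_sibling |= 1ULL << cpu;
        cpu_topo->core_sibling |= 1ULL << cpuid;
        if (cpuid_topo->core_id != cpu_topo->core_id)
            continue;
        cpuid_topo->thread_sibling |= 1ULL << cpu;
        cpu_topo->thread_sibling |= 1ULL << cpuid;
    }
}

int main(void)
{
    /* two packages of two cores, one LLC per package, onlined in order */
    for (int cpu = 0; cpu < 4; cpu++) {
        cpu_topology[cpu] = (struct cpu_topology){
            .package_id = cpu / 2, .core_id = cpu, .llc_id = cpu / 2,
        };
        update_siblings_masks(cpu, cpu + 1);
    }
    for (int cpu = 0; cpu < 4; cpu++)
        printf("cpu%d core_siblings=%#llx llc_siblings=%#llx\n", cpu,
               (unsigned long long)cpu_topology[cpu].core_sibling,
               (unsigned long long)cpu_topology[cpu].llc_sibling);
    return 0;
}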
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3e866885a405..2794f4b3f62d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -573,3 +573,12 @@ config RANDOM_TRUST_CPU
573 has not installed a hidden back door to compromise the CPU's 573 has not installed a hidden back door to compromise the CPU's
574 random number generation facilities. This can also be configured 574 random number generation facilities. This can also be configured
575 at boot with "random.trust_cpu=on/off". 575 at boot with "random.trust_cpu=on/off".
576
577config RANDOM_TRUST_BOOTLOADER
578 bool "Trust the bootloader to initialize Linux's CRNG"
579 help
580 Some bootloaders can provide entropy to increase the kernel's initial
581 device randomness. Say Y here to assume the entropy provided by the
582 bootloader is trustworthy so it will be added to the kernel's entropy
583 pool. Otherwise, say N here so it will be regarded as device input that
584 only mixes the entropy pool. \ No newline at end of file
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 5d5ea4ce1442..566922df4b7b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2445,3 +2445,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
2445 credit_entropy_bits(poolp, entropy); 2445 credit_entropy_bits(poolp, entropy);
2446} 2446}
2447EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); 2447EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
2448
2449/* Handle random seed passed by bootloader.
2450 * If the seed is trustworthy, it is credited as if it came from a hardware
2451 * RNG. Otherwise it is only mixed in as device data.
2452 * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
2453 */
2454void add_bootloader_randomness(const void *buf, unsigned int size)
2455{
2456 if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
2457 add_hwgenerator_randomness(buf, size, size * 8);
2458 else
2459 add_device_randomness(buf, size);
2460}
2461EXPORT_SYMBOL_GPL(add_bootloader_randomness); \ No newline at end of file
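add_bootloader_randomness() is a single policy switch between two existing sinks: a trusted seed is credited like hardware-RNG input (8 bits per byte), an untrusted one is only mixed in. A sketch of that dispatch, with stub sinks standing in for the real entropy-pool functions:

#include <stdio.h>

#define CONFIG_RANDOM_TRUST_BOOTLOADER 1  /* stand-in for IS_ENABLED() */

static void add_hwgenerator_randomness(const void *buf, unsigned int size,
                                       unsigned int entropy_bits)
{
    printf("mix %u bytes, credit %u bits\n", size, entropy_bits);
}

static void add_device_randomness(const void *buf, unsigned int size)
{
    printf("mix %u bytes, credit nothing\n", size);
}

/* mirrors the shape of the new helper: one policy switch, two sinks */
static void add_bootloader_randomness(const void *buf, unsigned int size)
{
    if (CONFIG_RANDOM_TRUST_BOOTLOADER)
        add_hwgenerator_randomness(buf, size, size * 8);
    else
        add_device_randomness(buf, size);
}

int main(void)
{
    unsigned char seed[16] = { 0 };

    add_bootloader_randomness(seed, sizeof(seed));
    return 0;
}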
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 48cb3d4bb7d1..d8530475493c 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -13,6 +13,16 @@ config ARM_CPUIDLE
13 initialized by calling the CPU operations init idle hook 13 initialized by calling the CPU operations init idle hook
14 provided by architecture code. 14 provided by architecture code.
15 15
16config ARM_PSCI_CPUIDLE
17 bool "PSCI CPU idle Driver"
18 depends on ARM_PSCI_FW
19 select DT_IDLE_STATES
20 select CPU_IDLE_MULTIPLE_DRIVERS
21 help
22 Select this to enable PSCI firmware based CPUidle driver for ARM.
23 It provides an idle driver that is capable of detecting and
24 managing idle states through the PSCI firmware interface.
25
16config ARM_BIG_LITTLE_CPUIDLE 26config ARM_BIG_LITTLE_CPUIDLE
17 bool "Support for ARM big.LITTLE processors" 27 bool "Support for ARM big.LITTLE processors"
18 depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS 28 depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 9d7176cee3d3..40d016339b29 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
20obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o 20obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o
21obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o 21obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o
22obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o 22obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o
23obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o
23 24
24############################################################################### 25###############################################################################
25# MIPS drivers 26# MIPS drivers
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 5bcd82c35dcf..9e5156d39627 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -15,7 +15,6 @@
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/topology.h>
19 18
20#include <asm/cpuidle.h> 19#include <asm/cpuidle.h>
21 20
@@ -106,11 +105,17 @@ static int __init arm_idle_init_cpu(int cpu)
106 ret = arm_cpuidle_init(cpu); 105 ret = arm_cpuidle_init(cpu);
107 106
108 /* 107 /*
109 * Allow the initialization to continue for other CPUs, if the reported 108 * Allow the initialization to continue for other CPUs, if the
110 * failure is a HW misconfiguration/breakage (-ENXIO). 109 * reported failure is a HW misconfiguration/breakage (-ENXIO).
110 *
111 * Some platforms do not support idle operations
112 * (arm_cpuidle_init() returning -EOPNOTSUPP); we should
113 * not flag this case as an error, since it is a valid
114 * configuration.
111 */ 115 */
112 if (ret) { 116 if (ret) {
113 pr_err("CPU %d failed to init idle CPU ops\n", cpu); 117 if (ret != -EOPNOTSUPP)
118 pr_err("CPU %d failed to init idle CPU ops\n", cpu);
114 ret = ret == -ENXIO ? 0 : ret; 119 ret = ret == -ENXIO ? 0 : ret;
115 goto out_kfree_drv; 120 goto out_kfree_drv;
116 } 121 }
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
new file mode 100644
index 000000000000..f3c1a2396f98
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -0,0 +1,236 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * PSCI CPU idle driver.
4 *
5 * Copyright (C) 2019 ARM Ltd.
6 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
7 */
8
9#define pr_fmt(fmt) "CPUidle PSCI: " fmt
10
11#include <linux/cpuidle.h>
12#include <linux/cpumask.h>
13#include <linux/cpu_pm.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/of.h>
17#include <linux/of_device.h>
18#include <linux/psci.h>
19#include <linux/slab.h>
20
21#include <asm/cpuidle.h>
22
23#include "dt_idle_states.h"
24
25static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
26
27static int psci_enter_idle_state(struct cpuidle_device *dev,
28 struct cpuidle_driver *drv, int idx)
29{
30 u32 *state = __this_cpu_read(psci_power_state);
31
32 return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
33 idx, state[idx - 1]);
34}
35
36static struct cpuidle_driver psci_idle_driver __initdata = {
37 .name = "psci_idle",
38 .owner = THIS_MODULE,
39 /*
40 * PSCI idle states rely on architectural WFI to
41 * be represented as state index 0.
42 */
43 .states[0] = {
44 .enter = psci_enter_idle_state,
45 .exit_latency = 1,
46 .target_residency = 1,
47 .power_usage = UINT_MAX,
48 .name = "WFI",
49 .desc = "ARM WFI",
50 }
51};
52
53static const struct of_device_id psci_idle_state_match[] __initconst = {
54 { .compatible = "arm,idle-state",
55 .data = psci_enter_idle_state },
56 { },
57};
58
59static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state)
60{
61 int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
62
63 if (err) {
64 pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
65 return err;
66 }
67
68 if (!psci_power_state_is_valid(*state)) {
69 pr_warn("Invalid PSCI power state %#x\n", *state);
70 return -EINVAL;
71 }
72
73 return 0;
74}
75
76static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
77{
78 int i, ret = 0, count = 0;
79 u32 *psci_states;
80 struct device_node *state_node;
81
82 /* Count idle states */
83 while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
84 count))) {
85 count++;
86 of_node_put(state_node);
87 }
88
89 if (!count)
90 return -ENODEV;
91
92 psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
93 if (!psci_states)
94 return -ENOMEM;
95
96 for (i = 0; i < count; i++) {
97 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
98 ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
99 of_node_put(state_node);
100
101 if (ret)
102 goto free_mem;
103
104 pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
105 }
106
107 /* Idle states parsed correctly, initialize per-cpu pointer */
108 per_cpu(psci_power_state, cpu) = psci_states;
109 return 0;
110
111free_mem:
112 kfree(psci_states);
113 return ret;
114}
115
116static __init int psci_cpu_init_idle(unsigned int cpu)
117{
118 struct device_node *cpu_node;
119 int ret;
120
121 /*
122 * If the PSCI cpu_suspend function hook has not been initialized,
123 * idle states must not be enabled, so bail out
124 */
125 if (!psci_ops.cpu_suspend)
126 return -EOPNOTSUPP;
127
128 cpu_node = of_cpu_device_node_get(cpu);
129 if (!cpu_node)
130 return -ENODEV;
131
132 ret = psci_dt_cpu_init_idle(cpu_node, cpu);
133
134 of_node_put(cpu_node);
135
136 return ret;
137}
138
139static int __init psci_idle_init_cpu(int cpu)
140{
141 struct cpuidle_driver *drv;
142 struct device_node *cpu_node;
143 const char *enable_method;
144 int ret = 0;
145
146 cpu_node = of_cpu_device_node_get(cpu);
147 if (!cpu_node)
148 return -ENODEV;
149
150 /*
151 * Check whether the enable-method for the cpu is PSCI, fail
152 * if it is not.
153 */
154 enable_method = of_get_property(cpu_node, "enable-method", NULL);
155 if (!enable_method || (strcmp(enable_method, "psci")))
156 ret = -ENODEV;
157
158 of_node_put(cpu_node);
159 if (ret)
160 return ret;
161
162 drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL);
163 if (!drv)
164 return -ENOMEM;
165
166 drv->cpumask = (struct cpumask *)cpumask_of(cpu);
167
168 /*
169 * Initialize idle states data, starting at index 1, since
170 * by default idle state 0 is the quiescent state reached
171 * by the cpu by executing the wfi instruction.
172 *
173 * If no DT idle states are detected (ret == 0) let the driver
174 * initialization fail accordingly since there is no reason to
175 * initialize the idle driver if only wfi is supported; the
176 * default architectural back-end already executes wfi
177 * on idle entry.
178 */
179 ret = dt_init_idle_driver(drv, psci_idle_state_match, 1);
180 if (ret <= 0) {
181 ret = ret ? : -ENODEV;
182 goto out_kfree_drv;
183 }
184
185 /*
186 * Initialize PSCI idle states.
187 */
188 ret = psci_cpu_init_idle(cpu);
189 if (ret) {
190 pr_err("CPU %d failed to PSCI idle\n", cpu);
191 goto out_kfree_drv;
192 }
193
194 ret = cpuidle_register(drv, NULL);
195 if (ret)
196 goto out_kfree_drv;
197
198 return 0;
199
200out_kfree_drv:
201 kfree(drv);
202 return ret;
203}
204
205/*
206 * psci_idle_init - Initializes PSCI cpuidle driver
207 *
208 * Initializes the PSCI cpuidle driver for all CPUs; if any CPU
209 * fails to register, roll back and unregister the CPUs that
210 * were already set up.
211 */
212static int __init psci_idle_init(void)
213{
214 int cpu, ret;
215 struct cpuidle_driver *drv;
216 struct cpuidle_device *dev;
217
218 for_each_possible_cpu(cpu) {
219 ret = psci_idle_init_cpu(cpu);
220 if (ret)
221 goto out_fail;
222 }
223
224 return 0;
225
226out_fail:
227 while (--cpu >= 0) {
228 dev = per_cpu(cpuidle_devices, cpu);
229 drv = cpuidle_get_cpu_driver(dev);
230 cpuidle_unregister(drv);
231 kfree(drv);
232 }
233
234 return ret;
235}
236device_initcall(psci_idle_init);
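psci_idle_init() registers one driver per CPU and, if any registration fails, walks backwards to unregister the CPUs that already succeeded. The init-with-rollback shape, reduced to stubs (the real code tears down via cpuidle_get_cpu_driver()/cpuidle_unregister()):

#include <stdio.h>

#define NR 4

static int init_one(int cpu)
{
    if (cpu == 2)
        return -1;  /* simulate a failure part-way through */
    printf("cpu%d registered\n", cpu);
    return 0;
}

static void teardown_one(int cpu)
{
    printf("cpu%d unregistered\n", cpu);
}

int main(void)
{
    int cpu, ret = 0;

    for (cpu = 0; cpu < NR; cpu++) {
        ret = init_one(cpu);
        if (ret)
            break;
    }
    if (ret)
        /* roll back only the CPUs that made it */
        while (--cpu >= 0)
            teardown_one(cpu);
    return ret ? 1 : 0;
}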
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index f82ccd39a913..84f4ff351c62 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -103,7 +103,7 @@ static inline bool psci_power_state_loses_context(u32 state)
103 return state & mask; 103 return state & mask;
104} 104}
105 105
106static inline bool psci_power_state_is_valid(u32 state) 106bool psci_power_state_is_valid(u32 state)
107{ 107{
108 const u32 valid_mask = psci_has_ext_power_state() ? 108 const u32 valid_mask = psci_has_ext_power_state() ?
109 PSCI_1_0_EXT_POWER_STATE_MASK : 109 PSCI_1_0_EXT_POWER_STATE_MASK :
@@ -277,175 +277,24 @@ static int __init psci_features(u32 psci_func_id)
277} 277}
278 278
279#ifdef CONFIG_CPU_IDLE 279#ifdef CONFIG_CPU_IDLE
280static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); 280static int psci_suspend_finisher(unsigned long state)
281
282static int psci_dt_parse_state_node(struct device_node *np, u32 *state)
283{
284 int err = of_property_read_u32(np, "arm,psci-suspend-param", state);
285
286 if (err) {
287 pr_warn("%pOF missing arm,psci-suspend-param property\n", np);
288 return err;
289 }
290
291 if (!psci_power_state_is_valid(*state)) {
292 pr_warn("Invalid PSCI power state %#x\n", *state);
293 return -EINVAL;
294 }
295
296 return 0;
297}
298
299static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
300{
301 int i, ret = 0, count = 0;
302 u32 *psci_states;
303 struct device_node *state_node;
304
305 /* Count idle states */
306 while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
307 count))) {
308 count++;
309 of_node_put(state_node);
310 }
311
312 if (!count)
313 return -ENODEV;
314
315 psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-		ret = psci_dt_parse_state_node(state_node, &psci_states[i]);
-		of_node_put(state_node);
-
-		if (ret)
-			goto free_mem;
-
-		pr_debug("psci-power-state %#x index %d\n", psci_states[i], i);
-	}
-
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-
-free_mem:
-	kfree(psci_states);
-	return ret;
-}
-
-#ifdef CONFIG_ACPI
-#include <acpi/processor.h>
-
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-	int i, count;
-	u32 *psci_states;
-	struct acpi_lpi_state *lpi;
-	struct acpi_processor *pr = per_cpu(processors, cpu);
-
-	if (unlikely(!pr || !pr->flags.has_lpi))
-		return -EINVAL;
-
-	count = pr->power.count - 1;
-	if (count <= 0)
-		return -ENODEV;
-
-	psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-	if (!psci_states)
-		return -ENOMEM;
-
-	for (i = 0; i < count; i++) {
-		u32 state;
-
-		lpi = &pr->power.lpi_states[i + 1];
-		/*
-		 * Only bits[31:0] represent a PSCI power_state while
-		 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
-		 */
-		state = lpi->address;
-		if (!psci_power_state_is_valid(state)) {
-			pr_warn("Invalid PSCI power state %#x\n", state);
-			kfree(psci_states);
-			return -EINVAL;
-		}
-		psci_states[i] = state;
-	}
-	/* Idle states parsed correctly, initialize per-cpu pointer */
-	per_cpu(psci_power_state, cpu) = psci_states;
-	return 0;
-}
-#else
-static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu)
-{
-	return -EINVAL;
-}
-#endif
-
-int psci_cpu_init_idle(unsigned int cpu)
-{
-	struct device_node *cpu_node;
-	int ret;
-
-	/*
-	 * If the PSCI cpu_suspend function hook has not been initialized
-	 * idle states must not be enabled, so bail out
-	 */
-	if (!psci_ops.cpu_suspend)
-		return -EOPNOTSUPP;
-
-	if (!acpi_disabled)
-		return psci_acpi_cpu_init_idle(cpu);
-
-	cpu_node = of_get_cpu_node(cpu, NULL);
-	if (!cpu_node)
-		return -ENODEV;
-
-	ret = psci_dt_cpu_init_idle(cpu_node, cpu);
-
-	of_node_put(cpu_node);
-
-	return ret;
-}
-
-static int psci_suspend_finisher(unsigned long index)
+static int psci_suspend_finisher(unsigned long state)
 {
-	u32 *state = __this_cpu_read(psci_power_state);
+	u32 power_state = state;
 
-	return psci_ops.cpu_suspend(state[index - 1],
-				    __pa_symbol(cpu_resume));
+	return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume));
 }
 
-int psci_cpu_suspend_enter(unsigned long index)
+int psci_cpu_suspend_enter(u32 state)
 {
 	int ret;
-	u32 *state = __this_cpu_read(psci_power_state);
-	/*
-	 * idle state index 0 corresponds to wfi, should never be called
-	 * from the cpu_suspend operations
-	 */
-	if (WARN_ON_ONCE(!index))
-		return -EINVAL;
 
-	if (!psci_power_state_loses_context(state[index - 1]))
-		ret = psci_ops.cpu_suspend(state[index - 1], 0);
+	if (!psci_power_state_loses_context(state))
+		ret = psci_ops.cpu_suspend(state, 0);
 	else
-		ret = cpu_suspend(index, psci_suspend_finisher);
+		ret = cpu_suspend(state, psci_suspend_finisher);
 
 	return ret;
 }
-
-/* ARM specific CPU idle operations */
-#ifdef CONFIG_ARM
-static const struct cpuidle_ops psci_cpuidle_ops __initconst = {
-	.suspend = psci_cpu_suspend_enter,
-	.init = psci_dt_cpu_init_idle,
-};
-
-CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
-#endif
 #endif
 
 static int psci_system_suspend(unsigned long unused)
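
With this refactoring, psci_cpu_suspend_enter() takes the raw PSCI power_state value instead of a cpuidle index, so the per-cpu state table moves out to the cpuidle driver. A minimal caller sketch, assuming a table of already-validated power_state values (names here are illustrative, not from this series):

	/* Sketch: states[] holds raw PSCI power_state parameters parsed
	 * from DT or ACPI; index 0 is plain WFI and never reaches PSCI. */
	static int enter_psci_state(u32 *states, int idx)
	{
		return psci_cpu_suspend_enter(states[idx]);
	}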
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index f3659443f8c2..6a445397771c 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -228,8 +228,11 @@ out_free_cpus:
 
 static void dummy_callback(struct timer_list *unused) {}
 
-static int suspend_cpu(int index, bool broadcast)
+static int suspend_cpu(struct cpuidle_device *dev,
+		       struct cpuidle_driver *drv, int index)
 {
+	struct cpuidle_state *state = &drv->states[index];
+	bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
 	int ret;
 
 	arch_cpu_idle_enter();
@@ -254,11 +257,7 @@ static int suspend_cpu(int index, bool broadcast)
 		}
 	}
 
-	/*
-	 * Replicate the common ARM cpuidle enter function
-	 * (arm_enter_idle_state).
-	 */
-	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
+	ret = state->enter(dev, drv, index);
 
 	if (broadcast)
 		tick_broadcast_exit();
@@ -301,9 +300,8 @@ static int suspend_test_thread(void *arg)
 	 * doesn't use PSCI).
 	 */
 	for (index = 1; index < drv->state_count; ++index) {
-		struct cpuidle_state *state = &drv->states[index];
-		bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
 		int ret;
+		struct cpuidle_state *state = &drv->states[index];
 
 		/*
 		 * Set the timer to wake this CPU up in some time (which
@@ -318,7 +316,7 @@ static int suspend_test_thread(void *arg)
 	/* IRQs must be disabled during suspend operations. */
 	local_irq_disable();
 
-	ret = suspend_cpu(index, broadcast);
+	ret = suspend_cpu(dev, drv, index);
 
 	/*
 	 * We have woken up. Re-enable IRQs to handle any
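
The checker now drives each idle state through the generic cpuidle ->enter() callback instead of replicating the ARM-specific entry path. For reference, the callback shape it relies on (standard cpuidle API, not something this patch adds):

	int (*enter)(struct cpuidle_device *dev,
		     struct cpuidle_driver *drv,
		     int index);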
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 9cdf14b9aaab..223d617ecfe1 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -24,6 +24,7 @@
 #include <linux/debugfs.h>
 #include <linux/serial_core.h>
 #include <linux/sysfs.h>
+#include <linux/random.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
@@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 {
 	int l;
 	const char *p;
+	const void *rng_seed;
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
@@ -1078,6 +1080,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 
 	pr_debug("Command line is: %s\n", (char*)data);
 
+	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
+	if (rng_seed && l > 0) {
+		add_bootloader_randomness(rng_seed, l);
+
+		/* try to clear seed so it won't be found. */
+		fdt_nop_property(initial_boot_params, node, "rng-seed");
+
+		/* update CRC check value */
+		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
+				fdt_totalsize(initial_boot_params));
+	}
+
 	/* break now */
 	return 1;
 }
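
The kernel credits the /chosen "rng-seed" property via add_bootloader_randomness(), then wipes the property and recomputes of_fdt_crc32 so the later FDT checksum verification still passes. A sketch of the producing side, assuming a bootloader built against libfdt (the helper name is hypothetical):

	#include <libfdt.h>

	/* Hypothetical bootloader helper: install an entropy seed that
	 * early_init_dt_scan_chosen() will pick up and then scrub. */
	static int fdt_install_rng_seed(void *fdt, const void *seed, int len)
	{
		int node = fdt_path_offset(fdt, "/chosen");

		if (node < 0)
			node = fdt_add_subnode(fdt, 0, "chosen");
		if (node < 0)
			return node;

		return fdt_setprop(fdt, node, "rng-seed", seed, len);
	}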
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index da71c741cb46..abcf54f7d19c 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -113,8 +113,6 @@ struct smmu_pmu {
 	u64 counter_mask;
 	u32 options;
 	bool global_filter;
-	u32 global_filter_span;
-	u32 global_filter_sid;
 };
 
 #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu))
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event,
 	smmu_pmu_set_smr(smmu_pmu, idx, sid);
 }
 
+static bool smmu_pmu_check_global_filter(struct perf_event *curr,
+					 struct perf_event *new)
+{
+	if (get_filter_enable(new) != get_filter_enable(curr))
+		return false;
+
+	if (!get_filter_enable(new))
+		return true;
+
+	return get_filter_span(new) == get_filter_span(curr) &&
+	       get_filter_stream_id(new) == get_filter_stream_id(curr);
+}
+
 static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
 				       struct perf_event *event, int idx)
 {
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu,
 	}
 
 	/* Requested settings same as current global settings*/
-	if (span == smmu_pmu->global_filter_span &&
-	    sid == smmu_pmu->global_filter_sid)
+	idx = find_first_bit(smmu_pmu->used_counters, num_ctrs);
+	if (idx == num_ctrs ||
+	    smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) {
+		smmu_pmu_set_event_filter(event, 0, span, sid);
 		return 0;
+	}
 
-	if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs))
-		return -EAGAIN;
-
-	smmu_pmu_set_event_filter(event, 0, span, sid);
-	smmu_pmu->global_filter_span = span;
-	smmu_pmu->global_filter_sid = sid;
-	return 0;
+	return -EAGAIN;
 }
 
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu,
 	return idx;
 }
 
+static bool smmu_pmu_events_compatible(struct perf_event *curr,
+				       struct perf_event *new)
+{
+	if (new->pmu != curr->pmu)
+		return false;
+
+	if (to_smmu_pmu(new->pmu)->global_filter &&
+	    !smmu_pmu_check_global_filter(curr, new))
+		return false;
+
+	return true;
+}
+
 /*
  * Implementation of abstract pmu functionality required by
  * the core perf events code.
@@ -323,6 +344,7 @@ static int smmu_pmu_event_init(struct perf_event *event)
 	struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu);
 	struct device *dev = smmu_pmu->dev;
 	struct perf_event *sibling;
+	int group_num_events = 1;
 	u16 event_id;
 
 	if (event->attr.type != event->pmu->type)
@@ -347,18 +369,23 @@ static int smmu_pmu_event_init(struct perf_event *event)
 	}
 
 	/* Don't allow groups with mixed PMUs, except for s/w events */
-	if (event->group_leader->pmu != event->pmu &&
-	    !is_software_event(event->group_leader)) {
-		dev_dbg(dev, "Can't create mixed PMU group\n");
-		return -EINVAL;
+	if (!is_software_event(event->group_leader)) {
+		if (!smmu_pmu_events_compatible(event->group_leader, event))
+			return -EINVAL;
+
+		if (++group_num_events > smmu_pmu->num_counters)
+			return -EINVAL;
 	}
 
 	for_each_sibling_event(sibling, event->group_leader) {
-		if (sibling->pmu != event->pmu &&
-		    !is_software_event(sibling)) {
-			dev_dbg(dev, "Can't create mixed PMU group\n");
+		if (is_software_event(sibling))
+			continue;
+
+		if (!smmu_pmu_events_compatible(sibling, event))
+			return -EINVAL;
+
+		if (++group_num_events > smmu_pmu->num_counters)
 			return -EINVAL;
-		}
 	}
 
 	hwc->idx = -1;
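
On PMCGs with a global filter, events may now share the PMU as long as their filter settings agree; smmu_pmu_check_global_filter() is what decides that. A sketch of how a matching pair of grouped events would encode the filter, assuming the driver's filter_stream_id/filter_span/filter_enable format fields occupy config1 bits 0-31, 32 and 33 respectively (as in the mainline driver):

	/* Illustrative: build the config1 value grouped events must share */
	static u64 pmcg_filter_config(u32 stream_id, bool span, bool enable)
	{
		return (u64)stream_id | ((u64)span << 32) | ((u64)enable << 33);
	}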
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 63fe21600072..ce7345745b42 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -35,6 +35,8 @@
 #define EVENT_CYCLES_COUNTER	0
 #define NUM_COUNTERS		4
 
+#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */
+
 #define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)
 
 #define DDR_PERF_DEV_NAME	"imx8_ddr"
@@ -42,11 +44,25 @@
 
 static DEFINE_IDA(ddr_ida);
 
+/* DDR Perf hardware feature */
+#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
+
+struct fsl_ddr_devtype_data {
+	unsigned int quirks;	/* quirks needed for different DDR Perf core */
+};
+
+static const struct fsl_ddr_devtype_data imx8_devtype_data;
+
+static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
+	.quirks = DDR_CAP_AXI_ID_FILTER,
+};
+
 static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
-	{ .compatible = "fsl,imx8-ddr-pmu",},
-	{ .compatible = "fsl,imx8m-ddr-pmu",},
+	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
+	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
 	{ /* sentinel */ }
 };
+MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);
 
 struct ddr_pmu {
 	struct pmu pmu;
@@ -57,6 +73,7 @@ struct ddr_pmu {
 	struct perf_event *events[NUM_COUNTERS];
 	int active_events;
 	enum cpuhp_state cpuhp_state;
+	const struct fsl_ddr_devtype_data *devtype_data;
 	int irq;
 	int id;
 };
@@ -128,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = {
 	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
 	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
 	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
+	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
+	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
 	NULL,
 };
 
@@ -137,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = {
 };
 
 PMU_FORMAT_ATTR(event, "config:0-7");
+PMU_FORMAT_ATTR(axi_id, "config1:0-15");
+PMU_FORMAT_ATTR(axi_mask, "config1:16-31");
 
 static struct attribute *ddr_perf_format_attrs[] = {
 	&format_attr_event.attr,
+	&format_attr_axi_id.attr,
+	&format_attr_axi_mask.attr,
 	NULL,
 };
 
@@ -189,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
 	return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
 }
 
+static bool ddr_perf_is_filtered(struct perf_event *event)
+{
+	return event->attr.config == 0x41 || event->attr.config == 0x42;
+}
+
+static u32 ddr_perf_filter_val(struct perf_event *event)
+{
+	return event->attr.config1;
+}
+
+static bool ddr_perf_filters_compatible(struct perf_event *a,
+					struct perf_event *b)
+{
+	if (!ddr_perf_is_filtered(a))
+		return true;
+	if (!ddr_perf_is_filtered(b))
+		return true;
+	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
+}
+
 static int ddr_perf_event_init(struct perf_event *event)
 {
 	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
@@ -215,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event)
 	    !is_software_event(event->group_leader))
 		return -EINVAL;
 
+	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+		if (!ddr_perf_filters_compatible(event, event->group_leader))
+			return -EINVAL;
+		for_each_sibling_event(sibling, event->group_leader) {
+			if (!ddr_perf_filters_compatible(event, sibling))
+				return -EINVAL;
+		}
+	}
+
 	for_each_sibling_event(sibling, event->group_leader) {
 		if (sibling->pmu != event->pmu &&
 		    !is_software_event(sibling))
@@ -287,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
 	struct hw_perf_event *hwc = &event->hw;
 	int counter;
 	int cfg = event->attr.config;
+	int cfg1 = event->attr.config1;
+
+	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
+		int i;
+
+		for (i = 1; i < NUM_COUNTERS; i++) {
+			if (pmu->events[i] &&
+			    !ddr_perf_filters_compatible(event, pmu->events[i]))
+				return -EINVAL;
+		}
+
+		if (ddr_perf_is_filtered(event)) {
+			/* revert axi id masking(axi_mask) value */
+			cfg1 ^= AXI_MASKING_REVERT;
+			writel(cfg1, pmu->base + COUNTER_DPCR1);
+		}
+	}
 
 	counter = ddr_perf_alloc_counter(pmu, cfg);
 	if (counter < 0) {
@@ -472,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev)
 	if (!name)
 		return -ENOMEM;
 
+	pmu->devtype_data = of_device_get_match_data(&pdev->dev);
+
 	pmu->cpu = raw_smp_processor_id();
 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 				      DDR_CPUHP_CB_NAME,
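
The new axid-read/axid-write events (0x41/0x42) take their AXI ID filter from config1: the low 16 bits select the ID, the high 16 bits the mask, and the driver XORs the whole value with AXI_MASKING_REVERT before programming COUNTER_DPCR1. A userspace attribute sketch with illustrative values:

	#include <string.h>
	#include <linux/perf_event.h>

	/* Sketch: count axid-read transactions for AXI ID 0x12, matching
	 * all 16 ID bits (axi_mask = 0xffff). pmu_type comes from sysfs. */
	static void imx8_ddr_axid_read_attr(struct perf_event_attr *attr,
					    int pmu_type)
	{
		memset(attr, 0, sizeof(*attr));
		attr->size = sizeof(*attr);
		attr->type = pmu_type;
		attr->config = 0x41;				/* axid-read */
		attr->config1 = (0xffffULL << 16) | 0x12;	/* axi_mask | axi_id */
	}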
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 6ad0823bcf23..e42d4464c2cf 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu,
 
 	/* Read and init IRQ */
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq);
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr,
 			       IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 4f2917f3e25e..f28063873e11 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu,
 
 	/* Read and init IRQ */
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq);
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr,
 			       IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 9153e093f9df..078b8dc57250 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu,
 
 	/* Read and init IRQ */
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq);
+	if (irq < 0)
 		return irq;
-	}
 
 	ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr,
 			       IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index d06182fe14b8..21d6991dbe0b 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
 	cluster->cluster_id = fw_cluster_id;
 
 	irq = platform_get_irq(sdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev,
-			"Failed to get valid irq for cluster %ld\n",
-			fw_cluster_id);
+	if (irq < 0)
 		return irq;
-	}
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 	cluster->irq = irq;
 
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 3259e2ebeb39..7e328d6385c3 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
 	}
 
 	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "No IRQ resource\n");
+	if (irq < 0)
 		return -EINVAL;
-	}
 
 	rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
 			      IRQF_NOBALANCING | IRQF_NO_THREAD,
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 95a159a4137f..80ca61058dd2 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -16,6 +16,8 @@ struct error_injection_entry {
 	int etype;
 };
 
+struct pt_regs;
+
 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
 /*
  * Whitelist generating macro. Specify functions which can be
@@ -28,8 +30,12 @@ static struct error_injection_entry __used \
 	.addr = (unsigned long)fname,		\
 	.etype = EI_ETYPE_##_etype,		\
 };
+
+void override_function_with_return(struct pt_regs *regs);
 #else
 #define ALLOW_ERROR_INJECTION(fname, _etype)
+
+static inline void override_function_with_return(struct pt_regs *regs) { }
 #endif
 #endif
 
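
Declaring struct pt_regs and stubbing override_function_with_return() here keeps the header self-contained on !CONFIG_FUNCTION_ERROR_INJECTION builds. Typical use of the whitelist macro, sketched with a hypothetical function:

	/* Hypothetical: opt a function into error injection so a kprobe
	 * based injector may force an early errno-style return. */
	static int example_hw_init(struct example_dev *dev)
	{
		/* ... real initialization ... */
		return 0;
	}
	ALLOW_ERROR_INJECTION(example_hw_init, ERRNO);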
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 9426b9aaed86..9d0e20a2ac83 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1302,11 +1302,16 @@ static inline int lpit_read_residency_count_address(u64 *address)
 #endif
 
 #ifdef CONFIG_ACPI_PPTT
+int acpi_pptt_cpu_is_thread(unsigned int cpu);
 int find_acpi_cpu_topology(unsigned int cpu, int level);
 int find_acpi_cpu_topology_package(unsigned int cpu);
 int find_acpi_cpu_topology_hetero_id(unsigned int cpu);
 int find_acpi_cpu_cache_topology(unsigned int cpu, int level);
 #else
+static inline int acpi_pptt_cpu_is_thread(unsigned int cpu)
+{
+	return -EINVAL;
+}
 static inline int find_acpi_cpu_topology(unsigned int cpu, int level)
 {
 	return -EINVAL;
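
acpi_pptt_cpu_is_thread() lets topology code ask the PPTT directly whether a CPU is an SMT thread, rather than guessing from MPIDR. A consumer sketch, assuming the usual tri-state contract (positive for a thread, zero for not, negative when the table does not say):

	/* Sketch: treat "PPTT doesn't say" (negative return) as not a thread */
	static bool cpu_is_smt_thread(unsigned int cpu)
	{
		return acpi_pptt_cpu_is_thread(cpu) > 0;
	}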
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 1cfe05ea1d89..42f2b5126094 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -33,4 +33,30 @@ unsigned long topology_get_freq_scale(int cpu)
 	return per_cpu(freq_scale, cpu);
 }
 
+struct cpu_topology {
+	int thread_id;
+	int core_id;
+	int package_id;
+	int llc_id;
+	cpumask_t thread_sibling;
+	cpumask_t core_sibling;
+	cpumask_t llc_sibling;
+};
+
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
+#define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
+#define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+#define topology_llc_cpumask(cpu)	(&cpu_topology[cpu].llc_sibling)
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+void update_siblings_masks(unsigned int cpu);
+void remove_cpu_topology(unsigned int cpuid);
+void reset_cpu_topology(void);
+#endif
+
 #endif /* _LINUX_ARCH_TOPOLOGY_H_ */
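
With struct cpu_topology and its helpers generalized, an architecture selecting CONFIG_GENERIC_ARCH_TOPOLOGY mostly just calls the hooks from its SMP bring-up code. A rough sketch of the expected call sites (function names and placement are illustrative):

	/* Illustrative arch glue for CONFIG_GENERIC_ARCH_TOPOLOGY */
	void __init arch_prepare_cpus_sketch(void)
	{
		init_cpu_topology();	/* parse DT/ACPI, reset masks */
	}

	void arch_secondary_init_sketch(void)
	{
		store_cpu_topology(smp_processor_id());	/* fill IDs, update siblings */
	}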
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index bb9a0db89f1a..12ae4b87494e 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -256,7 +256,10 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
 #endif
 
-#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,	\
+				idx,			\
+				state,			\
+				is_retention)		\
 ({									\
 	int __ret = 0;							\
 									\
@@ -268,7 +271,7 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 	if (!is_retention)						\
 		__ret = cpu_pm_enter();					\
 	if (!__ret) {							\
-		__ret = low_level_idle_enter(idx);			\
+		__ret = low_level_idle_enter(state);			\
 		if (!is_retention)					\
 			cpu_pm_exit();					\
 	}								\
@@ -277,9 +280,15 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 })
 
 #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
-	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
 
 #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
-	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)	\
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)	\
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
 
 #endif /* _LINUX_CPUIDLE_H */
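
The *_PARAM variants let a driver hand a hardware-specific parameter (such as a raw PSCI power_state) to the low-level enter function while cpu_pm accounting still keys off the cpuidle index. A sketch of a PSCI cpuidle enter callback built on top, assuming a per-cpu table of parsed states as in the PSCI changes earlier in this diff (names illustrative):

	static int psci_enter_idle_state(struct cpuidle_device *dev,
					 struct cpuidle_driver *drv, int idx)
	{
		u32 *state = __this_cpu_read(psci_power_state);

		return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
						   idx, state[idx]);
	}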
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h
index 280c61ecbf20..635a95caf29f 100644
--- a/include/linux/error-injection.h
+++ b/include/linux/error-injection.h
@@ -2,16 +2,16 @@
 #ifndef _LINUX_ERROR_INJECTION_H
 #define _LINUX_ERROR_INJECTION_H
 
-#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+#include <linux/compiler.h>
+#include <asm-generic/error-injection.h>
 
-#include <asm/error-injection.h>
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
 
 extern bool within_error_injection_list(unsigned long addr);
 extern int get_injectable_error_type(unsigned long addr);
 
 #else /* !CONFIG_FUNCTION_ERROR_INJECTION */
 
-#include <asm-generic/error-injection.h>
 static inline bool within_error_injection_list(unsigned long addr)
 {
 	return false;
diff --git a/include/linux/psci.h b/include/linux/psci.h
index a8a15613c157..e2bacc6fd2f2 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -15,8 +15,8 @@
 
 bool psci_tos_resident_on(int cpu);
 
-int psci_cpu_init_idle(unsigned int cpu);
-int psci_cpu_suspend_enter(unsigned long index);
+int psci_cpu_suspend_enter(u32 state);
+bool psci_power_state_is_valid(u32 state);
 
 enum psci_conduit {
 	PSCI_CONDUIT_NONE,
diff --git a/include/linux/random.h b/include/linux/random.h
index 1f7dced2bba6..f189c927fdea 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -19,6 +19,7 @@ struct random_ready_callback {
 };
 
 extern void add_device_randomness(const void *, unsigned int);
+extern void add_bootloader_randomness(const void *, unsigned int);
 
 #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
 static inline void add_latent_entropy(void)
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 47a3e3c08036..2a19d196af28 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -27,6 +27,7 @@
 #ifndef _LINUX_TOPOLOGY_H
 #define _LINUX_TOPOLOGY_H
 
+#include <linux/arch_topology.h>
 #include <linux/cpumask.h>
 #include <linux/bitops.h>
 #include <linux/mmzone.h>
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 094bb03b9cc2..2e927b3e9d6c 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -229,4 +229,9 @@ struct prctl_mm_map {
 # define PR_PAC_APDBKEY			(1UL << 3)
 # define PR_PAC_APGAKEY			(1UL << 4)
 
+/* Tagged user address controls for arm64 */
+#define PR_SET_TAGGED_ADDR_CTRL		55
+#define PR_GET_TAGGED_ADDR_CTRL		56
+# define PR_TAGGED_ADDR_ENABLE		(1UL << 0)
+
 #endif /* _LINUX_PRCTL_H */
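
Userspace opts in to the tagged-address ABI per thread with these prctls; the selftest added at the end of this diff exercises the SET side. A minimal query sketch:

	#include <sys/prctl.h>

	/* Sketch: ask whether tagged pointers are currently enabled */
	static int tagged_addr_enabled(void)
	{
		int ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);

		return ctrl >= 0 && (ctrl & PR_TAGGED_ADDR_ENABLE);
	}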
diff --git a/init/Kconfig b/init/Kconfig
index bd7d650d4a99..d96127ebc44e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -30,6 +30,9 @@ config CC_CAN_LINK
 config CC_HAS_ASM_GOTO
 	def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
 
+config TOOLS_SUPPORT_RELR
+	def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh)
+
 config CC_HAS_WARN_MAYBE_UNINITIALIZED
 	def_bool $(cc-option,-Wmaybe-uninitialized)
 	help
diff --git a/kernel/sys.c b/kernel/sys.c
index 2969304c29fe..ec48396b4943 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -124,6 +124,12 @@
 #ifndef PAC_RESET_KEYS
 # define PAC_RESET_KEYS(a, b)	(-EINVAL)
 #endif
+#ifndef SET_TAGGED_ADDR_CTRL
+# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
+#endif
+#ifndef GET_TAGGED_ADDR_CTRL
+# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -2492,6 +2498,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 			return -EINVAL;
 		error = PAC_RESET_KEYS(me, arg2);
 		break;
+	case PR_SET_TAGGED_ADDR_CTRL:
+		if (arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = SET_TAGGED_ADDR_CTRL(arg2);
+		break;
+	case PR_GET_TAGGED_ADDR_CTRL:
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
+		error = GET_TAGGED_ADDR_CTRL();
+		break;
 	default:
 		error = -EINVAL;
 		break;
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 6410bd22fe38..03757cc60e06 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KASAN
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+endif
+
 ifdef CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_INLINE
@@ -7,8 +12,6 @@ else
 	call_threshold := 0
 endif
 
-KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
-
 CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
 
 cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
@@ -45,7 +48,3 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
 	$(instrumentation_flags)
 
 endif # CONFIG_KASAN_SW_TAGS
-
-ifdef CONFIG_KASAN
-CFLAGS_KASAN_NOSANITIZE := -fno-builtin
-endif
diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh
new file mode 100755
index 000000000000..97a2c844a95e
--- /dev/null
+++ b/scripts/tools-support-relr.sh
@@ -0,0 +1,16 @@
+#!/bin/sh -eu
+# SPDX-License-Identifier: GPL-2.0
+
+tmp_file=$(mktemp)
+trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
+
+cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1
+void *p = &p;
+END
+"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+
+# Despite printing an error message, GNU nm still exits with exit code 0 if it
+# sees a relr section. So we need to check that nothing is printed to stderr.
+test -z "$("$NM" $tmp_file 2>&1 >/dev/null)"
+
+"$OBJCOPY" -O binary $tmp_file $tmp_file.bin
diff --git a/tools/testing/selftests/arm64/.gitignore b/tools/testing/selftests/arm64/.gitignore
new file mode 100644
index 000000000000..e8fae8d61ed6
--- /dev/null
+++ b/tools/testing/selftests/arm64/.gitignore
@@ -0,0 +1 @@
+tags_test
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile
new file mode 100644
index 000000000000..a61b2e743e99
--- /dev/null
+++ b/tools/testing/selftests/arm64/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# ARCH can be overridden by the user for cross compiling
+ARCH ?= $(shell uname -m 2>/dev/null || echo not)
+
+ifneq (,$(filter $(ARCH),aarch64 arm64))
+TEST_GEN_PROGS := tags_test
+TEST_PROGS := run_tags_test.sh
+endif
+
+include ../lib.mk
diff --git a/tools/testing/selftests/arm64/run_tags_test.sh b/tools/testing/selftests/arm64/run_tags_test.sh
new file mode 100755
index 000000000000..745f11379930
--- /dev/null
+++ b/tools/testing/selftests/arm64/run_tags_test.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+echo "--------------------"
+echo "running tags test"
+echo "--------------------"
+./tags_test
+if [ $? -ne 0 ]; then
+	echo "[FAIL]"
+else
+	echo "[PASS]"
+fi
diff --git a/tools/testing/selftests/arm64/tags_test.c b/tools/testing/selftests/arm64/tags_test.c
new file mode 100644
index 000000000000..5701163460ef
--- /dev/null
+++ b/tools/testing/selftests/arm64/tags_test.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <sys/prctl.h>
+#include <sys/utsname.h>
+
+#define SHIFT_TAG(tag)		((uint64_t)(tag) << 56)
+#define SET_TAG(ptr, tag)	(((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \
+				SHIFT_TAG(tag))
+
+int main(void)
+{
+	static int tbi_enabled = 0;
+	unsigned long tag = 0;
+	struct utsname *ptr;
+	int err;
+
+	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
+		tbi_enabled = 1;
+	ptr = (struct utsname *)malloc(sizeof(*ptr));
+	if (tbi_enabled)
+		tag = 0x42;
+	ptr = (struct utsname *)SET_TAG(ptr, tag);
+	err = uname(ptr);
+	free(ptr);
+
+	return err;
+}