diff options
117 files changed, 1927 insertions, 1026 deletions
diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst new file mode 100644 index 000000000000..517a205abad6 --- /dev/null +++ b/Documentation/admin-guide/perf/imx-ddr.rst | |||
@@ -0,0 +1,52 @@ | |||
1 | ===================================================== | ||
2 | Freescale i.MX8 DDR Performance Monitoring Unit (PMU) | ||
3 | ===================================================== | ||
4 | |||
5 | There are no performance counters inside the DRAM controller, so performance | ||
6 | signals are brought out to the edge of the controller where a set of 4 x 32 bit | ||
7 | counters is implemented. This is controlled by the CSV modes programmed in the | ||
8 | counter control register which causes a large number of PERF signals to be generated. | ||
9 | |||
10 | Selection of the value for each counter is done via the config registers. There | ||
11 | is one register for each counter. Counter 0 is special in that it always counts | ||
12 | “time” and when expired causes a lock on itself and the other counters and an | ||
13 | interrupt is raised. If any other counter overflows, it continues counting, and | ||
14 | no interrupt is raised. | ||
15 | |||
16 | The "format" directory describes format of the config (event ID) and config1 | ||
17 | (AXI filtering) fields of the perf_event_attr structure, see /sys/bus/event_source/ | ||
18 | devices/imx8_ddr0/format/. The "events" directory describes the event types | ||
19 | supported by the hardware that can be used with the perf tool, see /sys/bus/event_source/ | ||
20 | devices/imx8_ddr0/events/. | ||
21 | e.g.:: | ||
22 | perf stat -a -e imx8_ddr0/cycles/ cmd | ||
23 | perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd | ||
24 | |||
25 | AXI filtering is only used by CSV modes 0x41 (axid-read) and 0x42 (axid-write) | ||
26 | to count read or write accesses that match the filter setting. Filter settings vary | ||
27 | across different DRAM controller implementations, which are distinguished by quirks | ||
28 | in the driver. | ||
29 | |||
30 | * With DDR_CAP_AXI_ID_FILTER quirk. | ||
31 | Filter is defined with two configuration parts: | ||
32 | --AXI_ID defines AxID matching value. | ||
33 | --AXI_MASKING defines which bits of AxID are meaningful for the matching. | ||
34 | 0:corresponding bit is masked. | ||
35 | 1: corresponding bit is not masked, i.e. used to do the matching. | ||
36 | |||
37 | AXI_ID and AXI_MASKING are mapped on DPCR1 register in performance counter. | ||
38 | When the non-masked bits match the corresponding AXI_ID bits the counter is | ||
39 | incremented. The perf counter is incremented if | ||
40 | (AxID & AXI_MASKING) == (AXI_ID & AXI_MASKING) | ||
41 | |||
42 | This filter doesn't support filtering different AXI IDs for the axid-read and | ||
43 | axid-write events at the same time, as the filter is shared between counters. | ||
44 | e.g.:: | ||
45 | perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd | ||
46 | perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd | ||
47 | |||
48 | NOTE: axi_mask is inverted in userspace (i.e. set bits are bits to mask), and | ||
49 | it will be re-inverted in the driver automatically, so that the user can just specify | ||
50 | axi_id to monitor a specific id, rather than having to specify axi_mask. | ||
51 | e.g.:: | ||
52 | perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12 | ||
diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst index 96b696ba4e6c..5c0c69dc58aa 100644 --- a/Documentation/arm64/index.rst +++ b/Documentation/arm64/index.rst | |||
@@ -16,6 +16,7 @@ ARM64 Architecture | |||
16 | pointer-authentication | 16 | pointer-authentication |
17 | silicon-errata | 17 | silicon-errata |
18 | sve | 18 | sve |
19 | tagged-address-abi | ||
19 | tagged-pointers | 20 | tagged-pointers |
20 | 21 | ||
21 | .. only:: subproject and html | 22 | .. only:: subproject and html |
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst new file mode 100644 index 000000000000..d4a85d535bf9 --- /dev/null +++ b/Documentation/arm64/tagged-address-abi.rst | |||
@@ -0,0 +1,156 @@ | |||
1 | ========================== | ||
2 | AArch64 TAGGED ADDRESS ABI | ||
3 | ========================== | ||
4 | |||
5 | Authors: Vincenzo Frascino <vincenzo.frascino@arm.com> | ||
6 | Catalin Marinas <catalin.marinas@arm.com> | ||
7 | |||
8 | Date: 21 August 2019 | ||
9 | |||
10 | This document describes the usage and semantics of the Tagged Address | ||
11 | ABI on AArch64 Linux. | ||
12 | |||
13 | 1. Introduction | ||
14 | --------------- | ||
15 | |||
16 | On AArch64 the ``TCR_EL1.TBI0`` bit is set by default, allowing | ||
17 | userspace (EL0) to perform memory accesses through 64-bit pointers with | ||
18 | a non-zero top byte. This document describes the relaxation of the | ||
19 | syscall ABI that allows userspace to pass certain tagged pointers to | ||
20 | kernel syscalls. | ||
21 | |||
22 | 2. AArch64 Tagged Address ABI | ||
23 | ----------------------------- | ||
24 | |||
25 | From the kernel syscall interface perspective and for the purposes of | ||
26 | this document, a "valid tagged pointer" is a pointer with a potentially | ||
27 | non-zero top-byte that references an address in the user process address | ||
28 | space obtained in one of the following ways: | ||
29 | |||
30 | - ``mmap()`` syscall where either: | ||
31 | |||
32 | - flags have the ``MAP_ANONYMOUS`` bit set or | ||
33 | - the file descriptor refers to a regular file (including those | ||
34 | returned by ``memfd_create()``) or ``/dev/zero`` | ||
35 | |||
36 | - ``brk()`` syscall (i.e. the heap area between the initial location of | ||
37 | the program break at process creation and its current location). | ||
38 | |||
39 | - any memory mapped by the kernel in the address space of the process | ||
40 | during creation and with the same restrictions as for ``mmap()`` above | ||
41 | (e.g. data, bss, stack). | ||
42 | |||
43 | The AArch64 Tagged Address ABI has two stages of relaxation depending | ||
44 | how the user addresses are used by the kernel: | ||
45 | |||
46 | 1. User addresses not accessed by the kernel but used for address space | ||
47 | management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use | ||
48 | of valid tagged pointers in this context is always allowed. | ||
49 | |||
50 | 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI | ||
51 | relaxation is disabled by default and the application thread needs to | ||
52 | explicitly enable it via ``prctl()`` as follows: | ||
53 | |||
54 | - ``PR_SET_TAGGED_ADDR_CTRL``: enable or disable the AArch64 Tagged | ||
55 | Address ABI for the calling thread. | ||
56 | |||
57 | The ``(unsigned int) arg2`` argument is a bit mask describing the | ||
58 | control mode used: | ||
59 | |||
60 | - ``PR_TAGGED_ADDR_ENABLE``: enable AArch64 Tagged Address ABI. | ||
61 | Default status is disabled. | ||
62 | |||
63 | Arguments ``arg3``, ``arg4``, and ``arg5`` must be 0. | ||
64 | |||
65 | - ``PR_GET_TAGGED_ADDR_CTRL``: get the status of the AArch64 Tagged | ||
66 | Address ABI for the calling thread. | ||
67 | |||
68 | Arguments ``arg2``, ``arg3``, ``arg4``, and ``arg5`` must be 0. | ||
69 | |||
70 | The ABI properties described above are thread-scoped, inherited on | ||
71 | clone() and fork() and cleared on exec(). | ||
72 | |||
73 | Calling ``prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)`` | ||
74 | returns ``-EINVAL`` if the AArch64 Tagged Address ABI is globally | ||
75 | disabled by ``sysctl abi.tagged_addr_disabled=1``. The default | ||
76 | ``sysctl abi.tagged_addr_disabled`` configuration is 0. | ||
77 | |||
78 | When the AArch64 Tagged Address ABI is enabled for a thread, the | ||
79 | following behaviours are guaranteed: | ||
80 | |||
81 | - All syscalls except the cases mentioned in section 3 can accept any | ||
82 | valid tagged pointer. | ||
83 | |||
84 | - The syscall behaviour is undefined for invalid tagged pointers: it may | ||
85 | result in an error code being returned, a (fatal) signal being raised, | ||
86 | or other modes of failure. | ||
87 | |||
88 | - The syscall behaviour for a valid tagged pointer is the same as for | ||
89 | the corresponding untagged pointer. | ||
90 | |||
91 | |||
92 | A definition of the meaning of tagged pointers on AArch64 can be found | ||
93 | in Documentation/arm64/tagged-pointers.rst. | ||
94 | |||
95 | 3. AArch64 Tagged Address ABI Exceptions | ||
96 | ----------------------------------------- | ||
97 | |||
98 | The following system call parameters must be untagged regardless of the | ||
99 | ABI relaxation: | ||
100 | |||
101 | - ``prctl()`` other than pointers to user data either passed directly or | ||
102 | indirectly as arguments to be accessed by the kernel. | ||
103 | |||
104 | - ``ioctl()`` other than pointers to user data either passed directly or | ||
105 | indirectly as arguments to be accessed by the kernel. | ||
106 | |||
107 | - ``shmat()`` and ``shmdt()``. | ||
108 | |||
109 | Any attempt to use non-zero tagged pointers may result in an error code | ||
110 | being returned, a (fatal) signal being raised, or other modes of | ||
111 | failure. | ||
112 | |||
113 | 4. Example of correct usage | ||
114 | --------------------------- | ||
115 | .. code-block:: c | ||
116 | |||
117 | #include <stdlib.h> | ||
118 | #include <string.h> | ||
119 | #include <unistd.h> | ||
120 | #include <sys/mman.h> | ||
121 | #include <sys/prctl.h> | ||
122 | |||
123 | #define PR_SET_TAGGED_ADDR_CTRL 55 | ||
124 | #define PR_TAGGED_ADDR_ENABLE (1UL << 0) | ||
125 | |||
126 | #define TAG_SHIFT 56 | ||
127 | |||
128 | int main(void) | ||
129 | { | ||
130 | int tbi_enabled = 0; | ||
131 | unsigned long tag = 0; | ||
132 | char *ptr; | ||
133 | |||
134 | /* check/enable the tagged address ABI */ | ||
135 | if (!prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) | ||
136 | tbi_enabled = 1; | ||
137 | |||
138 | /* memory allocation */ | ||
139 | ptr = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ | PROT_WRITE, | ||
140 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); | ||
141 | if (ptr == MAP_FAILED) | ||
142 | return 1; | ||
143 | |||
144 | /* set a non-zero tag if the ABI is available */ | ||
145 | if (tbi_enabled) | ||
146 | tag = rand() & 0xff; | ||
147 | ptr = (char *)((unsigned long)ptr | (tag << TAG_SHIFT)); | ||
148 | |||
149 | /* memory access to a tagged address */ | ||
150 | strcpy(ptr, "tagged pointer\n"); | ||
151 | |||
152 | /* syscall with a tagged pointer */ | ||
153 | write(1, ptr, strlen(ptr)); | ||
154 | |||
155 | return 0; | ||
156 | } | ||
diff --git a/Documentation/arm64/tagged-pointers.rst b/Documentation/arm64/tagged-pointers.rst index 2acdec3ebbeb..eab4323609b9 100644 --- a/Documentation/arm64/tagged-pointers.rst +++ b/Documentation/arm64/tagged-pointers.rst | |||
@@ -20,7 +20,9 @@ Passing tagged addresses to the kernel | |||
20 | -------------------------------------- | 20 | -------------------------------------- |
21 | 21 | ||
22 | All interpretation of userspace memory addresses by the kernel assumes | 22 | All interpretation of userspace memory addresses by the kernel assumes |
23 | an address tag of 0x00. | 23 | an address tag of 0x00, unless the application enables the AArch64 |
24 | Tagged Address ABI explicitly | ||
25 | (Documentation/arm64/tagged-address-abi.rst). | ||
24 | 26 | ||
25 | This includes, but is not limited to, addresses found in: | 27 | This includes, but is not limited to, addresses found in: |
26 | 28 | ||
@@ -33,13 +35,15 @@ This includes, but is not limited to, addresses found in: | |||
33 | - the frame pointer (x29) and frame records, e.g. when interpreting | 35 | - the frame pointer (x29) and frame records, e.g. when interpreting |
34 | them to generate a backtrace or call graph. | 36 | them to generate a backtrace or call graph. |
35 | 37 | ||
36 | Using non-zero address tags in any of these locations may result in an | 38 | Using non-zero address tags in any of these locations when the |
37 | error code being returned, a (fatal) signal being raised, or other modes | 39 | userspace application did not enable the AArch64 Tagged Address ABI may |
38 | of failure. | 40 | result in an error code being returned, a (fatal) signal being raised, |
41 | or other modes of failure. | ||
39 | 42 | ||
40 | For these reasons, passing non-zero address tags to the kernel via | 43 | For these reasons, when the AArch64 Tagged Address ABI is disabled, |
41 | system calls is forbidden, and using a non-zero address tag for sp is | 44 | passing non-zero address tags to the kernel via system calls is |
42 | strongly discouraged. | 45 | forbidden, and using a non-zero address tag for sp is strongly |
46 | discouraged. | ||
43 | 47 | ||
44 | Programs maintaining a frame pointer and frame records that use non-zero | 48 | Programs maintaining a frame pointer and frame records that use non-zero |
45 | address tags may suffer impaired or inaccurate debug and profiling | 49 | address tags may suffer impaired or inaccurate debug and profiling |
@@ -59,6 +63,9 @@ be preserved. | |||
59 | The architecture prevents the use of a tagged PC, so the upper byte will | 63 | The architecture prevents the use of a tagged PC, so the upper byte will |
60 | be set to a sign-extension of bit 55 on exception return. | 64 | be set to a sign-extension of bit 55 on exception return. |
61 | 65 | ||
66 | This behaviour is maintained when the AArch64 Tagged Address ABI is | ||
67 | enabled. | ||
68 | |||
62 | 69 | ||
63 | Other considerations | 70 | Other considerations |
64 | -------------------- | 71 | -------------------- |
diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/cpu/cpu-topology.txt index b0d80c0fb265..99918189403c 100644 --- a/Documentation/devicetree/bindings/arm/topology.txt +++ b/Documentation/devicetree/bindings/cpu/cpu-topology.txt | |||
@@ -1,21 +1,19 @@ | |||
1 | =========================================== | 1 | =========================================== |
2 | ARM topology binding description | 2 | CPU topology binding description |
3 | =========================================== | 3 | =========================================== |
4 | 4 | ||
5 | =========================================== | 5 | =========================================== |
6 | 1 - Introduction | 6 | 1 - Introduction |
7 | =========================================== | 7 | =========================================== |
8 | 8 | ||
9 | In an ARM system, the hierarchy of CPUs is defined through three entities that | 9 | In an SMP system, the hierarchy of CPUs is defined through four entities that |
10 | are used to describe the layout of physical CPUs in the system: | 10 | are used to describe the layout of physical CPUs in the system: |
11 | 11 | ||
12 | - socket | ||
12 | - cluster | 13 | - cluster |
13 | - core | 14 | - core |
14 | - thread | 15 | - thread |
15 | 16 | ||
16 | The cpu nodes (bindings defined in [1]) represent the devices that | ||
17 | correspond to physical CPUs and are to be mapped to the hierarchy levels. | ||
18 | |||
19 | The bottom hierarchy level sits at core or thread level depending on whether | 17 | The bottom hierarchy level sits at core or thread level depending on whether |
20 | symmetric multi-threading (SMT) is supported or not. | 18 | symmetric multi-threading (SMT) is supported or not. |
21 | 19 | ||
@@ -24,33 +22,31 @@ threads existing in the system and map to the hierarchy level "thread" above. | |||
24 | In systems where SMT is not supported "cpu" nodes represent all cores present | 22 | In systems where SMT is not supported "cpu" nodes represent all cores present |
25 | in the system and map to the hierarchy level "core" above. | 23 | in the system and map to the hierarchy level "core" above. |
26 | 24 | ||
27 | ARM topology bindings allow one to associate cpu nodes with hierarchical groups | 25 | CPU topology bindings allow one to associate cpu nodes with hierarchical groups |
28 | corresponding to the system hierarchy; syntactically they are defined as device | 26 | corresponding to the system hierarchy; syntactically they are defined as device |
29 | tree nodes. | 27 | tree nodes. |
30 | 28 | ||
31 | The remainder of this document provides the topology bindings for ARM, based | 29 | Currently, only ARM/RISC-V intend to use this cpu topology binding but it may be |
32 | on the Devicetree Specification, available from: | 30 | used for any other architecture as well. |
33 | 31 | ||
34 | https://www.devicetree.org/specifications/ | 32 | The cpu nodes, as per bindings defined in [4], represent the devices that |
33 | correspond to physical CPUs and are to be mapped to the hierarchy levels. | ||
35 | 34 | ||
36 | If not stated otherwise, whenever a reference to a cpu node phandle is made its | ||
37 | value must point to a cpu node compliant with the cpu node bindings as | ||
38 | documented in [1]. | ||
39 | A topology description containing phandles to cpu nodes that are not compliant | 35 | A topology description containing phandles to cpu nodes that are not compliant |
40 | with bindings standardized in [1] is therefore considered invalid. | 36 | with bindings standardized in [4] is therefore considered invalid. |
41 | 37 | ||
42 | =========================================== | 38 | =========================================== |
43 | 2 - cpu-map node | 39 | 2 - cpu-map node |
44 | =========================================== | 40 | =========================================== |
45 | 41 | ||
46 | The ARM CPU topology is defined within the cpu-map node, which is a direct | 42 | The ARM/RISC-V CPU topology is defined within the cpu-map node, which is a direct |
47 | child of the cpus node and provides a container where the actual topology | 43 | child of the cpus node and provides a container where the actual topology |
48 | nodes are listed. | 44 | nodes are listed. |
49 | 45 | ||
50 | - cpu-map node | 46 | - cpu-map node |
51 | 47 | ||
52 | Usage: Optional - On ARM SMP systems provide CPUs topology to the OS. | 48 | Usage: Optional - On SMP systems provide CPUs topology to the OS. |
53 | ARM uniprocessor systems do not require a topology | 49 | Uniprocessor systems do not require a topology |
54 | description and therefore should not define a | 50 | description and therefore should not define a |
55 | cpu-map node. | 51 | cpu-map node. |
56 | 52 | ||
@@ -63,21 +59,23 @@ nodes are listed. | |||
63 | 59 | ||
64 | The cpu-map node's child nodes can be: | 60 | The cpu-map node's child nodes can be: |
65 | 61 | ||
66 | - one or more cluster nodes | 62 | - one or more cluster nodes or |
63 | - one or more socket nodes in a multi-socket system | ||
67 | 64 | ||
68 | Any other configuration is considered invalid. | 65 | Any other configuration is considered invalid. |
69 | 66 | ||
70 | The cpu-map node can only contain three types of child nodes: | 67 | The cpu-map node can only contain 4 types of child nodes: |
71 | 68 | ||
69 | - socket node | ||
72 | - cluster node | 70 | - cluster node |
73 | - core node | 71 | - core node |
74 | - thread node | 72 | - thread node |
75 | 73 | ||
76 | whose bindings are described in paragraph 3. | 74 | whose bindings are described in paragraph 3. |
77 | 75 | ||
78 | The nodes describing the CPU topology (cluster/core/thread) can only | 76 | The nodes describing the CPU topology (socket/cluster/core/thread) can |
79 | be defined within the cpu-map node and every core/thread in the system | 77 | only be defined within the cpu-map node and every core/thread in the |
80 | must be defined within the topology. Any other configuration is | 78 | system must be defined within the topology. Any other configuration is |
81 | invalid and therefore must be ignored. | 79 | invalid and therefore must be ignored. |
82 | 80 | ||
83 | =========================================== | 81 | =========================================== |
@@ -85,26 +83,44 @@ invalid and therefore must be ignored. | |||
85 | =========================================== | 83 | =========================================== |
86 | 84 | ||
87 | cpu-map child nodes must follow a naming convention where the node name | 85 | cpu-map child nodes must follow a naming convention where the node name |
88 | must be "clusterN", "coreN", "threadN" depending on the node type (ie | 86 | must be "socketN", "clusterN", "coreN", "threadN" depending on the node type |
89 | cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes which | 87 | (ie socket/cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes |
90 | are siblings within a single common parent node must be given a unique and | 88 | which are siblings within a single common parent node must be given a unique and |
91 | sequential N value, starting from 0). | 89 | sequential N value, starting from 0). |
92 | cpu-map child nodes which do not share a common parent node can have the same | 90 | cpu-map child nodes which do not share a common parent node can have the same |
93 | name (ie same number N as other cpu-map child nodes at different device tree | 91 | name (ie same number N as other cpu-map child nodes at different device tree |
94 | levels) since name uniqueness will be guaranteed by the device tree hierarchy. | 92 | levels) since name uniqueness will be guaranteed by the device tree hierarchy. |
95 | 93 | ||
96 | =========================================== | 94 | =========================================== |
97 | 3 - cluster/core/thread node bindings | 95 | 3 - socket/cluster/core/thread node bindings |
98 | =========================================== | 96 | =========================================== |
99 | 97 | ||
100 | Bindings for cluster/cpu/thread nodes are defined as follows: | 98 | Bindings for socket/cluster/cpu/thread nodes are defined as follows: |
99 | |||
100 | - socket node | ||
101 | |||
102 | Description: must be declared within a cpu-map node, one node | ||
103 | per physical socket in the system. A system can | ||
104 | contain a single or multiple physical sockets. | ||
105 | The association of sockets and NUMA nodes is beyond | ||
106 | the scope of these bindings; please refer to [2] for | ||
107 | NUMA bindings. | ||
108 | |||
109 | This node is optional for a single socket system. | ||
110 | |||
111 | The socket node name must be "socketN" as described in 2.1 above. | ||
112 | A socket node can not be a leaf node. | ||
113 | |||
114 | A socket node's child nodes must be one or more cluster nodes. | ||
115 | |||
116 | Any other configuration is considered invalid. | ||
101 | 117 | ||
102 | - cluster node | 118 | - cluster node |
103 | 119 | ||
104 | Description: must be declared within a cpu-map node, one node | 120 | Description: must be declared within a cpu-map node, one node |
105 | per cluster. A system can contain several layers of | 121 | per cluster. A system can contain several layers of |
106 | clustering and cluster nodes can be contained in parent | 122 | clustering within a single physical socket and cluster |
107 | cluster nodes. | 123 | nodes can be contained in parent cluster nodes. |
108 | 124 | ||
109 | The cluster node name must be "clusterN" as described in 2.1 above. | 125 | The cluster node name must be "clusterN" as described in 2.1 above. |
110 | A cluster node can not be a leaf node. | 126 | A cluster node can not be a leaf node. |
@@ -164,90 +180,93 @@ Bindings for cluster/cpu/thread nodes are defined as follows: | |||
164 | 4 - Example dts | 180 | 4 - Example dts |
165 | =========================================== | 181 | =========================================== |
166 | 182 | ||
167 | Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters): | 183 | Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters in a single |
184 | physical socket): | ||
168 | 185 | ||
169 | cpus { | 186 | cpus { |
170 | #size-cells = <0>; | 187 | #size-cells = <0>; |
171 | #address-cells = <2>; | 188 | #address-cells = <2>; |
172 | 189 | ||
173 | cpu-map { | 190 | cpu-map { |
174 | cluster0 { | 191 | socket0 { |
175 | cluster0 { | 192 | cluster0 { |
176 | core0 { | 193 | cluster0 { |
177 | thread0 { | 194 | core0 { |
178 | cpu = <&CPU0>; | 195 | thread0 { |
196 | cpu = <&CPU0>; | ||
197 | }; | ||
198 | thread1 { | ||
199 | cpu = <&CPU1>; | ||
200 | }; | ||
179 | }; | 201 | }; |
180 | thread1 { | ||
181 | cpu = <&CPU1>; | ||
182 | }; | ||
183 | }; | ||
184 | 202 | ||
185 | core1 { | 203 | core1 { |
186 | thread0 { | 204 | thread0 { |
187 | cpu = <&CPU2>; | 205 | cpu = <&CPU2>; |
188 | }; | 206 | }; |
189 | thread1 { | 207 | thread1 { |
190 | cpu = <&CPU3>; | 208 | cpu = <&CPU3>; |
209 | }; | ||
191 | }; | 210 | }; |
192 | }; | 211 | }; |
193 | }; | ||
194 | 212 | ||
195 | cluster1 { | 213 | cluster1 { |
196 | core0 { | 214 | core0 { |
197 | thread0 { | 215 | thread0 { |
198 | cpu = <&CPU4>; | 216 | cpu = <&CPU4>; |
199 | }; | 217 | }; |
200 | thread1 { | 218 | thread1 { |
201 | cpu = <&CPU5>; | 219 | cpu = <&CPU5>; |
220 | }; | ||
202 | }; | 221 | }; |
203 | }; | ||
204 | 222 | ||
205 | core1 { | 223 | core1 { |
206 | thread0 { | 224 | thread0 { |
207 | cpu = <&CPU6>; | 225 | cpu = <&CPU6>; |
208 | }; | 226 | }; |
209 | thread1 { | 227 | thread1 { |
210 | cpu = <&CPU7>; | 228 | cpu = <&CPU7>; |
211 | }; | 229 | }; |
212 | }; | ||
213 | }; | ||
214 | }; | ||
215 | |||
216 | cluster1 { | ||
217 | cluster0 { | ||
218 | core0 { | ||
219 | thread0 { | ||
220 | cpu = <&CPU8>; | ||
221 | }; | ||
222 | thread1 { | ||
223 | cpu = <&CPU9>; | ||
224 | }; | ||
225 | }; | ||
226 | core1 { | ||
227 | thread0 { | ||
228 | cpu = <&CPU10>; | ||
229 | }; | ||
230 | thread1 { | ||
231 | cpu = <&CPU11>; | ||
232 | }; | 230 | }; |
233 | }; | 231 | }; |
234 | }; | 232 | }; |
235 | 233 | ||
236 | cluster1 { | 234 | cluster1 { |
237 | core0 { | 235 | cluster0 { |
238 | thread0 { | 236 | core0 { |
239 | cpu = <&CPU12>; | 237 | thread0 { |
238 | cpu = <&CPU8>; | ||
239 | }; | ||
240 | thread1 { | ||
241 | cpu = <&CPU9>; | ||
242 | }; | ||
240 | }; | 243 | }; |
241 | thread1 { | 244 | core1 { |
242 | cpu = <&CPU13>; | 245 | thread0 { |
246 | cpu = <&CPU10>; | ||
247 | }; | ||
248 | thread1 { | ||
249 | cpu = <&CPU11>; | ||
250 | }; | ||
243 | }; | 251 | }; |
244 | }; | 252 | }; |
245 | core1 { | 253 | |
246 | thread0 { | 254 | cluster1 { |
247 | cpu = <&CPU14>; | 255 | core0 { |
256 | thread0 { | ||
257 | cpu = <&CPU12>; | ||
258 | }; | ||
259 | thread1 { | ||
260 | cpu = <&CPU13>; | ||
261 | }; | ||
248 | }; | 262 | }; |
249 | thread1 { | 263 | core1 { |
250 | cpu = <&CPU15>; | 264 | thread0 { |
265 | cpu = <&CPU14>; | ||
266 | }; | ||
267 | thread1 { | ||
268 | cpu = <&CPU15>; | ||
269 | }; | ||
251 | }; | 270 | }; |
252 | }; | 271 | }; |
253 | }; | 272 | }; |
@@ -470,6 +489,65 @@ cpus { | |||
470 | }; | 489 | }; |
471 | }; | 490 | }; |
472 | 491 | ||
492 | Example 3: HiFive Unleashed (RISC-V 64 bit, 4 core system) | ||
493 | |||
494 | { | ||
495 | #address-cells = <2>; | ||
496 | #size-cells = <2>; | ||
497 | compatible = "sifive,fu540g", "sifive,fu500"; | ||
498 | model = "sifive,hifive-unleashed-a00"; | ||
499 | |||
500 | ... | ||
501 | cpus { | ||
502 | #address-cells = <1>; | ||
503 | #size-cells = <0>; | ||
504 | cpu-map { | ||
505 | socket0 { | ||
506 | cluster0 { | ||
507 | core0 { | ||
508 | cpu = <&CPU1>; | ||
509 | }; | ||
510 | core1 { | ||
511 | cpu = <&CPU2>; | ||
512 | }; | ||
513 | core2 { | ||
514 | cpu = <&CPU3>; | ||
515 | }; | ||
516 | core3 { | ||
517 | cpu = <&CPU4>; | ||
518 | }; | ||
519 | }; | ||
520 | }; | ||
521 | }; | ||
522 | |||
523 | CPU1: cpu@1 { | ||
524 | device_type = "cpu"; | ||
525 | compatible = "sifive,rocket0", "riscv"; | ||
526 | reg = <0x1>; | ||
527 | } | ||
528 | |||
529 | CPU2: cpu@2 { | ||
530 | device_type = "cpu"; | ||
531 | compatible = "sifive,rocket0", "riscv"; | ||
532 | reg = <0x2>; | ||
533 | } | ||
534 | CPU3: cpu@3 { | ||
535 | device_type = "cpu"; | ||
536 | compatible = "sifive,rocket0", "riscv"; | ||
537 | reg = <0x3>; | ||
538 | } | ||
539 | CPU4: cpu@4 { | ||
540 | device_type = "cpu"; | ||
541 | compatible = "sifive,rocket0", "riscv"; | ||
542 | reg = <0x4>; | ||
543 | } | ||
544 | } | ||
545 | }; | ||
473 | =============================================================================== | 546 | =============================================================================== |
474 | [1] ARM Linux kernel documentation | 547 | [1] ARM Linux kernel documentation |
475 | Documentation/devicetree/bindings/arm/cpus.yaml | 548 | Documentation/devicetree/bindings/arm/cpus.yaml |
549 | [2] Devicetree NUMA binding description | ||
550 | Documentation/devicetree/bindings/numa.txt | ||
551 | [3] RISC-V Linux kernel documentation | ||
552 | Documentation/devicetree/bindings/riscv/cpus.txt | ||
553 | [4] https://www.devicetree.org/specifications/ | ||
diff --git a/MAINTAINERS b/MAINTAINERS index a2c343ee3b2c..f29cfc59d51c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -4290,6 +4290,14 @@ S: Supported | |||
4290 | F: drivers/cpuidle/cpuidle-exynos.c | 4290 | F: drivers/cpuidle/cpuidle-exynos.c |
4291 | F: arch/arm/mach-exynos/pm.c | 4291 | F: arch/arm/mach-exynos/pm.c |
4292 | 4292 | ||
4293 | CPUIDLE DRIVER - ARM PSCI | ||
4294 | M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
4295 | M: Sudeep Holla <sudeep.holla@arm.com> | ||
4296 | L: linux-pm@vger.kernel.org | ||
4297 | L: linux-arm-kernel@lists.infradead.org | ||
4298 | S: Supported | ||
4299 | F: drivers/cpuidle/cpuidle-psci.c | ||
4300 | |||
4293 | CPU IDLE TIME MANAGEMENT FRAMEWORK | 4301 | CPU IDLE TIME MANAGEMENT FRAMEWORK |
4294 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> | 4302 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> |
4295 | M: Daniel Lezcano <daniel.lezcano@linaro.org> | 4303 | M: Daniel Lezcano <daniel.lezcano@linaro.org> |
@@ -6439,6 +6447,7 @@ M: Frank Li <Frank.li@nxp.com> | |||
6439 | L: linux-arm-kernel@lists.infradead.org | 6447 | L: linux-arm-kernel@lists.infradead.org |
6440 | S: Maintained | 6448 | S: Maintained |
6441 | F: drivers/perf/fsl_imx8_ddr_perf.c | 6449 | F: drivers/perf/fsl_imx8_ddr_perf.c |
6450 | F: Documentation/admin-guide/perf/imx-ddr.rst | ||
6442 | F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt | 6451 | F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt |
6443 | 6452 | ||
6444 | FREESCALE IMX LPI2C DRIVER | 6453 | FREESCALE IMX LPI2C DRIVER |
@@ -6724,6 +6733,13 @@ W: https://linuxtv.org | |||
6724 | S: Maintained | 6733 | S: Maintained |
6725 | F: drivers/media/radio/radio-gemtek* | 6734 | F: drivers/media/radio/radio-gemtek* |
6726 | 6735 | ||
6736 | GENERIC ARCHITECTURE TOPOLOGY | ||
6737 | M: Sudeep Holla <sudeep.holla@arm.com> | ||
6738 | L: linux-kernel@vger.kernel.org | ||
6739 | S: Maintained | ||
6740 | F: drivers/base/arch_topology.c | ||
6741 | F: include/linux/arch_topology.h | ||
6742 | |||
6727 | GENERIC GPIO I2C DRIVER | 6743 | GENERIC GPIO I2C DRIVER |
6728 | M: Wolfram Sang <wsa+renesas@sang-engineering.com> | 6744 | M: Wolfram Sang <wsa+renesas@sang-engineering.com> |
6729 | S: Supported | 6745 | S: Supported |
@@ -912,6 +912,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y) | |||
912 | LDFLAGS_vmlinux += $(call ld-option, -X,) | 912 | LDFLAGS_vmlinux += $(call ld-option, -X,) |
913 | endif | 913 | endif |
914 | 914 | ||
915 | ifeq ($(CONFIG_RELR),y) | ||
916 | LDFLAGS_vmlinux += --pack-dyn-relocs=relr | ||
917 | endif | ||
918 | |||
915 | # insure the checker run with the right endianness | 919 | # insure the checker run with the right endianness |
916 | CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian) | 920 | CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian) |
917 | 921 | ||
diff --git a/arch/Kconfig b/arch/Kconfig index a7b57dd42c26..aa6bdb3df5c1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -925,6 +925,20 @@ config LOCK_EVENT_COUNTS | |||
925 | the chance of application behavior change because of timing | 925 | the chance of application behavior change because of timing |
926 | differences. The counts are reported via debugfs. | 926 | differences. The counts are reported via debugfs. |
927 | 927 | ||
928 | # Select if the architecture has support for applying RELR relocations. | ||
929 | config ARCH_HAS_RELR | ||
930 | bool | ||
931 | |||
932 | config RELR | ||
933 | bool "Use RELR relocation packing" | ||
934 | depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR | ||
935 | default y | ||
936 | help | ||
937 | Store the kernel's dynamic relocations in the RELR relocation packing | ||
938 | format. Requires a compatible linker (LLD supports this feature), as | ||
939 | well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy | ||
940 | are compatible). | ||
941 | |||
928 | source "kernel/gcov/Kconfig" | 942 | source "kernel/gcov/Kconfig" |
929 | 943 | ||
930 | source "scripts/gcc-plugins/Kconfig" | 944 | source "scripts/gcc-plugins/Kconfig" |
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 2a786f54d8b8..8a0fae94d45e 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h | |||
@@ -5,26 +5,6 @@ | |||
5 | #ifdef CONFIG_ARM_CPU_TOPOLOGY | 5 | #ifdef CONFIG_ARM_CPU_TOPOLOGY |
6 | 6 | ||
7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | |||
9 | struct cputopo_arm { | ||
10 | int thread_id; | ||
11 | int core_id; | ||
12 | int socket_id; | ||
13 | cpumask_t thread_sibling; | ||
14 | cpumask_t core_sibling; | ||
15 | }; | ||
16 | |||
17 | extern struct cputopo_arm cpu_topology[NR_CPUS]; | ||
18 | |||
19 | #define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) | ||
20 | #define topology_core_id(cpu) (cpu_topology[cpu].core_id) | ||
21 | #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) | ||
22 | #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) | ||
23 | |||
24 | void init_cpu_topology(void); | ||
25 | void store_cpu_topology(unsigned int cpuid); | ||
26 | const struct cpumask *cpu_coregroup_mask(int cpu); | ||
27 | |||
28 | #include <linux/arch_topology.h> | 8 | #include <linux/arch_topology.h> |
29 | 9 | ||
30 | /* Replace task scheduler's default frequency-invariant accounting */ | 10 | /* Replace task scheduler's default frequency-invariant accounting */ |
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index d17cb1e6d679..5b9faba03afb 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -177,17 +177,6 @@ static inline void parse_dt_topology(void) {} | |||
177 | static inline void update_cpu_capacity(unsigned int cpuid) {} | 177 | static inline void update_cpu_capacity(unsigned int cpuid) {} |
178 | #endif | 178 | #endif |
179 | 179 | ||
180 | /* | ||
181 | * cpu topology table | ||
182 | */ | ||
183 | struct cputopo_arm cpu_topology[NR_CPUS]; | ||
184 | EXPORT_SYMBOL_GPL(cpu_topology); | ||
185 | |||
186 | const struct cpumask *cpu_coregroup_mask(int cpu) | ||
187 | { | ||
188 | return &cpu_topology[cpu].core_sibling; | ||
189 | } | ||
190 | |||
191 | /* | 180 | /* |
192 | * The current assumption is that we can power gate each core independently. | 181 | * The current assumption is that we can power gate each core independently. |
193 | * This will be superseded by DT binding once available. | 182 | * This will be superseded by DT binding once available. |
@@ -197,32 +186,6 @@ const struct cpumask *cpu_corepower_mask(int cpu) | |||
197 | return &cpu_topology[cpu].thread_sibling; | 186 | return &cpu_topology[cpu].thread_sibling; |
198 | } | 187 | } |
199 | 188 | ||
200 | static void update_siblings_masks(unsigned int cpuid) | ||
201 | { | ||
202 | struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; | ||
203 | int cpu; | ||
204 | |||
205 | /* update core and thread sibling masks */ | ||
206 | for_each_possible_cpu(cpu) { | ||
207 | cpu_topo = &cpu_topology[cpu]; | ||
208 | |||
209 | if (cpuid_topo->socket_id != cpu_topo->socket_id) | ||
210 | continue; | ||
211 | |||
212 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
213 | if (cpu != cpuid) | ||
214 | cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); | ||
215 | |||
216 | if (cpuid_topo->core_id != cpu_topo->core_id) | ||
217 | continue; | ||
218 | |||
219 | cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); | ||
220 | if (cpu != cpuid) | ||
221 | cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); | ||
222 | } | ||
223 | smp_wmb(); | ||
224 | } | ||
225 | |||
226 | /* | 189 | /* |
227 | * store_cpu_topology is called at boot when only one cpu is running | 190 | * store_cpu_topology is called at boot when only one cpu is running |
228 | * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, | 191 | * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, |
@@ -230,7 +193,7 @@ static void update_siblings_masks(unsigned int cpuid) | |||
230 | */ | 193 | */ |
231 | void store_cpu_topology(unsigned int cpuid) | 194 | void store_cpu_topology(unsigned int cpuid) |
232 | { | 195 | { |
233 | struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; | 196 | struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; |
234 | unsigned int mpidr; | 197 | unsigned int mpidr; |
235 | 198 | ||
236 | /* If the cpu topology has been already set, just return */ | 199 | /* If the cpu topology has been already set, just return */ |
@@ -250,12 +213,12 @@ void store_cpu_topology(unsigned int cpuid) | |||
250 | /* core performance interdependency */ | 213 | /* core performance interdependency */ |
251 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); | 214 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
252 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); | 215 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
253 | cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); | 216 | cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); |
254 | } else { | 217 | } else { |
255 | /* largely independent cores */ | 218 | /* largely independent cores */ |
256 | cpuid_topo->thread_id = -1; | 219 | cpuid_topo->thread_id = -1; |
257 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); | 220 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); |
258 | cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); | 221 | cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); |
259 | } | 222 | } |
260 | } else { | 223 | } else { |
261 | /* | 224 | /* |
@@ -265,7 +228,7 @@ void store_cpu_topology(unsigned int cpuid) | |||
265 | */ | 228 | */ |
266 | cpuid_topo->thread_id = -1; | 229 | cpuid_topo->thread_id = -1; |
267 | cpuid_topo->core_id = 0; | 230 | cpuid_topo->core_id = 0; |
268 | cpuid_topo->socket_id = -1; | 231 | cpuid_topo->package_id = -1; |
269 | } | 232 | } |
270 | 233 | ||
271 | update_siblings_masks(cpuid); | 234 | update_siblings_masks(cpuid); |
@@ -275,7 +238,7 @@ void store_cpu_topology(unsigned int cpuid) | |||
275 | pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", | 238 | pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", |
276 | cpuid, cpu_topology[cpuid].thread_id, | 239 | cpuid, cpu_topology[cpuid].thread_id, |
277 | cpu_topology[cpuid].core_id, | 240 | cpu_topology[cpuid].core_id, |
278 | cpu_topology[cpuid].socket_id, mpidr); | 241 | cpu_topology[cpuid].package_id, mpidr); |
279 | } | 242 | } |
280 | 243 | ||
281 | static inline int cpu_corepower_flags(void) | 244 | static inline int cpu_corepower_flags(void) |
@@ -298,18 +261,7 @@ static struct sched_domain_topology_level arm_topology[] = { | |||
298 | */ | 261 | */ |
299 | void __init init_cpu_topology(void) | 262 | void __init init_cpu_topology(void) |
300 | { | 263 | { |
301 | unsigned int cpu; | 264 | reset_cpu_topology(); |
302 | |||
303 | /* init core mask and capacity */ | ||
304 | for_each_possible_cpu(cpu) { | ||
305 | struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); | ||
306 | |||
307 | cpu_topo->thread_id = -1; | ||
308 | cpu_topo->core_id = -1; | ||
309 | cpu_topo->socket_id = -1; | ||
310 | cpumask_clear(&cpu_topo->core_sibling); | ||
311 | cpumask_clear(&cpu_topo->thread_sibling); | ||
312 | } | ||
313 | smp_wmb(); | 265 | smp_wmb(); |
314 | 266 | ||
315 | parse_dt_topology(); | 267 | parse_dt_topology(); |
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild new file mode 100644 index 000000000000..d6465823b281 --- /dev/null +++ b/arch/arm64/Kbuild | |||
@@ -0,0 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | ||
2 | obj-y += kernel/ mm/ | ||
3 | obj-$(CONFIG_NET) += net/ | ||
4 | obj-$(CONFIG_KVM) += kvm/ | ||
5 | obj-$(CONFIG_XEN) += xen/ | ||
6 | obj-$(CONFIG_CRYPTO) += crypto/ | ||
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f5f7cb75a698..6481964b6425 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -148,6 +148,7 @@ config ARM64 | |||
148 | select HAVE_FAST_GUP | 148 | select HAVE_FAST_GUP |
149 | select HAVE_FTRACE_MCOUNT_RECORD | 149 | select HAVE_FTRACE_MCOUNT_RECORD |
150 | select HAVE_FUNCTION_TRACER | 150 | select HAVE_FUNCTION_TRACER |
151 | select HAVE_FUNCTION_ERROR_INJECTION | ||
151 | select HAVE_FUNCTION_GRAPH_TRACER | 152 | select HAVE_FUNCTION_GRAPH_TRACER |
152 | select HAVE_GCC_PLUGINS | 153 | select HAVE_GCC_PLUGINS |
153 | select HAVE_HW_BREAKPOINT if PERF_EVENTS | 154 | select HAVE_HW_BREAKPOINT if PERF_EVENTS |
@@ -1127,6 +1128,15 @@ config ARM64_SW_TTBR0_PAN | |||
1127 | zeroed area and reserved ASID. The user access routines | 1128 | zeroed area and reserved ASID. The user access routines |
1128 | restore the valid TTBR0_EL1 temporarily. | 1129 | restore the valid TTBR0_EL1 temporarily. |
1129 | 1130 | ||
1131 | config ARM64_TAGGED_ADDR_ABI | ||
1132 | bool "Enable the tagged user addresses syscall ABI" | ||
1133 | default y | ||
1134 | help | ||
1135 | When this option is enabled, user applications can opt in to a | ||
1136 | relaxed ABI via prctl() allowing tagged addresses to be passed | ||
1137 | to system calls as pointer arguments. For details, see | ||
1138 | Documentation/arm64/tagged-address-abi.txt. | ||
1139 | |||
1130 | menuconfig COMPAT | 1140 | menuconfig COMPAT |
1131 | bool "Kernel support for 32-bit EL0" | 1141 | bool "Kernel support for 32-bit EL0" |
1132 | depends on ARM64_4K_PAGES || EXPERT | 1142 | depends on ARM64_4K_PAGES || EXPERT |
@@ -1484,6 +1494,7 @@ endif | |||
1484 | 1494 | ||
1485 | config RELOCATABLE | 1495 | config RELOCATABLE |
1486 | bool | 1496 | bool |
1497 | select ARCH_HAS_RELR | ||
1487 | help | 1498 | help |
1488 | This builds the kernel as a Position Independent Executable (PIE), | 1499 | This builds the kernel as a Position Independent Executable (PIE), |
1489 | which retains all relocation metadata required to relocate the | 1500 | which retains all relocation metadata required to relocate the |
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index a8d2a241ac58..2847b36f72ed 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
@@ -128,11 +128,7 @@ KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) | |||
128 | 128 | ||
129 | export TEXT_OFFSET GZFLAGS | 129 | export TEXT_OFFSET GZFLAGS |
130 | 130 | ||
131 | core-y += arch/arm64/kernel/ arch/arm64/mm/ | 131 | core-y += arch/arm64/ |
132 | core-$(CONFIG_NET) += arch/arm64/net/ | ||
133 | core-$(CONFIG_KVM) += arch/arm64/kvm/ | ||
134 | core-$(CONFIG_XEN) += arch/arm64/xen/ | ||
135 | core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ | ||
136 | libs-y := arch/arm64/lib/ $(libs-y) | 132 | libs-y := arch/arm64/lib/ $(libs-y) |
137 | core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a | 133 | core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a |
138 | 134 | ||
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index c066fc4976cd..b8cf7c85ffa2 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
@@ -124,17 +124,6 @@ alternative_endif | |||
124 | .endm | 124 | .endm |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * Sanitise a 64-bit bounded index wrt speculation, returning zero if out | ||
128 | * of bounds. | ||
129 | */ | ||
130 | .macro mask_nospec64, idx, limit, tmp | ||
131 | sub \tmp, \idx, \limit | ||
132 | bic \tmp, \tmp, \idx | ||
133 | and \idx, \idx, \tmp, asr #63 | ||
134 | csdb | ||
135 | .endm | ||
136 | |||
137 | /* | ||
138 | * NOP sequence | 127 | * NOP sequence |
139 | */ | 128 | */ |
140 | .macro nops, num | 129 | .macro nops, num |
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 657b0457d83c..a5ca23950cfd 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h | |||
@@ -15,8 +15,6 @@ | |||
15 | #include <asm/barrier.h> | 15 | #include <asm/barrier.h> |
16 | #include <asm/lse.h> | 16 | #include <asm/lse.h> |
17 | 17 | ||
18 | #ifdef __KERNEL__ | ||
19 | |||
20 | #define __ARM64_IN_ATOMIC_IMPL | 18 | #define __ARM64_IN_ATOMIC_IMPL |
21 | 19 | ||
22 | #if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) | 20 | #if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) |
@@ -157,5 +155,4 @@ | |||
157 | 155 | ||
158 | #include <asm-generic/atomic-instrumented.h> | 156 | #include <asm-generic/atomic-instrumented.h> |
159 | 157 | ||
160 | #endif | 158 | #endif /* __ASM_ATOMIC_H */ |
161 | #endif | ||
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 64eeaa41e7ca..43da6dd29592 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h | |||
@@ -78,7 +78,7 @@ static inline u32 cache_type_cwg(void) | |||
78 | return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; | 78 | return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; |
79 | } | 79 | } |
80 | 80 | ||
81 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | 81 | #define __read_mostly __section(.data..read_mostly) |
82 | 82 | ||
83 | static inline int cache_line_size_of_cpu(void) | 83 | static inline int cache_line_size_of_cpu(void) |
84 | { | 84 | { |
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index fb8ad4616b3b..b0d53a265f1d 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h | |||
@@ -4,7 +4,6 @@ | |||
4 | */ | 4 | */ |
5 | #ifndef __ASM_COMPAT_H | 5 | #ifndef __ASM_COMPAT_H |
6 | #define __ASM_COMPAT_H | 6 | #define __ASM_COMPAT_H |
7 | #ifdef __KERNEL__ | ||
8 | #ifdef CONFIG_COMPAT | 7 | #ifdef CONFIG_COMPAT |
9 | 8 | ||
10 | /* | 9 | /* |
@@ -215,5 +214,4 @@ static inline int is_compat_thread(struct thread_info *thread) | |||
215 | } | 214 | } |
216 | 215 | ||
217 | #endif /* CONFIG_COMPAT */ | 216 | #endif /* CONFIG_COMPAT */ |
218 | #endif /* __KERNEL__ */ | ||
219 | #endif /* __ASM_COMPAT_H */ | 217 | #endif /* __ASM_COMPAT_H */ |
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index c09d633c3109..86aabf1e0199 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h | |||
@@ -23,6 +23,8 @@ | |||
23 | * @cpu_boot: Boots a cpu into the kernel. | 23 | * @cpu_boot: Boots a cpu into the kernel. |
24 | * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary | 24 | * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary |
25 | * synchronisation. Called from the cpu being booted. | 25 | * synchronisation. Called from the cpu being booted. |
26 | * @cpu_can_disable: Determines whether a CPU can be disabled based on | ||
27 | * mechanism-specific information. | ||
26 | * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific | 28 | * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific |
27 | * reason, which will cause the hot unplug to be aborted. Called | 29 | * reason, which will cause the hot unplug to be aborted. Called |
28 | * from the cpu to be killed. | 30 | * from the cpu to be killed. |
@@ -42,6 +44,7 @@ struct cpu_operations { | |||
42 | int (*cpu_boot)(unsigned int); | 44 | int (*cpu_boot)(unsigned int); |
43 | void (*cpu_postboot)(void); | 45 | void (*cpu_postboot)(void); |
44 | #ifdef CONFIG_HOTPLUG_CPU | 46 | #ifdef CONFIG_HOTPLUG_CPU |
47 | bool (*cpu_can_disable)(unsigned int cpu); | ||
45 | int (*cpu_disable)(unsigned int cpu); | 48 | int (*cpu_disable)(unsigned int cpu); |
46 | void (*cpu_die)(unsigned int cpu); | 49 | void (*cpu_die)(unsigned int cpu); |
47 | int (*cpu_kill)(unsigned int cpu); | 50 | int (*cpu_kill)(unsigned int cpu); |
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index c96ffa4722d3..9cde5d2e768f 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
@@ -289,9 +289,16 @@ struct arm64_cpu_capabilities { | |||
289 | u16 type; | 289 | u16 type; |
290 | bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); | 290 | bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); |
291 | /* | 291 | /* |
292 | * Take the appropriate actions to enable this capability for this CPU. | 292 | * Take the appropriate actions to configure this capability |
293 | * For each successfully booted CPU, this method is called for each | 293 | * for this CPU. If the capability is detected by the kernel |
294 | * globally detected capability. | 294 | * this will be called on all the CPUs in the system, |
295 | * including the hotplugged CPUs, regardless of whether the | ||
296 | * capability is available on that specific CPU. This is | ||
297 | * useful for some capabilities (e.g, working around CPU | ||
298 | * errata), where all the CPUs must take some action (e.g, | ||
299 | * changing system control/configuration). Thus, if an action | ||
300 | * is required only if the CPU has the capability, then the | ||
301 | * routine must check it before taking any action. | ||
295 | */ | 302 | */ |
296 | void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); | 303 | void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); |
297 | union { | 304 | union { |
@@ -363,21 +370,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, | |||
363 | return false; | 370 | return false; |
364 | } | 371 | } |
365 | 372 | ||
366 | /* | ||
367 | * Take appropriate action for all matching entries in the shared capability | ||
368 | * entry. | ||
369 | */ | ||
370 | static inline void | ||
371 | cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) | ||
372 | { | ||
373 | const struct arm64_cpu_capabilities *caps; | ||
374 | |||
375 | for (caps = entry->match_list; caps->matches; caps++) | ||
376 | if (caps->matches(caps, SCOPE_LOCAL_CPU) && | ||
377 | caps->cpu_enable) | ||
378 | caps->cpu_enable(caps); | ||
379 | } | ||
380 | |||
381 | extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); | 373 | extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); |
382 | extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; | 374 | extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; |
383 | extern struct static_key_false arm64_const_caps_ready; | 375 | extern struct static_key_false arm64_const_caps_ready; |
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index e7d46631cc42..b1454d117cd2 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h | |||
@@ -51,14 +51,6 @@ | |||
51 | #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ | 51 | #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ |
52 | MIDR_ARCHITECTURE_MASK) | 52 | MIDR_ARCHITECTURE_MASK) |
53 | 53 | ||
54 | #define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ | ||
55 | ({ \ | ||
56 | u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ | ||
57 | u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ | ||
58 | \ | ||
59 | _model == (model) && rv >= (rv_min) && rv <= (rv_max); \ | ||
60 | }) | ||
61 | |||
62 | #define ARM_CPU_IMP_ARM 0x41 | 54 | #define ARM_CPU_IMP_ARM 0x41 |
63 | #define ARM_CPU_IMP_APM 0x50 | 55 | #define ARM_CPU_IMP_APM 0x50 |
64 | #define ARM_CPU_IMP_CAVIUM 0x43 | 56 | #define ARM_CPU_IMP_CAVIUM 0x43 |
@@ -159,10 +151,19 @@ struct midr_range { | |||
159 | #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) | 151 | #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) |
160 | #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) | 152 | #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) |
161 | 153 | ||
154 | static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min, | ||
155 | u32 rv_max) | ||
156 | { | ||
157 | u32 _model = midr & MIDR_CPU_MODEL_MASK; | ||
158 | u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); | ||
159 | |||
160 | return _model == model && rv >= rv_min && rv <= rv_max; | ||
161 | } | ||
162 | |||
162 | static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) | 163 | static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) |
163 | { | 164 | { |
164 | return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, | 165 | return midr_is_cpu_model_range(midr, range->model, |
165 | range->rv_min, range->rv_max); | 166 | range->rv_min, range->rv_max); |
166 | } | 167 | } |
167 | 168 | ||
168 | static inline bool | 169 | static inline bool |
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index d8ec5bb881c2..7619f473155f 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __ASM_DEBUG_MONITORS_H | 5 | #ifndef __ASM_DEBUG_MONITORS_H |
6 | #define __ASM_DEBUG_MONITORS_H | 6 | #define __ASM_DEBUG_MONITORS_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | #include <linux/errno.h> | 8 | #include <linux/errno.h> |
11 | #include <linux/types.h> | 9 | #include <linux/types.h> |
12 | #include <asm/brk-imm.h> | 10 | #include <asm/brk-imm.h> |
@@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs) | |||
128 | int aarch32_break_handler(struct pt_regs *regs); | 126 | int aarch32_break_handler(struct pt_regs *regs); |
129 | 127 | ||
130 | #endif /* __ASSEMBLY */ | 128 | #endif /* __ASSEMBLY */ |
131 | #endif /* __KERNEL__ */ | ||
132 | #endif /* __ASM_DEBUG_MONITORS_H */ | 129 | #endif /* __ASM_DEBUG_MONITORS_H */ |
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index bdcb0922a40c..fb3e5044f473 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __ASM_DMA_MAPPING_H | 5 | #ifndef __ASM_DMA_MAPPING_H |
6 | #define __ASM_DMA_MAPPING_H | 6 | #define __ASM_DMA_MAPPING_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | #include <linux/types.h> | 8 | #include <linux/types.h> |
11 | #include <linux/vmalloc.h> | 9 | #include <linux/vmalloc.h> |
12 | 10 | ||
@@ -27,5 +25,4 @@ static inline bool is_device_dma_coherent(struct device *dev) | |||
27 | return dev->dma_coherent; | 25 | return dev->dma_coherent; |
28 | } | 26 | } |
29 | 27 | ||
30 | #endif /* __KERNEL__ */ | ||
31 | #endif /* __ASM_DMA_MAPPING_H */ | 28 | #endif /* __ASM_DMA_MAPPING_H */ |
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 65ac18400979..cb29253ae86b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h | |||
@@ -34,7 +34,8 @@ | |||
34 | #define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ | 34 | #define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ |
35 | #define ESR_ELx_EC_SYS64 (0x18) | 35 | #define ESR_ELx_EC_SYS64 (0x18) |
36 | #define ESR_ELx_EC_SVE (0x19) | 36 | #define ESR_ELx_EC_SVE (0x19) |
37 | /* Unallocated EC: 0x1A - 0x1E */ | 37 | #define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ |
38 | /* Unallocated EC: 0x1b - 0x1E */ | ||
38 | #define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ | 39 | #define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ |
39 | #define ESR_ELx_EC_IABT_LOW (0x20) | 40 | #define ESR_ELx_EC_IABT_LOW (0x20) |
40 | #define ESR_ELx_EC_IABT_CUR (0x21) | 41 | #define ESR_ELx_EC_IABT_CUR (0x21) |
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index ed57b760f38c..a17393ff6677 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h | |||
@@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr) | |||
30 | return esr; | 30 | return esr; |
31 | } | 31 | } |
32 | 32 | ||
33 | asmlinkage void enter_from_user_mode(void); | ||
34 | |||
33 | #endif /* __ASM_EXCEPTION_H */ | 35 | #endif /* __ASM_EXCEPTION_H */ |
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index b6a2c352f4c3..59f10dd13f12 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | 23 | ||
24 | #if defined(__KERNEL__) && defined(CONFIG_COMPAT) | 24 | #ifdef CONFIG_COMPAT |
25 | /* Masks for extracting the FPSR and FPCR from the FPSCR */ | 25 | /* Masks for extracting the FPSR and FPCR from the FPSCR */ |
26 | #define VFP_FPSCR_STAT_MASK 0xf800009f | 26 | #define VFP_FPSCR_STAT_MASK 0xf800009f |
27 | #define VFP_FPSCR_CTRL_MASK 0x07f79f00 | 27 | #define VFP_FPSCR_CTRL_MASK 0x07f79f00 |
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 6211e3105491..6cc26a127819 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __ASM_FUTEX_H | 5 | #ifndef __ASM_FUTEX_H |
6 | #define __ASM_FUTEX_H | 6 | #define __ASM_FUTEX_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | #include <linux/futex.h> | 8 | #include <linux/futex.h> |
11 | #include <linux/uaccess.h> | 9 | #include <linux/uaccess.h> |
12 | 10 | ||
@@ -129,5 +127,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, | |||
129 | return ret; | 127 | return ret; |
130 | } | 128 | } |
131 | 129 | ||
132 | #endif /* __KERNEL__ */ | ||
133 | #endif /* __ASM_FUTEX_H */ | 130 | #endif /* __ASM_FUTEX_H */ |
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index db9ab760e6fd..bc7aaed4b34e 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h | |||
@@ -10,8 +10,6 @@ | |||
10 | #include <asm/sysreg.h> | 10 | #include <asm/sysreg.h> |
11 | #include <asm/virt.h> | 11 | #include <asm/virt.h> |
12 | 12 | ||
13 | #ifdef __KERNEL__ | ||
14 | |||
15 | struct arch_hw_breakpoint_ctrl { | 13 | struct arch_hw_breakpoint_ctrl { |
16 | u32 __reserved : 19, | 14 | u32 __reserved : 19, |
17 | len : 8, | 15 | len : 8, |
@@ -156,5 +154,4 @@ static inline int get_num_wrps(void) | |||
156 | ID_AA64DFR0_WRPS_SHIFT); | 154 | ID_AA64DFR0_WRPS_SHIFT); |
157 | } | 155 | } |
158 | 156 | ||
159 | #endif /* __KERNEL__ */ | ||
160 | #endif /* __ASM_BREAKPOINT_H */ | 157 | #endif /* __ASM_BREAKPOINT_H */ |
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 7ed92626949d..e9763831186a 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h | |||
@@ -8,8 +8,6 @@ | |||
8 | #ifndef __ASM_IO_H | 8 | #ifndef __ASM_IO_H |
9 | #define __ASM_IO_H | 9 | #define __ASM_IO_H |
10 | 10 | ||
11 | #ifdef __KERNEL__ | ||
12 | |||
13 | #include <linux/types.h> | 11 | #include <linux/types.h> |
14 | 12 | ||
15 | #include <asm/byteorder.h> | 13 | #include <asm/byteorder.h> |
@@ -97,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) | |||
97 | ({ \ | 95 | ({ \ |
98 | unsigned long tmp; \ | 96 | unsigned long tmp; \ |
99 | \ | 97 | \ |
100 | rmb(); \ | 98 | dma_rmb(); \ |
101 | \ | 99 | \ |
102 | /* \ | 100 | /* \ |
103 | * Create a dummy control dependency from the IO read to any \ | 101 | * Create a dummy control dependency from the IO read to any \ |
@@ -111,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) | |||
111 | }) | 109 | }) |
112 | 110 | ||
113 | #define __io_par(v) __iormb(v) | 111 | #define __io_par(v) __iormb(v) |
114 | #define __iowmb() wmb() | 112 | #define __iowmb() dma_wmb() |
115 | 113 | ||
116 | /* | 114 | /* |
117 | * Relaxed I/O memory access primitives. These follow the Device memory | 115 | * Relaxed I/O memory access primitives. These follow the Device memory |
@@ -207,5 +205,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); | |||
207 | 205 | ||
208 | extern int devmem_is_allowed(unsigned long pfn); | 206 | extern int devmem_is_allowed(unsigned long pfn); |
209 | 207 | ||
210 | #endif /* __KERNEL__ */ | ||
211 | #endif /* __ASM_IO_H */ | 208 | #endif /* __ASM_IO_H */ |
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 7872f260c9ee..1a59f0ed1ae3 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __ASM_IRQFLAGS_H | 5 | #ifndef __ASM_IRQFLAGS_H |
6 | #define __ASM_IRQFLAGS_H | 6 | #define __ASM_IRQFLAGS_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | #include <asm/alternative.h> | 8 | #include <asm/alternative.h> |
11 | #include <asm/ptrace.h> | 9 | #include <asm/ptrace.h> |
12 | #include <asm/sysreg.h> | 10 | #include <asm/sysreg.h> |
@@ -128,5 +126,4 @@ static inline void arch_local_irq_restore(unsigned long flags) | |||
128 | : "memory"); | 126 | : "memory"); |
129 | } | 127 | } |
130 | 128 | ||
131 | #endif | 129 | #endif /* __ASM_IRQFLAGS_H */ |
132 | #endif | ||
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index a713bad71db5..b61b50bf68b1 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -216,7 +216,7 @@ static inline unsigned long kaslr_offset(void) | |||
216 | * pass on to access_ok(), for instance. | 216 | * pass on to access_ok(), for instance. |
217 | */ | 217 | */ |
218 | #define untagged_addr(addr) \ | 218 | #define untagged_addr(addr) \ |
219 | ((__typeof__(addr))sign_extend64((u64)(addr), 55)) | 219 | ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) |
220 | 220 | ||
221 | #ifdef CONFIG_KASAN_SW_TAGS | 221 | #ifdef CONFIG_KASAN_SW_TAGS |
222 | #define __tag_shifted(tag) ((u64)(tag) << 56) | 222 | #define __tag_shifted(tag) ((u64)(tag) << 56) |
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index fd6161336653..f217e3292919 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h | |||
@@ -126,7 +126,7 @@ extern void init_mem_pgprot(void); | |||
126 | extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, | 126 | extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, |
127 | unsigned long virt, phys_addr_t size, | 127 | unsigned long virt, phys_addr_t size, |
128 | pgprot_t prot, bool page_mappings_only); | 128 | pgprot_t prot, bool page_mappings_only); |
129 | extern void *fixmap_remap_fdt(phys_addr_t dt_phys); | 129 | extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); |
130 | extern void mark_linear_text_alias_ro(void); | 130 | extern void mark_linear_text_alias_ro(void); |
131 | 131 | ||
132 | #define INIT_MM_CONTEXT(name) \ | 132 | #define INIT_MM_CONTEXT(name) \ |
diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index 9e690686e8aa..70b323cf8300 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h | |||
@@ -1,7 +1,6 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef __ASM_PCI_H | 2 | #ifndef __ASM_PCI_H |
3 | #define __ASM_PCI_H | 3 | #define __ASM_PCI_H |
4 | #ifdef __KERNEL__ | ||
5 | 4 | ||
6 | #include <linux/types.h> | 5 | #include <linux/types.h> |
7 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
@@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
35 | } | 34 | } |
36 | #endif /* CONFIG_PCI */ | 35 | #endif /* CONFIG_PCI */ |
37 | 36 | ||
38 | #endif /* __KERNEL__ */ | ||
39 | #endif /* __ASM_PCI_H */ | 37 | #endif /* __ASM_PCI_H */ |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 979e24fadf35..9a8f7e51c2b1 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) | |||
220 | * Only if the new pte is valid and kernel, otherwise TLB maintenance | 220 | * Only if the new pte is valid and kernel, otherwise TLB maintenance |
221 | * or update_mmu_cache() have the necessary barriers. | 221 | * or update_mmu_cache() have the necessary barriers. |
222 | */ | 222 | */ |
223 | if (pte_valid_not_user(pte)) | 223 | if (pte_valid_not_user(pte)) { |
224 | dsb(ishst); | 224 | dsb(ishst); |
225 | isb(); | ||
226 | } | ||
225 | } | 227 | } |
226 | 228 | ||
227 | extern void __sync_icache_dcache(pte_t pteval); | 229 | extern void __sync_icache_dcache(pte_t pteval); |
@@ -481,8 +483,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | |||
481 | 483 | ||
482 | WRITE_ONCE(*pmdp, pmd); | 484 | WRITE_ONCE(*pmdp, pmd); |
483 | 485 | ||
484 | if (pmd_valid(pmd)) | 486 | if (pmd_valid(pmd)) { |
485 | dsb(ishst); | 487 | dsb(ishst); |
488 | isb(); | ||
489 | } | ||
486 | } | 490 | } |
487 | 491 | ||
488 | static inline void pmd_clear(pmd_t *pmdp) | 492 | static inline void pmd_clear(pmd_t *pmdp) |
@@ -540,8 +544,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud) | |||
540 | 544 | ||
541 | WRITE_ONCE(*pudp, pud); | 545 | WRITE_ONCE(*pudp, pud); |
542 | 546 | ||
543 | if (pud_valid(pud)) | 547 | if (pud_valid(pud)) { |
544 | dsb(ishst); | 548 | dsb(ishst); |
549 | isb(); | ||
550 | } | ||
545 | } | 551 | } |
546 | 552 | ||
547 | static inline void pud_clear(pud_t *pudp) | 553 | static inline void pud_clear(pud_t *pudp) |
@@ -599,6 +605,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) | |||
599 | 605 | ||
600 | WRITE_ONCE(*pgdp, pgd); | 606 | WRITE_ONCE(*pgdp, pgd); |
601 | dsb(ishst); | 607 | dsb(ishst); |
608 | isb(); | ||
602 | } | 609 | } |
603 | 610 | ||
604 | static inline void pgd_clear(pgd_t *pgdp) | 611 | static inline void pgd_clear(pgd_t *pgdp) |
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 368d90a9d0e5..a2ce65a0c1fa 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h | |||
@@ -9,7 +9,6 @@ | |||
9 | #ifndef __ASM_PROCFNS_H | 9 | #ifndef __ASM_PROCFNS_H |
10 | #define __ASM_PROCFNS_H | 10 | #define __ASM_PROCFNS_H |
11 | 11 | ||
12 | #ifdef __KERNEL__ | ||
13 | #ifndef __ASSEMBLY__ | 12 | #ifndef __ASSEMBLY__ |
14 | 13 | ||
15 | #include <asm/page.h> | 14 | #include <asm/page.h> |
@@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); | |||
25 | #include <asm/memory.h> | 24 | #include <asm/memory.h> |
26 | 25 | ||
27 | #endif /* __ASSEMBLY__ */ | 26 | #endif /* __ASSEMBLY__ */ |
28 | #endif /* __KERNEL__ */ | ||
29 | #endif /* __ASM_PROCFNS_H */ | 27 | #endif /* __ASM_PROCFNS_H */ |
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index e4c93945e477..c67848c55009 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
@@ -20,7 +20,6 @@ | |||
20 | #define NET_IP_ALIGN 0 | 20 | #define NET_IP_ALIGN 0 |
21 | 21 | ||
22 | #ifndef __ASSEMBLY__ | 22 | #ifndef __ASSEMBLY__ |
23 | #ifdef __KERNEL__ | ||
24 | 23 | ||
25 | #include <linux/build_bug.h> | 24 | #include <linux/build_bug.h> |
26 | #include <linux/cache.h> | 25 | #include <linux/cache.h> |
@@ -283,8 +282,6 @@ static inline void spin_lock_prefetch(const void *ptr) | |||
283 | 282 | ||
284 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | 283 | #define HAVE_ARCH_PICK_MMAP_LAYOUT |
285 | 284 | ||
286 | #endif | ||
287 | |||
288 | extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ | 285 | extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ |
289 | extern void __init minsigstksz_setup(void); | 286 | extern void __init minsigstksz_setup(void); |
290 | 287 | ||
@@ -306,6 +303,14 @@ extern void __init minsigstksz_setup(void); | |||
306 | /* PR_PAC_RESET_KEYS prctl */ | 303 | /* PR_PAC_RESET_KEYS prctl */ |
307 | #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) | 304 | #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) |
308 | 305 | ||
306 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI | ||
307 | /* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ | ||
308 | long set_tagged_addr_ctrl(unsigned long arg); | ||
309 | long get_tagged_addr_ctrl(void); | ||
310 | #define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg) | ||
311 | #define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl() | ||
312 | #endif | ||
313 | |||
309 | /* | 314 | /* |
310 | * For CONFIG_GCC_PLUGIN_STACKLEAK | 315 | * For CONFIG_GCC_PLUGIN_STACKLEAK |
311 | * | 316 | * |
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 1dcf63a9ac1f..fbebb411ae20 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) | |||
301 | return regs->regs[0]; | 301 | return regs->regs[0]; |
302 | } | 302 | } |
303 | 303 | ||
304 | static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) | ||
305 | { | ||
306 | regs->regs[0] = rc; | ||
307 | } | ||
308 | |||
304 | /** | 309 | /** |
305 | * regs_get_kernel_argument() - get Nth function argument in kernel | 310 | * regs_get_kernel_argument() - get Nth function argument in kernel |
306 | * @regs: pt_regs of that context | 311 | * @regs: pt_regs of that context |
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index bd43d1cf724b..7e9f163d02ec 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h | |||
@@ -5,7 +5,6 @@ | |||
5 | #ifndef __ASM_SIGNAL32_H | 5 | #ifndef __ASM_SIGNAL32_H |
6 | #define __ASM_SIGNAL32_H | 6 | #define __ASM_SIGNAL32_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | #ifdef CONFIG_COMPAT | 8 | #ifdef CONFIG_COMPAT |
10 | #include <linux/compat.h> | 9 | #include <linux/compat.h> |
11 | 10 | ||
@@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs) | |||
79 | { | 78 | { |
80 | } | 79 | } |
81 | #endif /* CONFIG_COMPAT */ | 80 | #endif /* CONFIG_COMPAT */ |
82 | #endif /* __KERNEL__ */ | ||
83 | #endif /* __ASM_SIGNAL32_H */ | 81 | #endif /* __ASM_SIGNAL32_H */ |
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 06ebcfef73df..972d196c7714 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
@@ -212,6 +212,9 @@ | |||
212 | #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) | 212 | #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) |
213 | #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) | 213 | #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) |
214 | 214 | ||
215 | #define SYS_PAR_EL1_F BIT(1) | ||
216 | #define SYS_PAR_EL1_FST GENMASK(6, 1) | ||
217 | |||
215 | /*** Statistical Profiling Extension ***/ | 218 | /*** Statistical Profiling Extension ***/ |
216 | /* ID registers */ | 219 | /* ID registers */ |
217 | #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) | 220 | #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) |
@@ -499,28 +502,11 @@ | |||
499 | #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ | 502 | #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ |
500 | (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ | 503 | (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ |
501 | (BIT(29))) | 504 | (BIT(29))) |
502 | #define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \ | ||
503 | (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \ | ||
504 | (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \ | ||
505 | (BIT(27)) | (BIT(30)) | (BIT(31)) | \ | ||
506 | (0xffffefffUL << 32)) | ||
507 | 505 | ||
508 | #ifdef CONFIG_CPU_BIG_ENDIAN | 506 | #ifdef CONFIG_CPU_BIG_ENDIAN |
509 | #define ENDIAN_SET_EL2 SCTLR_ELx_EE | 507 | #define ENDIAN_SET_EL2 SCTLR_ELx_EE |
510 | #define ENDIAN_CLEAR_EL2 0 | ||
511 | #else | 508 | #else |
512 | #define ENDIAN_SET_EL2 0 | 509 | #define ENDIAN_SET_EL2 0 |
513 | #define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE | ||
514 | #endif | ||
515 | |||
516 | /* SCTLR_EL2 value used for the hyp-stub */ | ||
517 | #define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1) | ||
518 | #define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ | ||
519 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ | ||
520 | SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) | ||
521 | |||
522 | #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL | ||
523 | #error "Inconsistent SCTLR_EL2 set/clear bits" | ||
524 | #endif | 510 | #endif |
525 | 511 | ||
526 | /* SCTLR_EL1 specific flags. */ | 512 | /* SCTLR_EL1 specific flags. */ |
@@ -539,16 +525,11 @@ | |||
539 | 525 | ||
540 | #define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ | 526 | #define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ |
541 | (BIT(29))) | 527 | (BIT(29))) |
542 | #define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \ | ||
543 | (BIT(27)) | (BIT(30)) | (BIT(31)) | \ | ||
544 | (0xffffefffUL << 32)) | ||
545 | 528 | ||
546 | #ifdef CONFIG_CPU_BIG_ENDIAN | 529 | #ifdef CONFIG_CPU_BIG_ENDIAN |
547 | #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) | 530 | #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) |
548 | #define ENDIAN_CLEAR_EL1 0 | ||
549 | #else | 531 | #else |
550 | #define ENDIAN_SET_EL1 0 | 532 | #define ENDIAN_SET_EL1 0 |
551 | #define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) | ||
552 | #endif | 533 | #endif |
553 | 534 | ||
554 | #define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ | 535 | #define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ |
@@ -556,13 +537,6 @@ | |||
556 | SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ | 537 | SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ |
557 | SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ | 538 | SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ |
558 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) | 539 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) |
559 | #define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\ | ||
560 | SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ | ||
561 | SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) | ||
562 | |||
563 | #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL | ||
564 | #error "Inconsistent SCTLR_EL1 set/clear bits" | ||
565 | #endif | ||
566 | 540 | ||
567 | /* id_aa64isar0 */ | 541 | /* id_aa64isar0 */ |
568 | #define ID_AA64ISAR0_TS_SHIFT 52 | 542 | #define ID_AA64ISAR0_TS_SHIFT 52 |
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 180b34ec5965..f0cec4160136 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h | |||
@@ -8,8 +8,6 @@ | |||
8 | #ifndef __ASM_THREAD_INFO_H | 8 | #ifndef __ASM_THREAD_INFO_H |
9 | #define __ASM_THREAD_INFO_H | 9 | #define __ASM_THREAD_INFO_H |
10 | 10 | ||
11 | #ifdef __KERNEL__ | ||
12 | |||
13 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
14 | 12 | ||
15 | #ifndef __ASSEMBLY__ | 13 | #ifndef __ASSEMBLY__ |
@@ -59,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk); | |||
59 | 57 | ||
60 | #endif | 58 | #endif |
61 | 59 | ||
62 | /* | 60 | #define TIF_SIGPENDING 0 /* signal pending */ |
63 | * thread information flags: | 61 | #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ |
64 | * TIF_SYSCALL_TRACE - syscall trace active | ||
65 | * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace | ||
66 | * TIF_SYSCALL_AUDIT - syscall auditing | ||
67 | * TIF_SECCOMP - syscall secure computing | ||
68 | * TIF_SYSCALL_EMU - syscall emulation active | ||
69 | * TIF_SIGPENDING - signal pending | ||
70 | * TIF_NEED_RESCHED - rescheduling necessary | ||
71 | * TIF_NOTIFY_RESUME - callback before returning to user | ||
72 | */ | ||
73 | #define TIF_SIGPENDING 0 | ||
74 | #define TIF_NEED_RESCHED 1 | ||
75 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ | 62 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ |
76 | #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ | 63 | #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ |
77 | #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ | 64 | #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ |
78 | #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ | 65 | #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ |
79 | #define TIF_NOHZ 7 | 66 | #define TIF_NOHZ 7 |
80 | #define TIF_SYSCALL_TRACE 8 | 67 | #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ |
81 | #define TIF_SYSCALL_AUDIT 9 | 68 | #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ |
82 | #define TIF_SYSCALL_TRACEPOINT 10 | 69 | #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ |
83 | #define TIF_SECCOMP 11 | 70 | #define TIF_SECCOMP 11 /* syscall secure computing */ |
84 | #define TIF_SYSCALL_EMU 12 | 71 | #define TIF_SYSCALL_EMU 12 /* syscall emulation active */ |
85 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ | 72 | #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ |
86 | #define TIF_FREEZE 19 | 73 | #define TIF_FREEZE 19 |
87 | #define TIF_RESTORE_SIGMASK 20 | 74 | #define TIF_RESTORE_SIGMASK 20 |
@@ -90,6 +77,7 @@ void arch_release_task_struct(struct task_struct *tsk); | |||
90 | #define TIF_SVE 23 /* Scalable Vector Extension in use */ | 77 | #define TIF_SVE 23 /* Scalable Vector Extension in use */ |
91 | #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ | 78 | #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ |
92 | #define TIF_SSBD 25 /* Wants SSB mitigation */ | 79 | #define TIF_SSBD 25 /* Wants SSB mitigation */ |
80 | #define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ | ||
93 | 81 | ||
94 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 82 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
95 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 83 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
@@ -121,5 +109,4 @@ void arch_release_task_struct(struct task_struct *tsk); | |||
121 | .addr_limit = KERNEL_DS, \ | 109 | .addr_limit = KERNEL_DS, \ |
122 | } | 110 | } |
123 | 111 | ||
124 | #endif /* __KERNEL__ */ | ||
125 | #endif /* __ASM_THREAD_INFO_H */ | 112 | #endif /* __ASM_THREAD_INFO_H */ |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 8af7a85f76bd..bc3949064725 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) | |||
251 | dsb(ishst); | 251 | dsb(ishst); |
252 | __tlbi(vaae1is, addr); | 252 | __tlbi(vaae1is, addr); |
253 | dsb(ish); | 253 | dsb(ish); |
254 | isb(); | ||
254 | } | 255 | } |
255 | #endif | 256 | #endif |
256 | 257 | ||
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 0524f2438649..a4d945db95a2 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h | |||
@@ -4,29 +4,6 @@ | |||
4 | 4 | ||
5 | #include <linux/cpumask.h> | 5 | #include <linux/cpumask.h> |
6 | 6 | ||
7 | struct cpu_topology { | ||
8 | int thread_id; | ||
9 | int core_id; | ||
10 | int package_id; | ||
11 | int llc_id; | ||
12 | cpumask_t thread_sibling; | ||
13 | cpumask_t core_sibling; | ||
14 | cpumask_t llc_sibling; | ||
15 | }; | ||
16 | |||
17 | extern struct cpu_topology cpu_topology[NR_CPUS]; | ||
18 | |||
19 | #define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) | ||
20 | #define topology_core_id(cpu) (cpu_topology[cpu].core_id) | ||
21 | #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) | ||
22 | #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) | ||
23 | #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) | ||
24 | |||
25 | void init_cpu_topology(void); | ||
26 | void store_cpu_topology(unsigned int cpuid); | ||
27 | void remove_cpu_topology(unsigned int cpuid); | ||
28 | const struct cpumask *cpu_coregroup_mask(int cpu); | ||
29 | |||
30 | #ifdef CONFIG_NUMA | 7 | #ifdef CONFIG_NUMA |
31 | 8 | ||
32 | struct pci_bus; | 9 | struct pci_bus; |
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 5a1c32260c1f..097d6bfac0b7 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -62,6 +62,10 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si | |||
62 | { | 62 | { |
63 | unsigned long ret, limit = current_thread_info()->addr_limit; | 63 | unsigned long ret, limit = current_thread_info()->addr_limit; |
64 | 64 | ||
65 | if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && | ||
66 | test_thread_flag(TIF_TAGGED_ADDR)) | ||
67 | addr = untagged_addr(addr); | ||
68 | |||
65 | __chk_user_ptr(addr); | 69 | __chk_user_ptr(addr); |
66 | asm volatile( | 70 | asm volatile( |
67 | // A + B <= C + 1 for all A,B,C, in four easy steps: | 71 | // A + B <= C + 1 for all A,B,C, in four easy steps: |
@@ -215,7 +219,8 @@ static inline void uaccess_enable_not_uao(void) | |||
215 | 219 | ||
216 | /* | 220 | /* |
217 | * Sanitise a uaccess pointer such that it becomes NULL if above the | 221 | * Sanitise a uaccess pointer such that it becomes NULL if above the |
218 | * current addr_limit. | 222 | * current addr_limit. In case the pointer is tagged (has the top byte set), |
223 | * untag the pointer before checking. | ||
219 | */ | 224 | */ |
220 | #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) | 225 | #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) |
221 | static inline void __user *__uaccess_mask_ptr(const void __user *ptr) | 226 | static inline void __user *__uaccess_mask_ptr(const void __user *ptr) |
@@ -223,10 +228,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) | |||
223 | void __user *safe_ptr; | 228 | void __user *safe_ptr; |
224 | 229 | ||
225 | asm volatile( | 230 | asm volatile( |
226 | " bics xzr, %1, %2\n" | 231 | " bics xzr, %3, %2\n" |
227 | " csel %0, %1, xzr, eq\n" | 232 | " csel %0, %1, xzr, eq\n" |
228 | : "=&r" (safe_ptr) | 233 | : "=&r" (safe_ptr) |
229 | : "r" (ptr), "r" (current_thread_info()->addr_limit) | 234 | : "r" (ptr), "r" (current_thread_info()->addr_limit), |
235 | "r" (untagged_addr(ptr)) | ||
230 | : "cc"); | 236 | : "cc"); |
231 | 237 | ||
232 | csdb(); | 238 | csdb(); |
diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 9c15e0a06301..07468428fd29 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __ASM_VDSO_H | 5 | #ifndef __ASM_VDSO_H |
6 | #define __ASM_VDSO_H | 6 | #define __ASM_VDSO_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | /* | 8 | /* |
11 | * Default link address for the vDSO. | 9 | * Default link address for the vDSO. |
12 | * Since we randomise the VDSO mapping, there's little point in trying | 10 | * Since we randomise the VDSO mapping, there's little point in trying |
@@ -28,6 +26,4 @@ | |||
28 | 26 | ||
29 | #endif /* !__ASSEMBLY__ */ | 27 | #endif /* !__ASSEMBLY__ */ |
30 | 28 | ||
31 | #endif /* __KERNEL__ */ | ||
32 | |||
33 | #endif /* __ASM_VDSO_H */ | 29 | #endif /* __ASM_VDSO_H */ |
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h index ba6dbc3de864..1f38bf330a6e 100644 --- a/arch/arm64/include/asm/vdso_datapage.h +++ b/arch/arm64/include/asm/vdso_datapage.h | |||
@@ -5,8 +5,6 @@ | |||
5 | #ifndef __ASM_VDSO_DATAPAGE_H | 5 | #ifndef __ASM_VDSO_DATAPAGE_H |
6 | #define __ASM_VDSO_DATAPAGE_H | 6 | #define __ASM_VDSO_DATAPAGE_H |
7 | 7 | ||
8 | #ifdef __KERNEL__ | ||
9 | |||
10 | #ifndef __ASSEMBLY__ | 8 | #ifndef __ASSEMBLY__ |
11 | 9 | ||
12 | struct vdso_data { | 10 | struct vdso_data { |
@@ -32,6 +30,4 @@ struct vdso_data { | |||
32 | 30 | ||
33 | #endif /* !__ASSEMBLY__ */ | 31 | #endif /* !__ASSEMBLY__ */ |
34 | 32 | ||
35 | #endif /* __KERNEL__ */ | ||
36 | |||
37 | #endif /* __ASM_VDSO_DATAPAGE_H */ | 33 | #endif /* __ASM_VDSO_DATAPAGE_H */ |
diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h deleted file mode 100644 index 313325fa22fa..000000000000 --- a/arch/arm64/include/uapi/asm/stat.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | ||
2 | /* | ||
3 | * Copyright (C) 2012 ARM Ltd. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | #include <asm-generic/stat.h> | ||
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d19d14ba9ae4..95201e5ff5e1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -886,7 +886,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _ | |||
886 | u32 midr = read_cpuid_id(); | 886 | u32 midr = read_cpuid_id(); |
887 | 887 | ||
888 | /* Cavium ThunderX pass 1.x and 2.x */ | 888 | /* Cavium ThunderX pass 1.x and 2.x */ |
889 | return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, | 889 | return midr_is_cpu_model_range(midr, MIDR_THUNDERX, |
890 | MIDR_CPU_VAR_REV(0, 0), | 890 | MIDR_CPU_VAR_REV(0, 0), |
891 | MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); | 891 | MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); |
892 | } | 892 | } |
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index d1048173fd8a..e4d6af2fdec7 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/cpu_pm.h> | 11 | #include <linux/cpu_pm.h> |
12 | #include <linux/of.h> | 12 | #include <linux/of.h> |
13 | #include <linux/of_device.h> | 13 | #include <linux/of_device.h> |
14 | #include <linux/psci.h> | ||
14 | 15 | ||
15 | #include <asm/cpuidle.h> | 16 | #include <asm/cpuidle.h> |
16 | #include <asm/cpu_ops.h> | 17 | #include <asm/cpu_ops.h> |
@@ -46,17 +47,58 @@ int arm_cpuidle_suspend(int index) | |||
46 | 47 | ||
47 | #define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) | 48 | #define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) |
48 | 49 | ||
50 | static int psci_acpi_cpu_init_idle(unsigned int cpu) | ||
51 | { | ||
52 | int i, count; | ||
53 | struct acpi_lpi_state *lpi; | ||
54 | struct acpi_processor *pr = per_cpu(processors, cpu); | ||
55 | |||
56 | /* | ||
57 | * If the PSCI cpu_suspend function hook has not been initialized | ||
58 | * idle states must not be enabled, so bail out | ||
59 | */ | ||
60 | if (!psci_ops.cpu_suspend) | ||
61 | return -EOPNOTSUPP; | ||
62 | |||
63 | if (unlikely(!pr || !pr->flags.has_lpi)) | ||
64 | return -EINVAL; | ||
65 | |||
66 | count = pr->power.count - 1; | ||
67 | if (count <= 0) | ||
68 | return -ENODEV; | ||
69 | |||
70 | for (i = 0; i < count; i++) { | ||
71 | u32 state; | ||
72 | |||
73 | lpi = &pr->power.lpi_states[i + 1]; | ||
74 | /* | ||
75 | * Only bits[31:0] represent a PSCI power_state while | ||
76 | * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification | ||
77 | */ | ||
78 | state = lpi->address; | ||
79 | if (!psci_power_state_is_valid(state)) { | ||
80 | pr_warn("Invalid PSCI power state %#x\n", state); | ||
81 | return -EINVAL; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
49 | int acpi_processor_ffh_lpi_probe(unsigned int cpu) | 88 | int acpi_processor_ffh_lpi_probe(unsigned int cpu) |
50 | { | 89 | { |
51 | return arm_cpuidle_init(cpu); | 90 | return psci_acpi_cpu_init_idle(cpu); |
52 | } | 91 | } |
53 | 92 | ||
54 | int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) | 93 | int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) |
55 | { | 94 | { |
95 | u32 state = lpi->address; | ||
96 | |||
56 | if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) | 97 | if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) |
57 | return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend, | 98 | return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter, |
58 | lpi->index); | 99 | lpi->index, state); |
59 | else | 100 | else |
60 | return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index); | 101 | return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, |
102 | lpi->index, state); | ||
61 | } | 103 | } |
62 | #endif | 104 | #endif |
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 876055e37352..05933c065732 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
@@ -33,7 +33,7 @@ | |||
33 | DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); | 33 | DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); |
34 | static struct cpuinfo_arm64 boot_cpu_data; | 34 | static struct cpuinfo_arm64 boot_cpu_data; |
35 | 35 | ||
36 | static char *icache_policy_str[] = { | 36 | static const char *icache_policy_str[] = { |
37 | [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN", | 37 | [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN", |
38 | [ICACHE_POLICY_VIPT] = "VIPT", | 38 | [ICACHE_POLICY_VIPT] = "VIPT", |
39 | [ICACHE_POLICY_PIPT] = "PIPT", | 39 | [ICACHE_POLICY_PIPT] = "PIPT", |
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 320a30dbe35e..84a822748c84 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -30,9 +30,9 @@ | |||
30 | * Context tracking subsystem. Used to instrument transitions | 30 | * Context tracking subsystem. Used to instrument transitions |
31 | * between user and kernel mode. | 31 | * between user and kernel mode. |
32 | */ | 32 | */ |
33 | .macro ct_user_exit | 33 | .macro ct_user_exit_irqoff |
34 | #ifdef CONFIG_CONTEXT_TRACKING | 34 | #ifdef CONFIG_CONTEXT_TRACKING |
35 | bl context_tracking_user_exit | 35 | bl enter_from_user_mode |
36 | #endif | 36 | #endif |
37 | .endm | 37 | .endm |
38 | 38 | ||
@@ -792,8 +792,8 @@ el0_cp15: | |||
792 | /* | 792 | /* |
793 | * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions | 793 | * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions |
794 | */ | 794 | */ |
795 | ct_user_exit_irqoff | ||
795 | enable_daif | 796 | enable_daif |
796 | ct_user_exit | ||
797 | mov x0, x25 | 797 | mov x0, x25 |
798 | mov x1, sp | 798 | mov x1, sp |
799 | bl do_cp15instr | 799 | bl do_cp15instr |
@@ -805,8 +805,8 @@ el0_da: | |||
805 | * Data abort handling | 805 | * Data abort handling |
806 | */ | 806 | */ |
807 | mrs x26, far_el1 | 807 | mrs x26, far_el1 |
808 | ct_user_exit_irqoff | ||
808 | enable_daif | 809 | enable_daif |
809 | ct_user_exit | ||
810 | clear_address_tag x0, x26 | 810 | clear_address_tag x0, x26 |
811 | mov x1, x25 | 811 | mov x1, x25 |
812 | mov x2, sp | 812 | mov x2, sp |
@@ -818,11 +818,11 @@ el0_ia: | |||
818 | */ | 818 | */ |
819 | mrs x26, far_el1 | 819 | mrs x26, far_el1 |
820 | gic_prio_kentry_setup tmp=x0 | 820 | gic_prio_kentry_setup tmp=x0 |
821 | ct_user_exit_irqoff | ||
821 | enable_da_f | 822 | enable_da_f |
822 | #ifdef CONFIG_TRACE_IRQFLAGS | 823 | #ifdef CONFIG_TRACE_IRQFLAGS |
823 | bl trace_hardirqs_off | 824 | bl trace_hardirqs_off |
824 | #endif | 825 | #endif |
825 | ct_user_exit | ||
826 | mov x0, x26 | 826 | mov x0, x26 |
827 | mov x1, x25 | 827 | mov x1, x25 |
828 | mov x2, sp | 828 | mov x2, sp |
@@ -832,8 +832,8 @@ el0_fpsimd_acc: | |||
832 | /* | 832 | /* |
833 | * Floating Point or Advanced SIMD access | 833 | * Floating Point or Advanced SIMD access |
834 | */ | 834 | */ |
835 | ct_user_exit_irqoff | ||
835 | enable_daif | 836 | enable_daif |
836 | ct_user_exit | ||
837 | mov x0, x25 | 837 | mov x0, x25 |
838 | mov x1, sp | 838 | mov x1, sp |
839 | bl do_fpsimd_acc | 839 | bl do_fpsimd_acc |
@@ -842,8 +842,8 @@ el0_sve_acc: | |||
842 | /* | 842 | /* |
843 | * Scalable Vector Extension access | 843 | * Scalable Vector Extension access |
844 | */ | 844 | */ |
845 | ct_user_exit_irqoff | ||
845 | enable_daif | 846 | enable_daif |
846 | ct_user_exit | ||
847 | mov x0, x25 | 847 | mov x0, x25 |
848 | mov x1, sp | 848 | mov x1, sp |
849 | bl do_sve_acc | 849 | bl do_sve_acc |
@@ -852,8 +852,8 @@ el0_fpsimd_exc: | |||
852 | /* | 852 | /* |
853 | * Floating Point, Advanced SIMD or SVE exception | 853 | * Floating Point, Advanced SIMD or SVE exception |
854 | */ | 854 | */ |
855 | ct_user_exit_irqoff | ||
855 | enable_daif | 856 | enable_daif |
856 | ct_user_exit | ||
857 | mov x0, x25 | 857 | mov x0, x25 |
858 | mov x1, sp | 858 | mov x1, sp |
859 | bl do_fpsimd_exc | 859 | bl do_fpsimd_exc |
@@ -868,11 +868,11 @@ el0_sp_pc: | |||
868 | * Stack or PC alignment exception handling | 868 | * Stack or PC alignment exception handling |
869 | */ | 869 | */ |
870 | gic_prio_kentry_setup tmp=x0 | 870 | gic_prio_kentry_setup tmp=x0 |
871 | ct_user_exit_irqoff | ||
871 | enable_da_f | 872 | enable_da_f |
872 | #ifdef CONFIG_TRACE_IRQFLAGS | 873 | #ifdef CONFIG_TRACE_IRQFLAGS |
873 | bl trace_hardirqs_off | 874 | bl trace_hardirqs_off |
874 | #endif | 875 | #endif |
875 | ct_user_exit | ||
876 | mov x0, x26 | 876 | mov x0, x26 |
877 | mov x1, x25 | 877 | mov x1, x25 |
878 | mov x2, sp | 878 | mov x2, sp |
@@ -882,8 +882,8 @@ el0_undef: | |||
882 | /* | 882 | /* |
883 | * Undefined instruction | 883 | * Undefined instruction |
884 | */ | 884 | */ |
885 | ct_user_exit_irqoff | ||
885 | enable_daif | 886 | enable_daif |
886 | ct_user_exit | ||
887 | mov x0, sp | 887 | mov x0, sp |
888 | bl do_undefinstr | 888 | bl do_undefinstr |
889 | b ret_to_user | 889 | b ret_to_user |
@@ -891,8 +891,8 @@ el0_sys: | |||
891 | /* | 891 | /* |
892 | * System instructions, for trapped cache maintenance instructions | 892 | * System instructions, for trapped cache maintenance instructions |
893 | */ | 893 | */ |
894 | ct_user_exit_irqoff | ||
894 | enable_daif | 895 | enable_daif |
895 | ct_user_exit | ||
896 | mov x0, x25 | 896 | mov x0, x25 |
897 | mov x1, sp | 897 | mov x1, sp |
898 | bl do_sysinstr | 898 | bl do_sysinstr |
@@ -902,17 +902,18 @@ el0_dbg: | |||
902 | * Debug exception handling | 902 | * Debug exception handling |
903 | */ | 903 | */ |
904 | tbnz x24, #0, el0_inv // EL0 only | 904 | tbnz x24, #0, el0_inv // EL0 only |
905 | mrs x24, far_el1 | ||
905 | gic_prio_kentry_setup tmp=x3 | 906 | gic_prio_kentry_setup tmp=x3 |
906 | mrs x0, far_el1 | 907 | ct_user_exit_irqoff |
908 | mov x0, x24 | ||
907 | mov x1, x25 | 909 | mov x1, x25 |
908 | mov x2, sp | 910 | mov x2, sp |
909 | bl do_debug_exception | 911 | bl do_debug_exception |
910 | enable_da_f | 912 | enable_da_f |
911 | ct_user_exit | ||
912 | b ret_to_user | 913 | b ret_to_user |
913 | el0_inv: | 914 | el0_inv: |
915 | ct_user_exit_irqoff | ||
914 | enable_daif | 916 | enable_daif |
915 | ct_user_exit | ||
916 | mov x0, sp | 917 | mov x0, sp |
917 | mov x1, #BAD_SYNC | 918 | mov x1, #BAD_SYNC |
918 | mov x2, x25 | 919 | mov x2, x25 |
@@ -925,13 +926,13 @@ el0_irq: | |||
925 | kernel_entry 0 | 926 | kernel_entry 0 |
926 | el0_irq_naked: | 927 | el0_irq_naked: |
927 | gic_prio_irq_setup pmr=x20, tmp=x0 | 928 | gic_prio_irq_setup pmr=x20, tmp=x0 |
929 | ct_user_exit_irqoff | ||
928 | enable_da_f | 930 | enable_da_f |
929 | 931 | ||
930 | #ifdef CONFIG_TRACE_IRQFLAGS | 932 | #ifdef CONFIG_TRACE_IRQFLAGS |
931 | bl trace_hardirqs_off | 933 | bl trace_hardirqs_off |
932 | #endif | 934 | #endif |
933 | 935 | ||
934 | ct_user_exit | ||
935 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR | 936 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR |
936 | tbz x22, #55, 1f | 937 | tbz x22, #55, 1f |
937 | bl do_el0_irq_bp_hardening | 938 | bl do_el0_irq_bp_hardening |
@@ -958,13 +959,14 @@ ENDPROC(el1_error) | |||
958 | el0_error: | 959 | el0_error: |
959 | kernel_entry 0 | 960 | kernel_entry 0 |
960 | el0_error_naked: | 961 | el0_error_naked: |
961 | mrs x1, esr_el1 | 962 | mrs x25, esr_el1 |
962 | gic_prio_kentry_setup tmp=x2 | 963 | gic_prio_kentry_setup tmp=x2 |
964 | ct_user_exit_irqoff | ||
963 | enable_dbg | 965 | enable_dbg |
964 | mov x0, sp | 966 | mov x0, sp |
967 | mov x1, x25 | ||
965 | bl do_serror | 968 | bl do_serror |
966 | enable_da_f | 969 | enable_da_f |
967 | ct_user_exit | ||
968 | b ret_to_user | 970 | b ret_to_user |
969 | ENDPROC(el0_error) | 971 | ENDPROC(el0_error) |
970 | 972 | ||
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 949b001a73bb..989b1944cb71 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -102,6 +102,8 @@ pe_header: | |||
102 | * x23 stext() .. start_kernel() physical misalignment/KASLR offset | 102 | * x23 stext() .. start_kernel() physical misalignment/KASLR offset |
103 | * x28 __create_page_tables() callee preserved temp register | 103 | * x28 __create_page_tables() callee preserved temp register |
104 | * x19/x20 __primary_switch() callee preserved temp registers | 104 | * x19/x20 __primary_switch() callee preserved temp registers |
105 | * x24 __primary_switch() .. relocate_kernel() | ||
106 | * current RELR displacement | ||
105 | */ | 107 | */ |
106 | ENTRY(stext) | 108 | ENTRY(stext) |
107 | bl preserve_boot_args | 109 | bl preserve_boot_args |
@@ -724,14 +726,22 @@ __secondary_switched: | |||
724 | 726 | ||
725 | adr_l x0, secondary_data | 727 | adr_l x0, secondary_data |
726 | ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack | 728 | ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack |
729 | cbz x1, __secondary_too_slow | ||
727 | mov sp, x1 | 730 | mov sp, x1 |
728 | ldr x2, [x0, #CPU_BOOT_TASK] | 731 | ldr x2, [x0, #CPU_BOOT_TASK] |
732 | cbz x2, __secondary_too_slow | ||
729 | msr sp_el0, x2 | 733 | msr sp_el0, x2 |
730 | mov x29, #0 | 734 | mov x29, #0 |
731 | mov x30, #0 | 735 | mov x30, #0 |
732 | b secondary_start_kernel | 736 | b secondary_start_kernel |
733 | ENDPROC(__secondary_switched) | 737 | ENDPROC(__secondary_switched) |
734 | 738 | ||
739 | __secondary_too_slow: | ||
740 | wfe | ||
741 | wfi | ||
742 | b __secondary_too_slow | ||
743 | ENDPROC(__secondary_too_slow) | ||
744 | |||
735 | /* | 745 | /* |
736 | * The booting CPU updates the failed status @__early_cpu_boot_status, | 746 | * The booting CPU updates the failed status @__early_cpu_boot_status, |
737 | * with MMU turned off. | 747 | * with MMU turned off. |
@@ -834,14 +844,93 @@ __relocate_kernel: | |||
834 | 844 | ||
835 | 0: cmp x9, x10 | 845 | 0: cmp x9, x10 |
836 | b.hs 1f | 846 | b.hs 1f |
837 | ldp x11, x12, [x9], #24 | 847 | ldp x12, x13, [x9], #24 |
838 | ldr x13, [x9, #-8] | 848 | ldr x14, [x9, #-8] |
839 | cmp w12, #R_AARCH64_RELATIVE | 849 | cmp w13, #R_AARCH64_RELATIVE |
840 | b.ne 0b | 850 | b.ne 0b |
841 | add x13, x13, x23 // relocate | 851 | add x14, x14, x23 // relocate |
842 | str x13, [x11, x23] | 852 | str x14, [x12, x23] |
843 | b 0b | 853 | b 0b |
844 | 1: ret | 854 | |
855 | 1: | ||
856 | #ifdef CONFIG_RELR | ||
857 | /* | ||
858 | * Apply RELR relocations. | ||
859 | * | ||
860 | * RELR is a compressed format for storing relative relocations. The | ||
861 | * encoded sequence of entries looks like: | ||
862 | * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ] | ||
863 | * | ||
864 | * i.e. start with an address, followed by any number of bitmaps. The | ||
865 | * address entry encodes 1 relocation. The subsequent bitmap entries | ||
866 | * encode up to 63 relocations each, at subsequent offsets following | ||
867 | * the last address entry. | ||
868 | * | ||
869 | * The bitmap entries must have 1 in the least significant bit. The | ||
870 | * assumption here is that an address cannot have 1 in lsb. Odd | ||
871 | * addresses are not supported. Any odd addresses are stored in the RELA | ||
872 | * section, which is handled above. | ||
873 | * | ||
874 | * Excluding the least significant bit in the bitmap, each non-zero | ||
875 | * bit in the bitmap represents a relocation to be applied to | ||
876 | * a corresponding machine word that follows the base address | ||
877 | * word. The second least significant bit represents the machine | ||
878 | * word immediately following the initial address, and each bit | ||
879 | * that follows represents the next word, in linear order. As such, | ||
880 | * a single bitmap can encode up to 63 relocations in a 64-bit object. | ||
881 | * | ||
882 | * In this implementation we store the address of the next RELR table | ||
883 | * entry in x9, the address being relocated by the current address or | ||
884 | * bitmap entry in x13 and the address being relocated by the current | ||
885 | * bit in x14. | ||
886 | * | ||
887 | * Because addends are stored in place in the binary, RELR relocations | ||
888 | * cannot be applied idempotently. We use x24 to keep track of the | ||
889 | * currently applied displacement so that we can correctly relocate if | ||
890 | * __relocate_kernel is called twice with non-zero displacements (i.e. | ||
891 | * if there is both a physical misalignment and a KASLR displacement). | ||
892 | */ | ||
893 | ldr w9, =__relr_offset // offset to reloc table | ||
894 | ldr w10, =__relr_size // size of reloc table | ||
895 | add x9, x9, x11 // __va(.relr) | ||
896 | add x10, x9, x10 // __va(.relr) + sizeof(.relr) | ||
897 | |||
898 | sub x15, x23, x24 // delta from previous offset | ||
899 | cbz x15, 7f // nothing to do if unchanged | ||
900 | mov x24, x23 // save new offset | ||
901 | |||
902 | 2: cmp x9, x10 | ||
903 | b.hs 7f | ||
904 | ldr x11, [x9], #8 | ||
905 | tbnz x11, #0, 3f // branch to handle bitmaps | ||
906 | add x13, x11, x23 | ||
907 | ldr x12, [x13] // relocate address entry | ||
908 | add x12, x12, x15 | ||
909 | str x12, [x13], #8 // adjust to start of bitmap | ||
910 | b 2b | ||
911 | |||
912 | 3: mov x14, x13 | ||
913 | 4: lsr x11, x11, #1 | ||
914 | cbz x11, 6f | ||
915 | tbz x11, #0, 5f // skip bit if not set | ||
916 | ldr x12, [x14] // relocate bit | ||
917 | add x12, x12, x15 | ||
918 | str x12, [x14] | ||
919 | |||
920 | 5: add x14, x14, #8 // move to next bit's address | ||
921 | b 4b | ||
922 | |||
923 | 6: /* | ||
924 | * Move to the next bitmap's address. 8 is the word size, and 63 is the | ||
925 | * number of significant bits in a bitmap entry. | ||
926 | */ | ||
927 | add x13, x13, #(8 * 63) | ||
928 | b 2b | ||
929 | |||
930 | 7: | ||
931 | #endif | ||
932 | ret | ||
933 | |||
845 | ENDPROC(__relocate_kernel) | 934 | ENDPROC(__relocate_kernel) |
846 | #endif | 935 | #endif |
847 | 936 | ||
@@ -854,6 +943,9 @@ __primary_switch: | |||
854 | adrp x1, init_pg_dir | 943 | adrp x1, init_pg_dir |
855 | bl __enable_mmu | 944 | bl __enable_mmu |
856 | #ifdef CONFIG_RELOCATABLE | 945 | #ifdef CONFIG_RELOCATABLE |
946 | #ifdef CONFIG_RELR | ||
947 | mov x24, #0 // no RELR displacement yet | ||
948 | #endif | ||
857 | bl __relocate_kernel | 949 | bl __relocate_kernel |
858 | #ifdef CONFIG_RANDOMIZE_BASE | 950 | #ifdef CONFIG_RANDOMIZE_BASE |
859 | ldr x8, =__primary_switched | 951 | ldr x8, =__primary_switched |
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h new file mode 100644 index 000000000000..25a2a9b479c2 --- /dev/null +++ b/arch/arm64/kernel/image-vars.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-only */ | ||
2 | /* | ||
3 | * Linker script variables to be set after section resolution, as | ||
4 | * ld.lld does not like variables assigned before SECTIONS is processed. | ||
5 | */ | ||
6 | #ifndef __ARM64_KERNEL_IMAGE_VARS_H | ||
7 | #define __ARM64_KERNEL_IMAGE_VARS_H | ||
8 | |||
9 | #ifndef LINKER_SCRIPT | ||
10 | #error This file should only be included in vmlinux.lds.S | ||
11 | #endif | ||
12 | |||
13 | #ifdef CONFIG_EFI | ||
14 | |||
15 | __efistub_stext_offset = stext - _text; | ||
16 | |||
17 | /* | ||
18 | * The EFI stub has its own symbol namespace prefixed by __efistub_, to | ||
19 | * isolate it from the kernel proper. The following symbols are legally | ||
20 | * accessed by the stub, so provide some aliases to make them accessible. | ||
21 | * Only include data symbols here, or text symbols of functions that are | ||
22 | * guaranteed to be safe when executed at another offset than they were | ||
23 | * linked at. The routines below are all implemented in assembler in a | ||
24 | * position independent manner | ||
25 | */ | ||
26 | __efistub_memcmp = __pi_memcmp; | ||
27 | __efistub_memchr = __pi_memchr; | ||
28 | __efistub_memcpy = __pi_memcpy; | ||
29 | __efistub_memmove = __pi_memmove; | ||
30 | __efistub_memset = __pi_memset; | ||
31 | __efistub_strlen = __pi_strlen; | ||
32 | __efistub_strnlen = __pi_strnlen; | ||
33 | __efistub_strcmp = __pi_strcmp; | ||
34 | __efistub_strncmp = __pi_strncmp; | ||
35 | __efistub_strrchr = __pi_strrchr; | ||
36 | __efistub___flush_dcache_area = __pi___flush_dcache_area; | ||
37 | |||
38 | #ifdef CONFIG_KASAN | ||
39 | __efistub___memcpy = __pi_memcpy; | ||
40 | __efistub___memmove = __pi_memmove; | ||
41 | __efistub___memset = __pi_memset; | ||
42 | #endif | ||
43 | |||
44 | __efistub__text = _text; | ||
45 | __efistub__end = _end; | ||
46 | __efistub__edata = _edata; | ||
47 | __efistub_screen_info = screen_info; | ||
48 | |||
49 | #endif | ||
50 | |||
51 | #endif /* __ARM64_KERNEL_IMAGE_VARS_H */ | ||
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 2b85c0d6fa3d..c7d38c660372 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h | |||
@@ -65,46 +65,4 @@ | |||
65 | DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ | 65 | DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ |
66 | DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); | 66 | DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); |
67 | 67 | ||
68 | #ifdef CONFIG_EFI | ||
69 | |||
70 | /* | ||
71 | * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol: | ||
72 | * https://github.com/ClangBuiltLinux/linux/issues/561 | ||
73 | */ | ||
74 | __efistub_stext_offset = ABSOLUTE(stext - _text); | ||
75 | |||
76 | /* | ||
77 | * The EFI stub has its own symbol namespace prefixed by __efistub_, to | ||
78 | * isolate it from the kernel proper. The following symbols are legally | ||
79 | * accessed by the stub, so provide some aliases to make them accessible. | ||
80 | * Only include data symbols here, or text symbols of functions that are | ||
81 | * guaranteed to be safe when executed at another offset than they were | ||
82 | * linked at. The routines below are all implemented in assembler in a | ||
83 | * position independent manner | ||
84 | */ | ||
85 | __efistub_memcmp = __pi_memcmp; | ||
86 | __efistub_memchr = __pi_memchr; | ||
87 | __efistub_memcpy = __pi_memcpy; | ||
88 | __efistub_memmove = __pi_memmove; | ||
89 | __efistub_memset = __pi_memset; | ||
90 | __efistub_strlen = __pi_strlen; | ||
91 | __efistub_strnlen = __pi_strnlen; | ||
92 | __efistub_strcmp = __pi_strcmp; | ||
93 | __efistub_strncmp = __pi_strncmp; | ||
94 | __efistub_strrchr = __pi_strrchr; | ||
95 | __efistub___flush_dcache_area = __pi___flush_dcache_area; | ||
96 | |||
97 | #ifdef CONFIG_KASAN | ||
98 | __efistub___memcpy = __pi_memcpy; | ||
99 | __efistub___memmove = __pi_memmove; | ||
100 | __efistub___memset = __pi_memset; | ||
101 | #endif | ||
102 | |||
103 | __efistub__text = _text; | ||
104 | __efistub__end = _end; | ||
105 | __efistub__edata = _edata; | ||
106 | __efistub_screen_info = screen_info; | ||
107 | |||
108 | #endif | ||
109 | |||
110 | #endif /* __ARM64_KERNEL_IMAGE_H */ | 68 | #endif /* __ARM64_KERNEL_IMAGE_H */ |
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 84b059ed04fc..d801a7094076 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #define AARCH64_INSN_N_BIT BIT(22) | 26 | #define AARCH64_INSN_N_BIT BIT(22) |
27 | #define AARCH64_INSN_LSL_12 BIT(22) | 27 | #define AARCH64_INSN_LSL_12 BIT(22) |
28 | 28 | ||
29 | static int aarch64_insn_encoding_class[] = { | 29 | static const int aarch64_insn_encoding_class[] = { |
30 | AARCH64_INSN_CLS_UNKNOWN, | 30 | AARCH64_INSN_CLS_UNKNOWN, |
31 | AARCH64_INSN_CLS_UNKNOWN, | 31 | AARCH64_INSN_CLS_UNKNOWN, |
32 | AARCH64_INSN_CLS_UNKNOWN, | 32 | AARCH64_INSN_CLS_UNKNOWN, |
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 5a59f7567f9c..416f537bf614 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c | |||
@@ -62,9 +62,6 @@ out: | |||
62 | return default_cmdline; | 62 | return default_cmdline; |
63 | } | 63 | } |
64 | 64 | ||
65 | extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, | ||
66 | pgprot_t prot); | ||
67 | |||
68 | /* | 65 | /* |
69 | * This routine will be executed with the kernel mapped at its default virtual | 66 | * This routine will be executed with the kernel mapped at its default virtual |
70 | * address, and if it returns successfully, the kernel will be remapped, and | 67 | * address, and if it returns successfully, the kernel will be remapped, and |
@@ -93,7 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
93 | * attempt at mapping the FDT in setup_machine() | 90 | * attempt at mapping the FDT in setup_machine() |
94 | */ | 91 | */ |
95 | early_fixmap_init(); | 92 | early_fixmap_init(); |
96 | fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); | 93 | fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); |
97 | if (!fdt) | 94 | if (!fdt) |
98 | return 0; | 95 | return 0; |
99 | 96 | ||
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c index 2514fd6f12cb..29a9428486a5 100644 --- a/arch/arm64/kernel/kexec_image.c +++ b/arch/arm64/kernel/kexec_image.c | |||
@@ -84,7 +84,7 @@ static void *image_load(struct kimage *image, | |||
84 | 84 | ||
85 | kbuf.buffer = kernel; | 85 | kbuf.buffer = kernel; |
86 | kbuf.bufsz = kernel_len; | 86 | kbuf.bufsz = kernel_len; |
87 | kbuf.mem = 0; | 87 | kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; |
88 | kbuf.memsz = le64_to_cpu(h->image_size); | 88 | kbuf.memsz = le64_to_cpu(h->image_size); |
89 | text_offset = le64_to_cpu(h->text_offset); | 89 | text_offset = le64_to_cpu(h->text_offset); |
90 | kbuf.buf_align = MIN_KIMG_ALIGN; | 90 | kbuf.buf_align = MIN_KIMG_ALIGN; |
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 58871333737a..7b08bf9499b6 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #define FDT_PROP_INITRD_END "linux,initrd-end" | 27 | #define FDT_PROP_INITRD_END "linux,initrd-end" |
28 | #define FDT_PROP_BOOTARGS "bootargs" | 28 | #define FDT_PROP_BOOTARGS "bootargs" |
29 | #define FDT_PROP_KASLR_SEED "kaslr-seed" | 29 | #define FDT_PROP_KASLR_SEED "kaslr-seed" |
30 | #define FDT_PROP_RNG_SEED "rng-seed" | ||
31 | #define RNG_SEED_SIZE 128 | ||
30 | 32 | ||
31 | const struct kexec_file_ops * const kexec_file_loaders[] = { | 33 | const struct kexec_file_ops * const kexec_file_loaders[] = { |
32 | &kexec_image_ops, | 34 | &kexec_image_ops, |
@@ -102,6 +104,19 @@ static int setup_dtb(struct kimage *image, | |||
102 | FDT_PROP_KASLR_SEED); | 104 | FDT_PROP_KASLR_SEED); |
103 | } | 105 | } |
104 | 106 | ||
107 | /* add rng-seed */ | ||
108 | if (rng_is_initialized()) { | ||
109 | u8 rng_seed[RNG_SEED_SIZE]; | ||
110 | get_random_bytes(rng_seed, RNG_SEED_SIZE); | ||
111 | ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed, | ||
112 | RNG_SEED_SIZE); | ||
113 | if (ret) | ||
114 | goto out; | ||
115 | } else { | ||
116 | pr_notice("RNG is not initialised: omitting \"%s\" property\n", | ||
117 | FDT_PROP_RNG_SEED); | ||
118 | } | ||
119 | |||
105 | out: | 120 | out: |
106 | if (ret) | 121 | if (ret) |
107 | return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL; | 122 | return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL; |
@@ -110,7 +125,8 @@ out: | |||
110 | } | 125 | } |
111 | 126 | ||
112 | /* | 127 | /* |
113 | * More space needed so that we can add initrd, bootargs and kaslr-seed. | 128 | * More space needed so that we can add initrd, bootargs, kaslr-seed, and |
129 | * rng-seed. | ||
114 | */ | 130 | */ |
115 | #define DTB_EXTRA_SPACE 0x1000 | 131 | #define DTB_EXTRA_SPACE 0x1000 |
116 | 132 | ||
@@ -177,7 +193,7 @@ int load_other_segments(struct kimage *image, | |||
177 | if (initrd) { | 193 | if (initrd) { |
178 | kbuf.buffer = initrd; | 194 | kbuf.buffer = initrd; |
179 | kbuf.bufsz = initrd_len; | 195 | kbuf.bufsz = initrd_len; |
180 | kbuf.mem = 0; | 196 | kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; |
181 | kbuf.memsz = initrd_len; | 197 | kbuf.memsz = initrd_len; |
182 | kbuf.buf_align = 0; | 198 | kbuf.buf_align = 0; |
183 | /* within 1GB-aligned window of up to 32GB in size */ | 199 | /* within 1GB-aligned window of up to 32GB in size */ |
@@ -204,7 +220,7 @@ int load_other_segments(struct kimage *image, | |||
204 | dtb_len = fdt_totalsize(dtb); | 220 | dtb_len = fdt_totalsize(dtb); |
205 | kbuf.buffer = dtb; | 221 | kbuf.buffer = dtb; |
206 | kbuf.bufsz = dtb_len; | 222 | kbuf.bufsz = dtb_len; |
207 | kbuf.mem = 0; | 223 | kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; |
208 | kbuf.memsz = dtb_len; | 224 | kbuf.memsz = dtb_len; |
209 | /* not across 2MB boundary */ | 225 | /* not across 2MB boundary */ |
210 | kbuf.buf_align = SZ_2M; | 226 | kbuf.buf_align = SZ_2M; |
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index 044c0ae4d6c8..b182442b87a3 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c | |||
@@ -302,7 +302,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, | |||
302 | /* sort by type, symbol index and addend */ | 302 | /* sort by type, symbol index and addend */ |
303 | sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL); | 303 | sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL); |
304 | 304 | ||
305 | if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) | 305 | if (!str_has_prefix(secstrings + dstsec->sh_name, ".init")) |
306 | core_plts += count_plts(syms, rels, numrels, | 306 | core_plts += count_plts(syms, rels, numrels, |
307 | sechdrs[i].sh_info, dstsec); | 307 | sechdrs[i].sh_info, dstsec); |
308 | else | 308 | else |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 96e90e270042..a0b4f1bca491 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/perf/arm_pmu.h> | 20 | #include <linux/perf/arm_pmu.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/smp.h> | ||
22 | 23 | ||
23 | /* ARMv8 Cortex-A53 specific event types. */ | 24 | /* ARMv8 Cortex-A53 specific event types. */ |
24 | #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 | 25 | #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 |
@@ -157,7 +158,6 @@ armv8pmu_events_sysfs_show(struct device *dev, | |||
157 | return sprintf(page, "event=0x%03llx\n", pmu_attr->id); | 158 | return sprintf(page, "event=0x%03llx\n", pmu_attr->id); |
158 | } | 159 | } |
159 | 160 | ||
160 | #define ARMV8_EVENT_ATTR_RESOLVE(m) #m | ||
161 | #define ARMV8_EVENT_ATTR(name, config) \ | 161 | #define ARMV8_EVENT_ATTR(name, config) \ |
162 | PMU_EVENT_ATTR(name, armv8_event_attr_##name, \ | 162 | PMU_EVENT_ATTR(name, armv8_event_attr_##name, \ |
163 | config, armv8pmu_events_sysfs_show) | 163 | config, armv8pmu_events_sysfs_show) |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f674f28df663..03689c0beb34 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/stddef.h> | 21 | #include <linux/stddef.h> |
22 | #include <linux/sysctl.h> | ||
22 | #include <linux/unistd.h> | 23 | #include <linux/unistd.h> |
23 | #include <linux/user.h> | 24 | #include <linux/user.h> |
24 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
@@ -38,6 +39,7 @@ | |||
38 | #include <trace/events/power.h> | 39 | #include <trace/events/power.h> |
39 | #include <linux/percpu.h> | 40 | #include <linux/percpu.h> |
40 | #include <linux/thread_info.h> | 41 | #include <linux/thread_info.h> |
42 | #include <linux/prctl.h> | ||
41 | 43 | ||
42 | #include <asm/alternative.h> | 44 | #include <asm/alternative.h> |
43 | #include <asm/arch_gicv3.h> | 45 | #include <asm/arch_gicv3.h> |
@@ -307,11 +309,18 @@ static void tls_thread_flush(void) | |||
307 | } | 309 | } |
308 | } | 310 | } |
309 | 311 | ||
312 | static void flush_tagged_addr_state(void) | ||
313 | { | ||
314 | if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) | ||
315 | clear_thread_flag(TIF_TAGGED_ADDR); | ||
316 | } | ||
317 | |||
310 | void flush_thread(void) | 318 | void flush_thread(void) |
311 | { | 319 | { |
312 | fpsimd_flush_thread(); | 320 | fpsimd_flush_thread(); |
313 | tls_thread_flush(); | 321 | tls_thread_flush(); |
314 | flush_ptrace_hw_breakpoint(current); | 322 | flush_ptrace_hw_breakpoint(current); |
323 | flush_tagged_addr_state(); | ||
315 | } | 324 | } |
316 | 325 | ||
317 | void release_thread(struct task_struct *dead_task) | 326 | void release_thread(struct task_struct *dead_task) |
@@ -565,3 +574,70 @@ void arch_setup_new_exec(void) | |||
565 | 574 | ||
566 | ptrauth_thread_init_user(current); | 575 | ptrauth_thread_init_user(current); |
567 | } | 576 | } |
577 | |||
578 | #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI | ||
579 | /* | ||
580 | * Control the relaxed ABI allowing tagged user addresses into the kernel. | ||
581 | */ | ||
582 | static unsigned int tagged_addr_disabled; | ||
583 | |||
584 | long set_tagged_addr_ctrl(unsigned long arg) | ||
585 | { | ||
586 | if (is_compat_task()) | ||
587 | return -EINVAL; | ||
588 | if (arg & ~PR_TAGGED_ADDR_ENABLE) | ||
589 | return -EINVAL; | ||
590 | |||
591 | /* | ||
592 | * Do not allow the enabling of the tagged address ABI if globally | ||
593 | * disabled via sysctl abi.tagged_addr_disabled. | ||
594 | */ | ||
595 | if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) | ||
596 | return -EINVAL; | ||
597 | |||
598 | update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); | ||
599 | |||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | long get_tagged_addr_ctrl(void) | ||
604 | { | ||
605 | if (is_compat_task()) | ||
606 | return -EINVAL; | ||
607 | |||
608 | if (test_thread_flag(TIF_TAGGED_ADDR)) | ||
609 | return PR_TAGGED_ADDR_ENABLE; | ||
610 | |||
611 | return 0; | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | * Global sysctl to disable the tagged user addresses support. This control | ||
616 | * only prevents the tagged address ABI enabling via prctl() and does not | ||
617 | * disable it for tasks that already opted in to the relaxed ABI. | ||
618 | */ | ||
619 | static int zero; | ||
620 | static int one = 1; | ||
621 | |||
622 | static struct ctl_table tagged_addr_sysctl_table[] = { | ||
623 | { | ||
624 | .procname = "tagged_addr_disabled", | ||
625 | .mode = 0644, | ||
626 | .data = &tagged_addr_disabled, | ||
627 | .maxlen = sizeof(int), | ||
628 | .proc_handler = proc_dointvec_minmax, | ||
629 | .extra1 = &zero, | ||
630 | .extra2 = &one, | ||
631 | }, | ||
632 | { } | ||
633 | }; | ||
634 | |||
635 | static int __init tagged_addr_init(void) | ||
636 | { | ||
637 | if (!register_sysctl("abi", tagged_addr_sysctl_table)) | ||
638 | return -EINVAL; | ||
639 | return 0; | ||
640 | } | ||
641 | |||
642 | core_initcall(tagged_addr_init); | ||
643 | #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ | ||
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 85ee7d07889e..c9f72b2665f1 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -46,6 +46,11 @@ static int cpu_psci_cpu_boot(unsigned int cpu) | |||
46 | } | 46 | } |
47 | 47 | ||
48 | #ifdef CONFIG_HOTPLUG_CPU | 48 | #ifdef CONFIG_HOTPLUG_CPU |
49 | static bool cpu_psci_cpu_can_disable(unsigned int cpu) | ||
50 | { | ||
51 | return !psci_tos_resident_on(cpu); | ||
52 | } | ||
53 | |||
49 | static int cpu_psci_cpu_disable(unsigned int cpu) | 54 | static int cpu_psci_cpu_disable(unsigned int cpu) |
50 | { | 55 | { |
51 | /* Fail early if we don't have CPU_OFF support */ | 56 | /* Fail early if we don't have CPU_OFF support */ |
@@ -105,14 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu) | |||
105 | 110 | ||
106 | const struct cpu_operations cpu_psci_ops = { | 111 | const struct cpu_operations cpu_psci_ops = { |
107 | .name = "psci", | 112 | .name = "psci", |
108 | #ifdef CONFIG_CPU_IDLE | ||
109 | .cpu_init_idle = psci_cpu_init_idle, | ||
110 | .cpu_suspend = psci_cpu_suspend_enter, | ||
111 | #endif | ||
112 | .cpu_init = cpu_psci_cpu_init, | 113 | .cpu_init = cpu_psci_cpu_init, |
113 | .cpu_prepare = cpu_psci_cpu_prepare, | 114 | .cpu_prepare = cpu_psci_cpu_prepare, |
114 | .cpu_boot = cpu_psci_cpu_boot, | 115 | .cpu_boot = cpu_psci_cpu_boot, |
115 | #ifdef CONFIG_HOTPLUG_CPU | 116 | #ifdef CONFIG_HOTPLUG_CPU |
117 | .cpu_can_disable = cpu_psci_cpu_can_disable, | ||
116 | .cpu_disable = cpu_psci_cpu_disable, | 118 | .cpu_disable = cpu_psci_cpu_disable, |
117 | .cpu_die = cpu_psci_cpu_die, | 119 | .cpu_die = cpu_psci_cpu_die, |
118 | .cpu_kill = cpu_psci_cpu_kill, | 120 | .cpu_kill = cpu_psci_cpu_kill, |
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 3cf3b135027e..21176d02e21a 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -870,7 +870,7 @@ static int sve_set(struct task_struct *target, | |||
870 | goto out; | 870 | goto out; |
871 | 871 | ||
872 | /* | 872 | /* |
873 | * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by | 873 | * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by |
874 | * sve_set_vector_length(), which will also validate them for us: | 874 | * sve_set_vector_length(), which will also validate them for us: |
875 | */ | 875 | */ |
876 | ret = sve_set_vector_length(target, header.vl, | 876 | ret = sve_set_vector_length(target, header.vl, |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 9c4bad7d7131..56f664561754 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -170,9 +170,13 @@ static void __init smp_build_mpidr_hash(void) | |||
170 | 170 | ||
171 | static void __init setup_machine_fdt(phys_addr_t dt_phys) | 171 | static void __init setup_machine_fdt(phys_addr_t dt_phys) |
172 | { | 172 | { |
173 | void *dt_virt = fixmap_remap_fdt(dt_phys); | 173 | int size; |
174 | void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); | ||
174 | const char *name; | 175 | const char *name; |
175 | 176 | ||
177 | if (dt_virt) | ||
178 | memblock_reserve(dt_phys, size); | ||
179 | |||
176 | if (!dt_virt || !early_init_dt_scan(dt_virt)) { | 180 | if (!dt_virt || !early_init_dt_scan(dt_virt)) { |
177 | pr_crit("\n" | 181 | pr_crit("\n" |
178 | "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n" | 182 | "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n" |
@@ -184,6 +188,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) | |||
184 | cpu_relax(); | 188 | cpu_relax(); |
185 | } | 189 | } |
186 | 190 | ||
191 | /* Early fixups are done, map the FDT as read-only now */ | ||
192 | fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); | ||
193 | |||
187 | name = of_flat_dt_get_machine_name(); | 194 | name = of_flat_dt_get_machine_name(); |
188 | if (!name) | 195 | if (!name) |
189 | return; | 196 | return; |
@@ -357,6 +364,15 @@ void __init setup_arch(char **cmdline_p) | |||
357 | } | 364 | } |
358 | } | 365 | } |
359 | 366 | ||
367 | static inline bool cpu_can_disable(unsigned int cpu) | ||
368 | { | ||
369 | #ifdef CONFIG_HOTPLUG_CPU | ||
370 | if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable) | ||
371 | return cpu_ops[cpu]->cpu_can_disable(cpu); | ||
372 | #endif | ||
373 | return false; | ||
374 | } | ||
375 | |||
360 | static int __init topology_init(void) | 376 | static int __init topology_init(void) |
361 | { | 377 | { |
362 | int i; | 378 | int i; |
@@ -366,7 +382,7 @@ static int __init topology_init(void) | |||
366 | 382 | ||
367 | for_each_possible_cpu(i) { | 383 | for_each_possible_cpu(i) { |
368 | struct cpu *cpu = &per_cpu(cpu_data.cpu, i); | 384 | struct cpu *cpu = &per_cpu(cpu_data.cpu, i); |
369 | cpu->hotpluggable = 1; | 385 | cpu->hotpluggable = cpu_can_disable(i); |
370 | register_cpu(cpu, i); | 386 | register_cpu(cpu, i); |
371 | } | 387 | } |
372 | 388 | ||
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 018a33e01b0e..dc9fe879c279 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -123,7 +123,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
123 | * time out. | 123 | * time out. |
124 | */ | 124 | */ |
125 | wait_for_completion_timeout(&cpu_running, | 125 | wait_for_completion_timeout(&cpu_running, |
126 | msecs_to_jiffies(1000)); | 126 | msecs_to_jiffies(5000)); |
127 | 127 | ||
128 | if (!cpu_online(cpu)) { | 128 | if (!cpu_online(cpu)) { |
129 | pr_crit("CPU%u: failed to come online\n", cpu); | 129 | pr_crit("CPU%u: failed to come online\n", cpu); |
@@ -136,6 +136,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
136 | 136 | ||
137 | secondary_data.task = NULL; | 137 | secondary_data.task = NULL; |
138 | secondary_data.stack = NULL; | 138 | secondary_data.stack = NULL; |
139 | __flush_dcache_area(&secondary_data, sizeof(secondary_data)); | ||
139 | status = READ_ONCE(secondary_data.status); | 140 | status = READ_ONCE(secondary_data.status); |
140 | if (ret && status) { | 141 | if (ret && status) { |
141 | 142 | ||
@@ -146,6 +147,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
146 | default: | 147 | default: |
147 | pr_err("CPU%u: failed in unknown state : 0x%lx\n", | 148 | pr_err("CPU%u: failed in unknown state : 0x%lx\n", |
148 | cpu, status); | 149 | cpu, status); |
150 | cpus_stuck_in_kernel++; | ||
149 | break; | 151 | break; |
150 | case CPU_KILL_ME: | 152 | case CPU_KILL_ME: |
151 | if (!op_cpu_kill(cpu)) { | 153 | if (!op_cpu_kill(cpu)) { |
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 76c2739ba8a4..c8a3fee00c11 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/smp_plat.h> | 19 | #include <asm/smp_plat.h> |
20 | 20 | ||
21 | extern void secondary_holding_pen(void); | 21 | extern void secondary_holding_pen(void); |
22 | volatile unsigned long __section(".mmuoff.data.read") | 22 | volatile unsigned long __section(.mmuoff.data.read) |
23 | secondary_holding_pen_release = INVALID_HWID; | 23 | secondary_holding_pen_release = INVALID_HWID; |
24 | 24 | ||
25 | static phys_addr_t cpu_release_addr[NR_CPUS]; | 25 | static phys_addr_t cpu_release_addr[NR_CPUS]; |
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 0825c4a856e3..fa9528dfd0ce 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c | |||
@@ -14,250 +14,13 @@ | |||
14 | #include <linux/acpi.h> | 14 | #include <linux/acpi.h> |
15 | #include <linux/arch_topology.h> | 15 | #include <linux/arch_topology.h> |
16 | #include <linux/cacheinfo.h> | 16 | #include <linux/cacheinfo.h> |
17 | #include <linux/cpu.h> | ||
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/init.h> | 17 | #include <linux/init.h> |
20 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
21 | #include <linux/node.h> | ||
22 | #include <linux/nodemask.h> | ||
23 | #include <linux/of.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/sched/topology.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/smp.h> | ||
28 | #include <linux/string.h> | ||
29 | 19 | ||
30 | #include <asm/cpu.h> | 20 | #include <asm/cpu.h> |
31 | #include <asm/cputype.h> | 21 | #include <asm/cputype.h> |
32 | #include <asm/topology.h> | 22 | #include <asm/topology.h> |
33 | 23 | ||
34 | static int __init get_cpu_for_node(struct device_node *node) | ||
35 | { | ||
36 | struct device_node *cpu_node; | ||
37 | int cpu; | ||
38 | |||
39 | cpu_node = of_parse_phandle(node, "cpu", 0); | ||
40 | if (!cpu_node) | ||
41 | return -1; | ||
42 | |||
43 | cpu = of_cpu_node_to_id(cpu_node); | ||
44 | if (cpu >= 0) | ||
45 | topology_parse_cpu_capacity(cpu_node, cpu); | ||
46 | else | ||
47 | pr_crit("Unable to find CPU node for %pOF\n", cpu_node); | ||
48 | |||
49 | of_node_put(cpu_node); | ||
50 | return cpu; | ||
51 | } | ||
52 | |||
53 | static int __init parse_core(struct device_node *core, int package_id, | ||
54 | int core_id) | ||
55 | { | ||
56 | char name[10]; | ||
57 | bool leaf = true; | ||
58 | int i = 0; | ||
59 | int cpu; | ||
60 | struct device_node *t; | ||
61 | |||
62 | do { | ||
63 | snprintf(name, sizeof(name), "thread%d", i); | ||
64 | t = of_get_child_by_name(core, name); | ||
65 | if (t) { | ||
66 | leaf = false; | ||
67 | cpu = get_cpu_for_node(t); | ||
68 | if (cpu >= 0) { | ||
69 | cpu_topology[cpu].package_id = package_id; | ||
70 | cpu_topology[cpu].core_id = core_id; | ||
71 | cpu_topology[cpu].thread_id = i; | ||
72 | } else { | ||
73 | pr_err("%pOF: Can't get CPU for thread\n", | ||
74 | t); | ||
75 | of_node_put(t); | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | of_node_put(t); | ||
79 | } | ||
80 | i++; | ||
81 | } while (t); | ||
82 | |||
83 | cpu = get_cpu_for_node(core); | ||
84 | if (cpu >= 0) { | ||
85 | if (!leaf) { | ||
86 | pr_err("%pOF: Core has both threads and CPU\n", | ||
87 | core); | ||
88 | return -EINVAL; | ||
89 | } | ||
90 | |||
91 | cpu_topology[cpu].package_id = package_id; | ||
92 | cpu_topology[cpu].core_id = core_id; | ||
93 | } else if (leaf) { | ||
94 | pr_err("%pOF: Can't get CPU for leaf core\n", core); | ||
95 | return -EINVAL; | ||
96 | } | ||
97 | |||
98 | return 0; | ||
99 | } | ||
100 | |||
101 | static int __init parse_cluster(struct device_node *cluster, int depth) | ||
102 | { | ||
103 | char name[10]; | ||
104 | bool leaf = true; | ||
105 | bool has_cores = false; | ||
106 | struct device_node *c; | ||
107 | static int package_id __initdata; | ||
108 | int core_id = 0; | ||
109 | int i, ret; | ||
110 | |||
111 | /* | ||
112 | * First check for child clusters; we currently ignore any | ||
113 | * information about the nesting of clusters and present the | ||
114 | * scheduler with a flat list of them. | ||
115 | */ | ||
116 | i = 0; | ||
117 | do { | ||
118 | snprintf(name, sizeof(name), "cluster%d", i); | ||
119 | c = of_get_child_by_name(cluster, name); | ||
120 | if (c) { | ||
121 | leaf = false; | ||
122 | ret = parse_cluster(c, depth + 1); | ||
123 | of_node_put(c); | ||
124 | if (ret != 0) | ||
125 | return ret; | ||
126 | } | ||
127 | i++; | ||
128 | } while (c); | ||
129 | |||
130 | /* Now check for cores */ | ||
131 | i = 0; | ||
132 | do { | ||
133 | snprintf(name, sizeof(name), "core%d", i); | ||
134 | c = of_get_child_by_name(cluster, name); | ||
135 | if (c) { | ||
136 | has_cores = true; | ||
137 | |||
138 | if (depth == 0) { | ||
139 | pr_err("%pOF: cpu-map children should be clusters\n", | ||
140 | c); | ||
141 | of_node_put(c); | ||
142 | return -EINVAL; | ||
143 | } | ||
144 | |||
145 | if (leaf) { | ||
146 | ret = parse_core(c, package_id, core_id++); | ||
147 | } else { | ||
148 | pr_err("%pOF: Non-leaf cluster with core %s\n", | ||
149 | cluster, name); | ||
150 | ret = -EINVAL; | ||
151 | } | ||
152 | |||
153 | of_node_put(c); | ||
154 | if (ret != 0) | ||
155 | return ret; | ||
156 | } | ||
157 | i++; | ||
158 | } while (c); | ||
159 | |||
160 | if (leaf && !has_cores) | ||
161 | pr_warn("%pOF: empty cluster\n", cluster); | ||
162 | |||
163 | if (leaf) | ||
164 | package_id++; | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static int __init parse_dt_topology(void) | ||
170 | { | ||
171 | struct device_node *cn, *map; | ||
172 | int ret = 0; | ||
173 | int cpu; | ||
174 | |||
175 | cn = of_find_node_by_path("/cpus"); | ||
176 | if (!cn) { | ||
177 | pr_err("No CPU information found in DT\n"); | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | /* | ||
182 | * When topology is provided cpu-map is essentially a root | ||
183 | * cluster with restricted subnodes. | ||
184 | */ | ||
185 | map = of_get_child_by_name(cn, "cpu-map"); | ||
186 | if (!map) | ||
187 | goto out; | ||
188 | |||
189 | ret = parse_cluster(map, 0); | ||
190 | if (ret != 0) | ||
191 | goto out_map; | ||
192 | |||
193 | topology_normalize_cpu_scale(); | ||
194 | |||
195 | /* | ||
196 | * Check that all cores are in the topology; the SMP code will | ||
197 | * only mark cores described in the DT as possible. | ||
198 | */ | ||
199 | for_each_possible_cpu(cpu) | ||
200 | if (cpu_topology[cpu].package_id == -1) | ||
201 | ret = -EINVAL; | ||
202 | |||
203 | out_map: | ||
204 | of_node_put(map); | ||
205 | out: | ||
206 | of_node_put(cn); | ||
207 | return ret; | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * cpu topology table | ||
212 | */ | ||
213 | struct cpu_topology cpu_topology[NR_CPUS]; | ||
214 | EXPORT_SYMBOL_GPL(cpu_topology); | ||
215 | |||
216 | const struct cpumask *cpu_coregroup_mask(int cpu) | ||
217 | { | ||
218 | const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); | ||
219 | |||
220 | /* Find the smaller of NUMA, core or LLC siblings */ | ||
221 | if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { | ||
222 | /* not numa in package, lets use the package siblings */ | ||
223 | core_mask = &cpu_topology[cpu].core_sibling; | ||
224 | } | ||
225 | if (cpu_topology[cpu].llc_id != -1) { | ||
226 | if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) | ||
227 | core_mask = &cpu_topology[cpu].llc_sibling; | ||
228 | } | ||
229 | |||
230 | return core_mask; | ||
231 | } | ||
232 | |||
233 | static void update_siblings_masks(unsigned int cpuid) | ||
234 | { | ||
235 | struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; | ||
236 | int cpu; | ||
237 | |||
238 | /* update core and thread sibling masks */ | ||
239 | for_each_online_cpu(cpu) { | ||
240 | cpu_topo = &cpu_topology[cpu]; | ||
241 | |||
242 | if (cpuid_topo->llc_id == cpu_topo->llc_id) { | ||
243 | cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); | ||
244 | cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); | ||
245 | } | ||
246 | |||
247 | if (cpuid_topo->package_id != cpu_topo->package_id) | ||
248 | continue; | ||
249 | |||
250 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
251 | cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); | ||
252 | |||
253 | if (cpuid_topo->core_id != cpu_topo->core_id) | ||
254 | continue; | ||
255 | |||
256 | cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); | ||
257 | cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); | ||
258 | } | ||
259 | } | ||
260 | |||
261 | void store_cpu_topology(unsigned int cpuid) | 24 | void store_cpu_topology(unsigned int cpuid) |
262 | { | 25 | { |
263 | struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; | 26 | struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; |
@@ -296,60 +59,31 @@ topology_populated: | |||
296 | update_siblings_masks(cpuid); | 59 | update_siblings_masks(cpuid); |
297 | } | 60 | } |
298 | 61 | ||
299 | static void clear_cpu_topology(int cpu) | 62 | #ifdef CONFIG_ACPI |
300 | { | 63 | static bool __init acpi_cpu_is_threaded(int cpu) |
301 | struct cpu_topology *cpu_topo = &cpu_topology[cpu]; | ||
302 | |||
303 | cpumask_clear(&cpu_topo->llc_sibling); | ||
304 | cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); | ||
305 | |||
306 | cpumask_clear(&cpu_topo->core_sibling); | ||
307 | cpumask_set_cpu(cpu, &cpu_topo->core_sibling); | ||
308 | cpumask_clear(&cpu_topo->thread_sibling); | ||
309 | cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); | ||
310 | } | ||
311 | |||
312 | static void __init reset_cpu_topology(void) | ||
313 | { | ||
314 | unsigned int cpu; | ||
315 | |||
316 | for_each_possible_cpu(cpu) { | ||
317 | struct cpu_topology *cpu_topo = &cpu_topology[cpu]; | ||
318 | |||
319 | cpu_topo->thread_id = -1; | ||
320 | cpu_topo->core_id = 0; | ||
321 | cpu_topo->package_id = -1; | ||
322 | cpu_topo->llc_id = -1; | ||
323 | |||
324 | clear_cpu_topology(cpu); | ||
325 | } | ||
326 | } | ||
327 | |||
328 | void remove_cpu_topology(unsigned int cpu) | ||
329 | { | 64 | { |
330 | int sibling; | 65 | int is_threaded = acpi_pptt_cpu_is_thread(cpu); |
331 | 66 | ||
332 | for_each_cpu(sibling, topology_core_cpumask(cpu)) | 67 | /* |
333 | cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); | 68 | * if the PPTT doesn't have thread information, assume a homogeneous |
334 | for_each_cpu(sibling, topology_sibling_cpumask(cpu)) | 69 | * machine and return the current CPU's thread state. |
335 | cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); | 70 | */ |
336 | for_each_cpu(sibling, topology_llc_cpumask(cpu)) | 71 | if (is_threaded < 0) |
337 | cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); | 72 | is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; |
338 | 73 | ||
339 | clear_cpu_topology(cpu); | 74 | return !!is_threaded; |
340 | } | 75 | } |
341 | 76 | ||
342 | #ifdef CONFIG_ACPI | ||
343 | /* | 77 | /* |
344 | * Propagate the topology information of the processor_topology_node tree to the | 78 | * Propagate the topology information of the processor_topology_node tree to the |
345 | * cpu_topology array. | 79 | * cpu_topology array. |
346 | */ | 80 | */ |
347 | static int __init parse_acpi_topology(void) | 81 | int __init parse_acpi_topology(void) |
348 | { | 82 | { |
349 | bool is_threaded; | ||
350 | int cpu, topology_id; | 83 | int cpu, topology_id; |
351 | 84 | ||
352 | is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; | 85 | if (acpi_disabled) |
86 | return 0; | ||
353 | 87 | ||
354 | for_each_possible_cpu(cpu) { | 88 | for_each_possible_cpu(cpu) { |
355 | int i, cache_id; | 89 | int i, cache_id; |
@@ -358,7 +92,7 @@ static int __init parse_acpi_topology(void) | |||
358 | if (topology_id < 0) | 92 | if (topology_id < 0) |
359 | return topology_id; | 93 | return topology_id; |
360 | 94 | ||
361 | if (is_threaded) { | 95 | if (acpi_cpu_is_threaded(cpu)) { |
362 | cpu_topology[cpu].thread_id = topology_id; | 96 | cpu_topology[cpu].thread_id = topology_id; |
363 | topology_id = find_acpi_cpu_topology(cpu, 1); | 97 | topology_id = find_acpi_cpu_topology(cpu, 1); |
364 | cpu_topology[cpu].core_id = topology_id; | 98 | cpu_topology[cpu].core_id = topology_id; |
@@ -384,24 +118,6 @@ static int __init parse_acpi_topology(void) | |||
384 | 118 | ||
385 | return 0; | 119 | return 0; |
386 | } | 120 | } |
387 | |||
388 | #else | ||
389 | static inline int __init parse_acpi_topology(void) | ||
390 | { | ||
391 | return -EINVAL; | ||
392 | } | ||
393 | #endif | 121 | #endif |
394 | 122 | ||
395 | void __init init_cpu_topology(void) | ||
396 | { | ||
397 | reset_cpu_topology(); | ||
398 | 123 | ||
399 | /* | ||
400 | * Discard anything that was parsed if we hit an error so we | ||
401 | * don't use partial information. | ||
402 | */ | ||
403 | if (!acpi_disabled && parse_acpi_topology()) | ||
404 | reset_cpu_topology(); | ||
405 | else if (of_have_populated_dt() && parse_dt_topology()) | ||
406 | reset_cpu_topology(); | ||
407 | } | ||
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index d3313797cca9..6e950908eb97 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -7,9 +7,11 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
10 | #include <linux/context_tracking.h> | ||
10 | #include <linux/signal.h> | 11 | #include <linux/signal.h> |
11 | #include <linux/personality.h> | 12 | #include <linux/personality.h> |
12 | #include <linux/kallsyms.h> | 13 | #include <linux/kallsyms.h> |
14 | #include <linux/kprobes.h> | ||
13 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
14 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
15 | #include <linux/hardirq.h> | 17 | #include <linux/hardirq.h> |
@@ -511,7 +513,7 @@ struct sys64_hook { | |||
511 | void (*handler)(unsigned int esr, struct pt_regs *regs); | 513 | void (*handler)(unsigned int esr, struct pt_regs *regs); |
512 | }; | 514 | }; |
513 | 515 | ||
514 | static struct sys64_hook sys64_hooks[] = { | 516 | static const struct sys64_hook sys64_hooks[] = { |
515 | { | 517 | { |
516 | .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK, | 518 | .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK, |
517 | .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL, | 519 | .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL, |
@@ -636,7 +638,7 @@ static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) | |||
636 | arm64_compat_skip_faulting_instruction(regs, 4); | 638 | arm64_compat_skip_faulting_instruction(regs, 4); |
637 | } | 639 | } |
638 | 640 | ||
639 | static struct sys64_hook cp15_32_hooks[] = { | 641 | static const struct sys64_hook cp15_32_hooks[] = { |
640 | { | 642 | { |
641 | .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK, | 643 | .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK, |
642 | .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ, | 644 | .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ, |
@@ -656,7 +658,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) | |||
656 | arm64_compat_skip_faulting_instruction(regs, 4); | 658 | arm64_compat_skip_faulting_instruction(regs, 4); |
657 | } | 659 | } |
658 | 660 | ||
659 | static struct sys64_hook cp15_64_hooks[] = { | 661 | static const struct sys64_hook cp15_64_hooks[] = { |
660 | { | 662 | { |
661 | .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK, | 663 | .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK, |
662 | .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT, | 664 | .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT, |
@@ -667,7 +669,7 @@ static struct sys64_hook cp15_64_hooks[] = { | |||
667 | 669 | ||
668 | asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) | 670 | asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) |
669 | { | 671 | { |
670 | struct sys64_hook *hook, *hook_base; | 672 | const struct sys64_hook *hook, *hook_base; |
671 | 673 | ||
672 | if (!cp15_cond_valid(esr, regs)) { | 674 | if (!cp15_cond_valid(esr, regs)) { |
673 | /* | 675 | /* |
@@ -707,7 +709,7 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) | |||
707 | 709 | ||
708 | asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) | 710 | asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) |
709 | { | 711 | { |
710 | struct sys64_hook *hook; | 712 | const struct sys64_hook *hook; |
711 | 713 | ||
712 | for (hook = sys64_hooks; hook->handler; hook++) | 714 | for (hook = sys64_hooks; hook->handler; hook++) |
713 | if ((hook->esr_mask & esr) == hook->esr_val) { | 715 | if ((hook->esr_mask & esr) == hook->esr_val) { |
@@ -743,6 +745,7 @@ static const char *esr_class_str[] = { | |||
743 | [ESR_ELx_EC_SMC64] = "SMC (AArch64)", | 745 | [ESR_ELx_EC_SMC64] = "SMC (AArch64)", |
744 | [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", | 746 | [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", |
745 | [ESR_ELx_EC_SVE] = "SVE", | 747 | [ESR_ELx_EC_SVE] = "SVE", |
748 | [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB", | ||
746 | [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", | 749 | [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", |
747 | [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", | 750 | [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", |
748 | [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", | 751 | [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", |
@@ -899,6 +902,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr) | |||
899 | nmi_exit(); | 902 | nmi_exit(); |
900 | } | 903 | } |
901 | 904 | ||
905 | asmlinkage void enter_from_user_mode(void) | ||
906 | { | ||
907 | CT_WARN_ON(ct_state() != CONTEXT_USER); | ||
908 | user_exit_irqoff(); | ||
909 | } | ||
910 | NOKPROBE_SYMBOL(enter_from_user_mode); | ||
911 | |||
902 | void __pte_error(const char *file, int line, unsigned long val) | 912 | void __pte_error(const char *file, int line, unsigned long val) |
903 | { | 913 | { |
904 | pr_err("%s:%d: bad pte %016lx.\n", file, line, val); | 914 | pr_err("%s:%d: bad pte %016lx.\n", file, line, val); |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7fa008374907..aa76f7259668 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -200,6 +200,15 @@ SECTIONS | |||
200 | __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); | 200 | __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); |
201 | __rela_size = SIZEOF(.rela.dyn); | 201 | __rela_size = SIZEOF(.rela.dyn); |
202 | 202 | ||
203 | #ifdef CONFIG_RELR | ||
204 | .relr.dyn : ALIGN(8) { | ||
205 | *(.relr.dyn) | ||
206 | } | ||
207 | |||
208 | __relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR); | ||
209 | __relr_size = SIZEOF(.relr.dyn); | ||
210 | #endif | ||
211 | |||
203 | . = ALIGN(SEGMENT_ALIGN); | 212 | . = ALIGN(SEGMENT_ALIGN); |
204 | __initdata_end = .; | 213 | __initdata_end = .; |
205 | __init_end = .; | 214 | __init_end = .; |
@@ -245,6 +254,8 @@ SECTIONS | |||
245 | HEAD_SYMBOLS | 254 | HEAD_SYMBOLS |
246 | } | 255 | } |
247 | 256 | ||
257 | #include "image-vars.h" | ||
258 | |||
248 | /* | 259 | /* |
249 | * The HYP init code and ID map text can't be longer than a page each, | 260 | * The HYP init code and ID map text can't be longer than a page each, |
250 | * and should not cross a page boundary. | 261 | * and should not cross a page boundary. |
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index adaf266d8de8..bd978ad71936 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -264,7 +264,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) | |||
264 | tmp = read_sysreg(par_el1); | 264 | tmp = read_sysreg(par_el1); |
265 | write_sysreg(par, par_el1); | 265 | write_sysreg(par, par_el1); |
266 | 266 | ||
267 | if (unlikely(tmp & 1)) | 267 | if (unlikely(tmp & SYS_PAR_EL1_F)) |
268 | return false; /* Translation failed, back to guest */ | 268 | return false; /* Translation failed, back to guest */ |
269 | 269 | ||
270 | /* Convert PAR to HPFAR format */ | 270 | /* Convert PAR to HPFAR format */ |
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 33c2a4abda04..f182ccb0438e 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile | |||
@@ -33,3 +33,5 @@ UBSAN_SANITIZE_atomic_ll_sc.o := n | |||
33 | lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o | 33 | lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o |
34 | 34 | ||
35 | obj-$(CONFIG_CRC32) += crc32.o | 35 | obj-$(CONFIG_CRC32) += crc32.o |
36 | |||
37 | obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o | ||
diff --git a/arch/arm64/lib/error-inject.c b/arch/arm64/lib/error-inject.c new file mode 100644 index 000000000000..ed15021da3ed --- /dev/null +++ b/arch/arm64/lib/error-inject.c | |||
@@ -0,0 +1,18 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | #include <linux/error-injection.h> | ||
4 | #include <linux/kprobes.h> | ||
5 | |||
6 | void override_function_with_return(struct pt_regs *regs) | ||
7 | { | ||
8 | /* | ||
9 | * 'regs' represents the state on entry of a predefined function in | ||
10 | * the kernel/module and which is captured on a kprobe. | ||
11 | * | ||
12 | * When kprobe returns back from exception it will override the end | ||
13 | * of probed function and directly return to the predefined | ||
14 | * function's caller. | ||
15 | */ | ||
16 | instruction_pointer_set(regs, procedure_link_pointer(regs)); | ||
17 | } | ||
18 | NOKPROBE_SYMBOL(override_function_with_return); | ||
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index bb4e4f3fffd8..115d7a0e4b08 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/acpi.h> | 10 | #include <linux/acpi.h> |
11 | #include <linux/bitfield.h> | ||
11 | #include <linux/extable.h> | 12 | #include <linux/extable.h> |
12 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
13 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
@@ -86,8 +87,8 @@ static void mem_abort_decode(unsigned int esr) | |||
86 | pr_alert("Mem abort info:\n"); | 87 | pr_alert("Mem abort info:\n"); |
87 | 88 | ||
88 | pr_alert(" ESR = 0x%08x\n", esr); | 89 | pr_alert(" ESR = 0x%08x\n", esr); |
89 | pr_alert(" Exception class = %s, IL = %u bits\n", | 90 | pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", |
90 | esr_get_class_string(esr), | 91 | ESR_ELx_EC(esr), esr_get_class_string(esr), |
91 | (esr & ESR_ELx_IL) ? 32 : 16); | 92 | (esr & ESR_ELx_IL) ? 32 : 16); |
92 | pr_alert(" SET = %lu, FnV = %lu\n", | 93 | pr_alert(" SET = %lu, FnV = %lu\n", |
93 | (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT, | 94 | (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT, |
@@ -241,6 +242,34 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, | |||
241 | return false; | 242 | return false; |
242 | } | 243 | } |
243 | 244 | ||
245 | static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, | ||
246 | unsigned int esr, | ||
247 | struct pt_regs *regs) | ||
248 | { | ||
249 | unsigned long flags; | ||
250 | u64 par, dfsc; | ||
251 | |||
252 | if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || | ||
253 | (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) | ||
254 | return false; | ||
255 | |||
256 | local_irq_save(flags); | ||
257 | asm volatile("at s1e1r, %0" :: "r" (addr)); | ||
258 | isb(); | ||
259 | par = read_sysreg(par_el1); | ||
260 | local_irq_restore(flags); | ||
261 | |||
262 | if (!(par & SYS_PAR_EL1_F)) | ||
263 | return false; | ||
264 | |||
265 | /* | ||
266 | * If we got a different type of fault from the AT instruction, | ||
267 | * treat the translation fault as spurious. | ||
268 | */ | ||
269 | dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par); | ||
270 | return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT; | ||
271 | } | ||
272 | |||
244 | static void die_kernel_fault(const char *msg, unsigned long addr, | 273 | static void die_kernel_fault(const char *msg, unsigned long addr, |
245 | unsigned int esr, struct pt_regs *regs) | 274 | unsigned int esr, struct pt_regs *regs) |
246 | { | 275 | { |
@@ -269,6 +298,10 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, | |||
269 | if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) | 298 | if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) |
270 | return; | 299 | return; |
271 | 300 | ||
301 | if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs), | ||
302 | "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) | ||
303 | return; | ||
304 | |||
272 | if (is_el1_permission_fault(addr, esr, regs)) { | 305 | if (is_el1_permission_fault(addr, esr, regs)) { |
273 | if (esr & ESR_ELx_WNR) | 306 | if (esr & ESR_ELx_WNR) |
274 | msg = "write to read-only memory"; | 307 | msg = "write to read-only memory"; |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 531c497c5758..45c00a54909c 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -583,8 +583,12 @@ void free_initmem(void) | |||
583 | #ifdef CONFIG_BLK_DEV_INITRD | 583 | #ifdef CONFIG_BLK_DEV_INITRD |
584 | void __init free_initrd_mem(unsigned long start, unsigned long end) | 584 | void __init free_initrd_mem(unsigned long start, unsigned long end) |
585 | { | 585 | { |
586 | unsigned long aligned_start, aligned_end; | ||
587 | |||
588 | aligned_start = __virt_to_phys(start) & PAGE_MASK; | ||
589 | aligned_end = PAGE_ALIGN(__virt_to_phys(end)); | ||
590 | memblock_free(aligned_start, aligned_end - aligned_start); | ||
586 | free_reserved_area((void *)start, (void *)end, 0, "initrd"); | 591 | free_reserved_area((void *)start, (void *)end, 0, "initrd"); |
587 | memblock_free(__virt_to_phys(start), end - start); | ||
588 | } | 592 | } |
589 | #endif | 593 | #endif |
590 | 594 | ||
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 3ed44008230e..53dc6f24cfb7 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -879,7 +879,7 @@ void __set_fixmap(enum fixed_addresses idx, | |||
879 | } | 879 | } |
880 | } | 880 | } |
881 | 881 | ||
882 | void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) | 882 | void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) |
883 | { | 883 | { |
884 | const u64 dt_virt_base = __fix_to_virt(FIX_FDT); | 884 | const u64 dt_virt_base = __fix_to_virt(FIX_FDT); |
885 | int offset; | 885 | int offset; |
@@ -932,19 +932,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) | |||
932 | return dt_virt; | 932 | return dt_virt; |
933 | } | 933 | } |
934 | 934 | ||
935 | void *__init fixmap_remap_fdt(phys_addr_t dt_phys) | ||
936 | { | ||
937 | void *dt_virt; | ||
938 | int size; | ||
939 | |||
940 | dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); | ||
941 | if (!dt_virt) | ||
942 | return NULL; | ||
943 | |||
944 | memblock_reserve(dt_phys, size); | ||
945 | return dt_virt; | ||
946 | } | ||
947 | |||
948 | int __init arch_ioremap_p4d_supported(void) | 935 | int __init arch_ioremap_p4d_supported(void) |
949 | { | 936 | { |
950 | return 0; | 937 | return 0; |
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 4f241cc7cc3b..4decf1659700 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c | |||
@@ -29,7 +29,7 @@ static __init int numa_parse_early_param(char *opt) | |||
29 | { | 29 | { |
30 | if (!opt) | 30 | if (!opt) |
31 | return -EINVAL; | 31 | return -EINVAL; |
32 | if (!strncmp(opt, "off", 3)) | 32 | if (str_has_prefix(opt, "off")) |
33 | numa_off = true; | 33 | numa_off = true; |
34 | 34 | ||
35 | return 0; | 35 | return 0; |
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 03c53f16ee77..9ce7bd9d4d9c 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c | |||
@@ -128,7 +128,6 @@ int set_memory_nx(unsigned long addr, int numpages) | |||
128 | __pgprot(PTE_PXN), | 128 | __pgprot(PTE_PXN), |
129 | __pgprot(0)); | 129 | __pgprot(0)); |
130 | } | 130 | } |
131 | EXPORT_SYMBOL_GPL(set_memory_nx); | ||
132 | 131 | ||
133 | int set_memory_x(unsigned long addr, int numpages) | 132 | int set_memory_x(unsigned long addr, int numpages) |
134 | { | 133 | { |
@@ -136,7 +135,6 @@ int set_memory_x(unsigned long addr, int numpages) | |||
136 | __pgprot(0), | 135 | __pgprot(0), |
137 | __pgprot(PTE_PXN)); | 136 | __pgprot(PTE_PXN)); |
138 | } | 137 | } |
139 | EXPORT_SYMBOL_GPL(set_memory_x); | ||
140 | 138 | ||
141 | int set_memory_valid(unsigned long addr, int numpages, int enable) | 139 | int set_memory_valid(unsigned long addr, int numpages, int enable) |
142 | { | 140 | { |
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 391f9cabfe60..a1e0592d1fbc 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -286,6 +286,15 @@ skip_pgd: | |||
286 | msr sctlr_el1, x18 | 286 | msr sctlr_el1, x18 |
287 | isb | 287 | isb |
288 | 288 | ||
289 | /* | ||
290 | * Invalidate the local I-cache so that any instructions fetched | ||
291 | * speculatively from the PoC are discarded, since they may have | ||
292 | * been dynamically patched at the PoU. | ||
293 | */ | ||
294 | ic iallu | ||
295 | dsb nsh | ||
296 | isb | ||
297 | |||
289 | /* Set the flag to zero to indicate that we're all done */ | 298 | /* Set the flag to zero to indicate that we're all done */ |
290 | str wzr, [flag_ptr] | 299 | str wzr, [flag_ptr] |
291 | ret | 300 | ret |
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index c345b79414a9..403f7e193833 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile | |||
@@ -39,13 +39,11 @@ endif | |||
39 | uname := $(shell uname -m) | 39 | uname := $(shell uname -m) |
40 | KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig | 40 | KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig |
41 | 41 | ||
42 | ifdef CONFIG_PPC64 | ||
43 | new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) | 42 | new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) |
44 | 43 | ||
45 | ifeq ($(new_nm),y) | 44 | ifeq ($(new_nm),y) |
46 | NM := $(NM) --synthetic | 45 | NM := $(NM) --synthetic |
47 | endif | 46 | endif |
48 | endif | ||
49 | 47 | ||
50 | # BITS is used as extension for files which are available in a 32 bit | 48 | # BITS is used as extension for files which are available in a 32 bit |
51 | # and a 64 bit version to simplify shared Makefiles. | 49 | # and a 64 bit version to simplify shared Makefiles. |
diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h deleted file mode 100644 index 62fd24739852..000000000000 --- a/arch/powerpc/include/asm/error-injection.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
2 | |||
3 | #ifndef _ASM_ERROR_INJECTION_H | ||
4 | #define _ASM_ERROR_INJECTION_H | ||
5 | |||
6 | #include <linux/compiler.h> | ||
7 | #include <linux/linkage.h> | ||
8 | #include <asm/ptrace.h> | ||
9 | #include <asm-generic/error-injection.h> | ||
10 | |||
11 | void override_function_with_return(struct pt_regs *regs); | ||
12 | |||
13 | #endif /* _ASM_ERROR_INJECTION_H */ | ||
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 59a4727ecd6c..86ee362a1375 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig | |||
@@ -48,6 +48,7 @@ config RISCV | |||
48 | select PCI_MSI if PCI | 48 | select PCI_MSI if PCI |
49 | select RISCV_TIMER | 49 | select RISCV_TIMER |
50 | select GENERIC_IRQ_MULTI_HANDLER | 50 | select GENERIC_IRQ_MULTI_HANDLER |
51 | select GENERIC_ARCH_TOPOLOGY if SMP | ||
51 | select ARCH_HAS_PTE_SPECIAL | 52 | select ARCH_HAS_PTE_SPECIAL |
52 | select ARCH_HAS_MMIOWB | 53 | select ARCH_HAS_MMIOWB |
53 | select HAVE_EBPF_JIT if 64BIT | 54 | select HAVE_EBPF_JIT if 64BIT |
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 7462a44304fe..18ae6da5115e 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright (C) 2017 SiFive | 8 | * Copyright (C) 2017 SiFive |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/arch_topology.h> | ||
11 | #include <linux/module.h> | 12 | #include <linux/module.h> |
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
@@ -35,6 +36,7 @@ static DECLARE_COMPLETION(cpu_running); | |||
35 | 36 | ||
36 | void __init smp_prepare_boot_cpu(void) | 37 | void __init smp_prepare_boot_cpu(void) |
37 | { | 38 | { |
39 | init_cpu_topology(); | ||
38 | } | 40 | } |
39 | 41 | ||
40 | void __init smp_prepare_cpus(unsigned int max_cpus) | 42 | void __init smp_prepare_cpus(unsigned int max_cpus) |
@@ -138,6 +140,7 @@ asmlinkage void __init smp_callin(void) | |||
138 | 140 | ||
139 | trap_init(); | 141 | trap_init(); |
140 | notify_cpu_starting(smp_processor_id()); | 142 | notify_cpu_starting(smp_processor_id()); |
143 | update_siblings_masks(smp_processor_id()); | ||
141 | set_cpu_online(smp_processor_id(), 1); | 144 | set_cpu_online(smp_processor_id(), 1); |
142 | /* | 145 | /* |
143 | * Remote TLB flushes are ignored while the CPU is offline, so emit | 146 | * Remote TLB flushes are ignored while the CPU is offline, so emit |
diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h deleted file mode 100644 index 47b7a1296245..000000000000 --- a/arch/x86/include/asm/error-injection.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _ASM_ERROR_INJECTION_H | ||
3 | #define _ASM_ERROR_INJECTION_H | ||
4 | |||
5 | #include <linux/compiler.h> | ||
6 | #include <linux/linkage.h> | ||
7 | #include <asm/ptrace.h> | ||
8 | #include <asm-generic/error-injection.h> | ||
9 | |||
10 | asmlinkage void just_return_func(void); | ||
11 | void override_function_with_return(struct pt_regs *regs); | ||
12 | |||
13 | #endif /* _ASM_ERROR_INJECTION_H */ | ||
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 8569b79e8b58..5a7551d060f2 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
@@ -1256,12 +1256,12 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev, | |||
1256 | 1256 | ||
1257 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; | 1257 | smmu = (struct acpi_iort_smmu_v3 *)node->node_data; |
1258 | if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { | 1258 | if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { |
1259 | int node = acpi_map_pxm_to_node(smmu->pxm); | 1259 | int dev_node = acpi_map_pxm_to_node(smmu->pxm); |
1260 | 1260 | ||
1261 | if (node != NUMA_NO_NODE && !node_online(node)) | 1261 | if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) |
1262 | return -EINVAL; | 1262 | return -EINVAL; |
1263 | 1263 | ||
1264 | set_dev_node(dev, node); | 1264 | set_dev_node(dev, dev_node); |
1265 | pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", | 1265 | pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", |
1266 | smmu->base_address, | 1266 | smmu->base_address, |
1267 | smmu->pxm); | 1267 | smmu->pxm); |
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 1e7ac0bd0d3a..f31544d3656e 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c | |||
@@ -541,6 +541,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag) | |||
541 | } | 541 | } |
542 | 542 | ||
543 | /** | 543 | /** |
544 | * check_acpi_cpu_flag() - Determine if CPU node has a flag set | ||
545 | * @cpu: Kernel logical CPU number | ||
546 | * @rev: The minimum PPTT revision defining the flag | ||
547 | * @flag: The flag itself | ||
548 | * | ||
549 | * Check the node representing a CPU for a given flag. | ||
550 | * | ||
551 | * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or | ||
552 | * the table revision isn't new enough. | ||
553 | * 1, any passed flag set | ||
554 | * 0, flag unset | ||
555 | */ | ||
556 | static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) | ||
557 | { | ||
558 | struct acpi_table_header *table; | ||
559 | acpi_status status; | ||
560 | u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); | ||
561 | struct acpi_pptt_processor *cpu_node = NULL; | ||
562 | int ret = -ENOENT; | ||
563 | |||
564 | status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); | ||
565 | if (ACPI_FAILURE(status)) { | ||
566 | acpi_pptt_warn_missing(); | ||
567 | return ret; | ||
568 | } | ||
569 | |||
570 | if (table->revision >= rev) | ||
571 | cpu_node = acpi_find_processor_node(table, acpi_cpu_id); | ||
572 | |||
573 | if (cpu_node) | ||
574 | ret = (cpu_node->flags & flag) != 0; | ||
575 | |||
576 | acpi_put_table(table); | ||
577 | |||
578 | return ret; | ||
579 | } | ||
580 | |||
581 | /** | ||
544 | * acpi_find_last_cache_level() - Determines the number of cache levels for a PE | 582 | * acpi_find_last_cache_level() - Determines the number of cache levels for a PE |
545 | * @cpu: Kernel logical CPU number | 583 | * @cpu: Kernel logical CPU number |
546 | * | 584 | * |
@@ -605,6 +643,20 @@ int cache_setup_acpi(unsigned int cpu) | |||
605 | } | 643 | } |
606 | 644 | ||
607 | /** | 645 | /** |
646 | * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread | ||
647 | * @cpu: Kernel logical CPU number | ||
648 | * | ||
649 | * Return: 1, a thread | ||
650 | * 0, not a thread | ||
651 | * -ENOENT ,if the PPTT doesn't exist, the CPU cannot be found or | ||
652 | * the table revision isn't new enough. | ||
653 | */ | ||
654 | int acpi_pptt_cpu_is_thread(unsigned int cpu) | ||
655 | { | ||
656 | return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD); | ||
657 | } | ||
658 | |||
659 | /** | ||
608 | * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU | 660 | * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU |
609 | * @cpu: Kernel logical CPU number | 661 | * @cpu: Kernel logical CPU number |
610 | * @level: The topological level for which we would like a unique ID | 662 | * @level: The topological level for which we would like a unique ID |
@@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level) | |||
664 | return ret; | 716 | return ret; |
665 | } | 717 | } |
666 | 718 | ||
667 | |||
668 | /** | 719 | /** |
669 | * find_acpi_cpu_topology_package() - Determine a unique CPU package value | 720 | * find_acpi_cpu_topology_package() - Determine a unique CPU package value |
670 | * @cpu: Kernel logical CPU number | 721 | * @cpu: Kernel logical CPU number |
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index dc404492381d..28b92e3cc570 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
@@ -202,7 +202,7 @@ config GENERIC_ARCH_TOPOLOGY | |||
202 | help | 202 | help |
203 | Enable support for architectures common topology code: e.g., parsing | 203 | Enable support for architectures common topology code: e.g., parsing |
204 | CPU capacity information from DT, usage of such information for | 204 | CPU capacity information from DT, usage of such information for |
205 | appropriate scaling, sysfs interface for changing capacity values at | 205 | appropriate scaling, sysfs interface for reading capacity values at |
206 | runtime. | 206 | runtime. |
207 | 207 | ||
208 | endmenu | 208 | endmenu |
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 63c1e76739f1..b54d241a2ff5 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c | |||
@@ -15,6 +15,11 @@ | |||
15 | #include <linux/string.h> | 15 | #include <linux/string.h> |
16 | #include <linux/sched/topology.h> | 16 | #include <linux/sched/topology.h> |
17 | #include <linux/cpuset.h> | 17 | #include <linux/cpuset.h> |
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/smp.h> | ||
18 | 23 | ||
19 | DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; | 24 | DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; |
20 | 25 | ||
@@ -241,3 +246,296 @@ static void parsing_done_workfn(struct work_struct *work) | |||
241 | #else | 246 | #else |
242 | core_initcall(free_raw_capacity); | 247 | core_initcall(free_raw_capacity); |
243 | #endif | 248 | #endif |
249 | |||
250 | #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) | ||
251 | static int __init get_cpu_for_node(struct device_node *node) | ||
252 | { | ||
253 | struct device_node *cpu_node; | ||
254 | int cpu; | ||
255 | |||
256 | cpu_node = of_parse_phandle(node, "cpu", 0); | ||
257 | if (!cpu_node) | ||
258 | return -1; | ||
259 | |||
260 | cpu = of_cpu_node_to_id(cpu_node); | ||
261 | if (cpu >= 0) | ||
262 | topology_parse_cpu_capacity(cpu_node, cpu); | ||
263 | else | ||
264 | pr_crit("Unable to find CPU node for %pOF\n", cpu_node); | ||
265 | |||
266 | of_node_put(cpu_node); | ||
267 | return cpu; | ||
268 | } | ||
269 | |||
270 | static int __init parse_core(struct device_node *core, int package_id, | ||
271 | int core_id) | ||
272 | { | ||
273 | char name[10]; | ||
274 | bool leaf = true; | ||
275 | int i = 0; | ||
276 | int cpu; | ||
277 | struct device_node *t; | ||
278 | |||
279 | do { | ||
280 | snprintf(name, sizeof(name), "thread%d", i); | ||
281 | t = of_get_child_by_name(core, name); | ||
282 | if (t) { | ||
283 | leaf = false; | ||
284 | cpu = get_cpu_for_node(t); | ||
285 | if (cpu >= 0) { | ||
286 | cpu_topology[cpu].package_id = package_id; | ||
287 | cpu_topology[cpu].core_id = core_id; | ||
288 | cpu_topology[cpu].thread_id = i; | ||
289 | } else { | ||
290 | pr_err("%pOF: Can't get CPU for thread\n", | ||
291 | t); | ||
292 | of_node_put(t); | ||
293 | return -EINVAL; | ||
294 | } | ||
295 | of_node_put(t); | ||
296 | } | ||
297 | i++; | ||
298 | } while (t); | ||
299 | |||
300 | cpu = get_cpu_for_node(core); | ||
301 | if (cpu >= 0) { | ||
302 | if (!leaf) { | ||
303 | pr_err("%pOF: Core has both threads and CPU\n", | ||
304 | core); | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | |||
308 | cpu_topology[cpu].package_id = package_id; | ||
309 | cpu_topology[cpu].core_id = core_id; | ||
310 | } else if (leaf) { | ||
311 | pr_err("%pOF: Can't get CPU for leaf core\n", core); | ||
312 | return -EINVAL; | ||
313 | } | ||
314 | |||
315 | return 0; | ||
316 | } | ||
317 | |||
318 | static int __init parse_cluster(struct device_node *cluster, int depth) | ||
319 | { | ||
320 | char name[10]; | ||
321 | bool leaf = true; | ||
322 | bool has_cores = false; | ||
323 | struct device_node *c; | ||
324 | static int package_id __initdata; | ||
325 | int core_id = 0; | ||
326 | int i, ret; | ||
327 | |||
328 | /* | ||
329 | * First check for child clusters; we currently ignore any | ||
330 | * information about the nesting of clusters and present the | ||
331 | * scheduler with a flat list of them. | ||
332 | */ | ||
333 | i = 0; | ||
334 | do { | ||
335 | snprintf(name, sizeof(name), "cluster%d", i); | ||
336 | c = of_get_child_by_name(cluster, name); | ||
337 | if (c) { | ||
338 | leaf = false; | ||
339 | ret = parse_cluster(c, depth + 1); | ||
340 | of_node_put(c); | ||
341 | if (ret != 0) | ||
342 | return ret; | ||
343 | } | ||
344 | i++; | ||
345 | } while (c); | ||
346 | |||
347 | /* Now check for cores */ | ||
348 | i = 0; | ||
349 | do { | ||
350 | snprintf(name, sizeof(name), "core%d", i); | ||
351 | c = of_get_child_by_name(cluster, name); | ||
352 | if (c) { | ||
353 | has_cores = true; | ||
354 | |||
355 | if (depth == 0) { | ||
356 | pr_err("%pOF: cpu-map children should be clusters\n", | ||
357 | c); | ||
358 | of_node_put(c); | ||
359 | return -EINVAL; | ||
360 | } | ||
361 | |||
362 | if (leaf) { | ||
363 | ret = parse_core(c, package_id, core_id++); | ||
364 | } else { | ||
365 | pr_err("%pOF: Non-leaf cluster with core %s\n", | ||
366 | cluster, name); | ||
367 | ret = -EINVAL; | ||
368 | } | ||
369 | |||
370 | of_node_put(c); | ||
371 | if (ret != 0) | ||
372 | return ret; | ||
373 | } | ||
374 | i++; | ||
375 | } while (c); | ||
376 | |||
377 | if (leaf && !has_cores) | ||
378 | pr_warn("%pOF: empty cluster\n", cluster); | ||
379 | |||
380 | if (leaf) | ||
381 | package_id++; | ||
382 | |||
383 | return 0; | ||
384 | } | ||
385 | |||
386 | static int __init parse_dt_topology(void) | ||
387 | { | ||
388 | struct device_node *cn, *map; | ||
389 | int ret = 0; | ||
390 | int cpu; | ||
391 | |||
392 | cn = of_find_node_by_path("/cpus"); | ||
393 | if (!cn) { | ||
394 | pr_err("No CPU information found in DT\n"); | ||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * When topology is provided cpu-map is essentially a root | ||
400 | * cluster with restricted subnodes. | ||
401 | */ | ||
402 | map = of_get_child_by_name(cn, "cpu-map"); | ||
403 | if (!map) | ||
404 | goto out; | ||
405 | |||
406 | ret = parse_cluster(map, 0); | ||
407 | if (ret != 0) | ||
408 | goto out_map; | ||
409 | |||
410 | topology_normalize_cpu_scale(); | ||
411 | |||
412 | /* | ||
413 | * Check that all cores are in the topology; the SMP code will | ||
414 | * only mark cores described in the DT as possible. | ||
415 | */ | ||
416 | for_each_possible_cpu(cpu) | ||
417 | if (cpu_topology[cpu].package_id == -1) | ||
418 | ret = -EINVAL; | ||
419 | |||
420 | out_map: | ||
421 | of_node_put(map); | ||
422 | out: | ||
423 | of_node_put(cn); | ||
424 | return ret; | ||
425 | } | ||
426 | #endif | ||
427 | |||
428 | /* | ||
429 | * cpu topology table | ||
430 | */ | ||
431 | struct cpu_topology cpu_topology[NR_CPUS]; | ||
432 | EXPORT_SYMBOL_GPL(cpu_topology); | ||
433 | |||
434 | const struct cpumask *cpu_coregroup_mask(int cpu) | ||
435 | { | ||
436 | const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); | ||
437 | |||
438 | /* Find the smaller of NUMA, core or LLC siblings */ | ||
439 | if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { | ||
440 | /* not numa in package, lets use the package siblings */ | ||
441 | core_mask = &cpu_topology[cpu].core_sibling; | ||
442 | } | ||
443 | if (cpu_topology[cpu].llc_id != -1) { | ||
444 | if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) | ||
445 | core_mask = &cpu_topology[cpu].llc_sibling; | ||
446 | } | ||
447 | |||
448 | return core_mask; | ||
449 | } | ||
450 | |||
451 | void update_siblings_masks(unsigned int cpuid) | ||
452 | { | ||
453 | struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; | ||
454 | int cpu; | ||
455 | |||
456 | /* update core and thread sibling masks */ | ||
457 | for_each_online_cpu(cpu) { | ||
458 | cpu_topo = &cpu_topology[cpu]; | ||
459 | |||
460 | if (cpuid_topo->llc_id == cpu_topo->llc_id) { | ||
461 | cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); | ||
462 | cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); | ||
463 | } | ||
464 | |||
465 | if (cpuid_topo->package_id != cpu_topo->package_id) | ||
466 | continue; | ||
467 | |||
468 | cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); | ||
469 | cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); | ||
470 | |||
471 | if (cpuid_topo->core_id != cpu_topo->core_id) | ||
472 | continue; | ||
473 | |||
474 | cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); | ||
475 | cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); | ||
476 | } | ||
477 | } | ||
478 | |||
479 | static void clear_cpu_topology(int cpu) | ||
480 | { | ||
481 | struct cpu_topology *cpu_topo = &cpu_topology[cpu]; | ||
482 | |||
483 | cpumask_clear(&cpu_topo->llc_sibling); | ||
484 | cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); | ||
485 | |||
486 | cpumask_clear(&cpu_topo->core_sibling); | ||
487 | cpumask_set_cpu(cpu, &cpu_topo->core_sibling); | ||
488 | cpumask_clear(&cpu_topo->thread_sibling); | ||
489 | cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); | ||
490 | } | ||
491 | |||
492 | void __init reset_cpu_topology(void) | ||
493 | { | ||
494 | unsigned int cpu; | ||
495 | |||
496 | for_each_possible_cpu(cpu) { | ||
497 | struct cpu_topology *cpu_topo = &cpu_topology[cpu]; | ||
498 | |||
499 | cpu_topo->thread_id = -1; | ||
500 | cpu_topo->core_id = -1; | ||
501 | cpu_topo->package_id = -1; | ||
502 | cpu_topo->llc_id = -1; | ||
503 | |||
504 | clear_cpu_topology(cpu); | ||
505 | } | ||
506 | } | ||
507 | |||
508 | void remove_cpu_topology(unsigned int cpu) | ||
509 | { | ||
510 | int sibling; | ||
511 | |||
512 | for_each_cpu(sibling, topology_core_cpumask(cpu)) | ||
513 | cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); | ||
514 | for_each_cpu(sibling, topology_sibling_cpumask(cpu)) | ||
515 | cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); | ||
516 | for_each_cpu(sibling, topology_llc_cpumask(cpu)) | ||
517 | cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); | ||
518 | |||
519 | clear_cpu_topology(cpu); | ||
520 | } | ||
521 | |||
522 | __weak int __init parse_acpi_topology(void) | ||
523 | { | ||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | #if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) | ||
528 | void __init init_cpu_topology(void) | ||
529 | { | ||
530 | reset_cpu_topology(); | ||
531 | |||
532 | /* | ||
533 | * Discard anything that was parsed if we hit an error so we | ||
534 | * don't use partial information. | ||
535 | */ | ||
536 | if (parse_acpi_topology()) | ||
537 | reset_cpu_topology(); | ||
538 | else if (of_have_populated_dt() && parse_dt_topology()) | ||
539 | reset_cpu_topology(); | ||
540 | } | ||
541 | #endif | ||
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3e866885a405..2794f4b3f62d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -573,3 +573,12 @@ config RANDOM_TRUST_CPU | |||
573 | has not installed a hidden back door to compromise the CPU's | 573 | has not installed a hidden back door to compromise the CPU's |
574 | random number generation facilities. This can also be configured | 574 | random number generation facilities. This can also be configured |
575 | at boot with "random.trust_cpu=on/off". | 575 | at boot with "random.trust_cpu=on/off". |
576 | |||
577 | config RANDOM_TRUST_BOOTLOADER | ||
578 | bool "Trust the bootloader to initialize Linux's CRNG" | ||
579 | help | ||
580 | Some bootloaders can provide entropy to increase the kernel's initial | ||
581 | device randomness. Say Y here to assume the entropy provided by the | ||
582 | booloader is trustworthy so it will be added to the kernel's entropy | ||
583 | pool. Otherwise, say N here so it will be regarded as device input that | ||
584 | only mixes the entropy pool. \ No newline at end of file | ||
diff --git a/drivers/char/random.c b/drivers/char/random.c index 5d5ea4ce1442..566922df4b7b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -2445,3 +2445,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, | |||
2445 | credit_entropy_bits(poolp, entropy); | 2445 | credit_entropy_bits(poolp, entropy); |
2446 | } | 2446 | } |
2447 | EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); | 2447 | EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); |
2448 | |||
2449 | /* Handle random seed passed by bootloader. | ||
2450 | * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise | ||
2451 | * it would be regarded as device data. | ||
2452 | * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. | ||
2453 | */ | ||
2454 | void add_bootloader_randomness(const void *buf, unsigned int size) | ||
2455 | { | ||
2456 | if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) | ||
2457 | add_hwgenerator_randomness(buf, size, size * 8); | ||
2458 | else | ||
2459 | add_device_randomness(buf, size); | ||
2460 | } | ||
2461 | EXPORT_SYMBOL_GPL(add_bootloader_randomness); \ No newline at end of file | ||
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 48cb3d4bb7d1..d8530475493c 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm | |||
@@ -13,6 +13,16 @@ config ARM_CPUIDLE | |||
13 | initialized by calling the CPU operations init idle hook | 13 | initialized by calling the CPU operations init idle hook |
14 | provided by architecture code. | 14 | provided by architecture code. |
15 | 15 | ||
16 | config ARM_PSCI_CPUIDLE | ||
17 | bool "PSCI CPU idle Driver" | ||
18 | depends on ARM_PSCI_FW | ||
19 | select DT_IDLE_STATES | ||
20 | select CPU_IDLE_MULTIPLE_DRIVERS | ||
21 | help | ||
22 | Select this to enable PSCI firmware based CPUidle driver for ARM. | ||
23 | It provides an idle driver that is capable of detecting and | ||
24 | managing idle states through the PSCI firmware interface. | ||
25 | |||
16 | config ARM_BIG_LITTLE_CPUIDLE | 26 | config ARM_BIG_LITTLE_CPUIDLE |
17 | bool "Support for ARM big.LITTLE processors" | 27 | bool "Support for ARM big.LITTLE processors" |
18 | depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS | 28 | depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS |
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 9d7176cee3d3..40d016339b29 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -20,6 +20,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o | |||
20 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o | 20 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o |
21 | obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o | 21 | obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o |
22 | obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o | 22 | obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o |
23 | obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o | ||
23 | 24 | ||
24 | ############################################################################### | 25 | ############################################################################### |
25 | # MIPS drivers | 26 | # MIPS drivers |
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 5bcd82c35dcf..9e5156d39627 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/topology.h> | ||
19 | 18 | ||
20 | #include <asm/cpuidle.h> | 19 | #include <asm/cpuidle.h> |
21 | 20 | ||
@@ -106,11 +105,17 @@ static int __init arm_idle_init_cpu(int cpu) | |||
106 | ret = arm_cpuidle_init(cpu); | 105 | ret = arm_cpuidle_init(cpu); |
107 | 106 | ||
108 | /* | 107 | /* |
109 | * Allow the initialization to continue for other CPUs, if the reported | 108 | * Allow the initialization to continue for other CPUs, if the |
110 | * failure is a HW misconfiguration/breakage (-ENXIO). | 109 | * reported failure is a HW misconfiguration/breakage (-ENXIO). |
110 | * | ||
111 | * Some platforms do not support idle operations | ||
112 | * (arm_cpuidle_init() returning -EOPNOTSUPP), we should | ||
113 | * not flag this case as an error, it is a valid | ||
114 | * configuration. | ||
111 | */ | 115 | */ |
112 | if (ret) { | 116 | if (ret) { |
113 | pr_err("CPU %d failed to init idle CPU ops\n", cpu); | 117 | if (ret != -EOPNOTSUPP) |
118 | pr_err("CPU %d failed to init idle CPU ops\n", cpu); | ||
114 | ret = ret == -ENXIO ? 0 : ret; | 119 | ret = ret == -ENXIO ? 0 : ret; |
115 | goto out_kfree_drv; | 120 | goto out_kfree_drv; |
116 | } | 121 | } |
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c new file mode 100644 index 000000000000..f3c1a2396f98 --- /dev/null +++ b/drivers/cpuidle/cpuidle-psci.c | |||
@@ -0,0 +1,236 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * PSCI CPU idle driver. | ||
4 | * | ||
5 | * Copyright (C) 2019 ARM Ltd. | ||
6 | * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> | ||
7 | */ | ||
8 | |||
9 | #define pr_fmt(fmt) "CPUidle PSCI: " fmt | ||
10 | |||
11 | #include <linux/cpuidle.h> | ||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/cpu_pm.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/of_device.h> | ||
18 | #include <linux/psci.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
21 | #include <asm/cpuidle.h> | ||
22 | |||
23 | #include "dt_idle_states.h" | ||
24 | |||
25 | static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); | ||
26 | |||
27 | static int psci_enter_idle_state(struct cpuidle_device *dev, | ||
28 | struct cpuidle_driver *drv, int idx) | ||
29 | { | ||
30 | u32 *state = __this_cpu_read(psci_power_state); | ||
31 | |||
32 | return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, | ||
33 | idx, state[idx - 1]); | ||
34 | } | ||
35 | |||
36 | static struct cpuidle_driver psci_idle_driver __initdata = { | ||
37 | .name = "psci_idle", | ||
38 | .owner = THIS_MODULE, | ||
39 | /* | ||
40 | * PSCI idle states relies on architectural WFI to | ||
41 | * be represented as state index 0. | ||
42 | */ | ||
43 | .states[0] = { | ||
44 | .enter = psci_enter_idle_state, | ||
45 | .exit_latency = 1, | ||
46 | .target_residency = 1, | ||
47 | .power_usage = UINT_MAX, | ||
48 | .name = "WFI", | ||
49 | .desc = "ARM WFI", | ||
50 | } | ||
51 | }; | ||
52 | |||
53 | static const struct of_device_id psci_idle_state_match[] __initconst = { | ||
54 | { .compatible = "arm,idle-state", | ||
55 | .data = psci_enter_idle_state }, | ||
56 | { }, | ||
57 | }; | ||
58 | |||
59 | static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) | ||
60 | { | ||
61 | int err = of_property_read_u32(np, "arm,psci-suspend-param", state); | ||
62 | |||
63 | if (err) { | ||
64 | pr_warn("%pOF missing arm,psci-suspend-param property\n", np); | ||
65 | return err; | ||
66 | } | ||
67 | |||
68 | if (!psci_power_state_is_valid(*state)) { | ||
69 | pr_warn("Invalid PSCI power state %#x\n", *state); | ||
70 | return -EINVAL; | ||
71 | } | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) | ||
77 | { | ||
78 | int i, ret = 0, count = 0; | ||
79 | u32 *psci_states; | ||
80 | struct device_node *state_node; | ||
81 | |||
82 | /* Count idle states */ | ||
83 | while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", | ||
84 | count))) { | ||
85 | count++; | ||
86 | of_node_put(state_node); | ||
87 | } | ||
88 | |||
89 | if (!count) | ||
90 | return -ENODEV; | ||
91 | |||
92 | psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); | ||
93 | if (!psci_states) | ||
94 | return -ENOMEM; | ||
95 | |||
96 | for (i = 0; i < count; i++) { | ||
97 | state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); | ||
98 | ret = psci_dt_parse_state_node(state_node, &psci_states[i]); | ||
99 | of_node_put(state_node); | ||
100 | |||
101 | if (ret) | ||
102 | goto free_mem; | ||
103 | |||
104 | pr_debug("psci-power-state %#x index %d\n", psci_states[i], i); | ||
105 | } | ||
106 | |||
107 | /* Idle states parsed correctly, initialize per-cpu pointer */ | ||
108 | per_cpu(psci_power_state, cpu) = psci_states; | ||
109 | return 0; | ||
110 | |||
111 | free_mem: | ||
112 | kfree(psci_states); | ||
113 | return ret; | ||
114 | } | ||
115 | |||
116 | static __init int psci_cpu_init_idle(unsigned int cpu) | ||
117 | { | ||
118 | struct device_node *cpu_node; | ||
119 | int ret; | ||
120 | |||
121 | /* | ||
122 | * If the PSCI cpu_suspend function hook has not been initialized | ||
123 | * idle states must not be enabled, so bail out | ||
124 | */ | ||
125 | if (!psci_ops.cpu_suspend) | ||
126 | return -EOPNOTSUPP; | ||
127 | |||
128 | cpu_node = of_cpu_device_node_get(cpu); | ||
129 | if (!cpu_node) | ||
130 | return -ENODEV; | ||
131 | |||
132 | ret = psci_dt_cpu_init_idle(cpu_node, cpu); | ||
133 | |||
134 | of_node_put(cpu_node); | ||
135 | |||
136 | return ret; | ||
137 | } | ||
138 | |||
139 | static int __init psci_idle_init_cpu(int cpu) | ||
140 | { | ||
141 | struct cpuidle_driver *drv; | ||
142 | struct device_node *cpu_node; | ||
143 | const char *enable_method; | ||
144 | int ret = 0; | ||
145 | |||
146 | cpu_node = of_cpu_device_node_get(cpu); | ||
147 | if (!cpu_node) | ||
148 | return -ENODEV; | ||
149 | |||
150 | /* | ||
151 | * Check whether the enable-method for the cpu is PSCI, fail | ||
152 | * if it is not. | ||
153 | */ | ||
154 | enable_method = of_get_property(cpu_node, "enable-method", NULL); | ||
155 | if (!enable_method || (strcmp(enable_method, "psci"))) | ||
156 | ret = -ENODEV; | ||
157 | |||
158 | of_node_put(cpu_node); | ||
159 | if (ret) | ||
160 | return ret; | ||
161 | |||
162 | drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL); | ||
163 | if (!drv) | ||
164 | return -ENOMEM; | ||
165 | |||
166 | drv->cpumask = (struct cpumask *)cpumask_of(cpu); | ||
167 | |||
168 | /* | ||
169 | * Initialize idle states data, starting at index 1, since | ||
170 | * by default idle state 0 is the quiescent state reached | ||
171 | * by the cpu by executing the wfi instruction. | ||
172 | * | ||
173 | * If no DT idle states are detected (ret == 0) let the driver | ||
174 | * initialization fail accordingly since there is no reason to | ||
175 | * initialize the idle driver if only wfi is supported, the | ||
176 | * default archictectural back-end already executes wfi | ||
177 | * on idle entry. | ||
178 | */ | ||
179 | ret = dt_init_idle_driver(drv, psci_idle_state_match, 1); | ||
180 | if (ret <= 0) { | ||
181 | ret = ret ? : -ENODEV; | ||
182 | goto out_kfree_drv; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * Initialize PSCI idle states. | ||
187 | */ | ||
188 | ret = psci_cpu_init_idle(cpu); | ||
189 | if (ret) { | ||
190 | pr_err("CPU %d failed to PSCI idle\n", cpu); | ||
191 | goto out_kfree_drv; | ||
192 | } | ||
193 | |||
194 | ret = cpuidle_register(drv, NULL); | ||
195 | if (ret) | ||
196 | goto out_kfree_drv; | ||
197 | |||
198 | return 0; | ||
199 | |||
200 | out_kfree_drv: | ||
201 | kfree(drv); | ||
202 | return ret; | ||
203 | } | ||
204 | |||
205 | /* | ||
206 | * psci_idle_init - Initializes PSCI cpuidle driver | ||
207 | * | ||
208 | * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails | ||
209 | * to register cpuidle driver then rollback to cancel all CPUs | ||
210 | * registration. | ||
211 | */ | ||
212 | static int __init psci_idle_init(void) | ||
213 | { | ||
214 | int cpu, ret; | ||
215 | struct cpuidle_driver *drv; | ||
216 | struct cpuidle_device *dev; | ||
217 | |||
218 | for_each_possible_cpu(cpu) { | ||
219 | ret = psci_idle_init_cpu(cpu); | ||
220 | if (ret) | ||
221 | goto out_fail; | ||
222 | } | ||
223 | |||
224 | return 0; | ||
225 | |||
226 | out_fail: | ||
227 | while (--cpu >= 0) { | ||
228 | dev = per_cpu(cpuidle_devices, cpu); | ||
229 | drv = cpuidle_get_cpu_driver(dev); | ||
230 | cpuidle_unregister(drv); | ||
231 | kfree(drv); | ||
232 | } | ||
233 | |||
234 | return ret; | ||
235 | } | ||
236 | device_initcall(psci_idle_init); | ||
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index f82ccd39a913..84f4ff351c62 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c | |||
@@ -103,7 +103,7 @@ static inline bool psci_power_state_loses_context(u32 state) | |||
103 | return state & mask; | 103 | return state & mask; |
104 | } | 104 | } |
105 | 105 | ||
106 | static inline bool psci_power_state_is_valid(u32 state) | 106 | bool psci_power_state_is_valid(u32 state) |
107 | { | 107 | { |
108 | const u32 valid_mask = psci_has_ext_power_state() ? | 108 | const u32 valid_mask = psci_has_ext_power_state() ? |
109 | PSCI_1_0_EXT_POWER_STATE_MASK : | 109 | PSCI_1_0_EXT_POWER_STATE_MASK : |
@@ -277,175 +277,24 @@ static int __init psci_features(u32 psci_func_id) | |||
277 | } | 277 | } |
278 | 278 | ||
279 | #ifdef CONFIG_CPU_IDLE | 279 | #ifdef CONFIG_CPU_IDLE |
280 | static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); | 280 | static int psci_suspend_finisher(unsigned long state) |
281 | |||
282 | static int psci_dt_parse_state_node(struct device_node *np, u32 *state) | ||
283 | { | ||
284 | int err = of_property_read_u32(np, "arm,psci-suspend-param", state); | ||
285 | |||
286 | if (err) { | ||
287 | pr_warn("%pOF missing arm,psci-suspend-param property\n", np); | ||
288 | return err; | ||
289 | } | ||
290 | |||
291 | if (!psci_power_state_is_valid(*state)) { | ||
292 | pr_warn("Invalid PSCI power state %#x\n", *state); | ||
293 | return -EINVAL; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) | ||
300 | { | ||
301 | int i, ret = 0, count = 0; | ||
302 | u32 *psci_states; | ||
303 | struct device_node *state_node; | ||
304 | |||
305 | /* Count idle states */ | ||
306 | while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", | ||
307 | count))) { | ||
308 | count++; | ||
309 | of_node_put(state_node); | ||
310 | } | ||
311 | |||
312 | if (!count) | ||
313 | return -ENODEV; | ||
314 | |||
315 | psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); | ||
316 | if (!psci_states) | ||
317 | return -ENOMEM; | ||
318 | |||
319 | for (i = 0; i < count; i++) { | ||
320 | state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); | ||
321 | ret = psci_dt_parse_state_node(state_node, &psci_states[i]); | ||
322 | of_node_put(state_node); | ||
323 | |||
324 | if (ret) | ||
325 | goto free_mem; | ||
326 | |||
327 | pr_debug("psci-power-state %#x index %d\n", psci_states[i], i); | ||
328 | } | ||
329 | |||
330 | /* Idle states parsed correctly, initialize per-cpu pointer */ | ||
331 | per_cpu(psci_power_state, cpu) = psci_states; | ||
332 | return 0; | ||
333 | |||
334 | free_mem: | ||
335 | kfree(psci_states); | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | #ifdef CONFIG_ACPI | ||
340 | #include <acpi/processor.h> | ||
341 | |||
342 | static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) | ||
343 | { | ||
344 | int i, count; | ||
345 | u32 *psci_states; | ||
346 | struct acpi_lpi_state *lpi; | ||
347 | struct acpi_processor *pr = per_cpu(processors, cpu); | ||
348 | |||
349 | if (unlikely(!pr || !pr->flags.has_lpi)) | ||
350 | return -EINVAL; | ||
351 | |||
352 | count = pr->power.count - 1; | ||
353 | if (count <= 0) | ||
354 | return -ENODEV; | ||
355 | |||
356 | psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); | ||
357 | if (!psci_states) | ||
358 | return -ENOMEM; | ||
359 | |||
360 | for (i = 0; i < count; i++) { | ||
361 | u32 state; | ||
362 | |||
363 | lpi = &pr->power.lpi_states[i + 1]; | ||
364 | /* | ||
365 | * Only bits[31:0] represent a PSCI power_state while | ||
366 | * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification | ||
367 | */ | ||
368 | state = lpi->address; | ||
369 | if (!psci_power_state_is_valid(state)) { | ||
370 | pr_warn("Invalid PSCI power state %#x\n", state); | ||
371 | kfree(psci_states); | ||
372 | return -EINVAL; | ||
373 | } | ||
374 | psci_states[i] = state; | ||
375 | } | ||
376 | /* Idle states parsed correctly, initialize per-cpu pointer */ | ||
377 | per_cpu(psci_power_state, cpu) = psci_states; | ||
378 | return 0; | ||
379 | } | ||
380 | #else | ||
381 | static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) | ||
382 | { | ||
383 | return -EINVAL; | ||
384 | } | ||
385 | #endif | ||
386 | |||
387 | int psci_cpu_init_idle(unsigned int cpu) | ||
388 | { | ||
389 | struct device_node *cpu_node; | ||
390 | int ret; | ||
391 | |||
392 | /* | ||
393 | * If the PSCI cpu_suspend function hook has not been initialized | ||
394 | * idle states must not be enabled, so bail out | ||
395 | */ | ||
396 | if (!psci_ops.cpu_suspend) | ||
397 | return -EOPNOTSUPP; | ||
398 | |||
399 | if (!acpi_disabled) | ||
400 | return psci_acpi_cpu_init_idle(cpu); | ||
401 | |||
402 | cpu_node = of_get_cpu_node(cpu, NULL); | ||
403 | if (!cpu_node) | ||
404 | return -ENODEV; | ||
405 | |||
406 | ret = psci_dt_cpu_init_idle(cpu_node, cpu); | ||
407 | |||
408 | of_node_put(cpu_node); | ||
409 | |||
410 | return ret; | ||
411 | } | ||
412 | |||
413 | static int psci_suspend_finisher(unsigned long index) | ||
414 | { | 281 | { |
415 | u32 *state = __this_cpu_read(psci_power_state); | 282 | u32 power_state = state; |
416 | 283 | ||
417 | return psci_ops.cpu_suspend(state[index - 1], | 284 | return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume)); |
418 | __pa_symbol(cpu_resume)); | ||
419 | } | 285 | } |
420 | 286 | ||
421 | int psci_cpu_suspend_enter(unsigned long index) | 287 | int psci_cpu_suspend_enter(u32 state) |
422 | { | 288 | { |
423 | int ret; | 289 | int ret; |
424 | u32 *state = __this_cpu_read(psci_power_state); | ||
425 | /* | ||
426 | * idle state index 0 corresponds to wfi, should never be called | ||
427 | * from the cpu_suspend operations | ||
428 | */ | ||
429 | if (WARN_ON_ONCE(!index)) | ||
430 | return -EINVAL; | ||
431 | 290 | ||
432 | if (!psci_power_state_loses_context(state[index - 1])) | 291 | if (!psci_power_state_loses_context(state)) |
433 | ret = psci_ops.cpu_suspend(state[index - 1], 0); | 292 | ret = psci_ops.cpu_suspend(state, 0); |
434 | else | 293 | else |
435 | ret = cpu_suspend(index, psci_suspend_finisher); | 294 | ret = cpu_suspend(state, psci_suspend_finisher); |
436 | 295 | ||
437 | return ret; | 296 | return ret; |
438 | } | 297 | } |
439 | |||
440 | /* ARM specific CPU idle operations */ | ||
441 | #ifdef CONFIG_ARM | ||
442 | static const struct cpuidle_ops psci_cpuidle_ops __initconst = { | ||
443 | .suspend = psci_cpu_suspend_enter, | ||
444 | .init = psci_dt_cpu_init_idle, | ||
445 | }; | ||
446 | |||
447 | CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); | ||
448 | #endif | ||
449 | #endif | 298 | #endif |
450 | 299 | ||
451 | static int psci_system_suspend(unsigned long unused) | 300 | static int psci_system_suspend(unsigned long unused) |
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index f3659443f8c2..6a445397771c 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c | |||
@@ -228,8 +228,11 @@ out_free_cpus: | |||
228 | 228 | ||
229 | static void dummy_callback(struct timer_list *unused) {} | 229 | static void dummy_callback(struct timer_list *unused) {} |
230 | 230 | ||
231 | static int suspend_cpu(int index, bool broadcast) | 231 | static int suspend_cpu(struct cpuidle_device *dev, |
232 | struct cpuidle_driver *drv, int index) | ||
232 | { | 233 | { |
234 | struct cpuidle_state *state = &drv->states[index]; | ||
235 | bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; | ||
233 | int ret; | 236 | int ret; |
234 | 237 | ||
235 | arch_cpu_idle_enter(); | 238 | arch_cpu_idle_enter(); |
@@ -254,11 +257,7 @@ static int suspend_cpu(int index, bool broadcast) | |||
254 | } | 257 | } |
255 | } | 258 | } |
256 | 259 | ||
257 | /* | 260 | ret = state->enter(dev, drv, index); |
258 | * Replicate the common ARM cpuidle enter function | ||
259 | * (arm_enter_idle_state). | ||
260 | */ | ||
261 | ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index); | ||
262 | 261 | ||
263 | if (broadcast) | 262 | if (broadcast) |
264 | tick_broadcast_exit(); | 263 | tick_broadcast_exit(); |
@@ -301,9 +300,8 @@ static int suspend_test_thread(void *arg) | |||
301 | * doesn't use PSCI). | 300 | * doesn't use PSCI). |
302 | */ | 301 | */ |
303 | for (index = 1; index < drv->state_count; ++index) { | 302 | for (index = 1; index < drv->state_count; ++index) { |
304 | struct cpuidle_state *state = &drv->states[index]; | ||
305 | bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; | ||
306 | int ret; | 303 | int ret; |
304 | struct cpuidle_state *state = &drv->states[index]; | ||
307 | 305 | ||
308 | /* | 306 | /* |
309 | * Set the timer to wake this CPU up in some time (which | 307 | * Set the timer to wake this CPU up in some time (which |
@@ -318,7 +316,7 @@ static int suspend_test_thread(void *arg) | |||
318 | /* IRQs must be disabled during suspend operations. */ | 316 | /* IRQs must be disabled during suspend operations. */ |
319 | local_irq_disable(); | 317 | local_irq_disable(); |
320 | 318 | ||
321 | ret = suspend_cpu(index, broadcast); | 319 | ret = suspend_cpu(dev, drv, index); |
322 | 320 | ||
323 | /* | 321 | /* |
324 | * We have woken up. Re-enable IRQs to handle any | 322 | * We have woken up. Re-enable IRQs to handle any |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 9cdf14b9aaab..223d617ecfe1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
25 | #include <linux/serial_core.h> | 25 | #include <linux/serial_core.h> |
26 | #include <linux/sysfs.h> | 26 | #include <linux/sysfs.h> |
27 | #include <linux/random.h> | ||
27 | 28 | ||
28 | #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ | 29 | #include <asm/setup.h> /* for COMMAND_LINE_SIZE */ |
29 | #include <asm/page.h> | 30 | #include <asm/page.h> |
@@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, | |||
1044 | { | 1045 | { |
1045 | int l; | 1046 | int l; |
1046 | const char *p; | 1047 | const char *p; |
1048 | const void *rng_seed; | ||
1047 | 1049 | ||
1048 | pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); | 1050 | pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); |
1049 | 1051 | ||
@@ -1078,6 +1080,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, | |||
1078 | 1080 | ||
1079 | pr_debug("Command line is: %s\n", (char*)data); | 1081 | pr_debug("Command line is: %s\n", (char*)data); |
1080 | 1082 | ||
1083 | rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); | ||
1084 | if (rng_seed && l > 0) { | ||
1085 | add_bootloader_randomness(rng_seed, l); | ||
1086 | |||
1087 | /* try to clear seed so it won't be found. */ | ||
1088 | fdt_nop_property(initial_boot_params, node, "rng-seed"); | ||
1089 | |||
1090 | /* update CRC check value */ | ||
1091 | of_fdt_crc32 = crc32_be(~0, initial_boot_params, | ||
1092 | fdt_totalsize(initial_boot_params)); | ||
1093 | } | ||
1094 | |||
1081 | /* break now */ | 1095 | /* break now */ |
1082 | return 1; | 1096 | return 1; |
1083 | } | 1097 | } |
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index da71c741cb46..abcf54f7d19c 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c | |||
@@ -113,8 +113,6 @@ struct smmu_pmu { | |||
113 | u64 counter_mask; | 113 | u64 counter_mask; |
114 | u32 options; | 114 | u32 options; |
115 | bool global_filter; | 115 | bool global_filter; |
116 | u32 global_filter_span; | ||
117 | u32 global_filter_sid; | ||
118 | }; | 116 | }; |
119 | 117 | ||
120 | #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) | 118 | #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) |
@@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event, | |||
260 | smmu_pmu_set_smr(smmu_pmu, idx, sid); | 258 | smmu_pmu_set_smr(smmu_pmu, idx, sid); |
261 | } | 259 | } |
262 | 260 | ||
261 | static bool smmu_pmu_check_global_filter(struct perf_event *curr, | ||
262 | struct perf_event *new) | ||
263 | { | ||
264 | if (get_filter_enable(new) != get_filter_enable(curr)) | ||
265 | return false; | ||
266 | |||
267 | if (!get_filter_enable(new)) | ||
268 | return true; | ||
269 | |||
270 | return get_filter_span(new) == get_filter_span(curr) && | ||
271 | get_filter_stream_id(new) == get_filter_stream_id(curr); | ||
272 | } | ||
273 | |||
263 | static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, | 274 | static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, |
264 | struct perf_event *event, int idx) | 275 | struct perf_event *event, int idx) |
265 | { | 276 | { |
@@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, | |||
279 | } | 290 | } |
280 | 291 | ||
281 | /* Requested settings same as current global settings*/ | 292 | /* Requested settings same as current global settings*/ |
282 | if (span == smmu_pmu->global_filter_span && | 293 | idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); |
283 | sid == smmu_pmu->global_filter_sid) | 294 | if (idx == num_ctrs || |
295 | smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) { | ||
296 | smmu_pmu_set_event_filter(event, 0, span, sid); | ||
284 | return 0; | 297 | return 0; |
298 | } | ||
285 | 299 | ||
286 | if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs)) | 300 | return -EAGAIN; |
287 | return -EAGAIN; | ||
288 | |||
289 | smmu_pmu_set_event_filter(event, 0, span, sid); | ||
290 | smmu_pmu->global_filter_span = span; | ||
291 | smmu_pmu->global_filter_sid = sid; | ||
292 | return 0; | ||
293 | } | 301 | } |
294 | 302 | ||
295 | static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, | 303 | static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, |
@@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, | |||
312 | return idx; | 320 | return idx; |
313 | } | 321 | } |
314 | 322 | ||
323 | static bool smmu_pmu_events_compatible(struct perf_event *curr, | ||
324 | struct perf_event *new) | ||
325 | { | ||
326 | if (new->pmu != curr->pmu) | ||
327 | return false; | ||
328 | |||
329 | if (to_smmu_pmu(new->pmu)->global_filter && | ||
330 | !smmu_pmu_check_global_filter(curr, new)) | ||
331 | return false; | ||
332 | |||
333 | return true; | ||
334 | } | ||
335 | |||
315 | /* | 336 | /* |
316 | * Implementation of abstract pmu functionality required by | 337 | * Implementation of abstract pmu functionality required by |
317 | * the core perf events code. | 338 | * the core perf events code. |
@@ -323,6 +344,7 @@ static int smmu_pmu_event_init(struct perf_event *event) | |||
323 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); | 344 | struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); |
324 | struct device *dev = smmu_pmu->dev; | 345 | struct device *dev = smmu_pmu->dev; |
325 | struct perf_event *sibling; | 346 | struct perf_event *sibling; |
347 | int group_num_events = 1; | ||
326 | u16 event_id; | 348 | u16 event_id; |
327 | 349 | ||
328 | if (event->attr.type != event->pmu->type) | 350 | if (event->attr.type != event->pmu->type) |
@@ -347,18 +369,23 @@ static int smmu_pmu_event_init(struct perf_event *event) | |||
347 | } | 369 | } |
348 | 370 | ||
349 | /* Don't allow groups with mixed PMUs, except for s/w events */ | 371 | /* Don't allow groups with mixed PMUs, except for s/w events */ |
350 | if (event->group_leader->pmu != event->pmu && | 372 | if (!is_software_event(event->group_leader)) { |
351 | !is_software_event(event->group_leader)) { | 373 | if (!smmu_pmu_events_compatible(event->group_leader, event)) |
352 | dev_dbg(dev, "Can't create mixed PMU group\n"); | 374 | return -EINVAL; |
353 | return -EINVAL; | 375 | |
376 | if (++group_num_events > smmu_pmu->num_counters) | ||
377 | return -EINVAL; | ||
354 | } | 378 | } |
355 | 379 | ||
356 | for_each_sibling_event(sibling, event->group_leader) { | 380 | for_each_sibling_event(sibling, event->group_leader) { |
357 | if (sibling->pmu != event->pmu && | 381 | if (is_software_event(sibling)) |
358 | !is_software_event(sibling)) { | 382 | continue; |
359 | dev_dbg(dev, "Can't create mixed PMU group\n"); | 383 | |
384 | if (!smmu_pmu_events_compatible(sibling, event)) | ||
385 | return -EINVAL; | ||
386 | |||
387 | if (++group_num_events > smmu_pmu->num_counters) | ||
360 | return -EINVAL; | 388 | return -EINVAL; |
361 | } | ||
362 | } | 389 | } |
363 | 390 | ||
364 | hwc->idx = -1; | 391 | hwc->idx = -1; |
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 63fe21600072..ce7345745b42 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #define EVENT_CYCLES_COUNTER 0 | 35 | #define EVENT_CYCLES_COUNTER 0 |
36 | #define NUM_COUNTERS 4 | 36 | #define NUM_COUNTERS 4 |
37 | 37 | ||
38 | #define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */ | ||
39 | |||
38 | #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) | 40 | #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) |
39 | 41 | ||
40 | #define DDR_PERF_DEV_NAME "imx8_ddr" | 42 | #define DDR_PERF_DEV_NAME "imx8_ddr" |
@@ -42,11 +44,25 @@ | |||
42 | 44 | ||
43 | static DEFINE_IDA(ddr_ida); | 45 | static DEFINE_IDA(ddr_ida); |
44 | 46 | ||
47 | /* DDR Perf hardware feature */ | ||
48 | #define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ | ||
49 | |||
50 | struct fsl_ddr_devtype_data { | ||
51 | unsigned int quirks; /* quirks needed for different DDR Perf core */ | ||
52 | }; | ||
53 | |||
54 | static const struct fsl_ddr_devtype_data imx8_devtype_data; | ||
55 | |||
56 | static const struct fsl_ddr_devtype_data imx8m_devtype_data = { | ||
57 | .quirks = DDR_CAP_AXI_ID_FILTER, | ||
58 | }; | ||
59 | |||
45 | static const struct of_device_id imx_ddr_pmu_dt_ids[] = { | 60 | static const struct of_device_id imx_ddr_pmu_dt_ids[] = { |
46 | { .compatible = "fsl,imx8-ddr-pmu",}, | 61 | { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, |
47 | { .compatible = "fsl,imx8m-ddr-pmu",}, | 62 | { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, |
48 | { /* sentinel */ } | 63 | { /* sentinel */ } |
49 | }; | 64 | }; |
65 | MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); | ||
50 | 66 | ||
51 | struct ddr_pmu { | 67 | struct ddr_pmu { |
52 | struct pmu pmu; | 68 | struct pmu pmu; |
@@ -57,6 +73,7 @@ struct ddr_pmu { | |||
57 | struct perf_event *events[NUM_COUNTERS]; | 73 | struct perf_event *events[NUM_COUNTERS]; |
58 | int active_events; | 74 | int active_events; |
59 | enum cpuhp_state cpuhp_state; | 75 | enum cpuhp_state cpuhp_state; |
76 | const struct fsl_ddr_devtype_data *devtype_data; | ||
60 | int irq; | 77 | int irq; |
61 | int id; | 78 | int id; |
62 | }; | 79 | }; |
@@ -128,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = { | |||
128 | IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37), | 145 | IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37), |
129 | IMX8_DDR_PMU_EVENT_ATTR(write, 0x38), | 146 | IMX8_DDR_PMU_EVENT_ATTR(write, 0x38), |
130 | IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39), | 147 | IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39), |
148 | IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41), | ||
149 | IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42), | ||
131 | NULL, | 150 | NULL, |
132 | }; | 151 | }; |
133 | 152 | ||
@@ -137,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = { | |||
137 | }; | 156 | }; |
138 | 157 | ||
139 | PMU_FORMAT_ATTR(event, "config:0-7"); | 158 | PMU_FORMAT_ATTR(event, "config:0-7"); |
159 | PMU_FORMAT_ATTR(axi_id, "config1:0-15"); | ||
160 | PMU_FORMAT_ATTR(axi_mask, "config1:16-31"); | ||
140 | 161 | ||
141 | static struct attribute *ddr_perf_format_attrs[] = { | 162 | static struct attribute *ddr_perf_format_attrs[] = { |
142 | &format_attr_event.attr, | 163 | &format_attr_event.attr, |
164 | &format_attr_axi_id.attr, | ||
165 | &format_attr_axi_mask.attr, | ||
143 | NULL, | 166 | NULL, |
144 | }; | 167 | }; |
145 | 168 | ||
@@ -189,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) | |||
189 | return readl_relaxed(pmu->base + COUNTER_READ + counter * 4); | 212 | return readl_relaxed(pmu->base + COUNTER_READ + counter * 4); |
190 | } | 213 | } |
191 | 214 | ||
215 | static bool ddr_perf_is_filtered(struct perf_event *event) | ||
216 | { | ||
217 | return event->attr.config == 0x41 || event->attr.config == 0x42; | ||
218 | } | ||
219 | |||
220 | static u32 ddr_perf_filter_val(struct perf_event *event) | ||
221 | { | ||
222 | return event->attr.config1; | ||
223 | } | ||
224 | |||
225 | static bool ddr_perf_filters_compatible(struct perf_event *a, | ||
226 | struct perf_event *b) | ||
227 | { | ||
228 | if (!ddr_perf_is_filtered(a)) | ||
229 | return true; | ||
230 | if (!ddr_perf_is_filtered(b)) | ||
231 | return true; | ||
232 | return ddr_perf_filter_val(a) == ddr_perf_filter_val(b); | ||
233 | } | ||
234 | |||
192 | static int ddr_perf_event_init(struct perf_event *event) | 235 | static int ddr_perf_event_init(struct perf_event *event) |
193 | { | 236 | { |
194 | struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); | 237 | struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); |
@@ -215,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event) | |||
215 | !is_software_event(event->group_leader)) | 258 | !is_software_event(event->group_leader)) |
216 | return -EINVAL; | 259 | return -EINVAL; |
217 | 260 | ||
261 | if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { | ||
262 | if (!ddr_perf_filters_compatible(event, event->group_leader)) | ||
263 | return -EINVAL; | ||
264 | for_each_sibling_event(sibling, event->group_leader) { | ||
265 | if (!ddr_perf_filters_compatible(event, sibling)) | ||
266 | return -EINVAL; | ||
267 | } | ||
268 | } | ||
269 | |||
218 | for_each_sibling_event(sibling, event->group_leader) { | 270 | for_each_sibling_event(sibling, event->group_leader) { |
219 | if (sibling->pmu != event->pmu && | 271 | if (sibling->pmu != event->pmu && |
220 | !is_software_event(sibling)) | 272 | !is_software_event(sibling)) |
@@ -287,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) | |||
287 | struct hw_perf_event *hwc = &event->hw; | 339 | struct hw_perf_event *hwc = &event->hw; |
288 | int counter; | 340 | int counter; |
289 | int cfg = event->attr.config; | 341 | int cfg = event->attr.config; |
342 | int cfg1 = event->attr.config1; | ||
343 | |||
344 | if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { | ||
345 | int i; | ||
346 | |||
347 | for (i = 1; i < NUM_COUNTERS; i++) { | ||
348 | if (pmu->events[i] && | ||
349 | !ddr_perf_filters_compatible(event, pmu->events[i])) | ||
350 | return -EINVAL; | ||
351 | } | ||
352 | |||
353 | if (ddr_perf_is_filtered(event)) { | ||
354 | /* revert axi id masking(axi_mask) value */ | ||
355 | cfg1 ^= AXI_MASKING_REVERT; | ||
356 | writel(cfg1, pmu->base + COUNTER_DPCR1); | ||
357 | } | ||
358 | } | ||
290 | 359 | ||
291 | counter = ddr_perf_alloc_counter(pmu, cfg); | 360 | counter = ddr_perf_alloc_counter(pmu, cfg); |
292 | if (counter < 0) { | 361 | if (counter < 0) { |
@@ -472,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev) | |||
472 | if (!name) | 541 | if (!name) |
473 | return -ENOMEM; | 542 | return -ENOMEM; |
474 | 543 | ||
544 | pmu->devtype_data = of_device_get_match_data(&pdev->dev); | ||
545 | |||
475 | pmu->cpu = raw_smp_processor_id(); | 546 | pmu->cpu = raw_smp_processor_id(); |
476 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, | 547 | ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, |
477 | DDR_CPUHP_CB_NAME, | 548 | DDR_CPUHP_CB_NAME, |
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 6ad0823bcf23..e42d4464c2cf 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | |||
@@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu, | |||
217 | 217 | ||
218 | /* Read and init IRQ */ | 218 | /* Read and init IRQ */ |
219 | irq = platform_get_irq(pdev, 0); | 219 | irq = platform_get_irq(pdev, 0); |
220 | if (irq < 0) { | 220 | if (irq < 0) |
221 | dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq); | ||
222 | return irq; | 221 | return irq; |
223 | } | ||
224 | 222 | ||
225 | ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr, | 223 | ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr, |
226 | IRQF_NOBALANCING | IRQF_NO_THREAD, | 224 | IRQF_NOBALANCING | IRQF_NO_THREAD, |
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 4f2917f3e25e..f28063873e11 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | |||
@@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu, | |||
207 | 207 | ||
208 | /* Read and init IRQ */ | 208 | /* Read and init IRQ */ |
209 | irq = platform_get_irq(pdev, 0); | 209 | irq = platform_get_irq(pdev, 0); |
210 | if (irq < 0) { | 210 | if (irq < 0) |
211 | dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq); | ||
212 | return irq; | 211 | return irq; |
213 | } | ||
214 | 212 | ||
215 | ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr, | 213 | ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr, |
216 | IRQF_NOBALANCING | IRQF_NO_THREAD, | 214 | IRQF_NOBALANCING | IRQF_NO_THREAD, |
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 9153e093f9df..078b8dc57250 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | |||
@@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu, | |||
206 | 206 | ||
207 | /* Read and init IRQ */ | 207 | /* Read and init IRQ */ |
208 | irq = platform_get_irq(pdev, 0); | 208 | irq = platform_get_irq(pdev, 0); |
209 | if (irq < 0) { | 209 | if (irq < 0) |
210 | dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq); | ||
211 | return irq; | 210 | return irq; |
212 | } | ||
213 | 211 | ||
214 | ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr, | 212 | ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr, |
215 | IRQF_NOBALANCING | IRQF_NO_THREAD, | 213 | IRQF_NOBALANCING | IRQF_NO_THREAD, |
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index d06182fe14b8..21d6991dbe0b 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c | |||
@@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) | |||
909 | cluster->cluster_id = fw_cluster_id; | 909 | cluster->cluster_id = fw_cluster_id; |
910 | 910 | ||
911 | irq = platform_get_irq(sdev, 0); | 911 | irq = platform_get_irq(sdev, 0); |
912 | if (irq < 0) { | 912 | if (irq < 0) |
913 | dev_err(&pdev->dev, | ||
914 | "Failed to get valid irq for cluster %ld\n", | ||
915 | fw_cluster_id); | ||
916 | return irq; | 913 | return irq; |
917 | } | ||
918 | irq_set_status_flags(irq, IRQ_NOAUTOEN); | 914 | irq_set_status_flags(irq, IRQ_NOAUTOEN); |
919 | cluster->irq = irq; | 915 | cluster->irq = irq; |
920 | 916 | ||
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 3259e2ebeb39..7e328d6385c3 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c | |||
@@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev) | |||
1901 | } | 1901 | } |
1902 | 1902 | ||
1903 | irq = platform_get_irq(pdev, 0); | 1903 | irq = platform_get_irq(pdev, 0); |
1904 | if (irq < 0) { | 1904 | if (irq < 0) |
1905 | dev_err(&pdev->dev, "No IRQ resource\n"); | ||
1906 | return -EINVAL; | 1905 | return -EINVAL; |
1907 | } | ||
1908 | 1906 | ||
1909 | rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr, | 1907 | rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr, |
1910 | IRQF_NOBALANCING | IRQF_NO_THREAD, | 1908 | IRQF_NOBALANCING | IRQF_NO_THREAD, |
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h index 95a159a4137f..80ca61058dd2 100644 --- a/include/asm-generic/error-injection.h +++ b/include/asm-generic/error-injection.h | |||
@@ -16,6 +16,8 @@ struct error_injection_entry { | |||
16 | int etype; | 16 | int etype; |
17 | }; | 17 | }; |
18 | 18 | ||
19 | struct pt_regs; | ||
20 | |||
19 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION | 21 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION |
20 | /* | 22 | /* |
21 | * Whitelist ganerating macro. Specify functions which can be | 23 | * Whitelist ganerating macro. Specify functions which can be |
@@ -28,8 +30,12 @@ static struct error_injection_entry __used \ | |||
28 | .addr = (unsigned long)fname, \ | 30 | .addr = (unsigned long)fname, \ |
29 | .etype = EI_ETYPE_##_etype, \ | 31 | .etype = EI_ETYPE_##_etype, \ |
30 | }; | 32 | }; |
33 | |||
34 | void override_function_with_return(struct pt_regs *regs); | ||
31 | #else | 35 | #else |
32 | #define ALLOW_ERROR_INJECTION(fname, _etype) | 36 | #define ALLOW_ERROR_INJECTION(fname, _etype) |
37 | |||
38 | static inline void override_function_with_return(struct pt_regs *regs) { } | ||
33 | #endif | 39 | #endif |
34 | #endif | 40 | #endif |
35 | 41 | ||
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 9426b9aaed86..9d0e20a2ac83 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -1302,11 +1302,16 @@ static inline int lpit_read_residency_count_address(u64 *address) | |||
1302 | #endif | 1302 | #endif |
1303 | 1303 | ||
1304 | #ifdef CONFIG_ACPI_PPTT | 1304 | #ifdef CONFIG_ACPI_PPTT |
1305 | int acpi_pptt_cpu_is_thread(unsigned int cpu); | ||
1305 | int find_acpi_cpu_topology(unsigned int cpu, int level); | 1306 | int find_acpi_cpu_topology(unsigned int cpu, int level); |
1306 | int find_acpi_cpu_topology_package(unsigned int cpu); | 1307 | int find_acpi_cpu_topology_package(unsigned int cpu); |
1307 | int find_acpi_cpu_topology_hetero_id(unsigned int cpu); | 1308 | int find_acpi_cpu_topology_hetero_id(unsigned int cpu); |
1308 | int find_acpi_cpu_cache_topology(unsigned int cpu, int level); | 1309 | int find_acpi_cpu_cache_topology(unsigned int cpu, int level); |
1309 | #else | 1310 | #else |
1311 | static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) | ||
1312 | { | ||
1313 | return -EINVAL; | ||
1314 | } | ||
1310 | static inline int find_acpi_cpu_topology(unsigned int cpu, int level) | 1315 | static inline int find_acpi_cpu_topology(unsigned int cpu, int level) |
1311 | { | 1316 | { |
1312 | return -EINVAL; | 1317 | return -EINVAL; |
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 1cfe05ea1d89..42f2b5126094 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h | |||
@@ -33,4 +33,30 @@ unsigned long topology_get_freq_scale(int cpu) | |||
33 | return per_cpu(freq_scale, cpu); | 33 | return per_cpu(freq_scale, cpu); |
34 | } | 34 | } |
35 | 35 | ||
36 | struct cpu_topology { | ||
37 | int thread_id; | ||
38 | int core_id; | ||
39 | int package_id; | ||
40 | int llc_id; | ||
41 | cpumask_t thread_sibling; | ||
42 | cpumask_t core_sibling; | ||
43 | cpumask_t llc_sibling; | ||
44 | }; | ||
45 | |||
46 | #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY | ||
47 | extern struct cpu_topology cpu_topology[NR_CPUS]; | ||
48 | |||
49 | #define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) | ||
50 | #define topology_core_id(cpu) (cpu_topology[cpu].core_id) | ||
51 | #define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) | ||
52 | #define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) | ||
53 | #define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) | ||
54 | void init_cpu_topology(void); | ||
55 | void store_cpu_topology(unsigned int cpuid); | ||
56 | const struct cpumask *cpu_coregroup_mask(int cpu); | ||
57 | void update_siblings_masks(unsigned int cpu); | ||
58 | void remove_cpu_topology(unsigned int cpuid); | ||
59 | void reset_cpu_topology(void); | ||
60 | #endif | ||
61 | |||
36 | #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ | 62 | #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bb9a0db89f1a..12ae4b87494e 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -256,7 +256,10 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
256 | {return 0;} | 256 | {return 0;} |
257 | #endif | 257 | #endif |
258 | 258 | ||
259 | #define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ | 259 | #define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \ |
260 | idx, \ | ||
261 | state, \ | ||
262 | is_retention) \ | ||
260 | ({ \ | 263 | ({ \ |
261 | int __ret = 0; \ | 264 | int __ret = 0; \ |
262 | \ | 265 | \ |
@@ -268,7 +271,7 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
268 | if (!is_retention) \ | 271 | if (!is_retention) \ |
269 | __ret = cpu_pm_enter(); \ | 272 | __ret = cpu_pm_enter(); \ |
270 | if (!__ret) { \ | 273 | if (!__ret) { \ |
271 | __ret = low_level_idle_enter(idx); \ | 274 | __ret = low_level_idle_enter(state); \ |
272 | if (!is_retention) \ | 275 | if (!is_retention) \ |
273 | cpu_pm_exit(); \ | 276 | cpu_pm_exit(); \ |
274 | } \ | 277 | } \ |
@@ -277,9 +280,15 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
277 | }) | 280 | }) |
278 | 281 | ||
279 | #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ | 282 | #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ |
280 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) | 283 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0) |
281 | 284 | ||
282 | #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ | 285 | #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ |
283 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) | 286 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1) |
287 | |||
288 | #define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \ | ||
289 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0) | ||
290 | |||
291 | #define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \ | ||
292 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1) | ||
284 | 293 | ||
285 | #endif /* _LINUX_CPUIDLE_H */ | 294 | #endif /* _LINUX_CPUIDLE_H */ |
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h index 280c61ecbf20..635a95caf29f 100644 --- a/include/linux/error-injection.h +++ b/include/linux/error-injection.h | |||
@@ -2,16 +2,16 @@ | |||
2 | #ifndef _LINUX_ERROR_INJECTION_H | 2 | #ifndef _LINUX_ERROR_INJECTION_H |
3 | #define _LINUX_ERROR_INJECTION_H | 3 | #define _LINUX_ERROR_INJECTION_H |
4 | 4 | ||
5 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION | 5 | #include <linux/compiler.h> |
6 | #include <asm-generic/error-injection.h> | ||
6 | 7 | ||
7 | #include <asm/error-injection.h> | 8 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION |
8 | 9 | ||
9 | extern bool within_error_injection_list(unsigned long addr); | 10 | extern bool within_error_injection_list(unsigned long addr); |
10 | extern int get_injectable_error_type(unsigned long addr); | 11 | extern int get_injectable_error_type(unsigned long addr); |
11 | 12 | ||
12 | #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ | 13 | #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ |
13 | 14 | ||
14 | #include <asm-generic/error-injection.h> | ||
15 | static inline bool within_error_injection_list(unsigned long addr) | 15 | static inline bool within_error_injection_list(unsigned long addr) |
16 | { | 16 | { |
17 | return false; | 17 | return false; |
diff --git a/include/linux/psci.h b/include/linux/psci.h index a8a15613c157..e2bacc6fd2f2 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h | |||
@@ -15,8 +15,8 @@ | |||
15 | 15 | ||
16 | bool psci_tos_resident_on(int cpu); | 16 | bool psci_tos_resident_on(int cpu); |
17 | 17 | ||
18 | int psci_cpu_init_idle(unsigned int cpu); | 18 | int psci_cpu_suspend_enter(u32 state); |
19 | int psci_cpu_suspend_enter(unsigned long index); | 19 | bool psci_power_state_is_valid(u32 state); |
20 | 20 | ||
21 | enum psci_conduit { | 21 | enum psci_conduit { |
22 | PSCI_CONDUIT_NONE, | 22 | PSCI_CONDUIT_NONE, |
diff --git a/include/linux/random.h b/include/linux/random.h index 1f7dced2bba6..f189c927fdea 100644 --- a/include/linux/random.h +++ b/include/linux/random.h | |||
@@ -19,6 +19,7 @@ struct random_ready_callback { | |||
19 | }; | 19 | }; |
20 | 20 | ||
21 | extern void add_device_randomness(const void *, unsigned int); | 21 | extern void add_device_randomness(const void *, unsigned int); |
22 | extern void add_bootloader_randomness(const void *, unsigned int); | ||
22 | 23 | ||
23 | #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) | 24 | #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) |
24 | static inline void add_latent_entropy(void) | 25 | static inline void add_latent_entropy(void) |
diff --git a/include/linux/topology.h b/include/linux/topology.h index 47a3e3c08036..2a19d196af28 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -27,6 +27,7 @@ | |||
27 | #ifndef _LINUX_TOPOLOGY_H | 27 | #ifndef _LINUX_TOPOLOGY_H |
28 | #define _LINUX_TOPOLOGY_H | 28 | #define _LINUX_TOPOLOGY_H |
29 | 29 | ||
30 | #include <linux/arch_topology.h> | ||
30 | #include <linux/cpumask.h> | 31 | #include <linux/cpumask.h> |
31 | #include <linux/bitops.h> | 32 | #include <linux/bitops.h> |
32 | #include <linux/mmzone.h> | 33 | #include <linux/mmzone.h> |
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..2e927b3e9d6c 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h | |||
@@ -229,4 +229,9 @@ struct prctl_mm_map { | |||
229 | # define PR_PAC_APDBKEY (1UL << 3) | 229 | # define PR_PAC_APDBKEY (1UL << 3) |
230 | # define PR_PAC_APGAKEY (1UL << 4) | 230 | # define PR_PAC_APGAKEY (1UL << 4) |
231 | 231 | ||
232 | /* Tagged user address controls for arm64 */ | ||
233 | #define PR_SET_TAGGED_ADDR_CTRL 55 | ||
234 | #define PR_GET_TAGGED_ADDR_CTRL 56 | ||
235 | # define PR_TAGGED_ADDR_ENABLE (1UL << 0) | ||
236 | |||
232 | #endif /* _LINUX_PRCTL_H */ | 237 | #endif /* _LINUX_PRCTL_H */ |
diff --git a/init/Kconfig b/init/Kconfig index bd7d650d4a99..d96127ebc44e 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -30,6 +30,9 @@ config CC_CAN_LINK | |||
30 | config CC_HAS_ASM_GOTO | 30 | config CC_HAS_ASM_GOTO |
31 | def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) | 31 | def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) |
32 | 32 | ||
33 | config TOOLS_SUPPORT_RELR | ||
34 | def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) | ||
35 | |||
33 | config CC_HAS_WARN_MAYBE_UNINITIALIZED | 36 | config CC_HAS_WARN_MAYBE_UNINITIALIZED |
34 | def_bool $(cc-option,-Wmaybe-uninitialized) | 37 | def_bool $(cc-option,-Wmaybe-uninitialized) |
35 | help | 38 | help |
diff --git a/kernel/sys.c b/kernel/sys.c index 2969304c29fe..ec48396b4943 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -124,6 +124,12 @@ | |||
124 | #ifndef PAC_RESET_KEYS | 124 | #ifndef PAC_RESET_KEYS |
125 | # define PAC_RESET_KEYS(a, b) (-EINVAL) | 125 | # define PAC_RESET_KEYS(a, b) (-EINVAL) |
126 | #endif | 126 | #endif |
127 | #ifndef SET_TAGGED_ADDR_CTRL | ||
128 | # define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) | ||
129 | #endif | ||
130 | #ifndef GET_TAGGED_ADDR_CTRL | ||
131 | # define GET_TAGGED_ADDR_CTRL() (-EINVAL) | ||
132 | #endif | ||
127 | 133 | ||
128 | /* | 134 | /* |
129 | * this is where the system-wide overflow UID and GID are defined, for | 135 | * this is where the system-wide overflow UID and GID are defined, for |
@@ -2492,6 +2498,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
2492 | return -EINVAL; | 2498 | return -EINVAL; |
2493 | error = PAC_RESET_KEYS(me, arg2); | 2499 | error = PAC_RESET_KEYS(me, arg2); |
2494 | break; | 2500 | break; |
2501 | case PR_SET_TAGGED_ADDR_CTRL: | ||
2502 | if (arg3 || arg4 || arg5) | ||
2503 | return -EINVAL; | ||
2504 | error = SET_TAGGED_ADDR_CTRL(arg2); | ||
2505 | break; | ||
2506 | case PR_GET_TAGGED_ADDR_CTRL: | ||
2507 | if (arg2 || arg3 || arg4 || arg5) | ||
2508 | return -EINVAL; | ||
2509 | error = GET_TAGGED_ADDR_CTRL(); | ||
2510 | break; | ||
2495 | default: | 2511 | default: |
2496 | error = -EINVAL; | 2512 | error = -EINVAL; |
2497 | break; | 2513 | break; |
diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh new file mode 100755 index 000000000000..97a2c844a95e --- /dev/null +++ b/scripts/tools-support-relr.sh | |||
@@ -0,0 +1,16 @@ | |||
1 | #!/bin/sh -eu | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | tmp_file=$(mktemp) | ||
5 | trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT | ||
6 | |||
7 | cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1 | ||
8 | void *p = &p; | ||
9 | END | ||
10 | "$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file | ||
11 | |||
12 | # Despite printing an error message, GNU nm still exits with exit code 0 if it | ||
13 | # sees a relr section. So we need to check that nothing is printed to stderr. | ||
14 | test -z "$("$NM" $tmp_file 2>&1 >/dev/null)" | ||
15 | |||
16 | "$OBJCOPY" -O binary $tmp_file $tmp_file.bin | ||
diff --git a/tools/testing/selftests/arm64/.gitignore b/tools/testing/selftests/arm64/.gitignore new file mode 100644 index 000000000000..e8fae8d61ed6 --- /dev/null +++ b/tools/testing/selftests/arm64/.gitignore | |||
@@ -0,0 +1 @@ | |||
tags_test | |||
diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile new file mode 100644 index 000000000000..a61b2e743e99 --- /dev/null +++ b/tools/testing/selftests/arm64/Makefile | |||
@@ -0,0 +1,11 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | # ARCH can be overridden by the user for cross compiling | ||
4 | ARCH ?= $(shell uname -m 2>/dev/null || echo not) | ||
5 | |||
6 | ifneq (,$(filter $(ARCH),aarch64 arm64)) | ||
7 | TEST_GEN_PROGS := tags_test | ||
8 | TEST_PROGS := run_tags_test.sh | ||
9 | endif | ||
10 | |||
11 | include ../lib.mk | ||
diff --git a/tools/testing/selftests/arm64/run_tags_test.sh b/tools/testing/selftests/arm64/run_tags_test.sh new file mode 100755 index 000000000000..745f11379930 --- /dev/null +++ b/tools/testing/selftests/arm64/run_tags_test.sh | |||
@@ -0,0 +1,12 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | echo "--------------------" | ||
5 | echo "running tags test" | ||
6 | echo "--------------------" | ||
7 | ./tags_test | ||
8 | if [ $? -ne 0 ]; then | ||
9 | echo "[FAIL]" | ||
10 | else | ||
11 | echo "[PASS]" | ||
12 | fi | ||
diff --git a/tools/testing/selftests/arm64/tags_test.c b/tools/testing/selftests/arm64/tags_test.c new file mode 100644 index 000000000000..5701163460ef --- /dev/null +++ b/tools/testing/selftests/arm64/tags_test.c | |||
@@ -0,0 +1,31 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | #include <stdio.h> | ||
4 | #include <stdlib.h> | ||
5 | #include <unistd.h> | ||
6 | #include <stdint.h> | ||
7 | #include <sys/prctl.h> | ||
8 | #include <sys/utsname.h> | ||
9 | |||
10 | #define SHIFT_TAG(tag) ((uint64_t)(tag) << 56) | ||
11 | #define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \ | ||
12 | SHIFT_TAG(tag)) | ||
13 | |||
14 | int main(void) | ||
15 | { | ||
16 | static int tbi_enabled = 0; | ||
17 | unsigned long tag = 0; | ||
18 | struct utsname *ptr; | ||
19 | int err; | ||
20 | |||
21 | if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0) | ||
22 | tbi_enabled = 1; | ||
23 | ptr = (struct utsname *)malloc(sizeof(*ptr)); | ||
24 | if (tbi_enabled) | ||
25 | tag = 0x42; | ||
26 | ptr = (struct utsname *)SET_TAG(ptr, tag); | ||
27 | err = uname(ptr); | ||
28 | free(ptr); | ||
29 | |||
30 | return err; | ||
31 | } | ||