196 files changed, 2886 insertions, 641 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index d6d862db3b5d..bfd29bc8d37a 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu | |||
@@ -375,3 +375,19 @@ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> | |||
375 | Description: information about CPUs heterogeneity. | 375 | Description: information about CPUs heterogeneity. |
376 | 376 | ||
377 | cpu_capacity: capacity of cpu#. | 377 | cpu_capacity: capacity of cpu#. |
378 | |||
379 | What: /sys/devices/system/cpu/vulnerabilities | ||
380 | /sys/devices/system/cpu/vulnerabilities/meltdown | ||
381 | /sys/devices/system/cpu/vulnerabilities/spectre_v1 | ||
382 | /sys/devices/system/cpu/vulnerabilities/spectre_v2 | ||
383 | Date: January 2018 | ||
384 | Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> | ||
385 | Description: Information about CPU vulnerabilities | ||
386 | |||
387 | The files are named after the code names of CPU | ||
388 | vulnerabilities. The output of those files reflects the | ||
389 | state of the CPUs in the system. Possible output values: | ||
390 | |||
391 | "Not affected" CPU is not affected by the vulnerability | ||
392 | "Vulnerable" CPU is affected and no mitigation in effect | ||
393 | "Mitigation: $M" CPU is affected and mitigation $M is in effect | ||
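The vulnerability entries above are plain one-line text files, so their state can be checked from userspace with nothing more than open/read. A minimal sketch (not part of this patch; it relies only on the file names listed in the ABI entry above):

    #include <stdio.h>

    int main(void)
    {
            static const char *names[] = { "meltdown", "spectre_v1", "spectre_v2" };
            char path[128], line[128];

            for (unsigned int i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
                    FILE *f;

                    snprintf(path, sizeof(path),
                             "/sys/devices/system/cpu/vulnerabilities/%s", names[i]);
                    f = fopen(path, "r");
                    if (!f)
                            continue;       /* entry absent on kernels without this patch */
                    if (fgets(line, sizeof(line), f))
                            printf("%s: %s", names[i], line);   /* line already ends in '\n' */
                    fclose(f);
            }
            return 0;
    }

Each file reports exactly one of the documented values: "Not affected", "Vulnerable", or "Mitigation: $M".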
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index af7104aaffd9..46b26bfee27b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -713,9 +713,6 @@ | |||
713 | It will be ignored when crashkernel=X,high is not used | 713 | It will be ignored when crashkernel=X,high is not used |
714 | or memory reserved is below 4G. | 714 | or memory reserved is below 4G. |
715 | 715 | ||
716 | crossrelease_fullstack | ||
717 | [KNL] Allow to record full stack trace in cross-release | ||
718 | |||
719 | cryptomgr.notests | 716 | cryptomgr.notests |
720 | [KNL] Disable crypto self-tests | 717 | [KNL] Disable crypto self-tests |
721 | 718 | ||
@@ -2626,6 +2623,11 @@ | |||
2626 | nosmt [KNL,S390] Disable symmetric multithreading (SMT). | 2623 | nosmt [KNL,S390] Disable symmetric multithreading (SMT). |
2627 | Equivalent to smt=1. | 2624 | Equivalent to smt=1. |
2628 | 2625 | ||
2626 | nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 | ||
2627 | (indirect branch prediction) vulnerability. System may | ||
2628 | allow data leaks with this option, which is equivalent | ||
2629 | to spectre_v2=off. | ||
2630 | |||
2629 | noxsave [BUGS=X86] Disables x86 extended register state save | 2631 | noxsave [BUGS=X86] Disables x86 extended register state save |
2630 | and restore using xsave. The kernel will fallback to | 2632 | and restore using xsave. The kernel will fallback to |
2631 | enabling legacy floating-point and sse state. | 2633 | enabling legacy floating-point and sse state. |
@@ -2712,8 +2714,6 @@ | |||
2712 | steal time is computed, but won't influence scheduler | 2714 | steal time is computed, but won't influence scheduler |
2713 | behaviour | 2715 | behaviour |
2714 | 2716 | ||
2715 | nopti [X86-64] Disable kernel page table isolation | ||
2716 | |||
2717 | nolapic [X86-32,APIC] Do not enable or use the local APIC. | 2717 | nolapic [X86-32,APIC] Do not enable or use the local APIC. |
2718 | 2718 | ||
2719 | nolapic_timer [X86-32,APIC] Do not use the local APIC timer. | 2719 | nolapic_timer [X86-32,APIC] Do not use the local APIC timer. |
@@ -3100,6 +3100,12 @@ | |||
3100 | pcie_scan_all Scan all possible PCIe devices. Otherwise we | 3100 | pcie_scan_all Scan all possible PCIe devices. Otherwise we |
3101 | only look for one device below a PCIe downstream | 3101 | only look for one device below a PCIe downstream |
3102 | port. | 3102 | port. |
3103 | big_root_window Try to add a big 64bit memory window to the PCIe | ||
3104 | root complex on AMD CPUs. Some GFX hardware | ||
3105 | can resize a BAR to allow access to all VRAM. | ||
3106 | Adding the window is slightly risky (it may | ||
3107 | conflict with unreported devices), so this | ||
3108 | taints the kernel. | ||
3103 | 3109 | ||
3104 | pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power | 3110 | pcie_aspm= [PCIE] Forcibly enable or disable PCIe Active State Power |
3105 | Management. | 3111 | Management. |
@@ -3288,11 +3294,20 @@ | |||
3288 | pt. [PARIDE] | 3294 | pt. [PARIDE] |
3289 | See Documentation/blockdev/paride.txt. | 3295 | See Documentation/blockdev/paride.txt. |
3290 | 3296 | ||
3291 | pti= [X86_64] | 3297 | pti= [X86_64] Control Page Table Isolation of user and |
3292 | Control user/kernel address space isolation: | 3298 | kernel address spaces. Disabling this feature |
3293 | on - enable | 3299 | removes hardening, but improves performance of |
3294 | off - disable | 3300 | system calls and interrupts. |
3295 | auto - default setting | 3301 | |
3302 | on - unconditionally enable | ||
3303 | off - unconditionally disable | ||
3304 | auto - kernel detects whether your CPU model is | ||
3305 | vulnerable to issues that PTI mitigates | ||
3306 | |||
3307 | Not specifying this option is equivalent to pti=auto. | ||
3308 | |||
3309 | nopti [X86_64] | ||
3310 | Equivalent to pti=off | ||
3296 | 3311 | ||
3297 | pty.legacy_count= | 3312 | pty.legacy_count= |
3298 | [KNL] Number of legacy pty's. Overwrites compiled-in | 3313 | [KNL] Number of legacy pty's. Overwrites compiled-in |
@@ -3943,6 +3958,29 @@ | |||
3943 | sonypi.*= [HW] Sony Programmable I/O Control Device driver | 3958 | sonypi.*= [HW] Sony Programmable I/O Control Device driver |
3944 | See Documentation/laptops/sonypi.txt | 3959 | See Documentation/laptops/sonypi.txt |
3945 | 3960 | ||
3961 | spectre_v2= [X86] Control mitigation of Spectre variant 2 | ||
3962 | (indirect branch speculation) vulnerability. | ||
3963 | |||
3964 | on - unconditionally enable | ||
3965 | off - unconditionally disable | ||
3966 | auto - kernel detects whether your CPU model is | ||
3967 | vulnerable | ||
3968 | |||
3969 | Selecting 'on' will, and 'auto' may, choose a | ||
3970 | mitigation method at run time according to the | ||
3971 | CPU, the available microcode, the setting of the | ||
3972 | CONFIG_RETPOLINE configuration option, and the | ||
3973 | compiler with which the kernel was built. | ||
3974 | |||
3975 | Specific mitigations can also be selected manually: | ||
3976 | |||
3977 | retpoline - replace indirect branches | ||
3978 | retpoline,generic - Google's original retpoline | ||
3979 | retpoline,amd - AMD-specific minimal thunk | ||
3980 | |||
3981 | Not specifying this option is equivalent to | ||
3982 | spectre_v2=auto. | ||
3983 | |||
3946 | spia_io_base= [HW,MTD] | 3984 | spia_io_base= [HW,MTD] |
3947 | spia_fio_base= | 3985 | spia_fio_base= |
3948 | spia_pedr= | 3986 | spia_pedr= |
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt index c0727dc36271..f2f3f8592a6f 100644 --- a/Documentation/filesystems/nilfs2.txt +++ b/Documentation/filesystems/nilfs2.txt | |||
@@ -25,8 +25,8 @@ available from the following download page. At least "mkfs.nilfs2", | |||
25 | cleaner or garbage collector) are required. Details on the tools are | 25 | cleaner or garbage collector) are required. Details on the tools are |
26 | described in the man pages included in the package. | 26 | described in the man pages included in the package. |
27 | 27 | ||
28 | Project web page: http://nilfs.sourceforge.net/ | 28 | Project web page: https://nilfs.sourceforge.io/ |
29 | Download page: http://nilfs.sourceforge.net/en/download.html | 29 | Download page: https://nilfs.sourceforge.io/en/download.html |
30 | List info: http://vger.kernel.org/vger-lists.html#linux-nilfs | 30 | List info: http://vger.kernel.org/vger-lists.html#linux-nilfs |
31 | 31 | ||
32 | Caveats | 32 | Caveats |
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt index 262722d8867b..c4a293a03c33 100644 --- a/Documentation/kbuild/kconfig-language.txt +++ b/Documentation/kbuild/kconfig-language.txt | |||
@@ -200,10 +200,14 @@ module state. Dependency expressions have the following syntax: | |||
200 | <expr> ::= <symbol> (1) | 200 | <expr> ::= <symbol> (1) |
201 | <symbol> '=' <symbol> (2) | 201 | <symbol> '=' <symbol> (2) |
202 | <symbol> '!=' <symbol> (3) | 202 | <symbol> '!=' <symbol> (3) |
203 | '(' <expr> ')' (4) | 203 | <symbol1> '<' <symbol2> (4) |
204 | '!' <expr> (5) | 204 | <symbol1> '>' <symbol2> (4) |
205 | <expr> '&&' <expr> (6) | 205 | <symbol1> '<=' <symbol2> (4) |
206 | <expr> '||' <expr> (7) | 206 | <symbol1> '>=' <symbol2> (4) |
207 | '(' <expr> ')' (5) | ||
208 | '!' <expr> (6) | ||
209 | <expr> '&&' <expr> (7) | ||
210 | <expr> '||' <expr> (8) | ||
207 | 211 | ||
208 | Expressions are listed in decreasing order of precedence. | 212 | Expressions are listed in decreasing order of precedence. |
209 | 213 | ||
@@ -214,10 +218,13 @@ Expressions are listed in decreasing order of precedence. | |||
214 | otherwise 'n'. | 218 | otherwise 'n'. |
215 | (3) If the values of both symbols are equal, it returns 'n', | 219 | (3) If the values of both symbols are equal, it returns 'n', |
216 | otherwise 'y'. | 220 | otherwise 'y'. |
217 | (4) Returns the value of the expression. Used to override precedence. | 221 | (4) If the value of <symbol1> is respectively lower than, greater than, |
218 | (5) Returns the result of (2-/expr/). | 222 | lower-or-equal to, or greater-or-equal to the value of <symbol2>, it returns 'y', |
219 | (6) Returns the result of min(/expr/, /expr/). | 223 | otherwise 'n'. |
220 | (7) Returns the result of max(/expr/, /expr/). | 224 | (5) Returns the value of the expression. Used to override precedence. |
225 | (6) Returns the result of (2-/expr/). | ||
226 | (7) Returns the result of min(/expr/, /expr/). | ||
227 | (8) Returns the result of max(/expr/, /expr/). | ||
221 | 228 | ||
222 | An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2 | 229 | An expression can have a value of 'n', 'm' or 'y' (or 0, 1, 2 |
223 | respectively for calculations). A menu entry becomes visible when its | 230 | respectively for calculations). A menu entry becomes visible when its |
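The tristate arithmetic described above (with 'n', 'm' and 'y' treated as 0, 1 and 2) is easy to model directly. A small illustrative sketch in C, with names invented for this example rather than taken from the kconfig sources:

    /* 'n', 'm', 'y' map to 0, 1, 2 for calculations, as stated above. */
    enum tristate { TRI_N = 0, TRI_M = 1, TRI_Y = 2 };

    static enum tristate tri_not(enum tristate e)
    {
            return (enum tristate)(2 - e);          /* rule (6): !expr */
    }

    static enum tristate tri_and(enum tristate a, enum tristate b)
    {
            return a < b ? a : b;                   /* rule (7): && is min() */
    }

    static enum tristate tri_or(enum tristate a, enum tristate b)
    {
            return a > b ? a : b;                   /* rule (8): || is max() */
    }

For example, tri_and(TRI_M, TRI_Y) evaluates to TRI_M, which is why an entry that depends on a symbol set to 'm' can itself be at most 'm'.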
diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt index 441a4b9b666f..5908a21fddb6 100644 --- a/Documentation/usb/gadget-testing.txt +++ b/Documentation/usb/gadget-testing.txt | |||
@@ -693,7 +693,7 @@ such specification consists of a number of lines with an interval value | |||
693 | in each line. The rules stated above are best illustrated with an example: | 693 | in each line. The rules stated above are best illustrated with an example: |
694 | 694 | ||
695 | # mkdir functions/uvc.usb0/control/header/h | 695 | # mkdir functions/uvc.usb0/control/header/h |
696 | # cd functions/uvc.usb0/control/header/h | 696 | # cd functions/uvc.usb0/control/ |
697 | # ln -s header/h class/fs | 697 | # ln -s header/h class/fs |
698 | # ln -s header/h class/ss | 698 | # ln -s header/h class/ss |
699 | # mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p | 699 | # mkdir -p functions/uvc.usb0/streaming/uncompressed/u/360p |
diff --git a/Documentation/x86/pti.txt b/Documentation/x86/pti.txt new file mode 100644 index 000000000000..d11eff61fc9a --- /dev/null +++ b/Documentation/x86/pti.txt | |||
@@ -0,0 +1,186 @@ | |||
1 | Overview | ||
2 | ======== | ||
3 | |||
4 | Page Table Isolation (pti, previously known as KAISER[1]) is a | ||
5 | countermeasure against attacks on the shared user/kernel address | ||
6 | space such as the "Meltdown" approach[2]. | ||
7 | |||
8 | To mitigate this class of attacks, we create an independent set of | ||
9 | page tables for use only when running userspace applications. When | ||
10 | the kernel is entered via syscalls, interrupts or exceptions, the | ||
11 | page tables are switched to the full "kernel" copy. When the system | ||
12 | switches back to user mode, the user copy is used again. | ||
13 | |||
14 | The userspace page tables contain only a minimal amount of kernel | ||
15 | data: only what is needed to enter/exit the kernel such as the | ||
16 | entry/exit functions themselves and the interrupt descriptor table | ||
17 | (IDT). There are a few strictly unnecessary things that get mapped | ||
18 | such as the first C function when entering an interrupt (see | ||
19 | comments in pti.c). | ||
20 | |||
21 | This approach helps to ensure that side-channel attacks leveraging | ||
22 | the paging structures do not function when PTI is enabled. It can be | ||
23 | enabled by setting CONFIG_PAGE_TABLE_ISOLATION=y at compile time. | ||
24 | Once enabled at compile-time, it can be disabled at boot with the | ||
25 | 'nopti' or 'pti=' kernel parameters (see kernel-parameters.txt). | ||
26 | |||
27 | Page Table Management | ||
28 | ===================== | ||
29 | |||
30 | When PTI is enabled, the kernel manages two sets of page tables. | ||
31 | The first set is very similar to the single set which is present in | ||
32 | kernels without PTI. This includes a complete mapping of userspace | ||
33 | that the kernel can use for things like copy_to_user(). | ||
34 | |||
35 | Although _complete_, the user portion of the kernel page tables is | ||
36 | crippled by setting the NX bit in the top level. This ensures | ||
37 | that any missed kernel->user CR3 switch will immediately crash | ||
38 | userspace upon executing its first instruction. | ||
39 | |||
40 | The userspace page tables map only the kernel data needed to enter | ||
41 | and exit the kernel. This data is entirely contained in the 'struct | ||
42 | cpu_entry_area' structure which is placed in the fixmap which gives | ||
43 | each CPU's copy of the area a compile-time-fixed virtual address. | ||
44 | |||
45 | For new userspace mappings, the kernel makes the entries in its | ||
46 | page tables like normal. The only difference is when the kernel | ||
47 | makes entries in the top (PGD) level. In addition to setting the | ||
48 | entry in the main kernel PGD, a copy of the entry is made in the | ||
49 | userspace page tables' PGD. | ||
50 | |||
51 | This sharing at the PGD level also inherently shares all the lower | ||
52 | layers of the page tables. This leaves a single, shared set of | ||
53 | userspace page tables to manage. One PTE to lock, one set of | ||
54 | accessed bits, dirty bits, etc... | ||
55 | |||
56 | Overhead | ||
57 | ======== | ||
58 | |||
59 | Protection against side-channel attacks is important. But, | ||
60 | this protection comes at a cost: | ||
61 | |||
62 | 1. Increased Memory Use | ||
63 | a. Each process now needs an order-1 PGD instead of order-0. | ||
64 | (Consumes an additional 4k per process). | ||
65 | b. The 'cpu_entry_area' structure must be 2MB in size and 2MB | ||
66 | aligned so that it can be mapped by setting a single PMD | ||
67 | entry. This consumes nearly 2MB of RAM once the kernel | ||
68 | is decompressed, but no space in the kernel image itself. | ||
69 | |||
70 | 2. Runtime Cost | ||
71 | a. CR3 manipulation to switch between the page table copies | ||
72 | must be done at interrupt, syscall, and exception entry | ||
73 | and exit (it can be skipped when the kernel is interrupted, | ||
74 | though.) Moves to CR3 are on the order of a hundred | ||
75 | cycles, and are required at every entry and exit. | ||
76 | b. A "trampoline" must be used for SYSCALL entry. This | ||
77 | trampoline depends on a smaller set of resources than the | ||
78 | non-PTI SYSCALL entry code, so requires mapping fewer | ||
79 | things into the userspace page tables. The downside is | ||
80 | that stacks must be switched at entry time. | ||
81 | c. Global pages are disabled for all kernel structures not | ||
82 | mapped into both kernel and userspace page tables. This | ||
83 | feature of the MMU allows different processes to share TLB | ||
84 | entries mapping the kernel. Losing the feature means more | ||
85 | TLB misses after a context switch. The actual loss of | ||
86 | performance is very small, however, never exceeding 1%. | ||
87 | d. Process Context IDentifiers (PCID) is a CPU feature that | ||
88 | allows us to skip flushing the entire TLB when switching page | ||
89 | tables by setting a special bit in CR3 when the page tables | ||
90 | are changed. This makes switching the page tables (at context | ||
91 | switch, or kernel entry/exit) cheaper. But, on systems with | ||
92 | PCID support, the context switch code must flush both the user | ||
93 | and kernel entries out of the TLB. The user PCID TLB flush is | ||
94 | deferred until the exit to userspace, minimizing the cost. | ||
95 | See intel.com/sdm for the gory PCID/INVPCID details. | ||
96 | e. The userspace page tables must be populated for each new | ||
97 | process. Even without PTI, the shared kernel mappings | ||
98 | are created by copying top-level (PGD) entries into each | ||
99 | new process. But, with PTI, there are now *two* kernel | ||
100 | mappings: one in the kernel page tables that maps everything | ||
101 | and one for the entry/exit structures. At fork(), we need to | ||
102 | copy both. | ||
103 | f. In addition to the fork()-time copying, there must also | ||
104 | be an update to the userspace PGD any time a set_pgd() is done | ||
105 | on a PGD used to map userspace. This ensures that the kernel | ||
106 | and userspace copies always map the same userspace | ||
107 | memory. | ||
108 | g. On systems without PCID support, each CR3 write flushes | ||
109 | the entire TLB. That means that each syscall, interrupt | ||
110 | or exception flushes the TLB. | ||
111 | h. INVPCID is a TLB-flushing instruction which allows flushing | ||
112 | of TLB entries for non-current PCIDs. Some systems support | ||
113 | PCIDs, but do not support INVPCID. On these systems, addresses | ||
114 | can only be flushed from the TLB for the current PCID. When | ||
115 | flushing a kernel address, we need to flush all PCIDs, so a | ||
116 | single kernel address flush will require a TLB-flushing CR3 | ||
117 | write upon the next use of every PCID. | ||
118 | |||
119 | Possible Future Work | ||
120 | ==================== | ||
121 | 1. We can be more careful about not actually writing to CR3 | ||
122 | unless its value is actually changed. | ||
123 | 2. Allow PTI to be enabled/disabled at runtime in addition to the | ||
124 | boot-time switching. | ||
125 | |||
126 | Testing | ||
127 | ======== | ||
128 | |||
129 | To test stability of PTI, the following test procedure is recommended, | ||
130 | ideally doing all of these in parallel: | ||
131 | |||
132 | 1. Set CONFIG_DEBUG_ENTRY=y | ||
133 | 2. Run several copies of all of the tools/testing/selftests/x86/ tests | ||
134 | (excluding MPX and protection_keys) in a loop on multiple CPUs for | ||
135 | several minutes. These tests frequently uncover corner cases in the | ||
136 | kernel entry code. In general, old kernels might cause these tests | ||
137 | themselves to crash, but they should never crash the kernel. | ||
138 | 3. Run the 'perf' tool in a mode (top or record) that generates many | ||
139 | frequent performance monitoring non-maskable interrupts (see "NMI" | ||
140 | in /proc/interrupts). This exercises the NMI entry/exit code which | ||
141 | is known to trigger bugs in code paths that did not expect to be | ||
142 | interrupted, including nested NMIs. Using "-c" boosts the rate of | ||
143 | NMIs, and using two -c with separate counters encourages nested NMIs | ||
144 | and less deterministic behavior. | ||
145 | |||
146 | while true; do perf record -c 10000 -e instructions,cycles -a sleep 10; done | ||
147 | |||
148 | 4. Launch a KVM virtual machine. | ||
149 | 5. Run 32-bit binaries on systems supporting the SYSCALL instruction. | ||
150 | This has been a lightly-tested code path and needs extra scrutiny. | ||
151 | |||
152 | Debugging | ||
153 | ========= | ||
154 | |||
155 | Bugs in PTI cause a few different signatures of crashes | ||
156 | that are worth noting here. | ||
157 | |||
158 | * Failures of the selftests/x86 code. Usually a bug in one of the | ||
159 | more obscure corners of entry_64.S | ||
160 | * Crashes in early boot, especially around CPU bringup. Bugs | ||
161 | in the trampoline code or mappings cause these. | ||
162 | * Crashes at the first interrupt. Caused by bugs in entry_64.S, | ||
163 | like screwing up a page table switch. Also caused by | ||
164 | incorrectly mapping the IRQ handler entry code. | ||
165 | * Crashes at the first NMI. The NMI code is separate from main | ||
166 | interrupt handlers and can have bugs that do not affect | ||
167 | normal interrupts. Also caused by incorrectly mapping NMI | ||
168 | code. NMIs that interrupt the entry code must be very | ||
169 | careful and can be the cause of crashes that show up when | ||
170 | running perf. | ||
171 | * Kernel crashes at the first exit to userspace. entry_64.S | ||
172 | bugs, or failing to map some of the exit code. | ||
173 | * Crashes at first interrupt that interrupts userspace. The paths | ||
174 | in entry_64.S that return to userspace are sometimes separate | ||
175 | from the ones that return to the kernel. | ||
176 | * Double faults: overflowing the kernel stack because of page | ||
177 | faults upon page faults. Caused by touching non-pti-mapped | ||
178 | data in the entry code, or forgetting to switch to kernel | ||
179 | CR3 before calling into C functions which are not pti-mapped. | ||
180 | * Userspace segfaults early in boot, sometimes manifesting | ||
181 | as mount(8) failing to mount the rootfs. These have | ||
182 | tended to be TLB invalidation issues. Usually invalidating | ||
183 | the wrong PCID, or otherwise missing an invalidation. | ||
184 | |||
185 | 1. https://gruss.cc/files/kaiser.pdf | ||
186 | 2. https://meltdownattack.com/meltdown.pdf | ||
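The "Page Table Management" section of the new document explains that when the kernel sets a PGD entry mapping userspace, the entry is also copied into the user half of the order-1 PGD allocation. A simplified sketch of that idea follows; the helper names are illustrative only (the real x86 code uses kernel_to_user_pgdp() and related helpers in arch/x86/mm/pti.c and arch/x86/include/asm/pgtable.h):

    #include <linux/types.h>
    #include <asm/page.h>           /* PAGE_SIZE, PAGE_MASK */
    #include <asm/pgtable.h>        /* pgd_t, PTRS_PER_PGD */

    /*
     * Sketch: with an order-1 PGD, the kernel PGD occupies the first 4k
     * page and the user copy the second, so the user entry sits at a
     * fixed offset from the kernel entry.
     */
    static pgd_t *user_pgdp(pgd_t *kernel_pgdp)
    {
            return (pgd_t *)((unsigned long)kernel_pgdp + PAGE_SIZE);
    }

    static bool pgdp_maps_user(pgd_t *pgdp)
    {
            /* Illustrative test: the entry's index within the PGD page
             * tells us whether it covers the lower (user) half of the
             * address space. */
            return (((unsigned long)pgdp & ~PAGE_MASK) / sizeof(pgd_t)) < PTRS_PER_PGD / 2;
    }

    static void sketch_set_pgd(pgd_t *pgdp, pgd_t pgd)
    {
            /* Mirror only entries that map userspace; kernel-only entries
             * stay private to the kernel page tables. */
            if (pgdp_maps_user(pgdp))
                    *user_pgdp(pgdp) = pgd;

            *pgdp = pgd;
    }

This mirroring is also where the fork()-time cost described under "Overhead" comes from: both the full kernel mapping and the entry/exit mapping have to be copied into every new PGD pair.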
diff --git a/MAINTAINERS b/MAINTAINERS index c7f40d3bb12b..3a28cee4f0ce 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -9660,8 +9660,8 @@ F: include/uapi/linux/sunrpc/ | |||
9660 | NILFS2 FILESYSTEM | 9660 | NILFS2 FILESYSTEM |
9661 | M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> | 9661 | M: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> |
9662 | L: linux-nilfs@vger.kernel.org | 9662 | L: linux-nilfs@vger.kernel.org |
9663 | W: http://nilfs.sourceforge.net/ | 9663 | W: https://nilfs.sourceforge.io/ |
9664 | W: http://nilfs.osdn.jp/ | 9664 | W: https://nilfs.osdn.jp/ |
9665 | T: git git://github.com/konis/nilfs2.git | 9665 | T: git git://github.com/konis/nilfs2.git |
9666 | S: Supported | 9666 | S: Supported |
9667 | F: Documentation/filesystems/nilfs2.txt | 9667 | F: Documentation/filesystems/nilfs2.txt |
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 15 | 3 | PATCHLEVEL = 15 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc7 | 5 | EXTRAVERSION = -rc8 |
6 | NAME = Fearless Coyote | 6 | NAME = Fearless Coyote |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
@@ -484,26 +484,6 @@ CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) | |||
484 | endif | 484 | endif |
485 | KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 485 | KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) |
486 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 486 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) |
487 | KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) | ||
488 | KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable) | ||
489 | KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) | ||
490 | KBUILD_CFLAGS += $(call cc-disable-warning, gnu) | ||
491 | KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) | ||
492 | # Quiet clang warning: comparison of unsigned expression < 0 is always false | ||
493 | KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) | ||
494 | # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the | ||
495 | # source of a reference will be _MergedGlobals and not on of the whitelisted names. | ||
496 | # See modpost pattern 2 | ||
497 | KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) | ||
498 | KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) | ||
499 | KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) | ||
500 | KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) | ||
501 | else | ||
502 | |||
503 | # These warnings generated too much noise in a regular build. | ||
504 | # Use make W=1 to enable them (see scripts/Makefile.extrawarn) | ||
505 | KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) | ||
506 | KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) | ||
507 | endif | 487 | endif |
508 | 488 | ||
509 | ifeq ($(config-targets),1) | 489 | ifeq ($(config-targets),1) |
@@ -716,6 +696,29 @@ ifdef CONFIG_CC_STACKPROTECTOR | |||
716 | endif | 696 | endif |
717 | KBUILD_CFLAGS += $(stackp-flag) | 697 | KBUILD_CFLAGS += $(stackp-flag) |
718 | 698 | ||
699 | ifeq ($(cc-name),clang) | ||
700 | KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) | ||
701 | KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable) | ||
702 | KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) | ||
703 | KBUILD_CFLAGS += $(call cc-disable-warning, gnu) | ||
704 | KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) | ||
705 | # Quiet clang warning: comparison of unsigned expression < 0 is always false | ||
706 | KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) | ||
707 | # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the | ||
708 | # source of a reference will be _MergedGlobals and not on of the whitelisted names. | ||
709 | # See modpost pattern 2 | ||
710 | KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,) | ||
711 | KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior) | ||
712 | KBUILD_CFLAGS += $(call cc-option, -no-integrated-as) | ||
713 | KBUILD_AFLAGS += $(call cc-option, -no-integrated-as) | ||
714 | else | ||
715 | |||
716 | # These warnings generated too much noise in a regular build. | ||
717 | # Use make W=1 to enable them (see scripts/Makefile.extrawarn) | ||
718 | KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) | ||
719 | KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) | ||
720 | endif | ||
721 | |||
719 | ifdef CONFIG_FRAME_POINTER | 722 | ifdef CONFIG_FRAME_POINTER |
720 | KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls | 723 | KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls |
721 | else | 724 | else |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index c6ecb97151a2..9025699049ca 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | if (ti->softirq_time) { | 90 | if (ti->softirq_time) { |
91 | delta = cycle_to_nsec(ti->softirq_time)); | 91 | delta = cycle_to_nsec(ti->softirq_time); |
92 | account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); | 92 | account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ); |
93 | } | 93 | } |
94 | 94 | ||
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h index a703452d67b6..555e22d5e07f 100644 --- a/arch/powerpc/include/asm/exception-64e.h +++ b/arch/powerpc/include/asm/exception-64e.h | |||
@@ -209,5 +209,11 @@ exc_##label##_book3e: | |||
209 | ori r3,r3,vector_offset@l; \ | 209 | ori r3,r3,vector_offset@l; \ |
210 | mtspr SPRN_IVOR##vector_number,r3; | 210 | mtspr SPRN_IVOR##vector_number,r3; |
211 | 211 | ||
212 | #define RFI_TO_KERNEL \ | ||
213 | rfi | ||
214 | |||
215 | #define RFI_TO_USER \ | ||
216 | rfi | ||
217 | |||
212 | #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ | 218 | #endif /* _ASM_POWERPC_EXCEPTION_64E_H */ |
213 | 219 | ||
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index b27205297e1d..7197b179c1b1 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -74,6 +74,59 @@ | |||
74 | */ | 74 | */ |
75 | #define EX_R3 EX_DAR | 75 | #define EX_R3 EX_DAR |
76 | 76 | ||
77 | /* | ||
78 | * Macros for annotating the expected destination of (h)rfid | ||
79 | * | ||
80 | * The nop instructions allow us to insert one or more instructions to flush the | ||
81 | * L1-D cache when returning to userspace or a guest. | ||
82 | */ | ||
83 | #define RFI_FLUSH_SLOT \ | ||
84 | RFI_FLUSH_FIXUP_SECTION; \ | ||
85 | nop; \ | ||
86 | nop; \ | ||
87 | nop | ||
88 | |||
89 | #define RFI_TO_KERNEL \ | ||
90 | rfid | ||
91 | |||
92 | #define RFI_TO_USER \ | ||
93 | RFI_FLUSH_SLOT; \ | ||
94 | rfid; \ | ||
95 | b rfi_flush_fallback | ||
96 | |||
97 | #define RFI_TO_USER_OR_KERNEL \ | ||
98 | RFI_FLUSH_SLOT; \ | ||
99 | rfid; \ | ||
100 | b rfi_flush_fallback | ||
101 | |||
102 | #define RFI_TO_GUEST \ | ||
103 | RFI_FLUSH_SLOT; \ | ||
104 | rfid; \ | ||
105 | b rfi_flush_fallback | ||
106 | |||
107 | #define HRFI_TO_KERNEL \ | ||
108 | hrfid | ||
109 | |||
110 | #define HRFI_TO_USER \ | ||
111 | RFI_FLUSH_SLOT; \ | ||
112 | hrfid; \ | ||
113 | b hrfi_flush_fallback | ||
114 | |||
115 | #define HRFI_TO_USER_OR_KERNEL \ | ||
116 | RFI_FLUSH_SLOT; \ | ||
117 | hrfid; \ | ||
118 | b hrfi_flush_fallback | ||
119 | |||
120 | #define HRFI_TO_GUEST \ | ||
121 | RFI_FLUSH_SLOT; \ | ||
122 | hrfid; \ | ||
123 | b hrfi_flush_fallback | ||
124 | |||
125 | #define HRFI_TO_UNKNOWN \ | ||
126 | RFI_FLUSH_SLOT; \ | ||
127 | hrfid; \ | ||
128 | b hrfi_flush_fallback | ||
129 | |||
77 | #ifdef CONFIG_RELOCATABLE | 130 | #ifdef CONFIG_RELOCATABLE |
78 | #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ | 131 | #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h) \ |
79 | mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ | 132 | mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ |
@@ -218,7 +271,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) | |||
218 | mtspr SPRN_##h##SRR0,r12; \ | 271 | mtspr SPRN_##h##SRR0,r12; \ |
219 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ | 272 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ |
220 | mtspr SPRN_##h##SRR1,r10; \ | 273 | mtspr SPRN_##h##SRR1,r10; \ |
221 | h##rfid; \ | 274 | h##RFI_TO_KERNEL; \ |
222 | b . /* prevent speculative execution */ | 275 | b . /* prevent speculative execution */ |
223 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ | 276 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ |
224 | __EXCEPTION_PROLOG_PSERIES_1(label, h) | 277 | __EXCEPTION_PROLOG_PSERIES_1(label, h) |
@@ -232,7 +285,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) | |||
232 | mtspr SPRN_##h##SRR0,r12; \ | 285 | mtspr SPRN_##h##SRR0,r12; \ |
233 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ | 286 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ |
234 | mtspr SPRN_##h##SRR1,r10; \ | 287 | mtspr SPRN_##h##SRR1,r10; \ |
235 | h##rfid; \ | 288 | h##RFI_TO_KERNEL; \ |
236 | b . /* prevent speculative execution */ | 289 | b . /* prevent speculative execution */ |
237 | 290 | ||
238 | #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ | 291 | #define EXCEPTION_PROLOG_PSERIES_1_NORI(label, h) \ |
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 8f88f771cc55..1e82eb3caabd 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h | |||
@@ -187,7 +187,20 @@ label##3: \ | |||
187 | FTR_ENTRY_OFFSET label##1b-label##3b; \ | 187 | FTR_ENTRY_OFFSET label##1b-label##3b; \ |
188 | .popsection; | 188 | .popsection; |
189 | 189 | ||
190 | #define RFI_FLUSH_FIXUP_SECTION \ | ||
191 | 951: \ | ||
192 | .pushsection __rfi_flush_fixup,"a"; \ | ||
193 | .align 2; \ | ||
194 | 952: \ | ||
195 | FTR_ENTRY_OFFSET 951b-952b; \ | ||
196 | .popsection; | ||
197 | |||
198 | |||
190 | #ifndef __ASSEMBLY__ | 199 | #ifndef __ASSEMBLY__ |
200 | #include <linux/types.h> | ||
201 | |||
202 | extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; | ||
203 | |||
191 | void apply_feature_fixups(void); | 204 | void apply_feature_fixups(void); |
192 | void setup_feature_keys(void); | 205 | void setup_feature_keys(void); |
193 | #endif | 206 | #endif |
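RFI_FLUSH_FIXUP_SECTION records, for every annotated return-from-interrupt site, the location of the three-nop slot in a new __rfi_flush_fixup section (the vmlinux.lds.S change later in this diff collects those entries between the __start/__stop symbols declared above). do_rfi_flush_fixups() walks that table and rewrites each slot whenever the flush is enabled or disabled. A simplified sketch of that walk; the real implementation lives in arch/powerpc/lib/feature-fixups.c, and only the "restore nops" case is spelled out here:

    #include <asm/code-patching.h>
    #include <asm/feature-fixups.h>
    #include <asm/ppc-opcode.h>
    #include <asm/setup.h>

    void do_rfi_flush_fixups(enum l1d_flush_type types)
    {
            unsigned int instrs[3] = { PPC_INST_NOP, PPC_INST_NOP, PPC_INST_NOP };
            long *start = &__start___rfi_flush_fixup;
            long *end = &__stop___rfi_flush_fixup;
            int i;

            /*
             * For a real flush type the nops would be replaced here with the
             * ori or mttrig flush sequence, or with a branch to the
             * rfi_flush_fallback displacement flush; with L1D_FLUSH_NONE the
             * slot simply goes back to plain nops, as below.
             */
            for (; start < end; start++) {
                    /* each entry stores the offset from itself back to the
                     * three-nop slot emitted by RFI_FLUSH_SLOT */
                    unsigned int *dest = (void *)start + *start;

                    for (i = 0; i < 3; i++)
                            patch_instruction(dest + i, instrs[i]);
            }
    }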
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index a409177be8bd..f0461618bf7b 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -241,6 +241,7 @@ | |||
241 | #define H_GET_HCA_INFO 0x1B8 | 241 | #define H_GET_HCA_INFO 0x1B8 |
242 | #define H_GET_PERF_COUNT 0x1BC | 242 | #define H_GET_PERF_COUNT 0x1BC |
243 | #define H_MANAGE_TRACE 0x1C0 | 243 | #define H_MANAGE_TRACE 0x1C0 |
244 | #define H_GET_CPU_CHARACTERISTICS 0x1C8 | ||
244 | #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 | 245 | #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4 |
245 | #define H_QUERY_INT_STATE 0x1E4 | 246 | #define H_QUERY_INT_STATE 0x1E4 |
246 | #define H_POLL_PENDING 0x1D8 | 247 | #define H_POLL_PENDING 0x1D8 |
@@ -330,6 +331,17 @@ | |||
330 | #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 | 331 | #define H_SIGNAL_SYS_RESET_ALL_OTHERS -2 |
331 | /* >= 0 values are CPU number */ | 332 | /* >= 0 values are CPU number */ |
332 | 333 | ||
334 | /* H_GET_CPU_CHARACTERISTICS return values */ | ||
335 | #define H_CPU_CHAR_SPEC_BAR_ORI31 (1ull << 63) // IBM bit 0 | ||
336 | #define H_CPU_CHAR_BCCTRL_SERIALISED (1ull << 62) // IBM bit 1 | ||
337 | #define H_CPU_CHAR_L1D_FLUSH_ORI30 (1ull << 61) // IBM bit 2 | ||
338 | #define H_CPU_CHAR_L1D_FLUSH_TRIG2 (1ull << 60) // IBM bit 3 | ||
339 | #define H_CPU_CHAR_L1D_THREAD_PRIV (1ull << 59) // IBM bit 4 | ||
340 | |||
341 | #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0 | ||
342 | #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1 | ||
343 | #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2 | ||
344 | |||
333 | /* Flag values used in H_REGISTER_PROC_TBL hcall */ | 345 | /* Flag values used in H_REGISTER_PROC_TBL hcall */ |
334 | #define PROC_TABLE_OP_MASK 0x18 | 346 | #define PROC_TABLE_OP_MASK 0x18 |
335 | #define PROC_TABLE_DEREG 0x10 | 347 | #define PROC_TABLE_DEREG 0x10 |
@@ -436,6 +448,11 @@ static inline unsigned int get_longbusy_msecs(int longbusy_rc) | |||
436 | } | 448 | } |
437 | } | 449 | } |
438 | 450 | ||
451 | struct h_cpu_char_result { | ||
452 | u64 character; | ||
453 | u64 behaviour; | ||
454 | }; | ||
455 | |||
439 | #endif /* __ASSEMBLY__ */ | 456 | #endif /* __ASSEMBLY__ */ |
440 | #endif /* __KERNEL__ */ | 457 | #endif /* __KERNEL__ */ |
441 | #endif /* _ASM_POWERPC_HVCALL_H */ | 458 | #endif /* _ASM_POWERPC_HVCALL_H */ |
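The "IBM bit" comments above refer to the Power big-endian bit numbering, in which bit 0 is the most significant bit of the 64-bit doubleword; that is why IBM bit 0 is written as (1ull << 63). A small illustration (IBM_BIT() is defined locally for this sketch; the kernel's PPC_BIT() macro serves the same purpose):

    #include <linux/types.h>
    #include <asm/hvcall.h>

    #define IBM_BIT(n)      (1ull << (63 - (n)))    /* IBM bit 0 == 1ull << 63 */

    static inline bool hv_wants_user_l1d_flush(const struct h_cpu_char_result *res)
    {
            /* Same test as res->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR (IBM bit 1). */
            return res->behaviour & IBM_BIT(1);
    }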
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 3892db93b837..23ac7fc0af23 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -232,6 +232,16 @@ struct paca_struct { | |||
232 | struct sibling_subcore_state *sibling_subcore_state; | 232 | struct sibling_subcore_state *sibling_subcore_state; |
233 | #endif | 233 | #endif |
234 | #endif | 234 | #endif |
235 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
236 | /* | ||
237 | * rfi fallback flush must be in its own cacheline to prevent | ||
238 | * other paca data leaking into the L1d | ||
239 | */ | ||
240 | u64 exrfi[EX_SIZE] __aligned(0x80); | ||
241 | void *rfi_flush_fallback_area; | ||
242 | u64 l1d_flush_congruence; | ||
243 | u64 l1d_flush_sets; | ||
244 | #endif | ||
235 | }; | 245 | }; |
236 | 246 | ||
237 | extern void copy_mm_to_paca(struct mm_struct *mm); | 247 | extern void copy_mm_to_paca(struct mm_struct *mm); |
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index 7f01b22fa6cb..55eddf50d149 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h | |||
@@ -326,4 +326,18 @@ static inline long plapr_signal_sys_reset(long cpu) | |||
326 | return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); | 326 | return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu); |
327 | } | 327 | } |
328 | 328 | ||
329 | static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p) | ||
330 | { | ||
331 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
332 | long rc; | ||
333 | |||
334 | rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf); | ||
335 | if (rc == H_SUCCESS) { | ||
336 | p->character = retbuf[0]; | ||
337 | p->behaviour = retbuf[1]; | ||
338 | } | ||
339 | |||
340 | return rc; | ||
341 | } | ||
342 | |||
329 | #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ | 343 | #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */ |
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index cf00ec26303a..469b7fdc9be4 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h | |||
@@ -39,6 +39,19 @@ static inline void pseries_big_endian_exceptions(void) {} | |||
39 | static inline void pseries_little_endian_exceptions(void) {} | 39 | static inline void pseries_little_endian_exceptions(void) {} |
40 | #endif /* CONFIG_PPC_PSERIES */ | 40 | #endif /* CONFIG_PPC_PSERIES */ |
41 | 41 | ||
42 | void rfi_flush_enable(bool enable); | ||
43 | |||
44 | /* These are bit flags */ | ||
45 | enum l1d_flush_type { | ||
46 | L1D_FLUSH_NONE = 0x1, | ||
47 | L1D_FLUSH_FALLBACK = 0x2, | ||
48 | L1D_FLUSH_ORI = 0x4, | ||
49 | L1D_FLUSH_MTTRIG = 0x8, | ||
50 | }; | ||
51 | |||
52 | void __init setup_rfi_flush(enum l1d_flush_type, bool enable); | ||
53 | void do_rfi_flush_fixups(enum l1d_flush_type types); | ||
54 | |||
42 | #endif /* !__ASSEMBLY__ */ | 55 | #endif /* !__ASSEMBLY__ */ |
43 | 56 | ||
44 | #endif /* _ASM_POWERPC_SETUP_H */ | 57 | #endif /* _ASM_POWERPC_SETUP_H */ |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 6b958414b4e0..f390d57cf2e1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -237,6 +237,11 @@ int main(void) | |||
237 | OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); | 237 | OFFSET(PACA_NMI_EMERG_SP, paca_struct, nmi_emergency_sp); |
238 | OFFSET(PACA_IN_MCE, paca_struct, in_mce); | 238 | OFFSET(PACA_IN_MCE, paca_struct, in_mce); |
239 | OFFSET(PACA_IN_NMI, paca_struct, in_nmi); | 239 | OFFSET(PACA_IN_NMI, paca_struct, in_nmi); |
240 | OFFSET(PACA_RFI_FLUSH_FALLBACK_AREA, paca_struct, rfi_flush_fallback_area); | ||
241 | OFFSET(PACA_EXRFI, paca_struct, exrfi); | ||
242 | OFFSET(PACA_L1D_FLUSH_CONGRUENCE, paca_struct, l1d_flush_congruence); | ||
243 | OFFSET(PACA_L1D_FLUSH_SETS, paca_struct, l1d_flush_sets); | ||
244 | |||
240 | #endif | 245 | #endif |
241 | OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); | 246 | OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id); |
242 | OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); | 247 | OFFSET(PACAKEXECSTATE, paca_struct, kexec_state); |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3320bcac7192..2748584b767d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -37,6 +37,11 @@ | |||
37 | #include <asm/tm.h> | 37 | #include <asm/tm.h> |
38 | #include <asm/ppc-opcode.h> | 38 | #include <asm/ppc-opcode.h> |
39 | #include <asm/export.h> | 39 | #include <asm/export.h> |
40 | #ifdef CONFIG_PPC_BOOK3S | ||
41 | #include <asm/exception-64s.h> | ||
42 | #else | ||
43 | #include <asm/exception-64e.h> | ||
44 | #endif | ||
40 | 45 | ||
41 | /* | 46 | /* |
42 | * System calls. | 47 | * System calls. |
@@ -262,13 +267,23 @@ BEGIN_FTR_SECTION | |||
262 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | 267 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
263 | 268 | ||
264 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ | 269 | ld r13,GPR13(r1) /* only restore r13 if returning to usermode */ |
270 | ld r2,GPR2(r1) | ||
271 | ld r1,GPR1(r1) | ||
272 | mtlr r4 | ||
273 | mtcr r5 | ||
274 | mtspr SPRN_SRR0,r7 | ||
275 | mtspr SPRN_SRR1,r8 | ||
276 | RFI_TO_USER | ||
277 | b . /* prevent speculative execution */ | ||
278 | |||
279 | /* exit to kernel */ | ||
265 | 1: ld r2,GPR2(r1) | 280 | 1: ld r2,GPR2(r1) |
266 | ld r1,GPR1(r1) | 281 | ld r1,GPR1(r1) |
267 | mtlr r4 | 282 | mtlr r4 |
268 | mtcr r5 | 283 | mtcr r5 |
269 | mtspr SPRN_SRR0,r7 | 284 | mtspr SPRN_SRR0,r7 |
270 | mtspr SPRN_SRR1,r8 | 285 | mtspr SPRN_SRR1,r8 |
271 | RFI | 286 | RFI_TO_KERNEL |
272 | b . /* prevent speculative execution */ | 287 | b . /* prevent speculative execution */ |
273 | 288 | ||
274 | .Lsyscall_error: | 289 | .Lsyscall_error: |
@@ -397,8 +412,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
397 | mtmsrd r10, 1 | 412 | mtmsrd r10, 1 |
398 | mtspr SPRN_SRR0, r11 | 413 | mtspr SPRN_SRR0, r11 |
399 | mtspr SPRN_SRR1, r12 | 414 | mtspr SPRN_SRR1, r12 |
400 | 415 | RFI_TO_USER | |
401 | rfid | ||
402 | b . /* prevent speculative execution */ | 416 | b . /* prevent speculative execution */ |
403 | #endif | 417 | #endif |
404 | _ASM_NOKPROBE_SYMBOL(system_call_common); | 418 | _ASM_NOKPROBE_SYMBOL(system_call_common); |
@@ -878,7 +892,7 @@ BEGIN_FTR_SECTION | |||
878 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | 892 | END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) |
879 | ACCOUNT_CPU_USER_EXIT(r13, r2, r4) | 893 | ACCOUNT_CPU_USER_EXIT(r13, r2, r4) |
880 | REST_GPR(13, r1) | 894 | REST_GPR(13, r1) |
881 | 1: | 895 | |
882 | mtspr SPRN_SRR1,r3 | 896 | mtspr SPRN_SRR1,r3 |
883 | 897 | ||
884 | ld r2,_CCR(r1) | 898 | ld r2,_CCR(r1) |
@@ -891,8 +905,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
891 | ld r3,GPR3(r1) | 905 | ld r3,GPR3(r1) |
892 | ld r4,GPR4(r1) | 906 | ld r4,GPR4(r1) |
893 | ld r1,GPR1(r1) | 907 | ld r1,GPR1(r1) |
908 | RFI_TO_USER | ||
909 | b . /* prevent speculative execution */ | ||
894 | 910 | ||
895 | rfid | 911 | 1: mtspr SPRN_SRR1,r3 |
912 | |||
913 | ld r2,_CCR(r1) | ||
914 | mtcrf 0xFF,r2 | ||
915 | ld r2,_NIP(r1) | ||
916 | mtspr SPRN_SRR0,r2 | ||
917 | |||
918 | ld r0,GPR0(r1) | ||
919 | ld r2,GPR2(r1) | ||
920 | ld r3,GPR3(r1) | ||
921 | ld r4,GPR4(r1) | ||
922 | ld r1,GPR1(r1) | ||
923 | RFI_TO_KERNEL | ||
896 | b . /* prevent speculative execution */ | 924 | b . /* prevent speculative execution */ |
897 | 925 | ||
898 | #endif /* CONFIG_PPC_BOOK3E */ | 926 | #endif /* CONFIG_PPC_BOOK3E */ |
@@ -1073,7 +1101,7 @@ __enter_rtas: | |||
1073 | 1101 | ||
1074 | mtspr SPRN_SRR0,r5 | 1102 | mtspr SPRN_SRR0,r5 |
1075 | mtspr SPRN_SRR1,r6 | 1103 | mtspr SPRN_SRR1,r6 |
1076 | rfid | 1104 | RFI_TO_KERNEL |
1077 | b . /* prevent speculative execution */ | 1105 | b . /* prevent speculative execution */ |
1078 | 1106 | ||
1079 | rtas_return_loc: | 1107 | rtas_return_loc: |
@@ -1098,7 +1126,7 @@ rtas_return_loc: | |||
1098 | 1126 | ||
1099 | mtspr SPRN_SRR0,r3 | 1127 | mtspr SPRN_SRR0,r3 |
1100 | mtspr SPRN_SRR1,r4 | 1128 | mtspr SPRN_SRR1,r4 |
1101 | rfid | 1129 | RFI_TO_KERNEL |
1102 | b . /* prevent speculative execution */ | 1130 | b . /* prevent speculative execution */ |
1103 | _ASM_NOKPROBE_SYMBOL(__enter_rtas) | 1131 | _ASM_NOKPROBE_SYMBOL(__enter_rtas) |
1104 | _ASM_NOKPROBE_SYMBOL(rtas_return_loc) | 1132 | _ASM_NOKPROBE_SYMBOL(rtas_return_loc) |
@@ -1171,7 +1199,7 @@ _GLOBAL(enter_prom) | |||
1171 | LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) | 1199 | LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE) |
1172 | andc r11,r11,r12 | 1200 | andc r11,r11,r12 |
1173 | mtsrr1 r11 | 1201 | mtsrr1 r11 |
1174 | rfid | 1202 | RFI_TO_KERNEL |
1175 | #endif /* CONFIG_PPC_BOOK3E */ | 1203 | #endif /* CONFIG_PPC_BOOK3E */ |
1176 | 1204 | ||
1177 | 1: /* Return from OF */ | 1205 | 1: /* Return from OF */ |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e441b469dc8f..2dc10bf646b8 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -256,7 +256,7 @@ BEGIN_FTR_SECTION | |||
256 | LOAD_HANDLER(r12, machine_check_handle_early) | 256 | LOAD_HANDLER(r12, machine_check_handle_early) |
257 | 1: mtspr SPRN_SRR0,r12 | 257 | 1: mtspr SPRN_SRR0,r12 |
258 | mtspr SPRN_SRR1,r11 | 258 | mtspr SPRN_SRR1,r11 |
259 | rfid | 259 | RFI_TO_KERNEL |
260 | b . /* prevent speculative execution */ | 260 | b . /* prevent speculative execution */ |
261 | 2: | 261 | 2: |
262 | /* Stack overflow. Stay on emergency stack and panic. | 262 | /* Stack overflow. Stay on emergency stack and panic. |
@@ -445,7 +445,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) | |||
445 | li r3,MSR_ME | 445 | li r3,MSR_ME |
446 | andc r10,r10,r3 /* Turn off MSR_ME */ | 446 | andc r10,r10,r3 /* Turn off MSR_ME */ |
447 | mtspr SPRN_SRR1,r10 | 447 | mtspr SPRN_SRR1,r10 |
448 | rfid | 448 | RFI_TO_KERNEL |
449 | b . | 449 | b . |
450 | 2: | 450 | 2: |
451 | /* | 451 | /* |
@@ -463,7 +463,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) | |||
463 | */ | 463 | */ |
464 | bl machine_check_queue_event | 464 | bl machine_check_queue_event |
465 | MACHINE_CHECK_HANDLER_WINDUP | 465 | MACHINE_CHECK_HANDLER_WINDUP |
466 | rfid | 466 | RFI_TO_USER_OR_KERNEL |
467 | 9: | 467 | 9: |
468 | /* Deliver the machine check to host kernel in V mode. */ | 468 | /* Deliver the machine check to host kernel in V mode. */ |
469 | MACHINE_CHECK_HANDLER_WINDUP | 469 | MACHINE_CHECK_HANDLER_WINDUP |
@@ -598,6 +598,9 @@ EXC_COMMON_BEGIN(slb_miss_common) | |||
598 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | 598 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ |
599 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | 599 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ |
600 | 600 | ||
601 | andi. r9,r11,MSR_PR // Check for exception from userspace | ||
602 | cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later | ||
603 | |||
601 | /* | 604 | /* |
602 | * Test MSR_RI before calling slb_allocate_realmode, because the | 605 | * Test MSR_RI before calling slb_allocate_realmode, because the |
603 | * MSR in r11 gets clobbered. However we still want to allocate | 606 | * MSR in r11 gets clobbered. However we still want to allocate |
@@ -624,9 +627,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) | |||
624 | 627 | ||
625 | /* All done -- return from exception. */ | 628 | /* All done -- return from exception. */ |
626 | 629 | ||
630 | bne cr4,1f /* returning to kernel */ | ||
631 | |||
627 | .machine push | 632 | .machine push |
628 | .machine "power4" | 633 | .machine "power4" |
629 | mtcrf 0x80,r9 | 634 | mtcrf 0x80,r9 |
635 | mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ | ||
630 | mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ | 636 | mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ |
631 | mtcrf 0x02,r9 /* I/D indication is in cr6 */ | 637 | mtcrf 0x02,r9 /* I/D indication is in cr6 */ |
632 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | 638 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ |
@@ -640,9 +646,30 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) | |||
640 | ld r11,PACA_EXSLB+EX_R11(r13) | 646 | ld r11,PACA_EXSLB+EX_R11(r13) |
641 | ld r12,PACA_EXSLB+EX_R12(r13) | 647 | ld r12,PACA_EXSLB+EX_R12(r13) |
642 | ld r13,PACA_EXSLB+EX_R13(r13) | 648 | ld r13,PACA_EXSLB+EX_R13(r13) |
643 | rfid | 649 | RFI_TO_USER |
650 | b . /* prevent speculative execution */ | ||
651 | 1: | ||
652 | .machine push | ||
653 | .machine "power4" | ||
654 | mtcrf 0x80,r9 | ||
655 | mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */ | ||
656 | mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */ | ||
657 | mtcrf 0x02,r9 /* I/D indication is in cr6 */ | ||
658 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
659 | .machine pop | ||
660 | |||
661 | RESTORE_CTR(r9, PACA_EXSLB) | ||
662 | RESTORE_PPR_PACA(PACA_EXSLB, r9) | ||
663 | mr r3,r12 | ||
664 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
665 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
666 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
667 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
668 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
669 | RFI_TO_KERNEL | ||
644 | b . /* prevent speculative execution */ | 670 | b . /* prevent speculative execution */ |
645 | 671 | ||
672 | |||
646 | 2: std r3,PACA_EXSLB+EX_DAR(r13) | 673 | 2: std r3,PACA_EXSLB+EX_DAR(r13) |
647 | mr r3,r12 | 674 | mr r3,r12 |
648 | mfspr r11,SPRN_SRR0 | 675 | mfspr r11,SPRN_SRR0 |
@@ -651,7 +678,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) | |||
651 | mtspr SPRN_SRR0,r10 | 678 | mtspr SPRN_SRR0,r10 |
652 | ld r10,PACAKMSR(r13) | 679 | ld r10,PACAKMSR(r13) |
653 | mtspr SPRN_SRR1,r10 | 680 | mtspr SPRN_SRR1,r10 |
654 | rfid | 681 | RFI_TO_KERNEL |
655 | b . | 682 | b . |
656 | 683 | ||
657 | 8: std r3,PACA_EXSLB+EX_DAR(r13) | 684 | 8: std r3,PACA_EXSLB+EX_DAR(r13) |
@@ -662,7 +689,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX) | |||
662 | mtspr SPRN_SRR0,r10 | 689 | mtspr SPRN_SRR0,r10 |
663 | ld r10,PACAKMSR(r13) | 690 | ld r10,PACAKMSR(r13) |
664 | mtspr SPRN_SRR1,r10 | 691 | mtspr SPRN_SRR1,r10 |
665 | rfid | 692 | RFI_TO_KERNEL |
666 | b . | 693 | b . |
667 | 694 | ||
668 | EXC_COMMON_BEGIN(unrecov_slb) | 695 | EXC_COMMON_BEGIN(unrecov_slb) |
@@ -901,7 +928,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception) | |||
901 | mtspr SPRN_SRR0,r10 ; \ | 928 | mtspr SPRN_SRR0,r10 ; \ |
902 | ld r10,PACAKMSR(r13) ; \ | 929 | ld r10,PACAKMSR(r13) ; \ |
903 | mtspr SPRN_SRR1,r10 ; \ | 930 | mtspr SPRN_SRR1,r10 ; \ |
904 | rfid ; \ | 931 | RFI_TO_KERNEL ; \ |
905 | b . ; /* prevent speculative execution */ | 932 | b . ; /* prevent speculative execution */ |
906 | 933 | ||
907 | #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH | 934 | #ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH |
@@ -917,7 +944,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) \ | |||
917 | xori r12,r12,MSR_LE ; \ | 944 | xori r12,r12,MSR_LE ; \ |
918 | mtspr SPRN_SRR1,r12 ; \ | 945 | mtspr SPRN_SRR1,r12 ; \ |
919 | mr r13,r9 ; \ | 946 | mr r13,r9 ; \ |
920 | rfid ; /* return to userspace */ \ | 947 | RFI_TO_USER ; /* return to userspace */ \ |
921 | b . ; /* prevent speculative execution */ | 948 | b . ; /* prevent speculative execution */ |
922 | #else | 949 | #else |
923 | #define SYSCALL_FASTENDIAN_TEST | 950 | #define SYSCALL_FASTENDIAN_TEST |
@@ -1063,7 +1090,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early) | |||
1063 | mtcr r11 | 1090 | mtcr r11 |
1064 | REST_GPR(11, r1) | 1091 | REST_GPR(11, r1) |
1065 | ld r1,GPR1(r1) | 1092 | ld r1,GPR1(r1) |
1066 | hrfid | 1093 | HRFI_TO_USER_OR_KERNEL |
1067 | 1094 | ||
1068 | 1: mtcr r11 | 1095 | 1: mtcr r11 |
1069 | REST_GPR(11, r1) | 1096 | REST_GPR(11, r1) |
@@ -1314,7 +1341,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | |||
1314 | ld r11,PACA_EXGEN+EX_R11(r13) | 1341 | ld r11,PACA_EXGEN+EX_R11(r13) |
1315 | ld r12,PACA_EXGEN+EX_R12(r13) | 1342 | ld r12,PACA_EXGEN+EX_R12(r13) |
1316 | ld r13,PACA_EXGEN+EX_R13(r13) | 1343 | ld r13,PACA_EXGEN+EX_R13(r13) |
1317 | HRFID | 1344 | HRFI_TO_UNKNOWN |
1318 | b . | 1345 | b . |
1319 | #endif | 1346 | #endif |
1320 | 1347 | ||
@@ -1418,10 +1445,94 @@ masked_##_H##interrupt: \ | |||
1418 | ld r10,PACA_EXGEN+EX_R10(r13); \ | 1445 | ld r10,PACA_EXGEN+EX_R10(r13); \ |
1419 | ld r11,PACA_EXGEN+EX_R11(r13); \ | 1446 | ld r11,PACA_EXGEN+EX_R11(r13); \ |
1420 | /* returns to kernel where r13 must be set up, so don't restore it */ \ | 1447 | /* returns to kernel where r13 must be set up, so don't restore it */ \ |
1421 | ##_H##rfid; \ | 1448 | ##_H##RFI_TO_KERNEL; \ |
1422 | b .; \ | 1449 | b .; \ |
1423 | MASKED_DEC_HANDLER(_H) | 1450 | MASKED_DEC_HANDLER(_H) |
1424 | 1451 | ||
1452 | TRAMP_REAL_BEGIN(rfi_flush_fallback) | ||
1453 | SET_SCRATCH0(r13); | ||
1454 | GET_PACA(r13); | ||
1455 | std r9,PACA_EXRFI+EX_R9(r13) | ||
1456 | std r10,PACA_EXRFI+EX_R10(r13) | ||
1457 | std r11,PACA_EXRFI+EX_R11(r13) | ||
1458 | std r12,PACA_EXRFI+EX_R12(r13) | ||
1459 | std r8,PACA_EXRFI+EX_R13(r13) | ||
1460 | mfctr r9 | ||
1461 | ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) | ||
1462 | ld r11,PACA_L1D_FLUSH_SETS(r13) | ||
1463 | ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) | ||
1464 | /* | ||
1465 | * The load addresses are at staggered offsets within cachelines, | ||
1466 | * which suits some pipelines better (on others it should not | ||
1467 | * hurt). | ||
1468 | */ | ||
1469 | addi r12,r12,8 | ||
1470 | mtctr r11 | ||
1471 | DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ | ||
1472 | |||
1473 | /* order ld/st prior to dcbt stop all streams with flushing */ | ||
1474 | sync | ||
1475 | 1: li r8,0 | ||
1476 | .rept 8 /* 8-way set associative */ | ||
1477 | ldx r11,r10,r8 | ||
1478 | add r8,r8,r12 | ||
1479 | xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not | ||
1480 | add r8,r8,r11 // Add 0, this creates a dependency on the ldx | ||
1481 | .endr | ||
1482 | addi r10,r10,128 /* 128 byte cache line */ | ||
1483 | bdnz 1b | ||
1484 | |||
1485 | mtctr r9 | ||
1486 | ld r9,PACA_EXRFI+EX_R9(r13) | ||
1487 | ld r10,PACA_EXRFI+EX_R10(r13) | ||
1488 | ld r11,PACA_EXRFI+EX_R11(r13) | ||
1489 | ld r12,PACA_EXRFI+EX_R12(r13) | ||
1490 | ld r8,PACA_EXRFI+EX_R13(r13) | ||
1491 | GET_SCRATCH0(r13); | ||
1492 | rfid | ||
1493 | |||
1494 | TRAMP_REAL_BEGIN(hrfi_flush_fallback) | ||
1495 | SET_SCRATCH0(r13); | ||
1496 | GET_PACA(r13); | ||
1497 | std r9,PACA_EXRFI+EX_R9(r13) | ||
1498 | std r10,PACA_EXRFI+EX_R10(r13) | ||
1499 | std r11,PACA_EXRFI+EX_R11(r13) | ||
1500 | std r12,PACA_EXRFI+EX_R12(r13) | ||
1501 | std r8,PACA_EXRFI+EX_R13(r13) | ||
1502 | mfctr r9 | ||
1503 | ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) | ||
1504 | ld r11,PACA_L1D_FLUSH_SETS(r13) | ||
1505 | ld r12,PACA_L1D_FLUSH_CONGRUENCE(r13) | ||
1506 | /* | ||
1507 | * The load addresses are at staggered offsets within cachelines, | ||
1508 | * which suits some pipelines better (on others it should not | ||
1509 | * hurt). | ||
1510 | */ | ||
1511 | addi r12,r12,8 | ||
1512 | mtctr r11 | ||
1513 | DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ | ||
1514 | |||
1515 | /* order ld/st prior to dcbt stop all streams with flushing */ | ||
1516 | sync | ||
1517 | 1: li r8,0 | ||
1518 | .rept 8 /* 8-way set associative */ | ||
1519 | ldx r11,r10,r8 | ||
1520 | add r8,r8,r12 | ||
1521 | xor r11,r11,r11 // Ensure r11 is 0 even if fallback area is not | ||
1522 | add r8,r8,r11 // Add 0, this creates a dependency on the ldx | ||
1523 | .endr | ||
1524 | addi r10,r10,128 /* 128 byte cache line */ | ||
1525 | bdnz 1b | ||
1526 | |||
1527 | mtctr r9 | ||
1528 | ld r9,PACA_EXRFI+EX_R9(r13) | ||
1529 | ld r10,PACA_EXRFI+EX_R10(r13) | ||
1530 | ld r11,PACA_EXRFI+EX_R11(r13) | ||
1531 | ld r12,PACA_EXRFI+EX_R12(r13) | ||
1532 | ld r8,PACA_EXRFI+EX_R13(r13) | ||
1533 | GET_SCRATCH0(r13); | ||
1534 | hrfid | ||
1535 | |||
1425 | /* | 1536 | /* |
1426 | * Real mode exceptions actually use this too, but alternate | 1537 | * Real mode exceptions actually use this too, but alternate |
1427 | * instruction code patches (which end up in the common .text area) | 1538 | * instruction code patches (which end up in the common .text area) |
@@ -1441,7 +1552,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_interrupt) | |||
1441 | addi r13, r13, 4 | 1552 | addi r13, r13, 4 |
1442 | mtspr SPRN_SRR0, r13 | 1553 | mtspr SPRN_SRR0, r13 |
1443 | GET_SCRATCH0(r13) | 1554 | GET_SCRATCH0(r13) |
1444 | rfid | 1555 | RFI_TO_KERNEL |
1445 | b . | 1556 | b . |
1446 | 1557 | ||
1447 | TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) | 1558 | TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) |
@@ -1453,7 +1564,7 @@ TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt) | |||
1453 | addi r13, r13, 4 | 1564 | addi r13, r13, 4 |
1454 | mtspr SPRN_HSRR0, r13 | 1565 | mtspr SPRN_HSRR0, r13 |
1455 | GET_SCRATCH0(r13) | 1566 | GET_SCRATCH0(r13) |
1456 | hrfid | 1567 | HRFI_TO_KERNEL |
1457 | b . | 1568 | b . |
1458 | #endif | 1569 | #endif |
1459 | 1570 | ||
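The rfi_flush_fallback/hrfi_flush_fallback trampolines above implement a displacement flush: before returning to userspace they load one address in every way of every set of the L1D, displacing whatever kernel data was cached there. In rough C terms the loop does the following (a model of the assembly, not code from the patch; way_size corresponds to the l1d_flush_congruence value and sets to l1d_flush_sets, which the setup_64.c change below computes per CPU):

    /*
     * Model only: touch each of the 8 ways of every 128-byte set once.
     * The +8 produces the staggered offsets mentioned in the comment above.
     */
    static void displacement_flush_model(const char *area, unsigned long way_size,
                                         unsigned long sets)
    {
            unsigned long set, way;

            for (set = 0; set < sets; set++) {
                    const char *p = area + set * 128;       /* 128-byte cache lines */

                    for (way = 0; way < 8; way++) {         /* 8-way set associative */
                            (void)*(volatile const unsigned long *)p;
                            p += way_size + 8;
                    }
            }
    }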
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 8956a9856604..491be4179ddd 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -801,3 +801,104 @@ static int __init disable_hardlockup_detector(void) | |||
801 | return 0; | 801 | return 0; |
802 | } | 802 | } |
803 | early_initcall(disable_hardlockup_detector); | 803 | early_initcall(disable_hardlockup_detector); |
804 | |||
805 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
806 | static enum l1d_flush_type enabled_flush_types; | ||
807 | static void *l1d_flush_fallback_area; | ||
808 | static bool no_rfi_flush; | ||
809 | bool rfi_flush; | ||
810 | |||
811 | static int __init handle_no_rfi_flush(char *p) | ||
812 | { | ||
813 | pr_info("rfi-flush: disabled on command line."); | ||
814 | no_rfi_flush = true; | ||
815 | return 0; | ||
816 | } | ||
817 | early_param("no_rfi_flush", handle_no_rfi_flush); | ||
818 | |||
819 | /* | ||
820 | * The RFI flush is not KPTI, but because users will see doco that says to use | ||
821 | * nopti we hijack that option here to also disable the RFI flush. | ||
822 | */ | ||
823 | static int __init handle_no_pti(char *p) | ||
824 | { | ||
825 | pr_info("rfi-flush: disabling due to 'nopti' on command line.\n"); | ||
826 | handle_no_rfi_flush(NULL); | ||
827 | return 0; | ||
828 | } | ||
829 | early_param("nopti", handle_no_pti); | ||
830 | |||
831 | static void do_nothing(void *unused) | ||
832 | { | ||
833 | /* | ||
834 | * We don't need to do the flush explicitly, just enter+exit kernel is | ||
835 | * sufficient, the RFI exit handlers will do the right thing. | ||
836 | */ | ||
837 | } | ||
838 | |||
839 | void rfi_flush_enable(bool enable) | ||
840 | { | ||
841 | if (rfi_flush == enable) | ||
842 | return; | ||
843 | |||
844 | if (enable) { | ||
845 | do_rfi_flush_fixups(enabled_flush_types); | ||
846 | on_each_cpu(do_nothing, NULL, 1); | ||
847 | } else | ||
848 | do_rfi_flush_fixups(L1D_FLUSH_NONE); | ||
849 | |||
850 | rfi_flush = enable; | ||
851 | } | ||
852 | |||
853 | static void init_fallback_flush(void) | ||
854 | { | ||
855 | u64 l1d_size, limit; | ||
856 | int cpu; | ||
857 | |||
858 | l1d_size = ppc64_caches.l1d.size; | ||
859 | limit = min(safe_stack_limit(), ppc64_rma_size); | ||
860 | |||
861 | /* | ||
862 | * Align to L1d size, and size it at 2x L1d size, to catch possible | ||
863 | * hardware prefetch runoff. We don't have a recipe for load patterns to | ||
864 | * reliably avoid the prefetcher. | ||
865 | */ | ||
866 | l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit)); | ||
867 | memset(l1d_flush_fallback_area, 0, l1d_size * 2); | ||
868 | |||
869 | for_each_possible_cpu(cpu) { | ||
870 | /* | ||
871 | * The fallback flush is currently coded for 8-way | ||
872 | * associativity. Different associativity is possible, but it | ||
873 | * will be treated as 8-way and may not evict the lines as | ||
874 | * effectively. | ||
875 | * | ||
876 | * 128 byte lines are mandatory. | ||
877 | */ | ||
878 | u64 c = l1d_size / 8; | ||
879 | |||
880 | paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area; | ||
881 | paca[cpu].l1d_flush_congruence = c; | ||
882 | paca[cpu].l1d_flush_sets = c / 128; | ||
883 | } | ||
884 | } | ||
885 | |||
886 | void __init setup_rfi_flush(enum l1d_flush_type types, bool enable) | ||
887 | { | ||
888 | if (types & L1D_FLUSH_FALLBACK) { | ||
889 | pr_info("rfi-flush: Using fallback displacement flush\n"); | ||
890 | init_fallback_flush(); | ||
891 | } | ||
892 | |||
893 | if (types & L1D_FLUSH_ORI) | ||
894 | pr_info("rfi-flush: Using ori type flush\n"); | ||
895 | |||
896 | if (types & L1D_FLUSH_MTTRIG) | ||
897 | pr_info("rfi-flush: Using mttrig type flush\n"); | ||
898 | |||
899 | enabled_flush_types = types; | ||
900 | |||
901 | if (!no_rfi_flush) | ||
902 | rfi_flush_enable(enable); | ||
903 | } | ||
904 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
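A worked example of the per-paca geometry computed in init_fallback_flush() above, assuming a hypothetical 32 KiB, 8-way, 128-byte-line L1d (sizes chosen for illustration, not taken from the patch):

	c                    = l1d_size / 8  = 32768 / 8 = 4096  (bytes per way)
	l1d_flush_congruence = c             = 4096
	l1d_flush_sets       = c / 128       = 32

	fallback walk: 32 sets x 8 ways x 128 bytes = 32 KiB, i.e. one full
	pass over the L1d, taken from a buffer sized at 2 x L1d to absorb
	hardware prefetch runoff.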
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 0494e1566ee2..307843d23682 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -132,6 +132,15 @@ SECTIONS | |||
132 | /* Read-only data */ | 132 | /* Read-only data */ |
133 | RO_DATA(PAGE_SIZE) | 133 | RO_DATA(PAGE_SIZE) |
134 | 134 | ||
135 | #ifdef CONFIG_PPC64 | ||
136 | . = ALIGN(8); | ||
137 | __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) { | ||
138 | __start___rfi_flush_fixup = .; | ||
139 | *(__rfi_flush_fixup) | ||
140 | __stop___rfi_flush_fixup = .; | ||
141 | } | ||
142 | #endif | ||
143 | |||
135 | EXCEPTION_TABLE(0) | 144 | EXCEPTION_TABLE(0) |
136 | 145 | ||
137 | NOTES :kernel :notes | 146 | NOTES :kernel :notes |
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 29ebe2fd5867..a93d719edc90 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
235 | gpte->may_read = true; | 235 | gpte->may_read = true; |
236 | gpte->may_write = true; | 236 | gpte->may_write = true; |
237 | gpte->page_size = MMU_PAGE_4K; | 237 | gpte->page_size = MMU_PAGE_4K; |
238 | gpte->wimg = HPTE_R_M; | ||
238 | 239 | ||
239 | return 0; | 240 | return 0; |
240 | } | 241 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 966097232d21..b73dbc9e797d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -65,11 +65,17 @@ struct kvm_resize_hpt { | |||
65 | u32 order; | 65 | u32 order; |
66 | 66 | ||
67 | /* These fields protected by kvm->lock */ | 67 | /* These fields protected by kvm->lock */ |
68 | |||
69 | /* Possible values and their usage: | ||
70 | * <0 an error occurred during allocation, | ||
71 | * -EBUSY allocation is in progress, | ||
72 | * 0 allocation made successfully. | ||
73 | */ | ||
68 | int error; | 74 | int error; |
69 | bool prepare_done; | ||
70 | 75 | ||
71 | /* Private to the work thread, until prepare_done is true, | 76 | /* Private to the work thread, until error != -EBUSY, |
72 | * then protected by kvm->resize_hpt_sem */ | 77 | * then protected by kvm->lock. |
78 | */ | ||
73 | struct kvm_hpt_info hpt; | 79 | struct kvm_hpt_info hpt; |
74 | }; | 80 | }; |
75 | 81 | ||
@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) | |||
159 | * Reset all the reverse-mapping chains for all memslots | 165 | * Reset all the reverse-mapping chains for all memslots |
160 | */ | 166 | */ |
161 | kvmppc_rmap_reset(kvm); | 167 | kvmppc_rmap_reset(kvm); |
162 | /* Ensure that each vcpu will flush its TLB on next entry. */ | ||
163 | cpumask_setall(&kvm->arch.need_tlb_flush); | ||
164 | err = 0; | 168 | err = 0; |
165 | goto out; | 169 | goto out; |
166 | } | 170 | } |
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) | |||
176 | kvmppc_set_hpt(kvm, &info); | 180 | kvmppc_set_hpt(kvm, &info); |
177 | 181 | ||
178 | out: | 182 | out: |
183 | if (err == 0) | ||
184 | /* Ensure that each vcpu will flush its TLB on next entry. */ | ||
185 | cpumask_setall(&kvm->arch.need_tlb_flush); | ||
186 | |||
179 | mutex_unlock(&kvm->lock); | 187 | mutex_unlock(&kvm->lock); |
180 | return err; | 188 | return err; |
181 | } | 189 | } |
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) | |||
1413 | 1421 | ||
1414 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) | 1422 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) |
1415 | { | 1423 | { |
1416 | BUG_ON(kvm->arch.resize_hpt != resize); | 1424 | if (WARN_ON(!mutex_is_locked(&kvm->lock))) |
1425 | return; | ||
1417 | 1426 | ||
1418 | if (!resize) | 1427 | if (!resize) |
1419 | return; | 1428 | return; |
1420 | 1429 | ||
1421 | if (resize->hpt.virt) | 1430 | if (resize->error != -EBUSY) { |
1422 | kvmppc_free_hpt(&resize->hpt); | 1431 | if (resize->hpt.virt) |
1432 | kvmppc_free_hpt(&resize->hpt); | ||
1433 | kfree(resize); | ||
1434 | } | ||
1423 | 1435 | ||
1424 | kvm->arch.resize_hpt = NULL; | 1436 | if (kvm->arch.resize_hpt == resize) |
1425 | kfree(resize); | 1437 | kvm->arch.resize_hpt = NULL; |
1426 | } | 1438 | } |
1427 | 1439 | ||
1428 | static void resize_hpt_prepare_work(struct work_struct *work) | 1440 | static void resize_hpt_prepare_work(struct work_struct *work) |
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work) | |||
1431 | struct kvm_resize_hpt, | 1443 | struct kvm_resize_hpt, |
1432 | work); | 1444 | work); |
1433 | struct kvm *kvm = resize->kvm; | 1445 | struct kvm *kvm = resize->kvm; |
1434 | int err; | 1446 | int err = 0; |
1435 | 1447 | ||
1436 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", | 1448 | if (WARN_ON(resize->error != -EBUSY)) |
1437 | resize->order); | 1449 | return; |
1438 | |||
1439 | err = resize_hpt_allocate(resize); | ||
1440 | 1450 | ||
1441 | mutex_lock(&kvm->lock); | 1451 | mutex_lock(&kvm->lock); |
1442 | 1452 | ||
1453 | /* Request is still current? */ | ||
1454 | if (kvm->arch.resize_hpt == resize) { | ||
1455 | /* We may request large allocations here: | ||
1456 | * do not hold kvm->lock while sleeping for a long time. | ||
1457 | */ | ||
1458 | mutex_unlock(&kvm->lock); | ||
1459 | |||
1460 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", | ||
1461 | resize->order); | ||
1462 | |||
1463 | err = resize_hpt_allocate(resize); | ||
1464 | |||
1465 | /* We have a strict assumption about -EBUSY | ||
1466 | * when preparing for HPT resize. | ||
1467 | */ | ||
1468 | if (WARN_ON(err == -EBUSY)) | ||
1469 | err = -EINPROGRESS; | ||
1470 | |||
1471 | mutex_lock(&kvm->lock); | ||
1472 | /* It is possible that kvm->arch.resize_hpt != resize | ||
1473 | * after we grab kvm->lock again. | ||
1474 | */ | ||
1475 | } | ||
1476 | |||
1443 | resize->error = err; | 1477 | resize->error = err; |
1444 | resize->prepare_done = true; | 1478 | |
1479 | if (kvm->arch.resize_hpt != resize) | ||
1480 | resize_hpt_release(kvm, resize); | ||
1445 | 1481 | ||
1446 | mutex_unlock(&kvm->lock); | 1482 | mutex_unlock(&kvm->lock); |
1447 | } | 1483 | } |
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |||
1466 | 1502 | ||
1467 | if (resize) { | 1503 | if (resize) { |
1468 | if (resize->order == shift) { | 1504 | if (resize->order == shift) { |
1469 | /* Suitable resize in progress */ | 1505 | /* Suitable resize in progress? */ |
1470 | if (resize->prepare_done) { | 1506 | ret = resize->error; |
1471 | ret = resize->error; | 1507 | if (ret == -EBUSY) |
1472 | if (ret != 0) | ||
1473 | resize_hpt_release(kvm, resize); | ||
1474 | } else { | ||
1475 | ret = 100; /* estimated time in ms */ | 1508 | ret = 100; /* estimated time in ms */ |
1476 | } | 1509 | else if (ret) |
1510 | resize_hpt_release(kvm, resize); | ||
1477 | 1511 | ||
1478 | goto out; | 1512 | goto out; |
1479 | } | 1513 | } |
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |||
1493 | ret = -ENOMEM; | 1527 | ret = -ENOMEM; |
1494 | goto out; | 1528 | goto out; |
1495 | } | 1529 | } |
1530 | |||
1531 | resize->error = -EBUSY; | ||
1496 | resize->order = shift; | 1532 | resize->order = shift; |
1497 | resize->kvm = kvm; | 1533 | resize->kvm = kvm; |
1498 | INIT_WORK(&resize->work, resize_hpt_prepare_work); | 1534 | INIT_WORK(&resize->work, resize_hpt_prepare_work); |
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, | |||
1547 | if (!resize || (resize->order != shift)) | 1583 | if (!resize || (resize->order != shift)) |
1548 | goto out; | 1584 | goto out; |
1549 | 1585 | ||
1550 | ret = -EBUSY; | ||
1551 | if (!resize->prepare_done) | ||
1552 | goto out; | ||
1553 | |||
1554 | ret = resize->error; | 1586 | ret = resize->error; |
1555 | if (ret != 0) | 1587 | if (ret) |
1556 | goto out; | 1588 | goto out; |
1557 | 1589 | ||
1558 | ret = resize_hpt_rehash(resize); | 1590 | ret = resize_hpt_rehash(resize); |
1559 | if (ret != 0) | 1591 | if (ret) |
1560 | goto out; | 1592 | goto out; |
1561 | 1593 | ||
1562 | resize_hpt_pivot(resize); | 1594 | resize_hpt_pivot(resize); |
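The reworked resize->error field above doubles as the state of the HPT resize; a compact summary of its life cycle (descriptive only, not code from the patch):

	prepare ioctl      -> allocate resize, error = -EBUSY, queue the worker
	worker completes   -> error = 0 on success, or a negative errno
	prepare re-issued  -> error == -EBUSY : still pending, report ~100 ms
	                      error  < 0      : release the resize, return the error
	                      error == 0      : ready for commit
	commit ioctl       -> rehash/pivot only proceeds when error == 0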
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 2659844784b8..9c61f736c75b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -79,7 +79,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline) | |||
79 | mtmsrd r0,1 /* clear RI in MSR */ | 79 | mtmsrd r0,1 /* clear RI in MSR */ |
80 | mtsrr0 r5 | 80 | mtsrr0 r5 |
81 | mtsrr1 r6 | 81 | mtsrr1 r6 |
82 | RFI | 82 | RFI_TO_KERNEL |
83 | 83 | ||
84 | kvmppc_call_hv_entry: | 84 | kvmppc_call_hv_entry: |
85 | BEGIN_FTR_SECTION | 85 | BEGIN_FTR_SECTION |
@@ -199,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
199 | mtmsrd r6, 1 /* Clear RI in MSR */ | 199 | mtmsrd r6, 1 /* Clear RI in MSR */ |
200 | mtsrr0 r8 | 200 | mtsrr0 r8 |
201 | mtsrr1 r7 | 201 | mtsrr1 r7 |
202 | RFI | 202 | RFI_TO_KERNEL |
203 | 203 | ||
204 | /* Virtual-mode return */ | 204 | /* Virtual-mode return */ |
205 | .Lvirt_return: | 205 | .Lvirt_return: |
@@ -1167,8 +1167,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
1167 | 1167 | ||
1168 | ld r0, VCPU_GPR(R0)(r4) | 1168 | ld r0, VCPU_GPR(R0)(r4) |
1169 | ld r4, VCPU_GPR(R4)(r4) | 1169 | ld r4, VCPU_GPR(R4)(r4) |
1170 | 1170 | HRFI_TO_GUEST | |
1171 | hrfid | ||
1172 | b . | 1171 | b . |
1173 | 1172 | ||
1174 | secondary_too_late: | 1173 | secondary_too_late: |
@@ -3320,7 +3319,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX) | |||
3320 | ld r4, PACAKMSR(r13) | 3319 | ld r4, PACAKMSR(r13) |
3321 | mtspr SPRN_SRR0, r3 | 3320 | mtspr SPRN_SRR0, r3 |
3322 | mtspr SPRN_SRR1, r4 | 3321 | mtspr SPRN_SRR1, r4 |
3323 | rfid | 3322 | RFI_TO_KERNEL |
3324 | 9: addi r3, r1, STACK_FRAME_OVERHEAD | 3323 | 9: addi r3, r1, STACK_FRAME_OVERHEAD |
3325 | bl kvmppc_bad_interrupt | 3324 | bl kvmppc_bad_interrupt |
3326 | b 9b | 3325 | b 9b |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d0dc8624198f..7deaeeb14b93 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | |||
60 | #define MSR_USER32 MSR_USER | 60 | #define MSR_USER32 MSR_USER |
61 | #define MSR_USER64 MSR_USER | 61 | #define MSR_USER64 MSR_USER |
62 | #define HW_PAGE_SIZE PAGE_SIZE | 62 | #define HW_PAGE_SIZE PAGE_SIZE |
63 | #define HPTE_R_M _PAGE_COHERENT | ||
63 | #endif | 64 | #endif |
64 | 65 | ||
65 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) | 66 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) |
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
557 | pte.eaddr = eaddr; | 558 | pte.eaddr = eaddr; |
558 | pte.vpage = eaddr >> 12; | 559 | pte.vpage = eaddr >> 12; |
559 | pte.page_size = MMU_PAGE_64K; | 560 | pte.page_size = MMU_PAGE_64K; |
561 | pte.wimg = HPTE_R_M; | ||
560 | } | 562 | } |
561 | 563 | ||
562 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { | 564 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { |
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 42a4b237df5f..34a5adeff084 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -46,6 +46,9 @@ | |||
46 | 46 | ||
47 | #define FUNC(name) name | 47 | #define FUNC(name) name |
48 | 48 | ||
49 | #define RFI_TO_KERNEL RFI | ||
50 | #define RFI_TO_GUEST RFI | ||
51 | |||
49 | .macro INTERRUPT_TRAMPOLINE intno | 52 | .macro INTERRUPT_TRAMPOLINE intno |
50 | 53 | ||
51 | .global kvmppc_trampoline_\intno | 54 | .global kvmppc_trampoline_\intno |
@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins: | |||
141 | GET_SCRATCH0(r13) | 144 | GET_SCRATCH0(r13) |
142 | 145 | ||
143 | /* And get back into the code */ | 146 | /* And get back into the code */ |
144 | RFI | 147 | RFI_TO_KERNEL |
145 | #endif | 148 | #endif |
146 | 149 | ||
147 | /* | 150 | /* |
@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline) | |||
164 | ori r5, r5, MSR_EE | 167 | ori r5, r5, MSR_EE |
165 | mtsrr0 r7 | 168 | mtsrr0 r7 |
166 | mtsrr1 r6 | 169 | mtsrr1 r6 |
167 | RFI | 170 | RFI_TO_KERNEL |
168 | 171 | ||
169 | #include "book3s_segment.S" | 172 | #include "book3s_segment.S" |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 2a2b96d53999..93a180ceefad 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -156,7 +156,7 @@ no_dcbz32_on: | |||
156 | PPC_LL r9, SVCPU_R9(r3) | 156 | PPC_LL r9, SVCPU_R9(r3) |
157 | PPC_LL r3, (SVCPU_R3)(r3) | 157 | PPC_LL r3, (SVCPU_R3)(r3) |
158 | 158 | ||
159 | RFI | 159 | RFI_TO_GUEST |
160 | kvmppc_handler_trampoline_enter_end: | 160 | kvmppc_handler_trampoline_enter_end: |
161 | 161 | ||
162 | 162 | ||
@@ -407,5 +407,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | |||
407 | cmpwi r12, BOOK3S_INTERRUPT_DOORBELL | 407 | cmpwi r12, BOOK3S_INTERRUPT_DOORBELL |
408 | beqa BOOK3S_INTERRUPT_DOORBELL | 408 | beqa BOOK3S_INTERRUPT_DOORBELL |
409 | 409 | ||
410 | RFI | 410 | RFI_TO_KERNEL |
411 | kvmppc_handler_trampoline_exit_end: | 411 | kvmppc_handler_trampoline_exit_end: |
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index 41cf5ae273cf..a95ea007d654 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c | |||
@@ -116,6 +116,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end) | |||
116 | } | 116 | } |
117 | } | 117 | } |
118 | 118 | ||
119 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
120 | void do_rfi_flush_fixups(enum l1d_flush_type types) | ||
121 | { | ||
122 | unsigned int instrs[3], *dest; | ||
123 | long *start, *end; | ||
124 | int i; | ||
125 | |||
126 | start = PTRRELOC(&__start___rfi_flush_fixup), | ||
127 | end = PTRRELOC(&__stop___rfi_flush_fixup); | ||
128 | |||
129 | instrs[0] = 0x60000000; /* nop */ | ||
130 | instrs[1] = 0x60000000; /* nop */ | ||
131 | instrs[2] = 0x60000000; /* nop */ | ||
132 | |||
133 | if (types & L1D_FLUSH_FALLBACK) | ||
134 | /* b .+16 to fallback flush */ | ||
135 | instrs[0] = 0x48000010; | ||
136 | |||
137 | i = 0; | ||
138 | if (types & L1D_FLUSH_ORI) { | ||
139 | instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ | ||
140 | instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */ | ||
141 | } | ||
142 | |||
143 | if (types & L1D_FLUSH_MTTRIG) | ||
144 | instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ | ||
145 | |||
146 | for (i = 0; start < end; start++, i++) { | ||
147 | dest = (void *)start + *start; | ||
148 | |||
149 | pr_devel("patching dest %lx\n", (unsigned long)dest); | ||
150 | |||
151 | patch_instruction(dest, instrs[0]); | ||
152 | patch_instruction(dest + 1, instrs[1]); | ||
153 | patch_instruction(dest + 2, instrs[2]); | ||
154 | } | ||
155 | |||
156 | printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i); | ||
157 | } | ||
158 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
159 | |||
119 | void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) | 160 | void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end) |
120 | { | 161 | { |
121 | long *start, *end; | 162 | long *start, *end; |
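Each entry in the new __rfi_flush_fixup section is an 8-byte, PC-relative offset from the entry back to its three-nop patch site, which is why the loop above recovers the destination with "dest = (void *)start + *start". A minimal sketch of that resolution (illustrative only; the helper name is invented, and the macro that emits the entries lives in a header not shown in this hunk):

/* Illustrative only: resolve one __rfi_flush_fixup entry to its patch site. */
static void *rfi_fixup_entry_to_site(long *entry)
{
	/* *entry holds (patch_site - entry), so adding it back yields the site */
	return (void *)entry + *entry;
}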
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 1edfbc1e40f4..4fb21e17504a 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -37,13 +37,62 @@ | |||
37 | #include <asm/kexec.h> | 37 | #include <asm/kexec.h> |
38 | #include <asm/smp.h> | 38 | #include <asm/smp.h> |
39 | #include <asm/tm.h> | 39 | #include <asm/tm.h> |
40 | #include <asm/setup.h> | ||
40 | 41 | ||
41 | #include "powernv.h" | 42 | #include "powernv.h" |
42 | 43 | ||
44 | static void pnv_setup_rfi_flush(void) | ||
45 | { | ||
46 | struct device_node *np, *fw_features; | ||
47 | enum l1d_flush_type type; | ||
48 | int enable; | ||
49 | |||
50 | /* Default to fallback in case fw-features are not available */ | ||
51 | type = L1D_FLUSH_FALLBACK; | ||
52 | enable = 1; | ||
53 | |||
54 | np = of_find_node_by_name(NULL, "ibm,opal"); | ||
55 | fw_features = of_get_child_by_name(np, "fw-features"); | ||
56 | of_node_put(np); | ||
57 | |||
58 | if (fw_features) { | ||
59 | np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2"); | ||
60 | if (np && of_property_read_bool(np, "enabled")) | ||
61 | type = L1D_FLUSH_MTTRIG; | ||
62 | |||
63 | of_node_put(np); | ||
64 | |||
65 | np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0"); | ||
66 | if (np && of_property_read_bool(np, "enabled")) | ||
67 | type = L1D_FLUSH_ORI; | ||
68 | |||
69 | of_node_put(np); | ||
70 | |||
71 | /* Enable unless firmware says NOT to */ | ||
72 | enable = 2; | ||
73 | np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0"); | ||
74 | if (np && of_property_read_bool(np, "disabled")) | ||
75 | enable--; | ||
76 | |||
77 | of_node_put(np); | ||
78 | |||
79 | np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1"); | ||
80 | if (np && of_property_read_bool(np, "disabled")) | ||
81 | enable--; | ||
82 | |||
83 | of_node_put(np); | ||
84 | of_node_put(fw_features); | ||
85 | } | ||
86 | |||
87 | setup_rfi_flush(type, enable > 0); | ||
88 | } | ||
89 | |||
43 | static void __init pnv_setup_arch(void) | 90 | static void __init pnv_setup_arch(void) |
44 | { | 91 | { |
45 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); | 92 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); |
46 | 93 | ||
94 | pnv_setup_rfi_flush(); | ||
95 | |||
47 | /* Initialize SMP */ | 96 | /* Initialize SMP */ |
48 | pnv_smp_init(); | 97 | pnv_smp_init(); |
49 | 98 | ||
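The enable accounting in pnv_setup_rfi_flush() above reduces to a simple rule, summarised here for reference (descriptive only, not part of the patch):

	hv-1-to-0 "disabled"?   pr-0-to-1 "disabled"?   enable   RFI flush
	no                      no                      2        enabled
	yes                     no                      1        enabled
	no                      yes                     1        enabled
	yes                     yes                     0        disabled

i.e. firmware has to opt out of both the HV->non-HV and the PR 0->1 flush requirements before the flush is left disabled by default.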
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 6e35780c5962..a0b20c03f078 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -574,11 +574,26 @@ static ssize_t dlpar_show(struct class *class, struct class_attribute *attr, | |||
574 | 574 | ||
575 | static CLASS_ATTR_RW(dlpar); | 575 | static CLASS_ATTR_RW(dlpar); |
576 | 576 | ||
577 | static int __init pseries_dlpar_init(void) | 577 | int __init dlpar_workqueue_init(void) |
578 | { | 578 | { |
579 | if (pseries_hp_wq) | ||
580 | return 0; | ||
581 | |||
579 | pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", | 582 | pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue", |
580 | WQ_UNBOUND, 1); | 583 | WQ_UNBOUND, 1); |
584 | |||
585 | return pseries_hp_wq ? 0 : -ENOMEM; | ||
586 | } | ||
587 | |||
588 | static int __init dlpar_sysfs_init(void) | ||
589 | { | ||
590 | int rc; | ||
591 | |||
592 | rc = dlpar_workqueue_init(); | ||
593 | if (rc) | ||
594 | return rc; | ||
595 | |||
581 | return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); | 596 | return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr); |
582 | } | 597 | } |
583 | machine_device_initcall(pseries, pseries_dlpar_init); | 598 | machine_device_initcall(pseries, dlpar_sysfs_init); |
584 | 599 | ||
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 4470a3194311..1ae1d9f4dbe9 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h | |||
@@ -98,4 +98,6 @@ static inline unsigned long cmo_get_page_size(void) | |||
98 | return CMO_PageSize; | 98 | return CMO_PageSize; |
99 | } | 99 | } |
100 | 100 | ||
101 | int dlpar_workqueue_init(void); | ||
102 | |||
101 | #endif /* _PSERIES_PSERIES_H */ | 103 | #endif /* _PSERIES_PSERIES_H */ |
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index 4923ffe230cf..81d8614e7379 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -69,7 +69,8 @@ static int __init init_ras_IRQ(void) | |||
69 | /* Hotplug Events */ | 69 | /* Hotplug Events */ |
70 | np = of_find_node_by_path("/event-sources/hot-plug-events"); | 70 | np = of_find_node_by_path("/event-sources/hot-plug-events"); |
71 | if (np != NULL) { | 71 | if (np != NULL) { |
72 | request_event_sources_irqs(np, ras_hotplug_interrupt, | 72 | if (dlpar_workqueue_init() == 0) |
73 | request_event_sources_irqs(np, ras_hotplug_interrupt, | ||
73 | "RAS_HOTPLUG"); | 74 | "RAS_HOTPLUG"); |
74 | of_node_put(np); | 75 | of_node_put(np); |
75 | } | 76 | } |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index a8531e012658..ae4f596273b5 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -459,6 +459,39 @@ static void __init find_and_init_phbs(void) | |||
459 | of_pci_check_probe_only(); | 459 | of_pci_check_probe_only(); |
460 | } | 460 | } |
461 | 461 | ||
462 | static void pseries_setup_rfi_flush(void) | ||
463 | { | ||
464 | struct h_cpu_char_result result; | ||
465 | enum l1d_flush_type types; | ||
466 | bool enable; | ||
467 | long rc; | ||
468 | |||
469 | /* Enable by default */ | ||
470 | enable = true; | ||
471 | |||
472 | rc = plpar_get_cpu_characteristics(&result); | ||
473 | if (rc == H_SUCCESS) { | ||
474 | types = L1D_FLUSH_NONE; | ||
475 | |||
476 | if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2) | ||
477 | types |= L1D_FLUSH_MTTRIG; | ||
478 | if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30) | ||
479 | types |= L1D_FLUSH_ORI; | ||
480 | |||
481 | /* Use fallback if nothing set in hcall */ | ||
482 | if (types == L1D_FLUSH_NONE) | ||
483 | types = L1D_FLUSH_FALLBACK; | ||
484 | |||
485 | if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) | ||
486 | enable = false; | ||
487 | } else { | ||
488 | /* Default to fallback in case hcall is not available */ | ||
489 | types = L1D_FLUSH_FALLBACK; | ||
490 | } | ||
491 | |||
492 | setup_rfi_flush(types, enable); | ||
493 | } | ||
494 | |||
462 | static void __init pSeries_setup_arch(void) | 495 | static void __init pSeries_setup_arch(void) |
463 | { | 496 | { |
464 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); | 497 | set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); |
@@ -476,6 +509,8 @@ static void __init pSeries_setup_arch(void) | |||
476 | 509 | ||
477 | fwnmi_init(); | 510 | fwnmi_init(); |
478 | 511 | ||
512 | pseries_setup_rfi_flush(); | ||
513 | |||
479 | /* By default, only probe PCI (can be overridden by rtas_pci) */ | 514 | /* By default, only probe PCI (can be overridden by rtas_pci) */ |
480 | pci_add_flags(PCI_PROBE_ONLY); | 515 | pci_add_flags(PCI_PROBE_ONLY); |
481 | 516 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 366b19cb79b7..bc2204f829d3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -55,7 +55,6 @@ config X86 | |||
55 | select ARCH_HAS_GCOV_PROFILE_ALL | 55 | select ARCH_HAS_GCOV_PROFILE_ALL |
56 | select ARCH_HAS_KCOV if X86_64 | 56 | select ARCH_HAS_KCOV if X86_64 |
57 | select ARCH_HAS_PMEM_API if X86_64 | 57 | select ARCH_HAS_PMEM_API if X86_64 |
58 | # Causing hangs/crashes, see the commit that added this change for details. | ||
59 | select ARCH_HAS_REFCOUNT | 58 | select ARCH_HAS_REFCOUNT |
60 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 | 59 | select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 |
61 | select ARCH_HAS_SET_MEMORY | 60 | select ARCH_HAS_SET_MEMORY |
@@ -89,6 +88,7 @@ config X86 | |||
89 | select GENERIC_CLOCKEVENTS_MIN_ADJUST | 88 | select GENERIC_CLOCKEVENTS_MIN_ADJUST |
90 | select GENERIC_CMOS_UPDATE | 89 | select GENERIC_CMOS_UPDATE |
91 | select GENERIC_CPU_AUTOPROBE | 90 | select GENERIC_CPU_AUTOPROBE |
91 | select GENERIC_CPU_VULNERABILITIES | ||
92 | select GENERIC_EARLY_IOREMAP | 92 | select GENERIC_EARLY_IOREMAP |
93 | select GENERIC_FIND_FIRST_BIT | 93 | select GENERIC_FIND_FIRST_BIT |
94 | select GENERIC_IOMAP | 94 | select GENERIC_IOMAP |
@@ -430,6 +430,19 @@ config GOLDFISH | |||
430 | def_bool y | 430 | def_bool y |
431 | depends on X86_GOLDFISH | 431 | depends on X86_GOLDFISH |
432 | 432 | ||
433 | config RETPOLINE | ||
434 | bool "Avoid speculative indirect branches in kernel" | ||
435 | default y | ||
436 | help | ||
437 | Compile kernel with the retpoline compiler options to guard against | ||
438 | kernel-to-user data leaks by avoiding speculative indirect | ||
439 | branches. Requires a compiler with -mindirect-branch=thunk-extern | ||
440 | support for full protection. The kernel may run slower. | ||
441 | |||
442 | Without compiler support, at least indirect branches in assembler | ||
443 | code are eliminated. Since this includes the syscall entry path, | ||
444 | it is not entirely pointless. | ||
445 | |||
433 | config INTEL_RDT | 446 | config INTEL_RDT |
434 | bool "Intel Resource Director Technology support" | 447 | bool "Intel Resource Director Technology support" |
435 | default n | 448 | default n |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 3e73bc255e4e..fad55160dcb9 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -230,6 +230,14 @@ KBUILD_CFLAGS += -Wno-sign-compare | |||
230 | # | 230 | # |
231 | KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | 231 | KBUILD_CFLAGS += -fno-asynchronous-unwind-tables |
232 | 232 | ||
233 | # Avoid indirect branches in kernel to deal with Spectre | ||
234 | ifdef CONFIG_RETPOLINE | ||
235 | RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) | ||
236 | ifneq ($(RETPOLINE_CFLAGS),) | ||
237 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE | ||
238 | endif | ||
239 | endif | ||
240 | |||
233 | archscripts: scripts_basic | 241 | archscripts: scripts_basic |
234 | $(Q)$(MAKE) $(build)=arch/x86/tools relocs | 242 | $(Q)$(MAKE) $(build)=arch/x86/tools relocs |
235 | 243 | ||
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 16627fec80b2..3d09e3aca18d 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/linkage.h> | 32 | #include <linux/linkage.h> |
33 | #include <asm/inst.h> | 33 | #include <asm/inst.h> |
34 | #include <asm/frame.h> | 34 | #include <asm/frame.h> |
35 | #include <asm/nospec-branch.h> | ||
35 | 36 | ||
36 | /* | 37 | /* |
37 | * The following macros are used to move an (un)aligned 16 byte value to/from | 38 | * The following macros are used to move an (un)aligned 16 byte value to/from |
@@ -2884,7 +2885,7 @@ ENTRY(aesni_xts_crypt8) | |||
2884 | pxor INC, STATE4 | 2885 | pxor INC, STATE4 |
2885 | movdqu IV, 0x30(OUTP) | 2886 | movdqu IV, 0x30(OUTP) |
2886 | 2887 | ||
2887 | call *%r11 | 2888 | CALL_NOSPEC %r11 |
2888 | 2889 | ||
2889 | movdqu 0x00(OUTP), INC | 2890 | movdqu 0x00(OUTP), INC |
2890 | pxor INC, STATE1 | 2891 | pxor INC, STATE1 |
@@ -2929,7 +2930,7 @@ ENTRY(aesni_xts_crypt8) | |||
2929 | _aesni_gf128mul_x_ble() | 2930 | _aesni_gf128mul_x_ble() |
2930 | movups IV, (IVP) | 2931 | movups IV, (IVP) |
2931 | 2932 | ||
2932 | call *%r11 | 2933 | CALL_NOSPEC %r11 |
2933 | 2934 | ||
2934 | movdqu 0x40(OUTP), INC | 2935 | movdqu 0x40(OUTP), INC |
2935 | pxor INC, STATE1 | 2936 | pxor INC, STATE1 |
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S index f7c495e2863c..a14af6eb09cb 100644 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/linkage.h> | 18 | #include <linux/linkage.h> |
19 | #include <asm/frame.h> | 19 | #include <asm/frame.h> |
20 | #include <asm/nospec-branch.h> | ||
20 | 21 | ||
21 | #define CAMELLIA_TABLE_BYTE_LEN 272 | 22 | #define CAMELLIA_TABLE_BYTE_LEN 272 |
22 | 23 | ||
@@ -1227,7 +1228,7 @@ camellia_xts_crypt_16way: | |||
1227 | vpxor 14 * 16(%rax), %xmm15, %xmm14; | 1228 | vpxor 14 * 16(%rax), %xmm15, %xmm14; |
1228 | vpxor 15 * 16(%rax), %xmm15, %xmm15; | 1229 | vpxor 15 * 16(%rax), %xmm15, %xmm15; |
1229 | 1230 | ||
1230 | call *%r9; | 1231 | CALL_NOSPEC %r9; |
1231 | 1232 | ||
1232 | addq $(16 * 16), %rsp; | 1233 | addq $(16 * 16), %rsp; |
1233 | 1234 | ||
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S index eee5b3982cfd..b66bbfa62f50 100644 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/frame.h> | 14 | #include <asm/frame.h> |
15 | #include <asm/nospec-branch.h> | ||
15 | 16 | ||
16 | #define CAMELLIA_TABLE_BYTE_LEN 272 | 17 | #define CAMELLIA_TABLE_BYTE_LEN 272 |
17 | 18 | ||
@@ -1343,7 +1344,7 @@ camellia_xts_crypt_32way: | |||
1343 | vpxor 14 * 32(%rax), %ymm15, %ymm14; | 1344 | vpxor 14 * 32(%rax), %ymm15, %ymm14; |
1344 | vpxor 15 * 32(%rax), %ymm15, %ymm15; | 1345 | vpxor 15 * 32(%rax), %ymm15, %ymm15; |
1345 | 1346 | ||
1346 | call *%r9; | 1347 | CALL_NOSPEC %r9; |
1347 | 1348 | ||
1348 | addq $(16 * 32), %rsp; | 1349 | addq $(16 * 32), %rsp; |
1349 | 1350 | ||
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index 7a7de27c6f41..d9b734d0c8cc 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S | |||
@@ -45,6 +45,7 @@ | |||
45 | 45 | ||
46 | #include <asm/inst.h> | 46 | #include <asm/inst.h> |
47 | #include <linux/linkage.h> | 47 | #include <linux/linkage.h> |
48 | #include <asm/nospec-branch.h> | ||
48 | 49 | ||
49 | ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction | 50 | ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction |
50 | 51 | ||
@@ -172,7 +173,7 @@ continue_block: | |||
172 | movzxw (bufp, %rax, 2), len | 173 | movzxw (bufp, %rax, 2), len |
173 | lea crc_array(%rip), bufp | 174 | lea crc_array(%rip), bufp |
174 | lea (bufp, len, 1), bufp | 175 | lea (bufp, len, 1), bufp |
175 | jmp *bufp | 176 | JMP_NOSPEC bufp |
176 | 177 | ||
177 | ################################################################ | 178 | ################################################################ |
178 | ## 2a) PROCESS FULL BLOCKS: | 179 | ## 2a) PROCESS FULL BLOCKS: |
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index 45a63e00a6af..3f48f695d5e6 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h | |||
@@ -198,8 +198,11 @@ For 32-bit we have the following conventions - kernel is built with | |||
198 | * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two | 198 | * PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two |
199 | * halves: | 199 | * halves: |
200 | */ | 200 | */ |
201 | #define PTI_SWITCH_PGTABLES_MASK (1<<PAGE_SHIFT) | 201 | #define PTI_USER_PGTABLE_BIT PAGE_SHIFT |
202 | #define PTI_SWITCH_MASK (PTI_SWITCH_PGTABLES_MASK|(1<<X86_CR3_PTI_SWITCH_BIT)) | 202 | #define PTI_USER_PGTABLE_MASK (1 << PTI_USER_PGTABLE_BIT) |
203 | #define PTI_USER_PCID_BIT X86_CR3_PTI_PCID_USER_BIT | ||
204 | #define PTI_USER_PCID_MASK (1 << PTI_USER_PCID_BIT) | ||
205 | #define PTI_USER_PGTABLE_AND_PCID_MASK (PTI_USER_PCID_MASK | PTI_USER_PGTABLE_MASK) | ||
203 | 206 | ||
204 | .macro SET_NOFLUSH_BIT reg:req | 207 | .macro SET_NOFLUSH_BIT reg:req |
205 | bts $X86_CR3_PCID_NOFLUSH_BIT, \reg | 208 | bts $X86_CR3_PCID_NOFLUSH_BIT, \reg |
@@ -208,7 +211,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
208 | .macro ADJUST_KERNEL_CR3 reg:req | 211 | .macro ADJUST_KERNEL_CR3 reg:req |
209 | ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID | 212 | ALTERNATIVE "", "SET_NOFLUSH_BIT \reg", X86_FEATURE_PCID |
210 | /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ | 213 | /* Clear PCID and "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */ |
211 | andq $(~PTI_SWITCH_MASK), \reg | 214 | andq $(~PTI_USER_PGTABLE_AND_PCID_MASK), \reg |
212 | .endm | 215 | .endm |
213 | 216 | ||
214 | .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req | 217 | .macro SWITCH_TO_KERNEL_CR3 scratch_reg:req |
@@ -239,15 +242,19 @@ For 32-bit we have the following conventions - kernel is built with | |||
239 | /* Flush needed, clear the bit */ | 242 | /* Flush needed, clear the bit */ |
240 | btr \scratch_reg, THIS_CPU_user_pcid_flush_mask | 243 | btr \scratch_reg, THIS_CPU_user_pcid_flush_mask |
241 | movq \scratch_reg2, \scratch_reg | 244 | movq \scratch_reg2, \scratch_reg |
242 | jmp .Lwrcr3_\@ | 245 | jmp .Lwrcr3_pcid_\@ |
243 | 246 | ||
244 | .Lnoflush_\@: | 247 | .Lnoflush_\@: |
245 | movq \scratch_reg2, \scratch_reg | 248 | movq \scratch_reg2, \scratch_reg |
246 | SET_NOFLUSH_BIT \scratch_reg | 249 | SET_NOFLUSH_BIT \scratch_reg |
247 | 250 | ||
251 | .Lwrcr3_pcid_\@: | ||
252 | /* Flip the ASID to the user version */ | ||
253 | orq $(PTI_USER_PCID_MASK), \scratch_reg | ||
254 | |||
248 | .Lwrcr3_\@: | 255 | .Lwrcr3_\@: |
249 | /* Flip the PGD and ASID to the user version */ | 256 | /* Flip the PGD to the user version */ |
250 | orq $(PTI_SWITCH_MASK), \scratch_reg | 257 | orq $(PTI_USER_PGTABLE_MASK), \scratch_reg |
251 | mov \scratch_reg, %cr3 | 258 | mov \scratch_reg, %cr3 |
252 | .Lend_\@: | 259 | .Lend_\@: |
253 | .endm | 260 | .endm |
@@ -263,17 +270,12 @@ For 32-bit we have the following conventions - kernel is built with | |||
263 | movq %cr3, \scratch_reg | 270 | movq %cr3, \scratch_reg |
264 | movq \scratch_reg, \save_reg | 271 | movq \scratch_reg, \save_reg |
265 | /* | 272 | /* |
266 | * Is the "switch mask" all zero? That means that both of | 273 | * Test the user pagetable bit. If set, then the user page tables |
267 | * these are zero: | 274 | * are active. If clear CR3 already has the kernel page table |
268 | * | 275 | * active. |
269 | * 1. The user/kernel PCID bit, and | ||
270 | * 2. The user/kernel "bit" that points CR3 to the | ||
271 | * bottom half of the 8k PGD | ||
272 | * | ||
273 | * That indicates a kernel CR3 value, not a user CR3. | ||
274 | */ | 276 | */ |
275 | testq $(PTI_SWITCH_MASK), \scratch_reg | 277 | bt $PTI_USER_PGTABLE_BIT, \scratch_reg |
276 | jz .Ldone_\@ | 278 | jnc .Ldone_\@ |
277 | 279 | ||
278 | ADJUST_KERNEL_CR3 \scratch_reg | 280 | ADJUST_KERNEL_CR3 \scratch_reg |
279 | movq \scratch_reg, %cr3 | 281 | movq \scratch_reg, %cr3 |
@@ -290,7 +292,7 @@ For 32-bit we have the following conventions - kernel is built with | |||
290 | * KERNEL pages can always resume with NOFLUSH as we do | 292 | * KERNEL pages can always resume with NOFLUSH as we do |
291 | * explicit flushes. | 293 | * explicit flushes. |
292 | */ | 294 | */ |
293 | bt $X86_CR3_PTI_SWITCH_BIT, \save_reg | 295 | bt $PTI_USER_PGTABLE_BIT, \save_reg |
294 | jnc .Lnoflush_\@ | 296 | jnc .Lnoflush_\@ |
295 | 297 | ||
296 | /* | 298 | /* |
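The renamed masks above encode two independent bits in the user CR3: PTI_USER_PGTABLE_MASK (bit PAGE_SHIFT) selects the user half of the 8k PGD, and PTI_USER_PCID_MASK (bit X86_CR3_PTI_PCID_USER_BIT) selects the user ASID. A minimal sketch of the value the PCID path of SWITCH_TO_USER_CR3 ends up writing (illustrative only; the helper name is invented, and it assumes PCID is in use with no NOFLUSH handling):

/* Illustrative only: user CR3 derived from a kernel CR3 when PCID is enabled. */
static inline unsigned long pti_user_cr3_sketch(unsigned long kernel_cr3)
{
	return kernel_cr3 | PTI_USER_PGTABLE_MASK | PTI_USER_PCID_MASK;
}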
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index ace8f321a5a1..a1f28a54f23a 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/asm.h> | 44 | #include <asm/asm.h> |
45 | #include <asm/smap.h> | 45 | #include <asm/smap.h> |
46 | #include <asm/frame.h> | 46 | #include <asm/frame.h> |
47 | #include <asm/nospec-branch.h> | ||
47 | 48 | ||
48 | .section .entry.text, "ax" | 49 | .section .entry.text, "ax" |
49 | 50 | ||
@@ -290,7 +291,7 @@ ENTRY(ret_from_fork) | |||
290 | 291 | ||
291 | /* kernel thread */ | 292 | /* kernel thread */ |
292 | 1: movl %edi, %eax | 293 | 1: movl %edi, %eax |
293 | call *%ebx | 294 | CALL_NOSPEC %ebx |
294 | /* | 295 | /* |
295 | * A kernel thread is allowed to return here after successfully | 296 | * A kernel thread is allowed to return here after successfully |
296 | * calling do_execve(). Exit to userspace to complete the execve() | 297 | * calling do_execve(). Exit to userspace to complete the execve() |
@@ -919,7 +920,7 @@ common_exception: | |||
919 | movl %ecx, %es | 920 | movl %ecx, %es |
920 | TRACE_IRQS_OFF | 921 | TRACE_IRQS_OFF |
921 | movl %esp, %eax # pt_regs pointer | 922 | movl %esp, %eax # pt_regs pointer |
922 | call *%edi | 923 | CALL_NOSPEC %edi |
923 | jmp ret_from_exception | 924 | jmp ret_from_exception |
924 | END(common_exception) | 925 | END(common_exception) |
925 | 926 | ||
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index f048e384ff54..4f8e1d35a97c 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/pgtable_types.h> | 37 | #include <asm/pgtable_types.h> |
38 | #include <asm/export.h> | 38 | #include <asm/export.h> |
39 | #include <asm/frame.h> | 39 | #include <asm/frame.h> |
40 | #include <asm/nospec-branch.h> | ||
40 | #include <linux/err.h> | 41 | #include <linux/err.h> |
41 | 42 | ||
42 | #include "calling.h" | 43 | #include "calling.h" |
@@ -191,7 +192,7 @@ ENTRY(entry_SYSCALL_64_trampoline) | |||
191 | */ | 192 | */ |
192 | pushq %rdi | 193 | pushq %rdi |
193 | movq $entry_SYSCALL_64_stage2, %rdi | 194 | movq $entry_SYSCALL_64_stage2, %rdi |
194 | jmp *%rdi | 195 | JMP_NOSPEC %rdi |
195 | END(entry_SYSCALL_64_trampoline) | 196 | END(entry_SYSCALL_64_trampoline) |
196 | 197 | ||
197 | .popsection | 198 | .popsection |
@@ -270,7 +271,12 @@ entry_SYSCALL_64_fastpath: | |||
270 | * It might end up jumping to the slow path. If it jumps, RAX | 271 | * It might end up jumping to the slow path. If it jumps, RAX |
271 | * and all argument registers are clobbered. | 272 | * and all argument registers are clobbered. |
272 | */ | 273 | */ |
274 | #ifdef CONFIG_RETPOLINE | ||
275 | movq sys_call_table(, %rax, 8), %rax | ||
276 | call __x86_indirect_thunk_rax | ||
277 | #else | ||
273 | call *sys_call_table(, %rax, 8) | 278 | call *sys_call_table(, %rax, 8) |
279 | #endif | ||
274 | .Lentry_SYSCALL_64_after_fastpath_call: | 280 | .Lentry_SYSCALL_64_after_fastpath_call: |
275 | 281 | ||
276 | movq %rax, RAX(%rsp) | 282 | movq %rax, RAX(%rsp) |
@@ -442,7 +448,7 @@ ENTRY(stub_ptregs_64) | |||
442 | jmp entry_SYSCALL64_slow_path | 448 | jmp entry_SYSCALL64_slow_path |
443 | 449 | ||
444 | 1: | 450 | 1: |
445 | jmp *%rax /* Called from C */ | 451 | JMP_NOSPEC %rax /* Called from C */ |
446 | END(stub_ptregs_64) | 452 | END(stub_ptregs_64) |
447 | 453 | ||
448 | .macro ptregs_stub func | 454 | .macro ptregs_stub func |
@@ -521,7 +527,7 @@ ENTRY(ret_from_fork) | |||
521 | 1: | 527 | 1: |
522 | /* kernel thread */ | 528 | /* kernel thread */ |
523 | movq %r12, %rdi | 529 | movq %r12, %rdi |
524 | call *%rbx | 530 | CALL_NOSPEC %rbx |
525 | /* | 531 | /* |
526 | * A kernel thread is allowed to return here after successfully | 532 | * A kernel thread is allowed to return here after successfully |
527 | * calling do_execve(). Exit to userspace to complete the execve() | 533 | * calling do_execve(). Exit to userspace to complete the execve() |
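The CONFIG_RETPOLINE branch added to the syscall fast path above shows the general pattern for table-driven indirect calls under retpoline: because the thunks are per-register, the single "call *sys_call_table(, %rax, 8)" is split into a load of the handler address into %rax followed by a direct "call __x86_indirect_thunk_rax", so the only indirect transfer happens inside the thunk, where it is protected.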
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 141e07b06216..24ffa1e88cf9 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c | |||
@@ -582,6 +582,24 @@ static __init int bts_init(void) | |||
582 | if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) | 582 | if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) |
583 | return -ENODEV; | 583 | return -ENODEV; |
584 | 584 | ||
585 | if (boot_cpu_has(X86_FEATURE_PTI)) { | ||
586 | /* | ||
587 | * BTS hardware writes through a virtual memory map, so we must | ||
588 | * either use the kernel physical map, or the user mapping of | ||
589 | * the AUX buffer. | ||
590 | * | ||
591 | * However, since this driver supports per-CPU and per-task inherit, | ||
592 | * we cannot use the user mapping since it will not be available | ||
593 | * if we're not running the owning process. | ||
594 | * | ||
595 | * With PTI we can't use the kernel map either, because it's not | ||
596 | * there when we run userspace. | ||
597 | * | ||
598 | * For now, disable this driver when using PTI. | ||
599 | */ | ||
600 | return -ENODEV; | ||
601 | } | ||
602 | |||
585 | bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | | 603 | bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | |
586 | PERF_PMU_CAP_EXCLUSIVE; | 604 | PERF_PMU_CAP_EXCLUSIVE; |
587 | bts_pmu.task_ctx_nr = perf_sw_context; | 605 | bts_pmu.task_ctx_nr = perf_sw_context; |
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h index ff700d81e91e..0927cdc4f946 100644 --- a/arch/x86/include/asm/asm-prototypes.h +++ b/arch/x86/include/asm/asm-prototypes.h | |||
@@ -11,7 +11,32 @@ | |||
11 | #include <asm/pgtable.h> | 11 | #include <asm/pgtable.h> |
12 | #include <asm/special_insns.h> | 12 | #include <asm/special_insns.h> |
13 | #include <asm/preempt.h> | 13 | #include <asm/preempt.h> |
14 | #include <asm/asm.h> | ||
14 | 15 | ||
15 | #ifndef CONFIG_X86_CMPXCHG64 | 16 | #ifndef CONFIG_X86_CMPXCHG64 |
16 | extern void cmpxchg8b_emu(void); | 17 | extern void cmpxchg8b_emu(void); |
17 | #endif | 18 | #endif |
19 | |||
20 | #ifdef CONFIG_RETPOLINE | ||
21 | #ifdef CONFIG_X86_32 | ||
22 | #define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_e ## reg(void); | ||
23 | #else | ||
24 | #define INDIRECT_THUNK(reg) extern asmlinkage void __x86_indirect_thunk_r ## reg(void); | ||
25 | INDIRECT_THUNK(8) | ||
26 | INDIRECT_THUNK(9) | ||
27 | INDIRECT_THUNK(10) | ||
28 | INDIRECT_THUNK(11) | ||
29 | INDIRECT_THUNK(12) | ||
30 | INDIRECT_THUNK(13) | ||
31 | INDIRECT_THUNK(14) | ||
32 | INDIRECT_THUNK(15) | ||
33 | #endif | ||
34 | INDIRECT_THUNK(ax) | ||
35 | INDIRECT_THUNK(bx) | ||
36 | INDIRECT_THUNK(cx) | ||
37 | INDIRECT_THUNK(dx) | ||
38 | INDIRECT_THUNK(si) | ||
39 | INDIRECT_THUNK(di) | ||
40 | INDIRECT_THUNK(bp) | ||
41 | INDIRECT_THUNK(sp) | ||
42 | #endif /* CONFIG_RETPOLINE */ | ||
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 21ac898df2d8..f275447862f4 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h | |||
@@ -203,6 +203,8 @@ | |||
203 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ | 203 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
204 | #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ | 204 | #define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */ |
205 | #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ | 205 | #define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */ |
206 | #define X86_FEATURE_RETPOLINE ( 7*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */ | ||
207 | #define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* AMD Retpoline mitigation for Spectre variant 2 */ | ||
206 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ | 208 | #define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */ |
207 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ | 209 | #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */ |
208 | #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ | 210 | #define X86_FEATURE_AVX512_4VNNIW ( 7*32+16) /* AVX-512 Neural Network Instructions */ |
@@ -342,5 +344,7 @@ | |||
342 | #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ | 344 | #define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */ |
343 | #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ | 345 | #define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */ |
344 | #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ | 346 | #define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */ |
347 | #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */ | ||
348 | #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ | ||
345 | 349 | ||
346 | #endif /* _ASM_X86_CPUFEATURES_H */ | 350 | #endif /* _ASM_X86_CPUFEATURES_H */ |
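A minimal sketch of how the new feature/bug bits above are typically consumed (illustrative only; the function name and messages are invented, while boot_cpu_has()/boot_cpu_has_bug() are the standard accessors):

static void spectre_v2_report_sketch(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return;		/* CPU not affected */

	if (boot_cpu_has(X86_FEATURE_RETPOLINE))
		pr_info("sketch: Spectre v2 mitigated with retpolines\n");
	else
		pr_info("sketch: Spectre v2 affected, no retpoline in this build\n");
}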
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 5400add2885b..8bf450b13d9f 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/nmi.h> | 7 | #include <linux/nmi.h> |
8 | #include <asm/io.h> | 8 | #include <asm/io.h> |
9 | #include <asm/hyperv.h> | 9 | #include <asm/hyperv.h> |
10 | #include <asm/nospec-branch.h> | ||
10 | 11 | ||
11 | /* | 12 | /* |
12 | * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent | 13 | * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent |
@@ -186,10 +187,11 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | |||
186 | return U64_MAX; | 187 | return U64_MAX; |
187 | 188 | ||
188 | __asm__ __volatile__("mov %4, %%r8\n" | 189 | __asm__ __volatile__("mov %4, %%r8\n" |
189 | "call *%5" | 190 | CALL_NOSPEC |
190 | : "=a" (hv_status), ASM_CALL_CONSTRAINT, | 191 | : "=a" (hv_status), ASM_CALL_CONSTRAINT, |
191 | "+c" (control), "+d" (input_address) | 192 | "+c" (control), "+d" (input_address) |
192 | : "r" (output_address), "m" (hv_hypercall_pg) | 193 | : "r" (output_address), |
194 | THUNK_TARGET(hv_hypercall_pg) | ||
193 | : "cc", "memory", "r8", "r9", "r10", "r11"); | 195 | : "cc", "memory", "r8", "r9", "r10", "r11"); |
194 | #else | 196 | #else |
195 | u32 input_address_hi = upper_32_bits(input_address); | 197 | u32 input_address_hi = upper_32_bits(input_address); |
@@ -200,13 +202,13 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output) | |||
200 | if (!hv_hypercall_pg) | 202 | if (!hv_hypercall_pg) |
201 | return U64_MAX; | 203 | return U64_MAX; |
202 | 204 | ||
203 | __asm__ __volatile__("call *%7" | 205 | __asm__ __volatile__(CALL_NOSPEC |
204 | : "=A" (hv_status), | 206 | : "=A" (hv_status), |
205 | "+c" (input_address_lo), ASM_CALL_CONSTRAINT | 207 | "+c" (input_address_lo), ASM_CALL_CONSTRAINT |
206 | : "A" (control), | 208 | : "A" (control), |
207 | "b" (input_address_hi), | 209 | "b" (input_address_hi), |
208 | "D"(output_address_hi), "S"(output_address_lo), | 210 | "D"(output_address_hi), "S"(output_address_lo), |
209 | "m" (hv_hypercall_pg) | 211 | THUNK_TARGET(hv_hypercall_pg) |
210 | : "cc", "memory"); | 212 | : "cc", "memory"); |
211 | #endif /* !x86_64 */ | 213 | #endif /* !x86_64 */ |
212 | return hv_status; | 214 | return hv_status; |
@@ -227,10 +229,10 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) | |||
227 | 229 | ||
228 | #ifdef CONFIG_X86_64 | 230 | #ifdef CONFIG_X86_64 |
229 | { | 231 | { |
230 | __asm__ __volatile__("call *%4" | 232 | __asm__ __volatile__(CALL_NOSPEC |
231 | : "=a" (hv_status), ASM_CALL_CONSTRAINT, | 233 | : "=a" (hv_status), ASM_CALL_CONSTRAINT, |
232 | "+c" (control), "+d" (input1) | 234 | "+c" (control), "+d" (input1) |
233 | : "m" (hv_hypercall_pg) | 235 | : THUNK_TARGET(hv_hypercall_pg) |
234 | : "cc", "r8", "r9", "r10", "r11"); | 236 | : "cc", "r8", "r9", "r10", "r11"); |
235 | } | 237 | } |
236 | #else | 238 | #else |
@@ -238,13 +240,13 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1) | |||
238 | u32 input1_hi = upper_32_bits(input1); | 240 | u32 input1_hi = upper_32_bits(input1); |
239 | u32 input1_lo = lower_32_bits(input1); | 241 | u32 input1_lo = lower_32_bits(input1); |
240 | 242 | ||
241 | __asm__ __volatile__ ("call *%5" | 243 | __asm__ __volatile__ (CALL_NOSPEC |
242 | : "=A"(hv_status), | 244 | : "=A"(hv_status), |
243 | "+c"(input1_lo), | 245 | "+c"(input1_lo), |
244 | ASM_CALL_CONSTRAINT | 246 | ASM_CALL_CONSTRAINT |
245 | : "A" (control), | 247 | : "A" (control), |
246 | "b" (input1_hi), | 248 | "b" (input1_hi), |
247 | "m" (hv_hypercall_pg) | 249 | THUNK_TARGET(hv_hypercall_pg) |
248 | : "cc", "edi", "esi"); | 250 | : "cc", "edi", "esi"); |
249 | } | 251 | } |
250 | #endif | 252 | #endif |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 34c4922bbc3f..e7b983a35506 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -355,6 +355,9 @@ | |||
355 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL | 355 | #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL |
356 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 | 356 | #define FAM10H_MMIO_CONF_BASE_SHIFT 20 |
357 | #define MSR_FAM10H_NODE_ID 0xc001100c | 357 | #define MSR_FAM10H_NODE_ID 0xc001100c |
358 | #define MSR_F10H_DECFG 0xc0011029 | ||
359 | #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1 | ||
360 | #define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT) | ||
358 | 361 | ||
359 | /* K8 MSRs */ | 362 | /* K8 MSRs */ |
360 | #define MSR_K8_TOP_MEM1 0xc001001a | 363 | #define MSR_K8_TOP_MEM1 0xc001001a |
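The new MSR_F10H_DECFG bits above exist so that LFENCE can be made dispatch-serializing on AMD parts and then used as the speculation barrier behind X86_FEATURE_RETPOLINE_AMD ("lfence; jmp/call *reg"). A minimal sketch of setting the bit (illustrative only; the function name is invented, msr_set_bit() is the usual helper, and the code that actually does this is not part of this hunk):

static void lfence_serialize_sketch(void)
{
	/* Make LFENCE dispatch serializing so it can be used as a barrier */
	msr_set_bit(MSR_F10H_DECFG, MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
}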
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h new file mode 100644 index 000000000000..402a11c803c3 --- /dev/null +++ b/arch/x86/include/asm/nospec-branch.h | |||
@@ -0,0 +1,214 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef __NOSPEC_BRANCH_H__ | ||
4 | #define __NOSPEC_BRANCH_H__ | ||
5 | |||
6 | #include <asm/alternative.h> | ||
7 | #include <asm/alternative-asm.h> | ||
8 | #include <asm/cpufeatures.h> | ||
9 | |||
10 | /* | ||
11 | * Fill the CPU return stack buffer. | ||
12 | * | ||
13 | * Each entry in the RSB, if used for a speculative 'ret', contains an | ||
14 | * infinite 'pause; jmp' loop to capture speculative execution. | ||
15 | * | ||
16 | * This is required in various cases for retpoline and IBRS-based | ||
17 | * mitigations for the Spectre variant 2 vulnerability. Sometimes to | ||
18 | * eliminate potentially bogus entries from the RSB, and sometimes | ||
19 | * purely to ensure that it doesn't get empty, which on some CPUs would | ||
20 | * allow predictions from other (unwanted!) sources to be used. | ||
21 | * | ||
22 | * We define a CPP macro such that it can be used from both .S files and | ||
23 | * inline assembly. It's possible to do a .macro and then include that | ||
24 | * from C via asm(".include <asm/nospec-branch.h>") but let's not go there. | ||
25 | */ | ||
26 | |||
27 | #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ | ||
28 | #define RSB_FILL_LOOPS 16 /* To avoid underflow */ | ||
29 | |||
30 | /* | ||
31 | * Google experimented with loop-unrolling and this turned out to be | ||
32 | * the optimal version — two calls, each with their own speculation | ||
33 | * trap should their return address end up getting used, in a loop. | ||
34 | */ | ||
35 | #define __FILL_RETURN_BUFFER(reg, nr, sp) \ | ||
36 | mov $(nr/2), reg; \ | ||
37 | 771: \ | ||
38 | call 772f; \ | ||
39 | 773: /* speculation trap */ \ | ||
40 | pause; \ | ||
41 | jmp 773b; \ | ||
42 | 772: \ | ||
43 | call 774f; \ | ||
44 | 775: /* speculation trap */ \ | ||
45 | pause; \ | ||
46 | jmp 775b; \ | ||
47 | 774: \ | ||
48 | dec reg; \ | ||
49 | jnz 771b; \ | ||
50 | add $(BITS_PER_LONG/8) * nr, sp; | ||
51 | |||
52 | #ifdef __ASSEMBLY__ | ||
53 | |||
54 | /* | ||
55 | * This should be used immediately before a retpoline alternative. It tells | ||
56 | * objtool where the retpolines are so that it can make sense of the control | ||
57 | * flow by just reading the original instruction(s) and ignoring the | ||
58 | * alternatives. | ||
59 | */ | ||
60 | .macro ANNOTATE_NOSPEC_ALTERNATIVE | ||
61 | .Lannotate_\@: | ||
62 | .pushsection .discard.nospec | ||
63 | .long .Lannotate_\@ - . | ||
64 | .popsection | ||
65 | .endm | ||
66 | |||
67 | /* | ||
68 | * These are the bare retpoline primitives for indirect jmp and call. | ||
69 | * Do not use these directly; they only exist to make the ALTERNATIVE | ||
70 | * invocation below less ugly. | ||
71 | */ | ||
72 | .macro RETPOLINE_JMP reg:req | ||
73 | call .Ldo_rop_\@ | ||
74 | .Lspec_trap_\@: | ||
75 | pause | ||
76 | jmp .Lspec_trap_\@ | ||
77 | .Ldo_rop_\@: | ||
78 | mov \reg, (%_ASM_SP) | ||
79 | ret | ||
80 | .endm | ||
81 | |||
82 | /* | ||
83 | * This is a wrapper around RETPOLINE_JMP so the called function in reg | ||
84 | * returns to the instruction after the macro. | ||
85 | */ | ||
86 | .macro RETPOLINE_CALL reg:req | ||
87 | jmp .Ldo_call_\@ | ||
88 | .Ldo_retpoline_jmp_\@: | ||
89 | RETPOLINE_JMP \reg | ||
90 | .Ldo_call_\@: | ||
91 | call .Ldo_retpoline_jmp_\@ | ||
92 | .endm | ||
93 | |||
94 | /* | ||
95 | * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple | ||
96 | * indirect jmp/call which may be susceptible to the Spectre variant 2 | ||
97 | * attack. | ||
98 | */ | ||
99 | .macro JMP_NOSPEC reg:req | ||
100 | #ifdef CONFIG_RETPOLINE | ||
101 | ANNOTATE_NOSPEC_ALTERNATIVE | ||
102 | ALTERNATIVE_2 __stringify(jmp *\reg), \ | ||
103 | __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ | ||
104 | __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD | ||
105 | #else | ||
106 | jmp *\reg | ||
107 | #endif | ||
108 | .endm | ||
109 | |||
110 | .macro CALL_NOSPEC reg:req | ||
111 | #ifdef CONFIG_RETPOLINE | ||
112 | ANNOTATE_NOSPEC_ALTERNATIVE | ||
113 | ALTERNATIVE_2 __stringify(call *\reg), \ | ||
114 | __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ | ||
115 | __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD | ||
116 | #else | ||
117 | call *\reg | ||
118 | #endif | ||
119 | .endm | ||
120 | |||
121 | /* | ||
122 | * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP | ||
123 | * monstrosity above, manually. | ||
124 | */ | ||
125 | .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req | ||
126 | #ifdef CONFIG_RETPOLINE | ||
127 | ANNOTATE_NOSPEC_ALTERNATIVE | ||
128 | ALTERNATIVE "jmp .Lskip_rsb_\@", \ | ||
129 | __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ | ||
130 | \ftr | ||
131 | .Lskip_rsb_\@: | ||
132 | #endif | ||
133 | .endm | ||
134 | |||
135 | #else /* __ASSEMBLY__ */ | ||
136 | |||
137 | #define ANNOTATE_NOSPEC_ALTERNATIVE \ | ||
138 | "999:\n\t" \ | ||
139 | ".pushsection .discard.nospec\n\t" \ | ||
140 | ".long 999b - .\n\t" \ | ||
141 | ".popsection\n\t" | ||
142 | |||
143 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) | ||
144 | |||
145 | /* | ||
146 | * Since the inline asm uses the %V modifier which is only in newer GCC, | ||
147 | * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. | ||
148 | */ | ||
149 | # define CALL_NOSPEC \ | ||
150 | ANNOTATE_NOSPEC_ALTERNATIVE \ | ||
151 | ALTERNATIVE( \ | ||
152 | "call *%[thunk_target]\n", \ | ||
153 | "call __x86_indirect_thunk_%V[thunk_target]\n", \ | ||
154 | X86_FEATURE_RETPOLINE) | ||
155 | # define THUNK_TARGET(addr) [thunk_target] "r" (addr) | ||
156 | |||
157 | #elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) | ||
158 | /* | ||
159 | * For i386 we use the original ret-equivalent retpoline, because | ||
160 | * otherwise we'll run out of registers. We don't care about CET | ||
161 | * here, anyway. | ||
162 | */ | ||
163 | # define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \ | ||
164 | " jmp 904f;\n" \ | ||
165 | " .align 16\n" \ | ||
166 | "901: call 903f;\n" \ | ||
167 | "902: pause;\n" \ | ||
168 | " jmp 902b;\n" \ | ||
169 | " .align 16\n" \ | ||
170 | "903: addl $4, %%esp;\n" \ | ||
171 | " pushl %[thunk_target];\n" \ | ||
172 | " ret;\n" \ | ||
173 | " .align 16\n" \ | ||
174 | "904: call 901b;\n", \ | ||
175 | X86_FEATURE_RETPOLINE) | ||
176 | |||
177 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) | ||
178 | #else /* No retpoline for C / inline asm */ | ||
179 | # define CALL_NOSPEC "call *%[thunk_target]\n" | ||
180 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) | ||
181 | #endif | ||
182 | |||
183 | /* The Spectre V2 mitigation variants */ | ||
184 | enum spectre_v2_mitigation { | ||
185 | SPECTRE_V2_NONE, | ||
186 | SPECTRE_V2_RETPOLINE_MINIMAL, | ||
187 | SPECTRE_V2_RETPOLINE_MINIMAL_AMD, | ||
188 | SPECTRE_V2_RETPOLINE_GENERIC, | ||
189 | SPECTRE_V2_RETPOLINE_AMD, | ||
190 | SPECTRE_V2_IBRS, | ||
191 | }; | ||
192 | |||
193 | /* | ||
194 | * On VMEXIT we must ensure that no RSB predictions learned in the guest | ||
195 | * can be followed in the host, by overwriting the RSB completely. Both | ||
196 | * retpoline and IBRS mitigations for Spectre v2 need this; only on future | ||
197 | * CPUs with IBRS_ATT *might* it be avoided. | ||
198 | */ | ||
199 | static inline void vmexit_fill_RSB(void) | ||
200 | { | ||
201 | #ifdef CONFIG_RETPOLINE | ||
202 | unsigned long loops = RSB_CLEAR_LOOPS / 2; | ||
203 | |||
204 | asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE | ||
205 | ALTERNATIVE("jmp 910f", | ||
206 | __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), | ||
207 | X86_FEATURE_RETPOLINE) | ||
208 | "910:" | ||
209 | : "=&r" (loops), ASM_CALL_CONSTRAINT | ||
210 | : "r" (loops) : "memory" ); | ||
211 | #endif | ||
212 | } | ||
213 | #endif /* __ASSEMBLY__ */ | ||
214 | #endif /* __NOSPEC_BRANCH_H__ */ | ||
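As a rough illustration of how C code consumes the macros defined above (assuming an x86-64 build; the function name example_dispatch and its handler argument are invented for this sketch, not part of the patch):

#include <asm/asm.h>
#include <asm/nospec-branch.h>

/*
 * Sketch only: call through a function pointer without emitting a bare
 * indirect call. The clobber list names the x86-64 caller-saved registers,
 * since the callee may use any of them.
 */
static void example_dispatch(void (*handler)(void))
{
	asm volatile(CALL_NOSPEC
		     : ASM_CALL_CONSTRAINT
		     : THUNK_TARGET(handler)
		     : "rax", "rcx", "rdx", "rsi", "rdi",
		       "r8", "r9", "r10", "r11", "cc", "memory");
}

The same named operand, [thunk_target], is what the call sites converted later in this series (the Xen hypercall and irq_32.c hunks) pass explicitly.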
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index 7a5d6695abd3..eb66fa9cd0fc 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h | |||
@@ -38,6 +38,7 @@ do { \ | |||
38 | #define PCI_NOASSIGN_ROMS 0x80000 | 38 | #define PCI_NOASSIGN_ROMS 0x80000 |
39 | #define PCI_ROOT_NO_CRS 0x100000 | 39 | #define PCI_ROOT_NO_CRS 0x100000 |
40 | #define PCI_NOASSIGN_BARS 0x200000 | 40 | #define PCI_NOASSIGN_BARS 0x200000 |
41 | #define PCI_BIG_ROOT_WINDOW 0x400000 | ||
41 | 42 | ||
42 | extern unsigned int pci_probe; | 43 | extern unsigned int pci_probe; |
43 | extern unsigned long pirq_table_addr; | 44 | extern unsigned long pirq_table_addr; |
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h index 6a60fea90b9d..625a52a5594f 100644 --- a/arch/x86/include/asm/processor-flags.h +++ b/arch/x86/include/asm/processor-flags.h | |||
@@ -40,7 +40,7 @@ | |||
40 | #define CR3_NOFLUSH BIT_ULL(63) | 40 | #define CR3_NOFLUSH BIT_ULL(63) |
41 | 41 | ||
42 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | 42 | #ifdef CONFIG_PAGE_TABLE_ISOLATION |
43 | # define X86_CR3_PTI_SWITCH_BIT 11 | 43 | # define X86_CR3_PTI_PCID_USER_BIT 11 |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #else | 46 | #else |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 4a08dd2ab32a..d33e4a26dc7e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -81,13 +81,13 @@ static inline u16 kern_pcid(u16 asid) | |||
81 | * Make sure that the dynamic ASID space does not conflict with the | 81 | * Make sure that the dynamic ASID space does not conflict with the |
82 | * bit we are using to switch between user and kernel ASIDs. | 82 | * bit we are using to switch between user and kernel ASIDs. |
83 | */ | 83 | */ |
84 | BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT)); | 84 | BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT)); |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * The ASID being passed in here should have respected the | 87 | * The ASID being passed in here should have respected the |
88 | * MAX_ASID_AVAILABLE and thus never have the switch bit set. | 88 | * MAX_ASID_AVAILABLE and thus never have the switch bit set. |
89 | */ | 89 | */ |
90 | VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT)); | 90 | VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_PCID_USER_BIT)); |
91 | #endif | 91 | #endif |
92 | /* | 92 | /* |
93 | * The dynamically-assigned ASIDs that get passed in are small | 93 | * The dynamically-assigned ASIDs that get passed in are small |
@@ -112,7 +112,7 @@ static inline u16 user_pcid(u16 asid) | |||
112 | { | 112 | { |
113 | u16 ret = kern_pcid(asid); | 113 | u16 ret = kern_pcid(asid); |
114 | #ifdef CONFIG_PAGE_TABLE_ISOLATION | 114 | #ifdef CONFIG_PAGE_TABLE_ISOLATION |
115 | ret |= 1 << X86_CR3_PTI_SWITCH_BIT; | 115 | ret |= 1 << X86_CR3_PTI_PCID_USER_BIT; |
116 | #endif | 116 | #endif |
117 | return ret; | 117 | return ret; |
118 | } | 118 | } |
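A small worked example of what the renamed bit does (the kernel PCID value 0x001 is only illustrative; the bit-11 relationship is the part taken from user_pcid() above):

#include <stdio.h>

#define X86_CR3_PTI_PCID_USER_BIT 11	/* mirrors the renamed constant */

int main(void)
{
	unsigned int kern_pcid = 0x001;	/* example value for one dynamic ASID */
	unsigned int user_pcid = kern_pcid | (1u << X86_CR3_PTI_PCID_USER_BIT);

	/* prints: kern 0x1 -> user 0x801 */
	printf("kern %#x -> user %#x\n", kern_pcid, user_pcid);
	return 0;
}

Because kernel and user PCIDs for the same ASID differ in bit 11, their TLB entries never alias, which is what the rename is meant to convey.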
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 7cb282e9e587..bfd882617613 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <asm/page.h> | 44 | #include <asm/page.h> |
45 | #include <asm/pgtable.h> | 45 | #include <asm/pgtable.h> |
46 | #include <asm/smap.h> | 46 | #include <asm/smap.h> |
47 | #include <asm/nospec-branch.h> | ||
47 | 48 | ||
48 | #include <xen/interface/xen.h> | 49 | #include <xen/interface/xen.h> |
49 | #include <xen/interface/sched.h> | 50 | #include <xen/interface/sched.h> |
@@ -217,9 +218,9 @@ privcmd_call(unsigned call, | |||
217 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); | 218 | __HYPERCALL_5ARG(a1, a2, a3, a4, a5); |
218 | 219 | ||
219 | stac(); | 220 | stac(); |
220 | asm volatile("call *%[call]" | 221 | asm volatile(CALL_NOSPEC |
221 | : __HYPERCALL_5PARAM | 222 | : __HYPERCALL_5PARAM |
222 | : [call] "a" (&hypercall_page[call]) | 223 | : [thunk_target] "a" (&hypercall_page[call]) |
223 | : __HYPERCALL_CLOBBER5); | 224 | : __HYPERCALL_CLOBBER5); |
224 | clac(); | 225 | clac(); |
225 | 226 | ||
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index dbaf14d69ebd..4817d743c263 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -344,9 +344,12 @@ done: | |||
344 | static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) | 344 | static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr) |
345 | { | 345 | { |
346 | unsigned long flags; | 346 | unsigned long flags; |
347 | int i; | ||
347 | 348 | ||
348 | if (instr[0] != 0x90) | 349 | for (i = 0; i < a->padlen; i++) { |
349 | return; | 350 | if (instr[i] != 0x90) |
351 | return; | ||
352 | } | ||
350 | 353 | ||
351 | local_irq_save(flags); | 354 | local_irq_save(flags); |
352 | add_nops(instr + (a->instrlen - a->padlen), a->padlen); | 355 | add_nops(instr + (a->instrlen - a->padlen), a->padlen); |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index bcb75dc97d44..ea831c858195 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -829,8 +829,32 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
829 | set_cpu_cap(c, X86_FEATURE_K8); | 829 | set_cpu_cap(c, X86_FEATURE_K8); |
830 | 830 | ||
831 | if (cpu_has(c, X86_FEATURE_XMM2)) { | 831 | if (cpu_has(c, X86_FEATURE_XMM2)) { |
832 | /* MFENCE stops RDTSC speculation */ | 832 | unsigned long long val; |
833 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | 833 | int ret; |
834 | |||
835 | /* | ||
836 | * A serializing LFENCE has less overhead than MFENCE, so | ||
837 | * use it for execution serialization. On families which | ||
838 | * don't have that MSR, LFENCE is already serializing. | ||
839 | * msr_set_bit() uses the safe accessors, too, even if the MSR | ||
840 | * is not present. | ||
841 | */ | ||
842 | msr_set_bit(MSR_F10H_DECFG, | ||
843 | MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT); | ||
844 | |||
845 | /* | ||
846 | * Verify that the MSR write was successful (could be running | ||
847 | * under a hypervisor) and only then assume that LFENCE is | ||
848 | * serializing. | ||
849 | */ | ||
850 | ret = rdmsrl_safe(MSR_F10H_DECFG, &val); | ||
851 | if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) { | ||
852 | /* A serializing LFENCE stops RDTSC speculation */ | ||
853 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
854 | } else { | ||
855 | /* MFENCE stops RDTSC speculation */ | ||
856 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | ||
857 | } | ||
834 | } | 858 | } |
835 | 859 | ||
836 | /* | 860 | /* |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ba0b2424c9b0..e4dc26185aa7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -10,6 +10,10 @@ | |||
10 | */ | 10 | */ |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/utsname.h> | 12 | #include <linux/utsname.h> |
13 | #include <linux/cpu.h> | ||
14 | |||
15 | #include <asm/nospec-branch.h> | ||
16 | #include <asm/cmdline.h> | ||
13 | #include <asm/bugs.h> | 17 | #include <asm/bugs.h> |
14 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
15 | #include <asm/processor-flags.h> | 19 | #include <asm/processor-flags.h> |
@@ -20,6 +24,8 @@ | |||
20 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
21 | #include <asm/set_memory.h> | 25 | #include <asm/set_memory.h> |
22 | 26 | ||
27 | static void __init spectre_v2_select_mitigation(void); | ||
28 | |||
23 | void __init check_bugs(void) | 29 | void __init check_bugs(void) |
24 | { | 30 | { |
25 | identify_boot_cpu(); | 31 | identify_boot_cpu(); |
@@ -29,6 +35,9 @@ void __init check_bugs(void) | |||
29 | print_cpu_info(&boot_cpu_data); | 35 | print_cpu_info(&boot_cpu_data); |
30 | } | 36 | } |
31 | 37 | ||
38 | /* Select the proper spectre mitigation before patching alternatives */ | ||
39 | spectre_v2_select_mitigation(); | ||
40 | |||
32 | #ifdef CONFIG_X86_32 | 41 | #ifdef CONFIG_X86_32 |
33 | /* | 42 | /* |
34 | * Check whether we are able to run this kernel safely on SMP. | 43 | * Check whether we are able to run this kernel safely on SMP. |
@@ -60,3 +69,179 @@ void __init check_bugs(void) | |||
60 | set_memory_4k((unsigned long)__va(0), 1); | 69 | set_memory_4k((unsigned long)__va(0), 1); |
61 | #endif | 70 | #endif |
62 | } | 71 | } |
72 | |||
73 | /* The kernel command line selection */ | ||
74 | enum spectre_v2_mitigation_cmd { | ||
75 | SPECTRE_V2_CMD_NONE, | ||
76 | SPECTRE_V2_CMD_AUTO, | ||
77 | SPECTRE_V2_CMD_FORCE, | ||
78 | SPECTRE_V2_CMD_RETPOLINE, | ||
79 | SPECTRE_V2_CMD_RETPOLINE_GENERIC, | ||
80 | SPECTRE_V2_CMD_RETPOLINE_AMD, | ||
81 | }; | ||
82 | |||
83 | static const char *spectre_v2_strings[] = { | ||
84 | [SPECTRE_V2_NONE] = "Vulnerable", | ||
85 | [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", | ||
86 | [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", | ||
87 | [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", | ||
88 | [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", | ||
89 | }; | ||
90 | |||
91 | #undef pr_fmt | ||
92 | #define pr_fmt(fmt) "Spectre V2 mitigation: " fmt | ||
93 | |||
94 | static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE; | ||
95 | |||
96 | static void __init spec2_print_if_insecure(const char *reason) | ||
97 | { | ||
98 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
99 | pr_info("%s\n", reason); | ||
100 | } | ||
101 | |||
102 | static void __init spec2_print_if_secure(const char *reason) | ||
103 | { | ||
104 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
105 | pr_info("%s\n", reason); | ||
106 | } | ||
107 | |||
108 | static inline bool retp_compiler(void) | ||
109 | { | ||
110 | return __is_defined(RETPOLINE); | ||
111 | } | ||
112 | |||
113 | static inline bool match_option(const char *arg, int arglen, const char *opt) | ||
114 | { | ||
115 | int len = strlen(opt); | ||
116 | |||
117 | return len == arglen && !strncmp(arg, opt, len); | ||
118 | } | ||
119 | |||
120 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) | ||
121 | { | ||
122 | char arg[20]; | ||
123 | int ret; | ||
124 | |||
125 | ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, | ||
126 | sizeof(arg)); | ||
127 | if (ret > 0) { | ||
128 | if (match_option(arg, ret, "off")) { | ||
129 | goto disable; | ||
130 | } else if (match_option(arg, ret, "on")) { | ||
131 | spec2_print_if_secure("force enabled on command line."); | ||
132 | return SPECTRE_V2_CMD_FORCE; | ||
133 | } else if (match_option(arg, ret, "retpoline")) { | ||
134 | spec2_print_if_insecure("retpoline selected on command line."); | ||
135 | return SPECTRE_V2_CMD_RETPOLINE; | ||
136 | } else if (match_option(arg, ret, "retpoline,amd")) { | ||
137 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) { | ||
138 | pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n"); | ||
139 | return SPECTRE_V2_CMD_AUTO; | ||
140 | } | ||
141 | spec2_print_if_insecure("AMD retpoline selected on command line."); | ||
142 | return SPECTRE_V2_CMD_RETPOLINE_AMD; | ||
143 | } else if (match_option(arg, ret, "retpoline,generic")) { | ||
144 | spec2_print_if_insecure("generic retpoline selected on command line."); | ||
145 | return SPECTRE_V2_CMD_RETPOLINE_GENERIC; | ||
146 | } else if (match_option(arg, ret, "auto")) { | ||
147 | return SPECTRE_V2_CMD_AUTO; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | if (!cmdline_find_option_bool(boot_command_line, "nospectre_v2")) | ||
152 | return SPECTRE_V2_CMD_AUTO; | ||
153 | disable: | ||
154 | spec2_print_if_insecure("disabled on command line."); | ||
155 | return SPECTRE_V2_CMD_NONE; | ||
156 | } | ||
157 | |||
158 | static void __init spectre_v2_select_mitigation(void) | ||
159 | { | ||
160 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); | ||
161 | enum spectre_v2_mitigation mode = SPECTRE_V2_NONE; | ||
162 | |||
163 | /* | ||
164 | * If the CPU is not affected and the command line mode is NONE or AUTO | ||
165 | * then nothing to do. | ||
166 | */ | ||
167 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && | ||
168 | (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO)) | ||
169 | return; | ||
170 | |||
171 | switch (cmd) { | ||
172 | case SPECTRE_V2_CMD_NONE: | ||
173 | return; | ||
174 | |||
175 | case SPECTRE_V2_CMD_FORCE: | ||
176 | /* FALLTHRU */ | ||
177 | case SPECTRE_V2_CMD_AUTO: | ||
178 | goto retpoline_auto; | ||
179 | |||
180 | case SPECTRE_V2_CMD_RETPOLINE_AMD: | ||
181 | if (IS_ENABLED(CONFIG_RETPOLINE)) | ||
182 | goto retpoline_amd; | ||
183 | break; | ||
184 | case SPECTRE_V2_CMD_RETPOLINE_GENERIC: | ||
185 | if (IS_ENABLED(CONFIG_RETPOLINE)) | ||
186 | goto retpoline_generic; | ||
187 | break; | ||
188 | case SPECTRE_V2_CMD_RETPOLINE: | ||
189 | if (IS_ENABLED(CONFIG_RETPOLINE)) | ||
190 | goto retpoline_auto; | ||
191 | break; | ||
192 | } | ||
193 | pr_err("kernel not compiled with retpoline; no mitigation available!\n"); | ||
194 | return; | ||
195 | |||
196 | retpoline_auto: | ||
197 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { | ||
198 | retpoline_amd: | ||
199 | if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { | ||
200 | pr_err("LFENCE not serializing. Switching to generic retpoline\n"); | ||
201 | goto retpoline_generic; | ||
202 | } | ||
203 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : | ||
204 | SPECTRE_V2_RETPOLINE_MINIMAL_AMD; | ||
205 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); | ||
206 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); | ||
207 | } else { | ||
208 | retpoline_generic: | ||
209 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : | ||
210 | SPECTRE_V2_RETPOLINE_MINIMAL; | ||
211 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); | ||
212 | } | ||
213 | |||
214 | spectre_v2_enabled = mode; | ||
215 | pr_info("%s\n", spectre_v2_strings[mode]); | ||
216 | } | ||
217 | |||
218 | #undef pr_fmt | ||
219 | |||
220 | #ifdef CONFIG_SYSFS | ||
221 | ssize_t cpu_show_meltdown(struct device *dev, | ||
222 | struct device_attribute *attr, char *buf) | ||
223 | { | ||
224 | if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN)) | ||
225 | return sprintf(buf, "Not affected\n"); | ||
226 | if (boot_cpu_has(X86_FEATURE_PTI)) | ||
227 | return sprintf(buf, "Mitigation: PTI\n"); | ||
228 | return sprintf(buf, "Vulnerable\n"); | ||
229 | } | ||
230 | |||
231 | ssize_t cpu_show_spectre_v1(struct device *dev, | ||
232 | struct device_attribute *attr, char *buf) | ||
233 | { | ||
234 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1)) | ||
235 | return sprintf(buf, "Not affected\n"); | ||
236 | return sprintf(buf, "Vulnerable\n"); | ||
237 | } | ||
238 | |||
239 | ssize_t cpu_show_spectre_v2(struct device *dev, | ||
240 | struct device_attribute *attr, char *buf) | ||
241 | { | ||
242 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
243 | return sprintf(buf, "Not affected\n"); | ||
244 | |||
245 | return sprintf(buf, "%s\n", spectre_v2_strings[spectre_v2_enabled]); | ||
246 | } | ||
247 | #endif | ||
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 39d7ea865207..ef29ad001991 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -926,6 +926,9 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
926 | if (c->x86_vendor != X86_VENDOR_AMD) | 926 | if (c->x86_vendor != X86_VENDOR_AMD) |
927 | setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); | 927 | setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN); |
928 | 928 | ||
929 | setup_force_cpu_bug(X86_BUG_SPECTRE_V1); | ||
930 | setup_force_cpu_bug(X86_BUG_SPECTRE_V2); | ||
931 | |||
929 | fpu__init_system(c); | 932 | fpu__init_system(c); |
930 | 933 | ||
931 | #ifdef CONFIG_X86_32 | 934 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 8ccdca6d3f9e..d9e460fc7a3b 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -910,8 +910,17 @@ static bool is_blacklisted(unsigned int cpu) | |||
910 | { | 910 | { |
911 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 911 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
912 | 912 | ||
913 | if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { | 913 | /* |
914 | pr_err_once("late loading on model 79 is disabled.\n"); | 914 | * Late loading on model 79 with microcode revision less than 0x0b000021 |
915 | * may result in a system hang. This behavior is documented in item | ||
916 | * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family). | ||
917 | */ | ||
918 | if (c->x86 == 6 && | ||
919 | c->x86_model == INTEL_FAM6_BROADWELL_X && | ||
920 | c->x86_mask == 0x01 && | ||
921 | c->microcode < 0x0b000021) { | ||
922 | pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode); | ||
923 | pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n"); | ||
915 | return true; | 924 | return true; |
916 | } | 925 | } |
917 | 926 | ||
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S index b6c6468e10bc..4c8440de3355 100644 --- a/arch/x86/kernel/ftrace_32.S +++ b/arch/x86/kernel/ftrace_32.S | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <asm/segment.h> | 8 | #include <asm/segment.h> |
9 | #include <asm/export.h> | 9 | #include <asm/export.h> |
10 | #include <asm/ftrace.h> | 10 | #include <asm/ftrace.h> |
11 | #include <asm/nospec-branch.h> | ||
11 | 12 | ||
12 | #ifdef CC_USING_FENTRY | 13 | #ifdef CC_USING_FENTRY |
13 | # define function_hook __fentry__ | 14 | # define function_hook __fentry__ |
@@ -197,7 +198,8 @@ ftrace_stub: | |||
197 | movl 0x4(%ebp), %edx | 198 | movl 0x4(%ebp), %edx |
198 | subl $MCOUNT_INSN_SIZE, %eax | 199 | subl $MCOUNT_INSN_SIZE, %eax |
199 | 200 | ||
200 | call *ftrace_trace_function | 201 | movl ftrace_trace_function, %ecx |
202 | CALL_NOSPEC %ecx | ||
201 | 203 | ||
202 | popl %edx | 204 | popl %edx |
203 | popl %ecx | 205 | popl %ecx |
@@ -241,5 +243,5 @@ return_to_handler: | |||
241 | movl %eax, %ecx | 243 | movl %eax, %ecx |
242 | popl %edx | 244 | popl %edx |
243 | popl %eax | 245 | popl %eax |
244 | jmp *%ecx | 246 | JMP_NOSPEC %ecx |
245 | #endif | 247 | #endif |
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index c832291d948a..7cb8ba08beb9 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S | |||
@@ -7,7 +7,7 @@ | |||
7 | #include <asm/ptrace.h> | 7 | #include <asm/ptrace.h> |
8 | #include <asm/ftrace.h> | 8 | #include <asm/ftrace.h> |
9 | #include <asm/export.h> | 9 | #include <asm/export.h> |
10 | 10 | #include <asm/nospec-branch.h> | |
11 | 11 | ||
12 | .code64 | 12 | .code64 |
13 | .section .entry.text, "ax" | 13 | .section .entry.text, "ax" |
@@ -286,8 +286,8 @@ trace: | |||
286 | * ip and parent ip are used and the list function is called when | 286 | * ip and parent ip are used and the list function is called when |
287 | * function tracing is enabled. | 287 | * function tracing is enabled. |
288 | */ | 288 | */ |
289 | call *ftrace_trace_function | 289 | movq ftrace_trace_function, %r8 |
290 | 290 | CALL_NOSPEC %r8 | |
291 | restore_mcount_regs | 291 | restore_mcount_regs |
292 | 292 | ||
293 | jmp fgraph_trace | 293 | jmp fgraph_trace |
@@ -329,5 +329,5 @@ GLOBAL(return_to_handler) | |||
329 | movq 8(%rsp), %rdx | 329 | movq 8(%rsp), %rdx |
330 | movq (%rsp), %rax | 330 | movq (%rsp), %rax |
331 | addq $24, %rsp | 331 | addq $24, %rsp |
332 | jmp *%rdi | 332 | JMP_NOSPEC %rdi |
333 | #endif | 333 | #endif |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index a83b3346a0e1..c1bdbd3d3232 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | 21 | ||
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/nospec-branch.h> | ||
23 | 24 | ||
24 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 25 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
25 | 26 | ||
@@ -55,11 +56,11 @@ DEFINE_PER_CPU(struct irq_stack *, softirq_stack); | |||
55 | static void call_on_stack(void *func, void *stack) | 56 | static void call_on_stack(void *func, void *stack) |
56 | { | 57 | { |
57 | asm volatile("xchgl %%ebx,%%esp \n" | 58 | asm volatile("xchgl %%ebx,%%esp \n" |
58 | "call *%%edi \n" | 59 | CALL_NOSPEC |
59 | "movl %%ebx,%%esp \n" | 60 | "movl %%ebx,%%esp \n" |
60 | : "=b" (stack) | 61 | : "=b" (stack) |
61 | : "0" (stack), | 62 | : "0" (stack), |
62 | "D"(func) | 63 | [thunk_target] "D"(func) |
63 | : "memory", "cc", "edx", "ecx", "eax"); | 64 | : "memory", "cc", "edx", "ecx", "eax"); |
64 | } | 65 | } |
65 | 66 | ||
@@ -95,11 +96,11 @@ static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc) | |||
95 | call_on_stack(print_stack_overflow, isp); | 96 | call_on_stack(print_stack_overflow, isp); |
96 | 97 | ||
97 | asm volatile("xchgl %%ebx,%%esp \n" | 98 | asm volatile("xchgl %%ebx,%%esp \n" |
98 | "call *%%edi \n" | 99 | CALL_NOSPEC |
99 | "movl %%ebx,%%esp \n" | 100 | "movl %%ebx,%%esp \n" |
100 | : "=a" (arg1), "=b" (isp) | 101 | : "=a" (arg1), "=b" (isp) |
101 | : "0" (desc), "1" (isp), | 102 | : "0" (desc), "1" (isp), |
102 | "D" (desc->handle_irq) | 103 | [thunk_target] "D" (desc->handle_irq) |
103 | : "memory", "cc", "ecx"); | 104 | : "memory", "cc", "ecx"); |
104 | return 1; | 105 | return 1; |
105 | } | 106 | } |
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index a4eb27918ceb..a2486f444073 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -138,6 +138,17 @@ static int map_tboot_page(unsigned long vaddr, unsigned long pfn, | |||
138 | return -1; | 138 | return -1; |
139 | set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); | 139 | set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot)); |
140 | pte_unmap(pte); | 140 | pte_unmap(pte); |
141 | |||
142 | /* | ||
143 | * PTI poisons low addresses in the kernel page tables in the | ||
144 | * name of making them unusable for userspace. To execute | ||
145 | * code at such a low address, the poison must be cleared. | ||
146 | * | ||
147 | * Note: 'pgd' actually gets set in p4d_alloc() _or_ | ||
148 | * pud_alloc() depending on 4/5-level paging. | ||
149 | */ | ||
150 | pgd->pgd &= ~_PAGE_NX; | ||
151 | |||
141 | return 0; | 152 | return 0; |
142 | } | 153 | } |
143 | 154 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c4deb1f34faa..2b8eb4da4d08 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) | |||
3781 | bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) | 3781 | bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) |
3782 | { | 3782 | { |
3783 | if (unlikely(!lapic_in_kernel(vcpu) || | 3783 | if (unlikely(!lapic_in_kernel(vcpu) || |
3784 | kvm_event_needs_reinjection(vcpu))) | 3784 | kvm_event_needs_reinjection(vcpu) || |
3785 | vcpu->arch.exception.pending)) | ||
3785 | return false; | 3786 | return false; |
3786 | 3787 | ||
3787 | if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) | 3788 | if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) |
@@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void) | |||
5465 | 5466 | ||
5466 | int kvm_mmu_module_init(void) | 5467 | int kvm_mmu_module_init(void) |
5467 | { | 5468 | { |
5469 | int ret = -ENOMEM; | ||
5470 | |||
5468 | kvm_mmu_clear_all_pte_masks(); | 5471 | kvm_mmu_clear_all_pte_masks(); |
5469 | 5472 | ||
5470 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", | 5473 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
5471 | sizeof(struct pte_list_desc), | 5474 | sizeof(struct pte_list_desc), |
5472 | 0, SLAB_ACCOUNT, NULL); | 5475 | 0, SLAB_ACCOUNT, NULL); |
5473 | if (!pte_list_desc_cache) | 5476 | if (!pte_list_desc_cache) |
5474 | goto nomem; | 5477 | goto out; |
5475 | 5478 | ||
5476 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", | 5479 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
5477 | sizeof(struct kvm_mmu_page), | 5480 | sizeof(struct kvm_mmu_page), |
5478 | 0, SLAB_ACCOUNT, NULL); | 5481 | 0, SLAB_ACCOUNT, NULL); |
5479 | if (!mmu_page_header_cache) | 5482 | if (!mmu_page_header_cache) |
5480 | goto nomem; | 5483 | goto out; |
5481 | 5484 | ||
5482 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) | 5485 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) |
5483 | goto nomem; | 5486 | goto out; |
5484 | 5487 | ||
5485 | register_shrinker(&mmu_shrinker); | 5488 | ret = register_shrinker(&mmu_shrinker); |
5489 | if (ret) | ||
5490 | goto out; | ||
5486 | 5491 | ||
5487 | return 0; | 5492 | return 0; |
5488 | 5493 | ||
5489 | nomem: | 5494 | out: |
5490 | mmu_destroy_caches(); | 5495 | mmu_destroy_caches(); |
5491 | return -ENOMEM; | 5496 | return ret; |
5492 | } | 5497 | } |
5493 | 5498 | ||
5494 | /* | 5499 | /* |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bb31c801f1fc..f40d0da1f1d3 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/debugreg.h> | 45 | #include <asm/debugreg.h> |
46 | #include <asm/kvm_para.h> | 46 | #include <asm/kvm_para.h> |
47 | #include <asm/irq_remapping.h> | 47 | #include <asm/irq_remapping.h> |
48 | #include <asm/nospec-branch.h> | ||
48 | 49 | ||
49 | #include <asm/virtext.h> | 50 | #include <asm/virtext.h> |
50 | #include "trace.h" | 51 | #include "trace.h" |
@@ -361,7 +362,6 @@ static void recalc_intercepts(struct vcpu_svm *svm) | |||
361 | { | 362 | { |
362 | struct vmcb_control_area *c, *h; | 363 | struct vmcb_control_area *c, *h; |
363 | struct nested_state *g; | 364 | struct nested_state *g; |
364 | u32 h_intercept_exceptions; | ||
365 | 365 | ||
366 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); | 366 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
367 | 367 | ||
@@ -372,14 +372,9 @@ static void recalc_intercepts(struct vcpu_svm *svm) | |||
372 | h = &svm->nested.hsave->control; | 372 | h = &svm->nested.hsave->control; |
373 | g = &svm->nested; | 373 | g = &svm->nested; |
374 | 374 | ||
375 | /* No need to intercept #UD if L1 doesn't intercept it */ | ||
376 | h_intercept_exceptions = | ||
377 | h->intercept_exceptions & ~(1U << UD_VECTOR); | ||
378 | |||
379 | c->intercept_cr = h->intercept_cr | g->intercept_cr; | 375 | c->intercept_cr = h->intercept_cr | g->intercept_cr; |
380 | c->intercept_dr = h->intercept_dr | g->intercept_dr; | 376 | c->intercept_dr = h->intercept_dr | g->intercept_dr; |
381 | c->intercept_exceptions = | 377 | c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; |
382 | h_intercept_exceptions | g->intercept_exceptions; | ||
383 | c->intercept = h->intercept | g->intercept; | 378 | c->intercept = h->intercept | g->intercept; |
384 | } | 379 | } |
385 | 380 | ||
@@ -2202,7 +2197,6 @@ static int ud_interception(struct vcpu_svm *svm) | |||
2202 | { | 2197 | { |
2203 | int er; | 2198 | int er; |
2204 | 2199 | ||
2205 | WARN_ON_ONCE(is_guest_mode(&svm->vcpu)); | ||
2206 | er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); | 2200 | er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); |
2207 | if (er == EMULATE_USER_EXIT) | 2201 | if (er == EMULATE_USER_EXIT) |
2208 | return 0; | 2202 | return 0; |
@@ -5034,6 +5028,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
5034 | #endif | 5028 | #endif |
5035 | ); | 5029 | ); |
5036 | 5030 | ||
5031 | /* Eliminate branch target predictions from guest mode */ | ||
5032 | vmexit_fill_RSB(); | ||
5033 | |||
5037 | #ifdef CONFIG_X86_64 | 5034 | #ifdef CONFIG_X86_64 |
5038 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); | 5035 | wrmsrl(MSR_GS_BASE, svm->host.gs_base); |
5039 | #else | 5036 | #else |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5c14d65f676a..c829d89e2e63 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <asm/apic.h> | 50 | #include <asm/apic.h> |
51 | #include <asm/irq_remapping.h> | 51 | #include <asm/irq_remapping.h> |
52 | #include <asm/mmu_context.h> | 52 | #include <asm/mmu_context.h> |
53 | #include <asm/nospec-branch.h> | ||
53 | 54 | ||
54 | #include "trace.h" | 55 | #include "trace.h" |
55 | #include "pmu.h" | 56 | #include "pmu.h" |
@@ -899,8 +900,16 @@ static inline short vmcs_field_to_offset(unsigned long field) | |||
899 | { | 900 | { |
900 | BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); | 901 | BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); |
901 | 902 | ||
902 | if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || | 903 | if (field >= ARRAY_SIZE(vmcs_field_to_offset_table)) |
903 | vmcs_field_to_offset_table[field] == 0) | 904 | return -ENOENT; |
905 | |||
906 | /* | ||
907 | * FIXME: Mitigation for CVE-2017-5753. To be replaced with a | ||
908 | * generic mechanism. | ||
909 | */ | ||
910 | asm("lfence"); | ||
911 | |||
912 | if (vmcs_field_to_offset_table[field] == 0) | ||
904 | return -ENOENT; | 913 | return -ENOENT; |
905 | 914 | ||
906 | return vmcs_field_to_offset_table[field]; | 915 | return vmcs_field_to_offset_table[field]; |
@@ -1887,7 +1896,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |||
1887 | { | 1896 | { |
1888 | u32 eb; | 1897 | u32 eb; |
1889 | 1898 | ||
1890 | eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | | 1899 | eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | |
1891 | (1u << DB_VECTOR) | (1u << AC_VECTOR); | 1900 | (1u << DB_VECTOR) | (1u << AC_VECTOR); |
1892 | if ((vcpu->guest_debug & | 1901 | if ((vcpu->guest_debug & |
1893 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == | 1902 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == |
@@ -1905,8 +1914,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |||
1905 | */ | 1914 | */ |
1906 | if (is_guest_mode(vcpu)) | 1915 | if (is_guest_mode(vcpu)) |
1907 | eb |= get_vmcs12(vcpu)->exception_bitmap; | 1916 | eb |= get_vmcs12(vcpu)->exception_bitmap; |
1908 | else | ||
1909 | eb |= 1u << UD_VECTOR; | ||
1910 | 1917 | ||
1911 | vmcs_write32(EXCEPTION_BITMAP, eb); | 1918 | vmcs_write32(EXCEPTION_BITMAP, eb); |
1912 | } | 1919 | } |
@@ -5917,7 +5924,6 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
5917 | return 1; /* already handled by vmx_vcpu_run() */ | 5924 | return 1; /* already handled by vmx_vcpu_run() */ |
5918 | 5925 | ||
5919 | if (is_invalid_opcode(intr_info)) { | 5926 | if (is_invalid_opcode(intr_info)) { |
5920 | WARN_ON_ONCE(is_guest_mode(vcpu)); | ||
5921 | er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); | 5927 | er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); |
5922 | if (er == EMULATE_USER_EXIT) | 5928 | if (er == EMULATE_USER_EXIT) |
5923 | return 0; | 5929 | return 0; |
@@ -9485,6 +9491,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
9485 | #endif | 9491 | #endif |
9486 | ); | 9492 | ); |
9487 | 9493 | ||
9494 | /* Eliminate branch target predictions from guest mode */ | ||
9495 | vmexit_fill_RSB(); | ||
9496 | |||
9488 | /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ | 9497 | /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */ |
9489 | if (debugctlmsr) | 9498 | if (debugctlmsr) |
9490 | update_debugctlmsr(debugctlmsr); | 9499 | update_debugctlmsr(debugctlmsr); |
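The lfence added to vmcs_field_to_offset() follows the usual Spectre variant 1 pattern: bounds check first, then a speculation barrier before the dependent table load. A generic hedged sketch of that pattern (function and parameter names invented for illustration):

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Sketch only: keep a mispredicted bounds check from speculatively
 * indexing past the end of "table".
 */
static inline int example_checked_lookup(const short *table, size_t size,
					 unsigned long field)
{
	if (field >= size)
		return -ENOENT;

	/* Speculation barrier, as in vmcs_field_to_offset() above */
	asm volatile("lfence" ::: "memory");

	return table[field];
}

The FIXME in the patch marks the open-coded lfence as a stopgap until a generic mechanism replaces it.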
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 171377b83be1..25a972c61b0a 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile | |||
@@ -27,6 +27,7 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | |||
27 | lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o | 27 | lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o |
28 | lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o | 28 | lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o |
29 | lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o | 29 | lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o |
30 | lib-$(CONFIG_RETPOLINE) += retpoline.o | ||
30 | 31 | ||
31 | obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o | 32 | obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o |
32 | 33 | ||
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S index 4d34bb548b41..46e71a74e612 100644 --- a/arch/x86/lib/checksum_32.S +++ b/arch/x86/lib/checksum_32.S | |||
@@ -29,7 +29,8 @@ | |||
29 | #include <asm/errno.h> | 29 | #include <asm/errno.h> |
30 | #include <asm/asm.h> | 30 | #include <asm/asm.h> |
31 | #include <asm/export.h> | 31 | #include <asm/export.h> |
32 | 32 | #include <asm/nospec-branch.h> | |
33 | |||
33 | /* | 34 | /* |
34 | * computes a partial checksum, e.g. for TCP/UDP fragments | 35 | * computes a partial checksum, e.g. for TCP/UDP fragments |
35 | */ | 36 | */ |
@@ -156,7 +157,7 @@ ENTRY(csum_partial) | |||
156 | negl %ebx | 157 | negl %ebx |
157 | lea 45f(%ebx,%ebx,2), %ebx | 158 | lea 45f(%ebx,%ebx,2), %ebx |
158 | testl %esi, %esi | 159 | testl %esi, %esi |
159 | jmp *%ebx | 160 | JMP_NOSPEC %ebx |
160 | 161 | ||
161 | # Handle 2-byte-aligned regions | 162 | # Handle 2-byte-aligned regions |
162 | 20: addw (%esi), %ax | 163 | 20: addw (%esi), %ax |
@@ -439,7 +440,7 @@ ENTRY(csum_partial_copy_generic) | |||
439 | andl $-32,%edx | 440 | andl $-32,%edx |
440 | lea 3f(%ebx,%ebx), %ebx | 441 | lea 3f(%ebx,%ebx), %ebx |
441 | testl %esi, %esi | 442 | testl %esi, %esi |
442 | jmp *%ebx | 443 | JMP_NOSPEC %ebx |
443 | 1: addl $64,%esi | 444 | 1: addl $64,%esi |
444 | addl $64,%edi | 445 | addl $64,%edi |
445 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) | 446 | SRC(movb -32(%edx),%bl) ; SRC(movb (%edx),%bl) |
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S new file mode 100644 index 000000000000..cb45c6cb465f --- /dev/null +++ b/arch/x86/lib/retpoline.S | |||
@@ -0,0 +1,48 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #include <linux/stringify.h> | ||
4 | #include <linux/linkage.h> | ||
5 | #include <asm/dwarf2.h> | ||
6 | #include <asm/cpufeatures.h> | ||
7 | #include <asm/alternative-asm.h> | ||
8 | #include <asm/export.h> | ||
9 | #include <asm/nospec-branch.h> | ||
10 | |||
11 | .macro THUNK reg | ||
12 | .section .text.__x86.indirect_thunk.\reg | ||
13 | |||
14 | ENTRY(__x86_indirect_thunk_\reg) | ||
15 | CFI_STARTPROC | ||
16 | JMP_NOSPEC %\reg | ||
17 | CFI_ENDPROC | ||
18 | ENDPROC(__x86_indirect_thunk_\reg) | ||
19 | .endm | ||
20 | |||
21 | /* | ||
22 | * Despite being an assembler file we can't just use .irp here | ||
23 | * because __KSYM_DEPS__ only uses the C preprocessor and would | ||
24 | * only see one instance of "__x86_indirect_thunk_\reg" rather | ||
25 | * than one per register with the correct names. So we do it | ||
26 | * the simple and nasty way... | ||
27 | */ | ||
28 | #define EXPORT_THUNK(reg) EXPORT_SYMBOL(__x86_indirect_thunk_ ## reg) | ||
29 | #define GENERATE_THUNK(reg) THUNK reg ; EXPORT_THUNK(reg) | ||
30 | |||
31 | GENERATE_THUNK(_ASM_AX) | ||
32 | GENERATE_THUNK(_ASM_BX) | ||
33 | GENERATE_THUNK(_ASM_CX) | ||
34 | GENERATE_THUNK(_ASM_DX) | ||
35 | GENERATE_THUNK(_ASM_SI) | ||
36 | GENERATE_THUNK(_ASM_DI) | ||
37 | GENERATE_THUNK(_ASM_BP) | ||
38 | GENERATE_THUNK(_ASM_SP) | ||
39 | #ifdef CONFIG_64BIT | ||
40 | GENERATE_THUNK(r8) | ||
41 | GENERATE_THUNK(r9) | ||
42 | GENERATE_THUNK(r10) | ||
43 | GENERATE_THUNK(r11) | ||
44 | GENERATE_THUNK(r12) | ||
45 | GENERATE_THUNK(r13) | ||
46 | GENERATE_THUNK(r14) | ||
47 | GENERATE_THUNK(r15) | ||
48 | #endif | ||
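To see why every thunk has to be exported, consider a module built with CONFIG_RETPOLINE (the names below are invented for the sketch; only the thunk symbol naming comes from the file above):

/*
 * Sketch only: with CONFIG_RETPOLINE, a retpoline-aware compiler turns an
 * indirect call like this into "call __x86_indirect_thunk_<reg>", so any
 * module containing an indirect call needs the exported thunk symbols.
 */
static int (*example_op)(int);

int example_module_call(int x)
{
	if (!example_op)
		return -1;
	return example_op(x);
}

Without EXPORT_THUNK() such a module would fail to resolve the thunk symbols at load time.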
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c index 43d4a4a29037..ce38f165489b 100644 --- a/arch/x86/mm/pti.c +++ b/arch/x86/mm/pti.c | |||
@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) | |||
149 | * | 149 | * |
150 | * Returns a pointer to a P4D on success, or NULL on failure. | 150 | * Returns a pointer to a P4D on success, or NULL on failure. |
151 | */ | 151 | */ |
152 | static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) | 152 | static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) |
153 | { | 153 | { |
154 | pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); | 154 | pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); |
155 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); | 155 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) | |||
164 | if (!new_p4d_page) | 164 | if (!new_p4d_page) |
165 | return NULL; | 165 | return NULL; |
166 | 166 | ||
167 | if (pgd_none(*pgd)) { | 167 | set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); |
168 | set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); | ||
169 | new_p4d_page = 0; | ||
170 | } | ||
171 | if (new_p4d_page) | ||
172 | free_page(new_p4d_page); | ||
173 | } | 168 | } |
174 | BUILD_BUG_ON(pgd_large(*pgd) != 0); | 169 | BUILD_BUG_ON(pgd_large(*pgd) != 0); |
175 | 170 | ||
@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address) | |||
182 | * | 177 | * |
183 | * Returns a pointer to a PMD on success, or NULL on failure. | 178 | * Returns a pointer to a PMD on success, or NULL on failure. |
184 | */ | 179 | */ |
185 | static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) | 180 | static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) |
186 | { | 181 | { |
187 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); | 182 | gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
188 | p4d_t *p4d = pti_user_pagetable_walk_p4d(address); | 183 | p4d_t *p4d = pti_user_pagetable_walk_p4d(address); |
@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) | |||
194 | if (!new_pud_page) | 189 | if (!new_pud_page) |
195 | return NULL; | 190 | return NULL; |
196 | 191 | ||
197 | if (p4d_none(*p4d)) { | 192 | set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); |
198 | set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); | ||
199 | new_pud_page = 0; | ||
200 | } | ||
201 | if (new_pud_page) | ||
202 | free_page(new_pud_page); | ||
203 | } | 193 | } |
204 | 194 | ||
205 | pud = pud_offset(p4d, address); | 195 | pud = pud_offset(p4d, address); |
@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address) | |||
213 | if (!new_pmd_page) | 203 | if (!new_pmd_page) |
214 | return NULL; | 204 | return NULL; |
215 | 205 | ||
216 | if (pud_none(*pud)) { | 206 | set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); |
217 | set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page))); | ||
218 | new_pmd_page = 0; | ||
219 | } | ||
220 | if (new_pmd_page) | ||
221 | free_page(new_pmd_page); | ||
222 | } | 207 | } |
223 | 208 | ||
224 | return pmd_offset(pud, address); | 209 | return pmd_offset(pud, address); |
@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address) | |||
251 | if (!new_pte_page) | 236 | if (!new_pte_page) |
252 | return NULL; | 237 | return NULL; |
253 | 238 | ||
254 | if (pmd_none(*pmd)) { | 239 | set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); |
255 | set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page))); | ||
256 | new_pte_page = 0; | ||
257 | } | ||
258 | if (new_pte_page) | ||
259 | free_page(new_pte_page); | ||
260 | } | 240 | } |
261 | 241 | ||
262 | pte = pte_offset_kernel(pmd, address); | 242 | pte = pte_offset_kernel(pmd, address); |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 7a5350d08cef..563049c483a1 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -594,6 +594,11 @@ char *__init pcibios_setup(char *str) | |||
594 | } else if (!strcmp(str, "nocrs")) { | 594 | } else if (!strcmp(str, "nocrs")) { |
595 | pci_probe |= PCI_ROOT_NO_CRS; | 595 | pci_probe |= PCI_ROOT_NO_CRS; |
596 | return NULL; | 596 | return NULL; |
597 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | ||
598 | } else if (!strcmp(str, "big_root_window")) { | ||
599 | pci_probe |= PCI_BIG_ROOT_WINDOW; | ||
600 | return NULL; | ||
601 | #endif | ||
597 | } else if (!strcmp(str, "earlydump")) { | 602 | } else if (!strcmp(str, "earlydump")) { |
598 | pci_early_dump_regs = 1; | 603 | pci_early_dump_regs = 1; |
599 | return NULL; | 604 | return NULL; |
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index e663d6bf1328..f6a26e3cb476 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
@@ -662,10 +662,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid); | |||
662 | */ | 662 | */ |
663 | static void pci_amd_enable_64bit_bar(struct pci_dev *dev) | 663 | static void pci_amd_enable_64bit_bar(struct pci_dev *dev) |
664 | { | 664 | { |
665 | unsigned i; | ||
666 | u32 base, limit, high; | 665 | u32 base, limit, high; |
667 | struct resource *res, *conflict; | ||
668 | struct pci_dev *other; | 666 | struct pci_dev *other; |
667 | struct resource *res; | ||
668 | unsigned i; | ||
669 | int r; | ||
670 | |||
671 | if (!(pci_probe & PCI_BIG_ROOT_WINDOW)) | ||
672 | return; | ||
669 | 673 | ||
670 | /* Check that we are the only device of that type */ | 674 | /* Check that we are the only device of that type */ |
671 | other = pci_get_device(dev->vendor, dev->device, NULL); | 675 | other = pci_get_device(dev->vendor, dev->device, NULL); |
@@ -699,22 +703,25 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev) | |||
699 | if (!res) | 703 | if (!res) |
700 | return; | 704 | return; |
701 | 705 | ||
706 | /* | ||
707 | * Allocate a 256GB window directly below the 0xfd00000000 hardware | ||
708 | * limit (see AMD Family 15h Models 30h-3Fh BKDG, sec 2.4.6). | ||
709 | */ | ||
702 | res->name = "PCI Bus 0000:00"; | 710 | res->name = "PCI Bus 0000:00"; |
703 | res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM | | 711 | res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM | |
704 | IORESOURCE_MEM_64 | IORESOURCE_WINDOW; | 712 | IORESOURCE_MEM_64 | IORESOURCE_WINDOW; |
705 | res->start = 0x100000000ull; | 713 | res->start = 0xbd00000000ull; |
706 | res->end = 0xfd00000000ull - 1; | 714 | res->end = 0xfd00000000ull - 1; |
707 | 715 | ||
708 | /* Just grab the free area behind system memory for this */ | 716 | r = request_resource(&iomem_resource, res); |
709 | while ((conflict = request_resource_conflict(&iomem_resource, res))) { | 717 | if (r) { |
710 | if (conflict->end >= res->end) { | 718 | kfree(res); |
711 | kfree(res); | 719 | return; |
712 | return; | ||
713 | } | ||
714 | res->start = conflict->end + 1; | ||
715 | } | 720 | } |
716 | 721 | ||
717 | dev_info(&dev->dev, "adding root bus resource %pR\n", res); | 722 | dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n", |
723 | res); | ||
724 | add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); | ||
718 | 725 | ||
719 | base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) | | 726 | base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) | |
720 | AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK; | 727 | AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK; |
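The new start address follows directly from the comment above: the window is 256 GiB ending at the 0xfd00000000 hardware limit. A compile-time check of that arithmetic (illustrative only, not part of the patch):

/* 256 GiB == 0x4000000000, and 0xfd00000000 - 0x4000000000 == 0xbd00000000 */
_Static_assert(0xfd00000000ull - (256ull << 30) == 0xbd00000000ull,
	       "window start sits 256 GiB below the 0xfd00000000 limit");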
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index d87ac96e37ed..2dd15e967c3f 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -135,7 +135,9 @@ pgd_t * __init efi_call_phys_prolog(void) | |||
135 | pud[j] = *pud_offset(p4d_k, vaddr); | 135 | pud[j] = *pud_offset(p4d_k, vaddr); |
136 | } | 136 | } |
137 | } | 137 | } |
138 | pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX; | ||
138 | } | 139 | } |
140 | |||
139 | out: | 141 | out: |
140 | __flush_tlb_all(); | 142 | __flush_tlb_all(); |
141 | 143 | ||
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bt.c b/arch/x86/platform/intel-mid/device_libs/platform_bt.c index dc036e511f48..5a0483e7bf66 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_bt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_bt.c | |||
@@ -60,7 +60,7 @@ static int __init tng_bt_sfi_setup(struct bt_sfi_data *ddata) | |||
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
62 | 62 | ||
63 | static const struct bt_sfi_data tng_bt_sfi_data __initdata = { | 63 | static struct bt_sfi_data tng_bt_sfi_data __initdata = { |
64 | .setup = tng_bt_sfi_setup, | 64 | .setup = tng_bt_sfi_setup, |
65 | }; | 65 | }; |
66 | 66 | ||
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 4d62c071b166..d85076223a69 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
@@ -1325,20 +1325,18 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, | |||
1325 | { | 1325 | { |
1326 | struct { | 1326 | struct { |
1327 | struct mmuext_op op; | 1327 | struct mmuext_op op; |
1328 | #ifdef CONFIG_SMP | ||
1329 | DECLARE_BITMAP(mask, num_processors); | ||
1330 | #else | ||
1331 | DECLARE_BITMAP(mask, NR_CPUS); | 1328 | DECLARE_BITMAP(mask, NR_CPUS); |
1332 | #endif | ||
1333 | } *args; | 1329 | } *args; |
1334 | struct multicall_space mcs; | 1330 | struct multicall_space mcs; |
1331 | const size_t mc_entry_size = sizeof(args->op) + | ||
1332 | sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus()); | ||
1335 | 1333 | ||
1336 | trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); | 1334 | trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end); |
1337 | 1335 | ||
1338 | if (cpumask_empty(cpus)) | 1336 | if (cpumask_empty(cpus)) |
1339 | return; /* nothing to do */ | 1337 | return; /* nothing to do */ |
1340 | 1338 | ||
1341 | mcs = xen_mc_entry(sizeof(*args)); | 1339 | mcs = xen_mc_entry(mc_entry_size); |
1342 | args = mcs.args; | 1340 | args = mcs.args; |
1343 | args->op.arg2.vcpumask = to_cpumask(args->mask); | 1341 | args->op.arg2.vcpumask = to_cpumask(args->mask); |
1344 | 1342 | ||
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 75011b80660f..3b34745d0a52 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -72,7 +72,7 @@ u64 xen_clocksource_read(void); | |||
72 | void xen_setup_cpu_clockevents(void); | 72 | void xen_setup_cpu_clockevents(void); |
73 | void xen_save_time_memory_area(void); | 73 | void xen_save_time_memory_area(void); |
74 | void xen_restore_time_memory_area(void); | 74 | void xen_restore_time_memory_area(void); |
75 | void __init xen_init_time_ops(void); | 75 | void __ref xen_init_time_ops(void); |
76 | void __init xen_hvm_init_time_ops(void); | 76 | void __init xen_hvm_init_time_ops(void); |
77 | 77 | ||
78 | irqreturn_t xen_debug_interrupt(int irq, void *dev_id); | 78 | irqreturn_t xen_debug_interrupt(int irq, void *dev_id); |
diff --git a/crypto/algapi.c b/crypto/algapi.c index 60d7366ed343..9a636f961572 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c | |||
@@ -167,6 +167,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list, | |||
167 | 167 | ||
168 | spawn->alg = NULL; | 168 | spawn->alg = NULL; |
169 | spawns = &inst->alg.cra_users; | 169 | spawns = &inst->alg.cra_users; |
170 | |||
171 | /* | ||
172 | * We may encounter an unregistered instance here, since | ||
173 | * an instance's spawns are set up prior to the instance | ||
174 | * being registered. An unregistered instance will have | ||
175 | * NULL ->cra_users.next, since ->cra_users isn't | ||
176 | * properly initialized until registration. But an | ||
177 | * unregistered instance cannot have any users, so treat | ||
178 | * it the same as ->cra_users being empty. | ||
179 | */ | ||
180 | if (spawns->next == NULL) | ||
181 | break; | ||
170 | } | 182 | } |
171 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, | 183 | } while ((spawns = crypto_more_spawns(alg, &stack, &top, |
172 | &secondary_spawns))); | 184 | &secondary_spawns))); |
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index bdc87907d6a1..2415ad9f6dd4 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
@@ -236,6 +236,9 @@ config GENERIC_CPU_DEVICES | |||
236 | config GENERIC_CPU_AUTOPROBE | 236 | config GENERIC_CPU_AUTOPROBE |
237 | bool | 237 | bool |
238 | 238 | ||
239 | config GENERIC_CPU_VULNERABILITIES | ||
240 | bool | ||
241 | |||
239 | config SOC_BUS | 242 | config SOC_BUS |
240 | bool | 243 | bool |
241 | select GLOB | 244 | select GLOB |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 58a9b608d821..d99038487a0d 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -511,10 +511,58 @@ static void __init cpu_dev_register_generic(void) | |||
511 | #endif | 511 | #endif |
512 | } | 512 | } |
513 | 513 | ||
514 | #ifdef CONFIG_GENERIC_CPU_VULNERABILITIES | ||
515 | |||
516 | ssize_t __weak cpu_show_meltdown(struct device *dev, | ||
517 | struct device_attribute *attr, char *buf) | ||
518 | { | ||
519 | return sprintf(buf, "Not affected\n"); | ||
520 | } | ||
521 | |||
522 | ssize_t __weak cpu_show_spectre_v1(struct device *dev, | ||
523 | struct device_attribute *attr, char *buf) | ||
524 | { | ||
525 | return sprintf(buf, "Not affected\n"); | ||
526 | } | ||
527 | |||
528 | ssize_t __weak cpu_show_spectre_v2(struct device *dev, | ||
529 | struct device_attribute *attr, char *buf) | ||
530 | { | ||
531 | return sprintf(buf, "Not affected\n"); | ||
532 | } | ||
533 | |||
534 | static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); | ||
535 | static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); | ||
536 | static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); | ||
537 | |||
538 | static struct attribute *cpu_root_vulnerabilities_attrs[] = { | ||
539 | &dev_attr_meltdown.attr, | ||
540 | &dev_attr_spectre_v1.attr, | ||
541 | &dev_attr_spectre_v2.attr, | ||
542 | NULL | ||
543 | }; | ||
544 | |||
545 | static const struct attribute_group cpu_root_vulnerabilities_group = { | ||
546 | .name = "vulnerabilities", | ||
547 | .attrs = cpu_root_vulnerabilities_attrs, | ||
548 | }; | ||
549 | |||
550 | static void __init cpu_register_vulnerabilities(void) | ||
551 | { | ||
552 | if (sysfs_create_group(&cpu_subsys.dev_root->kobj, | ||
553 | &cpu_root_vulnerabilities_group)) | ||
554 | pr_err("Unable to register CPU vulnerabilities\n"); | ||
555 | } | ||
556 | |||
557 | #else | ||
558 | static inline void cpu_register_vulnerabilities(void) { } | ||
559 | #endif | ||
560 | |||
514 | void __init cpu_dev_init(void) | 561 | void __init cpu_dev_init(void) |
515 | { | 562 | { |
516 | if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) | 563 | if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) |
517 | panic("Failed to register CPU subsystem"); | 564 | panic("Failed to register CPU subsystem"); |
518 | 565 | ||
519 | cpu_dev_register_generic(); | 566 | cpu_dev_register_generic(); |
567 | cpu_register_vulnerabilities(); | ||
520 | } | 568 | } |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 85d4c57870fb..49af94627c8a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -2777,12 +2777,12 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
2777 | } | 2777 | } |
2778 | 2778 | ||
2779 | static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, | 2779 | static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt, |
2780 | unsigned int opcode, int rings) | 2780 | unsigned int opcode, unsigned long rings) |
2781 | { | 2781 | { |
2782 | struct cmd_info *info = NULL; | 2782 | struct cmd_info *info = NULL; |
2783 | unsigned int ring; | 2783 | unsigned int ring; |
2784 | 2784 | ||
2785 | for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) { | 2785 | for_each_set_bit(ring, &rings, I915_NUM_ENGINES) { |
2786 | info = find_cmd_entry(gvt, opcode, ring); | 2786 | info = find_cmd_entry(gvt, opcode, ring); |
2787 | if (info) | 2787 | if (info) |
2788 | break; | 2788 | break; |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 8e331142badb..64d67ff9bf08 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -1359,12 +1359,15 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp, | |||
1359 | return ret; | 1359 | return ret; |
1360 | } else { | 1360 | } else { |
1361 | if (!test_bit(index, spt->post_shadow_bitmap)) { | 1361 | if (!test_bit(index, spt->post_shadow_bitmap)) { |
1362 | int type = spt->shadow_page.type; | ||
1363 | |||
1362 | ppgtt_get_shadow_entry(spt, &se, index); | 1364 | ppgtt_get_shadow_entry(spt, &se, index); |
1363 | ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); | 1365 | ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); |
1364 | if (ret) | 1366 | if (ret) |
1365 | return ret; | 1367 | return ret; |
1368 | ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); | ||
1369 | ppgtt_set_shadow_entry(spt, &se, index); | ||
1366 | } | 1370 | } |
1367 | |||
1368 | ppgtt_set_post_shadow(spt, index); | 1371 | ppgtt_set_post_shadow(spt, index); |
1369 | } | 1372 | } |
1370 | 1373 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 18de6569d04a..5cfba89ed586 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -467,7 +467,7 @@ static void __fence_set_priority(struct dma_fence *fence, int prio) | |||
467 | struct drm_i915_gem_request *rq; | 467 | struct drm_i915_gem_request *rq; |
468 | struct intel_engine_cs *engine; | 468 | struct intel_engine_cs *engine; |
469 | 469 | ||
470 | if (!dma_fence_is_i915(fence)) | 470 | if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence)) |
471 | return; | 471 | return; |
472 | 472 | ||
473 | rq = to_request(fence); | 473 | rq = to_request(fence); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 333f40bc03bb..7923dfd9963c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -7027,6 +7027,8 @@ enum { | |||
7027 | #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) | 7027 | #define GEN9_SLICE_COMMON_ECO_CHICKEN0 _MMIO(0x7308) |
7028 | #define DISABLE_PIXEL_MASK_CAMMING (1<<14) | 7028 | #define DISABLE_PIXEL_MASK_CAMMING (1<<14) |
7029 | 7029 | ||
7030 | #define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c) | ||
7031 | |||
7030 | #define GEN7_L3SQCREG1 _MMIO(0xB010) | 7032 | #define GEN7_L3SQCREG1 _MMIO(0xB010) |
7031 | #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 | 7033 | #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 |
7032 | 7034 | ||
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index ab5bf4e2e28e..6074e04dc99f 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -1390,6 +1390,11 @@ static int glk_init_workarounds(struct intel_engine_cs *engine) | |||
1390 | if (ret) | 1390 | if (ret) |
1391 | return ret; | 1391 | return ret; |
1392 | 1392 | ||
1393 | /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */ | ||
1394 | ret = wa_ring_whitelist_reg(engine, GEN9_SLICE_COMMON_ECO_CHICKEN1); | ||
1395 | if (ret) | ||
1396 | return ret; | ||
1397 | |||
1393 | /* WaToEnableHwFixForPushConstHWBug:glk */ | 1398 | /* WaToEnableHwFixForPushConstHWBug:glk */ |
1394 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1399 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
1395 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 1400 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index d36e25607435..e71a8cd50498 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -974,6 +974,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio) | |||
974 | 974 | ||
975 | GEM_BUG_ON(prio == I915_PRIORITY_INVALID); | 975 | GEM_BUG_ON(prio == I915_PRIORITY_INVALID); |
976 | 976 | ||
977 | if (i915_gem_request_completed(request)) | ||
978 | return; | ||
979 | |||
977 | if (prio <= READ_ONCE(request->priotree.priority)) | 980 | if (prio <= READ_ONCE(request->priotree.priority)) |
978 | return; | 981 | return; |
979 | 982 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index a2978a37b4f3..700fc754f28a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | |||
@@ -174,6 +174,7 @@ gf119_sor = { | |||
174 | .links = gf119_sor_dp_links, | 174 | .links = gf119_sor_dp_links, |
175 | .power = g94_sor_dp_power, | 175 | .power = g94_sor_dp_power, |
176 | .pattern = gf119_sor_dp_pattern, | 176 | .pattern = gf119_sor_dp_pattern, |
177 | .drive = gf119_sor_dp_drive, | ||
177 | .vcpi = gf119_sor_dp_vcpi, | 178 | .vcpi = gf119_sor_dp_vcpi, |
178 | .audio = gf119_sor_dp_audio, | 179 | .audio = gf119_sor_dp_audio, |
179 | .audio_sym = gf119_sor_dp_audio_sym, | 180 | .audio_sym = gf119_sor_dp_audio_sym, |
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index b0a1dedac802..476079f1255f 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c | |||
@@ -2656,6 +2656,9 @@ static int tegra_sor_probe(struct platform_device *pdev) | |||
2656 | name, err); | 2656 | name, err); |
2657 | goto remove; | 2657 | goto remove; |
2658 | } | 2658 | } |
2659 | } else { | ||
2660 | /* fall back to the module clock on SOR0 (eDP/LVDS only) */ | ||
2661 | sor->clk_out = sor->clk; | ||
2659 | } | 2662 | } |
2660 | 2663 | ||
2661 | sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); | 2664 | sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); |
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 26eddbb62893..3dd62d75f531 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c | |||
@@ -209,9 +209,6 @@ vc4_irq_postinstall(struct drm_device *dev) | |||
209 | { | 209 | { |
210 | struct vc4_dev *vc4 = to_vc4_dev(dev); | 210 | struct vc4_dev *vc4 = to_vc4_dev(dev); |
211 | 211 | ||
212 | /* Undo the effects of a previous vc4_irq_uninstall. */ | ||
213 | enable_irq(dev->irq); | ||
214 | |||
215 | /* Enable both the render done and out of memory interrupts. */ | 212 | /* Enable both the render done and out of memory interrupts. */ |
216 | V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); | 213 | V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS); |
217 | 214 | ||
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 622cd43840b8..493f392b3a0a 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c | |||
@@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev) | |||
327 | return ret; | 327 | return ret; |
328 | 328 | ||
329 | vc4_v3d_init_hw(vc4->dev); | 329 | vc4_v3d_init_hw(vc4->dev); |
330 | |||
331 | /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ | ||
332 | enable_irq(vc4->dev->irq); | ||
330 | vc4_irq_postinstall(vc4->dev); | 333 | vc4_irq_postinstall(vc4->dev); |
331 | 334 | ||
332 | return 0; | 335 | return 0; |
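The two vc4 hunks above move enable_irq() out of vc4_irq_postinstall() and into runtime resume, so it pairs one-for-one with the disable done during suspend via vc4_irq_uninstall(). Roughly, the balanced shape looks like the sketch below; the foo_* names and the drvdata layout are made up for illustration.

    #include <linux/device.h>
    #include <linux/interrupt.h>

    struct foo_dev {
        int irq;
    };

    static int foo_runtime_suspend(struct device *dev)
    {
        struct foo_dev *priv = dev_get_drvdata(dev);

        disable_irq(priv->irq);     /* quiesce while powered down */
        return 0;
    }

    static int foo_runtime_resume(struct device *dev)
    {
        struct foo_dev *priv = dev_get_drvdata(dev);

        enable_irq(priv->irq);      /* exactly one enable per disable */
        return 0;
    }

Keeping the enable in the resume path rather than in postinstall avoids calling enable_irq() without a matching prior disable, which would otherwise warn about an unbalanced enable on first install.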
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 21c62a34e558..87e8af5776a3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -2731,6 +2731,8 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, | |||
2731 | } | 2731 | } |
2732 | 2732 | ||
2733 | view_type = vmw_view_cmd_to_type(header->id); | 2733 | view_type = vmw_view_cmd_to_type(header->id); |
2734 | if (view_type == vmw_view_max) | ||
2735 | return -EINVAL; | ||
2734 | cmd = container_of(header, typeof(*cmd), header); | 2736 | cmd = container_of(header, typeof(*cmd), header); |
2735 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, | 2737 | ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, |
2736 | user_surface_converter, | 2738 | user_surface_converter, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 0545740b3724..641294aef165 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -697,7 +697,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) | |||
697 | vps->pinned = 0; | 697 | vps->pinned = 0; |
698 | 698 | ||
699 | /* Mapping is managed by prepare_fb/cleanup_fb */ | 699 | /* Mapping is managed by prepare_fb/cleanup_fb */ |
700 | memset(&vps->guest_map, 0, sizeof(vps->guest_map)); | ||
701 | memset(&vps->host_map, 0, sizeof(vps->host_map)); | 700 | memset(&vps->host_map, 0, sizeof(vps->host_map)); |
702 | vps->cpp = 0; | 701 | vps->cpp = 0; |
703 | 702 | ||
@@ -760,11 +759,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane, | |||
760 | 759 | ||
761 | 760 | ||
762 | /* Should have been freed by cleanup_fb */ | 761 | /* Should have been freed by cleanup_fb */ |
763 | if (vps->guest_map.virtual) { | ||
764 | DRM_ERROR("Guest mapping not freed\n"); | ||
765 | ttm_bo_kunmap(&vps->guest_map); | ||
766 | } | ||
767 | |||
768 | if (vps->host_map.virtual) { | 762 | if (vps->host_map.virtual) { |
769 | DRM_ERROR("Host mapping not freed\n"); | 763 | DRM_ERROR("Host mapping not freed\n"); |
770 | ttm_bo_kunmap(&vps->host_map); | 764 | ttm_bo_kunmap(&vps->host_map); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index ff9c8389ff21..cd9da2dd79af 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
@@ -175,7 +175,7 @@ struct vmw_plane_state { | |||
175 | int pinned; | 175 | int pinned; |
176 | 176 | ||
177 | /* For CPU Blit */ | 177 | /* For CPU Blit */ |
178 | struct ttm_bo_kmap_obj host_map, guest_map; | 178 | struct ttm_bo_kmap_obj host_map; |
179 | unsigned int cpp; | 179 | unsigned int cpp; |
180 | }; | 180 | }; |
181 | 181 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 90b5437fd787..b68d74888ab1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | |||
@@ -114,7 +114,7 @@ struct vmw_screen_target_display_unit { | |||
114 | bool defined; | 114 | bool defined; |
115 | 115 | ||
116 | /* For CPU Blit */ | 116 | /* For CPU Blit */ |
117 | struct ttm_bo_kmap_obj host_map, guest_map; | 117 | struct ttm_bo_kmap_obj host_map; |
118 | unsigned int cpp; | 118 | unsigned int cpp; |
119 | }; | 119 | }; |
120 | 120 | ||
@@ -695,7 +695,8 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) | |||
695 | s32 src_pitch, dst_pitch; | 695 | s32 src_pitch, dst_pitch; |
696 | u8 *src, *dst; | 696 | u8 *src, *dst; |
697 | bool not_used; | 697 | bool not_used; |
698 | 698 | struct ttm_bo_kmap_obj guest_map; | |
699 | int ret; | ||
699 | 700 | ||
700 | if (!dirty->num_hits) | 701 | if (!dirty->num_hits) |
701 | return; | 702 | return; |
@@ -706,6 +707,13 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) | |||
706 | if (width == 0 || height == 0) | 707 | if (width == 0 || height == 0) |
707 | return; | 708 | return; |
708 | 709 | ||
710 | ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages, | ||
711 | &guest_map); | ||
712 | if (ret) { | ||
713 | DRM_ERROR("Failed mapping framebuffer for blit: %d\n", | ||
714 | ret); | ||
715 | goto out_cleanup; | ||
716 | } | ||
709 | 717 | ||
710 | /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ | 718 | /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */ |
711 | src_pitch = stdu->display_srf->base_size.width * stdu->cpp; | 719 | src_pitch = stdu->display_srf->base_size.width * stdu->cpp; |
@@ -713,7 +721,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) | |||
713 | src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; | 721 | src += ddirty->top * src_pitch + ddirty->left * stdu->cpp; |
714 | 722 | ||
715 | dst_pitch = ddirty->pitch; | 723 | dst_pitch = ddirty->pitch; |
716 | dst = ttm_kmap_obj_virtual(&stdu->guest_map, ¬_used); | 724 | dst = ttm_kmap_obj_virtual(&guest_map, ¬_used); |
717 | dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; | 725 | dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp; |
718 | 726 | ||
719 | 727 | ||
@@ -772,6 +780,7 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty) | |||
772 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | 780 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); |
773 | } | 781 | } |
774 | 782 | ||
783 | ttm_bo_kunmap(&guest_map); | ||
775 | out_cleanup: | 784 | out_cleanup: |
776 | ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; | 785 | ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX; |
777 | ddirty->right = ddirty->bottom = S32_MIN; | 786 | ddirty->right = ddirty->bottom = S32_MIN; |
@@ -1109,9 +1118,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane, | |||
1109 | { | 1118 | { |
1110 | struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); | 1119 | struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); |
1111 | 1120 | ||
1112 | if (vps->guest_map.virtual) | ||
1113 | ttm_bo_kunmap(&vps->guest_map); | ||
1114 | |||
1115 | if (vps->host_map.virtual) | 1121 | if (vps->host_map.virtual) |
1116 | ttm_bo_kunmap(&vps->host_map); | 1122 | ttm_bo_kunmap(&vps->host_map); |
1117 | 1123 | ||
@@ -1277,33 +1283,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane, | |||
1277 | */ | 1283 | */ |
1278 | if (vps->content_fb_type == SEPARATE_DMA && | 1284 | if (vps->content_fb_type == SEPARATE_DMA && |
1279 | !(dev_priv->capabilities & SVGA_CAP_3D)) { | 1285 | !(dev_priv->capabilities & SVGA_CAP_3D)) { |
1280 | |||
1281 | struct vmw_framebuffer_dmabuf *new_vfbd; | ||
1282 | |||
1283 | new_vfbd = vmw_framebuffer_to_vfbd(new_fb); | ||
1284 | |||
1285 | ret = ttm_bo_reserve(&new_vfbd->buffer->base, false, false, | ||
1286 | NULL); | ||
1287 | if (ret) | ||
1288 | goto out_srf_unpin; | ||
1289 | |||
1290 | ret = ttm_bo_kmap(&new_vfbd->buffer->base, 0, | ||
1291 | new_vfbd->buffer->base.num_pages, | ||
1292 | &vps->guest_map); | ||
1293 | |||
1294 | ttm_bo_unreserve(&new_vfbd->buffer->base); | ||
1295 | |||
1296 | if (ret) { | ||
1297 | DRM_ERROR("Failed to map content buffer to CPU\n"); | ||
1298 | goto out_srf_unpin; | ||
1299 | } | ||
1300 | |||
1301 | ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, | 1286 | ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0, |
1302 | vps->surf->res.backup->base.num_pages, | 1287 | vps->surf->res.backup->base.num_pages, |
1303 | &vps->host_map); | 1288 | &vps->host_map); |
1304 | if (ret) { | 1289 | if (ret) { |
1305 | DRM_ERROR("Failed to map display buffer to CPU\n"); | 1290 | DRM_ERROR("Failed to map display buffer to CPU\n"); |
1306 | ttm_bo_kunmap(&vps->guest_map); | ||
1307 | goto out_srf_unpin; | 1291 | goto out_srf_unpin; |
1308 | } | 1292 | } |
1309 | 1293 | ||
@@ -1350,7 +1334,6 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, | |||
1350 | stdu->display_srf = vps->surf; | 1334 | stdu->display_srf = vps->surf; |
1351 | stdu->content_fb_type = vps->content_fb_type; | 1335 | stdu->content_fb_type = vps->content_fb_type; |
1352 | stdu->cpp = vps->cpp; | 1336 | stdu->cpp = vps->cpp; |
1353 | memcpy(&stdu->guest_map, &vps->guest_map, sizeof(vps->guest_map)); | ||
1354 | memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); | 1337 | memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map)); |
1355 | 1338 | ||
1356 | if (!stdu->defined) | 1339 | if (!stdu->defined) |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 7750a9c38b06..1df7da47f431 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -763,11 +763,11 @@ static int complete_subctxt(struct hfi1_filedata *fd) | |||
763 | } | 763 | } |
764 | 764 | ||
765 | if (ret) { | 765 | if (ret) { |
766 | hfi1_rcd_put(fd->uctxt); | ||
767 | fd->uctxt = NULL; | ||
768 | spin_lock_irqsave(&fd->dd->uctxt_lock, flags); | 766 | spin_lock_irqsave(&fd->dd->uctxt_lock, flags); |
769 | __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); | 767 | __clear_bit(fd->subctxt, fd->uctxt->in_use_ctxts); |
770 | spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); | 768 | spin_unlock_irqrestore(&fd->dd->uctxt_lock, flags); |
769 | hfi1_rcd_put(fd->uctxt); | ||
770 | fd->uctxt = NULL; | ||
771 | } | 771 | } |
772 | 772 | ||
773 | return ret; | 773 | return ret; |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8ac50de2b242..262c1aa2e028 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -1324,7 +1324,8 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn) | |||
1324 | return err; | 1324 | return err; |
1325 | 1325 | ||
1326 | if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || | 1326 | if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || |
1327 | !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) | 1327 | (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && |
1328 | !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) | ||
1328 | return err; | 1329 | return err; |
1329 | 1330 | ||
1330 | mutex_lock(&dev->lb_mutex); | 1331 | mutex_lock(&dev->lb_mutex); |
@@ -1342,7 +1343,8 @@ static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn) | |||
1342 | mlx5_core_dealloc_transport_domain(dev->mdev, tdn); | 1343 | mlx5_core_dealloc_transport_domain(dev->mdev, tdn); |
1343 | 1344 | ||
1344 | if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || | 1345 | if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || |
1345 | !MLX5_CAP_GEN(dev->mdev, disable_local_lb)) | 1346 | (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && |
1347 | !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) | ||
1346 | return; | 1348 | return; |
1347 | 1349 | ||
1348 | mutex_lock(&dev->lb_mutex); | 1350 | mutex_lock(&dev->lb_mutex); |
@@ -4158,7 +4160,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
4158 | goto err_cnt; | 4160 | goto err_cnt; |
4159 | 4161 | ||
4160 | dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); | 4162 | dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); |
4161 | if (!dev->mdev->priv.uar) | 4163 | if (IS_ERR(dev->mdev->priv.uar)) |
4162 | goto err_cong; | 4164 | goto err_cong; |
4163 | 4165 | ||
4164 | err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); | 4166 | err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); |
@@ -4187,7 +4189,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
4187 | } | 4189 | } |
4188 | 4190 | ||
4189 | if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && | 4191 | if ((MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && |
4190 | MLX5_CAP_GEN(mdev, disable_local_lb)) | 4192 | (MLX5_CAP_GEN(mdev, disable_local_lb_uc) || |
4193 | MLX5_CAP_GEN(mdev, disable_local_lb_mc))) | ||
4191 | mutex_init(&dev->lb_mutex); | 4194 | mutex_init(&dev->lb_mutex); |
4192 | 4195 | ||
4193 | dev->ib_active = true; | 4196 | dev->ib_active = true; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 31ad28853efa..cffe5966aef9 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -4362,12 +4362,11 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev, | |||
4362 | 4362 | ||
4363 | memset(ah_attr, 0, sizeof(*ah_attr)); | 4363 | memset(ah_attr, 0, sizeof(*ah_attr)); |
4364 | 4364 | ||
4365 | ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); | 4365 | if (!path->port || path->port > MLX5_CAP_GEN(dev, num_ports)) |
4366 | rdma_ah_set_port_num(ah_attr, path->port); | ||
4367 | if (rdma_ah_get_port_num(ah_attr) == 0 || | ||
4368 | rdma_ah_get_port_num(ah_attr) > MLX5_CAP_GEN(dev, num_ports)) | ||
4369 | return; | 4366 | return; |
4370 | 4367 | ||
4368 | ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, path->port); | ||
4369 | |||
4371 | rdma_ah_set_port_num(ah_attr, path->port); | 4370 | rdma_ah_set_port_num(ah_attr, path->port); |
4372 | rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); | 4371 | rdma_ah_set_sl(ah_attr, path->dci_cfi_prio_sl & 0xf); |
4373 | 4372 | ||
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 720dfb3a1ac2..1b02283ce20e 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c | |||
@@ -741,6 +741,7 @@ isert_connect_error(struct rdma_cm_id *cma_id) | |||
741 | { | 741 | { |
742 | struct isert_conn *isert_conn = cma_id->qp->qp_context; | 742 | struct isert_conn *isert_conn = cma_id->qp->qp_context; |
743 | 743 | ||
744 | ib_drain_qp(isert_conn->qp); | ||
744 | list_del_init(&isert_conn->node); | 745 | list_del_init(&isert_conn->node); |
745 | isert_conn->cm_id = NULL; | 746 | isert_conn->cm_id = NULL; |
746 | isert_put_conn(isert_conn); | 747 | isert_put_conn(isert_conn); |
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index fcf7235d5742..157e1d9e7725 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
25 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/module.h> | ||
27 | #include <linux/of_device.h> | 28 | #include <linux/of_device.h> |
28 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
29 | #include <linux/mmc/host.h> | 30 | #include <linux/mmc/host.h> |
@@ -667,3 +668,5 @@ int renesas_sdhi_remove(struct platform_device *pdev) | |||
667 | return 0; | 668 | return 0; |
668 | } | 669 | } |
669 | EXPORT_SYMBOL_GPL(renesas_sdhi_remove); | 670 | EXPORT_SYMBOL_GPL(renesas_sdhi_remove); |
671 | |||
672 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index f7f157a62a4a..555c7f133eb8 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c | |||
@@ -1424,7 +1424,9 @@ static const struct file_operations s3cmci_fops_state = { | |||
1424 | struct s3cmci_reg { | 1424 | struct s3cmci_reg { |
1425 | unsigned short addr; | 1425 | unsigned short addr; |
1426 | unsigned char *name; | 1426 | unsigned char *name; |
1427 | } debug_regs[] = { | 1427 | }; |
1428 | |||
1429 | static const struct s3cmci_reg debug_regs[] = { | ||
1428 | DBG_REG(CON), | 1430 | DBG_REG(CON), |
1429 | DBG_REG(PRE), | 1431 | DBG_REG(PRE), |
1430 | DBG_REG(CMDARG), | 1432 | DBG_REG(CMDARG), |
@@ -1446,7 +1448,7 @@ struct s3cmci_reg { | |||
1446 | static int s3cmci_regs_show(struct seq_file *seq, void *v) | 1448 | static int s3cmci_regs_show(struct seq_file *seq, void *v) |
1447 | { | 1449 | { |
1448 | struct s3cmci_host *host = seq->private; | 1450 | struct s3cmci_host *host = seq->private; |
1449 | struct s3cmci_reg *rptr = debug_regs; | 1451 | const struct s3cmci_reg *rptr = debug_regs; |
1450 | 1452 | ||
1451 | for (; rptr->name; rptr++) | 1453 | for (; rptr->name; rptr++) |
1452 | seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, | 1454 | seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name, |
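The s3cmci change above splits the old anonymous struct-with-initializer into a proper type plus a separate const, sentinel-terminated table, which lets the data live in read-only memory. A generic sketch of that idiom (plain C, made-up register names and offsets):

    #include <stdio.h>

    struct reg_desc {
        unsigned short addr;
        const char *name;
    };

    /* const, sentinel-terminated: iteration stops at .name == NULL */
    static const struct reg_desc debug_regs[] = {
        { 0x00, "CON" },
        { 0x04, "PRE" },
        { 0 }                       /* sentinel */
    };

    static void dump_regs(void)
    {
        const struct reg_desc *r;

        for (r = debug_regs; r->name; r++)
            printf("SDI%s\t@ 0x%04x\n", r->name, r->addr);
    }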
diff --git a/drivers/mux/core.c b/drivers/mux/core.c index 2260063b0ea8..6e5cf9d9cd99 100644 --- a/drivers/mux/core.c +++ b/drivers/mux/core.c | |||
@@ -413,6 +413,7 @@ static int of_dev_node_match(struct device *dev, const void *data) | |||
413 | return dev->of_node == data; | 413 | return dev->of_node == data; |
414 | } | 414 | } |
415 | 415 | ||
416 | /* Note this function returns a reference to the mux_chip dev. */ | ||
416 | static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) | 417 | static struct mux_chip *of_find_mux_chip_by_node(struct device_node *np) |
417 | { | 418 | { |
418 | struct device *dev; | 419 | struct device *dev; |
@@ -466,6 +467,7 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) | |||
466 | (!args.args_count && (mux_chip->controllers > 1))) { | 467 | (!args.args_count && (mux_chip->controllers > 1))) { |
467 | dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", | 468 | dev_err(dev, "%pOF: wrong #mux-control-cells for %pOF\n", |
468 | np, args.np); | 469 | np, args.np); |
470 | put_device(&mux_chip->dev); | ||
469 | return ERR_PTR(-EINVAL); | 471 | return ERR_PTR(-EINVAL); |
470 | } | 472 | } |
471 | 473 | ||
@@ -476,10 +478,10 @@ struct mux_control *mux_control_get(struct device *dev, const char *mux_name) | |||
476 | if (controller >= mux_chip->controllers) { | 478 | if (controller >= mux_chip->controllers) { |
477 | dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", | 479 | dev_err(dev, "%pOF: bad mux controller %u specified in %pOF\n", |
478 | np, controller, args.np); | 480 | np, controller, args.np); |
481 | put_device(&mux_chip->dev); | ||
479 | return ERR_PTR(-EINVAL); | 482 | return ERR_PTR(-EINVAL); |
480 | } | 483 | } |
481 | 484 | ||
482 | get_device(&mux_chip->dev); | ||
483 | return &mux_chip->mux[controller]; | 485 | return &mux_chip->mux[controller]; |
484 | } | 486 | } |
485 | EXPORT_SYMBOL_GPL(mux_control_get); | 487 | EXPORT_SYMBOL_GPL(mux_control_get); |
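The mux fix above treats the device reference as taken at lookup time and drops it on every early error return, instead of acquiring it only after all checks pass; that keeps the reference count balanced no matter which path is taken. A stripped-down sketch of the pattern (hypothetical foo_* types and helpers):

    #include <linux/device.h>
    #include <linux/err.h>

    struct foo_ctrl { int id; };

    struct foo_chip {
        struct device dev;
        unsigned int nctrl;
        struct foo_ctrl *ctrl;
    };

    static struct foo_ctrl *foo_get_ctrl(struct foo_chip *chip, unsigned int idx)
    {
        get_device(&chip->dev);         /* the lookup hands out a reference */

        if (idx >= chip->nctrl) {
            put_device(&chip->dev);     /* balance it on the error path */
            return ERR_PTR(-EINVAL);
        }

        return &chip->ctrl[idx];        /* reference travels with the result */
    }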
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 410a0a95130b..b3e7fafee3df 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c | |||
@@ -1913,3 +1913,7 @@ static struct platform_driver cs89x0_driver = { | |||
1913 | module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); | 1913 | module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe); |
1914 | 1914 | ||
1915 | #endif /* CONFIG_CS89x0_PLATFORM */ | 1915 | #endif /* CONFIG_CS89x0_PLATFORM */ |
1916 | |||
1917 | MODULE_LICENSE("GPL"); | ||
1918 | MODULE_DESCRIPTION("Crystal Semiconductor (Now Cirrus Logic) CS89[02]0 network driver"); | ||
1919 | MODULE_AUTHOR("Russell Nelson <nelson@crynwr.com>"); | ||
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 461014b7ccdd..736df59c16f5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -757,6 +757,12 @@ static int ibmvnic_login(struct net_device *netdev) | |||
757 | } | 757 | } |
758 | } while (adapter->renegotiate); | 758 | } while (adapter->renegotiate); |
759 | 759 | ||
760 | /* handle pending MAC address changes after successful login */ | ||
761 | if (adapter->mac_change_pending) { | ||
762 | __ibmvnic_set_mac(netdev, &adapter->desired.mac); | ||
763 | adapter->mac_change_pending = false; | ||
764 | } | ||
765 | |||
760 | return 0; | 766 | return 0; |
761 | } | 767 | } |
762 | 768 | ||
@@ -994,11 +1000,6 @@ static int ibmvnic_open(struct net_device *netdev) | |||
994 | 1000 | ||
995 | mutex_lock(&adapter->reset_lock); | 1001 | mutex_lock(&adapter->reset_lock); |
996 | 1002 | ||
997 | if (adapter->mac_change_pending) { | ||
998 | __ibmvnic_set_mac(netdev, &adapter->desired.mac); | ||
999 | adapter->mac_change_pending = false; | ||
1000 | } | ||
1001 | |||
1002 | if (adapter->state != VNIC_CLOSED) { | 1003 | if (adapter->state != VNIC_CLOSED) { |
1003 | rc = ibmvnic_login(netdev); | 1004 | rc = ibmvnic_login(netdev); |
1004 | if (rc) { | 1005 | if (rc) { |
@@ -1532,7 +1533,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) | |||
1532 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 1533 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
1533 | struct sockaddr *addr = p; | 1534 | struct sockaddr *addr = p; |
1534 | 1535 | ||
1535 | if (adapter->state != VNIC_OPEN) { | 1536 | if (adapter->state == VNIC_PROBED) { |
1536 | memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); | 1537 | memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); |
1537 | adapter->mac_change_pending = true; | 1538 | adapter->mac_change_pending = true; |
1538 | return 0; | 1539 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d629da213511..7b988595ac5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -865,7 +865,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, | |||
865 | u16 vid); | 865 | u16 vid); |
866 | void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); | 866 | void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); |
867 | void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); | 867 | void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); |
868 | void mlx5e_timestamp_set(struct mlx5e_priv *priv); | 868 | void mlx5e_timestamp_init(struct mlx5e_priv *priv); |
869 | 869 | ||
870 | struct mlx5e_redirect_rqt_param { | 870 | struct mlx5e_redirect_rqt_param { |
871 | bool is_rss; | 871 | bool is_rss; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 9bcf38f4123b..3d46ef48d5b8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | |||
@@ -922,8 +922,9 @@ static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv, | |||
922 | 922 | ||
923 | static void mlx5e_ets_init(struct mlx5e_priv *priv) | 923 | static void mlx5e_ets_init(struct mlx5e_priv *priv) |
924 | { | 924 | { |
925 | int i; | ||
926 | struct ieee_ets ets; | 925 | struct ieee_ets ets; |
926 | int err; | ||
927 | int i; | ||
927 | 928 | ||
928 | if (!MLX5_CAP_GEN(priv->mdev, ets)) | 929 | if (!MLX5_CAP_GEN(priv->mdev, ets)) |
929 | return; | 930 | return; |
@@ -936,11 +937,16 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv) | |||
936 | ets.prio_tc[i] = i; | 937 | ets.prio_tc[i] = i; |
937 | } | 938 | } |
938 | 939 | ||
939 | /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ | 940 | if (ets.ets_cap > 1) { |
940 | ets.prio_tc[0] = 1; | 941 | /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ |
941 | ets.prio_tc[1] = 0; | 942 | ets.prio_tc[0] = 1; |
943 | ets.prio_tc[1] = 0; | ||
944 | } | ||
942 | 945 | ||
943 | mlx5e_dcbnl_ieee_setets_core(priv, &ets); | 946 | err = mlx5e_dcbnl_ieee_setets_core(priv, &ets); |
947 | if (err) | ||
948 | netdev_err(priv->netdev, | ||
949 | "%s, Failed to init ETS: %d\n", __func__, err); | ||
944 | } | 950 | } |
945 | 951 | ||
946 | enum { | 952 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index bd5af7f37198..2d1395015ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -207,8 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, | |||
207 | return; | 207 | return; |
208 | 208 | ||
209 | mutex_lock(&priv->state_lock); | 209 | mutex_lock(&priv->state_lock); |
210 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) | 210 | mlx5e_update_stats(priv, true); |
211 | mlx5e_update_stats(priv, true); | ||
212 | mutex_unlock(&priv->state_lock); | 211 | mutex_unlock(&priv->state_lock); |
213 | 212 | ||
214 | for (i = 0; i < mlx5e_num_stats_grps; i++) | 213 | for (i = 0; i < mlx5e_num_stats_grps; i++) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index bbbdb5c0086b..466a4e1244d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -2685,7 +2685,7 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, | |||
2685 | netif_carrier_on(netdev); | 2685 | netif_carrier_on(netdev); |
2686 | } | 2686 | } |
2687 | 2687 | ||
2688 | void mlx5e_timestamp_set(struct mlx5e_priv *priv) | 2688 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
2689 | { | 2689 | { |
2690 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; | 2690 | priv->tstamp.tx_type = HWTSTAMP_TX_OFF; |
2691 | priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; | 2691 | priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; |
@@ -2706,7 +2706,6 @@ int mlx5e_open_locked(struct net_device *netdev) | |||
2706 | mlx5e_activate_priv_channels(priv); | 2706 | mlx5e_activate_priv_channels(priv); |
2707 | if (priv->profile->update_carrier) | 2707 | if (priv->profile->update_carrier) |
2708 | priv->profile->update_carrier(priv); | 2708 | priv->profile->update_carrier(priv); |
2709 | mlx5e_timestamp_set(priv); | ||
2710 | 2709 | ||
2711 | if (priv->profile->update_stats) | 2710 | if (priv->profile->update_stats) |
2712 | queue_delayed_work(priv->wq, &priv->update_stats_work, 0); | 2711 | queue_delayed_work(priv->wq, &priv->update_stats_work, 0); |
@@ -3238,12 +3237,12 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) | |||
3238 | return 0; | 3237 | return 0; |
3239 | } | 3238 | } |
3240 | 3239 | ||
3241 | #define MLX5E_SET_FEATURE(netdev, feature, enable) \ | 3240 | #define MLX5E_SET_FEATURE(features, feature, enable) \ |
3242 | do { \ | 3241 | do { \ |
3243 | if (enable) \ | 3242 | if (enable) \ |
3244 | netdev->features |= feature; \ | 3243 | *features |= feature; \ |
3245 | else \ | 3244 | else \ |
3246 | netdev->features &= ~feature; \ | 3245 | *features &= ~feature; \ |
3247 | } while (0) | 3246 | } while (0) |
3248 | 3247 | ||
3249 | typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); | 3248 | typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); |
@@ -3366,6 +3365,7 @@ static int set_feature_arfs(struct net_device *netdev, bool enable) | |||
3366 | #endif | 3365 | #endif |
3367 | 3366 | ||
3368 | static int mlx5e_handle_feature(struct net_device *netdev, | 3367 | static int mlx5e_handle_feature(struct net_device *netdev, |
3368 | netdev_features_t *features, | ||
3369 | netdev_features_t wanted_features, | 3369 | netdev_features_t wanted_features, |
3370 | netdev_features_t feature, | 3370 | netdev_features_t feature, |
3371 | mlx5e_feature_handler feature_handler) | 3371 | mlx5e_feature_handler feature_handler) |
@@ -3384,34 +3384,40 @@ static int mlx5e_handle_feature(struct net_device *netdev, | |||
3384 | return err; | 3384 | return err; |
3385 | } | 3385 | } |
3386 | 3386 | ||
3387 | MLX5E_SET_FEATURE(netdev, feature, enable); | 3387 | MLX5E_SET_FEATURE(features, feature, enable); |
3388 | return 0; | 3388 | return 0; |
3389 | } | 3389 | } |
3390 | 3390 | ||
3391 | static int mlx5e_set_features(struct net_device *netdev, | 3391 | static int mlx5e_set_features(struct net_device *netdev, |
3392 | netdev_features_t features) | 3392 | netdev_features_t features) |
3393 | { | 3393 | { |
3394 | netdev_features_t oper_features = netdev->features; | ||
3394 | int err; | 3395 | int err; |
3395 | 3396 | ||
3396 | err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO, | 3397 | err = mlx5e_handle_feature(netdev, &oper_features, features, |
3397 | set_feature_lro); | 3398 | NETIF_F_LRO, set_feature_lro); |
3398 | err |= mlx5e_handle_feature(netdev, features, | 3399 | err |= mlx5e_handle_feature(netdev, &oper_features, features, |
3399 | NETIF_F_HW_VLAN_CTAG_FILTER, | 3400 | NETIF_F_HW_VLAN_CTAG_FILTER, |
3400 | set_feature_cvlan_filter); | 3401 | set_feature_cvlan_filter); |
3401 | err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, | 3402 | err |= mlx5e_handle_feature(netdev, &oper_features, features, |
3402 | set_feature_tc_num_filters); | 3403 | NETIF_F_HW_TC, set_feature_tc_num_filters); |
3403 | err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, | 3404 | err |= mlx5e_handle_feature(netdev, &oper_features, features, |
3404 | set_feature_rx_all); | 3405 | NETIF_F_RXALL, set_feature_rx_all); |
3405 | err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS, | 3406 | err |= mlx5e_handle_feature(netdev, &oper_features, features, |
3406 | set_feature_rx_fcs); | 3407 | NETIF_F_RXFCS, set_feature_rx_fcs); |
3407 | err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, | 3408 | err |= mlx5e_handle_feature(netdev, &oper_features, features, |
3408 | set_feature_rx_vlan); | 3409 | NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); |
3409 | #ifdef CONFIG_RFS_ACCEL | 3410 | #ifdef CONFIG_RFS_ACCEL |
3410 | err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE, | 3411 | err |= mlx5e_handle_feature(netdev, &oper_features, features, |
3411 | set_feature_arfs); | 3412 | NETIF_F_NTUPLE, set_feature_arfs); |
3412 | #endif | 3413 | #endif |
3413 | 3414 | ||
3414 | return err ? -EINVAL : 0; | 3415 | if (err) { |
3416 | netdev->features = oper_features; | ||
3417 | return -EINVAL; | ||
3418 | } | ||
3419 | |||
3420 | return 0; | ||
3415 | } | 3421 | } |
3416 | 3422 | ||
3417 | static netdev_features_t mlx5e_fix_features(struct net_device *netdev, | 3423 | static netdev_features_t mlx5e_fix_features(struct net_device *netdev, |
@@ -4167,6 +4173,8 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, | |||
4167 | INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); | 4173 | INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work); |
4168 | INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); | 4174 | INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work); |
4169 | INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); | 4175 | INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); |
4176 | |||
4177 | mlx5e_timestamp_init(priv); | ||
4170 | } | 4178 | } |
4171 | 4179 | ||
4172 | static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) | 4180 | static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) |
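The mlx5e_set_features() rework above has each handler record its result in a local oper_features copy; if any handler fails, that copy, which reflects exactly what the hardware accepted, is written back to netdev->features before returning -EINVAL. The sketch below shows the per-feature helper shape; it is illustrative and the names are not the driver's exact ones.

    #include <linux/netdevice.h>

    typedef int (*feature_handler)(struct net_device *netdev, bool enable);

    static int handle_feature(struct net_device *netdev,
                              netdev_features_t *features,
                              netdev_features_t wanted,
                              netdev_features_t feature,
                              feature_handler handler)
    {
        bool enable = !!(wanted & feature);
        int err;

        if (!((netdev->features ^ wanted) & feature))
            return 0;                   /* bit already in the wanted state */

        err = handler(netdev, enable);
        if (err)
            return err;                 /* leave *features untouched */

        if (enable)
            *features |= feature;       /* record only successful changes */
        else
            *features &= ~feature;
        return 0;
    }

Passing the accumulator explicitly, rather than flipping netdev->features from inside a macro, is what lets the caller report the partially applied state on failure.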
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 4d1b0ff4b6e4..10fa6a18fcf9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -934,6 +934,8 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev, | |||
934 | 934 | ||
935 | mlx5e_build_rep_params(mdev, &priv->channels.params); | 935 | mlx5e_build_rep_params(mdev, &priv->channels.params); |
936 | mlx5e_build_rep_netdev(netdev); | 936 | mlx5e_build_rep_netdev(netdev); |
937 | |||
938 | mlx5e_timestamp_init(priv); | ||
937 | } | 939 | } |
938 | 940 | ||
939 | static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) | 941 | static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 1f1f8af87d4d..5a4608281f38 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | |||
@@ -238,15 +238,19 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, | |||
238 | int err = 0; | 238 | int err = 0; |
239 | 239 | ||
240 | /* Temporarily enable local_lb */ | 240 | /* Temporarily enable local_lb */ |
241 | if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { | 241 | err = mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); |
242 | mlx5_nic_vport_query_local_lb(priv->mdev, &lbtp->local_lb); | 242 | if (err) |
243 | if (!lbtp->local_lb) | 243 | return err; |
244 | mlx5_nic_vport_update_local_lb(priv->mdev, true); | 244 | |
245 | if (!lbtp->local_lb) { | ||
246 | err = mlx5_nic_vport_update_local_lb(priv->mdev, true); | ||
247 | if (err) | ||
248 | return err; | ||
245 | } | 249 | } |
246 | 250 | ||
247 | err = mlx5e_refresh_tirs(priv, true); | 251 | err = mlx5e_refresh_tirs(priv, true); |
248 | if (err) | 252 | if (err) |
249 | return err; | 253 | goto out; |
250 | 254 | ||
251 | lbtp->loopback_ok = false; | 255 | lbtp->loopback_ok = false; |
252 | init_completion(&lbtp->comp); | 256 | init_completion(&lbtp->comp); |
@@ -256,16 +260,21 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv, | |||
256 | lbtp->pt.dev = priv->netdev; | 260 | lbtp->pt.dev = priv->netdev; |
257 | lbtp->pt.af_packet_priv = lbtp; | 261 | lbtp->pt.af_packet_priv = lbtp; |
258 | dev_add_pack(&lbtp->pt); | 262 | dev_add_pack(&lbtp->pt); |
263 | |||
264 | return 0; | ||
265 | |||
266 | out: | ||
267 | if (!lbtp->local_lb) | ||
268 | mlx5_nic_vport_update_local_lb(priv->mdev, false); | ||
269 | |||
259 | return err; | 270 | return err; |
260 | } | 271 | } |
261 | 272 | ||
262 | static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, | 273 | static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv, |
263 | struct mlx5e_lbt_priv *lbtp) | 274 | struct mlx5e_lbt_priv *lbtp) |
264 | { | 275 | { |
265 | if (MLX5_CAP_GEN(priv->mdev, disable_local_lb)) { | 276 | if (!lbtp->local_lb) |
266 | if (!lbtp->local_lb) | 277 | mlx5_nic_vport_update_local_lb(priv->mdev, false); |
267 | mlx5_nic_vport_update_local_lb(priv->mdev, false); | ||
268 | } | ||
269 | 278 | ||
270 | dev_remove_pack(&lbtp->pt); | 279 | dev_remove_pack(&lbtp->pt); |
271 | mlx5e_refresh_tirs(priv, false); | 280 | mlx5e_refresh_tirs(priv, false); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 3b2363e93ba5..ef1e787e6140 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
@@ -85,6 +85,8 @@ void mlx5i_init(struct mlx5_core_dev *mdev, | |||
85 | mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); | 85 | mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); |
86 | mlx5i_build_nic_params(mdev, &priv->channels.params); | 86 | mlx5i_build_nic_params(mdev, &priv->channels.params); |
87 | 87 | ||
88 | mlx5e_timestamp_init(priv); | ||
89 | |||
88 | /* netdev init */ | 90 | /* netdev init */ |
89 | netdev->hw_features |= NETIF_F_SG; | 91 | netdev->hw_features |= NETIF_F_SG; |
90 | netdev->hw_features |= NETIF_F_IP_CSUM; | 92 | netdev->hw_features |= NETIF_F_IP_CSUM; |
@@ -449,7 +451,6 @@ static int mlx5i_open(struct net_device *netdev) | |||
449 | 451 | ||
450 | mlx5e_refresh_tirs(epriv, false); | 452 | mlx5e_refresh_tirs(epriv, false); |
451 | mlx5e_activate_priv_channels(epriv); | 453 | mlx5e_activate_priv_channels(epriv); |
452 | mlx5e_timestamp_set(epriv); | ||
453 | 454 | ||
454 | mutex_unlock(&epriv->state_lock); | 455 | mutex_unlock(&epriv->state_lock); |
455 | return 0; | 456 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index fa8aed62b231..5701f125e99c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | |||
@@ -423,9 +423,13 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, | |||
423 | 423 | ||
424 | switch (clock->ptp_info.pin_config[pin].func) { | 424 | switch (clock->ptp_info.pin_config[pin].func) { |
425 | case PTP_PF_EXTTS: | 425 | case PTP_PF_EXTTS: |
426 | ptp_event.index = pin; | ||
427 | ptp_event.timestamp = timecounter_cyc2time(&clock->tc, | ||
428 | be64_to_cpu(eqe->data.pps.time_stamp)); | ||
426 | if (clock->pps_info.enabled) { | 429 | if (clock->pps_info.enabled) { |
427 | ptp_event.type = PTP_CLOCK_PPSUSR; | 430 | ptp_event.type = PTP_CLOCK_PPSUSR; |
428 | ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp); | 431 | ptp_event.pps_times.ts_real = |
432 | ns_to_timespec64(ptp_event.timestamp); | ||
429 | } else { | 433 | } else { |
430 | ptp_event.type = PTP_CLOCK_EXTTS; | 434 | ptp_event.type = PTP_CLOCK_EXTTS; |
431 | } | 435 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8a89c7e8cd63..0f88fd30a09a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -319,6 +319,7 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) | |||
319 | struct mlx5_eq_table *table = &priv->eq_table; | 319 | struct mlx5_eq_table *table = &priv->eq_table; |
320 | int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); | 320 | int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq); |
321 | int nvec; | 321 | int nvec; |
322 | int err; | ||
322 | 323 | ||
323 | nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + | 324 | nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + |
324 | MLX5_EQ_VEC_COMP_BASE; | 325 | MLX5_EQ_VEC_COMP_BASE; |
@@ -328,21 +329,23 @@ static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) | |||
328 | 329 | ||
329 | priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); | 330 | priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); |
330 | if (!priv->irq_info) | 331 | if (!priv->irq_info) |
331 | goto err_free_msix; | 332 | return -ENOMEM; |
332 | 333 | ||
333 | nvec = pci_alloc_irq_vectors(dev->pdev, | 334 | nvec = pci_alloc_irq_vectors(dev->pdev, |
334 | MLX5_EQ_VEC_COMP_BASE + 1, nvec, | 335 | MLX5_EQ_VEC_COMP_BASE + 1, nvec, |
335 | PCI_IRQ_MSIX); | 336 | PCI_IRQ_MSIX); |
336 | if (nvec < 0) | 337 | if (nvec < 0) { |
337 | return nvec; | 338 | err = nvec; |
339 | goto err_free_irq_info; | ||
340 | } | ||
338 | 341 | ||
339 | table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; | 342 | table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; |
340 | 343 | ||
341 | return 0; | 344 | return 0; |
342 | 345 | ||
343 | err_free_msix: | 346 | err_free_irq_info: |
344 | kfree(priv->irq_info); | 347 | kfree(priv->irq_info); |
345 | return -ENOMEM; | 348 | return err; |
346 | } | 349 | } |
347 | 350 | ||
348 | static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev) | 351 | static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev) |
@@ -578,8 +581,7 @@ static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev) | |||
578 | int ret = 0; | 581 | int ret = 0; |
579 | 582 | ||
580 | /* Disable local_lb by default */ | 583 | /* Disable local_lb by default */ |
581 | if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && | 584 | if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) |
582 | MLX5_CAP_GEN(dev, disable_local_lb)) | ||
583 | ret = mlx5_nic_vport_update_local_lb(dev, false); | 585 | ret = mlx5_nic_vport_update_local_lb(dev, false); |
584 | 586 | ||
585 | return ret; | 587 | return ret; |
@@ -1121,9 +1123,12 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
1121 | goto err_stop_poll; | 1123 | goto err_stop_poll; |
1122 | } | 1124 | } |
1123 | 1125 | ||
1124 | if (boot && mlx5_init_once(dev, priv)) { | 1126 | if (boot) { |
1125 | dev_err(&pdev->dev, "sw objs init failed\n"); | 1127 | err = mlx5_init_once(dev, priv); |
1126 | goto err_stop_poll; | 1128 | if (err) { |
1129 | dev_err(&pdev->dev, "sw objs init failed\n"); | ||
1130 | goto err_stop_poll; | ||
1131 | } | ||
1127 | } | 1132 | } |
1128 | 1133 | ||
1129 | err = mlx5_alloc_irq_vectors(dev); | 1134 | err = mlx5_alloc_irq_vectors(dev); |
@@ -1133,8 +1138,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, | |||
1133 | } | 1138 | } |
1134 | 1139 | ||
1135 | dev->priv.uar = mlx5_get_uars_page(dev); | 1140 | dev->priv.uar = mlx5_get_uars_page(dev); |
1136 | if (!dev->priv.uar) { | 1141 | if (IS_ERR(dev->priv.uar)) { |
1137 | dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); | 1142 | dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); |
1143 | err = PTR_ERR(dev->priv.uar); | ||
1138 | goto err_disable_msix; | 1144 | goto err_disable_msix; |
1139 | } | 1145 | } |
1140 | 1146 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 222b25908d01..8b97066dd1f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c | |||
@@ -168,18 +168,16 @@ struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev) | |||
168 | struct mlx5_uars_page *ret; | 168 | struct mlx5_uars_page *ret; |
169 | 169 | ||
170 | mutex_lock(&mdev->priv.bfregs.reg_head.lock); | 170 | mutex_lock(&mdev->priv.bfregs.reg_head.lock); |
171 | if (list_empty(&mdev->priv.bfregs.reg_head.list)) { | 171 | if (!list_empty(&mdev->priv.bfregs.reg_head.list)) { |
172 | ret = alloc_uars_page(mdev, false); | ||
173 | if (IS_ERR(ret)) { | ||
174 | ret = NULL; | ||
175 | goto out; | ||
176 | } | ||
177 | list_add(&ret->list, &mdev->priv.bfregs.reg_head.list); | ||
178 | } else { | ||
179 | ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, | 172 | ret = list_first_entry(&mdev->priv.bfregs.reg_head.list, |
180 | struct mlx5_uars_page, list); | 173 | struct mlx5_uars_page, list); |
181 | kref_get(&ret->ref_count); | 174 | kref_get(&ret->ref_count); |
175 | goto out; | ||
182 | } | 176 | } |
177 | ret = alloc_uars_page(mdev, false); | ||
178 | if (IS_ERR(ret)) | ||
179 | goto out; | ||
180 | list_add(&ret->list, &mdev->priv.bfregs.reg_head.list); | ||
183 | out: | 181 | out: |
184 | mutex_unlock(&mdev->priv.bfregs.reg_head.lock); | 182 | mutex_unlock(&mdev->priv.bfregs.reg_head.lock); |
185 | 183 | ||
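Alongside the list-handling cleanup, the uar.c hunk changes the function's failure convention: callers now receive an ERR_PTR() instead of NULL, which is why IS_ERR() checks show up in the mlx5 main.c and mlx5_ib hunks above. The usual shape of that convention, as a sketch with a made-up type:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct page_thing { int dummy; };

    /* Producer: encode the errno in the pointer instead of returning NULL. */
    static struct page_thing *get_page_thing(void)
    {
        struct page_thing *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
            return ERR_PTR(-ENOMEM);
        return p;
    }

    /* Consumer: test with IS_ERR() and propagate PTR_ERR(). */
    static int use_page_thing(void)
    {
        struct page_thing *p = get_page_thing();

        if (IS_ERR(p))
            return PTR_ERR(p);
        kfree(p);
        return 0;
    }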
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index d653b0025b13..a1296a62497d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
@@ -908,23 +908,33 @@ int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable) | |||
908 | void *in; | 908 | void *in; |
909 | int err; | 909 | int err; |
910 | 910 | ||
911 | mlx5_core_dbg(mdev, "%s local_lb\n", enable ? "enable" : "disable"); | 911 | if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) && |
912 | !MLX5_CAP_GEN(mdev, disable_local_lb_uc)) | ||
913 | return 0; | ||
914 | |||
912 | in = kvzalloc(inlen, GFP_KERNEL); | 915 | in = kvzalloc(inlen, GFP_KERNEL); |
913 | if (!in) | 916 | if (!in) |
914 | return -ENOMEM; | 917 | return -ENOMEM; |
915 | 918 | ||
916 | MLX5_SET(modify_nic_vport_context_in, in, | 919 | MLX5_SET(modify_nic_vport_context_in, in, |
917 | field_select.disable_mc_local_lb, 1); | ||
918 | MLX5_SET(modify_nic_vport_context_in, in, | ||
919 | nic_vport_context.disable_mc_local_lb, !enable); | 920 | nic_vport_context.disable_mc_local_lb, !enable); |
920 | |||
921 | MLX5_SET(modify_nic_vport_context_in, in, | ||
922 | field_select.disable_uc_local_lb, 1); | ||
923 | MLX5_SET(modify_nic_vport_context_in, in, | 921 | MLX5_SET(modify_nic_vport_context_in, in, |
924 | nic_vport_context.disable_uc_local_lb, !enable); | 922 | nic_vport_context.disable_uc_local_lb, !enable); |
925 | 923 | ||
924 | if (MLX5_CAP_GEN(mdev, disable_local_lb_mc)) | ||
925 | MLX5_SET(modify_nic_vport_context_in, in, | ||
926 | field_select.disable_mc_local_lb, 1); | ||
927 | |||
928 | if (MLX5_CAP_GEN(mdev, disable_local_lb_uc)) | ||
929 | MLX5_SET(modify_nic_vport_context_in, in, | ||
930 | field_select.disable_uc_local_lb, 1); | ||
931 | |||
926 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); | 932 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); |
927 | 933 | ||
934 | if (!err) | ||
935 | mlx5_core_dbg(mdev, "%s local_lb\n", | ||
936 | enable ? "enable" : "disable"); | ||
937 | |||
928 | kvfree(in); | 938 | kvfree(in); |
929 | return err; | 939 | return err; |
930 | } | 940 | } |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 00b8c642e672..e1dae0616f52 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |||
@@ -331,7 +331,7 @@ nfp_net_get_link_ksettings(struct net_device *netdev, | |||
331 | ls >= ARRAY_SIZE(ls_to_ethtool)) | 331 | ls >= ARRAY_SIZE(ls_to_ethtool)) |
332 | return 0; | 332 | return 0; |
333 | 333 | ||
334 | cmd->base.speed = ls_to_ethtool[sts]; | 334 | cmd->base.speed = ls_to_ethtool[ls]; |
335 | cmd->base.duplex = DUPLEX_FULL; | 335 | cmd->base.duplex = DUPLEX_FULL; |
336 | 336 | ||
337 | return 0; | 337 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 217b62a3f587..3e57bf5d3d03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -776,6 +776,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, | |||
776 | int rc = 0; | 776 | int rc = 0; |
777 | struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; | 777 | struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; |
778 | bool b_ret_ent = true; | 778 | bool b_ret_ent = true; |
779 | bool eblock; | ||
779 | 780 | ||
780 | if (!p_hwfn) | 781 | if (!p_hwfn) |
781 | return -EINVAL; | 782 | return -EINVAL; |
@@ -794,6 +795,11 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, | |||
794 | if (rc) | 795 | if (rc) |
795 | goto spq_post_fail; | 796 | goto spq_post_fail; |
796 | 797 | ||
798 | /* Check if entry is in block mode before qed_spq_add_entry, | ||
799 | * which might kfree p_ent. | ||
800 | */ | ||
801 | eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK); | ||
802 | |||
797 | /* Add the request to the pending queue */ | 803 | /* Add the request to the pending queue */ |
798 | rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); | 804 | rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); |
799 | if (rc) | 805 | if (rc) |
@@ -811,7 +817,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn, | |||
811 | 817 | ||
812 | spin_unlock_bh(&p_spq->lock); | 818 | spin_unlock_bh(&p_spq->lock); |
813 | 819 | ||
814 | if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) { | 820 | if (eblock) { |
815 | /* For entries in QED BLOCK mode, the completion code cannot | 821 | /* For entries in QED BLOCK mode, the completion code cannot |
816 | * perform the necessary cleanup - if it did, we couldn't | 822 | * perform the necessary cleanup - if it did, we couldn't |
817 | * access p_ent here to see whether it's successful or not. | 823 | * access p_ent here to see whether it's successful or not. |
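The qed fix above snapshots p_ent->comp_mode into a local before qed_spq_add_entry(), because that call can kfree() the entry; the later branch then tests the cached value instead of dereferencing possibly freed memory. The same idea in a tiny self-contained example (plain C, made-up names):

    #include <stdbool.h>
    #include <stdlib.h>

    struct entry {
        bool blocking;
    };

    /* Consumes and frees ent, as qed_spq_add_entry() may. */
    static int queue_entry(struct entry *ent)
    {
        free(ent);
        return 0;
    }

    static int submit(struct entry *ent)
    {
        bool blocking = ent->blocking;  /* snapshot before ent may be freed */
        int rc = queue_entry(ent);

        if (rc)
            return rc;

        if (blocking)                   /* safe: reads the local, not *ent */
            return 1;                   /* e.g. wait for completion here */
        return 0;
    }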
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a0fe05968348..a197e11f3a56 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -2089,8 +2089,8 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf) | |||
2089 | add_reg(CSMR); | 2089 | add_reg(CSMR); |
2090 | if (cd->select_mii) | 2090 | if (cd->select_mii) |
2091 | add_reg(RMII_MII); | 2091 | add_reg(RMII_MII); |
2092 | add_reg(ARSTR); | ||
2093 | if (cd->tsu) { | 2092 | if (cd->tsu) { |
2093 | add_tsu_reg(ARSTR); | ||
2094 | add_tsu_reg(TSU_CTRST); | 2094 | add_tsu_reg(TSU_CTRST); |
2095 | add_tsu_reg(TSU_FWEN0); | 2095 | add_tsu_reg(TSU_FWEN0); |
2096 | add_tsu_reg(TSU_FWEN1); | 2096 | add_tsu_reg(TSU_FWEN1); |
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index d8e5747ff4e3..264d4af0bf69 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -1006,17 +1006,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set) | |||
1006 | if (!ifname_is_set) | 1006 | if (!ifname_is_set) |
1007 | snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); | 1007 | snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index); |
1008 | 1008 | ||
1009 | mutex_unlock(&pn->all_ppp_mutex); | ||
1010 | |||
1009 | ret = register_netdevice(ppp->dev); | 1011 | ret = register_netdevice(ppp->dev); |
1010 | if (ret < 0) | 1012 | if (ret < 0) |
1011 | goto err_unit; | 1013 | goto err_unit; |
1012 | 1014 | ||
1013 | atomic_inc(&ppp_unit_count); | 1015 | atomic_inc(&ppp_unit_count); |
1014 | 1016 | ||
1015 | mutex_unlock(&pn->all_ppp_mutex); | ||
1016 | |||
1017 | return 0; | 1017 | return 0; |
1018 | 1018 | ||
1019 | err_unit: | 1019 | err_unit: |
1020 | mutex_lock(&pn->all_ppp_mutex); | ||
1020 | unit_put(&pn->units_idr, ppp->file.index); | 1021 | unit_put(&pn->units_idr, ppp->file.index); |
1021 | err: | 1022 | err: |
1022 | mutex_unlock(&pn->all_ppp_mutex); | 1023 | mutex_unlock(&pn->all_ppp_mutex); |
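The ppp change above releases all_ppp_mutex before register_netdevice() and re-takes it only on the error path, so the subsystem mutex is never held across a call that fires netdev notifiers and can pull in other locking. The general shape, assuming (as in the ppp path) that the caller already holds the RTNL:

    #include <linux/mutex.h>
    #include <linux/netdevice.h>

    static DEFINE_MUTEX(registry_lock);

    static int register_unit(struct net_device *dev)
    {
        int ret;

        mutex_lock(&registry_lock);
        /* ...reserve an identifier for the unit under the lock... */
        mutex_unlock(&registry_lock);   /* drop it before the heavy call */

        ret = register_netdevice(dev);  /* fires NETDEV_REGISTER notifiers */
        if (ret < 0) {
            mutex_lock(&registry_lock);
            /* ...give the reserved identifier back... */
            mutex_unlock(&registry_lock);
        }
        return ret;
    }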
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 94c7804903c4..ec56ff29aac4 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c | |||
@@ -2396,6 +2396,7 @@ static int lan78xx_reset(struct lan78xx_net *dev) | |||
2396 | buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; | 2396 | buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE; |
2397 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; | 2397 | dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE; |
2398 | dev->rx_qlen = 4; | 2398 | dev->rx_qlen = 4; |
2399 | dev->tx_qlen = 4; | ||
2399 | } | 2400 | } |
2400 | 2401 | ||
2401 | ret = lan78xx_write_reg(dev, BURST_CAP, buf); | 2402 | ret = lan78xx_write_reg(dev, BURST_CAP, buf); |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 78367373185f..e54255597fac 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -489,6 +489,7 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { | |||
489 | 489 | ||
490 | static spinlock_t hwsim_radio_lock; | 490 | static spinlock_t hwsim_radio_lock; |
491 | static LIST_HEAD(hwsim_radios); | 491 | static LIST_HEAD(hwsim_radios); |
492 | static struct workqueue_struct *hwsim_wq; | ||
492 | static int hwsim_radio_idx; | 493 | static int hwsim_radio_idx; |
493 | 494 | ||
494 | static struct platform_driver mac80211_hwsim_driver = { | 495 | static struct platform_driver mac80211_hwsim_driver = { |
@@ -3125,6 +3126,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3125 | if (info->attrs[HWSIM_ATTR_CHANNELS]) | 3126 | if (info->attrs[HWSIM_ATTR_CHANNELS]) |
3126 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); | 3127 | param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); |
3127 | 3128 | ||
3129 | if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) { | ||
3130 | GENL_SET_ERR_MSG(info, "too many channels specified"); | ||
3131 | return -EINVAL; | ||
3132 | } | ||
3133 | |||
3128 | if (info->attrs[HWSIM_ATTR_NO_VIF]) | 3134 | if (info->attrs[HWSIM_ATTR_NO_VIF]) |
3129 | param.no_vif = true; | 3135 | param.no_vif = true; |
3130 | 3136 | ||
@@ -3347,7 +3353,7 @@ static void remove_user_radios(u32 portid) | |||
3347 | if (entry->destroy_on_close && entry->portid == portid) { | 3353 | if (entry->destroy_on_close && entry->portid == portid) { |
3348 | list_del(&entry->list); | 3354 | list_del(&entry->list); |
3349 | INIT_WORK(&entry->destroy_work, destroy_radio); | 3355 | INIT_WORK(&entry->destroy_work, destroy_radio); |
3350 | schedule_work(&entry->destroy_work); | 3356 | queue_work(hwsim_wq, &entry->destroy_work); |
3351 | } | 3357 | } |
3352 | } | 3358 | } |
3353 | spin_unlock_bh(&hwsim_radio_lock); | 3359 | spin_unlock_bh(&hwsim_radio_lock); |
@@ -3422,7 +3428,7 @@ static void __net_exit hwsim_exit_net(struct net *net) | |||
3422 | 3428 | ||
3423 | list_del(&data->list); | 3429 | list_del(&data->list); |
3424 | INIT_WORK(&data->destroy_work, destroy_radio); | 3430 | INIT_WORK(&data->destroy_work, destroy_radio); |
3425 | schedule_work(&data->destroy_work); | 3431 | queue_work(hwsim_wq, &data->destroy_work); |
3426 | } | 3432 | } |
3427 | spin_unlock_bh(&hwsim_radio_lock); | 3433 | spin_unlock_bh(&hwsim_radio_lock); |
3428 | } | 3434 | } |
@@ -3454,6 +3460,10 @@ static int __init init_mac80211_hwsim(void) | |||
3454 | 3460 | ||
3455 | spin_lock_init(&hwsim_radio_lock); | 3461 | spin_lock_init(&hwsim_radio_lock); |
3456 | 3462 | ||
3463 | hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0); | ||
3464 | if (!hwsim_wq) | ||
3465 | return -ENOMEM; | ||
3466 | |||
3457 | err = register_pernet_device(&hwsim_net_ops); | 3467 | err = register_pernet_device(&hwsim_net_ops); |
3458 | if (err) | 3468 | if (err) |
3459 | return err; | 3469 | return err; |
@@ -3592,8 +3602,11 @@ static void __exit exit_mac80211_hwsim(void) | |||
3592 | hwsim_exit_netlink(); | 3602 | hwsim_exit_netlink(); |
3593 | 3603 | ||
3594 | mac80211_hwsim_free(); | 3604 | mac80211_hwsim_free(); |
3605 | flush_workqueue(hwsim_wq); | ||
3606 | |||
3595 | unregister_netdev(hwsim_mon); | 3607 | unregister_netdev(hwsim_mon); |
3596 | platform_driver_unregister(&mac80211_hwsim_driver); | 3608 | platform_driver_unregister(&mac80211_hwsim_driver); |
3597 | unregister_pernet_device(&hwsim_net_ops); | 3609 | unregister_pernet_device(&hwsim_net_ops); |
3610 | destroy_workqueue(hwsim_wq); | ||
3598 | } | 3611 | } |
3599 | module_exit(exit_mac80211_hwsim); | 3612 | module_exit(exit_mac80211_hwsim); |
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 76b4fe6816a0..894c2ccb3891 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c | |||
@@ -74,6 +74,7 @@ static struct nvmf_host *nvmf_host_default(void) | |||
74 | return NULL; | 74 | return NULL; |
75 | 75 | ||
76 | kref_init(&host->ref); | 76 | kref_init(&host->ref); |
77 | uuid_gen(&host->id); | ||
77 | snprintf(host->nqn, NVMF_NQN_SIZE, | 78 | snprintf(host->nqn, NVMF_NQN_SIZE, |
78 | "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); | 79 | "nqn.2014-08.org.nvmexpress:uuid:%pUb", &host->id); |
79 | 80 | ||
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index 0f695df14c9d..372ce9913e6d 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c | |||
@@ -765,10 +765,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
765 | break; | 765 | break; |
766 | case ASHMEM_SET_SIZE: | 766 | case ASHMEM_SET_SIZE: |
767 | ret = -EINVAL; | 767 | ret = -EINVAL; |
768 | mutex_lock(&ashmem_mutex); | ||
768 | if (!asma->file) { | 769 | if (!asma->file) { |
769 | ret = 0; | 770 | ret = 0; |
770 | asma->size = (size_t)arg; | 771 | asma->size = (size_t)arg; |
771 | } | 772 | } |
773 | mutex_unlock(&ashmem_mutex); | ||
772 | break; | 774 | break; |
773 | case ASHMEM_GET_SIZE: | 775 | case ASHMEM_GET_SIZE: |
774 | ret = asma->size; | 776 | ret = asma->size; |
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 93eff7dec2f5..1b3efb14aec7 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -1147,11 +1147,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1147 | 1147 | ||
1148 | udc = kzalloc(sizeof(*udc), GFP_KERNEL); | 1148 | udc = kzalloc(sizeof(*udc), GFP_KERNEL); |
1149 | if (!udc) | 1149 | if (!udc) |
1150 | goto err1; | 1150 | goto err_put_gadget; |
1151 | |||
1152 | ret = device_add(&gadget->dev); | ||
1153 | if (ret) | ||
1154 | goto err2; | ||
1155 | 1151 | ||
1156 | device_initialize(&udc->dev); | 1152 | device_initialize(&udc->dev); |
1157 | udc->dev.release = usb_udc_release; | 1153 | udc->dev.release = usb_udc_release; |
@@ -1160,7 +1156,11 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1160 | udc->dev.parent = parent; | 1156 | udc->dev.parent = parent; |
1161 | ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); | 1157 | ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); |
1162 | if (ret) | 1158 | if (ret) |
1163 | goto err3; | 1159 | goto err_put_udc; |
1160 | |||
1161 | ret = device_add(&gadget->dev); | ||
1162 | if (ret) | ||
1163 | goto err_put_udc; | ||
1164 | 1164 | ||
1165 | udc->gadget = gadget; | 1165 | udc->gadget = gadget; |
1166 | gadget->udc = udc; | 1166 | gadget->udc = udc; |
@@ -1170,7 +1170,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1170 | 1170 | ||
1171 | ret = device_add(&udc->dev); | 1171 | ret = device_add(&udc->dev); |
1172 | if (ret) | 1172 | if (ret) |
1173 | goto err4; | 1173 | goto err_unlist_udc; |
1174 | 1174 | ||
1175 | usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); | 1175 | usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); |
1176 | udc->vbus = true; | 1176 | udc->vbus = true; |
@@ -1178,27 +1178,25 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1178 | /* pick up one of pending gadget drivers */ | 1178 | /* pick up one of pending gadget drivers */ |
1179 | ret = check_pending_gadget_drivers(udc); | 1179 | ret = check_pending_gadget_drivers(udc); |
1180 | if (ret) | 1180 | if (ret) |
1181 | goto err5; | 1181 | goto err_del_udc; |
1182 | 1182 | ||
1183 | mutex_unlock(&udc_lock); | 1183 | mutex_unlock(&udc_lock); |
1184 | 1184 | ||
1185 | return 0; | 1185 | return 0; |
1186 | 1186 | ||
1187 | err5: | 1187 | err_del_udc: |
1188 | device_del(&udc->dev); | 1188 | device_del(&udc->dev); |
1189 | 1189 | ||
1190 | err4: | 1190 | err_unlist_udc: |
1191 | list_del(&udc->list); | 1191 | list_del(&udc->list); |
1192 | mutex_unlock(&udc_lock); | 1192 | mutex_unlock(&udc_lock); |
1193 | 1193 | ||
1194 | err3: | ||
1195 | put_device(&udc->dev); | ||
1196 | device_del(&gadget->dev); | 1194 | device_del(&gadget->dev); |
1197 | 1195 | ||
1198 | err2: | 1196 | err_put_udc: |
1199 | kfree(udc); | 1197 | put_device(&udc->dev); |
1200 | 1198 | ||
1201 | err1: | 1199 | err_put_gadget: |
1202 | put_device(&gadget->dev); | 1200 | put_device(&gadget->dev); |
1203 | return ret; | 1201 | return ret; |
1204 | } | 1202 | } |
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 465dbf68b463..f723f7b8c9ac 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c | |||
@@ -279,6 +279,8 @@ static int usb3503_probe(struct usb3503 *hub) | |||
279 | if (gpio_is_valid(hub->gpio_reset)) { | 279 | if (gpio_is_valid(hub->gpio_reset)) { |
280 | err = devm_gpio_request_one(dev, hub->gpio_reset, | 280 | err = devm_gpio_request_one(dev, hub->gpio_reset, |
281 | GPIOF_OUT_INIT_LOW, "usb3503 reset"); | 281 | GPIOF_OUT_INIT_LOW, "usb3503 reset"); |
282 | /* Datasheet defines a hardware reset to be at least 100us */ | ||
283 | usleep_range(100, 10000); | ||
282 | if (err) { | 284 | if (err) { |
283 | dev_err(dev, | 285 | dev_err(dev, |
284 | "unable to request GPIO %d as reset pin (%d)\n", | 286 | "unable to request GPIO %d as reset pin (%d)\n", |
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index f6ae753ab99b..f932f40302df 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c | |||
@@ -1004,7 +1004,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg | |||
1004 | break; | 1004 | break; |
1005 | 1005 | ||
1006 | case MON_IOCQ_RING_SIZE: | 1006 | case MON_IOCQ_RING_SIZE: |
1007 | mutex_lock(&rp->fetch_lock); | ||
1007 | ret = rp->b_size; | 1008 | ret = rp->b_size; |
1009 | mutex_unlock(&rp->fetch_lock); | ||
1008 | break; | 1010 | break; |
1009 | 1011 | ||
1010 | case MON_IOCT_RING_SIZE: | 1012 | case MON_IOCT_RING_SIZE: |
@@ -1231,12 +1233,16 @@ static int mon_bin_vma_fault(struct vm_fault *vmf) | |||
1231 | unsigned long offset, chunk_idx; | 1233 | unsigned long offset, chunk_idx; |
1232 | struct page *pageptr; | 1234 | struct page *pageptr; |
1233 | 1235 | ||
1236 | mutex_lock(&rp->fetch_lock); | ||
1234 | offset = vmf->pgoff << PAGE_SHIFT; | 1237 | offset = vmf->pgoff << PAGE_SHIFT; |
1235 | if (offset >= rp->b_size) | 1238 | if (offset >= rp->b_size) { |
1239 | mutex_unlock(&rp->fetch_lock); | ||
1236 | return VM_FAULT_SIGBUS; | 1240 | return VM_FAULT_SIGBUS; |
1241 | } | ||
1237 | chunk_idx = offset / CHUNK_SIZE; | 1242 | chunk_idx = offset / CHUNK_SIZE; |
1238 | pageptr = rp->b_vec[chunk_idx].pg; | 1243 | pageptr = rp->b_vec[chunk_idx].pg; |
1239 | get_page(pageptr); | 1244 | get_page(pageptr); |
1245 | mutex_unlock(&rp->fetch_lock); | ||
1240 | vmf->page = pageptr; | 1246 | vmf->page = pageptr; |
1241 | return 0; | 1247 | return 0; |
1242 | } | 1248 | } |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 7c6273bf5beb..06d502b3e913 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = { | |||
124 | { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ | 124 | { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ |
125 | { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ | 125 | { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ |
126 | { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ | 126 | { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ |
127 | { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ | ||
127 | { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ | 128 | { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ |
128 | { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ | 129 | { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ |
129 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ | 130 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ |
@@ -174,6 +175,7 @@ static const struct usb_device_id id_table[] = { | |||
174 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | 175 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
175 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | 176 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
176 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ | 177 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ |
178 | { USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */ | ||
177 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ | 179 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ |
178 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ | 180 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ |
179 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ | 181 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index e6127fb21c12..a7d08ae0adad 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
@@ -143,6 +143,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, | |||
143 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 143 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
144 | US_FL_NO_ATA_1X), | 144 | US_FL_NO_ATA_1X), |
145 | 145 | ||
146 | /* Reported-by: Icenowy Zheng <icenowy@aosc.io> */ | ||
147 | UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999, | ||
148 | "Norelsys", | ||
149 | "NS1068X", | ||
150 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
151 | US_FL_IGNORE_UAS), | ||
152 | |||
146 | /* Reported-by: Takeo Nakayama <javhera@gmx.com> */ | 153 | /* Reported-by: Takeo Nakayama <javhera@gmx.com> */ |
147 | UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, | 154 | UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, |
148 | "JMicron", | 155 | "JMicron", |
diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index 7b219d9109b4..ee2bbce24584 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c | |||
@@ -91,7 +91,7 @@ static void usbip_dump_usb_device(struct usb_device *udev) | |||
91 | dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", | 91 | dev_dbg(dev, " devnum(%d) devpath(%s) usb speed(%s)", |
92 | udev->devnum, udev->devpath, usb_speed_string(udev->speed)); | 92 | udev->devnum, udev->devpath, usb_speed_string(udev->speed)); |
93 | 93 | ||
94 | pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport); | 94 | pr_debug("tt hub ttport %d\n", udev->ttport); |
95 | 95 | ||
96 | dev_dbg(dev, " "); | 96 | dev_dbg(dev, " "); |
97 | for (i = 0; i < 16; i++) | 97 | for (i = 0; i < 16; i++) |
@@ -124,12 +124,8 @@ static void usbip_dump_usb_device(struct usb_device *udev) | |||
124 | } | 124 | } |
125 | pr_debug("\n"); | 125 | pr_debug("\n"); |
126 | 126 | ||
127 | dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus); | 127 | dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev), |
128 | 128 | udev->bus->bus_name); | |
129 | dev_dbg(dev, | ||
130 | "descriptor %p, config %p, actconfig %p, rawdescriptors %p\n", | ||
131 | &udev->descriptor, udev->config, | ||
132 | udev->actconfig, udev->rawdescriptors); | ||
133 | 129 | ||
134 | dev_dbg(dev, "have_langid %d, string_langid %d\n", | 130 | dev_dbg(dev, "have_langid %d, string_langid %d\n", |
135 | udev->have_langid, udev->string_langid); | 131 | udev->have_langid, udev->string_langid); |
@@ -237,9 +233,6 @@ void usbip_dump_urb(struct urb *urb) | |||
237 | 233 | ||
238 | dev = &urb->dev->dev; | 234 | dev = &urb->dev->dev; |
239 | 235 | ||
240 | dev_dbg(dev, " urb :%p\n", urb); | ||
241 | dev_dbg(dev, " dev :%p\n", urb->dev); | ||
242 | |||
243 | usbip_dump_usb_device(urb->dev); | 236 | usbip_dump_usb_device(urb->dev); |
244 | 237 | ||
245 | dev_dbg(dev, " pipe :%08x ", urb->pipe); | 238 | dev_dbg(dev, " pipe :%08x ", urb->pipe); |
@@ -248,11 +241,9 @@ void usbip_dump_urb(struct urb *urb) | |||
248 | 241 | ||
249 | dev_dbg(dev, " status :%d\n", urb->status); | 242 | dev_dbg(dev, " status :%d\n", urb->status); |
250 | dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); | 243 | dev_dbg(dev, " transfer_flags :%08X\n", urb->transfer_flags); |
251 | dev_dbg(dev, " transfer_buffer :%p\n", urb->transfer_buffer); | ||
252 | dev_dbg(dev, " transfer_buffer_length:%d\n", | 244 | dev_dbg(dev, " transfer_buffer_length:%d\n", |
253 | urb->transfer_buffer_length); | 245 | urb->transfer_buffer_length); |
254 | dev_dbg(dev, " actual_length :%d\n", urb->actual_length); | 246 | dev_dbg(dev, " actual_length :%d\n", urb->actual_length); |
255 | dev_dbg(dev, " setup_packet :%p\n", urb->setup_packet); | ||
256 | 247 | ||
257 | if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) | 248 | if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL) |
258 | usbip_dump_usb_ctrlrequest( | 249 | usbip_dump_usb_ctrlrequest( |
@@ -262,8 +253,6 @@ void usbip_dump_urb(struct urb *urb) | |||
262 | dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); | 253 | dev_dbg(dev, " number_of_packets :%d\n", urb->number_of_packets); |
263 | dev_dbg(dev, " interval :%d\n", urb->interval); | 254 | dev_dbg(dev, " interval :%d\n", urb->interval); |
264 | dev_dbg(dev, " error_count :%d\n", urb->error_count); | 255 | dev_dbg(dev, " error_count :%d\n", urb->error_count); |
265 | dev_dbg(dev, " context :%p\n", urb->context); | ||
266 | dev_dbg(dev, " complete :%p\n", urb->complete); | ||
267 | } | 256 | } |
268 | EXPORT_SYMBOL_GPL(usbip_dump_urb); | 257 | EXPORT_SYMBOL_GPL(usbip_dump_urb); |
269 | 258 | ||
diff --git a/drivers/usb/usbip/vudc_rx.c b/drivers/usb/usbip/vudc_rx.c index df1e30989148..1e8a23d92cb4 100644 --- a/drivers/usb/usbip/vudc_rx.c +++ b/drivers/usb/usbip/vudc_rx.c | |||
@@ -120,6 +120,25 @@ static int v_recv_cmd_submit(struct vudc *udc, | |||
120 | urb_p->new = 1; | 120 | urb_p->new = 1; |
121 | urb_p->seqnum = pdu->base.seqnum; | 121 | urb_p->seqnum = pdu->base.seqnum; |
122 | 122 | ||
123 | if (urb_p->ep->type == USB_ENDPOINT_XFER_ISOC) { | ||
124 | /* validate packet size and number of packets */ | ||
125 | unsigned int maxp, packets, bytes; | ||
126 | |||
127 | maxp = usb_endpoint_maxp(urb_p->ep->desc); | ||
128 | maxp *= usb_endpoint_maxp_mult(urb_p->ep->desc); | ||
129 | bytes = pdu->u.cmd_submit.transfer_buffer_length; | ||
130 | packets = DIV_ROUND_UP(bytes, maxp); | ||
131 | |||
132 | if (pdu->u.cmd_submit.number_of_packets < 0 || | ||
133 | pdu->u.cmd_submit.number_of_packets > packets) { | ||
134 | dev_err(&udc->gadget.dev, | ||
135 | "CMD_SUBMIT: isoc invalid num packets %d\n", | ||
136 | pdu->u.cmd_submit.number_of_packets); | ||
137 | ret = -EMSGSIZE; | ||
138 | goto free_urbp; | ||
139 | } | ||
140 | } | ||
141 | |||
123 | ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); | 142 | ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type); |
124 | if (ret) { | 143 | if (ret) { |
125 | usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); | 144 | usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC); |
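
The bounds check added above caps the peer-supplied number_of_packets at what the endpoint can physically carry for the given transfer length. A minimal sketch of that arithmetic with made-up endpoint numbers; example_isoc_packets_sane() is not driver code:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */
#include <linux/types.h>

/* For a high-bandwidth endpoint with maxp 1024 and mult 3, a 10000-byte
 * transfer needs at most DIV_ROUND_UP(10000, 3072) = 4 isochronous
 * packets, so any negative or larger number_of_packets is rejected.
 * Assumes a valid endpoint, i.e. maxp * mult is non-zero.
 */
static bool example_isoc_packets_sane(unsigned int maxp, unsigned int mult,
				      unsigned int bytes, int number_of_packets)
{
	unsigned int max_packets = DIV_ROUND_UP(bytes, maxp * mult);

	return number_of_packets >= 0 &&
	       (unsigned int)number_of_packets <= max_packets;
}
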
diff --git a/drivers/usb/usbip/vudc_tx.c b/drivers/usb/usbip/vudc_tx.c index 1440ae0919ec..3ccb17c3e840 100644 --- a/drivers/usb/usbip/vudc_tx.c +++ b/drivers/usb/usbip/vudc_tx.c | |||
@@ -85,6 +85,13 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) | |||
85 | memset(&pdu_header, 0, sizeof(pdu_header)); | 85 | memset(&pdu_header, 0, sizeof(pdu_header)); |
86 | memset(&msg, 0, sizeof(msg)); | 86 | memset(&msg, 0, sizeof(msg)); |
87 | 87 | ||
88 | if (urb->actual_length > 0 && !urb->transfer_buffer) { | ||
89 | dev_err(&udc->gadget.dev, | ||
90 | "urb: actual_length %d transfer_buffer null\n", | ||
91 | urb->actual_length); | ||
92 | return -1; | ||
93 | } | ||
94 | |||
88 | if (urb_p->type == USB_ENDPOINT_XFER_ISOC) | 95 | if (urb_p->type == USB_ENDPOINT_XFER_ISOC) |
89 | iovnum = 2 + urb->number_of_packets; | 96 | iovnum = 2 + urb->number_of_packets; |
90 | else | 97 | else |
@@ -100,8 +107,8 @@ static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p) | |||
100 | 107 | ||
101 | /* 1. setup usbip_header */ | 108 | /* 1. setup usbip_header */ |
102 | setup_ret_submit_pdu(&pdu_header, urb_p); | 109 | setup_ret_submit_pdu(&pdu_header, urb_p); |
103 | usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n", | 110 | usbip_dbg_stub_tx("setup txdata seqnum: %d\n", |
104 | pdu_header.base.seqnum, urb); | 111 | pdu_header.base.seqnum); |
105 | usbip_header_correct_endian(&pdu_header, 1); | 112 | usbip_header_correct_endian(&pdu_header, 1); |
106 | 113 | ||
107 | iov[iovnum].iov_base = &pdu_header; | 114 | iov[iovnum].iov_base = &pdu_header; |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 57efbd3b053b..bd56653b9bbc 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -380,10 +380,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
380 | } | 380 | } |
381 | range = 0; | 381 | range = 0; |
382 | while (range < pages) { | 382 | while (range < pages) { |
383 | if (map->unmap_ops[offset+range].handle == -1) { | 383 | if (map->unmap_ops[offset+range].handle == -1) |
384 | range--; | ||
385 | break; | 384 | break; |
386 | } | ||
387 | range++; | 385 | range++; |
388 | } | 386 | } |
389 | err = __unmap_grant_pages(map, offset, range); | 387 | err = __unmap_grant_pages(map, offset, range); |
@@ -1073,8 +1071,10 @@ unlock_out: | |||
1073 | out_unlock_put: | 1071 | out_unlock_put: |
1074 | mutex_unlock(&priv->lock); | 1072 | mutex_unlock(&priv->lock); |
1075 | out_put_map: | 1073 | out_put_map: |
1076 | if (use_ptemod) | 1074 | if (use_ptemod) { |
1077 | map->vma = NULL; | 1075 | map->vma = NULL; |
1076 | unmap_grant_pages(map, 0, map->count); | ||
1077 | } | ||
1078 | gntdev_put_map(priv, map); | 1078 | gntdev_put_map(priv, map); |
1079 | return err; | 1079 | return err; |
1080 | } | 1080 | } |
diff --git a/include/linux/completion.h b/include/linux/completion.h index 94a59ba7d422..519e94915d18 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
@@ -32,7 +32,6 @@ struct completion { | |||
32 | #define init_completion(x) __init_completion(x) | 32 | #define init_completion(x) __init_completion(x) |
33 | static inline void complete_acquire(struct completion *x) {} | 33 | static inline void complete_acquire(struct completion *x) {} |
34 | static inline void complete_release(struct completion *x) {} | 34 | static inline void complete_release(struct completion *x) {} |
35 | static inline void complete_release_commit(struct completion *x) {} | ||
36 | 35 | ||
37 | #define COMPLETION_INITIALIZER(work) \ | 36 | #define COMPLETION_INITIALIZER(work) \ |
38 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } | 37 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index a04ef7c15c6a..7b01bc11c692 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -47,6 +47,13 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr); | |||
47 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); | 47 | extern int cpu_add_dev_attr_group(struct attribute_group *attrs); |
48 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); | 48 | extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); |
49 | 49 | ||
50 | extern ssize_t cpu_show_meltdown(struct device *dev, | ||
51 | struct device_attribute *attr, char *buf); | ||
52 | extern ssize_t cpu_show_spectre_v1(struct device *dev, | ||
53 | struct device_attribute *attr, char *buf); | ||
54 | extern ssize_t cpu_show_spectre_v2(struct device *dev, | ||
55 | struct device_attribute *attr, char *buf); | ||
56 | |||
50 | extern __printf(4, 5) | 57 | extern __printf(4, 5) |
51 | struct device *cpu_device_create(struct device *parent, void *drvdata, | 58 | struct device *cpu_device_create(struct device *parent, void *drvdata, |
52 | const struct attribute_group **groups, | 59 | const struct attribute_group **groups, |
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 06097ef30449..b511f6d24b42 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h | |||
@@ -42,6 +42,8 @@ phys_addr_t paddr_vmcoreinfo_note(void); | |||
42 | vmcoreinfo_append_str("PAGESIZE=%ld\n", value) | 42 | vmcoreinfo_append_str("PAGESIZE=%ld\n", value) |
43 | #define VMCOREINFO_SYMBOL(name) \ | 43 | #define VMCOREINFO_SYMBOL(name) \ |
44 | vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) | 44 | vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name) |
45 | #define VMCOREINFO_SYMBOL_ARRAY(name) \ | ||
46 | vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)name) | ||
45 | #define VMCOREINFO_SIZE(name) \ | 47 | #define VMCOREINFO_SIZE(name) \ |
46 | vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ | 48 | vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \ |
47 | (unsigned long)sizeof(name)) | 49 | (unsigned long)sizeof(name)) |
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 46cb57d5eb13..1b3996ff3f16 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h | |||
@@ -27,22 +27,18 @@ | |||
27 | # define trace_hardirq_enter() \ | 27 | # define trace_hardirq_enter() \ |
28 | do { \ | 28 | do { \ |
29 | current->hardirq_context++; \ | 29 | current->hardirq_context++; \ |
30 | crossrelease_hist_start(XHLOCK_HARD); \ | ||
31 | } while (0) | 30 | } while (0) |
32 | # define trace_hardirq_exit() \ | 31 | # define trace_hardirq_exit() \ |
33 | do { \ | 32 | do { \ |
34 | current->hardirq_context--; \ | 33 | current->hardirq_context--; \ |
35 | crossrelease_hist_end(XHLOCK_HARD); \ | ||
36 | } while (0) | 34 | } while (0) |
37 | # define lockdep_softirq_enter() \ | 35 | # define lockdep_softirq_enter() \ |
38 | do { \ | 36 | do { \ |
39 | current->softirq_context++; \ | 37 | current->softirq_context++; \ |
40 | crossrelease_hist_start(XHLOCK_SOFT); \ | ||
41 | } while (0) | 38 | } while (0) |
42 | # define lockdep_softirq_exit() \ | 39 | # define lockdep_softirq_exit() \ |
43 | do { \ | 40 | do { \ |
44 | current->softirq_context--; \ | 41 | current->softirq_context--; \ |
45 | crossrelease_hist_end(XHLOCK_SOFT); \ | ||
46 | } while (0) | 42 | } while (0) |
47 | # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, | 43 | # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, |
48 | #else | 44 | #else |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 2e75dc34bff5..3251d9c0d313 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -475,8 +475,6 @@ enum xhlock_context_t { | |||
475 | #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ | 475 | #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ |
476 | { .name = (_name), .key = (void *)(_key), } | 476 | { .name = (_name), .key = (void *)(_key), } |
477 | 477 | ||
478 | static inline void crossrelease_hist_start(enum xhlock_context_t c) {} | ||
479 | static inline void crossrelease_hist_end(enum xhlock_context_t c) {} | ||
480 | static inline void lockdep_invariant_state(bool force) {} | 478 | static inline void lockdep_invariant_state(bool force) {} |
481 | static inline void lockdep_init_task(struct task_struct *task) {} | 479 | static inline void lockdep_init_task(struct task_struct *task) {} |
482 | static inline void lockdep_free_task(struct task_struct *task) {} | 480 | static inline void lockdep_free_task(struct task_struct *task) {} |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1f509d072026..a0610427e168 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
37 | #include <linux/completion.h> | 37 | #include <linux/completion.h> |
38 | #include <linux/pci.h> | 38 | #include <linux/pci.h> |
39 | #include <linux/irq.h> | ||
39 | #include <linux/spinlock_types.h> | 40 | #include <linux/spinlock_types.h> |
40 | #include <linux/semaphore.h> | 41 | #include <linux/semaphore.h> |
41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
@@ -1231,7 +1232,23 @@ enum { | |||
1231 | static inline const struct cpumask * | 1232 | static inline const struct cpumask * |
1232 | mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) | 1233 | mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) |
1233 | { | 1234 | { |
1234 | return pci_irq_get_affinity(dev->pdev, MLX5_EQ_VEC_COMP_BASE + vector); | 1235 | const struct cpumask *mask; |
1236 | struct irq_desc *desc; | ||
1237 | unsigned int irq; | ||
1238 | int eqn; | ||
1239 | int err; | ||
1240 | |||
1241 | err = mlx5_vector2eqn(dev, vector, &eqn, &irq); | ||
1242 | if (err) | ||
1243 | return NULL; | ||
1244 | |||
1245 | desc = irq_to_desc(irq); | ||
1246 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | ||
1247 | mask = irq_data_get_effective_affinity_mask(&desc->irq_data); | ||
1248 | #else | ||
1249 | mask = desc->irq_common_data.affinity; | ||
1250 | #endif | ||
1251 | return mask; | ||
1235 | } | 1252 | } |
1236 | 1253 | ||
1237 | #endif /* MLX5_DRIVER_H */ | 1254 | #endif /* MLX5_DRIVER_H */ |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 78e36fc2609e..94135c03d52b 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -1037,6 +1037,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
1037 | u8 nic_vport_change_event[0x1]; | 1037 | u8 nic_vport_change_event[0x1]; |
1038 | u8 disable_local_lb[0x1]; | 1038 | u8 disable_local_lb[0x1]; |
1039 | u8 reserved_at_3e2[0x1]; | 1039 | u8 reserved_at_3e2[0x1]; |
1040 | u8 disable_local_lb_uc[0x1]; | ||
1041 | u8 disable_local_lb_mc[0x1]; | ||
1040 | u8 log_min_hairpin_wq_data_sz[0x5]; | 1042 | u8 log_min_hairpin_wq_data_sz[0x5]; |
1041 | u8 reserved_at_3e8[0x3]; | 1043 | u8 reserved_at_3e8[0x3]; |
1042 | u8 log_max_vlan_list[0x5]; | 1044 | u8 log_max_vlan_list[0x5]; |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 49b4257ce1ea..f3075d6c7e82 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -85,7 +85,7 @@ struct netlink_ext_ack { | |||
85 | * to the lack of an output buffer.) | 85 | * to the lack of an output buffer.) |
86 | */ | 86 | */ |
87 | #define NL_SET_ERR_MSG(extack, msg) do { \ | 87 | #define NL_SET_ERR_MSG(extack, msg) do { \ |
88 | static const char __msg[] = (msg); \ | 88 | static const char __msg[] = msg; \ |
89 | struct netlink_ext_ack *__extack = (extack); \ | 89 | struct netlink_ext_ack *__extack = (extack); \ |
90 | \ | 90 | \ |
91 | if (__extack) \ | 91 | if (__extack) \ |
@@ -101,7 +101,7 @@ struct netlink_ext_ack { | |||
101 | } while (0) | 101 | } while (0) |
102 | 102 | ||
103 | #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ | 103 | #define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \ |
104 | static const char __msg[] = (msg); \ | 104 | static const char __msg[] = msg; \ |
105 | struct netlink_ext_ack *__extack = (extack); \ | 105 | struct netlink_ext_ack *__extack = (extack); \ |
106 | \ | 106 | \ |
107 | if (__extack) { \ | 107 | if (__extack) { \ |
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 13fb06a103c6..9ca1726ff963 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h | |||
@@ -174,6 +174,15 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) | |||
174 | * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. | 174 | * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. |
175 | * If ring is never resized, and if the pointer is merely | 175 | * If ring is never resized, and if the pointer is merely |
176 | * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. | 176 | * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. |
177 | * However, if called outside the lock, and if some other CPU | ||
178 | * consumes ring entries at the same time, the value returned | ||
179 | * is not guaranteed to be correct. | ||
180 | * In this case - to avoid incorrectly detecting the ring | ||
181 | * as empty - the CPU consuming the ring entries is responsible | ||
182 | * for either consuming all ring entries until the ring is empty, | ||
183 | * or synchronizing with some other CPU and causing it to | ||
184 | * execute __ptr_ring_peek and/or consume the ring entries | ||
185 | * after the synchronization point. | ||
177 | */ | 186 | */ |
178 | static inline void *__ptr_ring_peek(struct ptr_ring *r) | 187 | static inline void *__ptr_ring_peek(struct ptr_ring *r) |
179 | { | 188 | { |
@@ -182,10 +191,7 @@ static inline void *__ptr_ring_peek(struct ptr_ring *r) | |||
182 | return NULL; | 191 | return NULL; |
183 | } | 192 | } |
184 | 193 | ||
185 | /* Note: callers invoking this in a loop must use a compiler barrier, | 194 | /* See __ptr_ring_peek above for locking rules. */ |
186 | * for example cpu_relax(). Callers must take consumer_lock | ||
187 | * if the ring is ever resized - see e.g. ptr_ring_empty. | ||
188 | */ | ||
189 | static inline bool __ptr_ring_empty(struct ptr_ring *r) | 195 | static inline bool __ptr_ring_empty(struct ptr_ring *r) |
190 | { | 196 | { |
191 | return !__ptr_ring_peek(r); | 197 | return !__ptr_ring_peek(r); |
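
The expanded comment above spells out when a lockless __ptr_ring_empty() may report a stale result and what the consuming side must guarantee in return. One way to satisfy that rule is for the consumer to simply drain until empty; a minimal sketch, not part of the patch, with drain_one() as a hypothetical callback:

#include <linux/ptr_ring.h>

/* ptr_ring_consume() takes r->consumer_lock internally, so every entry
 * visible at this point is consumed, and a later lockless empty check
 * on another CPU cannot be left permanently wrong.
 */
static void example_ring_drain(struct ptr_ring *r,
			       void (*drain_one)(void *ptr))
{
	void *ptr;

	while ((ptr = ptr_ring_consume(r)) != NULL)
		drain_one(ptr);
}
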
diff --git a/include/net/arp.h b/include/net/arp.h index dc8cd47f883b..977aabfcdc03 100644 --- a/include/net/arp.h +++ b/include/net/arp.h | |||
@@ -20,6 +20,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32 | |||
20 | 20 | ||
21 | static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) | 21 | static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key) |
22 | { | 22 | { |
23 | if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) | ||
24 | key = INADDR_ANY; | ||
25 | |||
23 | return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); | 26 | return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev); |
24 | } | 27 | } |
25 | 28 | ||
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ab30a22ef4fd..81174f9b8d14 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -815,6 +815,8 @@ struct cfg80211_csa_settings { | |||
815 | u8 count; | 815 | u8 count; |
816 | }; | 816 | }; |
817 | 817 | ||
818 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | ||
819 | |||
818 | /** | 820 | /** |
819 | * struct iface_combination_params - input parameters for interface combinations | 821 | * struct iface_combination_params - input parameters for interface combinations |
820 | * | 822 | * |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index ac029d5d88e4..bd9125b0481f 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -473,6 +473,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n, | |||
473 | struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | 473 | struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, |
474 | const struct Qdisc_ops *ops, | 474 | const struct Qdisc_ops *ops, |
475 | struct netlink_ext_ack *extack); | 475 | struct netlink_ext_ack *extack); |
476 | void qdisc_free(struct Qdisc *qdisc); | ||
476 | struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, | 477 | struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, |
477 | const struct Qdisc_ops *ops, u32 parentid, | 478 | const struct Qdisc_ops *ops, u32 parentid, |
478 | struct netlink_ext_ack *extack); | 479 | struct netlink_ext_ack *extack); |
diff --git a/include/net/tls.h b/include/net/tls.h index 936cfc5cab7d..9185e53a743c 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
@@ -170,7 +170,7 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) | |||
170 | 170 | ||
171 | static inline void tls_err_abort(struct sock *sk) | 171 | static inline void tls_err_abort(struct sock *sk) |
172 | { | 172 | { |
173 | sk->sk_err = -EBADMSG; | 173 | sk->sk_err = EBADMSG; |
174 | sk->sk_error_report(sk); | 174 | sk->sk_error_report(sk); |
175 | } | 175 | } |
176 | 176 | ||
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 4265d7f9e1f2..dcfab5e3b55c 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h | |||
@@ -363,7 +363,6 @@ enum ovs_tunnel_key_attr { | |||
363 | OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ | 363 | OVS_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ |
364 | OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ | 364 | OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ |
365 | OVS_TUNNEL_KEY_ATTR_PAD, | 365 | OVS_TUNNEL_KEY_ATTR_PAD, |
366 | OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* be32 ERSPAN index. */ | ||
367 | __OVS_TUNNEL_KEY_ATTR_MAX | 366 | __OVS_TUNNEL_KEY_ATTR_MAX |
368 | }; | 367 | }; |
369 | 368 | ||
diff --git a/init/Kconfig b/init/Kconfig index 19a6b845d834..a9a2e2c86671 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -461,6 +461,7 @@ endmenu # "CPU/Task time and stats accounting" | |||
461 | 461 | ||
462 | config CPU_ISOLATION | 462 | config CPU_ISOLATION |
463 | bool "CPU isolation" | 463 | bool "CPU isolation" |
464 | depends on SMP || COMPILE_TEST | ||
464 | default y | 465 | default y |
465 | help | 466 | help |
466 | Make sure that CPUs running critical tasks are not disturbed by | 467 | Make sure that CPUs running critical tasks are not disturbed by |
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index aaa319848e7d..ab94d304a634 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -56,7 +56,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
56 | u32 elem_size, index_mask, max_entries; | 56 | u32 elem_size, index_mask, max_entries; |
57 | bool unpriv = !capable(CAP_SYS_ADMIN); | 57 | bool unpriv = !capable(CAP_SYS_ADMIN); |
58 | struct bpf_array *array; | 58 | struct bpf_array *array; |
59 | u64 array_size; | 59 | u64 array_size, mask64; |
60 | 60 | ||
61 | /* check sanity of attributes */ | 61 | /* check sanity of attributes */ |
62 | if (attr->max_entries == 0 || attr->key_size != 4 || | 62 | if (attr->max_entries == 0 || attr->key_size != 4 || |
@@ -74,13 +74,25 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
74 | elem_size = round_up(attr->value_size, 8); | 74 | elem_size = round_up(attr->value_size, 8); |
75 | 75 | ||
76 | max_entries = attr->max_entries; | 76 | max_entries = attr->max_entries; |
77 | index_mask = roundup_pow_of_two(max_entries) - 1; | ||
78 | 77 | ||
79 | if (unpriv) | 78 | /* On 32 bit archs roundup_pow_of_two() with max_entries that has |
79 | * upper most bit set in u32 space is undefined behavior due to | ||
80 | * resulting 1U << 32, so do it manually here in u64 space. | ||
81 | */ | ||
82 | mask64 = fls_long(max_entries - 1); | ||
83 | mask64 = 1ULL << mask64; | ||
84 | mask64 -= 1; | ||
85 | |||
86 | index_mask = mask64; | ||
87 | if (unpriv) { | ||
80 | /* round up array size to nearest power of 2, | 88 | /* round up array size to nearest power of 2, |
81 | * since cpu will speculate within index_mask limits | 89 | * since cpu will speculate within index_mask limits |
82 | */ | 90 | */ |
83 | max_entries = index_mask + 1; | 91 | max_entries = index_mask + 1; |
92 | /* Check for overflows. */ | ||
93 | if (max_entries < attr->max_entries) | ||
94 | return ERR_PTR(-E2BIG); | ||
95 | } | ||
84 | 96 | ||
85 | array_size = sizeof(*array); | 97 | array_size = sizeof(*array); |
86 | if (percpu) | 98 | if (percpu) |
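
The comment in the hunk above explains why the index mask is now built in 64-bit space: roundup_pow_of_two() on a u32 max_entries with its top bit set would evaluate 1U << 32, which is undefined. A standalone sketch of that computation, assuming max_entries was already checked to be non-zero; example_index_mask() is not the kernel function itself:

#include <linux/bitops.h>	/* fls_long() */
#include <linux/types.h>

static u32 example_index_mask(u32 max_entries)
{
	/* fls_long(n - 1) is the bit width needed to index 0..n-1. */
	u64 mask64 = fls_long(max_entries - 1);

	mask64 = 1ULL << mask64;	/* next power of two, safe in u64 */
	mask64 -= 1;			/* e.g. 0x80000001 entries -> 0xffffffff */

	return mask64;			/* always fits in 32 bits here */
}
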
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ceabb394d2dc..2e7a43edf264 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -3091,6 +3091,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
3091 | return -EINVAL; | 3091 | return -EINVAL; |
3092 | } | 3092 | } |
3093 | 3093 | ||
3094 | if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { | ||
3095 | verbose(env, "BPF_ARSH not supported for 32 bit ALU\n"); | ||
3096 | return -EINVAL; | ||
3097 | } | ||
3098 | |||
3094 | if ((opcode == BPF_LSH || opcode == BPF_RSH || | 3099 | if ((opcode == BPF_LSH || opcode == BPF_RSH || |
3095 | opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { | 3100 | opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { |
3096 | int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; | 3101 | int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; |
@@ -5361,7 +5366,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) | |||
5361 | */ | 5366 | */ |
5362 | map_ptr = env->insn_aux_data[i + delta].map_ptr; | 5367 | map_ptr = env->insn_aux_data[i + delta].map_ptr; |
5363 | if (map_ptr == BPF_MAP_PTR_POISON) { | 5368 | if (map_ptr == BPF_MAP_PTR_POISON) { |
5364 | verbose(env, "tail_call obusing map_ptr\n"); | 5369 | verbose(env, "tail_call abusing map_ptr\n"); |
5365 | return -EINVAL; | 5370 | return -EINVAL; |
5366 | } | 5371 | } |
5367 | if (!map_ptr->unpriv_array) | 5372 | if (!map_ptr->unpriv_array) |
diff --git a/kernel/crash_core.c b/kernel/crash_core.c index b3663896278e..4f63597c824d 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c | |||
@@ -410,7 +410,7 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
410 | VMCOREINFO_SYMBOL(contig_page_data); | 410 | VMCOREINFO_SYMBOL(contig_page_data); |
411 | #endif | 411 | #endif |
412 | #ifdef CONFIG_SPARSEMEM | 412 | #ifdef CONFIG_SPARSEMEM |
413 | VMCOREINFO_SYMBOL(mem_section); | 413 | VMCOREINFO_SYMBOL_ARRAY(mem_section); |
414 | VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); | 414 | VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); |
415 | VMCOREINFO_STRUCT_SIZE(mem_section); | 415 | VMCOREINFO_STRUCT_SIZE(mem_section); |
416 | VMCOREINFO_OFFSET(mem_section, section_mem_map); | 416 | VMCOREINFO_OFFSET(mem_section, section_mem_map); |
diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 2ddaec40956f..0926aef10dad 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c | |||
@@ -34,11 +34,6 @@ void complete(struct completion *x) | |||
34 | 34 | ||
35 | spin_lock_irqsave(&x->wait.lock, flags); | 35 | spin_lock_irqsave(&x->wait.lock, flags); |
36 | 36 | ||
37 | /* | ||
38 | * Perform commit of crossrelease here. | ||
39 | */ | ||
40 | complete_release_commit(x); | ||
41 | |||
42 | if (x->done != UINT_MAX) | 37 | if (x->done != UINT_MAX) |
43 | x->done++; | 38 | x->done++; |
44 | __wake_up_locked(&x->wait, TASK_NORMAL, 1); | 39 | __wake_up_locked(&x->wait, TASK_NORMAL, 1); |
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index dd7908743dab..9bcbacba82a8 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c | |||
@@ -89,7 +89,9 @@ static int membarrier_private_expedited(void) | |||
89 | rcu_read_unlock(); | 89 | rcu_read_unlock(); |
90 | } | 90 | } |
91 | if (!fallback) { | 91 | if (!fallback) { |
92 | preempt_disable(); | ||
92 | smp_call_function_many(tmpmask, ipi_mb, NULL, 1); | 93 | smp_call_function_many(tmpmask, ipi_mb, NULL, 1); |
94 | preempt_enable(); | ||
93 | free_cpumask_var(tmpmask); | 95 | free_cpumask_var(tmpmask); |
94 | } | 96 | } |
95 | cpus_read_unlock(); | 97 | cpus_read_unlock(); |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 7114c885a78a..0b249e2f0c3c 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -355,7 +355,7 @@ config PROFILE_ANNOTATED_BRANCHES | |||
355 | on if you need to profile the system's use of these macros. | 355 | on if you need to profile the system's use of these macros. |
356 | 356 | ||
357 | config PROFILE_ALL_BRANCHES | 357 | config PROFILE_ALL_BRANCHES |
358 | bool "Profile all if conditionals" | 358 | bool "Profile all if conditionals" if !FORTIFY_SOURCE |
359 | select TRACE_BRANCH_PROFILING | 359 | select TRACE_BRANCH_PROFILING |
360 | help | 360 | help |
361 | This tracer profiles all branch conditions. Every if () | 361 | This tracer profiles all branch conditions. Every if () |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 9ab18995ff1e..0cddf60186da 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -2534,29 +2534,59 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) | |||
2534 | * The lock and unlock are done within a preempt disable section. | 2534 | * The lock and unlock are done within a preempt disable section. |
2535 | * The current_context per_cpu variable can only be modified | 2535 | * The current_context per_cpu variable can only be modified |
2536 | * by the current task between lock and unlock. But it can | 2536 | * by the current task between lock and unlock. But it can |
2537 | * be modified more than once via an interrupt. There are four | 2537 | * be modified more than once via an interrupt. To pass this |
2538 | * different contexts that we need to consider. | 2538 | * information from the lock to the unlock without having to |
2539 | * access the 'in_interrupt()' functions again (which do show | ||
2540 | * a bit of overhead in something as critical as function tracing), | ||
2541 | * we use a bitmask trick. | ||
2539 | * | 2542 | * |
2540 | * Normal context. | 2543 | * bit 0 = NMI context |
2541 | * SoftIRQ context | 2544 | * bit 1 = IRQ context |
2542 | * IRQ context | 2545 | * bit 2 = SoftIRQ context |
2543 | * NMI context | 2546 | * bit 3 = normal context. |
2544 | * | 2547 | * |
2545 | * If for some reason the ring buffer starts to recurse, we | 2548 | * This works because this is the order of contexts that can |
2546 | * only allow that to happen at most 4 times (one for each | 2549 | * preempt other contexts. A SoftIRQ never preempts an IRQ |
2547 | * context). If it happens 5 times, then we consider this a | 2550 | * context. |
2548 | * recusive loop and do not let it go further. | 2551 | * |
2552 | * When the context is determined, the corresponding bit is | ||
2553 | * checked and set (if it was set, then a recursion of that context | ||
2554 | * happened). | ||
2555 | * | ||
2556 | * On unlock, we need to clear this bit. To do so, just subtract | ||
2557 | * 1 from the current_context and AND it to itself. | ||
2558 | * | ||
2559 | * (binary) | ||
2560 | * 101 - 1 = 100 | ||
2561 | * 101 & 100 = 100 (clearing bit zero) | ||
2562 | * | ||
2563 | * 1010 - 1 = 1001 | ||
2564 | * 1010 & 1001 = 1000 (clearing bit 1) | ||
2565 | * | ||
2566 | * The least significant bit can be cleared this way, and it | ||
2567 | * just so happens that it is the same bit corresponding to | ||
2568 | * the current context. | ||
2549 | */ | 2569 | */ |
2550 | 2570 | ||
2551 | static __always_inline int | 2571 | static __always_inline int |
2552 | trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) | 2572 | trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) |
2553 | { | 2573 | { |
2554 | if (cpu_buffer->current_context >= 4) | 2574 | unsigned int val = cpu_buffer->current_context; |
2575 | unsigned long pc = preempt_count(); | ||
2576 | int bit; | ||
2577 | |||
2578 | if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) | ||
2579 | bit = RB_CTX_NORMAL; | ||
2580 | else | ||
2581 | bit = pc & NMI_MASK ? RB_CTX_NMI : | ||
2582 | pc & HARDIRQ_MASK ? RB_CTX_IRQ : | ||
2583 | pc & SOFTIRQ_OFFSET ? 2 : RB_CTX_SOFTIRQ; | ||
2584 | |||
2585 | if (unlikely(val & (1 << bit))) | ||
2555 | return 1; | 2586 | return 1; |
2556 | 2587 | ||
2557 | cpu_buffer->current_context++; | 2588 | val |= (1 << bit); |
2558 | /* Interrupts must see this update */ | 2589 | cpu_buffer->current_context = val; |
2559 | barrier(); | ||
2560 | 2590 | ||
2561 | return 0; | 2591 | return 0; |
2562 | } | 2592 | } |
@@ -2564,9 +2594,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) | |||
2564 | static __always_inline void | 2594 | static __always_inline void |
2565 | trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) | 2595 | trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) |
2566 | { | 2596 | { |
2567 | /* Don't let the dec leak out */ | 2597 | cpu_buffer->current_context &= cpu_buffer->current_context - 1; |
2568 | barrier(); | ||
2569 | cpu_buffer->current_context--; | ||
2570 | } | 2598 | } |
2571 | 2599 | ||
2572 | /** | 2600 | /** |
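
The rewritten comment above relies on the identity that "val & (val - 1)" clears the lowest set bit, which by construction is the bit of the context being unlocked. A tiny userspace demo of that identity using the context bits named in the comment; a sketch only, not kernel code:

#include <stdio.h>

int main(void)
{
	/* bit 0 = NMI, bit 1 = IRQ, bit 2 = SoftIRQ, bit 3 = normal */
	unsigned int ctx = 0;

	ctx |= 1U << 3;			/* enter normal context: 1000 */
	ctx |= 1U << 1;			/* an IRQ nests on top:  1010 */
	printf("locked:   %x\n", ctx);	/* prints a */

	ctx &= ctx - 1;			/* unlock IRQ: 1010 & 1001 = 1000 */
	printf("unlocked: %x\n", ctx);	/* prints 8 */
	return 0;
}
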
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index d73c14294f3a..f656ca27f6c2 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -127,7 +127,7 @@ | |||
127 | /* GFP bitmask for kmemleak internal allocations */ | 127 | /* GFP bitmask for kmemleak internal allocations */ |
128 | #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ | 128 | #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ |
129 | __GFP_NORETRY | __GFP_NOMEMALLOC | \ | 129 | __GFP_NORETRY | __GFP_NOMEMALLOC | \ |
130 | __GFP_NOWARN) | 130 | __GFP_NOWARN | __GFP_NOFAIL) |
131 | 131 | ||
132 | /* scanning area inside a memory block */ | 132 | /* scanning area inside a memory block */ |
133 | struct kmemleak_scan_area { | 133 | struct kmemleak_scan_area { |
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 325c56043007..086a4abdfa7c 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c | |||
@@ -543,3 +543,7 @@ static void p9_trans_xen_exit(void) | |||
543 | return xenbus_unregister_driver(&xen_9pfs_front_driver); | 543 | return xenbus_unregister_driver(&xen_9pfs_front_driver); |
544 | } | 544 | } |
545 | module_exit(p9_trans_xen_exit); | 545 | module_exit(p9_trans_xen_exit); |
546 | |||
547 | MODULE_AUTHOR("Stefano Stabellini <stefano@aporeto.com>"); | ||
548 | MODULE_DESCRIPTION("Xen Transport for 9P"); | ||
549 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 43ba91c440bc..fc6615d59165 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -3363,9 +3363,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data | |||
3363 | break; | 3363 | break; |
3364 | 3364 | ||
3365 | case L2CAP_CONF_EFS: | 3365 | case L2CAP_CONF_EFS: |
3366 | remote_efs = 1; | 3366 | if (olen == sizeof(efs)) { |
3367 | if (olen == sizeof(efs)) | 3367 | remote_efs = 1; |
3368 | memcpy(&efs, (void *) val, olen); | 3368 | memcpy(&efs, (void *) val, olen); |
3369 | } | ||
3369 | break; | 3370 | break; |
3370 | 3371 | ||
3371 | case L2CAP_CONF_EWS: | 3372 | case L2CAP_CONF_EWS: |
@@ -3584,16 +3585,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, | |||
3584 | break; | 3585 | break; |
3585 | 3586 | ||
3586 | case L2CAP_CONF_EFS: | 3587 | case L2CAP_CONF_EFS: |
3587 | if (olen == sizeof(efs)) | 3588 | if (olen == sizeof(efs)) { |
3588 | memcpy(&efs, (void *)val, olen); | 3589 | memcpy(&efs, (void *)val, olen); |
3589 | 3590 | ||
3590 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && | 3591 | if (chan->local_stype != L2CAP_SERV_NOTRAFIC && |
3591 | efs.stype != L2CAP_SERV_NOTRAFIC && | 3592 | efs.stype != L2CAP_SERV_NOTRAFIC && |
3592 | efs.stype != chan->local_stype) | 3593 | efs.stype != chan->local_stype) |
3593 | return -ECONNREFUSED; | 3594 | return -ECONNREFUSED; |
3594 | 3595 | ||
3595 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), | 3596 | l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), |
3596 | (unsigned long) &efs, endptr - ptr); | 3597 | (unsigned long) &efs, endptr - ptr); |
3598 | } | ||
3597 | break; | 3599 | break; |
3598 | 3600 | ||
3599 | case L2CAP_CONF_FCS: | 3601 | case L2CAP_CONF_FCS: |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index f96f9f58b894..7b7a14abba28 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -532,7 +532,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, | |||
532 | if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) | 532 | if (atomic_read(&tbl->entries) > (1 << nht->hash_shift)) |
533 | nht = neigh_hash_grow(tbl, nht->hash_shift + 1); | 533 | nht = neigh_hash_grow(tbl, nht->hash_shift + 1); |
534 | 534 | ||
535 | hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); | 535 | hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift); |
536 | 536 | ||
537 | if (n->parms->dead) { | 537 | if (n->parms->dead) { |
538 | rc = ERR_PTR(-EINVAL); | 538 | rc = ERR_PTR(-EINVAL); |
@@ -544,7 +544,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, | |||
544 | n1 != NULL; | 544 | n1 != NULL; |
545 | n1 = rcu_dereference_protected(n1->next, | 545 | n1 = rcu_dereference_protected(n1->next, |
546 | lockdep_is_held(&tbl->lock))) { | 546 | lockdep_is_held(&tbl->lock))) { |
547 | if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) { | 547 | if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) { |
548 | if (want_ref) | 548 | if (want_ref) |
549 | neigh_hold(n1); | 549 | neigh_hold(n1); |
550 | rc = n1; | 550 | rc = n1; |
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 56aef027df31..f28f06c91ead 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c | |||
@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey) | |||
223 | 223 | ||
224 | static int arp_constructor(struct neighbour *neigh) | 224 | static int arp_constructor(struct neighbour *neigh) |
225 | { | 225 | { |
226 | __be32 addr = *(__be32 *)neigh->primary_key; | 226 | __be32 addr; |
227 | struct net_device *dev = neigh->dev; | 227 | struct net_device *dev = neigh->dev; |
228 | struct in_device *in_dev; | 228 | struct in_device *in_dev; |
229 | struct neigh_parms *parms; | 229 | struct neigh_parms *parms; |
230 | u32 inaddr_any = INADDR_ANY; | ||
230 | 231 | ||
232 | if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) | ||
233 | memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len); | ||
234 | |||
235 | addr = *(__be32 *)neigh->primary_key; | ||
231 | rcu_read_lock(); | 236 | rcu_read_lock(); |
232 | in_dev = __in_dev_get_rcu(dev); | 237 | in_dev = __in_dev_get_rcu(dev); |
233 | if (!in_dev) { | 238 | if (!in_dev) { |
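
The arp.h and arp.c hunks above make lookup and neighbour creation agree on a single key for devices where the next hop is irrelevant, so loopback and point-to-point interfaces share one cached entry instead of one per address. A sketch of that shared key translation; example_arp_key() is illustrative only, not a kernel function:

#include <linux/netdevice.h>
#include <linux/in.h>

static __be32 example_arp_key(const struct net_device *dev, __be32 addr)
{
	/* INADDR_ANY is 0, so byte order does not matter here. */
	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
		return 0;
	return addr;
}
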
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 6f00e43120a8..296d0b956bfe 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -991,6 +991,7 @@ static int esp_init_state(struct xfrm_state *x) | |||
991 | 991 | ||
992 | switch (encap->encap_type) { | 992 | switch (encap->encap_type) { |
993 | default: | 993 | default: |
994 | err = -EINVAL; | ||
994 | goto error; | 995 | goto error; |
995 | case UDP_ENCAP_ESPINUDP: | 996 | case UDP_ENCAP_ESPINUDP: |
996 | x->props.header_len += sizeof(struct udphdr); | 997 | x->props.header_len += sizeof(struct udphdr); |
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index c359f3cfeec3..32fbd9ba3609 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c | |||
@@ -38,7 +38,8 @@ static struct sk_buff **esp4_gro_receive(struct sk_buff **head, | |||
38 | __be32 spi; | 38 | __be32 spi; |
39 | int err; | 39 | int err; |
40 | 40 | ||
41 | skb_pull(skb, offset); | 41 | if (!pskb_pull(skb, offset)) |
42 | return NULL; | ||
42 | 43 | ||
43 | if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) | 44 | if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) |
44 | goto out; | 45 | goto out; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b7d90b48d821..49cc1c1df1ba 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2759,6 +2759,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, | |||
2759 | if (err == 0 && rt->dst.error) | 2759 | if (err == 0 && rt->dst.error) |
2760 | err = -rt->dst.error; | 2760 | err = -rt->dst.error; |
2761 | } else { | 2761 | } else { |
2762 | fl4.flowi4_iif = LOOPBACK_IFINDEX; | ||
2762 | rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); | 2763 | rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); |
2763 | err = 0; | 2764 | err = 0; |
2764 | if (IS_ERR(rt)) | 2765 | if (IS_ERR(rt)) |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7c888c6e53a9..97513f35bcc5 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -900,13 +900,12 @@ static int esp6_init_state(struct xfrm_state *x) | |||
900 | x->props.header_len += IPV4_BEET_PHMAXLEN + | 900 | x->props.header_len += IPV4_BEET_PHMAXLEN + |
901 | (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); | 901 | (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); |
902 | break; | 902 | break; |
903 | default: | ||
903 | case XFRM_MODE_TRANSPORT: | 904 | case XFRM_MODE_TRANSPORT: |
904 | break; | 905 | break; |
905 | case XFRM_MODE_TUNNEL: | 906 | case XFRM_MODE_TUNNEL: |
906 | x->props.header_len += sizeof(struct ipv6hdr); | 907 | x->props.header_len += sizeof(struct ipv6hdr); |
907 | break; | 908 | break; |
908 | default: | ||
909 | goto error; | ||
910 | } | 909 | } |
911 | 910 | ||
912 | align = ALIGN(crypto_aead_blocksize(aead), 4); | 911 | align = ALIGN(crypto_aead_blocksize(aead), 4); |
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 0bb7d54cf2cb..44d109c435bc 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c | |||
@@ -60,7 +60,8 @@ static struct sk_buff **esp6_gro_receive(struct sk_buff **head, | |||
60 | int nhoff; | 60 | int nhoff; |
61 | int err; | 61 | int err; |
62 | 62 | ||
63 | skb_pull(skb, offset); | 63 | if (!pskb_pull(skb, offset)) |
64 | return NULL; | ||
64 | 65 | ||
65 | if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) | 66 | if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0) |
66 | goto out; | 67 | goto out; |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 18547a44bdaf..a4a94452132b 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1215,14 +1215,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, | |||
1215 | v6_cork->tclass = ipc6->tclass; | 1215 | v6_cork->tclass = ipc6->tclass; |
1216 | if (rt->dst.flags & DST_XFRM_TUNNEL) | 1216 | if (rt->dst.flags & DST_XFRM_TUNNEL) |
1217 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? | 1217 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? |
1218 | rt->dst.dev->mtu : dst_mtu(&rt->dst); | 1218 | READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); |
1219 | else | 1219 | else |
1220 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? | 1220 | mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? |
1221 | rt->dst.dev->mtu : dst_mtu(xfrm_dst_path(&rt->dst)); | 1221 | READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst)); |
1222 | if (np->frag_size < mtu) { | 1222 | if (np->frag_size < mtu) { |
1223 | if (np->frag_size) | 1223 | if (np->frag_size) |
1224 | mtu = np->frag_size; | 1224 | mtu = np->frag_size; |
1225 | } | 1225 | } |
1226 | if (mtu < IPV6_MIN_MTU) | ||
1227 | return -EINVAL; | ||
1226 | cork->base.fragsize = mtu; | 1228 | cork->base.fragsize = mtu; |
1227 | if (dst_allfrag(xfrm_dst_path(&rt->dst))) | 1229 | if (dst_allfrag(xfrm_dst_path(&rt->dst))) |
1228 | cork->base.flags |= IPCORK_ALLFRAG; | 1230 | cork->base.flags |= IPCORK_ALLFRAG; |
@@ -1742,6 +1744,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, | |||
1742 | cork.base.flags = 0; | 1744 | cork.base.flags = 0; |
1743 | cork.base.addr = 0; | 1745 | cork.base.addr = 0; |
1744 | cork.base.opt = NULL; | 1746 | cork.base.opt = NULL; |
1747 | cork.base.dst = NULL; | ||
1745 | v6_cork.opt = NULL; | 1748 | v6_cork.opt = NULL; |
1746 | err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); | 1749 | err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6); |
1747 | if (err) { | 1750 | if (err) { |
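The ip6_output hunk above reads dev->mtu once through READ_ONCE() and rejects cork setup when the resulting MTU falls below IPV6_MIN_MTU. The sketch below shows the same idea with C11 atomics standing in for READ_ONCE on a concurrently updated device MTU; the fake_dev struct and setup_cork_mtu() helper are assumptions made for the example, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

#define IPV6_MIN_MTU 1280

/* Illustrative device whose MTU may change underneath us from another thread. */
struct fake_dev {
	_Atomic unsigned int mtu;
};

/* Pick the cork MTU from a single snapshot and validate it. */
static int setup_cork_mtu(struct fake_dev *dev, unsigned int frag_size,
			  unsigned int *cork_mtu)
{
	/* One load: both uses below see the same value. */
	unsigned int mtu = atomic_load_explicit(&dev->mtu, memory_order_relaxed);

	if (frag_size && frag_size < mtu)
		mtu = frag_size;

	if (mtu < IPV6_MIN_MTU)
		return -1;	/* refuse to cork with an unusable MTU */

	*cork_mtu = mtu;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { 1500 };
	unsigned int mtu;

	if (setup_cork_mtu(&dev, 0, &mtu) == 0)
		printf("cork mtu = %u\n", mtu);

	atomic_store(&dev.mtu, 576);	/* shrunk below the IPv6 minimum */
	if (setup_cork_mtu(&dev, 0, &mtu) != 0)
		printf("rejected: below IPV6_MIN_MTU\n");
	return 0;
}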
diff --git a/net/key/af_key.c b/net/key/af_key.c index 3dffb892d52c..7e2e7188e7f4 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -401,6 +401,11 @@ static int verify_address_len(const void *p) | |||
401 | #endif | 401 | #endif |
402 | int len; | 402 | int len; |
403 | 403 | ||
404 | if (sp->sadb_address_len < | ||
405 | DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family), | ||
406 | sizeof(uint64_t))) | ||
407 | return -EINVAL; | ||
408 | |||
404 | switch (addr->sa_family) { | 409 | switch (addr->sa_family) { |
405 | case AF_INET: | 410 | case AF_INET: |
406 | len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); | 411 | len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t)); |
@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void * | |||
511 | uint16_t ext_type; | 516 | uint16_t ext_type; |
512 | int ext_len; | 517 | int ext_len; |
513 | 518 | ||
519 | if (len < sizeof(*ehdr)) | ||
520 | return -EINVAL; | ||
521 | |||
514 | ext_len = ehdr->sadb_ext_len; | 522 | ext_len = ehdr->sadb_ext_len; |
515 | ext_len *= sizeof(uint64_t); | 523 | ext_len *= sizeof(uint64_t); |
516 | ext_type = ehdr->sadb_ext_type; | 524 | ext_type = ehdr->sadb_ext_type; |
@@ -2194,8 +2202,10 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev | |||
2194 | return PTR_ERR(out_skb); | 2202 | return PTR_ERR(out_skb); |
2195 | 2203 | ||
2196 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); | 2204 | err = pfkey_xfrm_policy2msg(out_skb, xp, dir); |
2197 | if (err < 0) | 2205 | if (err < 0) { |
2206 | kfree_skb(out_skb); | ||
2198 | return err; | 2207 | return err; |
2208 | } | ||
2199 | 2209 | ||
2200 | out_hdr = (struct sadb_msg *) out_skb->data; | 2210 | out_hdr = (struct sadb_msg *) out_skb->data; |
2201 | out_hdr->sadb_msg_version = PF_KEY_V2; | 2211 | out_hdr->sadb_msg_version = PF_KEY_V2; |
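Both af_key hunks above add a length check before any field of a user-supplied extension is trusted: the claimed length must cover at least the header itself and must not run past the buffer. A standalone sketch of that validate-before-parse pattern follows; the ext_hdr layout is made up for illustration and does not match the real sadb structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical length-prefixed extension header, length in 8-byte units. */
struct ext_hdr {
	uint16_t len;	/* total extension length / 8 */
	uint16_t type;
	uint32_t value;
};

static int parse_ext(const uint8_t *buf, size_t buflen)
{
	struct ext_hdr hdr;

	/* 1. There must be room for the header itself. */
	if (buflen < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	/* 2. The claimed length must cover at least the header... */
	size_t ext_len = (size_t)hdr.len * 8;
	if (ext_len < sizeof(hdr))
		return -1;

	/* 3. ...and must not run past the buffer we were given. */
	if (ext_len > buflen)
		return -1;

	printf("ext type %u, %zu bytes\n", hdr.type, ext_len);
	return 0;
}

int main(void)
{
	struct ext_hdr good = { .len = 1, .type = 2, .value = 0 };
	struct ext_hdr bad  = { .len = 0, .type = 2, .value = 0 };	/* smaller than the header */

	printf("good: %d\n", parse_ext((const uint8_t *)&good, sizeof(good)));
	printf("bad:  %d\n", parse_ext((const uint8_t *)&bad, sizeof(bad)));
	return 0;
}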
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 1e52f6012d5d..54cbf5b9864c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2417,7 +2417,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, | |||
2417 | struct nlmsghdr *, | 2417 | struct nlmsghdr *, |
2418 | struct netlink_ext_ack *)) | 2418 | struct netlink_ext_ack *)) |
2419 | { | 2419 | { |
2420 | struct netlink_ext_ack extack = {}; | 2420 | struct netlink_ext_ack extack; |
2421 | struct nlmsghdr *nlh; | 2421 | struct nlmsghdr *nlh; |
2422 | int err; | 2422 | int err; |
2423 | 2423 | ||
@@ -2438,6 +2438,7 @@ int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, | |||
2438 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) | 2438 | if (nlh->nlmsg_type < NLMSG_MIN_TYPE) |
2439 | goto ack; | 2439 | goto ack; |
2440 | 2440 | ||
2441 | memset(&extack, 0, sizeof(extack)); | ||
2441 | err = cb(skb, nlh, &extack); | 2442 | err = cb(skb, nlh, &extack); |
2442 | if (err == -EINTR) | 2443 | if (err == -EINTR) |
2443 | goto skip; | 2444 | goto skip; |
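The af_netlink hunk above moves the extack initialization into the per-message loop, so stale error state from one message cannot leak into the acknowledgment of the next. A tiny sketch of the same "reset per-iteration context inside the loop" rule, with an invented msg_ctx struct standing in for netlink_ext_ack:

#include <stdio.h>
#include <string.h>

/* Stand-in for netlink_ext_ack: per-message error context. */
struct msg_ctx {
	const char *err_msg;
	unsigned int cookie;
};

static int handle(int msg, struct msg_ctx *ctx)
{
	if (msg % 2) {
		ctx->err_msg = "odd messages are rejected";
		ctx->cookie = 0xdead;
		return -1;
	}
	return 0;	/* success: leaves ctx untouched, as many handlers do */
}

int main(void)
{
	int msgs[] = { 1, 2, 3 };

	for (size_t i = 0; i < sizeof(msgs) / sizeof(msgs[0]); i++) {
		struct msg_ctx ctx;

		/* Reset here, not once before the loop: otherwise message 2's
		 * ack would still carry message 1's error string and cookie. */
		memset(&ctx, 0, sizeof(ctx));

		int err = handle(msgs[i], &ctx);
		printf("msg %d -> err=%d, msg=%s, cookie=%#x\n",
		       msgs[i], err, ctx.err_msg ? ctx.err_msg : "(none)", ctx.cookie);
	}
	return 0;
}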
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index bce1f78b0de5..f143908b651d 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <net/mpls.h> | 49 | #include <net/mpls.h> |
50 | #include <net/vxlan.h> | 50 | #include <net/vxlan.h> |
51 | #include <net/tun_proto.h> | 51 | #include <net/tun_proto.h> |
52 | #include <net/erspan.h> | ||
53 | 52 | ||
54 | #include "flow_netlink.h" | 53 | #include "flow_netlink.h" |
55 | 54 | ||
@@ -334,8 +333,7 @@ size_t ovs_tun_key_attr_size(void) | |||
334 | * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it. | 333 | * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it. |
335 | */ | 334 | */ |
336 | + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */ | 335 | + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */ |
337 | + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_DST */ | 336 | + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */ |
338 | + nla_total_size(4); /* OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS */ | ||
339 | } | 337 | } |
340 | 338 | ||
341 | static size_t ovs_nsh_key_attr_size(void) | 339 | static size_t ovs_nsh_key_attr_size(void) |
@@ -402,7 +400,6 @@ static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] | |||
402 | .next = ovs_vxlan_ext_key_lens }, | 400 | .next = ovs_vxlan_ext_key_lens }, |
403 | [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, | 401 | [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) }, |
404 | [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) }, | 402 | [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) }, |
405 | [OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS] = { .len = sizeof(u32) }, | ||
406 | }; | 403 | }; |
407 | 404 | ||
408 | static const struct ovs_len_tbl | 405 | static const struct ovs_len_tbl |
@@ -634,33 +631,6 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr, | |||
634 | return 0; | 631 | return 0; |
635 | } | 632 | } |
636 | 633 | ||
637 | static int erspan_tun_opt_from_nlattr(const struct nlattr *attr, | ||
638 | struct sw_flow_match *match, bool is_mask, | ||
639 | bool log) | ||
640 | { | ||
641 | unsigned long opt_key_offset; | ||
642 | struct erspan_metadata opts; | ||
643 | |||
644 | BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts)); | ||
645 | |||
646 | memset(&opts, 0, sizeof(opts)); | ||
647 | opts.u.index = nla_get_be32(attr); | ||
648 | |||
649 | /* Index has only 20-bit */ | ||
650 | if (ntohl(opts.u.index) & ~INDEX_MASK) { | ||
651 | OVS_NLERR(log, "ERSPAN index number %x too large.", | ||
652 | ntohl(opts.u.index)); | ||
653 | return -EINVAL; | ||
654 | } | ||
655 | |||
656 | SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), is_mask); | ||
657 | opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts)); | ||
658 | SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts), | ||
659 | is_mask); | ||
660 | |||
661 | return 0; | ||
662 | } | ||
663 | |||
664 | static int ip_tun_from_nlattr(const struct nlattr *attr, | 634 | static int ip_tun_from_nlattr(const struct nlattr *attr, |
665 | struct sw_flow_match *match, bool is_mask, | 635 | struct sw_flow_match *match, bool is_mask, |
666 | bool log) | 636 | bool log) |
@@ -768,19 +738,6 @@ static int ip_tun_from_nlattr(const struct nlattr *attr, | |||
768 | break; | 738 | break; |
769 | case OVS_TUNNEL_KEY_ATTR_PAD: | 739 | case OVS_TUNNEL_KEY_ATTR_PAD: |
770 | break; | 740 | break; |
771 | case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: | ||
772 | if (opts_type) { | ||
773 | OVS_NLERR(log, "Multiple metadata blocks provided"); | ||
774 | return -EINVAL; | ||
775 | } | ||
776 | |||
777 | err = erspan_tun_opt_from_nlattr(a, match, is_mask, log); | ||
778 | if (err) | ||
779 | return err; | ||
780 | |||
781 | tun_flags |= TUNNEL_ERSPAN_OPT; | ||
782 | opts_type = type; | ||
783 | break; | ||
784 | default: | 741 | default: |
785 | OVS_NLERR(log, "Unknown IP tunnel attribute %d", | 742 | OVS_NLERR(log, "Unknown IP tunnel attribute %d", |
786 | type); | 743 | type); |
@@ -905,10 +862,6 @@ static int __ip_tun_to_nlattr(struct sk_buff *skb, | |||
905 | else if (output->tun_flags & TUNNEL_VXLAN_OPT && | 862 | else if (output->tun_flags & TUNNEL_VXLAN_OPT && |
906 | vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len)) | 863 | vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len)) |
907 | return -EMSGSIZE; | 864 | return -EMSGSIZE; |
908 | else if (output->tun_flags & TUNNEL_ERSPAN_OPT && | ||
909 | nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, | ||
910 | ((struct erspan_metadata *)tun_opts)->u.index)) | ||
911 | return -EMSGSIZE; | ||
912 | } | 865 | } |
913 | 866 | ||
914 | return 0; | 867 | return 0; |
@@ -2533,8 +2486,6 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, | |||
2533 | break; | 2486 | break; |
2534 | case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: | 2487 | case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: |
2535 | break; | 2488 | break; |
2536 | case OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS: | ||
2537 | break; | ||
2538 | } | 2489 | } |
2539 | }; | 2490 | }; |
2540 | 2491 | ||
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0038a1c44ee9..7dffa9dce28b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1094,17 +1094,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev, | |||
1094 | goto err_out5; | 1094 | goto err_out5; |
1095 | } | 1095 | } |
1096 | 1096 | ||
1097 | if (qdisc_is_percpu_stats(sch)) { | ||
1098 | sch->cpu_bstats = | ||
1099 | netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); | ||
1100 | if (!sch->cpu_bstats) | ||
1101 | goto err_out4; | ||
1102 | |||
1103 | sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue); | ||
1104 | if (!sch->cpu_qstats) | ||
1105 | goto err_out4; | ||
1106 | } | ||
1107 | |||
1108 | if (tca[TCA_STAB]) { | 1097 | if (tca[TCA_STAB]) { |
1109 | stab = qdisc_get_stab(tca[TCA_STAB], extack); | 1098 | stab = qdisc_get_stab(tca[TCA_STAB], extack); |
1110 | if (IS_ERR(stab)) { | 1099 | if (IS_ERR(stab)) { |
@@ -1151,7 +1140,7 @@ err_out5: | |||
1151 | ops->destroy(sch); | 1140 | ops->destroy(sch); |
1152 | err_out3: | 1141 | err_out3: |
1153 | dev_put(dev); | 1142 | dev_put(dev); |
1154 | kfree((char *) sch - sch->padded); | 1143 | qdisc_free(sch); |
1155 | err_out2: | 1144 | err_out2: |
1156 | module_put(ops->owner); | 1145 | module_put(ops->owner); |
1157 | err_out: | 1146 | err_out: |
@@ -1159,8 +1148,6 @@ err_out: | |||
1159 | return NULL; | 1148 | return NULL; |
1160 | 1149 | ||
1161 | err_out4: | 1150 | err_out4: |
1162 | free_percpu(sch->cpu_bstats); | ||
1163 | free_percpu(sch->cpu_qstats); | ||
1164 | /* | 1151 | /* |
1165 | * Any broken qdiscs that would require a ops->reset() here? | 1152 | * Any broken qdiscs that would require a ops->reset() here? |
1166 | * The qdisc was never in action so it shouldn't be necessary. | 1153 | * The qdisc was never in action so it shouldn't be necessary. |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a883c501d5ec..ef8b4ecde2ac 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -907,7 +907,7 @@ void qdisc_reset(struct Qdisc *qdisc) | |||
907 | } | 907 | } |
908 | EXPORT_SYMBOL(qdisc_reset); | 908 | EXPORT_SYMBOL(qdisc_reset); |
909 | 909 | ||
910 | static void qdisc_free(struct Qdisc *qdisc) | 910 | void qdisc_free(struct Qdisc *qdisc) |
911 | { | 911 | { |
912 | if (qdisc_is_percpu_stats(qdisc)) { | 912 | if (qdisc_is_percpu_stats(qdisc)) { |
913 | free_percpu(qdisc->cpu_bstats); | 913 | free_percpu(qdisc->cpu_bstats); |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 7ca2be20dd6f..3372dd5e984d 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
@@ -68,7 +68,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt, | |||
68 | { | 68 | { |
69 | struct ingress_sched_data *q = qdisc_priv(sch); | 69 | struct ingress_sched_data *q = qdisc_priv(sch); |
70 | struct net_device *dev = qdisc_dev(sch); | 70 | struct net_device *dev = qdisc_dev(sch); |
71 | int err; | ||
72 | 71 | ||
73 | net_inc_ingress_queue(); | 72 | net_inc_ingress_queue(); |
74 | 73 | ||
@@ -78,13 +77,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt, | |||
78 | q->block_info.chain_head_change = clsact_chain_head_change; | 77 | q->block_info.chain_head_change = clsact_chain_head_change; |
79 | q->block_info.chain_head_change_priv = &q->miniqp; | 78 | q->block_info.chain_head_change_priv = &q->miniqp; |
80 | 79 | ||
81 | err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack); | 80 | return tcf_block_get_ext(&q->block, sch, &q->block_info, extack); |
82 | if (err) | ||
83 | return err; | ||
84 | |||
85 | sch->flags |= TCQ_F_CPUSTATS; | ||
86 | |||
87 | return 0; | ||
88 | } | 81 | } |
89 | 82 | ||
90 | static void ingress_destroy(struct Qdisc *sch) | 83 | static void ingress_destroy(struct Qdisc *sch) |
@@ -123,6 +116,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = { | |||
123 | .cl_ops = &ingress_class_ops, | 116 | .cl_ops = &ingress_class_ops, |
124 | .id = "ingress", | 117 | .id = "ingress", |
125 | .priv_size = sizeof(struct ingress_sched_data), | 118 | .priv_size = sizeof(struct ingress_sched_data), |
119 | .static_flags = TCQ_F_CPUSTATS, | ||
126 | .init = ingress_init, | 120 | .init = ingress_init, |
127 | .destroy = ingress_destroy, | 121 | .destroy = ingress_destroy, |
128 | .dump = ingress_dump, | 122 | .dump = ingress_dump, |
@@ -197,14 +191,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, | |||
197 | q->egress_block_info.chain_head_change = clsact_chain_head_change; | 191 | q->egress_block_info.chain_head_change = clsact_chain_head_change; |
198 | q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; | 192 | q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; |
199 | 193 | ||
200 | err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, | 194 | return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack); |
201 | extack); | ||
202 | if (err) | ||
203 | return err; | ||
204 | |||
205 | sch->flags |= TCQ_F_CPUSTATS; | ||
206 | |||
207 | return 0; | ||
208 | } | 195 | } |
209 | 196 | ||
210 | static void clsact_destroy(struct Qdisc *sch) | 197 | static void clsact_destroy(struct Qdisc *sch) |
@@ -231,6 +218,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = { | |||
231 | .cl_ops = &clsact_class_ops, | 218 | .cl_ops = &clsact_class_ops, |
232 | .id = "clsact", | 219 | .id = "clsact", |
233 | .priv_size = sizeof(struct clsact_sched_data), | 220 | .priv_size = sizeof(struct clsact_sched_data), |
221 | .static_flags = TCQ_F_CPUSTATS, | ||
234 | .init = clsact_init, | 222 | .init = clsact_init, |
235 | .destroy = clsact_destroy, | 223 | .destroy = clsact_destroy, |
236 | .dump = ingress_dump, | 224 | .dump = ingress_dump, |
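The sch_api/sch_generic/sch_ingress hunks above move the per-CPU stats allocation out of the individual qdiscs: ingress and clsact now only declare .static_flags = TCQ_F_CPUSTATS, and the common allocation and teardown code (including the now-exported qdisc_free()) honors the flag. Below is a userspace sketch of that "declare a static flag, let common code act on it" shape; the sched_ops/sched types and the four-slot stats array are simplifications invented for the example.

#include <stdio.h>
#include <stdlib.h>

#define F_PERCPU_STATS 0x1

/* Simplified stand-ins for Qdisc_ops / Qdisc. */
struct sched_ops {
	const char *id;
	unsigned int static_flags;	/* applied by the core, not by each init() */
};

struct sched {
	const struct sched_ops *ops;
	unsigned int flags;
	void *percpu_stats;		/* only allocated when the flag is set */
};

static struct sched *sched_alloc(const struct sched_ops *ops)
{
	struct sched *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->ops = ops;
	s->flags |= ops->static_flags;	/* common code applies the static flags */

	if (s->flags & F_PERCPU_STATS) {
		s->percpu_stats = calloc(4, sizeof(long));	/* pretend: one slot per CPU */
		if (!s->percpu_stats) {
			free(s);
			return NULL;
		}
	}
	return s;
}

static void sched_free(struct sched *s)
{
	if (!s)
		return;
	if (s->flags & F_PERCPU_STATS)
		free(s->percpu_stats);
	free(s);
}

static const struct sched_ops ingress_ops = {
	.id = "ingress",
	.static_flags = F_PERCPU_STATS,	/* init() no longer has to set it by hand */
};

int main(void)
{
	struct sched *s = sched_alloc(&ingress_ops);

	if (s)
		printf("%s: percpu stats %sallocated\n",
		       s->ops->id, s->percpu_stats ? "" : "not ");
	sched_free(s);
	return 0;
}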
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 3b18085e3b10..5d4c15bf66d2 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -826,6 +826,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) | |||
826 | case AF_INET: | 826 | case AF_INET: |
827 | if (!__ipv6_only_sock(sctp_opt2sk(sp))) | 827 | if (!__ipv6_only_sock(sctp_opt2sk(sp))) |
828 | return 1; | 828 | return 1; |
829 | /* fallthru */ | ||
829 | default: | 830 | default: |
830 | return 0; | 831 | return 0; |
831 | } | 832 | } |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index af9b5ebcae50..f211b3db6a35 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
@@ -916,9 +916,9 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) | |||
916 | break; | 916 | break; |
917 | 917 | ||
918 | case SCTP_CID_ABORT: | 918 | case SCTP_CID_ABORT: |
919 | if (sctp_test_T_bit(chunk)) { | 919 | if (sctp_test_T_bit(chunk)) |
920 | packet->vtag = asoc->c.my_vtag; | 920 | packet->vtag = asoc->c.my_vtag; |
921 | } | 921 | /* fallthru */ |
922 | /* The following chunks are "response" chunks, i.e. | 922 | /* The following chunks are "response" chunks, i.e. |
923 | * they are generated in response to something we | 923 | * they are generated in response to something we |
924 | * received. If we are sending these, then we can | 924 | * received. If we are sending these, then we can |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6a54ff06c9da..7ff444ecee75 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -85,7 +85,7 @@ | |||
85 | static int sctp_writeable(struct sock *sk); | 85 | static int sctp_writeable(struct sock *sk); |
86 | static void sctp_wfree(struct sk_buff *skb); | 86 | static void sctp_wfree(struct sk_buff *skb); |
87 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | 87 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, |
88 | size_t msg_len, struct sock **orig_sk); | 88 | size_t msg_len); |
89 | static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); | 89 | static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); |
90 | static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); | 90 | static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); |
91 | static int sctp_wait_for_accept(struct sock *sk, long timeo); | 91 | static int sctp_wait_for_accept(struct sock *sk, long timeo); |
@@ -351,16 +351,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, | |||
351 | if (len < sizeof (struct sockaddr)) | 351 | if (len < sizeof (struct sockaddr)) |
352 | return NULL; | 352 | return NULL; |
353 | 353 | ||
354 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | ||
355 | return NULL; | ||
356 | |||
354 | /* V4 mapped address are really of AF_INET family */ | 357 | /* V4 mapped address are really of AF_INET family */ |
355 | if (addr->sa.sa_family == AF_INET6 && | 358 | if (addr->sa.sa_family == AF_INET6 && |
356 | ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { | 359 | ipv6_addr_v4mapped(&addr->v6.sin6_addr) && |
357 | if (!opt->pf->af_supported(AF_INET, opt)) | 360 | !opt->pf->af_supported(AF_INET, opt)) |
358 | return NULL; | 361 | return NULL; |
359 | } else { | ||
360 | /* Does this PF support this AF? */ | ||
361 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | ||
362 | return NULL; | ||
363 | } | ||
364 | 362 | ||
365 | /* If we get this far, af is valid. */ | 363 | /* If we get this far, af is valid. */ |
366 | af = sctp_get_af_specific(addr->sa.sa_family); | 364 | af = sctp_get_af_specific(addr->sa.sa_family); |
@@ -1900,8 +1898,14 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
1900 | */ | 1898 | */ |
1901 | if (sinit) { | 1899 | if (sinit) { |
1902 | if (sinit->sinit_num_ostreams) { | 1900 | if (sinit->sinit_num_ostreams) { |
1903 | asoc->c.sinit_num_ostreams = | 1901 | __u16 outcnt = sinit->sinit_num_ostreams; |
1904 | sinit->sinit_num_ostreams; | 1902 | |
1903 | asoc->c.sinit_num_ostreams = outcnt; | ||
1904 | /* outcnt has been changed, so re-init stream */ | ||
1905 | err = sctp_stream_init(&asoc->stream, outcnt, 0, | ||
1906 | GFP_KERNEL); | ||
1907 | if (err) | ||
1908 | goto out_free; | ||
1905 | } | 1909 | } |
1906 | if (sinit->sinit_max_instreams) { | 1910 | if (sinit->sinit_max_instreams) { |
1907 | asoc->c.sinit_max_instreams = | 1911 | asoc->c.sinit_max_instreams = |
@@ -1988,7 +1992,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
1988 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | 1992 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); |
1989 | if (!sctp_wspace(asoc)) { | 1993 | if (!sctp_wspace(asoc)) { |
1990 | /* sk can be changed by peel off when waiting for buf. */ | 1994 | /* sk can be changed by peel off when waiting for buf. */ |
1991 | err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk); | 1995 | err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); |
1992 | if (err) { | 1996 | if (err) { |
1993 | if (err == -ESRCH) { | 1997 | if (err == -ESRCH) { |
1994 | /* asoc is already dead. */ | 1998 | /* asoc is already dead. */ |
@@ -8121,12 +8125,12 @@ void sctp_sock_rfree(struct sk_buff *skb) | |||
8121 | 8125 | ||
8122 | /* Helper function to wait for space in the sndbuf. */ | 8126 | /* Helper function to wait for space in the sndbuf. */ |
8123 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | 8127 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, |
8124 | size_t msg_len, struct sock **orig_sk) | 8128 | size_t msg_len) |
8125 | { | 8129 | { |
8126 | struct sock *sk = asoc->base.sk; | 8130 | struct sock *sk = asoc->base.sk; |
8127 | int err = 0; | ||
8128 | long current_timeo = *timeo_p; | 8131 | long current_timeo = *timeo_p; |
8129 | DEFINE_WAIT(wait); | 8132 | DEFINE_WAIT(wait); |
8133 | int err = 0; | ||
8130 | 8134 | ||
8131 | pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, | 8135 | pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, |
8132 | *timeo_p, msg_len); | 8136 | *timeo_p, msg_len); |
@@ -8155,17 +8159,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | |||
8155 | release_sock(sk); | 8159 | release_sock(sk); |
8156 | current_timeo = schedule_timeout(current_timeo); | 8160 | current_timeo = schedule_timeout(current_timeo); |
8157 | lock_sock(sk); | 8161 | lock_sock(sk); |
8158 | if (sk != asoc->base.sk) { | 8162 | if (sk != asoc->base.sk) |
8159 | release_sock(sk); | 8163 | goto do_error; |
8160 | sk = asoc->base.sk; | ||
8161 | lock_sock(sk); | ||
8162 | } | ||
8163 | 8164 | ||
8164 | *timeo_p = current_timeo; | 8165 | *timeo_p = current_timeo; |
8165 | } | 8166 | } |
8166 | 8167 | ||
8167 | out: | 8168 | out: |
8168 | *orig_sk = sk; | ||
8169 | finish_wait(&asoc->wait, &wait); | 8169 | finish_wait(&asoc->wait, &wait); |
8170 | 8170 | ||
8171 | /* Release the association's refcnt. */ | 8171 | /* Release the association's refcnt. */ |
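In the sctp_sockaddr_af() hunk above, the rewritten check runs the generic af_supported() test once up front and keeps only the v4-mapped case as an extra condition, instead of duplicating the call across two branches. A small sketch of that refactor with stub address types; the enum, struct, and policy function are assumptions made for the example.

#include <stdbool.h>
#include <stdio.h>

enum family { FAM_INET, FAM_INET6 };

struct addr {
	enum family family;
	bool v4_mapped;	/* only meaningful for FAM_INET6 */
};

/* Stub policy: pretend the socket supports both families. */
static bool af_supported(enum family fam)
{
	return fam == FAM_INET || fam == FAM_INET6;
}

static bool addr_ok(const struct addr *a)
{
	/* Generic check first, once. */
	if (!af_supported(a->family))
		return false;

	/* V4-mapped v6 addresses are really AF_INET, so they additionally
	 * need AF_INET support. */
	if (a->family == FAM_INET6 && a->v4_mapped && !af_supported(FAM_INET))
		return false;

	return true;
}

int main(void)
{
	struct addr plain6 = { FAM_INET6, false };
	struct addr mapped = { FAM_INET6, true };

	printf("plain v6:  %s\n", addr_ok(&plain6) ? "ok" : "rejected");
	printf("v4-mapped: %s\n", addr_ok(&mapped) ? "ok" : "rejected");
	return 0;
}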
diff --git a/net/tipc/node.c b/net/tipc/node.c index 507017fe0f1b..9036d8756e73 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -1880,36 +1880,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info) | |||
1880 | 1880 | ||
1881 | if (strcmp(name, tipc_bclink_name) == 0) { | 1881 | if (strcmp(name, tipc_bclink_name) == 0) { |
1882 | err = tipc_nl_add_bc_link(net, &msg); | 1882 | err = tipc_nl_add_bc_link(net, &msg); |
1883 | if (err) { | 1883 | if (err) |
1884 | nlmsg_free(msg.skb); | 1884 | goto err_free; |
1885 | return err; | ||
1886 | } | ||
1887 | } else { | 1885 | } else { |
1888 | int bearer_id; | 1886 | int bearer_id; |
1889 | struct tipc_node *node; | 1887 | struct tipc_node *node; |
1890 | struct tipc_link *link; | 1888 | struct tipc_link *link; |
1891 | 1889 | ||
1892 | node = tipc_node_find_by_name(net, name, &bearer_id); | 1890 | node = tipc_node_find_by_name(net, name, &bearer_id); |
1893 | if (!node) | 1891 | if (!node) { |
1894 | return -EINVAL; | 1892 | err = -EINVAL; |
1893 | goto err_free; | ||
1894 | } | ||
1895 | 1895 | ||
1896 | tipc_node_read_lock(node); | 1896 | tipc_node_read_lock(node); |
1897 | link = node->links[bearer_id].link; | 1897 | link = node->links[bearer_id].link; |
1898 | if (!link) { | 1898 | if (!link) { |
1899 | tipc_node_read_unlock(node); | 1899 | tipc_node_read_unlock(node); |
1900 | nlmsg_free(msg.skb); | 1900 | err = -EINVAL; |
1901 | return -EINVAL; | 1901 | goto err_free; |
1902 | } | 1902 | } |
1903 | 1903 | ||
1904 | err = __tipc_nl_add_link(net, &msg, link, 0); | 1904 | err = __tipc_nl_add_link(net, &msg, link, 0); |
1905 | tipc_node_read_unlock(node); | 1905 | tipc_node_read_unlock(node); |
1906 | if (err) { | 1906 | if (err) |
1907 | nlmsg_free(msg.skb); | 1907 | goto err_free; |
1908 | return err; | ||
1909 | } | ||
1910 | } | 1908 | } |
1911 | 1909 | ||
1912 | return genlmsg_reply(msg.skb, info); | 1910 | return genlmsg_reply(msg.skb, info); |
1911 | |||
1912 | err_free: | ||
1913 | nlmsg_free(msg.skb); | ||
1914 | return err; | ||
1913 | } | 1915 | } |
1914 | 1916 | ||
1915 | int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) | 1917 | int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info) |
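The tipc hunk above collapses four copies of "nlmsg_free(); return err;" into a single err_free label. Below is a sketch of that single-cleanup-path idiom with a plain malloc'd buffer in place of the netlink message; the link-lookup helpers are invented for the example.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend lookups for the sketch. */
static int fill_broadcast(char *buf, size_t len)
{
	snprintf(buf, len, "broadcast link stats");
	return 0;
}

static int find_link(const char *name)
{
	return strcmp(name, "eth0") == 0 ? 0 : -ENOENT;
}

static int get_link_reply(const char *name, char **reply)
{
	char *buf = malloc(64);
	int err;

	if (!buf)
		return -ENOMEM;

	if (strcmp(name, "broadcast") == 0) {
		err = fill_broadcast(buf, 64);
		if (err)
			goto err_free;
	} else {
		err = find_link(name);
		if (err)
			goto err_free;		/* every failure funnels here */
		snprintf(buf, 64, "stats for %s", name);
	}

	*reply = buf;
	return 0;

err_free:					/* one place frees the buffer */
	free(buf);
	return err;
}

int main(void)
{
	char *reply;

	if (get_link_reply("eth0", &reply) == 0) {
		printf("%s\n", reply);
		free(reply);
	}
	if (get_link_reply("missing", &reply) != 0)
		printf("lookup failed, buffer already freed\n");
	return 0;
}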
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 73d19210dd49..9773571b6a34 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -391,7 +391,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) | |||
391 | 391 | ||
392 | while (msg_data_left(msg)) { | 392 | while (msg_data_left(msg)) { |
393 | if (sk->sk_err) { | 393 | if (sk->sk_err) { |
394 | ret = sk->sk_err; | 394 | ret = -sk->sk_err; |
395 | goto send_end; | 395 | goto send_end; |
396 | } | 396 | } |
397 | 397 | ||
@@ -544,7 +544,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, | |||
544 | size_t copy, required_size; | 544 | size_t copy, required_size; |
545 | 545 | ||
546 | if (sk->sk_err) { | 546 | if (sk->sk_err) { |
547 | ret = sk->sk_err; | 547 | ret = -sk->sk_err; |
548 | goto sendpage_end; | 548 | goto sendpage_end; |
549 | } | 549 | } |
550 | 550 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index fdde0d98fde1..a6f3cac8c640 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -439,6 +439,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, | |||
439 | if (rv) | 439 | if (rv) |
440 | goto use_default_name; | 440 | goto use_default_name; |
441 | } else { | 441 | } else { |
442 | int rv; | ||
443 | |||
442 | use_default_name: | 444 | use_default_name: |
443 | /* NOTE: This is *probably* safe w/out holding rtnl because of | 445 | /* NOTE: This is *probably* safe w/out holding rtnl because of |
444 | * the restrictions on phy names. Probably this call could | 446 | * the restrictions on phy names. Probably this call could |
@@ -446,7 +448,11 @@ use_default_name: | |||
446 | * phyX. But, might should add some locking and check return | 448 | * phyX. But, might should add some locking and check return |
447 | * value, and use a different name if this one exists? | 449 | * value, and use a different name if this one exists? |
448 | */ | 450 | */ |
449 | dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); | 451 | rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx); |
452 | if (rv < 0) { | ||
453 | kfree(rdev); | ||
454 | return NULL; | ||
455 | } | ||
450 | } | 456 | } |
451 | 457 | ||
452 | INIT_LIST_HEAD(&rdev->wiphy.wdev_list); | 458 | INIT_LIST_HEAD(&rdev->wiphy.wdev_list); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index d2f7e8b8a097..eaff636169c2 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -507,8 +507,6 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | |||
507 | void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, | 507 | void cfg80211_stop_nan(struct cfg80211_registered_device *rdev, |
508 | struct wireless_dev *wdev); | 508 | struct wireless_dev *wdev); |
509 | 509 | ||
510 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | ||
511 | |||
512 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 510 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
513 | #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) | 511 | #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) |
514 | #else | 512 | #else |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c084dd2205ac..b48eb6d104c9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -2642,12 +2642,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
2642 | const u8 *ssid_ie; | 2642 | const u8 *ssid_ie; |
2643 | if (!wdev->current_bss) | 2643 | if (!wdev->current_bss) |
2644 | break; | 2644 | break; |
2645 | rcu_read_lock(); | ||
2645 | ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, | 2646 | ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub, |
2646 | WLAN_EID_SSID); | 2647 | WLAN_EID_SSID); |
2647 | if (!ssid_ie) | 2648 | if (ssid_ie && |
2648 | break; | 2649 | nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2)) |
2649 | if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2)) | 2650 | goto nla_put_failure_rcu_locked; |
2650 | goto nla_put_failure_locked; | 2651 | rcu_read_unlock(); |
2651 | break; | 2652 | break; |
2652 | } | 2653 | } |
2653 | default: | 2654 | default: |
@@ -2659,6 +2660,8 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag | |||
2659 | genlmsg_end(msg, hdr); | 2660 | genlmsg_end(msg, hdr); |
2660 | return 0; | 2661 | return 0; |
2661 | 2662 | ||
2663 | nla_put_failure_rcu_locked: | ||
2664 | rcu_read_unlock(); | ||
2662 | nla_put_failure_locked: | 2665 | nla_put_failure_locked: |
2663 | wdev_unlock(wdev); | 2666 | wdev_unlock(wdev); |
2664 | nla_put_failure: | 2667 | nla_put_failure: |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 78e71b0390be..7b42f0bacfd8 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -1769,8 +1769,7 @@ static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx, | |||
1769 | if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) | 1769 | if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) |
1770 | return; | 1770 | return; |
1771 | 1771 | ||
1772 | chan_before.center_freq = chan->center_freq; | 1772 | chan_before = *chan; |
1773 | chan_before.flags = chan->flags; | ||
1774 | 1773 | ||
1775 | if (chan->flags & IEEE80211_CHAN_NO_IR) { | 1774 | if (chan->flags & IEEE80211_CHAN_NO_IR) { |
1776 | chan->flags &= ~IEEE80211_CHAN_NO_IR; | 1775 | chan->flags &= ~IEEE80211_CHAN_NO_IR; |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 26b10eb7a206..1472c0857975 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -517,7 +517,7 @@ int xfrm_trans_queue(struct sk_buff *skb, | |||
517 | return -ENOBUFS; | 517 | return -ENOBUFS; |
518 | 518 | ||
519 | XFRM_TRANS_SKB_CB(skb)->finish = finish; | 519 | XFRM_TRANS_SKB_CB(skb)->finish = finish; |
520 | skb_queue_tail(&trans->queue, skb); | 520 | __skb_queue_tail(&trans->queue, skb); |
521 | tasklet_schedule(&trans->tasklet); | 521 | tasklet_schedule(&trans->tasklet); |
522 | return 0; | 522 | return 0; |
523 | } | 523 | } |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index d8a8129b9232..7a23078132cf 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -609,7 +609,8 @@ static void xfrm_hash_rebuild(struct work_struct *work) | |||
609 | 609 | ||
610 | /* re-insert all policies by order of creation */ | 610 | /* re-insert all policies by order of creation */ |
611 | list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { | 611 | list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { |
612 | if (xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) { | 612 | if (policy->walk.dead || |
613 | xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) { | ||
613 | /* skip socket policies */ | 614 | /* skip socket policies */ |
614 | continue; | 615 | continue; |
615 | } | 616 | } |
@@ -974,8 +975,6 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid) | |||
974 | } | 975 | } |
975 | if (!cnt) | 976 | if (!cnt) |
976 | err = -ESRCH; | 977 | err = -ESRCH; |
977 | else | ||
978 | xfrm_policy_cache_flush(); | ||
979 | out: | 978 | out: |
980 | spin_unlock_bh(&net->xfrm.xfrm_policy_lock); | 979 | spin_unlock_bh(&net->xfrm.xfrm_policy_lock); |
981 | return err; | 980 | return err; |
@@ -1744,6 +1743,8 @@ void xfrm_policy_cache_flush(void) | |||
1744 | bool found = 0; | 1743 | bool found = 0; |
1745 | int cpu; | 1744 | int cpu; |
1746 | 1745 | ||
1746 | might_sleep(); | ||
1747 | |||
1747 | local_bh_disable(); | 1748 | local_bh_disable(); |
1748 | rcu_read_lock(); | 1749 | rcu_read_lock(); |
1749 | for_each_possible_cpu(cpu) { | 1750 | for_each_possible_cpu(cpu) { |
@@ -2064,8 +2065,11 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, | |||
2064 | if (num_xfrms <= 0) | 2065 | if (num_xfrms <= 0) |
2065 | goto make_dummy_bundle; | 2066 | goto make_dummy_bundle; |
2066 | 2067 | ||
2068 | local_bh_disable(); | ||
2067 | xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, | 2069 | xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, |
2068 | xflo->dst_orig); | 2070 | xflo->dst_orig); |
2071 | local_bh_enable(); | ||
2072 | |||
2069 | if (IS_ERR(xdst)) { | 2073 | if (IS_ERR(xdst)) { |
2070 | err = PTR_ERR(xdst); | 2074 | err = PTR_ERR(xdst); |
2071 | if (err != -EAGAIN) | 2075 | if (err != -EAGAIN) |
@@ -2152,9 +2156,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
2152 | goto no_transform; | 2156 | goto no_transform; |
2153 | } | 2157 | } |
2154 | 2158 | ||
2159 | local_bh_disable(); | ||
2155 | xdst = xfrm_resolve_and_create_bundle( | 2160 | xdst = xfrm_resolve_and_create_bundle( |
2156 | pols, num_pols, fl, | 2161 | pols, num_pols, fl, |
2157 | family, dst_orig); | 2162 | family, dst_orig); |
2163 | local_bh_enable(); | ||
2164 | |||
2158 | if (IS_ERR(xdst)) { | 2165 | if (IS_ERR(xdst)) { |
2159 | xfrm_pols_put(pols, num_pols); | 2166 | xfrm_pols_put(pols, num_pols); |
2160 | err = PTR_ERR(xdst); | 2167 | err = PTR_ERR(xdst); |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index cc4c519cad76..20b1e414dbee 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -313,13 +313,14 @@ retry: | |||
313 | if ((type && !try_module_get(type->owner))) | 313 | if ((type && !try_module_get(type->owner))) |
314 | type = NULL; | 314 | type = NULL; |
315 | 315 | ||
316 | rcu_read_unlock(); | ||
317 | |||
316 | if (!type && try_load) { | 318 | if (!type && try_load) { |
317 | request_module("xfrm-offload-%d-%d", family, proto); | 319 | request_module("xfrm-offload-%d-%d", family, proto); |
318 | try_load = 0; | 320 | try_load = 0; |
319 | goto retry; | 321 | goto retry; |
320 | } | 322 | } |
321 | 323 | ||
322 | rcu_read_unlock(); | ||
323 | return type; | 324 | return type; |
324 | } | 325 | } |
325 | 326 | ||
@@ -1534,8 +1535,12 @@ out: | |||
1534 | err = -EINVAL; | 1535 | err = -EINVAL; |
1535 | spin_lock_bh(&x1->lock); | 1536 | spin_lock_bh(&x1->lock); |
1536 | if (likely(x1->km.state == XFRM_STATE_VALID)) { | 1537 | if (likely(x1->km.state == XFRM_STATE_VALID)) { |
1537 | if (x->encap && x1->encap) | 1538 | if (x->encap && x1->encap && |
1539 | x->encap->encap_type == x1->encap->encap_type) | ||
1538 | memcpy(x1->encap, x->encap, sizeof(*x1->encap)); | 1540 | memcpy(x1->encap, x->encap, sizeof(*x1->encap)); |
1541 | else if (x->encap || x1->encap) | ||
1542 | goto fail; | ||
1543 | |||
1539 | if (x->coaddr && x1->coaddr) { | 1544 | if (x->coaddr && x1->coaddr) { |
1540 | memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); | 1545 | memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); |
1541 | } | 1546 | } |
@@ -1552,6 +1557,8 @@ out: | |||
1552 | x->km.state = XFRM_STATE_DEAD; | 1557 | x->km.state = XFRM_STATE_DEAD; |
1553 | __xfrm_state_put(x); | 1558 | __xfrm_state_put(x); |
1554 | } | 1559 | } |
1560 | |||
1561 | fail: | ||
1555 | spin_unlock_bh(&x1->lock); | 1562 | spin_unlock_bh(&x1->lock); |
1556 | 1563 | ||
1557 | xfrm_state_put(x1); | 1564 | xfrm_state_put(x1); |
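The xfrm_state hunk above only copies the new encapsulation template when both states carry one of the same type, and fails the update when exactly one side has encapsulation or the types differ. A sketch of that guarded-update rule with a toy state structure; the types and field names are invented for illustration.

#include <stdio.h>
#include <string.h>

struct encap {
	int type;
	unsigned short sport, dport;
};

struct state {
	struct encap *encap;	/* NULL when the state has no encapsulation */
};

/* Update dst's encap from src; reject incompatible combinations. */
static int update_encap(struct state *dst, const struct state *src)
{
	if (src->encap && dst->encap &&
	    src->encap->type == dst->encap->type) {
		memcpy(dst->encap, src->encap, sizeof(*dst->encap));
		return 0;
	}
	if (src->encap || dst->encap)
		return -1;	/* only one side has encap, or the types differ */
	return 0;		/* neither side uses encapsulation: nothing to do */
}

int main(void)
{
	struct encap a = { .type = 1, .sport = 4500, .dport = 4500 };
	struct encap b = { .type = 1, .sport = 4501, .dport = 4500 };
	struct state dst = { &a }, src = { &b }, bare = { NULL };

	printf("same type:  %d\n", update_encap(&dst, &src));   /* 0, copied */
	printf("mismatched: %d\n", update_encap(&dst, &bare));  /* -1, rejected */
	return 0;
}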
diff --git a/scripts/genksyms/.gitignore b/scripts/genksyms/.gitignore index 86dc07a01b43..e7836b47f060 100644 --- a/scripts/genksyms/.gitignore +++ b/scripts/genksyms/.gitignore | |||
@@ -1,4 +1,3 @@ | |||
1 | *.hash.c | ||
2 | *.lex.c | 1 | *.lex.c |
3 | *.tab.c | 2 | *.tab.c |
4 | *.tab.h | 3 | *.tab.h |
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c index cbf4996dd9c1..8cee597d33a5 100644 --- a/scripts/kconfig/expr.c +++ b/scripts/kconfig/expr.c | |||
@@ -893,7 +893,10 @@ static enum string_value_kind expr_parse_string(const char *str, | |||
893 | switch (type) { | 893 | switch (type) { |
894 | case S_BOOLEAN: | 894 | case S_BOOLEAN: |
895 | case S_TRISTATE: | 895 | case S_TRISTATE: |
896 | return k_string; | 896 | val->s = !strcmp(str, "n") ? 0 : |
897 | !strcmp(str, "m") ? 1 : | ||
898 | !strcmp(str, "y") ? 2 : -1; | ||
899 | return k_signed; | ||
897 | case S_INT: | 900 | case S_INT: |
898 | val->s = strtoll(str, &tail, 10); | 901 | val->s = strtoll(str, &tail, 10); |
899 | kind = k_signed; | 902 | kind = k_signed; |
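The kconfig hunk above makes comparisons on bool/tristate symbols numeric by mapping "n", "m", "y" to 0, 1, 2 (and anything else to -1), so an expression comparing tristate values orders them as n < m < y rather than lexically. A standalone sketch of that mapping and why it matters ("m" sorts before "n" as a string):

#include <stdio.h>
#include <string.h>

/* Map a tristate string to its ordered value: n < m < y, unknown -> -1. */
static long long tristate_value(const char *str)
{
	return !strcmp(str, "n") ? 0 :
	       !strcmp(str, "m") ? 1 :
	       !strcmp(str, "y") ? 2 : -1;
}

int main(void)
{
	const char *lhs = "m", *rhs = "n";

	/* Lexical comparison says "m" < "n"; the tristate order says m > n. */
	printf("strcmp:   %s %s %s\n", lhs,
	       strcmp(lhs, rhs) > 0 ? ">" : "<=", rhs);
	printf("tristate: %s %s %s\n", lhs,
	       tristate_value(lhs) > tristate_value(rhs) ? ">" : "<=", rhs);
	return 0;
}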
diff --git a/security/Kconfig b/security/Kconfig index 3d4debd0257e..b0cb9a5f9448 100644 --- a/security/Kconfig +++ b/security/Kconfig | |||
@@ -63,7 +63,7 @@ config PAGE_TABLE_ISOLATION | |||
63 | ensuring that the majority of kernel addresses are not mapped | 63 | ensuring that the majority of kernel addresses are not mapped |
64 | into userspace. | 64 | into userspace. |
65 | 65 | ||
66 | See Documentation/x86/pagetable-isolation.txt for more details. | 66 | See Documentation/x86/pti.txt for more details. |
67 | 67 | ||
68 | config SECURITY_INFINIBAND | 68 | config SECURITY_INFINIBAND |
69 | bool "Infiniband Security Hooks" | 69 | bool "Infiniband Security Hooks" |
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 04ba9d0718ea..6a54d2ffa840 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c | |||
@@ -330,10 +330,7 @@ static struct aa_profile *__attach_match(const char *name, | |||
330 | continue; | 330 | continue; |
331 | 331 | ||
332 | if (profile->xmatch) { | 332 | if (profile->xmatch) { |
333 | if (profile->xmatch_len == len) { | 333 | if (profile->xmatch_len >= len) { |
334 | conflict = true; | ||
335 | continue; | ||
336 | } else if (profile->xmatch_len > len) { | ||
337 | unsigned int state; | 334 | unsigned int state; |
338 | u32 perm; | 335 | u32 perm; |
339 | 336 | ||
@@ -342,6 +339,10 @@ static struct aa_profile *__attach_match(const char *name, | |||
342 | perm = dfa_user_allow(profile->xmatch, state); | 339 | perm = dfa_user_allow(profile->xmatch, state); |
343 | /* any accepting state means a valid match. */ | 340 | /* any accepting state means a valid match. */ |
344 | if (perm & MAY_EXEC) { | 341 | if (perm & MAY_EXEC) { |
342 | if (profile->xmatch_len == len) { | ||
343 | conflict = true; | ||
344 | continue; | ||
345 | } | ||
345 | candidate = profile; | 346 | candidate = profile; |
346 | len = profile->xmatch_len; | 347 | len = profile->xmatch_len; |
347 | conflict = false; | 348 | conflict = false; |
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h index 2b27bb79aec4..d7b7e7115160 100644 --- a/security/apparmor/include/perms.h +++ b/security/apparmor/include/perms.h | |||
@@ -133,6 +133,9 @@ extern struct aa_perms allperms; | |||
133 | #define xcheck_labels_profiles(L1, L2, FN, args...) \ | 133 | #define xcheck_labels_profiles(L1, L2, FN, args...) \ |
134 | xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args) | 134 | xcheck_ns_labels((L1), (L2), xcheck_ns_profile_label, (FN), args) |
135 | 135 | ||
136 | #define xcheck_labels(L1, L2, P, FN1, FN2) \ | ||
137 | xcheck(fn_for_each((L1), (P), (FN1)), fn_for_each((L2), (P), (FN2))) | ||
138 | |||
136 | 139 | ||
137 | void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); | 140 | void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); |
138 | void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); | 141 | void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); |
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index 7ca0032e7ba9..b40678f3c1d5 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c | |||
@@ -64,40 +64,48 @@ static void audit_ptrace_cb(struct audit_buffer *ab, void *va) | |||
64 | FLAGS_NONE, GFP_ATOMIC); | 64 | FLAGS_NONE, GFP_ATOMIC); |
65 | } | 65 | } |
66 | 66 | ||
67 | /* assumes check for PROFILE_MEDIATES is already done */ | ||
67 | /* TODO: conditionals */ | 68 | /* TODO: conditionals */ |
68 | static int profile_ptrace_perm(struct aa_profile *profile, | 69 | static int profile_ptrace_perm(struct aa_profile *profile, |
69 | struct aa_profile *peer, u32 request, | 70 | struct aa_label *peer, u32 request, |
70 | struct common_audit_data *sa) | 71 | struct common_audit_data *sa) |
71 | { | 72 | { |
72 | struct aa_perms perms = { }; | 73 | struct aa_perms perms = { }; |
73 | 74 | ||
74 | /* need because of peer in cross check */ | 75 | aad(sa)->peer = peer; |
75 | if (profile_unconfined(profile) || | 76 | aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request, |
76 | !PROFILE_MEDIATES(profile, AA_CLASS_PTRACE)) | ||
77 | return 0; | ||
78 | |||
79 | aad(sa)->peer = &peer->label; | ||
80 | aa_profile_match_label(profile, &peer->label, AA_CLASS_PTRACE, request, | ||
81 | &perms); | 77 | &perms); |
82 | aa_apply_modes_to_perms(profile, &perms); | 78 | aa_apply_modes_to_perms(profile, &perms); |
83 | return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb); | 79 | return aa_check_perms(profile, &perms, request, sa, audit_ptrace_cb); |
84 | } | 80 | } |
85 | 81 | ||
86 | static int cross_ptrace_perm(struct aa_profile *tracer, | 82 | static int profile_tracee_perm(struct aa_profile *tracee, |
87 | struct aa_profile *tracee, u32 request, | 83 | struct aa_label *tracer, u32 request, |
88 | struct common_audit_data *sa) | 84 | struct common_audit_data *sa) |
89 | { | 85 | { |
86 | if (profile_unconfined(tracee) || unconfined(tracer) || | ||
87 | !PROFILE_MEDIATES(tracee, AA_CLASS_PTRACE)) | ||
88 | return 0; | ||
89 | |||
90 | return profile_ptrace_perm(tracee, tracer, request, sa); | ||
91 | } | ||
92 | |||
93 | static int profile_tracer_perm(struct aa_profile *tracer, | ||
94 | struct aa_label *tracee, u32 request, | ||
95 | struct common_audit_data *sa) | ||
96 | { | ||
97 | if (profile_unconfined(tracer)) | ||
98 | return 0; | ||
99 | |||
90 | if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE)) | 100 | if (PROFILE_MEDIATES(tracer, AA_CLASS_PTRACE)) |
91 | return xcheck(profile_ptrace_perm(tracer, tracee, request, sa), | 101 | return profile_ptrace_perm(tracer, tracee, request, sa); |
92 | profile_ptrace_perm(tracee, tracer, | 102 | |
93 | request << PTRACE_PERM_SHIFT, | 103 | /* profile uses the old style capability check for ptrace */ |
94 | sa)); | 104 | if (&tracer->label == tracee) |
95 | /* policy uses the old style capability check for ptrace */ | ||
96 | if (profile_unconfined(tracer) || tracer == tracee) | ||
97 | return 0; | 105 | return 0; |
98 | 106 | ||
99 | aad(sa)->label = &tracer->label; | 107 | aad(sa)->label = &tracer->label; |
100 | aad(sa)->peer = &tracee->label; | 108 | aad(sa)->peer = tracee; |
101 | aad(sa)->request = 0; | 109 | aad(sa)->request = 0; |
102 | aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1); | 110 | aad(sa)->error = aa_capable(&tracer->label, CAP_SYS_PTRACE, 1); |
103 | 111 | ||
@@ -115,10 +123,13 @@ static int cross_ptrace_perm(struct aa_profile *tracer, | |||
115 | int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, | 123 | int aa_may_ptrace(struct aa_label *tracer, struct aa_label *tracee, |
116 | u32 request) | 124 | u32 request) |
117 | { | 125 | { |
126 | struct aa_profile *profile; | ||
127 | u32 xrequest = request << PTRACE_PERM_SHIFT; | ||
118 | DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE); | 128 | DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, OP_PTRACE); |
119 | 129 | ||
120 | return xcheck_labels_profiles(tracer, tracee, cross_ptrace_perm, | 130 | return xcheck_labels(tracer, tracee, profile, |
121 | request, &sa); | 131 | profile_tracer_perm(profile, tracee, request, &sa), |
132 | profile_tracee_perm(profile, tracer, xrequest, &sa)); | ||
122 | } | 133 | } |
123 | 134 | ||
124 | 135 | ||
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index db7894bb028c..faa67861cbc1 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -560,7 +560,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b, | |||
560 | { | 560 | { |
561 | u_int64_t n = (u_int64_t) a * b; | 561 | u_int64_t n = (u_int64_t) a * b; |
562 | if (c == 0) { | 562 | if (c == 0) { |
563 | snd_BUG_ON(!n); | ||
564 | *r = 0; | 563 | *r = 0; |
565 | return UINT_MAX; | 564 | return UINT_MAX; |
566 | } | 565 | } |
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 6e22eea72654..d01913404581 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
@@ -221,6 +221,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize) | |||
221 | rwlock_init(&client->ports_lock); | 221 | rwlock_init(&client->ports_lock); |
222 | mutex_init(&client->ports_mutex); | 222 | mutex_init(&client->ports_mutex); |
223 | INIT_LIST_HEAD(&client->ports_list_head); | 223 | INIT_LIST_HEAD(&client->ports_list_head); |
224 | mutex_init(&client->ioctl_mutex); | ||
224 | 225 | ||
225 | /* find free slot in the client table */ | 226 | /* find free slot in the client table */ |
226 | spin_lock_irqsave(&clients_lock, flags); | 227 | spin_lock_irqsave(&clients_lock, flags); |
@@ -2130,7 +2131,9 @@ static long snd_seq_ioctl(struct file *file, unsigned int cmd, | |||
2130 | return -EFAULT; | 2131 | return -EFAULT; |
2131 | } | 2132 | } |
2132 | 2133 | ||
2134 | mutex_lock(&client->ioctl_mutex); | ||
2133 | err = handler->func(client, &buf); | 2135 | err = handler->func(client, &buf); |
2136 | mutex_unlock(&client->ioctl_mutex); | ||
2134 | if (err >= 0) { | 2137 | if (err >= 0) { |
2135 | /* Some commands includes a bug in 'dir' field. */ | 2138 | /* Some commands includes a bug in 'dir' field. */ |
2136 | if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT || | 2139 | if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT || |
diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h index c6614254ef8a..0611e1e0ed5b 100644 --- a/sound/core/seq/seq_clientmgr.h +++ b/sound/core/seq/seq_clientmgr.h | |||
@@ -61,6 +61,7 @@ struct snd_seq_client { | |||
61 | struct list_head ports_list_head; | 61 | struct list_head ports_list_head; |
62 | rwlock_t ports_lock; | 62 | rwlock_t ports_lock; |
63 | struct mutex ports_mutex; | 63 | struct mutex ports_mutex; |
64 | struct mutex ioctl_mutex; | ||
64 | int convert32; /* convert 32->64bit */ | 65 | int convert32; /* convert 32->64bit */ |
65 | 66 | ||
66 | /* output pool */ | 67 | /* output pool */ |
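The two ALSA sequencer hunks above add a per-client ioctl_mutex and take it around the ioctl handler call, so concurrent ioctls on the same client are serialized. Below is a pthread-based sketch of a per-object mutex guarding a handler dispatch; the client and handler types are invented for the example. Built with -lpthread, the final counter is exact only because the dispatch path holds the mutex.

#include <pthread.h>
#include <stdio.h>

/* Toy client: the counter stands in for state the handlers mutate. */
struct client {
	pthread_mutex_t ioctl_mutex;
	long value;
};

static int handler_add(struct client *c, long arg)
{
	c->value += arg;	/* not atomic on its own */
	return 0;
}

/* Dispatch with the per-client mutex held, as the patch does around handler->func(). */
static int client_ioctl(struct client *c, long arg)
{
	int err;

	pthread_mutex_lock(&c->ioctl_mutex);
	err = handler_add(c, arg);
	pthread_mutex_unlock(&c->ioctl_mutex);
	return err;
}

static void *worker(void *p)
{
	for (int i = 0; i < 100000; i++)
		client_ioctl(p, 1);
	return NULL;
}

int main(void)
{
	struct client c = { .ioctl_mutex = PTHREAD_MUTEX_INITIALIZER, .value = 0 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, &c);
	pthread_create(&t2, NULL, worker, &c);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("value = %ld (expect 200000)\n", c.value);
	return 0;
}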
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 80bbadc83721..d6e079f4ec09 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -408,6 +408,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = { | |||
408 | /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ | 408 | /*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/ |
409 | 409 | ||
410 | /* codec SSID */ | 410 | /* codec SSID */ |
411 | SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122), | ||
411 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), | 412 | SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81), |
412 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), | 413 | SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122), |
413 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), | 414 | SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8fd2d9c62c96..9aafc6c86132 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -6196,6 +6196,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6196 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | 6196 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
6197 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), | 6197 | SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), |
6198 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), | 6198 | SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER), |
6199 | SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), | ||
6199 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 6200 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
6200 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), | 6201 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
6201 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), | 6202 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index ae0272f9a091..e6acc281dd37 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile | |||
@@ -46,7 +46,7 @@ $(OBJTOOL_IN): fixdep FORCE | |||
46 | @$(MAKE) $(build)=objtool | 46 | @$(MAKE) $(build)=objtool |
47 | 47 | ||
48 | $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) | 48 | $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN) |
49 | @./sync-check.sh | 49 | @$(CONFIG_SHELL) ./sync-check.sh |
50 | $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ | 50 | $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@ |
51 | 51 | ||
52 | 52 | ||
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9b341584eb1b..f40d46e24bcc 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
@@ -428,6 +428,40 @@ static void add_ignores(struct objtool_file *file) | |||
428 | } | 428 | } |
429 | 429 | ||
430 | /* | 430 | /* |
431 | * FIXME: For now, just ignore any alternatives which add retpolines. This is | ||
432 | * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. | ||
433 | * But it at least allows objtool to understand the control flow *around* the | ||
434 | * retpoline. | ||
435 | */ | ||
436 | static int add_nospec_ignores(struct objtool_file *file) | ||
437 | { | ||
438 | struct section *sec; | ||
439 | struct rela *rela; | ||
440 | struct instruction *insn; | ||
441 | |||
442 | sec = find_section_by_name(file->elf, ".rela.discard.nospec"); | ||
443 | if (!sec) | ||
444 | return 0; | ||
445 | |||
446 | list_for_each_entry(rela, &sec->rela_list, list) { | ||
447 | if (rela->sym->type != STT_SECTION) { | ||
448 | WARN("unexpected relocation symbol type in %s", sec->name); | ||
449 | return -1; | ||
450 | } | ||
451 | |||
452 | insn = find_insn(file, rela->sym->sec, rela->addend); | ||
453 | if (!insn) { | ||
454 | WARN("bad .discard.nospec entry"); | ||
455 | return -1; | ||
456 | } | ||
457 | |||
458 | insn->ignore_alts = true; | ||
459 | } | ||
460 | |||
461 | return 0; | ||
462 | } | ||
463 | |||
464 | /* | ||
431 | * Find the destination instructions for all jumps. | 465 | * Find the destination instructions for all jumps. |
432 | */ | 466 | */ |
433 | static int add_jump_destinations(struct objtool_file *file) | 467 | static int add_jump_destinations(struct objtool_file *file) |
@@ -456,6 +490,13 @@ static int add_jump_destinations(struct objtool_file *file) | |||
456 | } else if (rela->sym->sec->idx) { | 490 | } else if (rela->sym->sec->idx) { |
457 | dest_sec = rela->sym->sec; | 491 | dest_sec = rela->sym->sec; |
458 | dest_off = rela->sym->sym.st_value + rela->addend + 4; | 492 | dest_off = rela->sym->sym.st_value + rela->addend + 4; |
493 | } else if (strstr(rela->sym->name, "_indirect_thunk_")) { | ||
494 | /* | ||
495 | * Retpoline jumps are really dynamic jumps in | ||
496 | * disguise, so convert them accordingly. | ||
497 | */ | ||
498 | insn->type = INSN_JUMP_DYNAMIC; | ||
499 | continue; | ||
459 | } else { | 500 | } else { |
460 | /* sibling call */ | 501 | /* sibling call */ |
461 | insn->jump_dest = 0; | 502 | insn->jump_dest = 0; |
@@ -502,11 +543,18 @@ static int add_call_destinations(struct objtool_file *file) | |||
502 | dest_off = insn->offset + insn->len + insn->immediate; | 543 | dest_off = insn->offset + insn->len + insn->immediate; |
503 | insn->call_dest = find_symbol_by_offset(insn->sec, | 544 | insn->call_dest = find_symbol_by_offset(insn->sec, |
504 | dest_off); | 545 | dest_off); |
546 | /* | ||
547 | * FIXME: Thanks to retpolines, it's now considered | ||
548 | * normal for a function to call within itself. So | ||
549 | * disable this warning for now. | ||
550 | */ | ||
551 | #if 0 | ||
505 | if (!insn->call_dest) { | 552 | if (!insn->call_dest) { |
506 | WARN_FUNC("can't find call dest symbol at offset 0x%lx", | 553 | WARN_FUNC("can't find call dest symbol at offset 0x%lx", |
507 | insn->sec, insn->offset, dest_off); | 554 | insn->sec, insn->offset, dest_off); |
508 | return -1; | 555 | return -1; |
509 | } | 556 | } |
557 | #endif | ||
510 | } else if (rela->sym->type == STT_SECTION) { | 558 | } else if (rela->sym->type == STT_SECTION) { |
511 | insn->call_dest = find_symbol_by_offset(rela->sym->sec, | 559 | insn->call_dest = find_symbol_by_offset(rela->sym->sec, |
512 | rela->addend+4); | 560 | rela->addend+4); |
@@ -671,12 +719,6 @@ static int add_special_section_alts(struct objtool_file *file) | |||
671 | return ret; | 719 | return ret; |
672 | 720 | ||
673 | list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { | 721 | list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { |
674 | alt = malloc(sizeof(*alt)); | ||
675 | if (!alt) { | ||
676 | WARN("malloc failed"); | ||
677 | ret = -1; | ||
678 | goto out; | ||
679 | } | ||
680 | 722 | ||
681 | orig_insn = find_insn(file, special_alt->orig_sec, | 723 | orig_insn = find_insn(file, special_alt->orig_sec, |
682 | special_alt->orig_off); | 724 | special_alt->orig_off); |
@@ -687,6 +729,10 @@ static int add_special_section_alts(struct objtool_file *file) | |||
687 | goto out; | 729 | goto out; |
688 | } | 730 | } |
689 | 731 | ||
732 | /* Ignore retpoline alternatives. */ | ||
733 | if (orig_insn->ignore_alts) | ||
734 | continue; | ||
735 | |||
690 | new_insn = NULL; | 736 | new_insn = NULL; |
691 | if (!special_alt->group || special_alt->new_len) { | 737 | if (!special_alt->group || special_alt->new_len) { |
692 | new_insn = find_insn(file, special_alt->new_sec, | 738 | new_insn = find_insn(file, special_alt->new_sec, |
@@ -712,6 +758,13 @@ static int add_special_section_alts(struct objtool_file *file) | |||
712 | goto out; | 758 | goto out; |
713 | } | 759 | } |
714 | 760 | ||
761 | alt = malloc(sizeof(*alt)); | ||
762 | if (!alt) { | ||
763 | WARN("malloc failed"); | ||
764 | ret = -1; | ||
765 | goto out; | ||
766 | } | ||
767 | |||
715 | alt->insn = new_insn; | 768 | alt->insn = new_insn; |
716 | list_add_tail(&alt->list, &orig_insn->alts); | 769 | list_add_tail(&alt->list, &orig_insn->alts); |
717 | 770 | ||
@@ -1028,6 +1081,10 @@ static int decode_sections(struct objtool_file *file) | |||
1028 | 1081 | ||
1029 | add_ignores(file); | 1082 | add_ignores(file); |
1030 | 1083 | ||
1084 | ret = add_nospec_ignores(file); | ||
1085 | if (ret) | ||
1086 | return ret; | ||
1087 | |||
1031 | ret = add_jump_destinations(file); | 1088 | ret = add_jump_destinations(file); |
1032 | if (ret) | 1089 | if (ret) |
1033 | return ret; | 1090 | return ret; |
diff --git a/tools/objtool/check.h b/tools/objtool/check.h index 47d9ea70a83d..dbadb304a410 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h | |||
@@ -44,7 +44,7 @@ struct instruction { | |||
44 | unsigned int len; | 44 | unsigned int len; |
45 | unsigned char type; | 45 | unsigned char type; |
46 | unsigned long immediate; | 46 | unsigned long immediate; |
47 | bool alt_group, visited, dead_end, ignore, hint, save, restore; | 47 | bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts; |
48 | struct symbol *call_dest; | 48 | struct symbol *call_dest; |
49 | struct instruction *jump_dest; | 49 | struct instruction *jump_dest; |
50 | struct list_head alts; | 50 | struct list_head alts; |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 543847957fdd..960179882a1c 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -274,6 +274,46 @@ static struct bpf_test tests[] = { | |||
274 | .result = REJECT, | 274 | .result = REJECT, |
275 | }, | 275 | }, |
276 | { | 276 | { |
277 | "arsh32 on imm", | ||
278 | .insns = { | ||
279 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
280 | BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5), | ||
281 | BPF_EXIT_INSN(), | ||
282 | }, | ||
283 | .result = REJECT, | ||
284 | .errstr = "BPF_ARSH not supported for 32 bit ALU", | ||
285 | }, | ||
286 | { | ||
287 | "arsh32 on reg", | ||
288 | .insns = { | ||
289 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
290 | BPF_MOV64_IMM(BPF_REG_1, 5), | ||
291 | BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), | ||
292 | BPF_EXIT_INSN(), | ||
293 | }, | ||
294 | .result = REJECT, | ||
295 | .errstr = "BPF_ARSH not supported for 32 bit ALU", | ||
296 | }, | ||
297 | { | ||
298 | "arsh64 on imm", | ||
299 | .insns = { | ||
300 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
301 | BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5), | ||
302 | BPF_EXIT_INSN(), | ||
303 | }, | ||
304 | .result = ACCEPT, | ||
305 | }, | ||
306 | { | ||
307 | "arsh64 on reg", | ||
308 | .insns = { | ||
309 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
310 | BPF_MOV64_IMM(BPF_REG_1, 5), | ||
311 | BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1), | ||
312 | BPF_EXIT_INSN(), | ||
313 | }, | ||
314 | .result = ACCEPT, | ||
315 | }, | ||
316 | { | ||
277 | "no bpf_exit", | 317 | "no bpf_exit", |
278 | .insns = { | 318 | .insns = { |
279 | BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), | 319 | BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), |
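The new verifier tests cover BPF_ARSH, the arithmetic (sign-extending) right shift: at the time of this change the verifier rejects it in 32-bit ALU mode ("BPF_ARSH not supported for 32 bit ALU") and accepts it only as a 64-bit ALU operation. A small user-space illustration of the semantics ARSH provides, compared with a logical shift; the helper name is purely illustrative, and the shift is rebuilt portably because signed `>>` on a negative value is implementation-defined in C:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Arithmetic right shift: logical shift, then replicate the sign bit. */
	static int64_t arsh64(int64_t v, unsigned int n)
	{
		uint64_t u = (uint64_t)v >> n;		/* logical shift */

		if (v < 0 && n)
			u |= ~(~(uint64_t)0 >> n);	/* re-install the sign bits */
		return (int64_t)u;
	}

	int main(void)
	{
		int64_t v = -16;

		printf("logical:    %" PRIu64 "\n", (uint64_t)v >> 2);	/* 0x3ffffffffffffffc */
		printf("arithmetic: %" PRId64 "\n", arsh64(v, 2));	/* -4 */
		return 0;
	}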
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 939a337128db..5d4f10ac2af2 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile | |||
@@ -7,7 +7,7 @@ include ../lib.mk | |||
7 | 7 | ||
8 | TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ | 8 | TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \ |
9 | check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ | 9 | check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test ioperm \ |
10 | protection_keys test_vdso | 10 | protection_keys test_vdso test_vsyscall |
11 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ | 11 | TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \ |
12 | test_FCMOV test_FCOMI test_FISTTP \ | 12 | test_FCMOV test_FCOMI test_FISTTP \ |
13 | vdso_restorer | 13 | vdso_restorer |
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c new file mode 100644 index 000000000000..7a744fa7b786 --- /dev/null +++ b/tools/testing/selftests/x86/test_vsyscall.c | |||
@@ -0,0 +1,500 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #define _GNU_SOURCE | ||
4 | |||
5 | #include <stdio.h> | ||
6 | #include <sys/time.h> | ||
7 | #include <time.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <sys/syscall.h> | ||
10 | #include <unistd.h> | ||
11 | #include <dlfcn.h> | ||
12 | #include <string.h> | ||
13 | #include <inttypes.h> | ||
14 | #include <signal.h> | ||
15 | #include <sys/ucontext.h> | ||
16 | #include <errno.h> | ||
17 | #include <err.h> | ||
18 | #include <sched.h> | ||
19 | #include <stdbool.h> | ||
20 | #include <setjmp.h> | ||
21 | |||
22 | #ifdef __x86_64__ | ||
23 | # define VSYS(x) (x) | ||
24 | #else | ||
25 | # define VSYS(x) 0 | ||
26 | #endif | ||
27 | |||
28 | #ifndef SYS_getcpu | ||
29 | # ifdef __x86_64__ | ||
30 | # define SYS_getcpu 309 | ||
31 | # else | ||
32 | # define SYS_getcpu 318 | ||
33 | # endif | ||
34 | #endif | ||
35 | |||
36 | static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *), | ||
37 | int flags) | ||
38 | { | ||
39 | struct sigaction sa; | ||
40 | memset(&sa, 0, sizeof(sa)); | ||
41 | sa.sa_sigaction = handler; | ||
42 | sa.sa_flags = SA_SIGINFO | flags; | ||
43 | sigemptyset(&sa.sa_mask); | ||
44 | if (sigaction(sig, &sa, 0)) | ||
45 | err(1, "sigaction"); | ||
46 | } | ||
47 | |||
48 | /* vsyscalls and vDSO */ | ||
49 | bool should_read_vsyscall = false; | ||
50 | |||
51 | typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz); | ||
52 | gtod_t vgtod = (gtod_t)VSYS(0xffffffffff600000); | ||
53 | gtod_t vdso_gtod; | ||
54 | |||
55 | typedef int (*vgettime_t)(clockid_t, struct timespec *); | ||
56 | vgettime_t vdso_gettime; | ||
57 | |||
58 | typedef long (*time_func_t)(time_t *t); | ||
59 | time_func_t vtime = (time_func_t)VSYS(0xffffffffff600400); | ||
60 | time_func_t vdso_time; | ||
61 | |||
62 | typedef long (*getcpu_t)(unsigned *, unsigned *, void *); | ||
63 | getcpu_t vgetcpu = (getcpu_t)VSYS(0xffffffffff600800); | ||
64 | getcpu_t vdso_getcpu; | ||
65 | |||
66 | static void init_vdso(void) | ||
67 | { | ||
68 | void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); | ||
69 | if (!vdso) | ||
70 | vdso = dlopen("linux-gate.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD); | ||
71 | if (!vdso) { | ||
72 | printf("[WARN]\tfailed to find vDSO\n"); | ||
73 | return; | ||
74 | } | ||
75 | |||
76 | vdso_gtod = (gtod_t)dlsym(vdso, "__vdso_gettimeofday"); | ||
77 | if (!vdso_gtod) | ||
78 | printf("[WARN]\tfailed to find gettimeofday in vDSO\n"); | ||
79 | |||
80 | vdso_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime"); | ||
81 | if (!vdso_gettime) | ||
82 | printf("[WARN]\tfailed to find clock_gettime in vDSO\n"); | ||
83 | |||
84 | vdso_time = (time_func_t)dlsym(vdso, "__vdso_time"); | ||
85 | if (!vdso_time) | ||
86 | printf("[WARN]\tfailed to find time in vDSO\n"); | ||
87 | |||
88 | vdso_getcpu = (getcpu_t)dlsym(vdso, "__vdso_getcpu"); | ||
89 | if (!vdso_getcpu) { | ||
90 | /* getcpu() was never wired up in the 32-bit vDSO. */ | ||
91 | printf("[%s]\tfailed to find getcpu in vDSO\n", | ||
92 | sizeof(long) == 8 ? "WARN" : "NOTE"); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | static int init_vsys(void) | ||
97 | { | ||
98 | #ifdef __x86_64__ | ||
99 | int nerrs = 0; | ||
100 | FILE *maps; | ||
101 | char line[128]; | ||
102 | bool found = false; | ||
103 | |||
104 | maps = fopen("/proc/self/maps", "r"); | ||
105 | if (!maps) { | ||
106 | printf("[WARN]\tCould not open /proc/self/maps -- assuming vsyscall is r-x\n"); | ||
107 | should_read_vsyscall = true; | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | while (fgets(line, sizeof(line), maps)) { | ||
112 | char r, x; | ||
113 | void *start, *end; | ||
114 | char name[128]; | ||
115 | if (sscanf(line, "%p-%p %c-%cp %*x %*x:%*x %*u %s", | ||
116 | &start, &end, &r, &x, name) != 5) | ||
117 | continue; | ||
118 | |||
119 | if (strcmp(name, "[vsyscall]")) | ||
120 | continue; | ||
121 | |||
122 | printf("\tvsyscall map: %s", line); | ||
123 | |||
124 | if (start != (void *)0xffffffffff600000 || | ||
125 | end != (void *)0xffffffffff601000) { | ||
126 | printf("[FAIL]\taddress range is nonsense\n"); | ||
127 | nerrs++; | ||
128 | } | ||
129 | |||
130 | printf("\tvsyscall permissions are %c-%c\n", r, x); | ||
131 | should_read_vsyscall = (r == 'r'); | ||
132 | if (x != 'x') { | ||
133 | vgtod = NULL; | ||
134 | vtime = NULL; | ||
135 | vgetcpu = NULL; | ||
136 | } | ||
137 | |||
138 | found = true; | ||
139 | break; | ||
140 | } | ||
141 | |||
142 | fclose(maps); | ||
143 | |||
144 | if (!found) { | ||
145 | printf("\tno vsyscall map in /proc/self/maps\n"); | ||
146 | should_read_vsyscall = false; | ||
147 | vgtod = NULL; | ||
148 | vtime = NULL; | ||
149 | vgetcpu = NULL; | ||
150 | } | ||
151 | |||
152 | return nerrs; | ||
153 | #else | ||
154 | return 0; | ||
155 | #endif | ||
156 | } | ||
157 | |||
158 | /* syscalls */ | ||
159 | static inline long sys_gtod(struct timeval *tv, struct timezone *tz) | ||
160 | { | ||
161 | return syscall(SYS_gettimeofday, tv, tz); | ||
162 | } | ||
163 | |||
164 | static inline int sys_clock_gettime(clockid_t id, struct timespec *ts) | ||
165 | { | ||
166 | return syscall(SYS_clock_gettime, id, ts); | ||
167 | } | ||
168 | |||
169 | static inline long sys_time(time_t *t) | ||
170 | { | ||
171 | return syscall(SYS_time, t); | ||
172 | } | ||
173 | |||
174 | static inline long sys_getcpu(unsigned *cpu, unsigned *node, | ||
175 | void *cache) | ||
176 | { | ||
177 | return syscall(SYS_getcpu, cpu, node, cache); | ||
178 | } | ||
179 | |||
180 | static jmp_buf jmpbuf; | ||
181 | |||
182 | static void sigsegv(int sig, siginfo_t *info, void *ctx_void) | ||
183 | { | ||
184 | siglongjmp(jmpbuf, 1); | ||
185 | } | ||
186 | |||
187 | static double tv_diff(const struct timeval *a, const struct timeval *b) | ||
188 | { | ||
189 | return (double)(a->tv_sec - b->tv_sec) + | ||
190 | (double)((int)a->tv_usec - (int)b->tv_usec) * 1e-6; | ||
191 | } | ||
192 | |||
193 | static int check_gtod(const struct timeval *tv_sys1, | ||
194 | const struct timeval *tv_sys2, | ||
195 | const struct timezone *tz_sys, | ||
196 | const char *which, | ||
197 | const struct timeval *tv_other, | ||
198 | const struct timezone *tz_other) | ||
199 | { | ||
200 | int nerrs = 0; | ||
201 | double d1, d2; | ||
202 | |||
203 | if (tz_other && (tz_sys->tz_minuteswest != tz_other->tz_minuteswest || tz_sys->tz_dsttime != tz_other->tz_dsttime)) { | ||
204 | printf("[FAIL] %s tz mismatch\n", which); | ||
205 | nerrs++; | ||
206 | } | ||
207 | |||
208 | d1 = tv_diff(tv_other, tv_sys1); | ||
209 | d2 = tv_diff(tv_sys2, tv_other); | ||
210 | printf("\t%s time offsets: %lf %lf\n", which, d1, d2); | ||
211 | |||
212 | if (d1 < 0 || d2 < 0) { | ||
213 | printf("[FAIL]\t%s time was inconsistent with the syscall\n", which); | ||
214 | nerrs++; | ||
215 | } else { | ||
216 | printf("[OK]\t%s gettimeofday()'s timeval was okay\n", which); | ||
217 | } | ||
218 | |||
219 | return nerrs; | ||
220 | } | ||
221 | |||
222 | static int test_gtod(void) | ||
223 | { | ||
224 | struct timeval tv_sys1, tv_sys2, tv_vdso, tv_vsys; | ||
225 | struct timezone tz_sys, tz_vdso, tz_vsys; | ||
226 | long ret_vdso = -1; | ||
227 | long ret_vsys = -1; | ||
228 | int nerrs = 0; | ||
229 | |||
230 | printf("[RUN]\ttest gettimeofday()\n"); | ||
231 | |||
232 | if (sys_gtod(&tv_sys1, &tz_sys) != 0) | ||
233 | err(1, "syscall gettimeofday"); | ||
234 | if (vdso_gtod) | ||
235 | ret_vdso = vdso_gtod(&tv_vdso, &tz_vdso); | ||
236 | if (vgtod) | ||
237 | ret_vsys = vgtod(&tv_vsys, &tz_vsys); | ||
238 | if (sys_gtod(&tv_sys2, &tz_sys) != 0) | ||
239 | err(1, "syscall gettimeofday"); | ||
240 | |||
241 | if (vdso_gtod) { | ||
242 | if (ret_vdso == 0) { | ||
243 | nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vDSO", &tv_vdso, &tz_vdso); | ||
244 | } else { | ||
245 | printf("[FAIL]\tvDSO gettimeofday() failed: %ld\n", ret_vdso); | ||
246 | nerrs++; | ||
247 | } | ||
248 | } | ||
249 | |||
250 | if (vgtod) { | ||
251 | if (ret_vsys == 0) { | ||
252 | nerrs += check_gtod(&tv_sys1, &tv_sys2, &tz_sys, "vsyscall", &tv_vsys, &tz_vsys); | ||
253 | } else { | ||
254 | printf("[FAIL]\tvsys gettimeofday() failed: %ld\n", ret_vsys); | ||
255 | nerrs++; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | return nerrs; | ||
260 | } | ||
261 | |||
262 | static int test_time(void) { | ||
263 | int nerrs = 0; | ||
264 | |||
265 | printf("[RUN]\ttest time()\n"); | ||
266 | long t_sys1, t_sys2, t_vdso = 0, t_vsys = 0; | ||
267 | long t2_sys1 = -1, t2_sys2 = -1, t2_vdso = -1, t2_vsys = -1; | ||
268 | t_sys1 = sys_time(&t2_sys1); | ||
269 | if (vdso_time) | ||
270 | t_vdso = vdso_time(&t2_vdso); | ||
271 | if (vtime) | ||
272 | t_vsys = vtime(&t2_vsys); | ||
273 | t_sys2 = sys_time(&t2_sys2); | ||
274 | if (t_sys1 < 0 || t_sys1 != t2_sys1 || t_sys2 < 0 || t_sys2 != t2_sys2) { | ||
275 | printf("[FAIL]\tsyscall failed (ret1:%ld output1:%ld ret2:%ld output2:%ld)\n", t_sys1, t2_sys1, t_sys2, t2_sys2); | ||
276 | nerrs++; | ||
277 | return nerrs; | ||
278 | } | ||
279 | |||
280 | if (vdso_time) { | ||
281 | if (t_vdso < 0 || t_vdso != t2_vdso) { | ||
282 | printf("[FAIL]\tvDSO failed (ret:%ld output:%ld)\n", t_vdso, t2_vdso); | ||
283 | nerrs++; | ||
284 | } else if (t_vdso < t_sys1 || t_vdso > t_sys2) { | ||
285 | printf("[FAIL]\tvDSO returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vdso, t_sys2); | ||
286 | nerrs++; | ||
287 | } else { | ||
288 | printf("[OK]\tvDSO time() is okay\n"); | ||
289 | } | ||
290 | } | ||
291 | |||
292 | if (vtime) { | ||
293 | if (t_vsys < 0 || t_vsys != t2_vsys) { | ||
294 | printf("[FAIL]\tvsyscall failed (ret:%ld output:%ld)\n", t_vsys, t2_vsys); | ||
295 | nerrs++; | ||
296 | } else if (t_vsys < t_sys1 || t_vsys > t_sys2) { | ||
297 | printf("[FAIL]\tvsyscall returned the wrong time (%ld %ld %ld)\n", t_sys1, t_vsys, t_sys2); | ||
298 | nerrs++; | ||
299 | } else { | ||
300 | printf("[OK]\tvsyscall time() is okay\n"); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | return nerrs; | ||
305 | } | ||
306 | |||
307 | static int test_getcpu(int cpu) | ||
308 | { | ||
309 | int nerrs = 0; | ||
310 | long ret_sys, ret_vdso = -1, ret_vsys = -1; | ||
311 | |||
312 | printf("[RUN]\tgetcpu() on CPU %d\n", cpu); | ||
313 | |||
314 | cpu_set_t cpuset; | ||
315 | CPU_ZERO(&cpuset); | ||
316 | CPU_SET(cpu, &cpuset); | ||
317 | if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0) { | ||
318 | printf("[SKIP]\tfailed to force CPU %d\n", cpu); | ||
319 | return nerrs; | ||
320 | } | ||
321 | |||
322 | unsigned cpu_sys, cpu_vdso, cpu_vsys, node_sys, node_vdso, node_vsys; | ||
323 | unsigned node = 0; | ||
324 | bool have_node = false; | ||
325 | ret_sys = sys_getcpu(&cpu_sys, &node_sys, 0); | ||
326 | if (vdso_getcpu) | ||
327 | ret_vdso = vdso_getcpu(&cpu_vdso, &node_vdso, 0); | ||
328 | if (vgetcpu) | ||
329 | ret_vsys = vgetcpu(&cpu_vsys, &node_vsys, 0); | ||
330 | |||
331 | if (ret_sys == 0) { | ||
332 | if (cpu_sys != cpu) { | ||
333 | printf("[FAIL]\tsyscall reported CPU %hu but should be %d\n", cpu_sys, cpu); | ||
334 | nerrs++; | ||
335 | } | ||
336 | |||
337 | have_node = true; | ||
338 | node = node_sys; | ||
339 | } | ||
340 | |||
341 | if (vdso_getcpu) { | ||
342 | if (ret_vdso) { | ||
343 | printf("[FAIL]\tvDSO getcpu() failed\n"); | ||
344 | nerrs++; | ||
345 | } else { | ||
346 | if (!have_node) { | ||
347 | have_node = true; | ||
348 | node = node_vdso; | ||
349 | } | ||
350 | |||
351 | if (cpu_vdso != cpu) { | ||
352 | printf("[FAIL]\tvDSO reported CPU %hu but should be %d\n", cpu_vdso, cpu); | ||
353 | nerrs++; | ||
354 | } else { | ||
355 | printf("[OK]\tvDSO reported correct CPU\n"); | ||
356 | } | ||
357 | |||
358 | if (node_vdso != node) { | ||
359 | printf("[FAIL]\tvDSO reported node %hu but should be %hu\n", node_vdso, node); | ||
360 | nerrs++; | ||
361 | } else { | ||
362 | printf("[OK]\tvDSO reported correct node\n"); | ||
363 | } | ||
364 | } | ||
365 | } | ||
366 | |||
367 | if (vgetcpu) { | ||
368 | if (ret_vsys) { | ||
369 | printf("[FAIL]\tvsyscall getcpu() failed\n"); | ||
370 | nerrs++; | ||
371 | } else { | ||
372 | if (!have_node) { | ||
373 | have_node = true; | ||
374 | node = node_vsys; | ||
375 | } | ||
376 | |||
377 | if (cpu_vsys != cpu) { | ||
378 | printf("[FAIL]\tvsyscall reported CPU %hu but should be %d\n", cpu_vsys, cpu); | ||
379 | nerrs++; | ||
380 | } else { | ||
381 | printf("[OK]\tvsyscall reported correct CPU\n"); | ||
382 | } | ||
383 | |||
384 | if (node_vsys != node) { | ||
385 | printf("[FAIL]\tvsyscall reported node %hu but should be %hu\n", node_vsys, node); | ||
386 | nerrs++; | ||
387 | } else { | ||
388 | printf("[OK]\tvsyscall reported correct node\n"); | ||
389 | } | ||
390 | } | ||
391 | } | ||
392 | |||
393 | return nerrs; | ||
394 | } | ||
395 | |||
396 | static int test_vsys_r(void) | ||
397 | { | ||
398 | #ifdef __x86_64__ | ||
399 | printf("[RUN]\tChecking read access to the vsyscall page\n"); | ||
400 | bool can_read; | ||
401 | if (sigsetjmp(jmpbuf, 1) == 0) { | ||
402 | *(volatile int *)0xffffffffff600000; | ||
403 | can_read = true; | ||
404 | } else { | ||
405 | can_read = false; | ||
406 | } | ||
407 | |||
408 | if (can_read && !should_read_vsyscall) { | ||
409 | printf("[FAIL]\tWe have read access, but we shouldn't\n"); | ||
410 | return 1; | ||
411 | } else if (!can_read && should_read_vsyscall) { | ||
412 | printf("[FAIL]\tWe don't have read access, but we should\n"); | ||
413 | return 1; | ||
414 | } else { | ||
415 | printf("[OK]\tgot expected result\n"); | ||
416 | } | ||
417 | #endif | ||
418 | |||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | |||
423 | #ifdef __x86_64__ | ||
424 | #define X86_EFLAGS_TF (1UL << 8) | ||
425 | static volatile sig_atomic_t num_vsyscall_traps; | ||
426 | |||
427 | static unsigned long get_eflags(void) | ||
428 | { | ||
429 | unsigned long eflags; | ||
430 | asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags)); | ||
431 | return eflags; | ||
432 | } | ||
433 | |||
434 | static void set_eflags(unsigned long eflags) | ||
435 | { | ||
436 | asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags"); | ||
437 | } | ||
438 | |||
439 | static void sigtrap(int sig, siginfo_t *info, void *ctx_void) | ||
440 | { | ||
441 | ucontext_t *ctx = (ucontext_t *)ctx_void; | ||
442 | unsigned long ip = ctx->uc_mcontext.gregs[REG_RIP]; | ||
443 | |||
444 | if (((ip ^ 0xffffffffff600000UL) & ~0xfffUL) == 0) | ||
445 | num_vsyscall_traps++; | ||
446 | } | ||
447 | |||
448 | static int test_native_vsyscall(void) | ||
449 | { | ||
450 | time_t tmp; | ||
451 | bool is_native; | ||
452 | |||
453 | if (!vtime) | ||
454 | return 0; | ||
455 | |||
456 | printf("[RUN]\tchecking for native vsyscall\n"); | ||
457 | sethandler(SIGTRAP, sigtrap, 0); | ||
458 | set_eflags(get_eflags() | X86_EFLAGS_TF); | ||
459 | vtime(&tmp); | ||
460 | set_eflags(get_eflags() & ~X86_EFLAGS_TF); | ||
461 | |||
462 | /* | ||
463 | * If vsyscalls are emulated, we expect a single trap in the | ||
464 | * vsyscall page -- the call instruction will trap with RIP | ||
465 | * pointing to the entry point before emulation takes over. | ||
466 | * In native mode, we expect two traps, since whatever code | ||
467 | * the vsyscall page contains will be more than just a ret | ||
468 | * instruction. | ||
469 | */ | ||
470 | is_native = (num_vsyscall_traps > 1); | ||
471 | |||
472 | printf("\tvsyscalls are %s (%d instructions in vsyscall page)\n", | ||
473 | (is_native ? "native" : "emulated"), | ||
474 | (int)num_vsyscall_traps); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | #endif | ||
479 | |||
480 | int main(int argc, char **argv) | ||
481 | { | ||
482 | int nerrs = 0; | ||
483 | |||
484 | init_vdso(); | ||
485 | nerrs += init_vsys(); | ||
486 | |||
487 | nerrs += test_gtod(); | ||
488 | nerrs += test_time(); | ||
489 | nerrs += test_getcpu(0); | ||
490 | nerrs += test_getcpu(1); | ||
491 | |||
492 | sethandler(SIGSEGV, sigsegv, 0); | ||
493 | nerrs += test_vsys_r(); | ||
494 | |||
495 | #ifdef __x86_64__ | ||
496 | nerrs += test_native_vsyscall(); | ||
497 | #endif | ||
498 | |||
499 | return nerrs ? 1 : 0; | ||
500 | } | ||
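test_native_vsyscall() above decides between native and emulated execution by single-stepping through a vsyscall with the x86 trap flag (EFLAGS.TF) set and counting SIGTRAPs whose RIP lands in the vsyscall page. A stripped-down, x86_64-only sketch of the same trap-flag counting technique applied to ordinary code follows; it is illustrative only, and the exact trap count depends on compiler code generation:

	/* x86_64 only: count instructions executed while EFLAGS.TF is set. */
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>

	static volatile sig_atomic_t traps;

	static void sigtrap(int sig)
	{
		(void)sig;
		traps++;
	}

	static unsigned long get_eflags(void)
	{
		unsigned long eflags;
		asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
		return eflags;
	}

	static void set_eflags(unsigned long eflags)
	{
		asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
	}

	int main(void)
	{
		struct sigaction sa;
		volatile int x = 0;

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = sigtrap;
		sigaction(SIGTRAP, &sa, NULL);

		set_eflags(get_eflags() | (1UL << 8));	/* set TF: trap after each insn */
		x++;					/* traced instructions */
		x++;
		set_eflags(get_eflags() & ~(1UL << 8));	/* clear TF */

		printf("SIGTRAP count while stepping: %d\n", (int)traps);
		return 0;
	}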