author:    David S. Miller <davem@davemloft.net>  2018-03-23 11:24:57 -0400
committer: David S. Miller <davem@davemloft.net>  2018-03-23 11:31:58 -0400
commit:    03fe2debbb2771fb90881e4ce8109b09cf772a5c
tree:      fbaf8738296b2e9dcba81c6daef2d515b6c4948c /arch/x86
parent:    6686c459e1449a3ee5f3fd313b0a559ace7a700e
parent:    f36b7534b83357cf52e747905de6d65b4f7c2512
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
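For illustration only, the resolved copy in xfrm4_fill_dst() would
plausibly read as the fragment below; the neighboring member copies are
reconstructed from memory, not quoted from this page:

        xdst->u.rt.rt_gateway = rt->rt_gateway;
        xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
        /* new member from the 'net' fix must be carried over; the
         * 'rt_table_id' copy removed in net-next stays deleted */
        xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;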
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial;
the RDMA tree's merge commit was used as a guide, and their
notes follow:
====================
Due to bug fixes found by the syzkaller bot and taken into the for-rc
branch after development for the 4.17 merge window had already started
being taken into the for-next branch, there were fairly non-trivial
merge issues that would need to be resolved between the for-rc branch
and the for-next branch. This merge resolves those conflicts and
provides a unified base upon which ongoing development for 4.17 can
be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload) added to for-rc and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support)
added as part of the devel cycle both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
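The init/de-init lists the notes refer to follow mlx5's staged-profile
pattern: an ordered table of init/cleanup pairs, walked forward on load
and unwound in reverse on failure or unload. A minimal self-contained
sketch of that pattern (all names here are illustrative, not the actual
mlx5 symbols):

        struct ib_dev;

        struct ib_stage {
                int  (*init)(struct ib_dev *dev);
                void (*cleanup)(struct ib_dev *dev);
        };

        /* Run stages in order; unwind completed ones in reverse on error. */
        static int run_stages(struct ib_dev *dev,
                              const struct ib_stage *s, int n)
        {
                int i, err;

                for (i = 0; i < n; i++) {
                        if (s[i].init) {
                                err = s[i].init(dev);
                                if (err)
                                        goto unwind;
                        }
                }
                return 0;
        unwind:
                while (--i >= 0)
                        if (s[i].cleanup)
                                s[i].cleanup(dev);
                return err;
        }

Reordering such a table (the representors patch) while renaming and
exporting the stage functions (the cleanup fix) is exactly the kind of
change that merges textually yet must be re-checked for ordering.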
Diffstat (limited to 'arch/x86')
31 files changed, 512 insertions, 220 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb7f43f23521..0fa71a78ec99 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2307,7 +2307,7 @@ choice
 	  it can be used to assist security vulnerability exploitation.
 
 	  This setting can be changed at boot time via the kernel command
-	  line parameter vsyscall=[native|emulate|none].
+	  line parameter vsyscall=[emulate|none].
 
 	  On a system with recent enough glibc (2.14 or newer) and no
 	  static binaries, you can say None without a performance penalty
@@ -2315,15 +2315,6 @@ choice
 
 	  If unsure, select "Emulate".
 
-config LEGACY_VSYSCALL_NATIVE
-	bool "Native"
-	help
-	  Actual executable code is located in the fixed vsyscall
-	  address mapping, implementing time() efficiently. Since
-	  this makes the mapping executable, it can be used during
-	  security vulnerability exploitation (traditionally as
-	  ROP gadgets). This configuration is not recommended.
-
 config LEGACY_VSYSCALL_EMULATE
 	bool "Emulate"
 	help
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e811dd9c5e99..08425c42f8b7 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -363,9 +363,7 @@ ENTRY(entry_INT80_compat)
 	pushq	2*8(%rdi)		/* regs->ip */
 	pushq	1*8(%rdi)		/* regs->orig_ax */
 
-	movq	(%rdi), %rdi		/* restore %rdi */
-
-	pushq	%rdi			/* pt_regs->di */
+	pushq	(%rdi)			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
 	pushq	%rdx			/* pt_regs->dx */
 	pushq	%rcx			/* pt_regs->cx */
@@ -406,15 +404,3 @@ ENTRY(entry_INT80_compat)
 	TRACE_IRQS_ON
 	jmp	swapgs_restore_regs_and_return_to_usermode
 END(entry_INT80_compat)
-
-ENTRY(stub32_clone)
-	/*
-	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
-	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
-	 *
-	 * The native 64-bit kernel's sys_clone() implements the latter,
-	 * so we need to swap arguments here before calling it:
-	 */
-	xchg	%r8, %rcx
-	jmp	sys_clone
-ENDPROC(stub32_clone)
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 448ac2161112..2a5e99cff859 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -8,12 +8,12 @@
 #
 0	i386	restart_syscall		sys_restart_syscall
 1	i386	exit			sys_exit
-2	i386	fork			sys_fork			sys_fork
+2	i386	fork			sys_fork
 3	i386	read			sys_read
 4	i386	write			sys_write
 5	i386	open			sys_open			compat_sys_open
 6	i386	close			sys_close
-7	i386	waitpid			sys_waitpid			sys32_waitpid
+7	i386	waitpid			sys_waitpid			compat_sys_x86_waitpid
 8	i386	creat			sys_creat
 9	i386	link			sys_link
 10	i386	unlink			sys_unlink
@@ -78,7 +78,7 @@
 69	i386	ssetmask		sys_ssetmask
 70	i386	setreuid		sys_setreuid16
 71	i386	setregid		sys_setregid16
-72	i386	sigsuspend		sys_sigsuspend			sys_sigsuspend
+72	i386	sigsuspend		sys_sigsuspend
 73	i386	sigpending		sys_sigpending			compat_sys_sigpending
 74	i386	sethostname		sys_sethostname
 75	i386	setrlimit		sys_setrlimit			compat_sys_setrlimit
@@ -96,7 +96,7 @@
 87	i386	swapon			sys_swapon
 88	i386	reboot			sys_reboot
 89	i386	readdir			sys_old_readdir			compat_sys_old_readdir
-90	i386	mmap			sys_old_mmap			sys32_mmap
+90	i386	mmap			sys_old_mmap			compat_sys_x86_mmap
 91	i386	munmap			sys_munmap
 92	i386	truncate		sys_truncate			compat_sys_truncate
 93	i386	ftruncate		sys_ftruncate			compat_sys_ftruncate
@@ -126,7 +126,7 @@
 117	i386	ipc			sys_ipc				compat_sys_ipc
 118	i386	fsync			sys_fsync
 119	i386	sigreturn		sys_sigreturn			sys32_sigreturn
-120	i386	clone			sys_clone			stub32_clone
+120	i386	clone			sys_clone			compat_sys_x86_clone
 121	i386	setdomainname		sys_setdomainname
 122	i386	uname			sys_newuname
 123	i386	modify_ldt		sys_modify_ldt
@@ -186,8 +186,8 @@
 177	i386	rt_sigtimedwait		sys_rt_sigtimedwait		compat_sys_rt_sigtimedwait
 178	i386	rt_sigqueueinfo		sys_rt_sigqueueinfo		compat_sys_rt_sigqueueinfo
 179	i386	rt_sigsuspend		sys_rt_sigsuspend
-180	i386	pread64			sys_pread64			sys32_pread
-181	i386	pwrite64		sys_pwrite64			sys32_pwrite
+180	i386	pread64			sys_pread64			compat_sys_x86_pread
+181	i386	pwrite64		sys_pwrite64			compat_sys_x86_pwrite
 182	i386	chown			sys_chown16
 183	i386	getcwd			sys_getcwd
 184	i386	capget			sys_capget
@@ -196,14 +196,14 @@
 187	i386	sendfile		sys_sendfile			compat_sys_sendfile
 188	i386	getpmsg
 189	i386	putpmsg
-190	i386	vfork			sys_vfork			sys_vfork
+190	i386	vfork			sys_vfork
 191	i386	ugetrlimit		sys_getrlimit			compat_sys_getrlimit
 192	i386	mmap2			sys_mmap_pgoff
-193	i386	truncate64		sys_truncate64			sys32_truncate64
-194	i386	ftruncate64		sys_ftruncate64			sys32_ftruncate64
-195	i386	stat64			sys_stat64			sys32_stat64
-196	i386	lstat64			sys_lstat64			sys32_lstat64
-197	i386	fstat64			sys_fstat64			sys32_fstat64
+193	i386	truncate64		sys_truncate64			compat_sys_x86_truncate64
+194	i386	ftruncate64		sys_ftruncate64			compat_sys_x86_ftruncate64
+195	i386	stat64			sys_stat64			compat_sys_x86_stat64
+196	i386	lstat64			sys_lstat64			compat_sys_x86_lstat64
+197	i386	fstat64			sys_fstat64			compat_sys_x86_fstat64
 198	i386	lchown32		sys_lchown
 199	i386	getuid32		sys_getuid
 200	i386	getgid32		sys_getgid
@@ -231,7 +231,7 @@
 # 222 is unused
 # 223 is unused
 224	i386	gettid			sys_gettid
-225	i386	readahead		sys_readahead			sys32_readahead
+225	i386	readahead		sys_readahead			compat_sys_x86_readahead
 226	i386	setxattr		sys_setxattr
 227	i386	lsetxattr		sys_lsetxattr
 228	i386	fsetxattr		sys_fsetxattr
@@ -256,7 +256,7 @@
 247	i386	io_getevents		sys_io_getevents		compat_sys_io_getevents
 248	i386	io_submit		sys_io_submit			compat_sys_io_submit
 249	i386	io_cancel		sys_io_cancel
-250	i386	fadvise64		sys_fadvise64			sys32_fadvise64
+250	i386	fadvise64		sys_fadvise64			compat_sys_x86_fadvise64
 # 251 is available for reuse (was briefly sys_set_zone_reclaim)
 252	i386	exit_group		sys_exit_group
 253	i386	lookup_dcookie		sys_lookup_dcookie		compat_sys_lookup_dcookie
@@ -278,7 +278,7 @@
 269	i386	fstatfs64		sys_fstatfs64			compat_sys_fstatfs64
 270	i386	tgkill			sys_tgkill
 271	i386	utimes			sys_utimes			compat_sys_utimes
-272	i386	fadvise64_64		sys_fadvise64_64		sys32_fadvise64_64
+272	i386	fadvise64_64		sys_fadvise64_64		compat_sys_x86_fadvise64_64
 273	i386	vserver
 274	i386	mbind			sys_mbind
 275	i386	get_mempolicy		sys_get_mempolicy		compat_sys_get_mempolicy
@@ -306,7 +306,7 @@
 297	i386	mknodat			sys_mknodat
 298	i386	fchownat		sys_fchownat
 299	i386	futimesat		sys_futimesat			compat_sys_futimesat
-300	i386	fstatat64		sys_fstatat64			sys32_fstatat
+300	i386	fstatat64		sys_fstatat64			compat_sys_x86_fstatat
 301	i386	unlinkat		sys_unlinkat
 302	i386	renameat		sys_renameat
 303	i386	linkat			sys_linkat
@@ -320,7 +320,7 @@
 311	i386	set_robust_list		sys_set_robust_list		compat_sys_set_robust_list
 312	i386	get_robust_list		sys_get_robust_list		compat_sys_get_robust_list
 313	i386	splice			sys_splice
-314	i386	sync_file_range		sys_sync_file_range		sys32_sync_file_range
+314	i386	sync_file_range		sys_sync_file_range		compat_sys_x86_sync_file_range
 315	i386	tee			sys_tee
 316	i386	vmsplice		sys_vmsplice			compat_sys_vmsplice
 317	i386	move_pages		sys_move_pages			compat_sys_move_pages
@@ -330,7 +330,7 @@
 321	i386	signalfd		sys_signalfd			compat_sys_signalfd
 322	i386	timerfd_create		sys_timerfd_create
 323	i386	eventfd			sys_eventfd
-324	i386	fallocate		sys_fallocate			sys32_fallocate
+324	i386	fallocate		sys_fallocate			compat_sys_x86_fallocate
 325	i386	timerfd_settime		sys_timerfd_settime		compat_sys_timerfd_settime
 326	i386	timerfd_gettime		sys_timerfd_gettime		compat_sys_timerfd_gettime
 327	i386	signalfd4		sys_signalfd4			compat_sys_signalfd4
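As a reading aid: each row of syscall_32.tbl has the shape

	<number> <abi> <name> <native entry point> [<compat entry point>]

so after this change a 32-bit clone() on a 64-bit kernel enters through
the new C wrapper rather than the removed assembly stub:

	120	i386	clone	sys_clone	compat_sys_x86_clone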
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 577fa8adb785..8560ef68a9d6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -42,10 +42,8 @@
 #define CREATE_TRACE_POINTS
 #include "vsyscall_trace.h"
 
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
-#if defined(CONFIG_LEGACY_VSYSCALL_NATIVE)
-	NATIVE;
-#elif defined(CONFIG_LEGACY_VSYSCALL_NONE)
+static enum { EMULATE, NONE } vsyscall_mode =
+#ifdef CONFIG_LEGACY_VSYSCALL_NONE
 	NONE;
 #else
 	EMULATE;
@@ -56,8 +54,6 @@ static int __init vsyscall_setup(char *str)
 	if (str) {
 		if (!strcmp("emulate", str))
 			vsyscall_mode = EMULATE;
-		else if (!strcmp("native", str))
-			vsyscall_mode = NATIVE;
 		else if (!strcmp("none", str))
 			vsyscall_mode = NONE;
 		else
@@ -139,10 +135,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 	WARN_ON_ONCE(address != regs->ip);
 
-	/* This should be unreachable in NATIVE mode. */
-	if (WARN_ON(vsyscall_mode == NATIVE))
-		return false;
-
 	if (vsyscall_mode == NONE) {
 		warn_bad_vsyscall(KERN_INFO, regs,
 				  "vsyscall attempted with vsyscall=none");
@@ -370,9 +362,7 @@ void __init map_vsyscall(void)
 
 	if (vsyscall_mode != NONE) {
 		__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-			     vsyscall_mode == NATIVE
-			     ? PAGE_KERNEL_VSYSCALL
-			     : PAGE_KERNEL_VVAR);
+			     PAGE_KERNEL_VVAR);
 		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
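With NATIVE gone, the only remaining runtime override is the boot
parameter parsed by vsyscall_setup() above, e.g. (illustrative command
line, not from the patch):

	linux /boot/vmlinuz-4.16 root=/dev/sda1 ro quiet vsyscall=none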
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 6d8044ab1060..22ec65bc033a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = {
 };
 
 static struct attribute *skx_upi_uncore_formats_attr[] = {
-	&format_attr_event_ext.attr,
+	&format_attr_event.attr,
 	&format_attr_umask_ext.attr,
 	&format_attr_edge.attr,
 	&format_attr_inv.attr,
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 96cd33bbfc85..6512498bbef6 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -51,15 +51,14 @@
 #define AA(__x)		((unsigned long)(__x))
 
 
-asmlinkage long sys32_truncate64(const char __user *filename,
-				 unsigned long offset_low,
-				 unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_truncate64, const char __user *, filename,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_truncate(filename, ((loff_t) offset_high << 32) | offset_low);
 }
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
-				  unsigned long offset_high)
+COMPAT_SYSCALL_DEFINE3(x86_ftruncate64, unsigned int, fd,
+		       unsigned long, offset_low, unsigned long, offset_high)
 {
 	return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
 }
@@ -96,8 +95,8 @@ static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
 	return 0;
 }
 
-asmlinkage long sys32_stat64(const char __user *filename,
-			     struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_stat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_stat(filename, &stat);
@@ -107,8 +106,8 @@ asmlinkage long sys32_stat64(const char __user *filename,
 	return ret;
 }
 
-asmlinkage long sys32_lstat64(const char __user *filename,
-			      struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_lstat64, const char __user *, filename,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_lstat(filename, &stat);
@@ -117,7 +116,8 @@ asmlinkage long sys32_lstat64(const char __user *filename,
 	return ret;
 }
 
-asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
+COMPAT_SYSCALL_DEFINE2(x86_fstat64, unsigned int, fd,
+		       struct stat64 __user *, statbuf)
 {
 	struct kstat stat;
 	int ret = vfs_fstat(fd, &stat);
@@ -126,8 +126,9 @@ asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
 	return ret;
 }
 
-asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
-			      struct stat64 __user *statbuf, int flag)
+COMPAT_SYSCALL_DEFINE4(x86_fstatat, unsigned int, dfd,
+		       const char __user *, filename,
+		       struct stat64 __user *, statbuf, int, flag)
 {
 	struct kstat stat;
 	int error;
@@ -153,7 +154,7 @@ struct mmap_arg_struct32 {
 	unsigned int offset;
 };
 
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
+COMPAT_SYSCALL_DEFINE1(x86_mmap, struct mmap_arg_struct32 __user *, arg)
 {
 	struct mmap_arg_struct32 a;
 
@@ -167,22 +168,22 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
 			       a.offset>>PAGE_SHIFT);
 }
 
-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
-			      int options)
+COMPAT_SYSCALL_DEFINE3(x86_waitpid, compat_pid_t, pid, unsigned int __user *,
+		       stat_addr, int, options)
 {
 	return compat_sys_wait4(pid, stat_addr, options, NULL);
 }
 
 /* warning: next two assume little endian */
-asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
-			    u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pread, unsigned int, fd, char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pread64(fd, ubuf, count,
 			 ((loff_t)AA(poshi) << 32) | AA(poslo));
 }
 
-asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
-			     u32 count, u32 poslo, u32 poshi)
+COMPAT_SYSCALL_DEFINE5(x86_pwrite, unsigned int, fd, const char __user *, ubuf,
+		       u32, count, u32, poslo, u32, poshi)
 {
 	return sys_pwrite64(fd, ubuf, count,
 			  ((loff_t)AA(poshi) << 32) | AA(poslo));
@@ -193,8 +194,9 @@ asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
  * Some system calls that need sign extended arguments. This could be
  * done by a generic wrapper.
  */
-long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
-			__u32 len_low, __u32 len_high, int advice)
+COMPAT_SYSCALL_DEFINE6(x86_fadvise64_64, int, fd, __u32, offset_low,
+		       __u32, offset_high, __u32, len_low, __u32, len_high,
+		       int, advice)
 {
 	return sys_fadvise64_64(fd,
 			       (((u64)offset_high)<<32) | offset_low,
@@ -202,31 +204,43 @@ long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
 				advice);
 }
 
-asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
-				   size_t count)
+COMPAT_SYSCALL_DEFINE4(x86_readahead, int, fd, unsigned int, off_lo,
+		       unsigned int, off_hi, size_t, count)
 {
 	return sys_readahead(fd, ((u64)off_hi << 32) | off_lo, count);
 }
 
-asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
-				      unsigned n_low, unsigned n_hi, int flags)
+COMPAT_SYSCALL_DEFINE6(x86_sync_file_range, int, fd, unsigned int, off_low,
+		       unsigned int, off_hi, unsigned int, n_low,
+		       unsigned int, n_hi, int, flags)
 {
 	return sys_sync_file_range(fd,
 				   ((u64)off_hi << 32) | off_low,
 				   ((u64)n_hi << 32) | n_low, flags);
 }
 
-asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
-				size_t len, int advice)
+COMPAT_SYSCALL_DEFINE5(x86_fadvise64, int, fd, unsigned int, offset_lo,
+		       unsigned int, offset_hi, size_t, len, int, advice)
 {
 	return sys_fadvise64_64(fd, ((u64)offset_hi << 32) | offset_lo,
 				len, advice);
 }
 
-asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
-				unsigned offset_hi, unsigned len_lo,
-				unsigned len_hi)
+COMPAT_SYSCALL_DEFINE6(x86_fallocate, int, fd, int, mode,
+		       unsigned int, offset_lo, unsigned int, offset_hi,
+		       unsigned int, len_lo, unsigned int, len_hi)
 {
 	return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
 			     ((u64)len_hi << 32) | len_lo);
 }
+
+/*
+ * The 32-bit clone ABI is CONFIG_CLONE_BACKWARDS
+ */
+COMPAT_SYSCALL_DEFINE5(x86_clone, unsigned long, clone_flags,
+		       unsigned long, newsp, int __user *, parent_tidptr,
+		       unsigned long, tls_val, int __user *, child_tidptr)
+{
+	return sys_clone(clone_flags, newsp, parent_tidptr, child_tidptr,
+			 tls_val);
+}
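All of these wrappers use the same idiom: a 64-bit quantity crosses the
32-bit ABI as two register-sized halves and is recombined with
((u64)hi << 32) | lo. A standalone sketch of the round trip (the helper
names are illustrative, not kernel symbols):

	#include <stdint.h>

	/* Combine two 32-bit halves, as the compat wrappers above do. */
	static int64_t join_offset(uint32_t lo, uint32_t hi)
	{
		return ((int64_t)hi << 32) | lo;
	}

	/* Split a 64-bit offset, as 32-bit userspace/libc must. */
	static void split_offset(int64_t off, uint32_t *lo, uint32_t *hi)
	{
		*lo = (uint32_t)off;
		*hi = (uint32_t)((uint64_t)off >> 32);
	}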
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index f41079da38c5..d554c11e01ff 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -316,6 +316,7 @@
 #define X86_FEATURE_VPCLMULQDQ		(16*32+10) /* Carry-Less Multiplication Double Quadword */
 #define X86_FEATURE_AVX512_VNNI		(16*32+11) /* Vector Neural Network Instructions */
 #define X86_FEATURE_AVX512_BITALG	(16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_TME			(16*32+13) /* Intel Total Memory Encryption */
 #define X86_FEATURE_AVX512_VPOPCNTDQ	(16*32+14) /* POPCNT for vectors of DW/QW */
 #define X86_FEATURE_LA57		(16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID		(16*32+22) /* RDPID instruction */
@@ -328,6 +329,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */
 #define X86_FEATURE_INTEL_STIBP		(18*32+27) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_ARCH_CAPABILITIES	(18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 7fb1047d61c7..6cf0e4cb7b97 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -39,6 +39,7 @@ struct device;
 
 enum ucode_state {
 	UCODE_OK	= 0,
+	UCODE_NEW,
 	UCODE_UPDATED,
 	UCODE_NFOUND,
 	UCODE_ERROR,
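The position of UCODE_NEW matters: other hunks in this merge compare
states numerically ("ret > UCODE_UPDATED" in microcode/amd.c,
"err > UCODE_NFOUND" in microcode/core.c), so the enum doubles as a
severity ranking. A sketch of the idiom (the helper name is made up):

	static int ucode_ret_to_errno(enum ucode_state ret)
	{
		/* OK/NEW/UPDATED are success-ish states ... */
		if (ret > UCODE_UPDATED)
			return -EINVAL;	/* ... NFOUND/ERROR are not */
		return 0;
	}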
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index d0dabeae0505..f928ad9b143f 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -183,7 +183,10 @@
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
-# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n",	\
+# define CALL_NOSPEC						\
+	ALTERNATIVE(						\
+	ANNOTATE_RETPOLINE_SAFE					\
+	"call *%[thunk_target]\n",				\
 	"       jmp    904f;\n"					\
 	"       .align 16\n"					\
 	"901:	call   903f;\n"					\
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 246f15b4e64c..acfe755562a6 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -174,7 +174,6 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
 #define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_NOCACHE)
-#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VVAR		(__PAGE_KERNEL_RO | _PAGE_USER)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
@@ -206,7 +205,6 @@ enum page_cache_mode {
 #define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
 #define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
-#define PAGE_KERNEL_VSYSCALL	__pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
 #define PAGE_KERNEL_VVAR	__pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
 
 #define PAGE_KERNEL_IO		__pgprot(__PAGE_KERNEL_IO)
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index d6baf23782bc..5c019d23d06b 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -10,6 +10,7 @@ extern struct exception_table_entry __stop___ex_table[];
 
 #if defined(CONFIG_X86_64)
 extern char __end_rodata_hpage_align[];
+extern char __entry_trampoline_start[], __entry_trampoline_end[];
 #endif
 
 #endif	/* _ASM_X86_SECTIONS_H */
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 82c34ee25a65..906794aa034e 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -20,31 +20,43 @@
 #include <asm/ia32.h>
 
 /* ia32/sys_ia32.c */
-asmlinkage long sys32_truncate64(const char __user *, unsigned long, unsigned long);
-asmlinkage long sys32_ftruncate64(unsigned int, unsigned long, unsigned long);
+asmlinkage long compat_sys_x86_truncate64(const char __user *, unsigned long,
+					  unsigned long);
+asmlinkage long compat_sys_x86_ftruncate64(unsigned int, unsigned long,
+					   unsigned long);
 
-asmlinkage long sys32_stat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_lstat64(const char __user *, struct stat64 __user *);
-asmlinkage long sys32_fstat64(unsigned int, struct stat64 __user *);
-asmlinkage long sys32_fstatat(unsigned int, const char __user *,
+asmlinkage long compat_sys_x86_stat64(const char __user *,
+				      struct stat64 __user *);
+asmlinkage long compat_sys_x86_lstat64(const char __user *,
+				       struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstat64(unsigned int, struct stat64 __user *);
+asmlinkage long compat_sys_x86_fstatat(unsigned int, const char __user *,
 			      struct stat64 __user *, int);
 struct mmap_arg_struct32;
-asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *);
+asmlinkage long compat_sys_x86_mmap(struct mmap_arg_struct32 __user *);
 
-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
+asmlinkage long compat_sys_x86_waitpid(compat_pid_t, unsigned int __user *,
+				       int);
 
-asmlinkage long sys32_pread(unsigned int, char __user *, u32, u32, u32);
-asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
+asmlinkage long compat_sys_x86_pread(unsigned int, char __user *, u32, u32,
+				     u32);
+asmlinkage long compat_sys_x86_pwrite(unsigned int, const char __user *, u32,
+				      u32, u32);
 
-long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
-long sys32_vm86_warning(void);
+asmlinkage long compat_sys_x86_fadvise64_64(int, __u32, __u32, __u32, __u32,
+					    int);
 
-asmlinkage ssize_t sys32_readahead(int, unsigned, unsigned, size_t);
-asmlinkage long sys32_sync_file_range(int, unsigned, unsigned,
-				      unsigned, unsigned, int);
-asmlinkage long sys32_fadvise64(int, unsigned, unsigned, size_t, int);
-asmlinkage long sys32_fallocate(int, int, unsigned,
-				unsigned, unsigned, unsigned);
+asmlinkage ssize_t compat_sys_x86_readahead(int, unsigned int, unsigned int,
+					    size_t);
+asmlinkage long compat_sys_x86_sync_file_range(int, unsigned int, unsigned int,
+					       unsigned int, unsigned int,
+					       int);
+asmlinkage long compat_sys_x86_fadvise64(int, unsigned int, unsigned int,
+					 size_t, int);
+asmlinkage long compat_sys_x86_fallocate(int, int, unsigned int, unsigned int,
+					 unsigned int, unsigned int);
+asmlinkage long compat_sys_x86_clone(unsigned long, unsigned long, int __user *,
+				     unsigned long, int __user *);
 
 /* ia32/ia32_signal.c */
 asmlinkage long sys32_sigreturn(void);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 8b6780751132..5db8b0b10766 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -352,6 +352,7 @@ enum vmcs_field {
 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR		(4 << 8) /* software interrupt */
+#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h
index 91723461dc1f..435db58a7bad 100644
--- a/arch/x86/include/uapi/asm/mce.h
+++ b/arch/x86/include/uapi/asm/mce.h
@@ -30,6 +30,7 @@ struct mce {
 	__u64 synd;	/* MCA_SYND MSR: only valid on SMCA systems */
 	__u64 ipid;	/* MCA_IPID MSR: only valid on SMCA systems */
 	__u64 ppin;	/* Protected Processor Inventory Number */
+	__u32 microcode;/* Microcode revision */
 };
 
 #define MCE_GET_RECORD_LEN   _IOR('M', 1, int)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d19e903214b4..c3af167d0a70 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
 /*
  * Early microcode releases for the Spectre v2 mitigation were broken.
  * Information taken from;
- * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
  * - https://kb.vmware.com/s/article/52345
  * - Microcode revisions observed in the wild
  * - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
-	{ INTEL_FAM6_SKYLAKE_DESKTOP,	0x03,	0xc2 },
 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
@@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
 {
 	int i;
 
+	/*
+	 * We know that the hypervisor lie to us on the microcode version so
+	 * we may as well hope that it is running the correct version.
+	 */
+	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+		return false;
+
 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
 		if (c->x86_model == spectre_bad_microcodes[i].model &&
 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 8ff94d1e2dce..466f47301334 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -56,6 +56,9 @@
 
 static DEFINE_MUTEX(mce_log_mutex);
 
+/* sysfs synchronization */
+static DEFINE_MUTEX(mce_sysfs_mutex);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
 
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
 
 	if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
 		rdmsrl(MSR_PPIN, m->ppin);
+
+	m->microcode = boot_cpu_data.microcode;
 }
 
 DEFINE_PER_CPU(struct mce, injectm);
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
 	 */
 	pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-		cpu_data(m->extcpu).microcode);
+		m->microcode);
 }
 
 static void print_mce(struct mce *m)
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;
 
+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.ignore_ce ^ !!new) {
 		if (new) {
 			/* disable ce features */
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
 			on_each_cpu(mce_enable_ce, (void *)1, 1);
 		}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 }
 
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
 	if (kstrtou64(buf, 0, &new) < 0)
 		return -EINVAL;
 
+	mutex_lock(&mce_sysfs_mutex);
 	if (mca_cfg.cmci_disabled ^ !!new) {
 		if (new) {
 			/* disable cmci */
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
 			on_each_cpu(mce_enable_ce, NULL, 1);
 		}
 	}
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return size;
 }
 
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
 				      struct device_attribute *attr,
 				      const char *buf, size_t size)
 {
-	ssize_t ret = device_store_int(s, attr, buf, size);
+	unsigned long old_check_interval = check_interval;
+	ssize_t ret = device_store_ulong(s, attr, buf, size);
+
+	if (check_interval == old_check_interval)
+		return ret;
+
+	if (check_interval < 1)
+		check_interval = 1;
+
+	mutex_lock(&mce_sysfs_mutex);
 	mce_restart();
+	mutex_unlock(&mce_sysfs_mutex);
+
 	return ret;
 }
 
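These handlers back the per-CPU machinecheck sysfs attributes; the
check_interval write that store_int_with_restart() now clamps and
serializes is reached via the standard MCE sysfs path, e.g.:

	# poll for correctable errors every 30 seconds;
	# triggers mce_restart() under mce_sysfs_mutex
	echo 30 > /sys/devices/system/machinecheck/machinecheck0/check_interval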
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index a998e1a7d46f..48179928ff38 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 		return -EINVAL;
 
 	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-	if (ret != UCODE_OK)
+	if (ret > UCODE_UPDATED)
 		return -EINVAL;
 
 	return 0;
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 static enum ucode_state
 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
+	struct ucode_patch *p;
 	enum ucode_state ret;
 
 	/* free old equiv table */
 	free_equiv_cpu_table();
 
 	ret = __load_microcode_amd(family, data, size);
-
-	if (ret != UCODE_OK)
+	if (ret != UCODE_OK) {
 		cleanup();
+		return ret;
+	}
 
-#ifdef CONFIG_X86_32
-	/* save BSP's matching patch for early load */
-	if (save) {
-		struct ucode_patch *p = find_patch(0);
-		if (p) {
-			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-			       PATCH_MAX_SIZE));
-		}
+	p = find_patch(0);
+	if (!p) {
+		return ret;
+	} else {
+		if (boot_cpu_data.microcode == p->patch_id)
+			return ret;
+
+		ret = UCODE_NEW;
 	}
-#endif
+
+	/* save BSP's matching patch for early load */
+	if (!save)
+		return ret;
+
+	memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+	memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
+
 	return ret;
 }
 
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index aa1b9a422f2b..10c4fc2c91f8 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -22,13 +22,16 @@ | |||
22 | #define pr_fmt(fmt) "microcode: " fmt | 22 | #define pr_fmt(fmt) "microcode: " fmt |
23 | 23 | ||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/stop_machine.h> | ||
25 | #include <linux/syscore_ops.h> | 26 | #include <linux/syscore_ops.h> |
26 | #include <linux/miscdevice.h> | 27 | #include <linux/miscdevice.h> |
27 | #include <linux/capability.h> | 28 | #include <linux/capability.h> |
28 | #include <linux/firmware.h> | 29 | #include <linux/firmware.h> |
29 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
31 | #include <linux/delay.h> | ||
30 | #include <linux/mutex.h> | 32 | #include <linux/mutex.h> |
31 | #include <linux/cpu.h> | 33 | #include <linux/cpu.h> |
34 | #include <linux/nmi.h> | ||
32 | #include <linux/fs.h> | 35 | #include <linux/fs.h> |
33 | #include <linux/mm.h> | 36 | #include <linux/mm.h> |
34 | 37 | ||
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache); | |||
64 | */ | 67 | */ |
65 | static DEFINE_MUTEX(microcode_mutex); | 68 | static DEFINE_MUTEX(microcode_mutex); |
66 | 69 | ||
70 | /* | ||
71 | * Serialize late loading so that CPUs get updated one-by-one. | ||
72 | */ | ||
73 | static DEFINE_SPINLOCK(update_lock); | ||
74 | |||
67 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; | 75 | struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; |
68 | 76 | ||
69 | struct cpu_info_ctx { | 77 | struct cpu_info_ctx { |
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu) | |||
373 | return ret; | 381 | return ret; |
374 | } | 382 | } |
375 | 383 | ||
376 | struct apply_microcode_ctx { | ||
377 | enum ucode_state err; | ||
378 | }; | ||
379 | |||
380 | static void apply_microcode_local(void *arg) | 384 | static void apply_microcode_local(void *arg) |
381 | { | 385 | { |
382 | struct apply_microcode_ctx *ctx = arg; | 386 | enum ucode_state *err = arg; |
383 | 387 | ||
384 | ctx->err = microcode_ops->apply_microcode(smp_processor_id()); | 388 | *err = microcode_ops->apply_microcode(smp_processor_id()); |
385 | } | 389 | } |
386 | 390 | ||
387 | static int apply_microcode_on_target(int cpu) | 391 | static int apply_microcode_on_target(int cpu) |
388 | { | 392 | { |
389 | struct apply_microcode_ctx ctx = { .err = 0 }; | 393 | enum ucode_state err; |
390 | int ret; | 394 | int ret; |
391 | 395 | ||
392 | ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1); | 396 | ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1); |
393 | if (!ret) | 397 | if (!ret) { |
394 | ret = ctx.err; | 398 | if (err == UCODE_ERROR) |
395 | 399 | ret = 1; | |
400 | } | ||
396 | return ret; | 401 | return ret; |
397 | } | 402 | } |
398 | 403 | ||
@@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void) | |||
489 | /* fake device for request_firmware */ | 494 | /* fake device for request_firmware */ |
490 | static struct platform_device *microcode_pdev; | 495 | static struct platform_device *microcode_pdev; |
491 | 496 | ||
492 | static enum ucode_state reload_for_cpu(int cpu) | 497 | /* |
498 | * Late loading dance. Why the heavy-handed stomp_machine effort? | ||
499 | * | ||
500 | * - HT siblings must be idle and not execute other code while the other sibling | ||
501 | * is loading microcode in order to avoid any negative interactions caused by | ||
502 | * the loading. | ||
503 | * | ||
504 | * - In addition, microcode update on the cores must be serialized until this | ||
505 | * requirement can be relaxed in the future. Right now, this is conservative | ||
506 | * and good. | ||
507 | */ | ||
508 | #define SPINUNIT 100 /* 100 nsec */ | ||
509 | |||
510 | static int check_online_cpus(void) | ||
493 | { | 511 | { |
494 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | 512 | if (num_online_cpus() == num_present_cpus()) |
495 | enum ucode_state ustate; | 513 | return 0; |
496 | 514 | ||
-	if (!uci->valid)
-		return UCODE_OK;
+		pr_err("Not all CPUs online, aborting microcode update.\n");
+
+		return -EINVAL;
+	}
+
+static atomic_t late_cpus_in;
+static atomic_t late_cpus_out;
+
+static int __wait_for_cpus(atomic_t *t, long long timeout)
+{
+	int all_cpus = num_online_cpus();
+
+	atomic_inc(t);
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-	if (ustate != UCODE_OK)
-		return ustate;
+	while (atomic_read(t) < all_cpus) {
+		if (timeout < SPINUNIT) {
+			pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
+				all_cpus - atomic_read(t));
+			return 1;
+		}
 
-	return apply_microcode_on_target(cpu);
+		ndelay(SPINUNIT);
+		timeout -= SPINUNIT;
+
+		touch_nmi_watchdog();
+	}
+	return 0;
+}
+
+/*
+ * Returns:
+ * < 0 - on error
+ *   0 - no update done
+ *   1 - microcode was updated
+ */
+static int __reload_late(void *info)
+{
+	int cpu = smp_processor_id();
+	enum ucode_state err;
+	int ret = 0;
+
+	/*
+	 * Wait for all CPUs to arrive. A load will not be attempted unless all
+	 * CPUs show up.
+	 */
+	if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
+		return -1;
+
+	spin_lock(&update_lock);
+	apply_microcode_local(&err);
+	spin_unlock(&update_lock);
+
+	if (err > UCODE_NFOUND) {
+		pr_warn("Error reloading microcode on CPU %d\n", cpu);
+		return -1;
+	/* siblings return UCODE_OK because their engine got updated already */
+	} else if (err == UCODE_UPDATED || err == UCODE_OK) {
+		ret = 1;
+	} else {
+		return ret;
+	}
+
+	/*
+	 * Increase the wait timeout to a safe value here since we're
+	 * serializing the microcode update and that could take a while on a
+	 * large number of CPUs. And that is fine as the *actual* timeout will
+	 * be determined by the last CPU finished updating and thus cut short.
+	 */
+	if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
+		panic("Timeout during microcode update!\n");
+
+	return ret;
+}
+
+/*
+ * Reload microcode late on all CPUs. Wait for a sec until they
+ * all gather together.
+ */
+static int microcode_reload_late(void)
+{
+	int ret;
+
+	atomic_set(&late_cpus_in, 0);
+	atomic_set(&late_cpus_out, 0);
+
+	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
+	if (ret > 0)
+		microcode_check();
+
+	return ret;
 }
 
 static ssize_t reload_store(struct device *dev,
@@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
 			    const char *buf, size_t size)
 {
 	enum ucode_state tmp_ret = UCODE_OK;
-	bool do_callback = false;
+	int bsp = boot_cpu_data.cpu_index;
 	unsigned long val;
 	ssize_t ret = 0;
-	int cpu;
 
 	ret = kstrtoul(buf, 0, &val);
 	if (ret)
@@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
 	if (val != 1)
 		return size;
 
-	get_online_cpus();
-	mutex_lock(&microcode_mutex);
-	for_each_online_cpu(cpu) {
-		tmp_ret = reload_for_cpu(cpu);
-		if (tmp_ret > UCODE_NFOUND) {
-			pr_warn("Error reloading microcode on CPU %d\n", cpu);
-
-			/* set retval for the first encountered reload error */
-			if (!ret)
-				ret = -EINVAL;
-		}
+	tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+	if (tmp_ret != UCODE_NEW)
+		return size;
 
-		if (tmp_ret == UCODE_UPDATED)
-			do_callback = true;
-	}
+	get_online_cpus();
 
-	if (!ret && do_callback)
-		microcode_check();
+	ret = check_online_cpus();
+	if (ret)
+		goto put;
 
+	mutex_lock(&microcode_mutex);
+	ret = microcode_reload_late();
 	mutex_unlock(&microcode_mutex);
+
+put:
 	put_online_cpus();
 
-	if (!ret)
+	if (ret >= 0)
 		ret = size;
 
 	return ret;
@@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
 	if (system_state != SYSTEM_RUNNING)
 		return UCODE_NFOUND;
 
-	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-						     refresh_fw);
-
-	if (ustate == UCODE_OK) {
+	ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+	if (ustate == UCODE_NEW) {
 		pr_debug("CPU%d updated upon init\n", cpu);
 		apply_microcode_on_target(cpu);
 	}
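
The late-loading rework above hinges on a two-counter rendezvous: __wait_for_cpus(&late_cpus_in, ...) keeps any CPU from touching the microcode until every CPU has arrived, and the late_cpus_out pass keeps them all parked until the last one finishes. A minimal user-space C11 sketch of the same pattern follows; NTHREADS, SPIN_NS and the worker body are invented stand-ins for the online CPU count, SPINUNIT and the stop_machine() callback.

#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>
#include <time.h>

#define NTHREADS 4
#define SPIN_NS  100000LL               /* poll interval, like SPINUNIT */

static atomic_int cpus_in, cpus_out;

/* Wait until all threads have incremented *t, or give up after timeout_ns. */
static int wait_for_all(atomic_int *t, long long timeout_ns)
{
        atomic_fetch_add(t, 1);
        while (atomic_load(t) < NTHREADS) {
                if (timeout_ns < SPIN_NS)
                        return 1;                   /* rendezvous failed */
                nanosleep(&(struct timespec){ .tv_nsec = SPIN_NS }, NULL);
                timeout_ns -= SPIN_NS;
        }
        return 0;
}

static void *worker(void *arg)
{
        long id = (long)arg;

        /* Phase 1: nobody proceeds until everyone has arrived. */
        if (wait_for_all(&cpus_in, 1000000000LL))
                return NULL;
        printf("thread %ld: doing serialized work\n", id);
        /* Phase 2: nobody leaves until everyone has finished. */
        wait_for_all(&cpus_out, 1000000000LL);
        return NULL;
}

int main(void)
{
        pthread_t tid[NTHREADS];

        for (long i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);
        for (int i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        return 0;
}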
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 923054a6b760..32b8e5724f96 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 	if (!mc)
 		return 0;
 
+	/*
+	 * Save us the MSR write below - which is a particularly expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
 static enum ucode_state apply_microcode_intel(int cpu)
 {
+	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct microcode_intel *mc;
-	struct ucode_cpu_info *uci;
-	struct cpuinfo_x86 *c;
 	static int prev_rev;
 	u32 rev;
 
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
 	if (WARN_ON(raw_smp_processor_id() != cpu))
 		return UCODE_ERROR;
 
-	uci = ucode_cpu_info + cpu;
-	mc = uci->mc;
+	/* Look for a newer patch in our cache: */
+	mc = find_patch(uci);
 	if (!mc) {
-		/* Look for a newer patch in our cache: */
-		mc = find_patch(uci);
+		mc = uci->mc;
 		if (!mc)
 			return UCODE_NFOUND;
 	}
 
+	/*
+	 * Save us the MSR write below - which is a particularly expensive
+	 * operation - when the other hyperthread has updated the microcode
+	 * already.
+	 */
+	rev = intel_get_microcode_revision();
+	if (rev >= mc->hdr.rev) {
+		uci->cpu_sig.rev = rev;
+		c->microcode = rev;
+		return UCODE_OK;
+	}
+
+	/*
+	 * Writeback and invalidate caches before updating microcode to avoid
+	 * internal issues depending on what the microcode is updating.
+	 */
+	native_wbinvd();
+
 	/* write microcode via MSR 0x79 */
 	wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
 
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
 		prev_rev = rev;
 	}
 
-	c = &cpu_data(cpu);
-
 	uci->cpu_sig.rev = rev;
 	c->microcode = rev;
 
@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	unsigned int leftover = size;
 	unsigned int curr_mc_size = 0, new_mc_size = 0;
 	unsigned int csig, cpf;
+	enum ucode_state ret = UCODE_OK;
 
 	while (leftover) {
 		struct microcode_header_intel mc_header;
@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 			new_mc = mc;
 			new_mc_size = mc_size;
 			mc = NULL;	/* trigger new vmalloc */
+			ret = UCODE_NEW;
 		}
 
 		ucode_ptr += mc_size;
@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
 		 cpu, new_rev, uci->cpu_sig.rev);
 
-	return UCODE_OK;
+	return ret;
 }
 
 static int get_ucode_fw(void *to, const void *from, size_t n)
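
Both hunks above add the same guard: read the revision the CPU is already running and skip the costly update when the hyperthread sibling has already done the work. Reduced to a stand-alone sketch (the patch struct and read_current_rev() are invented here; the real code reads an MSR):

#include <stdio.h>

struct patch { unsigned int rev; };

static unsigned int current_rev = 0x2a;   /* pretend MSR-backed revision */

static unsigned int read_current_rev(void) { return current_rev; }

static int apply_patch(const struct patch *mc)
{
        /* If a sibling already loaded an equal or newer revision, skip the
         * expensive update path and just resync the bookkeeping. */
        unsigned int rev = read_current_rev();

        if (rev >= mc->rev) {
                printf("rev 0x%x already >= patch 0x%x, skipping\n", rev, mc->rev);
                return 0;
        }
        current_rev = mc->rev;             /* the "expensive" write */
        printf("updated to rev 0x%x\n", current_rev);
        return 1;
}

int main(void)
{
        struct patch p = { .rev = 0x2a };

        apply_patch(&p);                   /* skipped */
        p.rev = 0x2b;
        apply_patch(&p);                   /* applied */
        return 0;
}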
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 2f723301eb58..38deafebb21b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -23,7 +23,7 @@
 /*
  * this changes the io permissions bitmap in the current task.
  */
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
 {
 	struct thread_struct *t = &current->thread;
 	struct tss_struct *tss;
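
The switch from a bare asmlinkage definition to SYSCALL_DEFINE3() routes ioperm() through the kernel's syscall-definition macros, which generate the entry wrapper and metadata in one place. As a rough illustration of why such macros exist, here is a drastically simplified, hypothetical macro of the same shape; it is not the kernel's implementation, which also emits type checking, tracing and compat plumbing.

#include <stdio.h>

#define MY_SYSCALL_DEFINE3(name, t1, a1, t2, a2, t3, a3)                \
        static long do_##name(t1 a1, t2 a2, t3 a3);                     \
        long sys_##name(t1 a1, t2 a2, t3 a3)                            \
        {                                                               \
                printf("enter %s\n", #name);    /* common entry hook */ \
                return do_##name(a1, a2, a3);                           \
        }                                                               \
        static long do_##name(t1 a1, t2 a2, t3 a3)

MY_SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
{
        /* placeholder body: just validate the requested range */
        if (from + num > 65536 || !turn_on)
                return -1;
        return 0;
}

int main(void)
{
        return (int)sys_ioperm(0x80, 1, 1);     /* prints "enter ioperm" */
}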
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index bd36f3c33cd0..0715f827607c 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
 
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
+	bool is_in_entry_trampoline_section = false;
+
+#ifdef CONFIG_X86_64
+	is_in_entry_trampoline_section =
+		(addr >= (unsigned long)__entry_trampoline_start &&
+		 addr < (unsigned long)__entry_trampoline_end);
+#endif
 	return	(addr >= (unsigned long)__kprobes_text_start &&
 		 addr < (unsigned long)__kprobes_text_end) ||
 		(addr >= (unsigned long)__entry_text_start &&
-		 addr < (unsigned long)__entry_text_end);
+		 addr < (unsigned long)__entry_text_end) ||
+		is_in_entry_trampoline_section;
 }
 
 int __init arch_init_kprobes(void)
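
The blacklist test above is a chain of half-open range checks, now extended with the entry-trampoline section. The same check, pulled out into a stand-alone sketch with invented addresses in place of the linker-provided section bounds:

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

static bool addr_in_ranges(unsigned long addr, const struct range *r, int n)
{
        for (int i = 0; i < n; i++)
                if (addr >= r[i].start && addr < r[i].end)  /* half-open */
                        return true;
        return false;
}

int main(void)
{
        /* e.g. kprobes text, entry text, entry trampoline */
        const struct range blacklist[] = {
                { 0x1000, 0x2000 }, { 0x8000, 0x9000 }, { 0xf000, 0x10000 },
        };

        printf("%d\n", addr_in_ranges(0xf800, blacklist, 3));  /* 1 */
        printf("%d\n", addr_in_ranges(0x3000, blacklist, 3));  /* 0 */
        return 0;
}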
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index ac057f9b0763..0d930d8987cc 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void)
 	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
 #define CHECK_CSI_OFFSET(name)	  BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8);
+
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code) != 8);
 	/*
 	 * Ensure that the size of each si_field never changes.
 	 * If it does, it is a sign that the
@@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void)
 	CHECK_CSI_SIZE  (_kill, 2*sizeof(int));
 	CHECK_SI_SIZE   (_kill, 2*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+
 	CHECK_CSI_OFFSET(_timer);
 	CHECK_CSI_SIZE  (_timer, 3*sizeof(int));
 	CHECK_SI_SIZE   (_timer, 6*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_rt);
 	CHECK_CSI_SIZE  (_rt, 3*sizeof(int));
 	CHECK_SI_SIZE   (_rt, 4*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigchld);
 	CHECK_CSI_SIZE  (_sigchld, 5*sizeof(int));
 	CHECK_SI_SIZE   (_sigchld, 8*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C);
+
 #ifdef CONFIG_X86_X32_ABI
 	CHECK_CSI_OFFSET(_sigchld_x32);
 	CHECK_CSI_SIZE  (_sigchld_x32, 7*sizeof(int));
 	/* no _sigchld_x32 in the generic siginfo_t */
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20);
 #endif
 
 	CHECK_CSI_OFFSET(_sigfault);
 	CHECK_CSI_SIZE  (_sigfault, 4*sizeof(int));
 	CHECK_SI_SIZE   (_sigfault, 8*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18);
+
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
+
 	CHECK_CSI_OFFSET(_sigpoll);
 	CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
 	CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10);
+
 	CHECK_CSI_OFFSET(_sigsys);
 	CHECK_CSI_SIZE  (_sigsys, 3*sizeof(int));
 	CHECK_SI_SIZE   (_sigsys, 4*sizeof(int));
 
+	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10);
+	BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14);
+
 	/* any new si_fields should be added here */
 }
 
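
These BUILD_BUG_ON() lines freeze the siginfo ABI at compile time: if a struct member ever moves, the build fails. Outside the kernel the same technique is available as C11 _Static_assert, as in this sketch (the struct and offsets are invented, and the expected values assume the common ABIs where int is 4 bytes):

#include <stddef.h>

struct wire_hdr {
        int signo;      /* expected at offset 0 */
        int err;        /* expected at offset 4 */
        int code;       /* expected at offset 8 */
};

/* A moved member turns into a compile error, not a silent ABI break. */
_Static_assert(offsetof(struct wire_hdr, signo) == 0, "ABI break: signo");
_Static_assert(offsetof(struct wire_hdr, err)   == 4, "ABI break: err");
_Static_assert(offsetof(struct wire_hdr, code)  == 8, "ABI break: code");

int main(void) { return 0; }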
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5edb27f1a2c4..9d0b5af7db91 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
 		return;
 
 check_vip:
-	if (VEFLAGS & X86_EFLAGS_VIP) {
+	if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+	    (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
 		save_v86_state(regs, VM86_STI);
 		return;
 	}
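
The fix above tightens an "either flag" test into a "both flags" test: the VM86_STI exit should only be taken when an interrupt is both pending (VIP) and enabled (VIF). The difference between the two predicates, with made-up flag values:

#include <stdio.h>

#define FLAG_VIP 0x1
#define FLAG_VIF 0x2

int main(void)
{
        unsigned int eflags = FLAG_VIP;            /* VIP set, VIF clear */

        /* Old test: fires whenever VIP is set, regardless of VIF. */
        int any  = (eflags & FLAG_VIP) != 0;

        /* New test: fires only when VIP and VIF are both set. */
        int both = (eflags & (FLAG_VIP | FLAG_VIF)) == (FLAG_VIP | FLAG_VIF);

        printf("any=%d both=%d\n", any, both);     /* any=1 both=0 */
        return 0;
}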
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 9b138a06c1a4..b854ebf5851b 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -118,9 +118,11 @@ SECTIONS
 
 #ifdef CONFIG_X86_64
 	. = ALIGN(PAGE_SIZE);
+	VMLINUX_SYMBOL(__entry_trampoline_start) = .;
 	_entry_trampoline = .;
 	*(.entry_trampoline)
 	. = ALIGN(PAGE_SIZE);
+	VMLINUX_SYMBOL(__entry_trampoline_end) = .;
 	ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
 #endif
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f551962ac294..763bb3bade63 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	else
 		pte_access &= ~ACC_WRITE_MASK;
 
+	if (!kvm_is_mmio_pfn(pfn))
+		spte |= shadow_me_mask;
+
 	spte |= (u64)pfn << PAGE_SHIFT;
-	spte |= shadow_me_mask;
 
 	if (pte_access & ACC_WRITE_MASK) {
 
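
The reordering above makes the memory-encryption mask conditional: MMIO pfns must not get the encryption bit, so the mask is applied only for ordinary RAM, before the pfn bits are folded into the SPTE. A sketch of the same construction with invented constants and an invented is_mmio() predicate:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12
#define ME_MASK    (1ULL << 47)          /* pretend encryption bit */

static bool is_mmio(uint64_t pfn) { return pfn >= 0x100000; }

static uint64_t make_spte(uint64_t pfn)
{
        uint64_t spte = 0;

        if (!is_mmio(pfn))               /* never encrypt MMIO mappings */
                spte |= ME_MASK;

        spte |= pfn << PAGE_SHIFT;       /* then merge in the frame bits */
        return spte;
}

int main(void)
{
        printf("ram : %#llx\n", (unsigned long long)make_spte(0x1234));
        printf("mmio: %#llx\n", (unsigned long long)make_spte(0x123456));
        return 0;
}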
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 051dab74e4e9..2d87603f9179 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info)
 		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
+/* Undocumented: icebp/int1 */
+static inline bool is_icebp(u32 intr_info)
+{
+	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
 static inline bool cpu_has_vmx_msr_bitmap(void)
 {
 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
@@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
 			vcpu->arch.dr6 &= ~15;
 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
-			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
+			if (is_icebp(intr_info))
 				skip_emulated_instruction(vcpu);
 
 			kvm_queue_exception(vcpu, DB_VECTOR);
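
is_icebp() classifies the exit by masking intr_info down to its type and valid bits and comparing against a single pattern, instead of inferring icebp from DR6 contents. The masking idiom in isolation, with an invented field layout:

#include <stdio.h>
#include <stdbool.h>

#define TYPE_MASK        (0x7u << 8)     /* pretend event-type field */
#define VALID_MASK       (1u << 31)      /* pretend valid bit */
#define TYPE_PRIV_SW_EXC (5u << 8)       /* pretend "privileged sw exception" */

static bool is_icebp_like(unsigned int info)
{
        /* Ignore every other field (vector, error code, ...) and demand an
         * exact match on type + valid. */
        return (info & (TYPE_MASK | VALID_MASK))
                == (TYPE_PRIV_SW_EXC | VALID_MASK);
}

int main(void)
{
        unsigned int ev = VALID_MASK | TYPE_PRIV_SW_EXC | 0x01; /* vector 1 */

        printf("%d\n", is_icebp_like(ev));                      /* 1 */
        printf("%d\n", is_icebp_like(ev & ~VALID_MASK));        /* 0 */
        return 0;
}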
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index c88573d90f3e..25a30b5d6582 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (!pmd_k)
 		return -1;
 
-	if (pmd_huge(*pmd_k))
+	if (pmd_large(*pmd_k))
 		return 0;
 
 	pte_k = pte_offset_kernel(pmd_k, address);
@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
 		BUG();
 
-	if (pud_huge(*pud))
+	if (pud_large(*pud))
 		return 0;
 
 	pmd = pmd_offset(pud, address);
@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
 		BUG();
 
-	if (pmd_huge(*pmd))
+	if (pmd_large(*pmd))
 		return 0;
 
 	pte_ref = pte_offset_kernel(pmd_ref, address);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 8b72923f1d35..af11a2890235 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 
 #define PAGE_INUSE 0xFD
 
-static void __meminit free_pagetable(struct page *page, int order,
-		struct vmem_altmap *altmap)
+static void __meminit free_pagetable(struct page *page, int order)
 {
 	unsigned long magic;
 	unsigned int nr_pages = 1 << order;
 
-	if (altmap) {
-		vmem_altmap_free(altmap, nr_pages);
-		return;
-	}
-
 	/* bootmem page has reserved flag */
 	if (PageReserved(page)) {
 		__ClearPageReserved(page);
@@ -826,9 +820,17 @@ static void __meminit free_pagetable(struct page *page, int order,
 		free_pages((unsigned long)page_address(page), order);
 }
 
-static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
-		struct vmem_altmap *altmap)
+static void __meminit free_hugepage_table(struct page *page,
+					  struct vmem_altmap *altmap)
 {
+	if (altmap)
+		vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
+	else
+		free_pagetable(page, get_order(PMD_SIZE));
+}
+
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+{
 	pte_t *pte;
 	int i;
 
@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
 	}
 
 	/* free a pte table */
-	free_pagetable(pmd_page(*pmd), 0, altmap);
+	free_pagetable(pmd_page(*pmd), 0);
 	spin_lock(&init_mm.page_table_lock);
 	pmd_clear(pmd);
 	spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
-		struct vmem_altmap *altmap)
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 {
 	pmd_t *pmd;
 	int i;
@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
 	}
 
 	/* free a pmd table */
-	free_pagetable(pud_page(*pud), 0, altmap);
+	free_pagetable(pud_page(*pud), 0);
 	spin_lock(&init_mm.page_table_lock);
 	pud_clear(pud);
 	spin_unlock(&init_mm.page_table_lock);
 }
 
-static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
-		struct vmem_altmap *altmap)
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
 {
 	pud_t *pud;
 	int i;
@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 	}
 
 	/* free a pud table */
-	free_pagetable(p4d_page(*p4d), 0, altmap);
+	free_pagetable(p4d_page(*p4d), 0);
 	spin_lock(&init_mm.page_table_lock);
 	p4d_clear(p4d);
 	spin_unlock(&init_mm.page_table_lock);
@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
 
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
-		 struct vmem_altmap *altmap, bool direct)
+		 bool direct)
 {
 	unsigned long next, pages = 0;
 	pte_t *pte;
@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 			 * freed when offlining, or simply not in use.
 			 */
 			if (!direct)
-				free_pagetable(pte_page(*pte), 0, altmap);
+				free_pagetable(pte_page(*pte), 0);
 
 			spin_lock(&init_mm.page_table_lock);
 			pte_clear(&init_mm, addr, pte);
@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
 
 			page_addr = page_address(pte_page(*pte));
 			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
-				free_pagetable(pte_page(*pte), 0, altmap);
+				free_pagetable(pte_page(*pte), 0);
 
 				spin_lock(&init_mm.page_table_lock);
 				pte_clear(&init_mm, addr, pte);
@@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
 			    IS_ALIGNED(next, PMD_SIZE)) {
 				if (!direct)
-					free_pagetable(pmd_page(*pmd),
-						       get_order(PMD_SIZE),
-						       altmap);
+					free_hugepage_table(pmd_page(*pmd),
+							    altmap);
 
 				spin_lock(&init_mm.page_table_lock);
 				pmd_clear(pmd);
@@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 				page_addr = page_address(pmd_page(*pmd));
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 						PMD_SIZE)) {
-					free_pagetable(pmd_page(*pmd),
-						       get_order(PMD_SIZE),
-						       altmap);
+					free_hugepage_table(pmd_page(*pmd),
+							    altmap);
 
 					spin_lock(&init_mm.page_table_lock);
 					pmd_clear(pmd);
@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
 		}
 
 		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
-		remove_pte_table(pte_base, addr, next, altmap, direct);
-		free_pte_table(pte_base, pmd, altmap);
+		remove_pte_table(pte_base, addr, next, direct);
+		free_pte_table(pte_base, pmd);
 	}
 
 	/* Call free_pmd_table() in remove_pud_table(). */
@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 			    IS_ALIGNED(next, PUD_SIZE)) {
 				if (!direct)
 					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE),
-						       altmap);
+						       get_order(PUD_SIZE));
 
 				spin_lock(&init_mm.page_table_lock);
 				pud_clear(pud);
@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 				if (!memchr_inv(page_addr, PAGE_INUSE,
 						PUD_SIZE)) {
 					free_pagetable(pud_page(*pud),
-						       get_order(PUD_SIZE),
-						       altmap);
+						       get_order(PUD_SIZE));
 
 					spin_lock(&init_mm.page_table_lock);
 					pud_clear(pud);
@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
 
 		pmd_base = pmd_offset(pud, 0);
 		remove_pmd_table(pmd_base, addr, next, direct, altmap);
-		free_pmd_table(pmd_base, pud, altmap);
+		free_pmd_table(pmd_base, pud);
 	}
 
 	if (direct)
@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
 	 * to adapt for boot-time switching between 4 and 5 level page tables.
 	 */
 	if (CONFIG_PGTABLE_LEVELS == 5)
-		free_pud_table(pud_base, p4d, altmap);
+		free_pud_table(pud_base, p4d);
 	}
 
 	if (direct)
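
The init_64.c refactor funnels huge-page frees through free_hugepage_table(), which picks between the altmap-backed and ordinary deallocators at one call site, while plain page-table pages always take the normal path. The dispatch shape in stand-alone C, with both backends and the altmap type invented for the sketch:

#include <stdio.h>
#include <stdlib.h>

struct altmap { long nr_free; };          /* stand-in for vmem_altmap */

static void altmap_free(struct altmap *a, long pages)
{
        a->nr_free += pages;              /* return pages to the altmap pool */
}

static void page_free(void *page)
{
        free(page);                       /* normal allocator path */
}

/* One call site, two ownership models: device-provided vs. kernel memory. */
static void free_hugepage(void *page, long pages, struct altmap *altmap)
{
        if (altmap)
                altmap_free(altmap, pages);
        else
                page_free(page);
}

int main(void)
{
        struct altmap a = { 0 };

        free_hugepage(NULL, 512, &a);         /* altmap-backed */
        free_hugepage(malloc(64), 512, NULL); /* regular memory */
        printf("altmap now holds %ld pages\n", a.nr_free);
        return 0;
}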
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 004abf9ebf12..34cda7e0551b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)
 
 	return 0;
 }
+
+/**
+ * pud_free_pmd_page - Clear pud entry and free pmd page.
+ * @pud: Pointer to a PUD.
+ *
+ * Context: The pud range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pud_free_pmd_page(pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	if (pud_none(*pud))
+		return 1;
+
+	pmd = (pmd_t *)pud_page_vaddr(*pud);
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		if (!pmd_free_pte_page(&pmd[i]))
+			return 0;
+
+	pud_clear(pud);
+	free_page((unsigned long)pmd);
+
+	return 1;
+}
+
+/**
+ * pmd_free_pte_page - Clear pmd entry and free pte page.
+ * @pmd: Pointer to a PMD.
+ *
+ * Context: The pmd range has been unmapped and TLB purged.
+ * Return: 1 if clearing the entry succeeded. 0 otherwise.
+ */
+int pmd_free_pte_page(pmd_t *pmd)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd))
+		return 1;
+
+	pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pmd_clear(pmd);
+	free_page((unsigned long)pte);
+
+	return 1;
+}
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
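
pud_free_pmd_page() and pmd_free_pte_page() tear a two-level mapping down bottom-up: empty every lower-level entry, clear this level's entry, then free the page that held the lower table. A user-space sketch of the same shape (the entry type, ENTRIES and the allocators are invented):

#include <stdlib.h>

#define ENTRIES 512

typedef struct { void *table; } entry_t;

static int free_leaf(entry_t *e)             /* ~ pmd_free_pte_page() */
{
        if (!e->table)
                return 1;                    /* nothing mapped: success */
        void *pte = e->table;
        e->table = NULL;                     /* clear the entry first */
        free(pte);                           /* then free the lower page */
        return 1;
}

static int free_mid(entry_t *e)              /* ~ pud_free_pmd_page() */
{
        if (!e->table)
                return 1;
        entry_t *pmd = e->table;
        for (int i = 0; i < ENTRIES; i++)    /* empty every slot below */
                if (!free_leaf(&pmd[i]))
                        return 0;
        e->table = NULL;
        free(pmd);
        return 1;
}

int main(void)
{
        entry_t pud = { calloc(ENTRIES, sizeof(entry_t)) };

        ((entry_t *)pud.table)[3].table = calloc(ENTRIES, sizeof(entry_t));
        return free_mid(&pud) ? 0 : 1;
}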
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index ce38f165489b..631507f0c198 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -332,7 +332,7 @@ static void __init pti_clone_user_shared(void)
 }
 
 /*
- * Clone the ESPFIX P4D into the user space visinble page table
+ * Clone the ESPFIX P4D into the user space visible page table
  */
 static void __init pti_setup_espfix64(void)
 {
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eb661fff94d7..b725154182cc 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1223,7 +1223,7 @@ skip_init_addrs:
 	 * may converge on the last pass. In such case do one more
 	 * pass to emit the final image
 	 */
-	for (pass = 0; pass < 10 || image; pass++) {
+	for (pass = 0; pass < 20 || image; pass++) {
 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
 		if (proglen <= 0) {
 			image = NULL;
@@ -1250,6 +1250,7 @@ skip_init_addrs:
 			}
 		}
 		oldproglen = proglen;
+		cond_resched();
 	}
 
 	if (bpf_jit_enable > 1)
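
Raising the pass limit from 10 to 20 matters because the JIT loops until the emitted image size reaches a fixed point, and some programs converge slowly; cond_resched() keeps the long loop preemptible. A toy fixed-point loop with the same shape (the sizes and the shrink rule are invented):

#include <stdio.h>

#define MAX_PASSES 20

int main(void)
{
        int proglen = 1000, oldproglen = 0;

        for (int pass = 0; pass < MAX_PASSES; pass++) {
                /* one "JIT pass": the image shrinks toward a floor as
                 * shorter instruction encodings become usable */
                proglen = oldproglen ? (oldproglen + 600) / 2 : proglen;

                if (proglen == oldproglen) {
                        printf("converged after %d passes at %d bytes\n",
                               pass, proglen);
                        return 0;
                }
                oldproglen = proglen;
        }
        printf("did not converge within %d passes\n", MAX_PASSES);
        return 1;
}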