Diffstat (limited to 'arch/x86/kernel/process.c')
-rw-r--r--	arch/x86/kernel/process.c	| 101
1 file changed, 82 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c93fcfdf1673..7d31192296a8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,6 +40,8 @@
 #include <asm/prctl.h>
 #include <asm/spec-ctrl.h>
 
+#include "process.h"
+
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -252,11 +254,12 @@ void arch_setup_new_exec(void)
 		enable_cpuid();
 }
 
-static inline void switch_to_bitmap(struct tss_struct *tss,
-				    struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
 				    struct thread_struct *next,
 				    unsigned long tifp, unsigned long tifn)
 {
+	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
 	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
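
The hunk above drops the tss parameter from switch_to_bitmap() and has the function fetch the per-CPU TSS itself at the point of use. A minimal user-space sketch of that refactoring pattern, using C11 thread-local storage as a stand-in for a per-CPU variable; cpu_state, use_state_old() and use_state_new() are hypothetical names, not kernel code:

#include <stdio.h>

struct cpu_state { int io_bitmap_max; };

/* One instance per thread, standing in for a per-CPU variable. */
static _Thread_local struct cpu_state cpu_state;

/* Before: every caller had to thread the pointer through. */
static void use_state_old(struct cpu_state *st)
{
	st->io_bitmap_max = 8;
}

/* After: the callee looks up its own instance, the way the new
 * switch_to_bitmap() calls this_cpu_ptr(&cpu_tss_rw) itself. */
static void use_state_new(void)
{
	struct cpu_state *st = &cpu_state;	/* this_cpu_ptr() analogue */
	st->io_bitmap_max = 8;
}

int main(void)
{
	use_state_old(&cpu_state);
	use_state_new();
	printf("io_bitmap_max = %d\n", cpu_state.io_bitmap_max);
	return 0;
}

The payoff is the same in both settings: callers no longer need to know where the state lives, which is what lets the next hunks delete the tss argument from __switch_to_xtra() as well.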
@@ -395,32 +398,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
 	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+/*
+ * Update the MSRs managing speculation control, during context switch.
+ *
+ * tifp: Previous task's thread flags
+ * tifn: Next task's thread flags
+ */
+static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+						      unsigned long tifn)
 {
-	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+	unsigned long tif_diff = tifp ^ tifn;
+	u64 msr = x86_spec_ctrl_base;
+	bool updmsr = false;
+
+	/*
+	 * If TIF_SSBD is different, select the proper mitigation
+	 * method. Note that if SSBD mitigation is disabled or permanently
+	 * enabled this branch can't be taken because nothing can set
+	 * TIF_SSBD.
+	 */
+	if (tif_diff & _TIF_SSBD) {
+		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+			amd_set_ssb_virt_state(tifn);
+		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+			amd_set_core_ssb_state(tifn);
+		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+			msr |= ssbd_tif_to_spec_ctrl(tifn);
+			updmsr = true;
+		}
+	}
+
+	/*
+	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+	 * otherwise avoid the MSR write.
+	 */
+	if (IS_ENABLED(CONFIG_SMP) &&
+	    static_branch_unlikely(&switch_to_cond_stibp)) {
+		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+		msr |= stibp_tif_to_spec_ctrl(tifn);
+	}
 
-	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+	if (updmsr)
+		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 {
-	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-		amd_set_ssb_virt_state(tifn);
-	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-		amd_set_core_ssb_state(tifn);
-	else
-		intel_set_ssb_state(tifn);
+	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
+		if (task_spec_ssb_disable(tsk))
+			set_tsk_thread_flag(tsk, TIF_SSBD);
+		else
+			clear_tsk_thread_flag(tsk, TIF_SSBD);
+
+		if (task_spec_ib_disable(tsk))
+			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
+		else
+			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
+	}
+	/* Return the updated threadinfo flags */
+	return task_thread_info(tsk)->flags;
 }
 
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
 {
+	/* Forced update. Make sure all relevant TIF flags are different */
 	preempt_disable();
-	__speculative_store_bypass_update(tif);
+	__speculation_ctrl_update(~tif, tif);
 	preempt_enable();
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+/* Called from seccomp/prctl update */
+void speculation_ctrl_update_current(void)
+{
+	preempt_disable();
+	speculation_ctrl_update(speculation_ctrl_update_tif(current));
+	preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev, *next;
 	unsigned long tifp, tifn;
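
The rewritten update path keys everything off tif_diff = tifp ^ tifn, and speculation_ctrl_update() passes ~tif as the "previous" flags so that every bit of the diff reads as changed. A small stand-alone C sketch of that bit trick; the TIF bit positions here are illustrative, not the kernel's actual numbering:

#include <stdio.h>

#define TIF_SSBD_BIT	(1UL << 0)	/* illustrative positions only, */
#define TIF_SPEC_IB_BIT	(1UL << 1)	/* not the kernel's numbering   */

int main(void)
{
	unsigned long tifp = TIF_SSBD_BIT;	/* previous task: SSBD set    */
	unsigned long tifn = TIF_SPEC_IB_BIT;	/* next task: SPEC_IB set     */

	/* Normal context switch: only bits that differ are evaluated. */
	unsigned long tif_diff = tifp ^ tifn;
	printf("tif_diff    = %#lx\n", tif_diff);	/* 0x3: both differ */

	/* Forced update, as in __speculation_ctrl_update(~tif, tif):
	 * ~tifn ^ tifn is all ones, so every flag looks "changed" and
	 * the whole mitigation state is re-evaluated against tifn. */
	printf("forced diff = %#lx\n", ~tifn ^ tifn);	/* all bits set */
	return 0;
}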
@@ -430,7 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
 	tifn = READ_ONCE(task_thread_info(next_p)->flags);
 	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
-	switch_to_bitmap(tss, prev, next, tifp, tifn);
+	switch_to_bitmap(prev, next, tifp, tifn);
 
 	propagate_user_return_notify(prev_p, next_p);
 
@@ -451,8 +507,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 	if ((tifp ^ tifn) & _TIF_NOCPUID)
 		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-	if ((tifp ^ tifn) & _TIF_SSBD)
-		__speculative_store_bypass_update(tifn);
+	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
+		__speculation_ctrl_update(tifp, tifn);
+	} else {
+		speculation_ctrl_update_tif(prev_p);
+		tifn = speculation_ctrl_update_tif(next_p);
+
+		/* Enforce MSR update to ensure consistent state */
+		__speculation_ctrl_update(~tifn, tifn);
+	}
 }
 
 /*
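
For reference, the MSR value that __speculation_ctrl_update() conditionally writes is composed from a base value plus per-task bits, with the write skipped when nothing relevant changed. A hedged sketch of that composition: SPEC_CTRL_STIBP and SPEC_CTRL_SSBD use the architectural IA32_SPEC_CTRL bit positions, while ssbd_bit() and stibp_bit() are hypothetical stand-ins for the kernel's ssbd_tif_to_spec_ctrl()/stibp_tif_to_spec_ctrl() helpers:

#include <stdbool.h>
#include <stdio.h>

#define SPEC_CTRL_STIBP	(1ULL << 1)	/* architectural STIBP bit */
#define SPEC_CTRL_SSBD	(1ULL << 2)	/* architectural SSBD bit  */

/* Hypothetical stand-ins for the kernel's TIF-to-MSR-bit translators. */
static unsigned long long ssbd_bit(bool on)  { return on ? SPEC_CTRL_SSBD  : 0; }
static unsigned long long stibp_bit(bool on) { return on ? SPEC_CTRL_STIBP : 0; }

int main(void)
{
	unsigned long long base = 0;	/* x86_spec_ctrl_base analogue    */
	bool next_ssbd = true, next_stibp = false;
	bool updmsr = true;		/* e.g. _TIF_SSBD differed        */

	/* Mirror of __speculation_ctrl_update(): accumulate the next
	 * task's bits, then write the MSR only if something changed. */
	unsigned long long msr = base | ssbd_bit(next_ssbd) | stibp_bit(next_stibp);

	if (updmsr)
		printf("wrmsrl(MSR_IA32_SPEC_CTRL, %#llx)\n", msr);
	return 0;
}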