 arch/x86/include/asm/switch_to.h |  3
 arch/x86/kernel/process.c        | 12
 arch/x86/kernel/process.h        | 24
 arch/x86/kernel/process_32.c     | 10
 arch/x86/kernel/process_64.c     | 10
 5 files changed, 37 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 36bd243843d6..7cf1a270d891 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss);
 
 /* This runs runs on the previous thread's stack. */
 static inline void prepare_switch_to(struct task_struct *next)
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 574b144d2b53..cdf8e6694f71 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -40,6 +40,8 @@
 #include <asm/prctl.h>
 #include <asm/spec-ctrl.h>
 
+#include "process.h"
+
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -252,11 +254,12 @@ void arch_setup_new_exec(void)
 	enable_cpuid();
 }
 
-static inline void switch_to_bitmap(struct tss_struct *tss,
-				    struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
 				    struct thread_struct *next,
 				    unsigned long tifp, unsigned long tifn)
 {
+	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
 	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
@@ -448,8 +451,7 @@ void speculation_ctrl_update(unsigned long tif)
 	preempt_enable();
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev, *next;
 	unsigned long tifp, tifn;
@@ -459,7 +461,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
 	tifn = READ_ONCE(task_thread_info(next_p)->flags);
 	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
-	switch_to_bitmap(tss, prev, next, tifp, tifn);
+	switch_to_bitmap(prev, next, tifp, tifn);
 
 	propagate_user_return_notify(prev_p, next_p);
 
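Editor's note, not part of the patch: the process.c hunks move the TSS lookup out of the callers and into switch_to_bitmap() itself. The callers used to fetch the per-CPU TSS with &per_cpu(cpu_tss_rw, cpu), where cpu came from smp_processor_id(); the helper now uses this_cpu_ptr(&cpu_tss_rw), which names the same object because preemption is disabled during a context switch. A minimal sketch of that equivalence (example_get_cpu_tss is a hypothetical name used only for illustration):

static inline struct tss_struct *example_get_cpu_tss(void)
{
	/* The old call sites' form: look up the per-CPU TSS by CPU number. */
	struct tss_struct *old_way = &per_cpu(cpu_tss_rw, smp_processor_id());
	/* The new form inside switch_to_bitmap(): same object, no cpu argument. */
	struct tss_struct *new_way = this_cpu_ptr(&cpu_tss_rw);

	WARN_ON_ONCE(old_way != new_way);	/* both must point at this CPU's TSS */
	return new_way;
}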
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 000000000000..020fbfac3a27
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Code shared between 32 and 64 bit
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+/*
+ * This needs to be inline to optimize for the common case where no extra
+ * work needs to be done.
+ */
+static inline void switch_to_extra(struct task_struct *prev,
+				   struct task_struct *next)
+{
+	unsigned long next_tif = task_thread_info(next)->flags;
+	unsigned long prev_tif = task_thread_info(prev)->flags;
+
+	/*
+	 * __switch_to_xtra() handles debug registers, i/o bitmaps,
+	 * speculation mitigations etc.
+	 */
+	if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
+		     prev_tif & _TIF_WORK_CTXSW_PREV))
+		__switch_to_xtra(prev, next);
+}
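Editor's note, not part of the patch: the new header's switch_to_extra() is the single call made from both __switch_to() variants, as the process_32.c and process_64.c hunks below show. The fast path stays two thread-flag loads plus a test, and the out-of-line __switch_to_xtra() is only reached when a _TIF_WORK_CTXSW_* bit is set. A condensed, hypothetical call-site sketch (example_switch_to stands in for the real __switch_to and elides everything unrelated):

struct task_struct *example_switch_to(struct task_struct *prev_p,
				      struct task_struct *next_p)
{
	/* ... segment, FPU and stack switching elided ... */

	switch_to_extra(prev_p, next_p);	/* cheap when no extra work is pending */

	/* ... remaining per-arch switch work elided ... */
	return prev_p;
}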
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 5046a3c9dec2..d3e593eb189f 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,6 +59,8 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
+#include "process.h"
+
 void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
@@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
 		set_iopl_mask(next->iopl);
 
-	/*
-	 * Now maybe handle debug registers and/or IO bitmaps
-	 */
-	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
-		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
-		__switch_to_xtra(prev_p, next_p, tss);
+	switch_to_extra(prev_p, next_p);
 
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0e0b4288a4b2..bbfbf017065c 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -60,6 +60,8 @@
 #include <asm/unistd_32_ia32.h>
 #endif
 
+#include "process.h"
+
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
@@ -553,7 +555,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
 		     this_cpu_read(irq_count) != -1);
@@ -617,12 +618,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Reload sp0. */
 	update_task_stack(next_p);
 
-	/*
-	 * Now maybe reload the debug registers and handle I/O bitmaps
-	 */
-	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
-		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
-		__switch_to_xtra(prev_p, next_p, tss);
+	switch_to_extra(prev_p, next_p);
 
 #ifdef CONFIG_XEN_PV
 	/*
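Editor's note, not part of the patch: with both call sites converted, the 32-bit and 64-bit paths use the same condition again. Substituting the inline from arch/x86/kernel/process.h shows that the new call expands to essentially the code removed above, minus the tss argument (example_expanded_switch_to_extra is a hypothetical name for illustration only):

static __always_inline void example_expanded_switch_to_extra(struct task_struct *prev_p,
							      struct task_struct *next_p)
{
	unsigned long next_tif = task_thread_info(next_p)->flags;
	unsigned long prev_tif = task_thread_info(prev_p)->flags;

	/* Same condition the removed open-coded checks tested. */
	if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
		     prev_tif & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p);
}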
