author		Andy Lutomirski <luto@kernel.org>	2017-02-22 10:36:16 -0500
committer	Radim Krčmář <rkrcmar@redhat.com>	2017-03-01 11:03:22 -0500
commit		b7ceaec112aa35aa287325754d8c52b8304892cd (patch)
tree		af5014f643adf8db9d82178bfb22f68062ed41c2
parent		e3736c3eb3a6f7c0966923b629c9f92b558aa9c7 (diff)
x86/asm: Tidy up TSS limit code
In an earlier version of the patch ("x86/kvm/vmx: Defer TR reload
after VM exit") that introduced TSS limit validity tracking, I
confused which helper was which. On reflection, the names I chose
sucked. Rename the helpers to make it more obvious what's going on
and add some comments.
While I'm at it, clear __tss_limit_invalid when force-reloading as
well as when conditionally reloading, since any TR reload fixes the
limit.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
 arch/x86/include/asm/desc.h | 18 +++++++++++-------
 arch/x86/kernel/ioport.c    |  8 +++++++-
 arch/x86/kernel/process.c   |  6 +++---
 3 files changed, 21 insertions(+), 11 deletions(-)
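For context while reading the diff below, here is a minimal user-space sketch of the lazy-invalidation scheme the renamed helpers implement. It is a hypothetical simplification: a plain bool stands in for the kernel's per-CPU __tss_limit_invalid, a printf stands in for the real GDT-rewrite-plus-LTR reload, and the eager TIF_IO_BITMAP path of invalidate_tss_limit() is omitted.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-CPU __tss_limit_invalid flag (one bool, no SMP). */
static bool tss_limit_invalid;

/*
 * Stand-in for the real reload, which rewrites the GDT entry and runs LTR.
 * Any TR reload fixes the limit, so the flag is cleared here too -- the
 * behavioral tweak the commit message describes.
 */
static void force_reload_tr(void)
{
	printf("TR reloaded (expensive)\n");
	tss_limit_invalid = false;
}

/* Cheap check: reload only if someone marked the limit stale. */
static void refresh_tss_limit(void)
{
	if (tss_limit_invalid)
		force_reload_tr();
}

/* Mark the limit stale, e.g. after a VM exit truncates it. */
static void invalidate_tss_limit(void)
{
	tss_limit_invalid = true;
}

int main(void)
{
	refresh_tss_limit();	/* flag clear: no reload */
	invalidate_tss_limit();
	refresh_tss_limit();	/* stale: one reload, flag cleared */
	refresh_tss_limit();	/* clear again: no reload */
	return 0;
}
```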
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index cb8f9149f6c8..1548ca92ad3f 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -205,6 +205,8 @@ static inline void native_load_tr_desc(void)
 	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
 }
 
+DECLARE_PER_CPU(bool, __tss_limit_invalid);
+
 static inline void force_reload_TR(void)
 {
 	struct desc_struct *d = get_cpu_gdt_table(smp_processor_id());
@@ -220,18 +222,20 @@ static inline void force_reload_TR(void)
 	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
 	load_TR_desc();
+	this_cpu_write(__tss_limit_invalid, false);
 }
 
-DECLARE_PER_CPU(bool, need_tr_refresh);
-
-static inline void refresh_TR(void)
+/*
+ * Call this if you need the TSS limit to be correct, which should be the case
+ * if and only if you have TIF_IO_BITMAP set or you're switching to a task
+ * with TIF_IO_BITMAP set.
+ */
+static inline void refresh_tss_limit(void)
 {
 	DEBUG_LOCKS_WARN_ON(preemptible());
 
-	if (unlikely(this_cpu_read(need_tr_refresh))) {
+	if (unlikely(this_cpu_read(__tss_limit_invalid)))
 		force_reload_TR();
-		this_cpu_write(need_tr_refresh, false);
-	}
 }
 
 /*
@@ -250,7 +254,7 @@ static inline void invalidate_tss_limit(void)
 	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
 		force_reload_TR();
 	else
-		this_cpu_write(need_tr_refresh, true);
+		this_cpu_write(__tss_limit_invalid, true);
 }
 
 static inline void native_load_gdt(const struct desc_ptr *dtr)
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index b01bc8517450..875d3d25dd6a 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -47,8 +47,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 		t->io_bitmap_ptr = bitmap;
 		set_thread_flag(TIF_IO_BITMAP);
 
+		/*
+		 * Now that we have an IO bitmap, we need our TSS limit to be
+		 * correct. It's fine if we are preempted after doing this:
+		 * with TIF_IO_BITMAP set, context switches will keep our TSS
+		 * limit correct.
+		 */
 		preempt_disable();
-		refresh_TR();
+		refresh_tss_limit();
 		preempt_enable();
 	}
 
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 7780efa635b9..0b302591b51f 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -65,8 +65,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 };
 EXPORT_PER_CPU_SYMBOL(cpu_tss);
 
-DEFINE_PER_CPU(bool, need_tr_refresh);
-EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
+DEFINE_PER_CPU(bool, __tss_limit_invalid);
+EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
 
 /*
  * this gets called so that we can store lazy state into memory and copy the
@@ -218,7 +218,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		 * Make sure that the TSS limit is correct for the CPU
 		 * to notice the IO bitmap.
 		 */
-		refresh_TR();
+		refresh_tss_limit();
 	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
 		/*
 		 * Clear any possible leftover bits:
