author     Andrea Arcangeli <andrea@suse.de>           2005-06-27 17:36:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-27 18:11:44 -0400
commit     ffaa8bd6c904d1ab79b677905067349a5ff51d84
tree       ec7960440a7d7700e15bf2e34453db448b808c5e
parent     6ae3db110e62b0846aae1b5c6e661484ee3a5ed1
[PATCH] seccomp: tsc disable
I believe at least for seccomp it's worth turning off the TSC, not just because of HT but because of the L2 cache too. So it's up to you: either turn it off completely (which isn't very nice IMHO), or apply the patch below, which is what I recommend.

This has been tested successfully on x86-64 against the current cogito repository (i686 compiles, so I didn't bother testing ;). People selling CPU time through cpushare may appreciate this bit for peace of mind. With this applied there's no way to get any timing info anymore (gettimeofday is forbidden, of course).

The seccomp environment is completely deterministic, so it can't be allowed to get timing info. It has to be deterministic so that in the future I can enable a computing mode that runs each task in parallel, with server-side transparent checkpointing and verification that the output is the same from all of the 2/3 seller computers for each task, without the buyer even noticing. (For now the verification is left to the buyer's client side and there's no checkpointing, since that would require more kernel changes to track the dirty bits, but it'll be easy to extend once the basic mode is finished.)

Eliminating the cold-cache read of the cr4 global variable saves one cacheline during the tlb flush while making the code per-cpu-safe at the same time. Thanks to Mikael Pettersson for noticing the tlb flush wasn't per-cpu-safe.

The global tlb flush can run from irq context (the IPI calling do_flush_tlb_all), but it'll be transparent to the switch_to code, since the IPI makes no change to the cr4 contents from the point of view of the interrupted code, and since it's now all per-cpu, it cannot race. So there's no need to disable irqs in the switch_to slow path.

Signed-off-by: Andrea Arcangeli <andrea@cpushare.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
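To make the effect concrete, here is a minimal user-space sketch of what the patch enforces (my example, not part of the submission). It assumes the original /proc/self/seccomp enable interface of this era and an x86/x86-64 CPU; note that the TSD bit is applied by __switch_to, so it takes effect from the next context switch onward:

/* tsc_denied.c - hypothetical demo, not part of the patch.
 * Build: gcc -o tsc_denied tsc_denied.c */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned int lo, hi;
	int fd = open("/proc/self/seccomp", O_WRONLY);

	if (fd < 0)
		return 1;		/* seccomp not available */
	write(fd, "1", 1);		/* enter seccomp mode 1 */

	/* Only read/write/exit/sigreturn pass seccomp from here on;
	 * once this task is switched in with CR4.TSD set, rdtsc at
	 * CPL 3 raises a general protection fault -> SIGSEGV: */
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));

	return 0;			/* not reached once TSD is set */
}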
 arch/i386/kernel/process.c    | 29 ++++++++++++++++++++++++++
 arch/x86_64/kernel/process.c  | 29 ++++++++++++++++++++++++++
 include/asm-i386/tlbflush.h   | 12 +++++++-----
 include/asm-x86_64/tlbflush.h | 12 +++++++-----
 include/linux/seccomp.h       | 10 ++++++++++
 5 files changed, 82 insertions(+), 10 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 5f8cfa6b7940..ba243a4cc119 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -617,6 +617,33 @@ handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
 }
 
 /*
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+			       struct task_struct *next_p)
+{
+	struct thread_info *prev, *next;
+
+	/*
+	 * gcc should eliminate the ->thread_info dereference if
+	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
+	 */
+	prev = prev_p->thread_info;
+	next = next_p->thread_info;
+
+	if (has_secure_computing(prev) || has_secure_computing(next)) {
+		/* slow path here */
+		if (has_secure_computing(prev) &&
+		    !has_secure_computing(next)) {
+			write_cr4(read_cr4() & ~X86_CR4_TSD);
+		} else if (!has_secure_computing(prev) &&
+			   has_secure_computing(next))
+			write_cr4(read_cr4() | X86_CR4_TSD);
+	}
+}
+
+/*
  * switch_to(x,yn) should switch tasks from x to y.
  *
  * We fsave/fwait so that an exception goes off at the right time
@@ -695,6 +722,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
 		handle_io_bitmap(next, tss);
 
+	disable_tsc(prev_p, next_p);
+
 	return prev_p;
 }
 
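A note on the shape of disable_tsc() above (my sketch, not part of the patch): only transitions between a seccomp and a non-seccomp task write cr4. Switches where both tasks agree never touch the register, and with SECCOMP=n the stub's constant 0 return lets gcc drop the function entirely. The decision logic, modeled standalone:

/* Standalone model of disable_tsc()'s decision (sketch only, not
 * the kernel code): -1 means clear X86_CR4_TSD, +1 means set it,
 * 0 means leave cr4 alone. */
static int tsd_action(int prev_seccomp, int next_seccomp)
{
	if (prev_seccomp && !next_seccomp)
		return -1;	/* leaving seccomp: TSC allowed again */
	if (!prev_seccomp && next_seccomp)
		return +1;	/* entering seccomp: TSC denied */
	return 0;		/* fast path: cr4 untouched */
}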
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c
index 1d91271796e5..7577f9d7a75d 100644
--- a/arch/x86_64/kernel/process.c
+++ b/arch/x86_64/kernel/process.c
@@ -482,6 +482,33 @@ out:
 }
 
 /*
+ * This function selects if the context switch from prev to next
+ * has to tweak the TSC disable bit in the cr4.
+ */
+static inline void disable_tsc(struct task_struct *prev_p,
+			       struct task_struct *next_p)
+{
+	struct thread_info *prev, *next;
+
+	/*
+	 * gcc should eliminate the ->thread_info dereference if
+	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
+	 */
+	prev = prev_p->thread_info;
+	next = next_p->thread_info;
+
+	if (has_secure_computing(prev) || has_secure_computing(next)) {
+		/* slow path here */
+		if (has_secure_computing(prev) &&
+		    !has_secure_computing(next)) {
+			write_cr4(read_cr4() & ~X86_CR4_TSD);
+		} else if (!has_secure_computing(prev) &&
+			   has_secure_computing(next))
+			write_cr4(read_cr4() | X86_CR4_TSD);
+	}
+}
+
+/*
  * This special macro can be used to load a debugging register
  */
 #define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)
@@ -599,6 +626,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 		}
 	}
 
+	disable_tsc(prev_p, next_p);
+
 	return prev_p;
 }
 
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index f22fab0cea26..ab216e1370ef 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -22,16 +22,18 @@
  */
 #define __flush_tlb_global()						\
 	do {								\
-		unsigned int tmpreg;					\
+		unsigned int tmpreg, cr4, cr4_orig;			\
 									\
 		__asm__ __volatile__(					\
-			"movl %1, %%cr4;  # turn off PGE     \n"	\
+			"movl %%cr4, %2;  # turn off PGE     \n"	\
+			"movl %2, %1;                        \n"	\
+			"andl %3, %1;                        \n"	\
+			"movl %1, %%cr4;                     \n"	\
 			"movl %%cr3, %0;                     \n"	\
 			"movl %0, %%cr3;  # flush TLB        \n"	\
 			"movl %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg)				\
-			: "r" (mmu_cr4_features & ~X86_CR4_PGE),	\
-			  "r" (mmu_cr4_features)			\
+			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
+			: "i" (~X86_CR4_PGE)				\
 			: "memory");					\
 	} while (0)
 
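Spelled out in C for readers who don't enjoy asm constraints, the new macro body is equivalent to the following sketch (illustrative only: the real code must stay in a single asm block so nothing runs between the cr4 writes while PGE is off, and read_cr3()/write_cr3() are assumed helper names, not necessarily identifiers available in this tree):

/* C-level sketch of the new __flush_tlb_global(). The old code
 * restored the boot-time mmu_cr4_features global, which would have
 * wiped a TSD bit set for a seccomp task on this cpu; saving and
 * restoring the live cr4 keeps the flush per-cpu-safe. */
static inline void flush_tlb_global_sketch(void)
{
	unsigned long cr4_orig = read_cr4();	/* live per-cpu value, TSD included */

	write_cr4(cr4_orig & ~X86_CR4_PGE);	/* turn off PGE so global entries flush */
	write_cr3(read_cr3());			/* reload cr3: flush the TLB */
	write_cr4(cr4_orig);			/* turn PGE back on, TSD preserved */
}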
diff --git a/include/asm-x86_64/tlbflush.h b/include/asm-x86_64/tlbflush.h
index 2e811ac262af..061742382520 100644
--- a/include/asm-x86_64/tlbflush.h
+++ b/include/asm-x86_64/tlbflush.h
@@ -22,16 +22,18 @@
  */
 #define __flush_tlb_global()						\
 	do {								\
-		unsigned long tmpreg;					\
+		unsigned long tmpreg, cr4, cr4_orig;			\
 									\
 		__asm__ __volatile__(					\
-			"movq %1, %%cr4;  # turn off PGE     \n"	\
+			"movq %%cr4, %2;  # turn off PGE     \n"	\
+			"movq %2, %1;                        \n"	\
+			"andq %3, %1;                        \n"	\
+			"movq %1, %%cr4;                     \n"	\
 			"movq %%cr3, %0;  # flush TLB        \n"	\
 			"movq %0, %%cr3;                     \n"	\
 			"movq %2, %%cr4;  # turn PGE back on \n"	\
-			: "=&r" (tmpreg)				\
-			: "r" (mmu_cr4_features & ~X86_CR4_PGE),	\
-			  "r" (mmu_cr4_features)			\
+			: "=&r" (tmpreg), "=&r" (cr4), "=&r" (cr4_orig)	\
+			: "i" (~X86_CR4_PGE)				\
 			: "memory");					\
 	} while (0)
 
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 3a2702bbb1d6..dc89116bb1ca 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -19,6 +19,11 @@ static inline void secure_computing(int this_syscall)
 		__secure_computing(this_syscall);
 }
 
+static inline int has_secure_computing(struct thread_info *ti)
+{
+	return unlikely(test_ti_thread_flag(ti, TIF_SECCOMP));
+}
+
 #else /* CONFIG_SECCOMP */
 
 #if (__GNUC__ > 2)
@@ -28,6 +33,11 @@ static inline void secure_computing(int this_syscall)
 #endif
 
 #define secure_computing(x) do { } while (0)
+/* static inline to preserve typechecking */
+static inline int has_secure_computing(struct thread_info *ti)
+{
+	return 0;
+}
 
 #endif /* CONFIG_SECCOMP */
 
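One design note on that SECCOMP=n stub (my observation, not from the changelog): a static inline, unlike a #define to 0, still typechecks its thread_info argument while constant-folding to 0, so callers such as disable_tsc() compile to nothing. A minimal compile-time sketch:

struct thread_info;	/* opaque here; the sketch only needs the type */

/* SECCOMP=n stub, as in the hunk above: always 0, but typed. */
static inline int has_secure_computing(struct thread_info *ti)
{
	return 0;
}

static inline void caller_sketch(struct thread_info *prev,
				 struct thread_info *next)
{
	/* gcc sees 0 || 0 and deletes the branch, so the context
	 * switch fast path costs nothing with SECCOMP=n: */
	if (has_secure_computing(prev) || has_secure_computing(next)) {
		/* slow path would go here */
	}
}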