author		Andrea Arcangeli <andrea@cpushare.com>		2007-07-16 02:41:33 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-16 12:05:50 -0400
commit		cf99abace7e07dd8491e7093a9a9ef11d48838ed (patch)
tree		3b7cfd7c76c2c43e6ae3fdaaff3a50a752072424 /arch/i386
parent		1d9d02feeee89e9132034d504c9a45eeaf618a3d (diff)
make seccomp zerocost in schedule
This follows a suggestion from Chuck Ebbert on how to make seccomp truly
zero-cost in schedule() as well: the cr4 TSC-disable bit is now tracked by a
TIF_NOTSC thread flag, so the context-switch path only pays for it through
the _TIF_WORK_CTXSW test it already performs. The only remaining footprint
of seccomp is the bzImage size, which becomes a few bytes (perhaps even a
few kbytes) larger; measure it if you care about embedded systems.
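Since the diffstat below is limited to arch/i386, the TIF_NOTSC bit and the
_TIF_WORK_CTXSW_PREV/_TIF_WORK_CTXSW_NEXT masks tested by the new code come
from a companion header change not shown here. A plausible sketch of that
asm-i386/thread_info.h side (the bit number and exact mask contents are
assumptions, not taken from this diff):

	#define TIF_NOTSC	20	/* TSC is not accessible in userland */
	#define _TIF_NOTSC	(1 << TIF_NOTSC)

	/*
	 * Flags to check in __switch_to(): folding _TIF_NOTSC into masks
	 * the scheduler already tests is what makes this zero-cost for
	 * tasks with none of these bits set.  Debug registers only need
	 * reloading for the incoming task, hence the asymmetry.
	 */
	#define _TIF_WORK_CTXSW_NEXT	(_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG)
	#define _TIF_WORK_CTXSW_PREV	(_TIF_IO_BITMAP | _TIF_NOTSC)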
Signed-off-by: Andrea Arcangeli <andrea@cpushare.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/kernel/process.c	73
1 file changed, 39 insertions(+), 34 deletions(-)
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index 06dfa65ad180..6c49acb96982 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -538,8 +538,31 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 	return 1;
 }
 
-static noinline void __switch_to_xtra(struct task_struct *next_p,
-				      struct tss_struct *tss)
+#ifdef CONFIG_SECCOMP
+void hard_disable_TSC(void)
+{
+	write_cr4(read_cr4() | X86_CR4_TSD);
+}
+void disable_TSC(void)
+{
+	preempt_disable();
+	if (!test_and_set_thread_flag(TIF_NOTSC))
+		/*
+		 * Must flip the CPU state synchronously with
+		 * TIF_NOTSC in the current running context.
+		 */
+		hard_disable_TSC();
+	preempt_enable();
+}
+void hard_enable_TSC(void)
+{
+	write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+#endif /* CONFIG_SECCOMP */
+
+static noinline void
+__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+		 struct tss_struct *tss)
 {
 	struct thread_struct *next;
 
@@ -555,6 +578,17 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 		set_debugreg(next->debugreg[7], 7);
 	}
 
+#ifdef CONFIG_SECCOMP
+	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+		/* prev and next are different */
+		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+			hard_disable_TSC();
+		else
+			hard_enable_TSC();
+	}
+#endif
+
 	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
 		/*
 		 * Disable the bitmap via an invalid offset. We still cache
@@ -586,33 +620,6 @@ static noinline void __switch_to_xtra(struct task_struct *next_p,
 	}
 
 	/*
- * This function selects if the context switch from prev to next
- * has to tweak the TSC disable bit in the cr4.
- */
-static inline void disable_tsc(struct task_struct *prev_p,
-			       struct task_struct *next_p)
-{
-	struct thread_info *prev, *next;
-
-	/*
-	 * gcc should eliminate the ->thread_info dereference if
-	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
-	 */
-	prev = task_thread_info(prev_p);
-	next = task_thread_info(next_p);
-
-	if (has_secure_computing(prev) || has_secure_computing(next)) {
-		/* slow path here */
-		if (has_secure_computing(prev) &&
-		    !has_secure_computing(next)) {
-			write_cr4(read_cr4() & ~X86_CR4_TSD);
-		} else if (!has_secure_computing(prev) &&
-			   has_secure_computing(next))
-			write_cr4(read_cr4() | X86_CR4_TSD);
-	}
-}
-
-/*
  * switch_to(x,yn) should switch tasks from x to y.
  *
  * We fsave/fwait so that an exception goes off at the right time
@@ -689,11 +696,9 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	/*
 	 * Now maybe handle debug registers and/or IO bitmaps
 	 */
-	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
-	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
-		__switch_to_xtra(next_p, tss);
-
-	disable_tsc(prev_p, next_p);
+	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
+		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+		__switch_to_xtra(prev_p, next_p, tss);
 
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
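
Nothing in process.c sets TIF_NOTSC on its own; the flag is flipped via
disable_TSC() when a task enters seccomp mode. A minimal sketch of that
caller side, assuming the prctl interface added by the parent commit
(1d9d02feeee8) and that kernel/seccomp.c guards the call on TIF_NOTSC
being defined for the architecture:

	long prctl_set_seccomp(unsigned long seccomp_mode)
	{
		long ret;

		/* may only be set once, to keep it secure */
		ret = -EPERM;
		if (unlikely(current->seccomp.mode))
			goto out;

		ret = -EINVAL;
		if (seccomp_mode && seccomp_mode <= NR_SECCOMP_MODES) {
			current->seccomp.mode = seccomp_mode;
			set_thread_flag(TIF_SECCOMP);
	#ifdef TIF_NOTSC
			/*
			 * Flip cr4.TSD for the current context right away;
			 * __switch_to_xtra() keeps it in sync from then on.
			 */
			disable_TSC();
	#endif
			ret = 0;
		}
	out:
		return ret;
	}

Once the flag is set, rdtsc from the seccomp task traps (the process gets
SIGSEGV, since cr4.TSD makes the instruction privileged), while tasks that
never set a _TIF_WORK_CTXSW bit never enter __switch_to_xtra() at all.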