diff options
author | Andy Lutomirski <luto@amacapital.net> | 2014-07-21 21:49:16 -0400 |
---|---|---|
committer | Kees Cook <keescook@chromium.org> | 2014-09-03 17:58:17 -0400 |
commit | d39bd00deabe57420f2a3669eb71b0e0c4997184 (patch) | |
tree | bc32386d90df23c8d657b6b6a0051a0a59af11fe /kernel/seccomp.c | |
parent | 13aa72f0fd0a9f98a41cefb662487269e2f1ad65 (diff) |
seccomp: Allow arch code to provide seccomp_data
populate_seccomp_data is expensive: it works by inspecting
task_pt_regs and various other bits to piece together all the
information, and it does so in multiple partially redundant steps.
Arch-specific code in the syscall entry path can do much better.
Admittedly this adds a bit of additional room for error, but the
speedup should be worth it.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Kees Cook <keescook@chromium.org>
Diffstat (limited to 'kernel/seccomp.c')
-rw-r--r-- | kernel/seccomp.c | 32 |
1 file changed, 19 insertions, 13 deletions
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 6c8528ce9df9..1285cb205d49 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -173,10 +173,10 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) | |||
173 | * | 173 | * |
174 | * Returns valid seccomp BPF response codes. | 174 | * Returns valid seccomp BPF response codes. |
175 | */ | 175 | */ |
176 | static u32 seccomp_run_filters(void) | 176 | static u32 seccomp_run_filters(struct seccomp_data *sd) |
177 | { | 177 | { |
178 | struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter); | 178 | struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter); |
179 | struct seccomp_data sd; | 179 | struct seccomp_data sd_local; |
180 | u32 ret = SECCOMP_RET_ALLOW; | 180 | u32 ret = SECCOMP_RET_ALLOW; |
181 | 181 | ||
182 | /* Ensure unexpected behavior doesn't result in failing open. */ | 182 | /* Ensure unexpected behavior doesn't result in failing open. */ |
@@ -186,14 +186,17 @@ static u32 seccomp_run_filters(void) | |||
186 | /* Make sure cross-thread synced filter points somewhere sane. */ | 186 | /* Make sure cross-thread synced filter points somewhere sane. */ |
187 | smp_read_barrier_depends(); | 187 | smp_read_barrier_depends(); |
188 | 188 | ||
189 | populate_seccomp_data(&sd); | 189 | if (!sd) { |
190 | populate_seccomp_data(&sd_local); | ||
191 | sd = &sd_local; | ||
192 | } | ||
190 | 193 | ||
191 | /* | 194 | /* |
192 | * All filters in the list are evaluated and the lowest BPF return | 195 | * All filters in the list are evaluated and the lowest BPF return |
193 | * value always takes priority (ignoring the DATA). | 196 | * value always takes priority (ignoring the DATA). |
194 | */ | 197 | */ |
195 | for (; f; f = f->prev) { | 198 | for (; f; f = f->prev) { |
196 | u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd); | 199 | u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)sd); |
197 | 200 | ||
198 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) | 201 | if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) |
199 | ret = cur_ret; | 202 | ret = cur_ret; |
@@ -599,7 +602,7 @@ void secure_computing_strict(int this_syscall) | |||
599 | #else | 602 | #else |
600 | int __secure_computing(void) | 603 | int __secure_computing(void) |
601 | { | 604 | { |
602 | u32 phase1_result = seccomp_phase1(); | 605 | u32 phase1_result = seccomp_phase1(NULL); |
603 | 606 | ||
604 | if (likely(phase1_result == SECCOMP_PHASE1_OK)) | 607 | if (likely(phase1_result == SECCOMP_PHASE1_OK)) |
605 | return 0; | 608 | return 0; |
@@ -610,7 +613,7 @@ int __secure_computing(void) | |||
610 | } | 613 | } |
611 | 614 | ||
612 | #ifdef CONFIG_SECCOMP_FILTER | 615 | #ifdef CONFIG_SECCOMP_FILTER |
613 | static u32 __seccomp_phase1_filter(int this_syscall, struct pt_regs *regs) | 616 | static u32 __seccomp_phase1_filter(int this_syscall, struct seccomp_data *sd) |
614 | { | 617 | { |
615 | u32 filter_ret, action; | 618 | u32 filter_ret, action; |
616 | int data; | 619 | int data; |
@@ -621,20 +624,20 @@ static u32 __seccomp_phase1_filter(int this_syscall, struct pt_regs *regs) | |||
621 | */ | 624 | */ |
622 | rmb(); | 625 | rmb(); |
623 | 626 | ||
624 | filter_ret = seccomp_run_filters(); | 627 | filter_ret = seccomp_run_filters(sd); |
625 | data = filter_ret & SECCOMP_RET_DATA; | 628 | data = filter_ret & SECCOMP_RET_DATA; |
626 | action = filter_ret & SECCOMP_RET_ACTION; | 629 | action = filter_ret & SECCOMP_RET_ACTION; |
627 | 630 | ||
628 | switch (action) { | 631 | switch (action) { |
629 | case SECCOMP_RET_ERRNO: | 632 | case SECCOMP_RET_ERRNO: |
630 | /* Set the low-order 16-bits as a errno. */ | 633 | /* Set the low-order 16-bits as a errno. */ |
631 | syscall_set_return_value(current, regs, | 634 | syscall_set_return_value(current, task_pt_regs(current), |
632 | -data, 0); | 635 | -data, 0); |
633 | goto skip; | 636 | goto skip; |
634 | 637 | ||
635 | case SECCOMP_RET_TRAP: | 638 | case SECCOMP_RET_TRAP: |
636 | /* Show the handler the original registers. */ | 639 | /* Show the handler the original registers. */ |
637 | syscall_rollback(current, regs); | 640 | syscall_rollback(current, task_pt_regs(current)); |
638 | /* Let the filter pass back 16 bits of data. */ | 641 | /* Let the filter pass back 16 bits of data. */ |
639 | seccomp_send_sigsys(this_syscall, data); | 642 | seccomp_send_sigsys(this_syscall, data); |
640 | goto skip; | 643 | goto skip; |
@@ -661,11 +664,14 @@ skip: | |||
661 | 664 | ||
662 | /** | 665 | /** |
663 | * seccomp_phase1() - run fast path seccomp checks on the current syscall | 666 | * seccomp_phase1() - run fast path seccomp checks on the current syscall |
667 | * @arg sd: The seccomp_data or NULL | ||
664 | * | 668 | * |
665 | * This only reads pt_regs via the syscall_xyz helpers. The only change | 669 | * This only reads pt_regs via the syscall_xyz helpers. The only change |
666 | * it will make to pt_regs is via syscall_set_return_value, and it will | 670 | * it will make to pt_regs is via syscall_set_return_value, and it will |
667 | * only do that if it returns SECCOMP_PHASE1_SKIP. | 671 | * only do that if it returns SECCOMP_PHASE1_SKIP. |
668 | * | 672 | * |
673 | * If sd is provided, it will not read pt_regs at all. | ||
674 | * | ||
669 | * It may also call do_exit or force a signal; these actions must be | 675 | * It may also call do_exit or force a signal; these actions must be |
670 | * safe. | 676 | * safe. |
671 | * | 677 | * |
@@ -679,11 +685,11 @@ skip: | |||
679 | * If it returns anything else, then the return value should be passed | 685 | * If it returns anything else, then the return value should be passed |
680 | * to seccomp_phase2 from a context in which ptrace hooks are safe. | 686 | * to seccomp_phase2 from a context in which ptrace hooks are safe. |
681 | */ | 687 | */ |
682 | u32 seccomp_phase1(void) | 688 | u32 seccomp_phase1(struct seccomp_data *sd) |
683 | { | 689 | { |
684 | int mode = current->seccomp.mode; | 690 | int mode = current->seccomp.mode; |
685 | struct pt_regs *regs = task_pt_regs(current); | 691 | int this_syscall = sd ? sd->nr : |
686 | int this_syscall = syscall_get_nr(current, regs); | 692 | syscall_get_nr(current, task_pt_regs(current)); |
687 | 693 | ||
688 | switch (mode) { | 694 | switch (mode) { |
689 | case SECCOMP_MODE_STRICT: | 695 | case SECCOMP_MODE_STRICT: |
@@ -691,7 +697,7 @@ u32 seccomp_phase1(void) | |||
691 | return SECCOMP_PHASE1_OK; | 697 | return SECCOMP_PHASE1_OK; |
692 | #ifdef CONFIG_SECCOMP_FILTER | 698 | #ifdef CONFIG_SECCOMP_FILTER |
693 | case SECCOMP_MODE_FILTER: | 699 | case SECCOMP_MODE_FILTER: |
694 | return __seccomp_phase1_filter(this_syscall, regs); | 700 | return __seccomp_phase1_filter(this_syscall, sd); |
695 | #endif | 701 | #endif |
696 | default: | 702 | default: |
697 | BUG(); | 703 | BUG(); |