diff options
author | Kees Cook <keescook@chromium.org> | 2017-08-16 16:00:58 -0400 |
---|---|---|
committer | Kees Cook <keescook@chromium.org> | 2018-01-15 15:08:04 -0500 |
commit | 5905429ad85657c28d93ec3d826ddeea1f44c3ce (patch) | |
tree | 57fe66f71ea19aa2305ee2085bdd1809e946a136 | |
parent | f9d29946c56734e954459bc9a0e688a8ae9b4cbf (diff) |
fork: Provide usercopy whitelisting for task_struct
While the blocked and saved_sigmask fields of task_struct are copied to
userspace (via sigmask_to_save() and setup_rt_frame()), they are always
copied with a static length (i.e. sizeof(sigset_t)).
The only portion of task_struct that is potentially dynamically sized and
may be copied to userspace is in the architecture-specific thread_struct
at the end of task_struct.
cache object allocation:
kernel/fork.c:
alloc_task_struct_node(...):
return kmem_cache_alloc_node(task_struct_cachep, ...);
dup_task_struct(...):
...
tsk = alloc_task_struct_node(node);
copy_process(...):
...
dup_task_struct(...)
_do_fork(...):
...
copy_process(...)
example usage trace:
arch/x86/kernel/fpu/signal.c:
__fpu__restore_sig(...):
...
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
...
__copy_from_user(&fpu->state.xsave, ..., state_size);
fpu__restore_sig(...):
...
return __fpu__restore_sig(...);
arch/x86/kernel/signal.c:
restore_sigcontext(...):
...
fpu__restore_sig(...)
This introduces arch_thread_struct_whitelist() to let an architecture
declare specifically where the whitelist should be within thread_struct.
If undefined, the entire thread_struct field is left whitelisted.
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Laura Abbott <labbott@redhat.com>
Cc: "Mickaël Salaün" <mic@digikod.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: Rik van Riel <riel@redhat.com>
-rw-r--r-- | arch/Kconfig | 11 | ||||
-rw-r--r-- | include/linux/sched/task.h | 14 | ||||
-rw-r--r-- | kernel/fork.c | 22 |
3 files changed, 45 insertions(+), 2 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig index 400b9e1b2f27..8911ff37335a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -242,6 +242,17 @@ config ARCH_INIT_TASK | |||
242 | config ARCH_TASK_STRUCT_ALLOCATOR | 242 | config ARCH_TASK_STRUCT_ALLOCATOR |
243 | bool | 243 | bool |
244 | 244 | ||
245 | config HAVE_ARCH_THREAD_STRUCT_WHITELIST | ||
246 | bool | ||
247 | depends on !ARCH_TASK_STRUCT_ALLOCATOR | ||
248 | help | ||
249 | An architecture should select this to provide hardened usercopy | ||
250 | knowledge about what region of the thread_struct should be | ||
251 | whitelisted for copying to userspace. Normally this is only the | ||
252 | FPU registers. Specifically, arch_thread_struct_whitelist() | ||
253 | should be implemented. Without this, the entire thread_struct | ||
254 | field in task_struct will be left whitelisted. | ||
255 | |||
245 | # Select if arch has its private alloc_thread_stack() function | 256 | # Select if arch has its private alloc_thread_stack() function |
246 | config ARCH_THREAD_STACK_ALLOCATOR | 257 | config ARCH_THREAD_STACK_ALLOCATOR |
247 | bool | 258 | bool |
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 05b8650f06f5..5be31eb7b266 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h | |||
@@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly; | |||
104 | # define arch_task_struct_size (sizeof(struct task_struct)) | 104 | # define arch_task_struct_size (sizeof(struct task_struct)) |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | #ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST | ||
108 | /* | ||
109 | * If an architecture has not declared a thread_struct whitelist we | ||
110 | * must assume something there may need to be copied to userspace. | ||
111 | */ | ||
112 | static inline void arch_thread_struct_whitelist(unsigned long *offset, | ||
113 | unsigned long *size) | ||
114 | { | ||
115 | *offset = 0; | ||
116 | /* Handle dynamically sized thread_struct. */ | ||
117 | *size = arch_task_struct_size - offsetof(struct task_struct, thread); | ||
118 | } | ||
119 | #endif | ||
120 | |||
107 | #ifdef CONFIG_VMAP_STACK | 121 | #ifdef CONFIG_VMAP_STACK |
108 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) | 122 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) |
109 | { | 123 | { |
diff --git a/kernel/fork.c b/kernel/fork.c index 0e086af148f2..5977e691c754 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -458,6 +458,21 @@ static void set_max_threads(unsigned int max_threads_suggested) | |||
458 | int arch_task_struct_size __read_mostly; | 458 | int arch_task_struct_size __read_mostly; |
459 | #endif | 459 | #endif |
460 | 460 | ||
461 | static void task_struct_whitelist(unsigned long *offset, unsigned long *size) | ||
462 | { | ||
463 | /* Fetch thread_struct whitelist for the architecture. */ | ||
464 | arch_thread_struct_whitelist(offset, size); | ||
465 | |||
466 | /* | ||
467 | * Handle zero-sized whitelist or empty thread_struct, otherwise | ||
468 | * adjust offset to position of thread_struct in task_struct. | ||
469 | */ | ||
470 | if (unlikely(*size == 0)) | ||
471 | *offset = 0; | ||
472 | else | ||
473 | *offset += offsetof(struct task_struct, thread); | ||
474 | } | ||
475 | |||
461 | void __init fork_init(void) | 476 | void __init fork_init(void) |
462 | { | 477 | { |
463 | int i; | 478 | int i; |
@@ -466,11 +481,14 @@ void __init fork_init(void) | |||
466 | #define ARCH_MIN_TASKALIGN 0 | 481 | #define ARCH_MIN_TASKALIGN 0 |
467 | #endif | 482 | #endif |
468 | int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); | 483 | int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); |
484 | unsigned long useroffset, usersize; | ||
469 | 485 | ||
470 | /* create a slab on which task_structs can be allocated */ | 486 | /* create a slab on which task_structs can be allocated */ |
471 | task_struct_cachep = kmem_cache_create("task_struct", | 487 | task_struct_whitelist(&useroffset, &usersize); |
488 | task_struct_cachep = kmem_cache_create_usercopy("task_struct", | ||
472 | arch_task_struct_size, align, | 489 | arch_task_struct_size, align, |
473 | SLAB_PANIC|SLAB_ACCOUNT, NULL); | 490 | SLAB_PANIC|SLAB_ACCOUNT, |
491 | useroffset, usersize, NULL); | ||
474 | #endif | 492 | #endif |
475 | 493 | ||
476 | /* do the arch specific task caches init */ | 494 | /* do the arch specific task caches init */ |