Diffstat (limited to 'kernel/fork.c')
 kernel/fork.c | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5e6cf0dd031c..5c372c954f3b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -283,8 +283,9 @@ static void free_thread_stack(struct task_struct *tsk)
 
 void thread_stack_cache_init(void)
 {
-	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
-					      THREAD_SIZE, 0, NULL);
+	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
+					THREAD_SIZE, THREAD_SIZE, 0, 0,
+					THREAD_SIZE, NULL);
 	BUG_ON(thread_stack_cache == NULL);
 }
 # endif
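The hunk above switches the thread_stack cache to kmem_cache_create_usercopy(), which takes the same arguments as kmem_cache_create() plus a useroffset/usersize pair naming the byte window within each object that hardened usercopy (CONFIG_HARDENED_USERCOPY) will allow copy_to_user()/copy_from_user() to touch; here the whole THREAD_SIZE object is whitelisted (useroffset 0, usersize THREAD_SIZE). As a rough illustration of what such a window implies, the userspace sketch below performs the bounds test a whitelist describes. The function name window_allows() is invented for this example; the real check is implemented inside the slab allocators, not in fork.c.

#include <stdbool.h>
#include <stdio.h>

/*
 * Would an n-byte copy starting at byte `offset` of a slab object stay
 * inside the whitelisted [useroffset, useroffset + usersize) window?
 * Illustrative only; comparisons are ordered to avoid unsigned wraparound.
 */
static bool window_allows(unsigned long useroffset, unsigned long usersize,
			  unsigned long offset, unsigned long n)
{
	if (offset < useroffset)
		return false;
	offset -= useroffset;
	return offset <= usersize && n <= usersize - offset;
}

int main(void)
{
	const unsigned long thread_size = 16384;	/* e.g. 16 KiB stacks */

	/* thread_stack whitelists the whole object: useroffset 0, usersize THREAD_SIZE. */
	printf("%d\n", window_allows(0, thread_size, 0, thread_size));	  /* 1 */
	printf("%d\n", window_allows(0, thread_size, 512, 1024));	  /* 1 */
	printf("%d\n", window_allows(0, thread_size, 512, thread_size)); /* 0: runs past the end */
	return 0;
}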
@@ -693,6 +694,21 @@ static void set_max_threads(unsigned int max_threads_suggested)
 int arch_task_struct_size __read_mostly;
 #endif
 
+static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
+{
+	/* Fetch thread_struct whitelist for the architecture. */
+	arch_thread_struct_whitelist(offset, size);
+
+	/*
+	 * Handle zero-sized whitelist or empty thread_struct, otherwise
+	 * adjust offset to position of thread_struct in task_struct.
+	 */
+	if (unlikely(*size == 0))
+		*offset = 0;
+	else
+		*offset += offsetof(struct task_struct, thread);
+}
+
 void __init fork_init(void)
 {
 	int i;
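The new task_struct_whitelist() helper translates the window that arch_thread_struct_whitelist() reports relative to thread_struct into one relative to task_struct, by adding offsetof(struct task_struct, thread); a zero size is normalized to the (0, 0) "no whitelist" pair. A minimal userspace sketch of that translation follows; the struct layouts are stand-ins invented for the example, and only the arithmetic matches the helper above.

#include <stddef.h>
#include <stdio.h>

/* Invented stand-in layouts, not the kernel's. */
struct thread_struct {
	long regs[8];
	char fpu_state[512];	/* pretend the arch whitelists this field */
};

struct task_struct {
	long state;
	void *stack;
	struct thread_struct thread;	/* the arch window is relative to here */
};

int main(void)
{
	/* What a hypothetical arch_thread_struct_whitelist() might report: */
	unsigned long offset = offsetof(struct thread_struct, fpu_state);
	unsigned long size = sizeof(((struct thread_struct *)0)->fpu_state);

	/* The translation step from task_struct_whitelist(): */
	if (size == 0)
		offset = 0;	/* empty window: normalize to (0, 0) */
	else
		offset += offsetof(struct task_struct, thread);

	printf("useroffset=%lu usersize=%lu\n", offset, size);
	return 0;
}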
@@ -701,11 +717,14 @@ void __init fork_init(void)
 #define ARCH_MIN_TASKALIGN	0
 #endif
 	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
+	unsigned long useroffset, usersize;
 
 	/* create a slab on which task_structs can be allocated */
-	task_struct_cachep = kmem_cache_create("task_struct",
+	task_struct_whitelist(&useroffset, &usersize);
+	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
 			arch_task_struct_size, align,
-			SLAB_PANIC|SLAB_ACCOUNT, NULL);
+			SLAB_PANIC|SLAB_ACCOUNT,
+			useroffset, usersize, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -2248,9 +2267,11 @@ void __init proc_caches_init(void)
 	 * maximum number of CPU's we can ever have. The cpumask_allocation
 	 * is at the end of the structure, exactly for that reason.
 	 */
-	mm_cachep = kmem_cache_create("mm_struct",
+	mm_cachep = kmem_cache_create_usercopy("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
+			offsetof(struct mm_struct, saved_auxv),
+			sizeof_field(struct mm_struct, saved_auxv),
 			NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
 	mmap_init();
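For mm_struct, only the saved_auxv[] array is whitelisted: it holds the process's auxiliary vector, which is exposed to userspace (e.g. via /proc/<pid>/auxv), so it is the region that legitimately crosses the user/kernel boundary. The userspace sketch below reproduces the offsetof()/sizeof_field() arithmetic from the hunk; the struct layout and AT_VECTOR_SIZE value are placeholders for illustration, not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel's sizeof_field() helper. */
#define sizeof_field(TYPE, MEMBER) sizeof(((TYPE *)0)->MEMBER)

#define AT_VECTOR_SIZE 44	/* illustrative; the real value is arch-derived */

/* Stand-in layout; many real fields elided. */
struct mm_struct {
	void *mmap;
	unsigned long saved_auxv[AT_VECTOR_SIZE];	/* the whitelisted region */
	unsigned long flags;
};

int main(void)
{
	/* The (useroffset, usersize) pair passed to kmem_cache_create_usercopy(). */
	printf("useroffset=%zu usersize=%zu\n",
	       offsetof(struct mm_struct, saved_auxv),
	       sizeof_field(struct mm_struct, saved_auxv));
	return 0;
}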