Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/fork.c  50

1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5c2c355aa97f..37b9439b8c07 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
 }
 #endif
 
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
 {
 }
 
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
 # if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
 	struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
 	return page ? page_address(page) : NULL;
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
 {
-	struct page *page = virt_to_page(ti);
+	struct page *page = virt_to_page(stack);
 
 	memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
 				    -(1 << THREAD_SIZE_ORDER));
 	__free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
-	return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
 {
-	kmem_cache_free(thread_info_cache, ti);
+	kmem_cache_free(thread_stack_cache, stack);
 }
 
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
 {
-	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+	thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
 					      THREAD_SIZE, 0, NULL);
-	BUG_ON(thread_info_cache == NULL);
+	BUG_ON(thread_stack_cache == NULL);
 }
 # endif
 #endif
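
The renamed helpers make the intent of the comment above explicit: what gets allocated here is the kernel stack itself, and at the time of this commit struct thread_info simply lived at the bottom of that allocation (task_thread_info() was a cast of tsk->stack on most architectures). A minimal userspace sketch of that layout, assuming a 4-page stack and using posix_memalign in place of the kernel's page and kmem_cache allocators; all names and sizes here are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   4096UL
#define THREAD_SIZE (4UL * PAGE_SIZE)	/* e.g. order-2 stacks on x86-64 */

struct thread_info { unsigned long flags; };	/* stand-in definition */

/*
 * One THREAD_SIZE-aligned block is the whole kernel stack. In the kernel
 * the THREAD_SIZE >= PAGE_SIZE case comes from the page allocator and the
 * smaller case from a kmem_cache; posix_memalign stands in for both here.
 */
static unsigned long *alloc_stack(void)
{
	void *stack = NULL;

	return posix_memalign(&stack, THREAD_SIZE, THREAD_SIZE) ? NULL : stack;
}

int main(void)
{
	unsigned long *stack = alloc_stack();
	struct thread_info *ti;

	if (!stack)
		return 1;

	/* thread_info sits at the base; the stack grows down from the top */
	ti = (struct thread_info *)stack;
	ti->flags = 0;
	printf("stack %p..%p, thread_info at %p\n",
	       (void *)stack, (void *)((char *)stack + THREAD_SIZE), (void *)ti);
	free(stack);
	return 0;
}
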
@@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
 {
-	struct zone *zone = page_zone(virt_to_page(ti));
+	struct zone *zone = page_zone(virt_to_page(stack));
 
 	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
@@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
 	account_kernel_stack(tsk->stack, -1);
-	arch_release_thread_info(tsk->stack);
-	free_thread_info(tsk->stack);
+	arch_release_thread_stack(tsk->stack);
+	free_thread_stack(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
 	put_seccomp_filter(tsk);
@@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
 	struct task_struct *tsk;
-	struct thread_info *ti;
+	unsigned long *stack;
 	int err;
 
 	if (node == NUMA_NO_NODE)
@@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (!tsk)
 		return NULL;
 
-	ti = alloc_thread_info_node(tsk, node);
-	if (!ti)
+	stack = alloc_thread_stack_node(tsk, node);
+	if (!stack)
 		goto free_tsk;
 
 	err = arch_dup_task_struct(tsk, orig);
 	if (err)
-		goto free_ti;
+		goto free_stack;
 
-	tsk->stack = ti;
+	tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
 
-	account_kernel_stack(ti, 1);
+	account_kernel_stack(stack, 1);
 
 	kcov_task_init(tsk);
 
 	return tsk;
 
-free_ti:
-	free_thread_info(ti);
+free_stack:
+	free_thread_stack(stack);
 free_tsk:
 	free_task_struct(tsk);
 	return NULL;
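
The dup_task_struct() hunks also rename the cleanup label (free_ti becomes free_stack), preserving the kernel's usual goto-unwind error handling: each failing allocation jumps to a label that releases exactly what was acquired before it, in reverse order. A standalone sketch of that pattern, with a hypothetical make_task() in place of the real function:

#include <stdlib.h>
#include <string.h>

struct task { unsigned long *stack; char *name; };

/*
 * Hypothetical illustration (not kernel API) of the goto-unwind style
 * used in dup_task_struct(): each failure jumps to a label that frees
 * only what was already allocated, in reverse order of acquisition.
 */
static struct task *make_task(const char *name)
{
	struct task *tsk = malloc(sizeof(*tsk));

	if (!tsk)
		return NULL;

	tsk->stack = malloc(16384);	/* stand-in for alloc_thread_stack_node() */
	if (!tsk->stack)
		goto free_tsk;

	tsk->name = malloc(strlen(name) + 1);
	if (!tsk->name)
		goto free_stack;
	strcpy(tsk->name, name);

	return tsk;

free_stack:	/* undo the stack allocation */
	free(tsk->stack);
free_tsk:	/* undo the task allocation */
	free(tsk);
	return NULL;
}

int main(void)
{
	struct task *tsk = make_task("example");

	if (!tsk)
		return 1;
	free(tsk->name);
	free(tsk->stack);
	free(tsk);
	return 0;
}
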
