diff options
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-02-10 14:35:36 -0500 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-02-10 14:35:36 -0500 |
| commit | 4ba24fef3eb3b142197135223b90ced2f319cd53 (patch) | |
| tree | a20c125b27740ec7b4c761b11d801108e1b316b2 /kernel/fork.c | |
| parent | 47c1ffb2b6b630894e9a16442611c056ab21c057 (diff) | |
| parent | 98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff) | |
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'kernel/fork.c')
| -rw-r--r-- | kernel/fork.c | 25 |
1 files changed, 17 insertions, 8 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index a91e47d86de2..4dc2ddade9f1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -294,11 +294,18 @@ int __weak arch_dup_task_struct(struct task_struct *dst, | |||
| 294 | return 0; | 294 | return 0; |
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | void set_task_stack_end_magic(struct task_struct *tsk) | ||
| 298 | { | ||
| 299 | unsigned long *stackend; | ||
| 300 | |||
| 301 | stackend = end_of_stack(tsk); | ||
| 302 | *stackend = STACK_END_MAGIC; /* for overflow detection */ | ||
| 303 | } | ||
| 304 | |||
| 297 | static struct task_struct *dup_task_struct(struct task_struct *orig) | 305 | static struct task_struct *dup_task_struct(struct task_struct *orig) |
| 298 | { | 306 | { |
| 299 | struct task_struct *tsk; | 307 | struct task_struct *tsk; |
| 300 | struct thread_info *ti; | 308 | struct thread_info *ti; |
| 301 | unsigned long *stackend; | ||
| 302 | int node = tsk_fork_get_node(orig); | 309 | int node = tsk_fork_get_node(orig); |
| 303 | int err; | 310 | int err; |
| 304 | 311 | ||
| @@ -328,8 +335,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
| 328 | setup_thread_stack(tsk, orig); | 335 | setup_thread_stack(tsk, orig); |
| 329 | clear_user_return_notifier(tsk); | 336 | clear_user_return_notifier(tsk); |
| 330 | clear_tsk_need_resched(tsk); | 337 | clear_tsk_need_resched(tsk); |
| 331 | stackend = end_of_stack(tsk); | 338 | set_task_stack_end_magic(tsk); |
| 332 | *stackend = STACK_END_MAGIC; /* for overflow detection */ | ||
| 333 | 339 | ||
| 334 | #ifdef CONFIG_CC_STACKPROTECTOR | 340 | #ifdef CONFIG_CC_STACKPROTECTOR |
| 335 | tsk->stack_canary = get_random_int(); | 341 | tsk->stack_canary = get_random_int(); |
| @@ -427,7 +433,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 427 | get_file(file); | 433 | get_file(file); |
| 428 | if (tmp->vm_flags & VM_DENYWRITE) | 434 | if (tmp->vm_flags & VM_DENYWRITE) |
| 429 | atomic_dec(&inode->i_writecount); | 435 | atomic_dec(&inode->i_writecount); |
| 430 | mutex_lock(&mapping->i_mmap_mutex); | 436 | i_mmap_lock_write(mapping); |
| 431 | if (tmp->vm_flags & VM_SHARED) | 437 | if (tmp->vm_flags & VM_SHARED) |
| 432 | atomic_inc(&mapping->i_mmap_writable); | 438 | atomic_inc(&mapping->i_mmap_writable); |
| 433 | flush_dcache_mmap_lock(mapping); | 439 | flush_dcache_mmap_lock(mapping); |
| @@ -439,7 +445,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 439 | vma_interval_tree_insert_after(tmp, mpnt, | 445 | vma_interval_tree_insert_after(tmp, mpnt, |
| 440 | &mapping->i_mmap); | 446 | &mapping->i_mmap); |
| 441 | flush_dcache_mmap_unlock(mapping); | 447 | flush_dcache_mmap_unlock(mapping); |
| 442 | mutex_unlock(&mapping->i_mmap_mutex); | 448 | i_mmap_unlock_write(mapping); |
| 443 | } | 449 | } |
| 444 | 450 | ||
| 445 | /* | 451 | /* |
| @@ -601,9 +607,8 @@ static void check_mm(struct mm_struct *mm) | |||
| 601 | printk(KERN_ALERT "BUG: Bad rss-counter state " | 607 | printk(KERN_ALERT "BUG: Bad rss-counter state " |
| 602 | "mm:%p idx:%d val:%ld\n", mm, i, x); | 608 | "mm:%p idx:%d val:%ld\n", mm, i, x); |
| 603 | } | 609 | } |
| 604 | |||
| 605 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS | 610 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS |
| 606 | VM_BUG_ON(mm->pmd_huge_pte); | 611 | VM_BUG_ON_MM(mm->pmd_huge_pte, mm); |
| 607 | #endif | 612 | #endif |
| 608 | } | 613 | } |
| 609 | 614 | ||
| @@ -1017,11 +1022,14 @@ void __cleanup_sighand(struct sighand_struct *sighand) | |||
| 1017 | { | 1022 | { |
| 1018 | if (atomic_dec_and_test(&sighand->count)) { | 1023 | if (atomic_dec_and_test(&sighand->count)) { |
| 1019 | signalfd_cleanup(sighand); | 1024 | signalfd_cleanup(sighand); |
| 1025 | /* | ||
| 1026 | * sighand_cachep is SLAB_DESTROY_BY_RCU so we can free it | ||
| 1027 | * without an RCU grace period, see __lock_task_sighand(). | ||
| 1028 | */ | ||
| 1020 | kmem_cache_free(sighand_cachep, sighand); | 1029 | kmem_cache_free(sighand_cachep, sighand); |
| 1021 | } | 1030 | } |
| 1022 | } | 1031 | } |
| 1023 | 1032 | ||
| 1024 | |||
| 1025 | /* | 1033 | /* |
| 1026 | * Initialize POSIX timer handling for a thread group. | 1034 | * Initialize POSIX timer handling for a thread group. |
| 1027 | */ | 1035 | */ |
| @@ -1068,6 +1076,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
| 1068 | sig->curr_target = tsk; | 1076 | sig->curr_target = tsk; |
| 1069 | init_sigpending(&sig->shared_pending); | 1077 | init_sigpending(&sig->shared_pending); |
| 1070 | INIT_LIST_HEAD(&sig->posix_timers); | 1078 | INIT_LIST_HEAD(&sig->posix_timers); |
| 1079 | seqlock_init(&sig->stats_lock); | ||
| 1071 | 1080 | ||
| 1072 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1081 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 1073 | sig->real_timer.function = it_real_fn; | 1082 | sig->real_timer.function = it_real_fn; |
