diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 10:23:15 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 10:23:15 -0400 |
commit | faafcba3b5e15999cf75d5c5a513ac8e47e2545f (patch) | |
tree | 47d58d1c00e650e820506c91eb9a41268756bdda /kernel/fork.c | |
parent | 13ead805c5a14b0e7ecd34f61404a5bfba655895 (diff) | |
parent | f10e00f4bf360c36edbe6bf18a6c75b171cbe012 (diff) |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes in this cycle were:
- Optimized support for Intel "Cluster-on-Die" (CoD) topologies (Dave
Hansen)
- Various sched/idle refinements for better idle handling (Nicolas
Pitre, Daniel Lezcano, Chuansheng Liu, Vincent Guittot)
- sched/numa updates and optimizations (Rik van Riel)
- sysbench speedup (Vincent Guittot)
- capacity calculation cleanups/refactoring (Vincent Guittot)
- Various cleanups to thread group iteration (Oleg Nesterov)
- Double-rq-lock removal optimization and various refactorings
(Kirill Tkhai)
- various sched/deadline fixes
... and lots of other changes"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
sched/fair: Delete resched_cpu() from idle_balance()
sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
sched: Improve sysbench performance by fixing spurious active migration
sched/x86: Fix up typo in topology detection
x86, sched: Add new topology for multi-NUMA-node CPUs
sched/rt: Use resched_curr() in task_tick_rt()
sched: Use rq->rd in sched_setaffinity() under RCU read lock
sched: cleanup: Rename 'out_unlock' to 'out_free_new_mask'
sched: Use dl_bw_of() under RCU read lock
sched/fair: Remove duplicate code from can_migrate_task()
sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
sched: print_rq(): Don't use tasklist_lock
sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
sched: Fix the task-group check in tg_has_rt_tasks()
sched/fair: Leverage the idle state info when choosing the "idlest" cpu
sched: Let the scheduler see CPU idle states
sched/deadline: Fix inter- exclusive cpusets migrations
sched/deadline: Clear dl_entity params when setscheduling to different class
sched/numa: Kill the wrong/dead TASK_DEAD check in task_numa_fault()
...
Diffstat (limited to 'kernel/fork.c')
 kernel/fork.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8c162d102740..9b7d746d6d62 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -294,11 +294,18 @@ int __weak arch_dup_task_struct(struct task_struct *dst,
 	return 0;
 }
 
+void set_task_stack_end_magic(struct task_struct *tsk)
+{
+	unsigned long *stackend;
+
+	stackend = end_of_stack(tsk);
+	*stackend = STACK_END_MAGIC;	/* for overflow detection */
+}
+
 static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
-	unsigned long *stackend;
 	int node = tsk_fork_get_node(orig);
 	int err;
 
@@ -328,8 +335,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
-	stackend = end_of_stack(tsk);
-	*stackend = STACK_END_MAGIC;	/* for overflow detection */
+	set_task_stack_end_magic(tsk);
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	tsk->stack_canary = get_random_int();
@@ -1067,6 +1073,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->curr_target = tsk;
 	init_sigpending(&sig->shared_pending);
 	INIT_LIST_HEAD(&sig->posix_timers);
+	seqlock_init(&sig->stats_lock);
 
 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	sig->real_timer.function = it_real_fn;