diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 10:23:15 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-13 10:23:15 -0400 |
| commit | faafcba3b5e15999cf75d5c5a513ac8e47e2545f (patch) | |
| tree | 47d58d1c00e650e820506c91eb9a41268756bdda /kernel/trace | |
| parent | 13ead805c5a14b0e7ecd34f61404a5bfba655895 (diff) | |
| parent | f10e00f4bf360c36edbe6bf18a6c75b171cbe012 (diff) | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
"The main changes in this cycle were:
- Optimized support for Intel "Cluster-on-Die" (CoD) topologies (Dave
Hansen)
- Various sched/idle refinements for better idle handling (Nicolas
Pitre, Daniel Lezcano, Chuansheng Liu, Vincent Guittot)
- sched/numa updates and optimizations (Rik van Riel)
- sysbench speedup (Vincent Guittot)
- capacity calculation cleanups/refactoring (Vincent Guittot)
- Various cleanups to thread group iteration (Oleg Nesterov)
- Double-rq-lock removal optimization and various refactorings
(Kirill Tkhai)
- various sched/deadline fixes
... and lots of other changes"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (72 commits)
sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
sched/fair: Delete resched_cpu() from idle_balance()
sched, time: Fix build error with 64 bit cputime_t on 32 bit systems
sched: Improve sysbench performance by fixing spurious active migration
sched/x86: Fix up typo in topology detection
x86, sched: Add new topology for multi-NUMA-node CPUs
sched/rt: Use resched_curr() in task_tick_rt()
sched: Use rq->rd in sched_setaffinity() under RCU read lock
sched: cleanup: Rename 'out_unlock' to 'out_free_new_mask'
sched: Use dl_bw_of() under RCU read lock
sched/fair: Remove duplicate code from can_migrate_task()
sched, mips, ia64: Remove __ARCH_WANT_UNLOCKED_CTXSW
sched: print_rq(): Don't use tasklist_lock
sched: normalize_rt_tasks(): Don't use _irqsave for tasklist_lock, use task_rq_lock()
sched: Fix the task-group check in tg_has_rt_tasks()
sched/fair: Leverage the idle state info when choosing the "idlest" cpu
sched: Let the scheduler see CPU idle states
sched/deadline: Fix inter- exclusive cpusets migrations
sched/deadline: Clear dl_entity params when setscheduling to different class
sched/numa: Kill the wrong/dead TASK_DEAD check in task_numa_fault()
...
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/ring_buffer_benchmark.c | 3 | ||||
| -rw-r--r-- | kernel/trace/trace_stack.c | 4 |
2 files changed, 1 insertion, 6 deletions
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 0434ff1b808e..3f9e328c30b5 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
| @@ -205,7 +205,6 @@ static void ring_buffer_consumer(void) | |||
| 205 | break; | 205 | break; |
| 206 | 206 | ||
| 207 | schedule(); | 207 | schedule(); |
| 208 | __set_current_state(TASK_RUNNING); | ||
| 209 | } | 208 | } |
| 210 | reader_finish = 0; | 209 | reader_finish = 0; |
| 211 | complete(&read_done); | 210 | complete(&read_done); |
| @@ -379,7 +378,6 @@ static int ring_buffer_consumer_thread(void *arg) | |||
| 379 | break; | 378 | break; |
| 380 | 379 | ||
| 381 | schedule(); | 380 | schedule(); |
| 382 | __set_current_state(TASK_RUNNING); | ||
| 383 | } | 381 | } |
| 384 | __set_current_state(TASK_RUNNING); | 382 | __set_current_state(TASK_RUNNING); |
| 385 | 383 | ||
| @@ -407,7 +405,6 @@ static int ring_buffer_producer_thread(void *arg) | |||
| 407 | trace_printk("Sleeping for 10 secs\n"); | 405 | trace_printk("Sleeping for 10 secs\n"); |
| 408 | set_current_state(TASK_INTERRUPTIBLE); | 406 | set_current_state(TASK_INTERRUPTIBLE); |
| 409 | schedule_timeout(HZ * SLEEP_TIME); | 407 | schedule_timeout(HZ * SLEEP_TIME); |
| 410 | __set_current_state(TASK_RUNNING); | ||
| 411 | } | 408 | } |
| 412 | 409 | ||
| 413 | if (kill_test) | 410 | if (kill_test) |
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 8a4e5cb66a4c..16eddb308c33 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 16 | #include <linux/magic.h> | ||
| 17 | 16 | ||
| 18 | #include <asm/setup.h> | 17 | #include <asm/setup.h> |
| 19 | 18 | ||
| @@ -171,8 +170,7 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
| 171 | i++; | 170 | i++; |
| 172 | } | 171 | } |
| 173 | 172 | ||
| 174 | if ((current != &init_task && | 173 | if (task_stack_end_corrupted(current)) { |
| 175 | *(end_of_stack(current)) != STACK_END_MAGIC)) { | ||
| 176 | print_max_stack(); | 174 | print_max_stack(); |
| 177 | BUG(); | 175 | BUG(); |
| 178 | } | 176 | } |
