author    Linus Torvalds <torvalds@linux-foundation.org>  2011-03-15 21:31:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-15 21:31:30 -0400
commit    a926021cb1f8a99a275eaf6eb546102e9469dc59
tree      c6d0300cd4b1a1fd658708476db4577b68b4de31 /kernel/sched.c
parent    0586bed3e8563c2eb89bc7256e30ce633ae06cfb
parent    5e814dd597c42daeb8d2a276e64a6ec986ad0e2a
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (184 commits)
perf probe: Clean up probe_point_lazy_walker() return value
tracing: Fix irqoff selftest expanding max buffer
tracing: Align 4 byte ints together in struct tracer
tracing: Export trace_set_clr_event()
tracing: Explain about unstable clock on resume with ring buffer warning
ftrace/graph: Trace function entry before updating index
ftrace: Add .ref.text as one of the safe areas to trace
tracing: Adjust conditional expression latency formatting.
tracing: Fix event alignment: skb:kfree_skb
tracing: Fix event alignment: mce:mce_record
tracing: Fix event alignment: kvm:kvm_hv_hypercall
tracing: Fix event alignment: module:module_request
tracing: Fix event alignment: ftrace:context_switch and ftrace:wakeup
tracing: Remove lock_depth from event entry
perf header: Stop using 'self'
perf session: Use evlist/evsel for managing perf.data attributes
perf top: Don't let events to eat up whole header line
perf top: Fix events overflow in top command
ring-buffer: Remove unused #include <linux/trace_irq.h>
tracing: Add an 'overwrite' trace_option.
...
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 37
 1 file changed, 7 insertions(+), 30 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 42eab5a8437d..57a18e8d28c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -606,9 +606,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
-	if (p->flags & PF_EXITING)
-		return &root_task_group;
-
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
@@ -2265,27 +2262,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p: the task to evaluate
- * @func: the function to be called
- * @info: the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-			      void (*func) (void *info), void *info)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if (task_curr(p))
-		smp_call_function_single(cpu, func, info, 1);
-	preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
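This hunk deletes task_oncpu_function_call() outright; its only user was the perf core, which this series moves to an internal equivalent. For reference, a minimal sketch of the pattern the helper implemented — run a function on whichever CPU a task currently occupies, via the stock smp_call_function_single() IPI primitive. The name run_on_task_cpu is hypothetical; note the check-then-IPI sequence can race with task migration, which is part of why the helper went away.

#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>

static void run_on_task_cpu(struct task_struct *p,
			    void (*func)(void *info), void *info)
{
	int cpu;

	preempt_disable();		/* keep ourselves pinned while sampling */
	cpu = task_cpu(p);		/* CPU the task was last scheduled on */
	if (task_curr(p))		/* fire the IPI only if @p is running now */
		smp_call_function_single(cpu, func, info, 1);
	preempt_enable();
}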
@@ -2776,9 +2752,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	sched_info_switch(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+	trace_sched_switch(prev, next);
 }
 
 /**
@@ -2911,7 +2890,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_sched_switch(prev, next);
+
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -3989,9 +3968,6 @@ need_resched_nonpreemptible:
 	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
-		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
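Taken together, the three hunks above consolidate the switch-out bookkeeping: sched_info_switch() and perf_event_task_sched_out() move out of schedule(), and trace_sched_switch() moves out of context_switch(), so everything now fires from one place. The resulting function, reconstructed directly from the hunks:

static inline void
prepare_task_switch(struct rq *rq, struct task_struct *prev,
		    struct task_struct *next)
{
	sched_info_switch(prev, next);		/* moved here from schedule() */
	perf_event_task_sched_out(prev, next);	/* moved here from schedule() */
	fire_sched_out_preempt_notifiers(prev, next);
	prepare_lock_switch(rq, next);
	prepare_arch_switch(next);
	trace_sched_switch(prev, next);		/* moved here from context_switch() */
}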
@@ -5572,7 +5548,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_graph_init_task(idle);
+	ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
@@ -8885,7 +8861,8 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		struct cgroup *old_cgrp, struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
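The extra parameters track a prototype change to the cgroup_subsys ->exit callback elsewhere in this series: the callback now also receives the cgroup the task is leaving (@old_cgrp), not just the subsystem pointer. A hedged sketch of how the updated callback plugs into the cpu controller's subsystem descriptor — the descriptor fields shown reflect the 2.6.38-era cgroup API and are trimmed for illustration:

static void
cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
		struct cgroup *old_cgrp, struct task_struct *task)
{
	/* body unchanged; see the hunk above */
}

struct cgroup_subsys cpu_cgroup_subsys = {
	.name	= "cpu",
	.exit	= cpu_cgroup_exit,	/* now gets old_cgrp as well */
	/* .create, .destroy, .attach, ... omitted for brevity */
};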