Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 49 +++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 45 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 26efa475bdc1..419a39d0988f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,6 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
+#include <linux/perf_counter.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -584,6 +585,7 @@ struct rq {
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
+	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -692,7 +694,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
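Dropping static here makes update_rq_clock() visible outside sched.c, presumably so the new perf_counter code can refresh rq->clock itself. A minimal sketch of the matching declaration; which header it actually lands in is an assumption, not something this diff shows:

    /* Sketch only: the declaration implied by the now non-static definition.
     * The header it belongs in is an assumption. */
    extern void update_rq_clock(struct rq *rq);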
@@ -1967,12 +1969,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+		new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
 
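The restructuring above moves the per-task migration count out from under CONFIG_SCHEDSTATS: p->se.nr_migrations++ and the new per-runqueue nr_migrations_in++ now run on every cross-CPU move, while only the task_hot()/nr_forced2_migrations bookkeeping stays schedstats-only. Read straight from the hunk, the code after the patch is:

    if (old_cpu != new_cpu) {
        p->se.nr_migrations++;          /* counted unconditionally now */
        new_rq->nr_migrations_in++;     /* feeds cpu_nr_migrations() below */
    #ifdef CONFIG_SCHEDSTATS
        if (task_hot(p, old_rq->clock, NULL))
            schedstat_inc(p, se.nr_forced2_migrations);
    #endif
    }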
@@ -2324,6 +2329,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, in which case the function is called directly.
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
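A hypothetical caller of the new helper, to make the calling convention concrete: @func runs on @p's CPU, in IPI context when that CPU is remote, so it must not sleep. poke_task() and its use of the task pointer as @info are illustrative only, not part of the patch:

    /* Hypothetical callback; runs on the target task's CPU. */
    static void poke_task(void *info)
    {
        struct task_struct *t = info;

        /* IPI context if remote: no sleeping, keep it short. */
        pr_debug("poking %s on cpu %d\n", t->comm, smp_processor_id());
    }

    /* ... */
    task_oncpu_function_call(task, poke_task, task);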
@@ -2480,6 +2506,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
@@ -2710,6 +2737,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (post_schedule)
@@ -2872,6 +2900,15 @@ unsigned long nr_active(void)
 }
 
 /*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
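cpu_nr_migrations() simply exports the rq->nr_migrations_in counter that set_task_cpu() now increments. A hypothetical consumer might sample it per CPU as below; the loop is illustrative and not part of the patch:

    /* Hypothetical: dump the migrations-in count of every online CPU. */
    int cpu;

    for_each_online_cpu(cpu)
        printk(KERN_DEBUG "cpu%d: %llu migrations in\n",
               cpu, (unsigned long long)cpu_nr_migrations(cpu));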
@@ -4838,6 +4875,7 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
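Note the placement: perf_counter_task_tick() runs after the class tick handler and before spin_unlock(&rq->lock), so the hook fires once per tick while the runqueue is still locked. The condensed order below is read from the hunk; the spin_lock() line comes from the surrounding function and is not shown in this diff:

    spin_lock(&rq->lock);
    update_rq_clock(rq);
    update_cpu_load(rq);
    curr->sched_class->task_tick(rq, curr, 0);
    perf_counter_task_tick(curr, cpu);   /* new: per-tick perf hook, under rq->lock */
    spin_unlock(&rq->lock);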
@@ -5053,6 +5091,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
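Together with the finish_task_switch() hunk above, this brackets every context switch: the outgoing task is switched out just before the scheduler commits to next, and the incoming task is switched in once the architecture-level switch has completed. A condensed view of the pairing, assembled from the two hunks:

    /* In schedule(), just before switching away from prev: */
    sched_info_switch(prev, next);
    perf_counter_task_sched_out(prev, cpu);

    /* ... context switch ... */

    /* In finish_task_switch(), now running as the incoming task: */
    finish_arch_switch(prev);
    perf_counter_task_sched_in(current, cpu_of(rq));
    finish_lock_switch(rq, prev);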
@@ -8958,7 +8997,7 @@ void __init sched_init(void)
 	 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 	 * then A0's share of the cpu resource is:
 	 *
-	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33% 
+	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 	 *
 	 * We achieve this by letting init_task_group's tasks sit
 	 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
@@ -9059,6 +9098,8 @@ void __init sched_init(void)
 	alloc_bootmem_cpumask_var(&cpu_isolated_map);
 #endif /* SMP */
 
+	perf_counter_init();
+
 	scheduler_running = 1;
 }
 
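perf_counter_init() is the last call sched_init() makes before setting scheduler_running, so the perf subsystem is presumably brought up before any of the tick and context-switch hooks above can fire; the diff shows this ordering but not what the init itself does. The resulting tail of sched_init():

    perf_counter_init();    /* new: initialize perf before scheduling starts */

    scheduler_running = 1;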