Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	67
1 file changed, 64 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6cc1fd5d5072..b66a08c2480e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -584,6 +584,7 @@ struct rq {
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
+	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -692,7 +693,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -1955,12 +1956,15 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+		new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 					 new_cfsrq->min_vruntime;
 
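The interleaved #ifdef moves in the hunk above are easier to follow as the resulting code: the per-entity nr_migrations count (and the new per-rq nr_migrations_in count) are now maintained unconditionally, while only the task_hot()/nr_forced2_migrations bookkeeping stays under CONFIG_SCHEDSTATS. Reconstructed shape of the migration branch after this hunk, for illustration only:

	if (old_cpu != new_cpu) {
		p->se.nr_migrations++;		/* always counted now */
		new_rq->nr_migrations_in++;	/* migrations into the new rq */
#ifdef CONFIG_SCHEDSTATS
		if (task_hot(p, old_rq->clock, NULL))
			schedstat_inc(p, se.nr_forced2_migrations);
#endif
	}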
@@ -2312,6 +2316,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p: the task to evaluate
+ * @func: the function to be called
+ * @info: the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
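A hypothetical caller of the new helper, to illustrate the intended use (the callback name and pr_info() message are made up for this sketch; the perf counter code is the real consumer). Because the cross-call is delivered via smp_call_function_single(), @func runs in IPI context on the remote CPU and must not sleep:

	static void poke_task_state(void *info)
	{
		struct task_struct *p = info;

		/* Runs on the CPU where p is currently executing. */
		pr_info("poking %s on cpu %d\n", p->comm, smp_processor_id());
	}

	/* Does nothing if p is not running anywhere at the moment. */
	task_oncpu_function_call(p, poke_task_state, p);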
@@ -2468,6 +2493,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
@@ -2698,6 +2724,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (post_schedule)
@@ -2860,6 +2887,15 @@ unsigned long nr_active(void)
 }
 
 /*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
+}
+
+/*
  * Update rq->cpu_load[] statistics. This function is usually called every
  * scheduler tick (TICK_NSEC).
  */
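A minimal sketch of a consumer of the new accessor, e.g. summing migrations into all online CPUs (total_migrations_in() is a hypothetical function, not part of this patch):

	static u64 total_migrations_in(void)
	{
		u64 sum = 0;
		int cpu;

		for_each_online_cpu(cpu)
			sum += cpu_nr_migrations(cpu);

		return sum;
	}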
@@ -4514,6 +4550,29 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
  */
+unsigned long long __task_delta_exec(struct task_struct *p, int update)
+{
+	s64 delta_exec;
+	struct rq *rq;
+
+	rq = task_rq(p);
+	WARN_ON_ONCE(!runqueue_is_locked());
+	WARN_ON_ONCE(!task_current(rq, p));
+
+	if (update)
+		update_rq_clock(rq);
+
+	delta_exec = rq->clock - p->se.exec_start;
+
+	WARN_ON_ONCE(delta_exec < 0);
+
+	return delta_exec;
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
+ */
 unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
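The new __task_delta_exec() is the variant for callers that already hold the runqueue lock, as the WARN_ON_ONCE(!runqueue_is_locked()) check documents; task_delta_exec() below remains the self-locking version. A hedged sketch of the expected caller pattern (the total_ns variable is illustrative, and the caller must be on a path that already holds rq->lock):

	/*
	 * Runtime including the ns not yet banked into sum_exec_runtime;
	 * pass update == 1 to refresh rq->clock first.
	 */
	u64 total_ns = p->se.sum_exec_runtime + __task_delta_exec(p, 1);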
@@ -4773,6 +4832,7 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
@@ -4988,6 +5048,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
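The three perf_counter_task_*() hooks added by this patch pair up: sched_out on the way out of a task in schedule(), sched_in in finish_task_switch(), and a per-tick poke in scheduler_tick(). When perf counters are configured out, these hooks would compile away as no-op static inlines in the header, along these lines (a sketch of the usual kernel stub pattern, not the exact perf_counter.h contents):

#ifndef CONFIG_PERF_COUNTERS
static inline void perf_counter_task_sched_in(struct task_struct *task, int cpu)	{ }
static inline void perf_counter_task_sched_out(struct task_struct *task, int cpu)	{ }
static inline void perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
#endif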