Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 75
1 file changed, 58 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 830967e18285..ee61f454a98b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -2053,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2718,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_counter_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -2904,6 +2904,19 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
+unsigned long nr_iowait_cpu(void)
+{
+	struct rq *this = this_rq();
+	return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+	struct rq *this = this_rq();
+	return this->cpu_load[0];
+}
+
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
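
The hunk above exports two tiny accessors, nr_iowait_cpu() and this_cpu_load(), which simply read state the scheduler already keeps on the current CPU's runqueue: the count of tasks blocked in I/O wait and the most recent cpu_load[] sample. A minimal userspace sketch of the same accessor shape follows; the struct rq fields and the this_rq() stand-in are modelled only loosely and are not the kernel's definitions.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Loose stand-in for the per-CPU runqueue: only the two fields the
     * new helpers read are modelled here. */
    struct rq {
            atomic_int nr_iowait;      /* tasks on this CPU sleeping in I/O wait */
            unsigned long cpu_load[5]; /* decaying load history, [0] = most recent */
    };

    static struct rq runqueue;                      /* one CPU's runqueue */
    static struct rq *this_rq(void) { return &runqueue; }

    /* Shape of the new nr_iowait_cpu(): read this CPU's iowait count. */
    unsigned long nr_iowait_cpu(void)
    {
            struct rq *this = this_rq();
            return (unsigned long)atomic_load(&this->nr_iowait);
    }

    /* Shape of the new this_cpu_load(): report the newest load sample. */
    unsigned long this_cpu_load(void)
    {
            struct rq *this = this_rq();
            return this->cpu_load[0];
    }

    int main(void)
    {
            atomic_store(&runqueue.nr_iowait, 3);
            runqueue.cpu_load[0] = 1024;
            printf("iowait=%lu load=%lu\n", nr_iowait_cpu(), this_cpu_load());
            return 0;
    }
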
@@ -5079,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-	cputime_t one_jiffy = jiffies_to_cputime(1);
-	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
 	struct rq *rq = this_rq();
 
 	if (user_tick)
-		account_user_time(p, one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
 				    one_jiffy_scaled);
 	else
-		account_idle_time(one_jiffy);
+		account_idle_time(cputime_one_jiffy);
 }
 
 /*
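
The account_process_tick() hunk replaces the per-tick jiffies_to_cputime(1) conversion with the precomputed cputime_one_jiffy value: one jiffy's worth of cputime never changes after boot, so converting it on every timer interrupt is wasted work. A hedged userspace sketch of that hoisting idea is below; the conversion routine and the accounting buckets are invented for illustration.

    #include <stdio.h>

    typedef unsigned long long cputime_t;

    /* Pretend tick-to-cputime conversion; imagine it being costly. */
    static cputime_t jiffies_to_cputime(unsigned long jiffies)
    {
            return (cputime_t)jiffies * 10000ULL;
    }

    /* Computed once at "boot" instead of on every tick. */
    static cputime_t cputime_one_jiffy;
    static cputime_t user_time, idle_time;

    static void account_process_tick(int user_tick)
    {
            /* Hot path: use the cached constant, no conversion here. */
            if (user_tick)
                    user_time += cputime_one_jiffy;
            else
                    idle_time += cputime_one_jiffy;
    }

    int main(void)
    {
            cputime_one_jiffy = jiffies_to_cputime(1);
            for (int i = 0; i < 100; i++)
                    account_process_tick(i & 1);
            printf("user=%llu idle=%llu\n", user_time, idle_time);
            return 0;
    }
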
@@ -5193,7 +5205,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
-	perf_counter_task_tick(curr, cpu);
+	perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5409,7 +5421,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_counter_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -7671,7 +7683,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -9528,7 +9540,7 @@ void __init sched_init(void)
 	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-	perf_counter_init();
+	perf_event_init();
 
 	scheduler_running = 1;
 }
@@ -10300,7 +10312,7 @@ static int sched_rt_global_constraints(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 int sched_rt_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int ret;
@@ -10311,7 +10323,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
 	old_period = sysctl_sched_rt_period;
 	old_runtime = sysctl_sched_rt_runtime;
 
-	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (!ret && write) {
 		ret = sched_rt_global_constraints();
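
The two hunks above track a tree-wide cleanup that dropped the unused struct file * argument from sysctl proc handlers, so both sched_rt_handler() and its proc_dointvec() call lose filp. The surrounding code also shows a save/parse/validate pattern: the old period and runtime are remembered before the generic parser writes new values, so a failed constraint check can put them back (the restore itself lies outside this hunk). A small standalone sketch of that pattern, with invented names and a plain string parser standing in for proc_dointvec():

    #include <stdio.h>
    #include <stdlib.h>

    static long rt_period  = 1000000;  /* stand-ins for the rt sysctl knobs */
    static long rt_runtime = 950000;

    /* Stand-in for proc_dointvec(): parse the user's buffer into *val. */
    static int parse_long(const char *buf, long *val)
    {
            char *end;
            long v = strtol(buf, &end, 10);
            if (end == buf)
                    return -1;
            *val = v;
            return 0;
    }

    static int constraints_ok(void)
    {
            return rt_runtime <= rt_period;  /* runtime must fit in the period */
    }

    /* Mirrors the handler's shape: save old values, parse, validate,
     * and roll back if the new combination is rejected. */
    static int rt_runtime_handler_write(const char *buf)
    {
            long old_period = rt_period, old_runtime = rt_runtime;
            int ret = parse_long(buf, &rt_runtime);

            if (!ret && !constraints_ok()) {
                    rt_period  = old_period;
                    rt_runtime = old_runtime;
                    ret = -1;
            }
            return ret;
    }

    int main(void)
    {
            int ret = rt_runtime_handler_write("2000000"); /* exceeds the period */
            printf("ret=%d runtime=%ld\n", ret, rt_runtime); /* rejected, restored */
            return 0;
    }
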
@@ -10365,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10376,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	if (tsk->sched_class != &fair_sched_class)
 		return -EINVAL;
 #endif
+	return 0;
+}
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk, bool threadgroup)
+{
+	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+	if (retval)
+		return retval;
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			retval = cpu_cgroup_can_attach_task(cgrp, c);
+			if (retval) {
+				rcu_read_unlock();
+				return retval;
+			}
+		}
+		rcu_read_unlock();
+	}
 	return 0;
 }
 
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		  struct cgroup *old_cont, struct task_struct *tsk)
+		  struct cgroup *old_cont, struct task_struct *tsk,
+		  bool threadgroup)
 {
 	sched_move_task(tsk);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			sched_move_task(c);
+		}
+		rcu_read_unlock();
+	}
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
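
The last two hunks split the per-task admission check out into cpu_cgroup_can_attach_task() and teach both cgroup callbacks about the new threadgroup flag: when it is set, the check runs over every thread in the group, aborting on the first failure, before attach later moves each thread with sched_move_task(). A compact standalone sketch of that check-all-then-move-all pattern is below; the RCU-protected thread_group list is replaced by a plain array, and the two cgroup callbacks are collapsed into one function for brevity.

    #include <stdio.h>

    struct task { int id; int policy; };  /* policy 0 = "fair", 1 = "rt" */

    /* Per-task admission check, analogous to cpu_cgroup_can_attach_task(). */
    static int can_attach_task(const struct task *t)
    {
            return t->policy == 0 ? 0 : -1;  /* reject non-fair tasks */
    }

    static void move_task(const struct task *t)
    {
            printf("moved task %d\n", t->id);
    }

    /* Check the leader, then (if threadgroup) every other thread; only
     * when all checks pass is anything actually moved. */
    static int attach(struct task *group, int n, int threadgroup)
    {
            int ret = can_attach_task(&group[0]);
            if (ret)
                    return ret;
            if (threadgroup) {
                    for (int i = 1; i < n; i++) {
                            ret = can_attach_task(&group[i]);
                            if (ret)
                                    return ret;  /* abort before moving anything */
                    }
            }
            move_task(&group[0]);
            if (threadgroup)
                    for (int i = 1; i < n; i++)
                            move_task(&group[i]);
            return 0;
    }

    int main(void)
    {
            struct task g[3] = { {1, 0}, {2, 0}, {3, 0} };
            return attach(g, 3, 1) ? 1 : 0;
    }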