author     Takashi Iwai <tiwai@suse.de>    2009-11-01 05:11:07 -0500
committer  Takashi Iwai <tiwai@suse.de>    2009-11-01 05:11:07 -0500
commit     e87a3dd33eab30b4db539500064a9584867e4f2c
tree       2f7ad16e46ae30518ff63bb5391b63f7f7cc74dd /kernel/sched.c
parent     b14f5de731ae657d498d18d713c6431bfbeefb4b
parent     3d00941371a765779c4e3509214c7e5793cce1fe
Merge branch 'fix/misc' into topic/misc
Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c  106
 1 file changed, 63 insertions(+), 43 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbff..1535f3884b88 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -681,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-        int cpu = get_cpu();
-        struct rq *rq = cpu_rq(cpu);
-        int ret;
-
-        ret = spin_is_locked(&rq->lock);
-        put_cpu();
-        return ret;
+        return spin_is_locked(&cpu_rq(cpu)->lock);
 }

 /*
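
Note on the hunk above: runqueue_is_locked() now takes the CPU to check as an argument instead of pinning itself to the local CPU with get_cpu()/put_cpu(). A minimal caller-side sketch, assuming a hypothetical call site that already runs with preemption disabled (the wrapper name below is illustrative, not part of this patch):

/*
 * Illustrative caller only: with the new prototype the caller picks the
 * CPU, so a context that already has preemption disabled can pass the
 * current CPU id directly.
 */
static int local_rq_is_locked(void)
{
        int cpu = smp_processor_id();   /* safe: caller keeps preemption off */

        return runqueue_is_locked(cpu); /* old form took no argument */
}
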
@@ -786,7 +780,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
         return single_open(filp, sched_feat_show, NULL);
 }

-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
         .open   = sched_feat_open,
         .write  = sched_feat_write,
         .read   = seq_read,
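
Note on the hunk above: marking sched_feat_fops const is possible because the operations table is only read after it is handed over, and it lets the compiler place the structure in read-only data. A sketch of registering such a const table, assuming the usual debugfs helper (the file name and init function below are illustrative):

/*
 * Sketch only: debugfs_create_file() takes a const struct file_operations *,
 * so the constified table can be registered unchanged.
 */
static int __init sched_feat_debugfs_sketch(void)
{
        debugfs_create_file("sched_features", 0644, NULL, NULL,
                            &sched_feat_fops);
        return 0;
}
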
@@ -2059,7 +2053,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                 if (task_hot(p, old_rq->clock, NULL))
                         schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-                perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+                perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
                                 1, 1, NULL, 0);
         }
         p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2724,7 +2718,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
          */
         prev_state = prev->state;
         finish_arch_switch(prev);
-        perf_counter_task_sched_in(current, cpu_of(rq));
+        perf_event_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);

         fire_sched_in_preempt_notifiers(current);
@@ -2910,6 +2904,19 @@ unsigned long nr_iowait(void)
         return sum;
 }

+unsigned long nr_iowait_cpu(void)
+{
+        struct rq *this = this_rq();
+        return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+        struct rq *this = this_rq();
+        return this->cpu_load[0];
+}
+
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
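
Note on the hunk above: nr_iowait_cpu() and this_cpu_load() expose per-CPU counterparts of the existing global statistics. A hypothetical consumer, sketched only to show how the two new helpers read (the function name and the threshold logic are illustrative, not taken from this patch):

/*
 * Illustrative consumer: combine the new per-CPU helpers into a simple
 * "does this CPU look I/O-bound right now?" check.
 */
static int cpu_looks_io_bound(void)
{
        unsigned long iowaiters = nr_iowait_cpu();  /* tasks in iowait that slept on this CPU */
        unsigned long load = this_cpu_load();       /* most recent cpu_load[0] sample */

        return iowaiters > 0 && load > 0;
}
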
@@ -5085,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-        cputime_t one_jiffy = jiffies_to_cputime(1);
-        cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+        cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
         struct rq *rq = this_rq();

         if (user_tick)
-                account_user_time(p, one_jiffy, one_jiffy_scaled);
+                account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
         else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+                account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
                                 one_jiffy_scaled);
         else
-                account_idle_time(one_jiffy);
+                account_idle_time(cputime_one_jiffy);
 }

 /*
@@ -5199,7 +5205,7 @@ void scheduler_tick(void)
         curr->sched_class->task_tick(rq, curr, 0);
         spin_unlock(&rq->lock);

-        perf_counter_task_tick(curr, cpu);
+        perf_event_task_tick(curr, cpu);

 #ifdef CONFIG_SMP
         rq->idle_at_tick = idle_cpu(cpu);
@@ -5415,7 +5421,7 @@ need_resched_nonpreemptible:

         if (likely(prev != next)) {
                 sched_info_switch(prev, next);
-                perf_counter_task_sched_out(prev, next, cpu);
+                perf_event_task_sched_out(prev, next, cpu);

                 rq->nr_switches++;
                 rq->curr = next;
@@ -6825,23 +6831,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
         if (retval)
                 goto out_unlock;

-        /*
-         * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
-         * tasks that are on an otherwise idle runqueue:
-         */
-        time_slice = 0;
-        if (p->policy == SCHED_RR) {
-                time_slice = DEF_TIMESLICE;
-        } else if (p->policy != SCHED_FIFO) {
-                struct sched_entity *se = &p->se;
-                unsigned long flags;
-                struct rq *rq;
-
-                rq = task_rq_lock(p, &flags);
-                if (rq->cfs.load.weight)
-                        time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-                task_rq_unlock(rq, &flags);
-        }
+        time_slice = p->sched_class->get_rr_interval(p);
+
         read_unlock(&tasklist_lock);
         jiffies_to_timespec(time_slice, &t);
         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
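
Note on the hunk above: sys_sched_rr_get_interval() no longer computes the timeslice itself; it asks the task's scheduling class through a new get_rr_interval() hook. A sketch of what an implementation of that hook could look like for the round-robin policy, assuming the single-argument form used at the call site (the body is illustrative; the real per-class implementations are not part of this hunk):

/*
 * Illustrative sched_class hook: return a fixed slice for SCHED_RR tasks
 * and no slice for tasks that are not round-robin scheduled.
 */
static unsigned int example_rt_get_rr_interval(struct task_struct *task)
{
        if (task->policy == SCHED_RR)
                return DEF_TIMESLICE;
        return 0;
}
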
@@ -7692,7 +7683,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else. This has to be lower priority than
- * the notifier in the perf_counter subsystem, though.
+ * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
         .notifier_call = migration_call,
@@ -9171,6 +9162,7 @@ void __init sched_init_smp(void)
         cpumask_var_t non_isolated_cpus;

         alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);

 #if defined(CONFIG_NUMA)
         sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -9202,7 +9194,6 @@ void __init sched_init_smp(void)
         sched_init_granularity();
         free_cpumask_var(non_isolated_cpus);

-        alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
         init_sched_rt_class();
 }
 #else
@@ -9549,7 +9540,7 @@ void __init sched_init(void)
         alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */

-        perf_counter_init();
+        perf_event_init();

         scheduler_running = 1;
 }
@@ -10321,7 +10312,7 @@ static int sched_rt_global_constraints(void)
 #endif /* CONFIG_RT_GROUP_SCHED */

 int sched_rt_handler(struct ctl_table *table, int write,
-                struct file *filp, void __user *buffer, size_t *lenp,
+                void __user *buffer, size_t *lenp,
                 loff_t *ppos)
 {
         int ret;
@@ -10332,7 +10323,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
         old_period = sysctl_sched_rt_period;
         old_runtime = sysctl_sched_rt_runtime;

-        ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+        ret = proc_dointvec(table, write, buffer, lenp, ppos);

         if (!ret && write) {
                 ret = sched_rt_global_constraints();
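
Note on the two hunks above: they track the removal of the struct file * argument from the proc_handler calling convention, so proc_dointvec() is now called without filp. A sketch of a handler in the new form, with hypothetical names (only the proc_dointvec() call itself is taken from the patch):

/*
 * Sketch of a sysctl handler using the filp-less convention: run the
 * generic integer parser, then validate on successful writes.
 */
static int example_sysctl_handler(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp,
                                  loff_t *ppos)
{
        int ret = proc_dointvec(table, write, buffer, lenp, ppos);

        if (!ret && write) {
                /* re-check the freshly written value here */
        }
        return ret;
}
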
@@ -10386,8 +10377,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }

 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
         if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10397,15 +10387,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
         if (tsk->sched_class != &fair_sched_class)
                 return -EINVAL;
 #endif
+        return 0;
+}

+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+                      struct task_struct *tsk, bool threadgroup)
+{
+        int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+        if (retval)
+                return retval;
+        if (threadgroup) {
+                struct task_struct *c;
+                rcu_read_lock();
+                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                        retval = cpu_cgroup_can_attach_task(cgrp, c);
+                        if (retval) {
+                                rcu_read_unlock();
+                                return retval;
+                        }
+                }
+                rcu_read_unlock();
+        }
         return 0;
 }

 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                  struct cgroup *old_cont, struct task_struct *tsk)
+                  struct cgroup *old_cont, struct task_struct *tsk,
+                  bool threadgroup)
 {
         sched_move_task(tsk);
+        if (threadgroup) {
+                struct task_struct *c;
+                rcu_read_lock();
+                list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+                        sched_move_task(c);
+                }
+                rcu_read_unlock();
+        }
 }

 #ifdef CONFIG_FAIR_GROUP_SCHED
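
Note on the hunk above: both cgroup callbacks now take a threadgroup flag and, when it is set, walk tsk->thread_group under rcu_read_lock() so every thread in the group is checked or moved, not just the leader. The shared pattern, factored into a sketch (the helper name and the int-returning callback are illustrative only):

/*
 * Apply "fn" to the group leader and, if threadgroup is set, to every
 * other thread in the group while holding rcu_read_lock().
 */
static int for_each_attach_task(struct task_struct *tsk, bool threadgroup,
                                int (*fn)(struct task_struct *t))
{
        struct task_struct *c;
        int ret = fn(tsk);

        if (ret || !threadgroup)
                return ret;

        rcu_read_lock();
        list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
                ret = fn(c);
                if (ret)
                        break;
        }
        rcu_read_unlock();
        return ret;
}
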