Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c           29
-rw-r--r--  kernel/sched_fair.c      65
-rw-r--r--  kernel/sched_idletask.c   7
-rw-r--r--  kernel/sched_rt.c        13
-rw-r--r--  kernel/trace/trace.c      8
5 files changed, 73 insertions, 49 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbff..830967e18285 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -681,15 +681,9 @@ inline void update_rq_clock(struct rq *rq)
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-	int cpu = get_cpu();
-	struct rq *rq = cpu_rq(cpu);
-	int ret;
-
-	ret = spin_is_locked(&rq->lock);
-	put_cpu();
-	return ret;
+	return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -6825,23 +6819,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;
 
-	/*
-	 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
-	 * tasks that are on an otherwise idle runqueue:
-	 */
-	time_slice = 0;
-	if (p->policy == SCHED_RR) {
-		time_slice = DEF_TIMESLICE;
-	} else if (p->policy != SCHED_FIFO) {
-		struct sched_entity *se = &p->se;
-		unsigned long flags;
-		struct rq *rq;
+	time_slice = p->sched_class->get_rr_interval(p);
 
-		rq = task_rq_lock(p, &flags);
-		if (rq->cfs.load.weight)
-			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-		task_rq_unlock(rq, &flags);
-	}
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
@@ -9171,6 +9150,7 @@ void __init sched_init_smp(void)
 	cpumask_var_t non_isolated_cpus;
 
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 #if defined(CONFIG_NUMA)
 	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -9202,7 +9182,6 @@ void __init sched_init_smp(void)
 	sched_init_granularity();
 	free_cpumask_var(non_isolated_cpus);
 
-	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 	init_sched_rt_class();
 }
 #else
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 990b188803ce..ecc637a0d591 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -710,31 +710,28 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (initial && sched_feat(START_DEBIT))
 		vruntime += sched_vslice(cfs_rq, se);
 
-	if (!initial) {
-		/* sleeps upto a single latency don't count. */
-		if (sched_feat(FAIR_SLEEPERS)) {
-			unsigned long thresh = sysctl_sched_latency;
+	/* sleeps up to a single latency don't count. */
+	if (!initial && sched_feat(FAIR_SLEEPERS)) {
+		unsigned long thresh = sysctl_sched_latency;
 
-			/*
-			 * Convert the sleeper threshold into virtual time.
-			 * SCHED_IDLE is a special sub-class. We care about
-			 * fairness only relative to other SCHED_IDLE tasks,
-			 * all of which have the same weight.
-			 */
-			if (sched_feat(NORMALIZED_SLEEPER) &&
-					(!entity_is_task(se) ||
-					 task_of(se)->policy != SCHED_IDLE))
-				thresh = calc_delta_fair(thresh, se);
+		/*
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
+		 */
+		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
+				 task_of(se)->policy != SCHED_IDLE))
+			thresh = calc_delta_fair(thresh, se);
 
-			/*
-			 * Halve their sleep time's effect, to allow
-			 * for a gentler effect of sleepers:
-			 */
-			if (sched_feat(GENTLE_FAIR_SLEEPERS))
-				thresh >>= 1;
+		/*
+		 * Halve their sleep time's effect, to allow
+		 * for a gentler effect of sleepers:
+		 */
+		if (sched_feat(GENTLE_FAIR_SLEEPERS))
+			thresh >>= 1;
 
-			vruntime -= thresh;
-		}
+		vruntime -= thresh;
 	}
 
 	/* ensure we never gain time by being placed backwards. */
@@ -1343,7 +1340,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
-		if (sched_feat(AFFINE_WAKEUPS))
+		if (sched_feat(AFFINE_WAKEUPS) &&
+		    cpumask_test_cpu(cpu, &p->cpus_allowed))
 			want_affine = 1;
 		new_cpu = prev_cpu;
 	}
@@ -1941,6 +1939,25 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
+unsigned int get_rr_interval_fair(struct task_struct *task)
+{
+	struct sched_entity *se = &task->se;
+	unsigned long flags;
+	struct rq *rq;
+	unsigned int rr_interval = 0;
+
+	/*
+	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
+	 * idle runqueue:
+	 */
+	rq = task_rq_lock(task, &flags);
+	if (rq->cfs.load.weight)
+		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+	task_rq_unlock(rq, &flags);
+
+	return rr_interval;
+}
+
 /*
  * All the scheduling class methods:
  */
@@ -1969,6 +1986,8 @@ static const struct sched_class fair_sched_class = {
 	.prio_changed = prio_changed_fair,
 	.switched_to = switched_to_fair,
 
+	.get_rr_interval = get_rr_interval_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.moved_group = moved_group_fair,
 #endif
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a8b448af004b..b133a28fcde3 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -97,6 +97,11 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 	check_preempt_curr(rq, p, 0);
 }
 
+unsigned int get_rr_interval_idle(struct task_struct *task)
+{
+	return 0;
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -122,6 +127,8 @@ static const struct sched_class idle_sched_class = {
 	.set_curr_task = set_curr_task_idle,
 	.task_tick = task_tick_idle,
 
+	.get_rr_interval = get_rr_interval_idle,
+
 	.prio_changed = prio_changed_idle,
 	.switched_to = switched_to_idle,
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 13de7126a6ab..a4d790cddb19 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1734,6 +1734,17 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }
 
+unsigned int get_rr_interval_rt(struct task_struct *task)
+{
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks
+	 */
+	if (task->policy == SCHED_RR)
+		return DEF_TIMESLICE;
+	else
+		return 0;
+}
+
 static const struct sched_class rt_sched_class = {
 	.next = &fair_sched_class,
 	.enqueue_task = enqueue_task_rt,
@@ -1762,6 +1773,8 @@ static const struct sched_class rt_sched_class = {
 	.set_curr_task = set_curr_task_rt,
 	.task_tick = task_tick_rt,
 
+	.get_rr_interval = get_rr_interval_rt,
+
 	.prio_changed = prio_changed_rt,
 	.switched_to = switched_to_rt,
 };
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 861308072d28..a35925d222ba 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -268,12 +268,18 @@ static DEFINE_SPINLOCK(tracing_start_lock);
  */
 void trace_wake_up(void)
 {
+	int cpu;
+
+	if (trace_flags & TRACE_ITER_BLOCK)
+		return;
 	/*
 	 * The runqueue_is_locked() can fail, but this is the best we
 	 * have for now:
 	 */
-	if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+	cpu = get_cpu();
+	if (!runqueue_is_locked(cpu))
 		wake_up(&trace_wait);
+	put_cpu();
 }
 
 static int __init set_buf_size(char *str)