Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c            23
-rw-r--r--  kernel/sched_rt.c         30
-rw-r--r--  kernel/time/tick-sched.c   5
3 files changed, 43 insertions(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5ea2c533b432..22712b2e058a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -442,6 +442,7 @@ struct rq {
 	struct cfs_rq cfs;
 	struct rt_rq rt;
 	u64 rt_period_expire;
+	int rt_throttled;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* list of leaf cfs_rq on this cpu: */
@@ -594,6 +595,23 @@ static void update_rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+unsigned long rt_needs_cpu(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	u64 delta;
+
+	if (!rq->rt_throttled)
+		return 0;
+
+	if (rq->clock > rq->rt_period_expire)
+		return 1;
+
+	delta = rq->rt_period_expire - rq->clock;
+	do_div(delta, NSEC_PER_SEC / HZ);
+
+	return (unsigned long)delta;
+}
+
 /*
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
@@ -7102,9 +7120,11 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	/* delimiter for bitsearch: */
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
+#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+	rt_rq->highest_prio = MAX_RT_PRIO;
+#endif
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
-	rt_rq->highest_prio = MAX_RT_PRIO;
 	rt_rq->overloaded = 0;
 #endif
 
@@ -7191,6 +7211,7 @@ void __init sched_init(void)
 				&per_cpu(init_sched_rt_entity, i), i, 1);
 #endif
 		rq->rt_period_expire = 0;
+		rq->rt_throttled = 0;
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
 			rq->cpu_load[j] = 0;
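The new rt_needs_cpu() converts the time left until the runqueue's RT period boundary into whole timer ticks, so the nohz code can bound its sleep. Below is a minimal userspace sketch of just that conversion; it is not kernel code, HZ is an assumed config value, and plain 64-bit division stands in for the kernel's do_div():

/* Userspace sketch of the conversion rt_needs_cpu() performs.
 * HZ and the example values are assumptions for illustration. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define HZ		1000ULL		/* assumed CONFIG_HZ */

static unsigned long ticks_until_unthrottle(uint64_t clock, uint64_t expire,
					    int throttled)
{
	uint64_t delta;

	if (!throttled)
		return 0;	/* nothing throttled: no wakeup constraint */

	if (clock > expire)
		return 1;	/* period already over: wake on next tick */

	delta = expire - clock;
	return (unsigned long)(delta / (NSEC_PER_SEC / HZ));
}

int main(void)
{
	/* throttled, 2.5ms left in the period -> 2 whole ticks at HZ=1000 */
	printf("%lu\n", ticks_until_unthrottle(0, 2500000, 1));
	/* period boundary already passed -> wake on the very next tick */
	printf("%lu\n", ticks_until_unthrottle(3000000, 2500000, 1));
	/* not throttled -> 0 */
	printf("%lu\n", ticks_until_unthrottle(0, 2500000, 0));
	return 0;
}

Returning 0 for the unthrottled case lets the caller distinguish "no constraint" from "wake in N ticks", which is why the clamp in tick-sched.c below tests rt_jiffies for non-zero first.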
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1144bf55669d..8bfdb3f8a52d 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -175,7 +175,11 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
 	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
 
 	if (rt_rq->rt_time > ratio) {
+		struct rq *rq = rq_of_rt_rq(rt_rq);
+
+		rq->rt_throttled = 1;
 		rt_rq->rt_throttled = 1;
+
 		sched_rt_ratio_dequeue(rt_rq);
 		return 1;
 	}
@@ -183,18 +187,6 @@ static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
 	return 0;
 }
 
-static void __update_sched_rt_period(struct rt_rq *rt_rq, u64 period)
-{
-	unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-	u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
-
-	rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-	if (rt_rq->rt_throttled) {
-		rt_rq->rt_throttled = 0;
-		sched_rt_ratio_enqueue(rt_rq);
-	}
-}
-
 static void update_sched_rt_period(struct rq *rq)
 {
 	struct rt_rq *rt_rq;
@@ -204,8 +196,18 @@ static void update_sched_rt_period(struct rq *rq)
 		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
 		rq->rt_period_expire += period;
 
-		for_each_leaf_rt_rq(rt_rq, rq)
-			__update_sched_rt_period(rt_rq, period);
+		for_each_leaf_rt_rq(rt_rq, rq) {
+			unsigned long rt_ratio = sched_rt_ratio(rt_rq);
+			u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+
+			rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
+			if (rt_rq->rt_throttled) {
+				rt_rq->rt_throttled = 0;
+				sched_rt_ratio_enqueue(rt_rq);
+			}
+		}
+
+		rq->rt_throttled = 0;
 	}
 }
 
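With __update_sched_rt_period() folded into the loop, each period boundary refunds up to one period's budget per rt_rq, unthrottles it, and finally clears the per-rq rt_throttled flag. A standalone sketch of that replenishment arithmetic, with made-up values and "ratio" standing for the (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT budget:

/* Userspace sketch of the per-rt_rq replenishment now done inline in
 * update_sched_rt_period(); struct and values are illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

struct rt_rq_sim {
	uint64_t rt_time;	/* RT runtime consumed this period (ns) */
	int rt_throttled;
};

static void period_boundary(struct rt_rq_sim *rt_rq, uint64_t ratio)
{
	/* refund at most one period's budget; any overrun carries over */
	rt_rq->rt_time -= MIN(rt_rq->rt_time, ratio);
	if (rt_rq->rt_throttled)
		rt_rq->rt_throttled = 0;	/* kernel also re-enqueues here */
}

int main(void)
{
	/* 950ms budget per period; this rt_rq overran by 300ms and throttled */
	struct rt_rq_sim rq = { .rt_time = 1250000000ULL, .rt_throttled = 1 };

	period_boundary(&rq, 950000000ULL);
	printf("rt_time=%llu throttled=%d\n",
	       (unsigned long long)rq.rt_time, rq.rt_throttled);
	/* prints rt_time=300000000 throttled=0 */
	return 0;
}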
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb89fa8db110..5f9fb645b725 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -153,6 +153,7 @@ void tick_nohz_update_jiffies(void)
 void tick_nohz_stop_sched_tick(void)
 {
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
+	unsigned long rt_jiffies;
 	struct tick_sched *ts;
 	ktime_t last_update, expires, now, delta;
 	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
@@ -216,6 +217,10 @@ void tick_nohz_stop_sched_tick(void)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
+	rt_jiffies = rt_needs_cpu(cpu);
+	if (rt_jiffies && rt_jiffies < delta_jiffies)
+		delta_jiffies = rt_jiffies;
+
 	if (rcu_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
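The nohz path now clamps the tickless sleep so a CPU with a throttled RT runqueue wakes in time for the period boundary that unthrottles it, instead of sleeping past it indefinitely. A hedged sketch of just that clamp, with a stub standing in for the real rt_needs_cpu(cpu):

/* Sketch of the new clamp in tick_nohz_stop_sched_tick(); the stub's
 * return value is an assumption standing in for rt_needs_cpu(cpu). */
#include <stdio.h>

static unsigned long rt_needs_cpu_stub(void)
{
	return 3;	/* pretend the RT period expires 3 ticks from now */
}

int main(void)
{
	unsigned long delta_jiffies = 250;	/* next timer 250 ticks away */
	unsigned long rt_jiffies = rt_needs_cpu_stub();

	/* 0 means no throttled RT runqueue, so no constraint on the sleep */
	if (rt_jiffies && rt_jiffies < delta_jiffies)
		delta_jiffies = rt_jiffies;

	printf("stop the tick for %lu jiffies\n", delta_jiffies);
	return 0;
}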