Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8d4269381239..35825b28e429 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -57,12 +57,12 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
 	if (!rt_rq->tg)
-		return SCHED_RT_FRAC;
+		return RUNTIME_INF;
 
-	return rt_rq->tg->rt_ratio;
+	return rt_rq->tg->rt_runtime;
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
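
This hunk sets the theme of the whole diff: the per-group fixed-point rt_ratio is replaced by an absolute rt_runtime budget, with RUNTIME_INF marking an rt_rq that is never throttled. A minimal standalone model of the accessor's semantics (struct layouts abbreviated, and the RUNTIME_INF definition is an assumption, not copied from the tree):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define RUNTIME_INF ((uint64_t)~0ULL)	/* assumed sentinel value */

/* Abbreviated models of the kernel structs; field layout is an assumption. */
struct task_group_model {
	uint64_t rt_runtime;	/* ns of RT execution allowed per period */
};

struct rt_rq_model {
	struct task_group_model *tg;
};

static uint64_t sched_rt_runtime_model(const struct rt_rq_model *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;	/* groupless rt_rq: never throttled */
	return rt_rq->tg->rt_runtime;
}

int main(void)
{
	struct task_group_model tg = { .rt_runtime = 950000000ULL };
	struct rt_rq_model grouped = { .tg = &tg }, root = { .tg = NULL };

	printf("grouped: %llu ns\n",
	       (unsigned long long)sched_rt_runtime_model(&grouped));
	printf("root:    %llu ns (INF)\n",
	       (unsigned long long)sched_rt_runtime_model(&root));
	return 0;
}

Returning an "infinite" sentinel rather than a special-cased flag lets every caller reduce to a single comparison against the budget.
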
@@ -89,7 +89,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
-static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -102,7 +102,7 @@ static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
 	}
 }
 
-static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -129,9 +129,12 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 
 #else
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-	return sysctl_sched_rt_ratio;
+	if (sysctl_sched_rt_runtime == -1)
+		return RUNTIME_INF;
+
+	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
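
In the non-group case the budget now comes from sysctl_sched_rt_runtime, given in microseconds, with -1 meaning unlimited; the accessor converts it to nanoseconds. A hedged sketch of that mapping:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define RUNTIME_INF ((uint64_t)~0ULL)	/* assumed sentinel value */

/* sysctl_sched_rt_runtime is in microseconds; -1 means no limit. */
static int sysctl_sched_rt_runtime = 950000;

static uint64_t sched_rt_runtime_model(void)
{
	if (sysctl_sched_rt_runtime == -1)
		return RUNTIME_INF;
	return (uint64_t)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

int main(void)
{
	printf("budget: %llu ns\n",
	       (unsigned long long)sched_rt_runtime_model());
	return 0;
}
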
@@ -158,11 +161,11 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return NULL;
 }
 
-static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 }
 
-static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
@@ -184,28 +187,24 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	return rt_task_of(rt_se)->prio;
 }
 
-static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
-	unsigned int rt_ratio = sched_rt_ratio(rt_rq);
-	u64 period, ratio;
+	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (rt_ratio == SCHED_RT_FRAC)
+	if (runtime == RUNTIME_INF)
 		return 0;
 
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
-	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
-	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
-
-	if (rt_rq->rt_time > ratio) {
+	if (rt_rq->rt_time > runtime) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
 		rq->rt_throttled = 1;
 		rt_rq->rt_throttled = 1;
 
 		if (rt_rq_throttled(rt_rq)) {
-			sched_rt_rq_dequeue(rt_rq);
+			sched_rt_rq_dequeue(rt_rq);
 			return 1;
 		}
 	}
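
This is the core of the conversion: the throttle test compares rt_time against an absolute nanosecond budget instead of deriving one from a period and a fixed-point ratio on every call. A standalone comparison of the two computations, with illustrative numbers (SCHED_RT_FRAC_SHIFT = 16 is an assumption about the old fixed-point scale, not copied from the tree):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL
#define NSEC_PER_USEC 1000ULL

/* Old scheme: budget = period * ratio, ratio being a 16-bit fraction. */
#define SCHED_RT_FRAC_SHIFT 16

int main(void)
{
	/* Old interface: period in ms, ratio as a fraction of 1 << 16. */
	uint64_t period_ms = 1000;
	uint64_t rt_ratio = 62259;	/* ~0.95 * 65536 */
	uint64_t period = period_ms * NSEC_PER_MSEC;
	uint64_t old_budget = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	/* New interface: runtime given directly in us. */
	uint64_t rt_runtime_us = 950000;
	uint64_t new_budget = rt_runtime_us * NSEC_PER_USEC;

	printf("old budget: %llu ns\n", (unsigned long long)old_budget);
	printf("new budget: %llu ns\n", (unsigned long long)new_budget);
	return 0;
}

Both paths arrive at roughly a 950 ms budget per 1 s period; the new form simply stores that figure directly rather than recomputing it.
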
@@ -219,17 +218,16 @@ static void update_sched_rt_period(struct rq *rq)
 	u64 period;
 
 	while (rq->clock > rq->rt_period_expire) {
-		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+		period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 		rq->rt_period_expire += period;
 
 		for_each_leaf_rt_rq(rt_rq, rq) {
-			unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-			u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+			u64 runtime = sched_rt_runtime(rt_rq);
 
-			rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-			if (rt_rq->rt_throttled) {
+			rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 				rt_rq->rt_throttled = 0;
-				sched_rt_ratio_enqueue(rt_rq);
+				sched_rt_rq_enqueue(rt_rq);
 			}
 		}
 
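
Two behavioral points in this replenishment loop: the period sysctl is now interpreted in microseconds rather than milliseconds, and a throttled rt_rq is unthrottled only once its remaining rt_time has dropped below the budget, rather than unconditionally at every period boundary. A hedged model of one replenishment step:

#include <stdint.h>
#include <stdio.h>

struct rt_rq_model {
	uint64_t rt_time;	/* ns consumed in the current period */
	int rt_throttled;
};

/* One period tick: refund up to one budget's worth of consumed time,
 * and clear the throttle only if we are back under the budget. */
static void period_tick(struct rt_rq_model *rt_rq, uint64_t runtime)
{
	uint64_t refund = rt_rq->rt_time < runtime ? rt_rq->rt_time : runtime;

	rt_rq->rt_time -= refund;
	if (rt_rq->rt_throttled && rt_rq->rt_time < runtime)
		rt_rq->rt_throttled = 0;
}

int main(void)
{
	/* Overrun by more than one budget: stays throttled one extra period. */
	struct rt_rq_model rq = { .rt_time = 2100, .rt_throttled = 1 };

	period_tick(&rq, 1000);	/* rt_time 1100, still throttled */
	printf("t=1: rt_time=%llu throttled=%d\n",
	       (unsigned long long)rq.rt_time, rq.rt_throttled);
	period_tick(&rq, 1000);	/* rt_time 100, unthrottled */
	printf("t=2: rt_time=%llu throttled=%d\n",
	       (unsigned long long)rq.rt_time, rq.rt_throttled);
	return 0;
}
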
@@ -262,12 +260,7 @@ static void update_curr_rt(struct rq *rq)
 	cpuacct_charge(curr, delta_exec);
 
 	rt_rq->rt_time += delta_exec;
-	/*
-	 * might make it a tad more accurate:
-	 *
-	 * update_sched_rt_period(rq);
-	 */
-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (sched_rt_runtime_exceeded(rt_rq))
 		resched_task(curr);
 }
 
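
Finally, update_curr_rt() charges the elapsed execution time and reschedules the current task once the budget is exceeded; the stale comment proposing an extra update_sched_rt_period() call is dropped. A hedged, simplified model of the charge-and-check step (it omits the early return the real code takes when already throttled):

#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF ((uint64_t)~0ULL)	/* assumed sentinel value */

struct rt_rq_model {
	uint64_t rt_time;
	int rt_throttled;
};

/* Charge delta_exec ns to the rt_rq and report whether the budget is
 * exceeded, mirroring the new sched_rt_runtime_exceeded() shape. */
static int charge(struct rt_rq_model *rt_rq, uint64_t delta_exec,
		  uint64_t runtime)
{
	rt_rq->rt_time += delta_exec;
	if (runtime == RUNTIME_INF)
		return 0;
	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		return 1;	/* caller would resched_task(curr) */
	}
	return 0;
}

int main(void)
{
	struct rt_rq_model rq = { 0, 0 };

	printf("exceeded: %d\n", charge(&rq, 600, 1000));	/* 0 */
	printf("exceeded: %d\n", charge(&rq, 600, 1000));	/* 1 */
	return 0;
}
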