Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--  kernel/sched_rt.c | 102
1 file changed, 64 insertions(+), 38 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 274b40d7bef2..f54792b175b2 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -55,14 +55,14 @@ static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 	return !list_empty(&rt_se->run_list);
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
 	if (!rt_rq->tg)
-		return SCHED_RT_FRAC;
+		return RUNTIME_INF;
 
-	return rt_rq->tg->rt_ratio;
+	return rt_rq->tg->rt_runtime;
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -89,7 +89,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
-static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -102,7 +102,7 @@ static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
 	}
 }
 
-static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 	struct sched_rt_entity *rt_se = rt_rq->rt_se;
 
@@ -110,11 +110,31 @@ static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
 		dequeue_rt_entity(rt_se);
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
+}
+
+static int rt_se_boosted(struct sched_rt_entity *rt_se)
+{
+	struct rt_rq *rt_rq = group_rt_rq(rt_se);
+	struct task_struct *p;
+
+	if (rt_rq)
+		return !!rt_rq->rt_nr_boosted;
+
+	p = rt_task_of(rt_se);
+	return p->prio != p->normal_prio;
+}
+
 #else
 
-static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
+static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
 {
-	return sysctl_sched_rt_ratio;
+	if (sysctl_sched_rt_runtime == -1)
+		return RUNTIME_INF;
+
+	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 }
 
 #define for_each_leaf_rt_rq(rt_rq, rq) \
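
Note: this hunk replaces the fixed-fraction budget (SCHED_RT_FRAC) with an absolute runtime in nanoseconds, using RUNTIME_INF as the "no limit" sentinel, and adds the rt_nr_boosted bookkeeping that exempts PI-boosted queues from throttling. Below is a hedged, userspace-only sketch of how the !CONFIG_RT_GROUP_SCHED fallback maps the sysctl to a budget; the 950000us default and the helper name global_rt_runtime() are assumptions for illustration, not taken from this diff.

/* Hedged sketch, not part of the patch. */
#include <stdio.h>

typedef unsigned long long u64;

#define RUNTIME_INF	((u64)~0ULL)	/* "no limit" sentinel, as in the patch */
#define NSEC_PER_USEC	1000ULL

/* sysctl_sched_rt_runtime is in microseconds; -1 disables throttling. */
static long long sysctl_sched_rt_runtime = 950000;	/* assumed default */

static u64 global_rt_runtime(void)	/* hypothetical helper name */
{
	if (sysctl_sched_rt_runtime == -1)
		return RUNTIME_INF;
	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

int main(void)
{
	u64 rt = global_rt_runtime();

	if (rt == RUNTIME_INF)
		printf("RT throttling disabled\n");
	else
		printf("RT budget: %llu ns per period\n", rt);
	return 0;
}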
@@ -141,19 +161,23 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 	return NULL;
 }
 
-static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
 }
 
-static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
+static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
 }
 
+static inline int rt_rq_throttled(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_throttled;
+}
 #endif
 
 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 {
-#ifdef CONFIG_FAIR_GROUP_SCHED
+#ifdef CONFIG_RT_GROUP_SCHED
 	struct rt_rq *rt_rq = group_rt_rq(rt_se);
 
 	if (rt_rq)
@@ -163,28 +187,26 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
 	return rt_task_of(rt_se)->prio;
 }
 
-static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
+static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
-	unsigned int rt_ratio = sched_rt_ratio(rt_rq);
-	u64 period, ratio;
+	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (rt_ratio == SCHED_RT_FRAC)
+	if (runtime == RUNTIME_INF)
 		return 0;
 
 	if (rt_rq->rt_throttled)
-		return 1;
-
-	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
-	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+		return rt_rq_throttled(rt_rq);
 
-	if (rt_rq->rt_time > ratio) {
+	if (rt_rq->rt_time > runtime) {
 		struct rq *rq = rq_of_rt_rq(rt_rq);
 
 		rq->rt_throttled = 1;
 		rt_rq->rt_throttled = 1;
 
-		sched_rt_ratio_dequeue(rt_rq);
-		return 1;
+		if (rt_rq_throttled(rt_rq)) {
+			sched_rt_rq_dequeue(rt_rq);
+			return 1;
+		}
 	}
 
 	return 0;
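
Note: sched_rt_runtime_exceeded() now compares consumed time against the absolute budget, and via rt_rq_throttled() it refuses to treat a queue that still holds PI-boosted tasks as throttled, so a lock holder running with inherited priority cannot be starved behind the throttle. A hedged standalone model of that decision (illustrative names, compiled outside the kernel):

typedef unsigned long long u64;

/* Illustrative stand-in for struct rt_rq; field names mirror the patch. */
struct rt_rq_model {
	u64 rt_time;		/* ns consumed in the current period */
	int rt_throttled;
	int rt_nr_boosted;	/* PI-boosted entities queued here */
};

/* Mirrors rt_rq_throttled(): boosted queues never count as throttled. */
static int model_throttled(const struct rt_rq_model *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

/* Mirrors the decision above; returns 1 when the caller should resched. */
static int model_runtime_exceeded(struct rt_rq_model *rt_rq, u64 runtime)
{
	if (rt_rq->rt_throttled)
		return model_throttled(rt_rq);

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		return model_throttled(rt_rq);
	}
	return 0;
}

With the usual defaults (period 1000000us, runtime 950000us), a queue that has consumed more than 950ms of CPU inside the current period throttles unless it is boosted.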
@@ -196,17 +218,16 @@ static void update_sched_rt_period(struct rq *rq)
 	u64 period;
 
 	while (rq->clock > rq->rt_period_expire) {
-		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
+		period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 		rq->rt_period_expire += period;
 
 		for_each_leaf_rt_rq(rt_rq, rq) {
-			unsigned long rt_ratio = sched_rt_ratio(rt_rq);
-			u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;
+			u64 runtime = sched_rt_runtime(rt_rq);
 
-			rt_rq->rt_time -= min(rt_rq->rt_time, ratio);
-			if (rt_rq->rt_throttled) {
+			rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
+			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
 				rt_rq->rt_throttled = 0;
-				sched_rt_ratio_enqueue(rt_rq);
+				sched_rt_rq_enqueue(rt_rq);
 			}
 		}
 
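
Note: replenishment now subtracts the absolute budget once per period tick, and the added rt_time < runtime test keeps a queue throttled until its accumulated overrun is paid back instead of forgiving it at the first period boundary. A worked example, assuming a 950000000 ns budget:

/*
 * Assume runtime = 950000000 ns and an rt_rq throttled after
 * overrunning to rt_time = 2000000000 ns:
 *
 *   period tick 1: rt_time = 2000000000 - 950000000 = 1050000000
 *                  -> still >= runtime, stays throttled
 *   period tick 2: rt_time = 1050000000 - 950000000 =  100000000
 *                  -> now < runtime, unthrottled and re-enqueued
 */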
@@ -239,12 +260,7 @@ static void update_curr_rt(struct rq *rq)
 	cpuacct_charge(curr, delta_exec);
 
 	rt_rq->rt_time += delta_exec;
-	/*
-	 * might make it a tad more accurate:
-	 *
-	 * update_sched_rt_period(rq);
-	 */
-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (sched_rt_runtime_exceeded(rt_rq))
 		resched_task(curr);
 }
 
@@ -253,7 +269,7 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	rt_rq->rt_nr_running++;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_se_prio(rt_se) < rt_rq->highest_prio)
 		rt_rq->highest_prio = rt_se_prio(rt_se);
 #endif
@@ -265,6 +281,10 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted++;
+#endif
 }
 
 static inline
@@ -273,7 +293,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
 	WARN_ON(!rt_rq->rt_nr_running);
 	rt_rq->rt_nr_running--;
-#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED
+#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 	if (rt_rq->rt_nr_running) {
 		struct rt_prio_array *array;
 
@@ -295,6 +315,12 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 	update_rt_migration(rq_of_rt_rq(rt_rq));
 #endif /* CONFIG_SMP */
+#ifdef CONFIG_RT_GROUP_SCHED
+	if (rt_se_boosted(rt_se))
+		rt_rq->rt_nr_boosted--;
+
+	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
+#endif
 }
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
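
Note: inc_rt_tasks()/dec_rt_tasks() now maintain rt_nr_boosted next to rt_nr_running, and the WARN_ON asserts that an empty queue has no boosted entities left behind. A hedged sketch of the counter discipline (illustrative types and names):

/* Illustrative counter pair mirroring rt_nr_running / rt_nr_boosted. */
struct rt_counts {
	unsigned long rt_nr_running;
	unsigned long rt_nr_boosted;
};

static void count_enqueue(struct rt_counts *c, int boosted)
{
	c->rt_nr_running++;
	if (boosted)	/* task: prio != normal_prio; group: rt_nr_boosted != 0 */
		c->rt_nr_boosted++;
}

static void count_dequeue(struct rt_counts *c, int boosted)
{
	c->rt_nr_running--;
	if (boosted)
		c->rt_nr_boosted--;
	/* invariant checked by the WARN_ON: !rt_nr_running => !rt_nr_boosted */
}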
@@ -303,7 +329,7 @@ static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
 	struct rt_prio_array *array = &rt_rq->active;
 	struct rt_rq *group_rq = group_rt_rq(rt_se);
 
-	if (group_rq && group_rq->rt_throttled)
+	if (group_rq && rt_rq_throttled(group_rq))
 		return;
 
 	list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
@@ -496,7 +522,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (unlikely(!rt_rq->rt_nr_running))
 		return NULL;
 
-	if (sched_rt_ratio_exceeded(rt_rq))
+	if (rt_rq_throttled(rt_rq))
 		return NULL;
 
 	do {