Diffstat (limited to 'kernel/sched/sched.h')

 kernel/sched/sched.h | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c9007f28d3a2..e47679b04d16 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -278,7 +278,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
+extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);
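
The new bool force argument lets a caller insist that the CFS period timer be (re)started even when it is already active. A minimal caller-side sketch, assuming a quota-update path along the lines of tg_set_cfs_bandwidth(); the body is illustrative and not part of this diff:

	/* hypothetical caller sketch -- not in this diff */
	raw_spin_lock_irq(&cfs_b->lock);
	cfs_b->period = ns_to_ktime(period);
	cfs_b->quota = quota;
	__start_cfs_bandwidth(cfs_b, true);	/* force: restart even if active */
	raw_spin_unlock_irq(&cfs_b->lock);
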
@@ -409,6 +409,8 @@ struct rt_rq {
 	int overloaded;
 	struct plist_head pushable_tasks;
 #endif
+	int rt_queued;
+
 	int rt_throttled;
 	u64 rt_time;
 	u64 rt_runtime;
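
The new rt_queued field presumably flags whether this rt_rq's tasks are currently counted in rq->nr_running. A sketch of how such a flag could pair with the add_nr_running()/sub_nr_running() helpers changed further down; the function below is assumed for illustration, not taken from this diff:

	/* hypothetical sketch -- not in this diff */
	static void dequeue_top_rt_rq_sketch(struct rt_rq *rt_rq, struct rq *rq)
	{
		if (!rt_rq->rt_queued)
			return;		/* already excluded from rq->nr_running */

		sub_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 0;
	}
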
@@ -423,18 +425,6 @@ struct rt_rq {
 #endif
 };
 
-#ifdef CONFIG_RT_GROUP_SCHED
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
-}
-#else
-static inline int rt_rq_throttled(struct rt_rq *rt_rq)
-{
-	return rt_rq->rt_throttled;
-}
-#endif
-
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
@@ -1216,12 +1206,14 @@ extern void update_idle_cpu_load(struct rq *this_rq);
 
 extern void init_task_runnable_average(struct task_struct *p);
 
-static inline void inc_nr_running(struct rq *rq)
+static inline void add_nr_running(struct rq *rq, unsigned count)
 {
-	rq->nr_running++;
+	unsigned prev_nr = rq->nr_running;
+
+	rq->nr_running = prev_nr + count;
 
 #ifdef CONFIG_NO_HZ_FULL
-	if (rq->nr_running == 2) {
+	if (prev_nr < 2 && rq->nr_running >= 2) {
 		if (tick_nohz_full_cpu(rq->cpu)) {
 			/* Order rq->nr_running write against the IPI */
 			smp_wmb();
@@ -1231,9 +1223,9 @@ static inline void inc_nr_running(struct rq *rq)
 #endif
 }
 
-static inline void dec_nr_running(struct rq *rq)
+static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
-	rq->nr_running--;
+	rq->nr_running -= count;
 }
 
 static inline void rq_last_tick_reset(struct rq *rq)
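
Taking a count makes the helpers usable by paths that add or remove several tasks at once, which is also why the NO_HZ_FULL test changed: a batched add can jump from one runnable task straight past two, so the old rq->nr_running == 2 check would miss the transition that prev_nr < 2 && rq->nr_running >= 2 still catches. A minimal caller sketch; the function name is assumed for illustration:

	/* hypothetical sketch -- not in this diff */
	static void attach_task_batch(struct rq *rq, unsigned nr_tasks)
	{
		/* ... enqueue nr_tasks entities on rq ... */
		add_nr_running(rq, nr_tasks);	/* one call, not nr_tasks inc_nr_running()s */
	}
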
@@ -1385,6 +1377,15 @@ static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
 	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 }
 
+static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
+{
+	if (l1 > l2)
+		swap(l1, l2);
+
+	spin_lock_irq(l1);
+	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
+}
+
 static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
 {
 	if (l1 > l2)
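
Like double_lock() and double_raw_lock() around it, the new double_lock_irq() orders the two locks by address before taking them, so every caller acquires any given pair in the same global order and an ABBA deadlock between two such callers is impossible; this variant additionally disables interrupts. A usage sketch, with the struct and function assumed for illustration:

	/* hypothetical sketch -- not in this diff */
	struct item {
		spinlock_t lock;
		int val;
	};

	static void move_val(struct item *from, struct item *to)
	{
		double_lock_irq(&from->lock, &to->lock);	/* IRQs off, both locks held */
		to->val = from->val;
		from->val = 0;
		spin_unlock(&to->lock);
		spin_unlock_irq(&from->lock);			/* re-enables IRQs */
	}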