diff options
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r-- | kernel/sched/sched.h | 10 |
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e129993958..d85455539d5c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -26,8 +26,14 @@ extern __read_mostly int scheduler_running;
26 | extern unsigned long calc_load_update; | 26 | extern unsigned long calc_load_update; |
27 | extern atomic_long_t calc_load_tasks; | 27 | extern atomic_long_t calc_load_tasks; |
28 | 28 | ||
29 | extern void calc_global_load_tick(struct rq *this_rq); | ||
29 | extern long calc_load_fold_active(struct rq *this_rq); | 30 | extern long calc_load_fold_active(struct rq *this_rq); |
31 | |||
32 | #ifdef CONFIG_SMP | ||
30 | extern void update_cpu_load_active(struct rq *this_rq); | 33 | extern void update_cpu_load_active(struct rq *this_rq); |
34 | #else | ||
35 | static inline void update_cpu_load_active(struct rq *this_rq) { } | ||
36 | #endif | ||
31 | 37 | ||
32 | /* | 38 | /* |
33 | * Helpers for converting nanosecond timing to jiffy resolution | 39 | * Helpers for converting nanosecond timing to jiffy resolution |
@@ -707,7 +713,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
707 | 713 | ||
708 | static inline u64 __rq_clock_broken(struct rq *rq) | 714 | static inline u64 __rq_clock_broken(struct rq *rq) |
709 | { | 715 | { |
710 | return ACCESS_ONCE(rq->clock); | 716 | return READ_ONCE(rq->clock); |
711 | } | 717 | } |
712 | 718 | ||
713 | static inline u64 rq_clock(struct rq *rq) | 719 | static inline u64 rq_clock(struct rq *rq) |
@@ -1298,8 +1304,6 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1298 | 1304 | ||
1299 | unsigned long to_ratio(u64 period, u64 runtime); | 1305 | unsigned long to_ratio(u64 period, u64 runtime); |
1300 | 1306 | ||
1301 | extern void update_idle_cpu_load(struct rq *this_rq); | ||
1302 | |||
1303 | extern void init_task_runnable_average(struct task_struct *p); | 1307 | extern void init_task_runnable_average(struct task_struct *p); |
1304 | 1308 | ||
1305 | static inline void add_nr_running(struct rq *rq, unsigned count) | 1309 | static inline void add_nr_running(struct rq *rq, unsigned count) |