author     Ingo Molnar <mingo@kernel.org>    2017-02-01 07:10:18 -0500
committer  Ingo Molnar <mingo@kernel.org>    2017-02-07 04:58:12 -0500
commit     f2cb13609d5397cdd747f3ed6fb651233851717d
tree       0714785a7b04430b41346653178afc7b9a7bca70 /kernel/sched/sched.h
parent     004172bdad644327dc7a6543186b9d7b529ee944
sched/topology: Split out scheduler topology code from core.c into topology.c
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/sched.h')
 -rw-r--r--  kernel/sched/sched.h  23
 1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8ff5cc539e8a..17ed94b9b413 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -223,7 +223,7 @@ bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
 	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
 }
 
-extern struct mutex sched_domains_mutex;
+extern void init_dl_bw(struct dl_bw *dl_b);
 
 #ifdef CONFIG_CGROUP_SCHED
 
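The init_dl_bw() prototype takes the slot previously held by the sched_domains_mutex declaration; the mutex reappears below in the CONFIG_SMP block. As a rough illustration of why the prototype is needed (an assumption, not part of this patch): root-domain setup code that leaves core.c initializes the per-root-domain deadline bandwidth, along the lines of the hypothetical example_init_rootdomain() below, which assumes the kernel-internal kernel/sched/sched.h context.

static int example_init_rootdomain(struct root_domain *rd)
{
	/* Set up SCHED_DEADLINE bandwidth accounting for this root domain. */
	init_dl_bw(&rd->dl_bw);
	return 0;
}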
@@ -584,6 +584,13 @@ struct root_domain {
 };
 
 extern struct root_domain def_root_domain;
+extern struct mutex sched_domains_mutex;
+extern cpumask_var_t fallback_doms;
+extern cpumask_var_t sched_domains_tmpmask;
+
+extern void init_defrootdomain(void);
+extern int init_sched_domains(const struct cpumask *cpu_map);
+extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
 
 #endif /* CONFIG_SMP */
 
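These externs make the root-domain plumbing and sched_domains_mutex visible outside core.c, so the code that moves to topology.c can reach them. A minimal sketch of the intended usage, assuming a hypothetical example_rebuild() caller inside kernel/sched/ (the real call sites are in the code being moved, not in this hunk):

static void example_rebuild(const struct cpumask *cpu_map)
{
	/* Sched-domain rebuilds are serialized by sched_domains_mutex. */
	mutex_lock(&sched_domains_mutex);
	init_sched_domains(cpu_map);
	mutex_unlock(&sched_domains_mutex);
}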
@@ -886,6 +893,16 @@ extern int sched_max_numa_distance;
 extern bool find_numa_distance(int distance);
 #endif
 
+#ifdef CONFIG_NUMA
+extern void sched_init_numa(void);
+extern void sched_domains_numa_masks_set(unsigned int cpu);
+extern void sched_domains_numa_masks_clear(unsigned int cpu);
+#else
+static inline void sched_init_numa(void) { }
+static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
+static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 /* The regions in numa_faults array from task_struct */
 enum numa_faults_stats {
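The #else branch provides empty static inline stubs, so callers of these NUMA topology hooks need no #ifdef CONFIG_NUMA guards of their own. For illustration only (example_cpu_activate() is a hypothetical caller, not something introduced by this patch):

static int example_cpu_activate(unsigned int cpu)
{
	/*
	 * Record the CPU in the NUMA distance masks; on !CONFIG_NUMA
	 * builds this call compiles to nothing.
	 */
	sched_domains_numa_masks_set(cpu);
	return 0;
}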
@@ -1752,6 +1769,10 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
+extern void set_rq_online (struct rq *rq);
+extern void set_rq_offline(struct rq *rq);
+extern bool sched_smp_initialized;
+
 #else /* CONFIG_SMP */
 
 /*
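set_rq_online()/set_rq_offline() were previously internal to core.c; declaring them here lets the root-domain attach path that moves to topology.c toggle a runqueue's online state. A simplified, hypothetical sketch of that kind of caller (example_attach() is illustrative, not code from this patch; it assumes the existing struct rq and struct root_domain fields):

static void example_attach(struct rq *rq, struct root_domain *old_rd,
			   struct root_domain *new_rd)
{
	/* Leave the old root domain... */
	if (cpumask_test_cpu(rq->cpu, old_rd->online))
		set_rq_offline(rq);

	rq->rd = new_rd;

	/* ...and come back online in the new one. */
	if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
		set_rq_online(rq);
}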