Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 133
 1 file changed, 39 insertions(+), 94 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f0e1821dcb96..00d1e37b4596 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6816,6 +6816,11 @@ static const struct cpumask *cpu_node_mask(int cpu)
 
 	return sched_domains_tmpmask;
 }
+
+static const struct cpumask *cpu_allnodes_mask(int cpu)
+{
+	return cpu_possible_mask;
+}
 #endif /* CONFIG_NUMA */
 
 static const struct cpumask *cpu_cpu_mask(int cpu)
@@ -6843,14 +6848,12 @@ enum s_alloc {
 	sa_none,
 };
 
-typedef struct sched_domain *(*sched_domain_build_f)(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int cpu);
-
+typedef struct sched_domain *(*sched_domain_init_f)(struct s_data *d, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
 struct sched_domain_topology_level {
-	sched_domain_build_f build;
+	sched_domain_init_f init;
+	sched_domain_mask_f mask;
 };
 
 /*
@@ -7104,109 +7107,51 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 	}
 }
 
-static struct sched_domain *__build_allnodes_sched_domain(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int i)
+#ifdef CONFIG_SCHED_SMT
+static const struct cpumask *cpu_smt_mask(int cpu)
 {
-	struct sched_domain *sd = NULL;
-#ifdef CONFIG_NUMA
-	sd = sd_init_ALLNODES(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_possible_mask);
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
-#endif
-	return sd;
+	return topology_thread_cpumask(cpu);
 }
+#endif
 
-static struct sched_domain *__build_node_sched_domain(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = NULL;
+static struct sched_domain_topology_level default_topology[] = {
 #ifdef CONFIG_NUMA
-	sd = sd_init_NODE(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_node_mask(i));
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
+	{ sd_init_ALLNODES, cpu_allnodes_mask, },
+	{ sd_init_NODE, cpu_node_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd;
-	sd = sd_init_CPU(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_cpu_mask(i));
-	sd->parent = parent;
-	if (parent)
-		parent->child = sd;
-	return sd;
-}
-
-static struct sched_domain *__build_book_sched_domain(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = parent;
+	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_SCHED_BOOK
-	sd = sd_init_BOOK(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
-	sd->parent = parent;
-	parent->child = sd;
+	{ sd_init_BOOK, cpu_book_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_MC
-	sd = sd_init_MC(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
-	sd->parent = parent;
-	parent->child = sd;
+	{ sd_init_MC, cpu_coregroup_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
-	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
-	struct sched_domain *parent, int i)
-{
-	struct sched_domain *sd = parent;
 #ifdef CONFIG_SCHED_SMT
-	sd = sd_init_SIBLING(d, i);
-	set_domain_attribute(sd, attr);
-	cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
-	sd->parent = parent;
-	parent->child = sd;
+	{ sd_init_SIBLING, cpu_smt_mask, },
 #endif
-	return sd;
-}
-
-static struct sched_domain_topology_level default_topology[] = {
-	{ __build_allnodes_sched_domain, },
-	{ __build_node_sched_domain, },
-	{ __build_cpu_sched_domain, },
-	{ __build_book_sched_domain, },
-	{ __build_mc_sched_domain, },
-	{ __build_smt_sched_domain, },
 	{ NULL, },
 };
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
+		struct s_data *d, const struct cpumask *cpu_map,
+		struct sched_domain_attr *attr, struct sched_domain *parent,
+		int cpu)
+{
+	struct sched_domain *sd = tl->init(d, cpu);
+	if (!sd)
+		return parent;
+
+	set_domain_attribute(sd, attr);
+	cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
+	sd->parent = parent;
+	if (parent)
+		parent->child = sd;
+
+	return sd;
+}
+
 /*
  * Build sched domains for a given set of cpus and attach the sched domains
  * to the individual cpus
@@ -7228,8 +7173,8 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->build; tl++)
-			sd = tl->build(&d, cpu_map, attr, sd, i);
+		for (tl = sched_domain_topology; tl->init; tl++)
+			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
 
 		*per_cpu_ptr(d.sd, i) = sd;
 	}
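
The heart of this patch is that six open-coded __build_*_sched_domain() helpers collapse into a NULL-terminated table of { init, mask } pairs walked by one generic build_sched_domain(). Below is a minimal userspace sketch of that table-driven pattern, assuming nothing beyond the diff above: the names (build_level, init_smt, topology[]) and the unsigned-long CPU masks are simplified stand-ins for the kernel's sched_domain, cpumask, sd_init_*() and cpu_*_mask() machinery, not actual kernel API.

/* Sketch only: table-driven domain construction, one level per table entry. */
#include <stdio.h>
#include <stdlib.h>

struct domain {
	const char *name;
	unsigned long span;		/* bitmask of CPUs this level covers */
	struct domain *parent;
	struct domain *child;
};

typedef struct domain *(*level_init_f)(int cpu);
typedef unsigned long (*level_mask_f)(int cpu);

struct topology_level {
	level_init_f init;		/* may return NULL to skip the level */
	level_mask_f mask;		/* which CPUs this level spans */
};

static struct domain *init_named(const char *name)
{
	struct domain *d = calloc(1, sizeof(*d));
	d->name = name;
	return d;
}

static struct domain *init_smt(int cpu)  { (void)cpu; return init_named("SMT"); }
static struct domain *init_mc(int cpu)   { (void)cpu; return init_named("MC"); }
static struct domain *init_numa(int cpu) { (void)cpu; return NULL; } /* level disabled */

static unsigned long mask_smt(int cpu)  { return 0x3UL << (cpu & ~1); }
static unsigned long mask_mc(int cpu)   { (void)cpu; return 0xfUL; }
static unsigned long mask_numa(int cpu) { (void)cpu; return ~0UL; }

/* Widest level first, like default_topology[] in the patch. */
static struct topology_level topology[] = {
	{ init_numa, mask_numa },
	{ init_mc,   mask_mc   },
	{ init_smt,  mask_smt  },
	{ NULL, NULL },
};

/* Mirrors build_sched_domain(): skip empty levels, AND the span, link parent/child. */
static struct domain *build_level(struct topology_level *tl, unsigned long cpu_map,
				  struct domain *parent, int cpu)
{
	struct domain *d = tl->init(cpu);
	if (!d)
		return parent;		/* level not present: keep the chain as-is */

	d->span = cpu_map & tl->mask(cpu);
	d->parent = parent;
	if (parent)
		parent->child = d;
	return d;
}

int main(void)
{
	unsigned long cpu_map = 0xffUL;	/* pretend CPUs 0-7 are online */
	struct domain *d = NULL;
	struct topology_level *tl;

	for (tl = topology; tl->init; tl++)
		d = build_level(tl, cpu_map, d, 0);

	/* d is the most specific level for CPU 0; walk up to the widest one. */
	for (; d; d = d->parent)
		printf("%-4s span=%#lx\n", d->name, d->span);
	return 0;
}

Adding a topology level in this scheme means adding one table entry (optionally behind an #ifdef), not another near-identical builder function, which is exactly the duplication the 94 deleted lines above were carrying.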