author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-04-17 09:49:36 -0400
committer	Ingo Molnar <mingo@kernel.org>	2012-05-09 09:00:55 -0400
commit		cb83b629bae0327cf9f44f096adc38d150ceb913 (patch)
tree		13f7da07ee150a97c21aace57eaa817a30df9539 /kernel
parent		bd939f45da24e25e08a8f5c993c50b1afada0fef (diff)
sched/numa: Rewrite the CONFIG_NUMA sched domain support
The current code groups up to 16 nodes into a level and then puts an
ALLNODES domain spanning the entire tree on top of that. This doesn't
reflect the NUMA topology, and especially on the smaller,
not-fully-connected machines out there today it can make a real
difference.

Therefore, build a proper NUMA topology based on node_distance().

Since there are no fixed NUMA layers anymore, the static SD_NODE_INIT
and SD_ALLNODES_INIT initializers are no longer usable. The new code
constructs something similar and scales some values on the number of
CPUs in the domain and/or the node_distance() ratio.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: David Howells <dhowells@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: linux-alpha@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-sh@vger.kernel.org
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: sparclinux@vger.kernel.org
Cc: Tony Luck <tony.luck@intel.com>
Cc: x86@kernel.org
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: Greg Pearson <greg.pearson@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: bob.picco@oracle.com
Cc: chris.mason@oracle.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-r74n3n8hhuc2ynbrnp3vt954@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	280
1 file changed, 185 insertions, 95 deletions
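As a rough illustration of the approach described in the commit message, the standalone user-space sketch below models how the set of NUMA levels falls out of the unique values in the node_distance() table, instead of a fixed 16-nodes-per-level grouping. It is not part of the patch: the 4-node distance table, NR_NODES, and the array name node_distance[][] are hypothetical stand-ins (real values come from the firmware SLIT via the kernel's node_distance()), but the deduplication loop mirrors the one added to sched_init_numa() in the diff below.

/*
 * Standalone model of the distance-deduplication step in sched_init_numa().
 * The 4-node distance table is hypothetical; in the kernel the values come
 * from node_distance(), i.e. the firmware SLIT.
 */
#include <stdio.h>

#define NR_NODES 4

static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

int main(void)
{
	int distances[NR_NODES];	/* unique distances above the local one */
	int curr = node_distance[0][0];	/* local distance, typically 10 */
	int next = curr;
	int level = 0;

	/*
	 * Same O(n^2) dedup as the patch: scan row 0 repeatedly, each pass
	 * picking the smallest distance strictly larger than the current one,
	 * until no larger distance remains.
	 */
	for (int i = 0; i < NR_NODES; i++) {
		for (int j = 0; j < NR_NODES; j++) {
			int d = node_distance[0][j];
			if (d > curr && (d < next || next == curr))
				next = d;
		}
		if (next == curr)
			break;
		distances[level++] = next;
		curr = next;
	}

	printf("%d NUMA levels:", level);
	for (int i = 0; i < level; i++)
		printf(" %d", distances[i]);
	printf("\n");
	return 0;
}

Built with a C99 compiler this prints "2 NUMA levels: 20 30"; the patch then creates one overlapping sched_domain topology level per such distance.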
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6001e5c3b4e4..b4f2096980a3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5560,7 +5560,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
+		if (!(sd->flags & SD_OVERLAP) &&
+		    cpumask_intersects(groupmask, sched_group_cpus(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
@@ -5898,92 +5899,6 @@ static int __init isolated_cpu_setup(char *str)
 
 __setup("isolcpus=", isolated_cpu_setup);
 
-#ifdef CONFIG_NUMA
-
-/**
- * find_next_best_node - find the next node to include in a sched_domain
- * @node: node whose sched_domain we're building
- * @used_nodes: nodes already in the sched_domain
- *
- * Find the next node to include in a given scheduling domain. Simply
- * finds the closest node not already in the @used_nodes map.
- *
- * Should use nodemask_t.
- */
-static int find_next_best_node(int node, nodemask_t *used_nodes)
-{
-	int i, n, val, min_val, best_node = -1;
-
-	min_val = INT_MAX;
-
-	for (i = 0; i < nr_node_ids; i++) {
-		/* Start at @node */
-		n = (node + i) % nr_node_ids;
-
-		if (!nr_cpus_node(n))
-			continue;
-
-		/* Skip already used nodes */
-		if (node_isset(n, *used_nodes))
-			continue;
-
-		/* Simple min distance search */
-		val = node_distance(node, n);
-
-		if (val < min_val) {
-			min_val = val;
-			best_node = n;
-		}
-	}
-
-	if (best_node != -1)
-		node_set(best_node, *used_nodes);
-	return best_node;
-}
-
-/**
- * sched_domain_node_span - get a cpumask for a node's sched_domain
- * @node: node whose cpumask we're constructing
- * @span: resulting cpumask
- *
- * Given a node, construct a good cpumask for its sched_domain to span. It
- * should be one that prevents unnecessary balancing, but also spreads tasks
- * out optimally.
- */
-static void sched_domain_node_span(int node, struct cpumask *span)
-{
-	nodemask_t used_nodes;
-	int i;
-
-	cpumask_clear(span);
-	nodes_clear(used_nodes);
-
-	cpumask_or(span, span, cpumask_of_node(node));
-	node_set(node, used_nodes);
-
-	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
-		int next_node = find_next_best_node(node, &used_nodes);
-		if (next_node < 0)
-			break;
-		cpumask_or(span, span, cpumask_of_node(next_node));
-	}
-}
-
-static const struct cpumask *cpu_node_mask(int cpu)
-{
-	lockdep_assert_held(&sched_domains_mutex);
-
-	sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
-
-	return sched_domains_tmpmask;
-}
-
-static const struct cpumask *cpu_allnodes_mask(int cpu)
-{
-	return cpu_possible_mask;
-}
-#endif /* CONFIG_NUMA */
-
 static const struct cpumask *cpu_cpu_mask(int cpu)
 {
 	return cpumask_of_node(cpu_to_node(cpu));
@@ -6020,6 +5935,7 @@ struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
 	int		    flags;
+	int		    numa_level;
 	struct sd_data      data;
 };
 
@@ -6213,10 +6129,6 @@ sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
 }
 
 SD_INIT_FUNC(CPU)
-#ifdef CONFIG_NUMA
- SD_INIT_FUNC(ALLNODES)
- SD_INIT_FUNC(NODE)
-#endif
 #ifdef CONFIG_SCHED_SMT
  SD_INIT_FUNC(SIBLING)
 #endif
@@ -6338,15 +6250,191 @@ static struct sched_domain_topology_level default_topology[] = {
 	{ sd_init_BOOK, cpu_book_mask, },
 #endif
 	{ sd_init_CPU, cpu_cpu_mask, },
-#ifdef CONFIG_NUMA
-	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
-	{ sd_init_ALLNODES, cpu_allnodes_mask, },
-#endif
 	{ NULL, },
 };
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+#ifdef CONFIG_NUMA
+
+static int sched_domains_numa_levels;
+static int sched_domains_numa_scale;
+static int *sched_domains_numa_distance;
+static struct cpumask ***sched_domains_numa_masks;
+static int sched_domains_curr_level;
+
+static inline unsigned long numa_scale(unsigned long x, int level)
+{
+	return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
+}
+
+static inline int sd_local_flags(int level)
+{
+	if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
+		return 0;
+
+	return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
+}
+
+static struct sched_domain *
+sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
+{
+	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);
+	int level = tl->numa_level;
+	int sd_weight = cpumask_weight(
+			sched_domains_numa_masks[level][cpu_to_node(cpu)]);
+
+	*sd = (struct sched_domain){
+		.min_interval		= sd_weight,
+		.max_interval		= 2*sd_weight,
+		.busy_factor		= 32,
+		.imbalance_pct		= 100 + numa_scale(25, level),
+		.cache_nice_tries	= 2,
+		.busy_idx		= 3,
+		.idle_idx		= 2,
+		.newidle_idx		= 0,
+		.wake_idx		= 0,
+		.forkexec_idx		= 0,
+
+		.flags			= 1*SD_LOAD_BALANCE
+					| 1*SD_BALANCE_NEWIDLE
+					| 0*SD_BALANCE_EXEC
+					| 0*SD_BALANCE_FORK
+					| 0*SD_BALANCE_WAKE
+					| 0*SD_WAKE_AFFINE
+					| 0*SD_PREFER_LOCAL
+					| 0*SD_SHARE_CPUPOWER
+					| 0*SD_POWERSAVINGS_BALANCE
+					| 0*SD_SHARE_PKG_RESOURCES
+					| 1*SD_SERIALIZE
+					| 0*SD_PREFER_SIBLING
+					| sd_local_flags(level)
+					,
+		.last_balance		= jiffies,
+		.balance_interval	= sd_weight,
+	};
+	SD_INIT_NAME(sd, NUMA);
+	sd->private = &tl->data;
+
+	/*
+	 * Ugly hack to pass state to sd_numa_mask()...
+	 */
+	sched_domains_curr_level = tl->numa_level;
+
+	return sd;
+}
+
+static const struct cpumask *sd_numa_mask(int cpu)
+{
+	return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
+}
+
+static void sched_init_numa(void)
+{
+	int next_distance, curr_distance = node_distance(0, 0);
+	struct sched_domain_topology_level *tl;
+	int level = 0;
+	int i, j, k;
+
+	sched_domains_numa_scale = curr_distance;
+	sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
+	if (!sched_domains_numa_distance)
+		return;
+
+	/*
+	 * O(nr_nodes^2) deduplicating selection sort -- in order to find the
+	 * unique distances in the node_distance() table.
+	 *
+	 * Assumes node_distance(0,j) includes all distances in
+	 * node_distance(i,j) in order to avoid cubic time.
+	 *
+	 * XXX: could be optimized to O(n log n) by using sort()
+	 */
+	next_distance = curr_distance;
+	for (i = 0; i < nr_node_ids; i++) {
+		for (j = 0; j < nr_node_ids; j++) {
+			int distance = node_distance(0, j);
+			if (distance > curr_distance &&
+					(distance < next_distance ||
+					 next_distance == curr_distance))
+				next_distance = distance;
+		}
+		if (next_distance != curr_distance) {
+			sched_domains_numa_distance[level++] = next_distance;
+			sched_domains_numa_levels = level;
+			curr_distance = next_distance;
+		} else break;
+	}
+	/*
+	 * 'level' contains the number of unique distances, excluding the
+	 * identity distance node_distance(i,i).
+	 *
+	 * The sched_domains_numa_distance[] array includes the actual distance
+	 * numbers.
+	 */
+
+	sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL);
+	if (!sched_domains_numa_masks)
+		return;
+
+	/*
+	 * Now for each level, construct a mask per node which contains all
+	 * cpus of nodes that are that many hops away from us.
+	 */
+	for (i = 0; i < level; i++) {
+		sched_domains_numa_masks[i] =
+			kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL);
+		if (!sched_domains_numa_masks[i])
+			return;
+
+		for (j = 0; j < nr_node_ids; j++) {
+			struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j);
+			if (!mask)
+				return;
+
+			sched_domains_numa_masks[i][j] = mask;
+
+			for (k = 0; k < nr_node_ids; k++) {
+				if (node_distance(cpu_to_node(j), k) >
+						sched_domains_numa_distance[i])
+					continue;
+
+				cpumask_or(mask, mask, cpumask_of_node(k));
+			}
+		}
+	}
+
+	tl = kzalloc((ARRAY_SIZE(default_topology) + level) *
+			sizeof(struct sched_domain_topology_level), GFP_KERNEL);
+	if (!tl)
+		return;
+
+	/*
+	 * Copy the default topology bits..
+	 */
+	for (i = 0; default_topology[i].init; i++)
+		tl[i] = default_topology[i];
+
+	/*
+	 * .. and append 'j' levels of NUMA goodness.
+	 */
+	for (j = 0; j < level; i++, j++) {
+		tl[i] = (struct sched_domain_topology_level){
+			.init = sd_numa_init,
+			.mask = sd_numa_mask,
+			.flags = SDTL_OVERLAP,
+			.numa_level = j,
+		};
+	}
+
+	sched_domain_topology = tl;
+}
+#else
+static inline void sched_init_numa(void)
+{
+}
+#endif /* CONFIG_NUMA */
+
 static int __sdt_alloc(const struct cpumask *cpu_map)
 {
 	struct sched_domain_topology_level *tl;
@@ -6840,6 +6928,8 @@ void __init sched_init_smp(void)
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
 	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
+	sched_init_numa();
+
 	get_online_cpus();
 	mutex_lock(&sched_domains_mutex);
 	init_sched_domains(cpu_active_mask);
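To make the remaining steps concrete, here is a second standalone sketch using the same hypothetical 4-node distance table as above (NR_NODES, LOCAL_DISTANCE, and the table are again stand-ins, and plain node indices stand in for the cpumask_of_node() sets the kernel uses). It models the per-level span rule added in sched_init_numa() -- node k belongs to node j's level-i mask iff its distance from j is at most sched_domains_numa_distance[i] -- and the way numa_scale() grows imbalance_pct with distance, assuming sched_domains_numa_scale equals the local distance node_distance(0, 0).

/*
 * Standalone model of the per-level NUMA spans and the numa_scale()-based
 * imbalance_pct from the patch. Hypothetical 4-node distance table; the
 * distances[] values are the output of the dedup pass shown earlier.
 */
#include <stdio.h>

#define NR_NODES	4
#define LOCAL_DISTANCE	10	/* models node_distance(0, 0) */

static const int node_distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 20, 30 },
	{ 20, 10, 30, 20 },
	{ 20, 30, 10, 20 },
	{ 30, 20, 20, 10 },
};

int main(void)
{
	const int distances[] = { 20, 30 };	/* unique inter-node distances */
	const int levels = 2;

	for (int i = 0; i < levels; i++) {
		/* mirrors .imbalance_pct = 100 + numa_scale(25, level) */
		int imbalance_pct = 100 + 25 * distances[i] / LOCAL_DISTANCE;

		printf("level %d (distance <= %d, imbalance_pct = %d):\n",
		       i, distances[i], imbalance_pct);

		/* membership rule: include every node within distances[i] */
		for (int j = 0; j < NR_NODES; j++) {
			printf("  node %d spans:", j);
			for (int k = 0; k < NR_NODES; k++) {
				if (node_distance[j][k] > distances[i])
					continue;
				printf(" %d", k);
			}
			printf("\n");
		}
	}
	return 0;
}

On this table, level 0 spans only each node's directly reachable neighbours (e.g. node 0 spans nodes 0, 1, 2) with imbalance_pct 150, while level 1 covers all four nodes with imbalance_pct 175. That per-node asymmetry is the kind of not-fully-connected topology the commit message says the old 16-nodes-per-level plus ALLNODES scheme did not reflect.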