aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorMike Travis <travis@sgi.com>2008-04-04 21:11:10 -0400
committerIngo Molnar <mingo@elte.hu>2008-04-19 13:44:59 -0400
commitc5f59f0833df945eef7ff35f3dc6ba61c5f293dd (patch)
tree32c1a94847d0154051c79011212d401462723d55 /kernel
parentb53e921ba1cff8453dc9a87a84052fa12d5b30bd (diff)
nodemask: use new node_to_cpumask_ptr function
* Use new node_to_cpumask_ptr. This creates a pointer to the cpumask for a given node. This definition is in mm patch: asm-generic-add-node_to_cpumask_ptr-macro.patch * Use new set_cpus_allowed_ptr function. Depends on: [mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch [sched-devel]: sched: add new set_cpus_allowed_ptr function [x86/latest]: x86: add cpus_scnprintf function Cc: Greg Kroah-Hartman <gregkh@suse.de> Cc: Greg Banks <gnb@melbourne.sgi.com> Cc: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Mike Travis <travis@sgi.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c29
1 file changed, 14 insertions, 15 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 1a8252385c4d..9f7980f8ec00 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6448,7 +6448,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
6448 * 6448 *
6449 * Should use nodemask_t. 6449 * Should use nodemask_t.
6450 */ 6450 */
6451static int find_next_best_node(int node, unsigned long *used_nodes) 6451static int find_next_best_node(int node, nodemask_t *used_nodes)
6452{ 6452{
6453 int i, n, val, min_val, best_node = 0; 6453 int i, n, val, min_val, best_node = 0;
6454 6454
@@ -6462,7 +6462,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
6462 continue; 6462 continue;
6463 6463
6464 /* Skip already used nodes */ 6464 /* Skip already used nodes */
6465 if (test_bit(n, used_nodes)) 6465 if (node_isset(n, *used_nodes))
6466 continue; 6466 continue;
6467 6467
6468 /* Simple min distance search */ 6468 /* Simple min distance search */
@@ -6474,14 +6474,13 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
6474 } 6474 }
6475 } 6475 }
6476 6476
6477 set_bit(best_node, used_nodes); 6477 node_set(best_node, *used_nodes);
6478 return best_node; 6478 return best_node;
6479} 6479}
6480 6480
6481/** 6481/**
6482 * sched_domain_node_span - get a cpumask for a node's sched_domain 6482 * sched_domain_node_span - get a cpumask for a node's sched_domain
6483 * @node: node whose cpumask we're constructing 6483 * @node: node whose cpumask we're constructing
6484 * @size: number of nodes to include in this span
6485 * 6484 *
6486 * Given a node, construct a good cpumask for its sched_domain to span. It 6485 * Given a node, construct a good cpumask for its sched_domain to span. It
6487 * should be one that prevents unnecessary balancing, but also spreads tasks 6486 * should be one that prevents unnecessary balancing, but also spreads tasks
@@ -6489,22 +6488,22 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
6489 */ 6488 */
6490static cpumask_t sched_domain_node_span(int node) 6489static cpumask_t sched_domain_node_span(int node)
6491{ 6490{
6492 DECLARE_BITMAP(used_nodes, MAX_NUMNODES); 6491 nodemask_t used_nodes;
6493 cpumask_t span, nodemask; 6492 cpumask_t span;
6493 node_to_cpumask_ptr(nodemask, node);
6494 int i; 6494 int i;
6495 6495
6496 cpus_clear(span); 6496 cpus_clear(span);
6497 bitmap_zero(used_nodes, MAX_NUMNODES); 6497 nodes_clear(used_nodes);
6498 6498
6499 nodemask = node_to_cpumask(node); 6499 cpus_or(span, span, *nodemask);
6500 cpus_or(span, span, nodemask); 6500 node_set(node, used_nodes);
6501 set_bit(node, used_nodes);
6502 6501
6503 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { 6502 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
6504 int next_node = find_next_best_node(node, used_nodes); 6503 int next_node = find_next_best_node(node, &used_nodes);
6505 6504
6506 nodemask = node_to_cpumask(next_node); 6505 node_to_cpumask_ptr_next(nodemask, next_node);
6507 cpus_or(span, span, nodemask); 6506 cpus_or(span, span, *nodemask);
6508 } 6507 }
6509 6508
6510 return span; 6509 return span;
@@ -6901,6 +6900,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6901 for (j = 0; j < MAX_NUMNODES; j++) { 6900 for (j = 0; j < MAX_NUMNODES; j++) {
6902 cpumask_t tmp, notcovered; 6901 cpumask_t tmp, notcovered;
6903 int n = (i + j) % MAX_NUMNODES; 6902 int n = (i + j) % MAX_NUMNODES;
6903 node_to_cpumask_ptr(pnodemask, n);
6904 6904
6905 cpus_complement(notcovered, covered); 6905 cpus_complement(notcovered, covered);
6906 cpus_and(tmp, notcovered, *cpu_map); 6906 cpus_and(tmp, notcovered, *cpu_map);
@@ -6908,8 +6908,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
6908 if (cpus_empty(tmp)) 6908 if (cpus_empty(tmp))
6909 break; 6909 break;
6910 6910
6911 nodemask = node_to_cpumask(n); 6911 cpus_and(tmp, tmp, *pnodemask);
6912 cpus_and(tmp, tmp, nodemask);
6913 if (cpus_empty(tmp)) 6912 if (cpus_empty(tmp))
6914 continue; 6913 continue;
6915 6914