diff options
author | Rusty Russell <rusty@rustcorp.com.au> | 2009-03-13 00:19:46 -0400 |
---|---|---|
committer | Rusty Russell <rusty@rustcorp.com.au> | 2009-03-13 00:19:46 -0400 |
commit | a70f730282019f487aa33a84e5ac9a5e89c5abd0 (patch) | |
tree | e6891ec5db5383c6f39617d0cc9671e1a0d1a988 /mm | |
parent | c69fc56de1df5769f2ec69c915c7ad5afe63804c (diff) |
cpumask: replace node_to_cpumask with cpumask_of_node.
Impact: cleanup
node_to_cpumask (and the blecherous node_to_cpumask_ptr which
contained a declaration) are replaced now that everyone implements
cpumask_of_node.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 6 | ||||
-rw-r--r-- | mm/quicklist.c | 2 | ||||
-rw-r--r-- | mm/slab.c | 2 | ||||
-rw-r--r-- | mm/vmscan.c | 6 |
4 files changed, 9 insertions, 7 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5c44ed49ca93..a92b0975b9a5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2134,7 +2134,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask) | |||
2134 | int n, val; | 2134 | int n, val; |
2135 | int min_val = INT_MAX; | 2135 | int min_val = INT_MAX; |
2136 | int best_node = -1; | 2136 | int best_node = -1; |
2137 | node_to_cpumask_ptr(tmp, 0); | 2137 | const struct cpumask *tmp = cpumask_of_node(0); |
2138 | 2138 | ||
2139 | /* Use the local node if we haven't already */ | 2139 | /* Use the local node if we haven't already */ |
2140 | if (!node_isset(node, *used_node_mask)) { | 2140 | if (!node_isset(node, *used_node_mask)) { |
@@ -2155,8 +2155,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask) | |||
2155 | val += (n < node); | 2155 | val += (n < node); |
2156 | 2156 | ||
2157 | /* Give preference to headless and unused nodes */ | 2157 | /* Give preference to headless and unused nodes */ |
2158 | node_to_cpumask_ptr_next(tmp, n); | 2158 | tmp = cpumask_of_node(n); |
2159 | if (!cpus_empty(*tmp)) | 2159 | if (!cpumask_empty(tmp)) |
2160 | val += PENALTY_FOR_NODE_WITH_CPUS; | 2160 | val += PENALTY_FOR_NODE_WITH_CPUS; |
2161 | 2161 | ||
2162 | /* Slight preference for less loaded node */ | 2162 | /* Slight preference for less loaded node */ |
diff --git a/mm/quicklist.c b/mm/quicklist.c index 8dbb6805ef35..e66d07d1b4ff 100644 --- a/mm/quicklist.c +++ b/mm/quicklist.c | |||
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages) | |||
29 | int node = numa_node_id(); | 29 | int node = numa_node_id(); |
30 | struct zone *zones = NODE_DATA(node)->node_zones; | 30 | struct zone *zones = NODE_DATA(node)->node_zones; |
31 | int num_cpus_on_node; | 31 | int num_cpus_on_node; |
32 | node_to_cpumask_ptr(cpumask_on_node, node); | 32 | const struct cpumask *cpumask_on_node = cpumask_of_node(node); |
33 | 33 | ||
34 | node_free_pages = | 34 | node_free_pages = |
35 | #ifdef CONFIG_ZONE_DMA | 35 | #ifdef CONFIG_ZONE_DMA |
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -1160,7 +1160,7 @@ static void __cpuinit cpuup_canceled(long cpu) | |||
1160 | struct kmem_cache *cachep; | 1160 | struct kmem_cache *cachep; |
1161 | struct kmem_list3 *l3 = NULL; | 1161 | struct kmem_list3 *l3 = NULL; |
1162 | int node = cpu_to_node(cpu); | 1162 | int node = cpu_to_node(cpu); |
1163 | node_to_cpumask_ptr(mask, node); | 1163 | const struct cpumask *mask = cpumask_of_node(node); |
1164 | 1164 | ||
1165 | list_for_each_entry(cachep, &cache_chain, next) { | 1165 | list_for_each_entry(cachep, &cache_chain, next) { |
1166 | struct array_cache *nc; | 1166 | struct array_cache *nc; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 6177e3bcd66b..cc6135586b44 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1963,7 +1963,7 @@ static int kswapd(void *p) | |||
1963 | struct reclaim_state reclaim_state = { | 1963 | struct reclaim_state reclaim_state = { |
1964 | .reclaimed_slab = 0, | 1964 | .reclaimed_slab = 0, |
1965 | }; | 1965 | }; |
1966 | node_to_cpumask_ptr(cpumask, pgdat->node_id); | 1966 | const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); |
1967 | 1967 | ||
1968 | if (!cpumask_empty(cpumask)) | 1968 | if (!cpumask_empty(cpumask)) |
1969 | set_cpus_allowed_ptr(tsk, cpumask); | 1969 | set_cpus_allowed_ptr(tsk, cpumask); |
@@ -2198,7 +2198,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb, | |||
2198 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { | 2198 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { |
2199 | for_each_node_state(nid, N_HIGH_MEMORY) { | 2199 | for_each_node_state(nid, N_HIGH_MEMORY) { |
2200 | pg_data_t *pgdat = NODE_DATA(nid); | 2200 | pg_data_t *pgdat = NODE_DATA(nid); |
2201 | node_to_cpumask_ptr(mask, pgdat->node_id); | 2201 | const struct cpumask *mask; |
2202 | |||
2203 | mask = cpumask_of_node(pgdat->node_id); | ||
2202 | 2204 | ||
2203 | if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) | 2205 | if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) |
2204 | /* One of our CPUs online: restore mask */ | 2206 | /* One of our CPUs online: restore mask */ |