Diffstat (limited to 'mm')
 mm/page_alloc.c | 6 +++---
 mm/quicklist.c  | 2 +-
 mm/slab.c       | 2 +-
 mm/vmscan.c     | 6 ++++--
 4 files changed, 9 insertions(+), 7 deletions(-)
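Every hunk below makes the same mechanical conversion: the declaration macro node_to_cpumask_ptr(v, node), together with its node_to_cpumask_ptr_next() companion, is replaced by a plain pointer initialized from cpumask_of_node(node). On architectures without a per-node cpumask map the old macro expanded to roughly an on-stack cpumask_t copy plus a pointer to it, which gets expensive as NR_CPUS grows; cpumask_of_node() instead returns a const pointer to a stable per-node mask. A minimal standalone model of the two styles (the struct cpumask stand-in and the node_masks table are illustrative assumptions, not the kernel's definitions):

	/*
	 * Standalone model only: struct cpumask here is an illustrative
	 * stand-in, not the kernel's bitmap-based definition.
	 */
	#include <stdio.h>

	#define NR_CPUS		8
	#define MAX_NUMNODES	2

	struct cpumask { unsigned long bits; };

	static struct cpumask node_masks[MAX_NUMNODES] = {
		{ 0x0fUL },	/* node 0: CPUs 0-3 */
		{ 0x00UL },	/* node 1: headless, no CPUs */
	};

	/* New style: return a const pointer to a stable per-node mask. */
	static const struct cpumask *cpumask_of_node(int node)
	{
		return &node_masks[node];
	}

	static int cpumask_empty(const struct cpumask *mask)
	{
		return mask->bits == 0;
	}

	int main(void)
	{
		/* Replaces: node_to_cpumask_ptr(tmp, 0); */
		const struct cpumask *tmp = cpumask_of_node(0);

		for (int n = 0; n < MAX_NUMNODES; n++) {
			/* Replaces: node_to_cpumask_ptr_next(tmp, n); */
			tmp = cpumask_of_node(n);
			printf("node %d is %s\n", n,
			       cpumask_empty(tmp) ? "headless" : "populated");
		}
		return 0;
	}
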
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3803ea8c27d..f87e0d8df5a7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2139,7 +2139,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
-	node_to_cpumask_ptr(tmp, 0);
+	const struct cpumask *tmp = cpumask_of_node(0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2160,8 +2160,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		node_to_cpumask_ptr_next(tmp, n);
-		if (!cpus_empty(*tmp))
+		tmp = cpumask_of_node(n);
+		if (!cpumask_empty(tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
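Beyond the declaration, the page_alloc.c hunk above also swaps the by-value helper cpus_empty(*tmp) for the pointer-based cpumask_empty(tmp); the old cpus_* operations took a whole cpumask_t by value, so callers had to dereference. A standalone model of the cost difference (the size and stand-in types are assumptions for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 1024	/* big configs: the by-value copy is 128 bytes */
	#define MASK_LONGS (NR_CPUS / (8 * (int)sizeof(unsigned long)))

	struct cpumask { unsigned long bits[MASK_LONGS]; };

	/* Old style (modeled): the whole bitmap is copied for every call. */
	static bool cpus_empty(struct cpumask mask)
	{
		for (int i = 0; i < MASK_LONGS; i++)
			if (mask.bits[i])
				return false;
		return true;
	}

	/* New style (modeled): a const pointer, no copy. */
	static bool cpumask_empty(const struct cpumask *mask)
	{
		for (int i = 0; i < MASK_LONGS; i++)
			if (mask->bits[i])
				return false;
		return true;
	}

	int main(void)
	{
		struct cpumask m = { { 0 } };
		const struct cpumask *tmp = &m;

		/* Old call site: cpus_empty(*tmp); new: cpumask_empty(tmp). */
		printf("%d %d\n", cpus_empty(*tmp), cpumask_empty(tmp));
		return 0;
	}
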
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 8dbb6805ef35..e66d07d1b4ff 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
 	int node = numa_node_id();
 	struct zone *zones = NODE_DATA(node)->node_zones;
 	int num_cpus_on_node;
-	node_to_cpumask_ptr(cpumask_on_node, node);
+	const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
 	node_free_pages =
 #ifdef CONFIG_ZONE_DMA
diff --git a/mm/slab.c b/mm/slab.c
index 825c606f691d..59839d7ee5b3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,7 +1160,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
-	node_to_cpumask_ptr(mask, node);
+	const struct cpumask *mask = cpumask_of_node(node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 479e46719394..f74a61e522f4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,7 +1963,7 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	node_to_cpumask_ptr(cpumask, pgdat->node_id);
+	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
 	lockdep_set_current_reclaim_state(GFP_KERNEL);
 
@@ -2200,7 +2200,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
 			pg_data_t *pgdat = NODE_DATA(nid);
-			node_to_cpumask_ptr(mask, pgdat->node_id);
+			const struct cpumask *mask;
+
+			mask = cpumask_of_node(pgdat->node_id);
 
 			if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
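
The test at the end of this hunk, cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids, is the idiom for "does this node have at least one online CPU": cpumask_any_and() picks some CPU set in both masks and returns a value >= nr_cpu_ids when the intersection is empty. A standalone model of that idiom (the single-word mask and the example values are assumptions):

	#include <stdio.h>

	#define NR_CPUS 8
	static const unsigned int nr_cpu_ids = NR_CPUS;

	struct cpumask { unsigned long bits; };

	/*
	 * Modeled after cpumask_any_and(): return the index of some CPU set
	 * in both masks, or nr_cpu_ids if the intersection is empty.
	 */
	static unsigned int cpumask_any_and(const struct cpumask *a,
					    const struct cpumask *b)
	{
		unsigned long both = a->bits & b->bits;

		for (unsigned int cpu = 0; cpu < nr_cpu_ids; cpu++)
			if (both & (1UL << cpu))
				return cpu;
		return nr_cpu_ids;
	}

	int main(void)
	{
		struct cpumask online = { 0x03 };	/* CPUs 0-1 online */
		struct cpumask node1  = { 0xf0 };	/* node 1: CPUs 4-7 */

		if (cpumask_any_and(&online, &node1) < nr_cpu_ids)
			printf("node 1 has an online CPU: restore the mask\n");
		else
			printf("node 1 is fully offline\n");
		return 0;
	}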