author    Rusty Russell <rusty@rustcorp.com.au>    2009-03-13 00:19:46 -0400
committer Rusty Russell <rusty@rustcorp.com.au>    2009-03-13 00:19:46 -0400
commit    a70f730282019f487aa33a84e5ac9a5e89c5abd0 (patch)
tree      e6891ec5db5383c6f39617d0cc9671e1a0d1a988
parent    c69fc56de1df5769f2ec69c915c7ad5afe63804c (diff)
cpumask: replace node_to_cpumask with cpumask_of_node.
Impact: cleanup

node_to_cpumask (and the blecherous node_to_cpumask_ptr which
contained a declaration) are replaced now that everyone implements
cpumask_of_node.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
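For context: node_to_cpumask_ptr was objectionable because the macro
expanded to a variable *declaration*, so it could only appear in a
block's declaration section and needed a separate
node_to_cpumask_ptr_next() to reassign the value; cpumask_of_node() is
an ordinary expression.  A rough sketch of the two shapes (simplified
from the generic fallback definitions, not any architecture's exact
code):

        /* Old: expands to declarations -- a stack copy of the node's
         * cpumask plus a pointer to it -- so it is pinned to the top
         * of a block.  (Approximate sketch of the generic fallback.)
         */
        #define node_to_cpumask_ptr(v, node)                    \
                cpumask_t _##v = node_to_cpumask(node);         \
                const cpumask_t *v = &_##v

        /* New: a plain expression returning a const pointer, usable
         * anywhere, including directly as a function argument.
         */
        const struct cpumask *cpumask_of_node(int node);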
 drivers/base/node.c      | 2 +-
 drivers/pci/pci-driver.c | 3 +--
 include/linux/topology.h | 6 +-----
 mm/page_alloc.c          | 6 +++---
 mm/quicklist.c           | 2 +-
 mm/slab.c                | 2 +-
 mm/vmscan.c              | 6 ++++--
 net/sunrpc/svc.c         | 3 +--
 8 files changed, 13 insertions(+), 17 deletions(-)
diff --git a/drivers/base/node.c b/drivers/base/node.c
index f8f578a71b25..40b809742a1c 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -24,7 +24,7 @@ static struct sysdev_class node_class = {
 static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
 {
         struct node *node_dev = to_node(dev);
-        node_to_cpumask_ptr(mask, node_dev->sysdev.id);
+        const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
         int len;
 
         /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 93eac1423585..b522f883d674 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -212,10 +212,9 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
         node = dev_to_node(&dev->dev);
         if (node >= 0) {
                 int cpu;
-                node_to_cpumask_ptr(nodecpumask, node);
 
                 get_online_cpus();
-                cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
+                cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
                 if (cpu < nr_cpu_ids)
                         error = work_on_cpu(cpu, local_pci_probe, &ddi);
                 else
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a16b9e06f2e5..16b7d6896ce9 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
 #endif
 
 #ifndef nr_cpus_node
-#define nr_cpus_node(node) \
-        ({ \
-                node_to_cpumask_ptr(__tmp__, node); \
-                cpus_weight(*__tmp__); \
-        })
+#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
 #endif
 
 #define for_each_node_with_cpus(node) \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5c44ed49ca93..a92b0975b9a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2134,7 +2134,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
         int n, val;
         int min_val = INT_MAX;
         int best_node = -1;
-        node_to_cpumask_ptr(tmp, 0);
+        const struct cpumask *tmp = cpumask_of_node(0);
 
         /* Use the local node if we haven't already */
         if (!node_isset(node, *used_node_mask)) {
@@ -2155,8 +2155,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
                 val += (n < node);
 
                 /* Give preference to headless and unused nodes */
-                node_to_cpumask_ptr_next(tmp, n);
-                if (!cpus_empty(*tmp))
+                tmp = cpumask_of_node(n);
+                if (!cpumask_empty(tmp))
                         val += PENALTY_FOR_NODE_WITH_CPUS;
 
                 /* Slight preference for less loaded node */
diff --git a/mm/quicklist.c b/mm/quicklist.c
index 8dbb6805ef35..e66d07d1b4ff 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -29,7 +29,7 @@ static unsigned long max_pages(unsigned long min_pages)
         int node = numa_node_id();
         struct zone *zones = NODE_DATA(node)->node_zones;
         int num_cpus_on_node;
-        node_to_cpumask_ptr(cpumask_on_node, node);
+        const struct cpumask *cpumask_on_node = cpumask_of_node(node);
 
         node_free_pages =
 #ifdef CONFIG_ZONE_DMA
diff --git a/mm/slab.c b/mm/slab.c
index 4d00855629c4..2daaca0b4541 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,7 +1160,7 @@ static void __cpuinit cpuup_canceled(long cpu)
         struct kmem_cache *cachep;
         struct kmem_list3 *l3 = NULL;
         int node = cpu_to_node(cpu);
-        node_to_cpumask_ptr(mask, node);
+        const struct cpumask *mask = cpumask_of_node(node);
 
         list_for_each_entry(cachep, &cache_chain, next) {
                 struct array_cache *nc;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6177e3bcd66b..cc6135586b44 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1963,7 +1963,7 @@ static int kswapd(void *p)
         struct reclaim_state reclaim_state = {
                 .reclaimed_slab = 0,
         };
-        node_to_cpumask_ptr(cpumask, pgdat->node_id);
+        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
         if (!cpumask_empty(cpumask))
                 set_cpus_allowed_ptr(tsk, cpumask);
@@ -2198,7 +2198,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
         if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                 for_each_node_state(nid, N_HIGH_MEMORY) {
                         pg_data_t *pgdat = NODE_DATA(nid);
-                        node_to_cpumask_ptr(mask, pgdat->node_id);
+                        const struct cpumask *mask;
+
+                        mask = cpumask_of_node(pgdat->node_id);
 
                         if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                 /* One of our CPUs online: restore mask */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c51fed4d1af1..3bdd5bffaca8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -317,8 +317,7 @@ svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
         }
         case SVC_POOL_PERNODE:
         {
-                node_to_cpumask_ptr(nodecpumask, node);
-                set_cpus_allowed_ptr(task, nodecpumask);
+                set_cpus_allowed_ptr(task, cpumask_of_node(node));
                 break;
         }
         }
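Every caller above follows the same conversion pattern; a minimal
before/after sketch of that pattern (hypothetical caller, and
do_something() is illustrative only, not from this patch):

        /* Before: the macro declares 'mask', so it must sit at the
         * top of the block, and the mask is a stack copy.
         */
        static void old_style(int node)
        {
                node_to_cpumask_ptr(mask, node);

                if (!cpus_empty(*mask))
                        do_something(mask);
        }

        /* After: fetch the node's mask as a plain const pointer
         * wherever it is needed, with no stack copy.
         */
        static void new_style(int node)
        {
                const struct cpumask *mask = cpumask_of_node(node);

                if (!cpumask_empty(mask))
                        do_something(mask);
        }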