 drivers/base/node.c |  7
 kernel/sched.c      | 29
 mm/page_alloc.c     |  6
 mm/slab.c           |  5
 mm/vmscan.c         | 18
 net/sunrpc/svc.c    | 16
 6 files changed, 42 insertions(+), 39 deletions(-)
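
The common thread: node_to_cpumask() returns a cpumask_t by value, which on large-NR_CPUS configs copies hundreds of bytes onto the stack at every call site. Each site below switches to node_to_cpumask_ptr(), which yields a pointer to the node's pre-computed mask instead. A compilable userspace sketch of the idea follows; the macro bodies and the node_to_cpumask_map array are assumptions modeled on the x86_64 variant, not the kernel's exact (per-architecture) definitions:

    /*
     * Sketch only: node_to_cpumask_map and these macro bodies are
     * assumptions, not the kernel's exact definitions.
     */
    #include <stdio.h>

    #define NR_CPUS      4096
    #define MAX_NUMNODES 64

    typedef struct { unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))]; } cpumask_t;

    static cpumask_t node_to_cpumask_map[MAX_NUMNODES];  /* filled at boot in the kernel */

    /* Declare 'v' and point it at node's mask, with no stack copy. */
    #define node_to_cpumask_ptr(v, node) \
            const cpumask_t *v = &node_to_cpumask_map[node]

    /* Re-point an already-declared 'v'; usable inside loops. */
    #define node_to_cpumask_ptr_next(v, node) \
            v = &node_to_cpumask_map[node]

    int main(void)
    {
            node_to_cpumask_ptr(mask, 0);
            printf("bytes saved per avoided copy: %zu\n", sizeof(*mask));
            return 0;
    }

With NR_CPUS=4096 that is 512 bytes of stack saved per avoided copy, which is the motivation for the whole series.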
diff --git a/drivers/base/node.c b/drivers/base/node.c
index e59861f18ce5..8e3f25bb8f80 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -22,14 +22,15 @@ static struct sysdev_class node_class = {
 static ssize_t node_read_cpumap(struct sys_device * dev, char * buf)
 {
 	struct node *node_dev = to_node(dev);
-	cpumask_t mask = node_to_cpumask(node_dev->sysdev.id);
+	node_to_cpumask_ptr(mask, node_dev->sysdev.id);
 	int len;
 
 	/* 2004/06/03: buf currently PAGE_SIZE, need > 1 char per 4 bits. */
 	BUILD_BUG_ON(MAX_NUMNODES/4 > PAGE_SIZE/2);
 
-	len = cpumask_scnprintf(buf, PAGE_SIZE-1, mask);
-	len += sprintf(buf + len, "\n");
+	len = cpumask_scnprintf(buf, PAGE_SIZE-2, *mask);
+	buf[len++] = '\n';
+	buf[len] = '\0';
 	return len;
 }
 
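Beyond the pointer conversion, this hunk tightens the sysfs formatting: cpumask_scnprintf() now gets PAGE_SIZE-2 so that a trailing '\n' plus the terminating '\0' still fit in the one-page buffer, and the single-character sprintf() becomes two direct stores. A runnable model of that buffer budget, with snprintf() standing in for cpumask_scnprintf() (the kernel's scnprintf() returns bytes actually written, unlike ISO snprintf(), so the stand-in is safe here only because the input is short):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static int node_read_cpumap_model(char *buf, const char *hex)
    {
            /* reserve 2 bytes of the page: one for '\n', one for '\0' */
            int len = snprintf(buf, PAGE_SIZE - 2, "%s", hex);
            buf[len++] = '\n';  /* cheaper than sprintf(buf + len, "\n") */
            buf[len] = '\0';
            return len;
    }

    int main(void)
    {
            char page[PAGE_SIZE];
            int len = node_read_cpumap_model(page, "00000000,0000ffff");
            printf("%d bytes: %s", len, page);
            return 0;
    }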
diff --git a/kernel/sched.c b/kernel/sched.c
index 1a8252385c4d..9f7980f8ec00 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6448,7 +6448,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  *
  * Should use nodemask_t.
  */
-static int find_next_best_node(int node, unsigned long *used_nodes)
+static int find_next_best_node(int node, nodemask_t *used_nodes)
 {
 	int i, n, val, min_val, best_node = 0;
 
@@ -6462,7 +6462,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 			continue;
 
 		/* Skip already used nodes */
-		if (test_bit(n, used_nodes))
+		if (node_isset(n, *used_nodes))
 			continue;
 
 		/* Simple min distance search */
@@ -6474,14 +6474,13 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
 		}
 	}
 
-	set_bit(best_node, used_nodes);
+	node_set(best_node, *used_nodes);
 	return best_node;
 }
 
 /**
  * sched_domain_node_span - get a cpumask for a node's sched_domain
  * @node: node whose cpumask we're constructing
- * @size: number of nodes to include in this span
  *
  * Given a node, construct a good cpumask for its sched_domain to span. It
  * should be one that prevents unnecessary balancing, but also spreads tasks
@@ -6489,22 +6488,22 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
  */
 static cpumask_t sched_domain_node_span(int node)
 {
-	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
-	cpumask_t span, nodemask;
+	nodemask_t used_nodes;
+	cpumask_t span;
+	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
 	cpus_clear(span);
-	bitmap_zero(used_nodes, MAX_NUMNODES);
+	nodes_clear(used_nodes);
 
-	nodemask = node_to_cpumask(node);
-	cpus_or(span, span, nodemask);
-	set_bit(node, used_nodes);
+	cpus_or(span, span, *nodemask);
+	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
-		int next_node = find_next_best_node(node, used_nodes);
+		int next_node = find_next_best_node(node, &used_nodes);
 
-		nodemask = node_to_cpumask(next_node);
-		cpus_or(span, span, nodemask);
+		node_to_cpumask_ptr_next(nodemask, next_node);
+		cpus_or(span, span, *nodemask);
 	}
 
 	return span;
@@ -6901,6 +6900,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		for (j = 0; j < MAX_NUMNODES; j++) {
 			cpumask_t tmp, notcovered;
 			int n = (i + j) % MAX_NUMNODES;
+			node_to_cpumask_ptr(pnodemask, n);
 
 			cpus_complement(notcovered, covered);
 			cpus_and(tmp, notcovered, *cpu_map);
@@ -6908,8 +6908,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			if (cpus_empty(tmp))
 				break;
 
-			nodemask = node_to_cpumask(n);
-			cpus_and(tmp, tmp, nodemask);
+			cpus_and(tmp, tmp, *pnodemask);
 			if (cpus_empty(tmp))
 				continue;
 
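Two cleanups land together in sched.c: used_nodes moves from a raw DECLARE_BITMAP with test_bit()/set_bit() to a nodemask_t with node_isset()/node_set(), retiring the "Should use nodemask_t" note above find_next_best_node(), and the on-stack nodemask copy becomes a pointer. The kernel-doc line for @size is dropped because sched_domain_node_span() takes only @node. Inside the loop the assignment-only form node_to_cpumask_ptr_next() is needed, since the _ptr form is a declaration and cannot be repeated for the same name; a small model (same hypothetical macro bodies as in the sketch after the diffstat):

    #include <stdio.h>

    typedef struct { unsigned long bits[64]; } cpumask_t;
    static cpumask_t node_to_cpumask_map[4];

    #define node_to_cpumask_ptr(v, node)      const cpumask_t *v = &node_to_cpumask_map[node]
    #define node_to_cpumask_ptr_next(v, node) v = &node_to_cpumask_map[node]

    int main(void)
    {
            int i;
            node_to_cpumask_ptr(nodemask, 0);  /* declares and initializes */
            for (i = 1; i < 4; i++) {
                    node_to_cpumask_ptr_next(nodemask, i);  /* re-points, no redeclaration */
                    printf("node %d mask at %p\n", i, (void *)nodemask);
            }
            return 0;
    }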
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 402a504f1228..32e796af12a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2029,6 +2029,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
+	node_to_cpumask_ptr(tmp, 0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2037,7 +2038,6 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	}
 
 	for_each_node_state(n, N_HIGH_MEMORY) {
-		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
@@ -2050,8 +2050,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		tmp = node_to_cpumask(n);
-		if (!cpus_empty(tmp))
+		node_to_cpumask_ptr_next(tmp, n);
+		if (!cpus_empty(*tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
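Here tmp was previously declared afresh on every loop pass; it is now hoisted to function scope as a pointer, seeded with node 0 only so the declaration has something to point at, then re-aimed per node with node_to_cpumask_ptr_next(). The test it feeds prefers headless (CPU-less) nodes when ordering zonelist fallbacks. A runnable model of that preference; the PENALTY_FOR_NODE_WITH_CPUS value of 1 matches my reading of this era's mm/page_alloc.c but should be treated as an assumption:

    #include <stdio.h>

    #define PENALTY_FOR_NODE_WITH_CPUS 1  /* assumed value */

    int main(void)
    {
            /* per-node CPU masks: node 1 is memory-only ("headless") */
            unsigned long node_cpus[2] = { 0xff, 0x0 };
            int n;

            for (n = 0; n < 2; n++) {
                    int val = 0;
                    if (node_cpus[n] != 0)  /* !cpus_empty(*tmp) */
                            val += PENALTY_FOR_NODE_WITH_CPUS;
                    printf("node %d: penalty %d%s\n", n, val,
                           val ? "" : " (preferred fallback)");
            }
            return 0;
    }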
diff --git a/mm/slab.c b/mm/slab.c
index 04b308c3bc54..03927cb5ec9e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,14 +1160,13 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
+	node_to_cpumask_ptr(mask, node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
-		cpumask_t mask;
 
-		mask = node_to_cpumask(node);
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
@@ -1183,7 +1182,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(mask)) {
+		if (!cpus_empty(*mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
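The slab change doubles as a loop-invariant hoist: node is fixed for the entire cache-chain walk in cpuup_canceled(), so the mask lookup moves out of list_for_each_entry() and happens once per dead CPU rather than once per cache, leaving only the *mask dereference inside the loop. In miniature:

    #include <stdio.h>

    static unsigned long node_cpus[2] = { 0x3, 0x0 };

    int main(void)
    {
            int node = 0, cache;
            const unsigned long *mask = &node_cpus[node];  /* hoisted: once per CPU-down event */

            for (cache = 0; cache < 3; cache++) {          /* stand-in for the cache-chain walk */
                    if (*mask)                             /* !cpus_empty(*mask) */
                            printf("cache %d: node %d still has CPUs, skip drain\n",
                                   cache, node);
            }
            return 0;
    }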
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4046434046e6..f80a5b7c057f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1647,11 +1647,10 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	cpumask_t cpumask;
+	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	cpumask = node_to_cpumask(pgdat->node_id);
-	if (!cpus_empty(cpumask))
-		set_cpus_allowed(tsk, cpumask);
+	if (!cpus_empty(*cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
 	/*
@@ -1880,17 +1879,16 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
-	pg_data_t *pgdat;
-	cpumask_t mask;
 	int nid;
 
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
-			pgdat = NODE_DATA(nid);
-			mask = node_to_cpumask(pgdat->node_id);
-			if (any_online_cpu(mask) != NR_CPUS)
+			pg_data_t *pgdat = NODE_DATA(nid);
+			node_to_cpumask_ptr(mask, pgdat->node_id);
+
+			if (any_online_cpu(*mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
-				set_cpus_allowed(pgdat->kswapd, mask);
+				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
 	}
 	return NOTIFY_OK;
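Alongside the pointer conversion, set_cpus_allowed() becomes its pointer-taking counterpart set_cpus_allowed_ptr(), and the "no online CPU" test changes shape: any_online_cpu() was compared against NR_CPUS with !=, and is now compared against nr_cpu_ids with <, the tighter bound since nr_cpu_ids <= NR_CPUS. A runnable model; the convention that the helper returns a value >= nr_cpu_ids when nothing is found is an assumption matching this era's cpumask helpers:

    #include <stdio.h>

    static const int nr_cpu_ids = 8;  /* CPUs actually possible on this box */

    /* first online CPU in mask, or nr_cpu_ids if there is none */
    static int any_online_cpu(unsigned long mask)
    {
            int cpu;

            for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                    if (mask & (1UL << cpu))
                            return cpu;
            return nr_cpu_ids;
    }

    int main(void)
    {
            unsigned long node_mask = 0x0;  /* this node's CPUs are all offline */

            if (any_online_cpu(node_mask) < nr_cpu_ids)
                    printf("restore kswapd affinity\n");
            else
                    printf("leave kswapd unbound\n");
            return 0;
    }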
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a290e1523297..090af78d68b5 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -301,7 +301,6 @@ static inline int
 svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 {
 	struct svc_pool_map *m = &svc_pool_map;
-	unsigned int node; /* or cpu */
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -314,16 +313,23 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	default:
 		return 0;
 	case SVC_POOL_PERCPU:
-		node = m->pool_to[pidx];
+	{
+		unsigned int cpu = m->pool_to[pidx];
+
 		*oldmask = current->cpus_allowed;
-		set_cpus_allowed(current, cpumask_of_cpu(node));
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
 		return 1;
+	}
 	case SVC_POOL_PERNODE:
-		node = m->pool_to[pidx];
+	{
+		unsigned int node = m->pool_to[pidx];
+		node_to_cpumask_ptr(nodecpumask, node);
+
 		*oldmask = current->cpus_allowed;
-		set_cpus_allowed(current, node_to_cpumask(node));
+		set_cpus_allowed_ptr(current, nodecpumask);
 		return 1;
+	}
 	}
 }
 
 /*
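
Finally, the one variable that did double duty ("unsigned int node; /* or cpu */") splits into a properly named local per case, which is why each case arm gains braces: a declaration cannot directly follow a case label, so each arm opens its own block to scope its local. Note also that &cpumask_of_cpu(cpu) relies on that macro expanding to an lvalue at this point in the tree. A minimal illustration of the scoping point:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pool_to[] = { 2, 5 };
            int mode = 0;

            switch (mode) {
            case 0:
            {       /* braces: a declaration cannot directly follow a case
                     * label, and the block scopes 'cpu' to this arm only */
                    unsigned int cpu = pool_to[0];
                    printf("bind to cpu %u\n", cpu);
                    break;
            }
            case 1:
            {
                    unsigned int node = pool_to[1];
                    printf("bind to node %u\n", node);
                    break;
            }
            }
            return 0;
    }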