Diffstat (limited to 'mm')
 mm/allocpercpu.c     |  3 ++-
 mm/memory_hotplug.c  |  2 +-
 mm/page_alloc.c      |  6 +++---
 mm/pdflush.c         |  4 ++--
 mm/slab.c            |  5 ++---
 mm/vmscan.c          | 18 ++++++++----------
 6 files changed, 18 insertions(+), 20 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index b0012e27fea8..f4026bae6eed 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -82,9 +82,10 @@ EXPORT_SYMBOL_GPL(percpu_populate);
 int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
 			   cpumask_t *mask)
 {
-	cpumask_t populated = CPU_MASK_NONE;
+	cpumask_t populated;
 	int cpu;
 
+	cpus_clear(populated);
 	for_each_cpu_mask(cpu, *mask)
 		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
 			__percpu_depopulate_mask(__pdata, &populated);
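Note on this hunk: replacing the `= CPU_MASK_NONE` initializer with an explicit cpus_clear() avoids materializing a full cpumask_t constant just to zero an on-stack variable, which matters when the kernel is configured with a large NR_CPUS. A simplified sketch of what cpus_clear() expanded to in kernels of this era (the real definitions live in include/linux/cpumask.h; treat this as illustrative):

    /* cpumask_t is a fixed-size bitmap with one bit per possible CPU. */
    typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;

    /* cpus_clear() zeroes every bit without a large constant initializer. */
    #define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
    static inline void __cpus_clear(cpumask_t *dstp, int nbits)
    {
    	bitmap_zero(dstp->bits, nbits);	/* memset-style zeroing of the bitmap */
    }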
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7469c503580d..0fb330271271 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -208,7 +208,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 	/*
 	 * This doesn't need a lock to do pfn_to_page().
 	 * The section can't be removed here because of the
-	 * memory_block->state_sem.
+	 * memory_block->state_mutex.
 	 */
 	zone = page_zone(pfn_to_page(pfn));
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 402a504f1228..32e796af12a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2029,6 +2029,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
+	node_to_cpumask_ptr(tmp, 0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2037,7 +2038,6 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	}
 
 	for_each_node_state(n, N_HIGH_MEMORY) {
-		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
@@ -2050,8 +2050,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		tmp = node_to_cpumask(n);
-		if (!cpus_empty(tmp))
+		node_to_cpumask_ptr_next(tmp, n);
+		if (!cpus_empty(*tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
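The page_alloc.c change shows the pattern used throughout this series: declare the mask once with node_to_cpumask_ptr() and re-point it inside the loop with node_to_cpumask_ptr_next(), so at most one cpumask_t lives on the stack for the whole function (NUMA architectures can instead return a pointer into a per-node map and avoid even that single copy). A sketch of the generic fallback macros as they looked around this time; exact definitions vary by architecture and kernel version:

    /* Generic fallback: keep one hidden on-stack copy (_v) and expose a
     * pointer to it, so callers uniformly dereference a cpumask pointer. */
    #define node_to_cpumask_ptr(v, node)				\
    		cpumask_t _##v = node_to_cpumask(node);		\
    		const cpumask_t *v = &_##v

    /* Re-point the same storage at another node inside a loop. */
    #define node_to_cpumask_ptr_next(v, node)			\
    		_##v = node_to_cpumask(node)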
diff --git a/mm/pdflush.c b/mm/pdflush.c
index 3931f716454a..1c96cfc9e040 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -187,8 +187,8 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpus_allowed = cpuset_cpus_allowed(current);
-	set_cpus_allowed(current, cpus_allowed);
+	cpuset_cpus_allowed(current, &cpus_allowed);
+	set_cpus_allowed_ptr(current, &cpus_allowed);
 
 	return __pdflush(&my_work);
 }
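Both pdflush.c lines change because the cpuset and scheduler interfaces moved from passing cpumask_t by value to passing pointers. The before/after signatures, as best reconstructed for this kernel series (shown for orientation, not as authoritative prototypes):

    /* Old: returns a whole cpumask_t by value, which is then copied
     * again into the scheduler call. */
    cpumask_t cpuset_cpus_allowed(struct task_struct *tsk);
    int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);

    /* New: the caller supplies storage once; no by-value mask copies. */
    void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask);
    int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask);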
diff --git a/mm/slab.c b/mm/slab.c
index 04b308c3bc54..03927cb5ec9e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,14 +1160,13 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
+	node_to_cpumask_ptr(mask, node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
-		cpumask_t mask;
 
-		mask = node_to_cpumask(node);
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
@@ -1183,7 +1182,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(mask)) {
+		if (!cpus_empty(*mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
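In slab.c the per-iteration `cpumask_t mask` is hoisted to function scope: `node` is fixed for the whole cache-chain walk, so recomputing node_to_cpumask(node) on every iteration only wasted stack space and cycles. A condensed view of the transformation (loop bodies elided):

    /* Before: a fresh cpumask_t per loop iteration, recomputed each pass. */
    list_for_each_entry(cachep, &cache_chain, next) {
    	cpumask_t mask;			/* NR_CPUS bits of stack */
    	mask = node_to_cpumask(node);	/* same value every pass */
    	/* ... per-cache teardown ... */
    }

    /* After: established once before the loop; the body reads through
     * the pointer form, hence cpus_empty(*mask). */
    node_to_cpumask_ptr(mask, node);
    list_for_each_entry(cachep, &cache_chain, next) {
    	/* ... per-cache teardown using *mask ... */
    }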
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4046434046e6..f80a5b7c057f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1647,11 +1647,10 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	cpumask_t cpumask;
+	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	cpumask = node_to_cpumask(pgdat->node_id);
-	if (!cpus_empty(cpumask))
-		set_cpus_allowed(tsk, cpumask);
+	if (!cpus_empty(*cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
 	/*
@@ -1880,17 +1879,16 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
-	pg_data_t *pgdat;
-	cpumask_t mask;
 	int nid;
 
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
-			pgdat = NODE_DATA(nid);
-			mask = node_to_cpumask(pgdat->node_id);
-			if (any_online_cpu(mask) != NR_CPUS)
+			pg_data_t *pgdat = NODE_DATA(nid);
+			node_to_cpumask_ptr(mask, pgdat->node_id);
+
+			if (any_online_cpu(*mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
-				set_cpus_allowed(pgdat->kswapd, mask);
+				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
 	}
 	return NOTIFY_OK;
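The comparison change in cpu_callback() tracks the switch from the compile-time NR_CPUS bound to the runtime nr_cpu_ids bound: any_online_cpu() yields the lowest-numbered online CPU set in the mask, and an out-of-range value when there is none. A simplified sketch of the helper as defined in this era's include/linux/cpumask.h (treat the details as approximate):

    /* Scan the mask for the lowest-numbered CPU that is currently online.
     * If none is found the loop runs off the end, leaving cpu at the
     * iterator's terminal value -- historically NR_CPUS, later nr_cpu_ids,
     * which is why "< nr_cpu_ids" is the robust found-one test. */
    #define any_online_cpu(mask)			\
    ({						\
    	int cpu;				\
    	for_each_cpu_mask(cpu, (mask))		\
    		if (cpu_online(cpu))		\
    			break;			\
    	cpu;					\
    })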