-rw-r--r--   mm/pdflush.c | 16
-rw-r--r--   mm/slab.c    |  2
-rw-r--r--   mm/slub.c    | 20
-rw-r--r--   mm/vmscan.c  |  2
-rw-r--r--   mm/vmstat.c  |  4
5 files changed, 28 insertions, 16 deletions
diff --git a/mm/pdflush.c b/mm/pdflush.c
index a0a14c4d5072..15de509b68fd 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
-	cpumask_t cpus_allowed;
+	cpumask_var_t cpus_allowed;
+
+	/*
+	 * Since the caller doesn't even check kthread_run() worked, let's not
+	 * freak out too much if this fails.
+	 */
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+		return 0;
+	}
 
 	/*
 	 * pdflush can spend a lot of time doing encryption via dm-crypt. We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpuset_cpus_allowed(current, &cpus_allowed);
-	set_cpus_allowed_ptr(current, &cpus_allowed);
+	cpuset_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, cpus_allowed);
+	free_cpumask_var(cpus_allowed);
 
 	return __pdflush(&my_work);
 }
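
Note: the pdflush hunk above is the standard conversion from an on-stack cpumask_t to cpumask_var_t, which may be heap-allocated when CONFIG_CPUMASK_OFFSTACK=y and therefore needs an explicit allocate/use/free cycle. A minimal, self-contained sketch of that pattern; the thread body and names here are illustrative only, not part of this patch:

#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_thread(void *unused)
{
	cpumask_var_t allowed;

	/* cpumask_var_t may be heap-backed, so allocation can fail */
	if (!alloc_cpumask_var(&allowed, GFP_KERNEL))
		return -ENOMEM;

	/* bind the thread to the CPUs its cpuset permits */
	cpuset_cpus_allowed(current, allowed);
	set_cpus_allowed_ptr(current, allowed);

	/* the mask is only needed for the calls above; release it early */
	free_cpumask_var(allowed);

	/* ... thread main loop would go here ... */
	return 0;
}
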
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2157,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well. Please see cpuup_callback
+	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1970,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
@@ -2045,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
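
Note: the two slub.c hunks above replace a static cpumask_t with a file-scope DECLARE_BITMAP() and route every access through to_cpumask(). A small sketch of that idiom; the names are hypothetical, not taken from slub.c:

#include <linux/types.h>
#include <linux/cpumask.h>

/* one bit per possible CPU, in place of a static cpumask_t */
static DECLARE_BITMAP(example_initialized, NR_CPUS);

static bool example_mark_cpu(int cpu)
{
	/* to_cpumask() lets the plain bitmap be used with the cpumask API */
	if (cpumask_test_cpu(cpu, to_cpumask(example_initialized)))
		return false;		/* already marked */

	cpumask_set_cpu(cpu, to_cpumask(example_initialized));
	return true;
}
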
@@ -3451,7 +3451,7 @@ struct location {
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
 
@@ -3526,7 +3526,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 		if (track->pid > l->max_pid)
 			l->max_pid = track->pid;
 
-		cpu_set(track->cpu, l->cpus);
+		cpumask_set_cpu(track->cpu,
+				to_cpumask(l->cpus));
 	}
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3556,8 +3557,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	l->max_time = age;
 	l->min_pid = track->pid;
 	l->max_pid = track->pid;
-	cpus_clear(l->cpus);
-	cpu_set(track->cpu, l->cpus);
+	cpumask_clear(to_cpumask(l->cpus));
+	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 	nodes_clear(l->nodes);
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3638,11 +3639,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					&l->cpus);
+					to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
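
Note: the remaining slub.c hunks embed the CPU mask in struct location as a plain bitmap and clear/set/test it through to_cpumask(), in line with retiring direct cpumask_t struct usage. A rough sketch of that embedded-bitmap pattern; the structure and helper names below are illustrative only:

#include <linux/types.h>
#include <linux/cpumask.h>

struct example_stats {
	unsigned long count;
	DECLARE_BITMAP(cpus, NR_CPUS);	/* which CPUs contributed */
};

static void example_record(struct example_stats *st, int cpu, bool reset)
{
	if (reset)
		cpumask_clear(to_cpumask(st->cpus));

	cpumask_set_cpu(cpu, to_cpumask(st->cpus));
	st->count++;
}

static bool example_seen_many(struct example_stats *st)
{
	/* did more than one CPU touch this entry? */
	return cpumask_weight(to_cpumask(st->cpus)) > 1;
}
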
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 240f062f71f1..d196f46c8808 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,7 +1902,7 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	if (!cpus_empty(*cpumask))
+	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c3ccfda23adc..91149746bb8d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,7 +20,7 @@
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
 	int cpu;
 	int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 void all_vm_events(unsigned long *ret)
 {
 	get_online_cpus();
-	sum_vm_events(ret, &cpu_online_map);
+	sum_vm_events(ret, cpu_online_mask);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
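
Note: the vmstat.c change shows the calling convention this series moves toward: helpers take const struct cpumask * and callers pass cpu_online_mask directly while holding the hotplug read lock. A self-contained sketch of that convention, spelled with get_online_cpus()/put_online_cpus() to match this kernel era (newer kernels use cpus_read_lock()); the function names are illustrative:

#include <linux/cpu.h>
#include <linux/cpumask.h>

static unsigned long example_sum(const unsigned long *per_cpu_vals,
				 const struct cpumask *mask)
{
	unsigned long sum = 0;
	int cpu;

	/* iterate only over the CPUs set in the given mask */
	for_each_cpu(cpu, mask)
		sum += per_cpu_vals[cpu];

	return sum;
}

static unsigned long example_sum_online(const unsigned long *per_cpu_vals)
{
	unsigned long sum;

	get_online_cpus();	/* keep cpu_online_mask stable */
	sum = example_sum(per_cpu_vals, cpu_online_mask);
	put_online_cpus();

	return sum;
}
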