diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 21:00:26 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-03-30 21:00:26 -0400 |
| commit | d17abcd5417d84cfa8a225160481203a37dc81d4 (patch) | |
| tree | 74ce2c425c5c6550acec90bc92c8a3f735f0d257 /kernel | |
| parent | db6f204019380c788f1de06ee937bdbccd60e5c0 (diff) | |
| parent | bb75efddeaca89f8a67fd82cdcbaaf436cf17ca9 (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask:
oprofile: Thou shalt not call __exit functions from __init functions
cpumask: remove the now-obsoleted pcibus_to_cpumask(): generic
cpumask: remove cpumask_t from core
cpumask: convert rcutorture.c
cpumask: use new cpumask_ functions in core code.
cpumask: remove references to struct irqaction's mask field.
cpumask: use mm_cpumask() wrapper: kernel/fork.c
cpumask: use set_cpu_active in init/main.c
cpumask: remove node_to_first_cpu
cpumask: fix seq_bitmap_*() functions.
cpumask: remove dangerous CPU_MASK_ALL_PTR, &CPU_MASK_ALL
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/cpu.c | 6 | ||||
| -rw-r--r-- | kernel/fork.c | 2 | ||||
| -rw-r--r-- | kernel/kmod.c | 2 | ||||
| -rw-r--r-- | kernel/kthread.c | 4 | ||||
| -rw-r--r-- | kernel/rcutorture.c | 25 | ||||
| -rw-r--r-- | kernel/sched_cpupri.h | 2 | ||||
| -rw-r--r-- | kernel/stop_machine.c | 2 | ||||
| -rw-r--r-- | kernel/workqueue.c | 6 |
8 files changed, 29 insertions, 20 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c index 79e40f00dcb8..395b6974dc8d 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu) | |||
| 281 | goto out; | 281 | goto out; |
| 282 | } | 282 | } |
| 283 | 283 | ||
| 284 | cpu_clear(cpu, cpu_active_map); | 284 | set_cpu_active(cpu, false); |
| 285 | 285 | ||
| 286 | /* | 286 | /* |
| 287 | * Make sure the all cpus did the reschedule and are not | 287 | * Make sure the all cpus did the reschedule and are not |
| @@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu) | |||
| 296 | err = _cpu_down(cpu, 0); | 296 | err = _cpu_down(cpu, 0); |
| 297 | 297 | ||
| 298 | if (cpu_online(cpu)) | 298 | if (cpu_online(cpu)) |
| 299 | cpu_set(cpu, cpu_active_map); | 299 | set_cpu_active(cpu, true); |
| 300 | 300 | ||
| 301 | out: | 301 | out: |
| 302 | cpu_maps_update_done(); | 302 | cpu_maps_update_done(); |
| @@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) | |||
| 333 | goto out_notify; | 333 | goto out_notify; |
| 334 | BUG_ON(!cpu_online(cpu)); | 334 | BUG_ON(!cpu_online(cpu)); |
| 335 | 335 | ||
| 336 | cpu_set(cpu, cpu_active_map); | 336 | set_cpu_active(cpu, true); |
| 337 | 337 | ||
| 338 | /* Now call notifier in preparation. */ | 338 | /* Now call notifier in preparation. */ |
| 339 | raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); | 339 | raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu); |
diff --git a/kernel/fork.c b/kernel/fork.c index 6715ebc3761d..47c15840a381 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -284,7 +284,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 284 | mm->free_area_cache = oldmm->mmap_base; | 284 | mm->free_area_cache = oldmm->mmap_base; |
| 285 | mm->cached_hole_size = ~0UL; | 285 | mm->cached_hole_size = ~0UL; |
| 286 | mm->map_count = 0; | 286 | mm->map_count = 0; |
| 287 | cpus_clear(mm->cpu_vm_mask); | 287 | cpumask_clear(mm_cpumask(mm)); |
| 288 | mm->mm_rb = RB_ROOT; | 288 | mm->mm_rb = RB_ROOT; |
| 289 | rb_link = &mm->mm_rb.rb_node; | 289 | rb_link = &mm->mm_rb.rb_node; |
| 290 | rb_parent = NULL; | 290 | rb_parent = NULL; |
diff --git a/kernel/kmod.c b/kernel/kmod.c index a27a5f64443d..f0c8f545180d 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
| @@ -167,7 +167,7 @@ static int ____call_usermodehelper(void *data) | |||
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | /* We can run anywhere, unlike our parent keventd(). */ | 169 | /* We can run anywhere, unlike our parent keventd(). */ |
| 170 | set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR); | 170 | set_cpus_allowed_ptr(current, cpu_all_mask); |
| 171 | 171 | ||
| 172 | /* | 172 | /* |
| 173 | * Our parent is keventd, which runs with elevated scheduling priority. | 173 | * Our parent is keventd, which runs with elevated scheduling priority. |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 4fbc456f393d..84bbadd4d021 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -110,7 +110,7 @@ static void create_kthread(struct kthread_create_info *create) | |||
| 110 | */ | 110 | */ |
| 111 | sched_setscheduler(create->result, SCHED_NORMAL, &param); | 111 | sched_setscheduler(create->result, SCHED_NORMAL, &param); |
| 112 | set_user_nice(create->result, KTHREAD_NICE_LEVEL); | 112 | set_user_nice(create->result, KTHREAD_NICE_LEVEL); |
| 113 | set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR); | 113 | set_cpus_allowed_ptr(create->result, cpu_all_mask); |
| 114 | } | 114 | } |
| 115 | complete(&create->done); | 115 | complete(&create->done); |
| 116 | } | 116 | } |
| @@ -240,7 +240,7 @@ int kthreadd(void *unused) | |||
| 240 | set_task_comm(tsk, "kthreadd"); | 240 | set_task_comm(tsk, "kthreadd"); |
| 241 | ignore_signals(tsk); | 241 | ignore_signals(tsk); |
| 242 | set_user_nice(tsk, KTHREAD_NICE_LEVEL); | 242 | set_user_nice(tsk, KTHREAD_NICE_LEVEL); |
| 243 | set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR); | 243 | set_cpus_allowed_ptr(tsk, cpu_all_mask); |
| 244 | 244 | ||
| 245 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; | 245 | current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; |
| 246 | 246 | ||
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 7c4142a79f0a..9b4a975a4b4a 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
| @@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror; | |||
| 126 | static atomic_t n_rcu_torture_error; | 126 | static atomic_t n_rcu_torture_error; |
| 127 | static long n_rcu_torture_timers = 0; | 127 | static long n_rcu_torture_timers = 0; |
| 128 | static struct list_head rcu_torture_removed; | 128 | static struct list_head rcu_torture_removed; |
| 129 | static cpumask_var_t shuffle_tmp_mask; | ||
| 129 | 130 | ||
| 130 | static int stutter_pause_test = 0; | 131 | static int stutter_pause_test = 0; |
| 131 | 132 | ||
| @@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */ | |||
| 889 | */ | 890 | */ |
| 890 | static void rcu_torture_shuffle_tasks(void) | 891 | static void rcu_torture_shuffle_tasks(void) |
| 891 | { | 892 | { |
| 892 | cpumask_t tmp_mask; | ||
| 893 | int i; | 893 | int i; |
| 894 | 894 | ||
| 895 | cpus_setall(tmp_mask); | 895 | cpumask_setall(shuffle_tmp_mask); |
| 896 | get_online_cpus(); | 896 | get_online_cpus(); |
| 897 | 897 | ||
| 898 | /* No point in shuffling if there is only one online CPU (ex: UP) */ | 898 | /* No point in shuffling if there is only one online CPU (ex: UP) */ |
| @@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void) | |||
| 902 | } | 902 | } |
| 903 | 903 | ||
| 904 | if (rcu_idle_cpu != -1) | 904 | if (rcu_idle_cpu != -1) |
| 905 | cpu_clear(rcu_idle_cpu, tmp_mask); | 905 | cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask); |
| 906 | 906 | ||
| 907 | set_cpus_allowed_ptr(current, &tmp_mask); | 907 | set_cpus_allowed_ptr(current, shuffle_tmp_mask); |
| 908 | 908 | ||
| 909 | if (reader_tasks) { | 909 | if (reader_tasks) { |
| 910 | for (i = 0; i < nrealreaders; i++) | 910 | for (i = 0; i < nrealreaders; i++) |
| 911 | if (reader_tasks[i]) | 911 | if (reader_tasks[i]) |
| 912 | set_cpus_allowed_ptr(reader_tasks[i], | 912 | set_cpus_allowed_ptr(reader_tasks[i], |
| 913 | &tmp_mask); | 913 | shuffle_tmp_mask); |
| 914 | } | 914 | } |
| 915 | 915 | ||
| 916 | if (fakewriter_tasks) { | 916 | if (fakewriter_tasks) { |
| 917 | for (i = 0; i < nfakewriters; i++) | 917 | for (i = 0; i < nfakewriters; i++) |
| 918 | if (fakewriter_tasks[i]) | 918 | if (fakewriter_tasks[i]) |
| 919 | set_cpus_allowed_ptr(fakewriter_tasks[i], | 919 | set_cpus_allowed_ptr(fakewriter_tasks[i], |
| 920 | &tmp_mask); | 920 | shuffle_tmp_mask); |
| 921 | } | 921 | } |
| 922 | 922 | ||
| 923 | if (writer_task) | 923 | if (writer_task) |
| 924 | set_cpus_allowed_ptr(writer_task, &tmp_mask); | 924 | set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); |
| 925 | 925 | ||
| 926 | if (stats_task) | 926 | if (stats_task) |
| 927 | set_cpus_allowed_ptr(stats_task, &tmp_mask); | 927 | set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); |
| 928 | 928 | ||
| 929 | if (rcu_idle_cpu == -1) | 929 | if (rcu_idle_cpu == -1) |
| 930 | rcu_idle_cpu = num_online_cpus() - 1; | 930 | rcu_idle_cpu = num_online_cpus() - 1; |
| @@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void) | |||
| 1012 | if (shuffler_task) { | 1012 | if (shuffler_task) { |
| 1013 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); | 1013 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); |
| 1014 | kthread_stop(shuffler_task); | 1014 | kthread_stop(shuffler_task); |
| 1015 | free_cpumask_var(shuffle_tmp_mask); | ||
| 1015 | } | 1016 | } |
| 1016 | shuffler_task = NULL; | 1017 | shuffler_task = NULL; |
| 1017 | 1018 | ||
| @@ -1190,10 +1191,18 @@ rcu_torture_init(void) | |||
| 1190 | } | 1191 | } |
| 1191 | if (test_no_idle_hz) { | 1192 | if (test_no_idle_hz) { |
| 1192 | rcu_idle_cpu = num_online_cpus() - 1; | 1193 | rcu_idle_cpu = num_online_cpus() - 1; |
| 1194 | |||
| 1195 | if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { | ||
| 1196 | firsterr = -ENOMEM; | ||
| 1197 | VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask"); | ||
| 1198 | goto unwind; | ||
| 1199 | } | ||
| 1200 | |||
| 1193 | /* Create the shuffler thread */ | 1201 | /* Create the shuffler thread */ |
| 1194 | shuffler_task = kthread_run(rcu_torture_shuffle, NULL, | 1202 | shuffler_task = kthread_run(rcu_torture_shuffle, NULL, |
| 1195 | "rcu_torture_shuffle"); | 1203 | "rcu_torture_shuffle"); |
| 1196 | if (IS_ERR(shuffler_task)) { | 1204 | if (IS_ERR(shuffler_task)) { |
| 1205 | free_cpumask_var(shuffle_tmp_mask); | ||
| 1197 | firsterr = PTR_ERR(shuffler_task); | 1206 | firsterr = PTR_ERR(shuffler_task); |
| 1198 | VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); | 1207 | VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); |
| 1199 | shuffler_task = NULL; | 1208 | shuffler_task = NULL; |
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h index 642a94ef8a0a..9a7e859b8fbf 100644 --- a/kernel/sched_cpupri.h +++ b/kernel/sched_cpupri.h | |||
| @@ -25,7 +25,7 @@ struct cpupri { | |||
| 25 | 25 | ||
| 26 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
| 27 | int cpupri_find(struct cpupri *cp, | 27 | int cpupri_find(struct cpupri *cp, |
| 28 | struct task_struct *p, cpumask_t *lowest_mask); | 28 | struct task_struct *p, struct cpumask *lowest_mask); |
| 29 | void cpupri_set(struct cpupri *cp, int cpu, int pri); | 29 | void cpupri_set(struct cpupri *cp, int cpu, int pri); |
| 30 | int cpupri_init(struct cpupri *cp, bool bootmem); | 30 | int cpupri_init(struct cpupri *cp, bool bootmem); |
| 31 | void cpupri_cleanup(struct cpupri *cp); | 31 | void cpupri_cleanup(struct cpupri *cp); |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 74541ca49536..912823e2a11b 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
| @@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock); | |||
| 44 | static int refcount; | 44 | static int refcount; |
| 45 | static struct workqueue_struct *stop_machine_wq; | 45 | static struct workqueue_struct *stop_machine_wq; |
| 46 | static struct stop_machine_data active, idle; | 46 | static struct stop_machine_data active, idle; |
| 47 | static const cpumask_t *active_cpus; | 47 | static const struct cpumask *active_cpus; |
| 48 | static void *stop_machine_work; | 48 | static void *stop_machine_work; |
| 49 | 49 | ||
| 50 | static void set_state(enum stopmachine_state newstate) | 50 | static void set_state(enum stopmachine_state newstate) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1f0c509b40d3..9aedd9fd825b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -416,7 +416,7 @@ void flush_workqueue(struct workqueue_struct *wq) | |||
| 416 | might_sleep(); | 416 | might_sleep(); |
| 417 | lock_map_acquire(&wq->lockdep_map); | 417 | lock_map_acquire(&wq->lockdep_map); |
| 418 | lock_map_release(&wq->lockdep_map); | 418 | lock_map_release(&wq->lockdep_map); |
| 419 | for_each_cpu_mask_nr(cpu, *cpu_map) | 419 | for_each_cpu(cpu, cpu_map) |
| 420 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); | 420 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); |
| 421 | } | 421 | } |
| 422 | EXPORT_SYMBOL_GPL(flush_workqueue); | 422 | EXPORT_SYMBOL_GPL(flush_workqueue); |
| @@ -547,7 +547,7 @@ static void wait_on_work(struct work_struct *work) | |||
| 547 | wq = cwq->wq; | 547 | wq = cwq->wq; |
| 548 | cpu_map = wq_cpu_map(wq); | 548 | cpu_map = wq_cpu_map(wq); |
| 549 | 549 | ||
| 550 | for_each_cpu_mask_nr(cpu, *cpu_map) | 550 | for_each_cpu(cpu, cpu_map) |
| 551 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 551 | wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
| 552 | } | 552 | } |
| 553 | 553 | ||
| @@ -911,7 +911,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 911 | list_del(&wq->list); | 911 | list_del(&wq->list); |
| 912 | spin_unlock(&workqueue_lock); | 912 | spin_unlock(&workqueue_lock); |
| 913 | 913 | ||
| 914 | for_each_cpu_mask_nr(cpu, *cpu_map) | 914 | for_each_cpu(cpu, cpu_map) |
| 915 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); | 915 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); |
| 916 | cpu_maps_update_done(); | 916 | cpu_maps_update_done(); |
| 917 | 917 | ||
