author     Mike Travis <travis@sgi.com>       2008-04-04 21:11:06 -0400
committer  Ingo Molnar <mingo@elte.hu>        2008-04-19 13:44:58 -0400
commit     f70316dace2bb99730800d47044acb818c6735f6 (patch)
tree       03c0178c8d6c0213a82b800f4a3e00c2da9a4d5c /kernel/rcutorture.c
parent     fc0e474840d1fd96f28fbd76d4f36b80e7ad1cc3 (diff)
generic: use new set_cpus_allowed_ptr function
* Use the new set_cpus_allowed_ptr() function added by the previous patch,
  which, instead of taking the "newly allowed cpus" cpumask_t argument
  by value, takes it by pointer (see the usage sketch after the sign-offs
  below):

    -int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
    +int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
* Modify the CPU_MASK_ALL usage: initialize the on-stack tmp_mask with
  cpus_setall() instead of the CPU_MASK_ALL initializer.
Depends on:
[sched-devel]: sched: add new set_cpus_allowed_ptr function
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
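
For context, here is a minimal sketch of the calling convention this patch
moves to. It assumes a kernel tree of this era (cpumask_t, cpus_setall(),
cpu_clear() and set_cpus_allowed_ptr(), as seen in the diff below); the
helper name example_pin_current() is hypothetical and only mirrors the
pattern used in rcu_torture_shuffle_tasks().

    /*
     * Illustrative sketch only: shows the by-pointer call that replaces
     * the by-value set_cpus_allowed(). The helper name is made up.
     */
    #include <linux/cpumask.h>
    #include <linux/sched.h>

    static void example_pin_current(int forbidden_cpu)
    {
    	cpumask_t tmp_mask;		/* on-stack mask, as in rcu_torture_shuffle_tasks() */

    	cpus_setall(tmp_mask);		/* replaces the CPU_MASK_ALL initializer */
    	if (forbidden_cpu != -1)
    		cpu_clear(forbidden_cpu, tmp_mask);

    	/*
    	 * Old interface copied the whole cpumask_t by value:
    	 *	set_cpus_allowed(current, tmp_mask);
    	 * The new interface takes a pointer instead:
    	 */
    	set_cpus_allowed_ptr(current, &tmp_mask);
    }

Passing the mask by pointer avoids copying the full cpumask_t argument on
every call; the structure grows with NR_CPUS, and the by-value form is what
this patch removes from kernel/rcutorture.c.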
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--   kernel/rcutorture.c | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index fd599829e72a..47894f919d4e 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -723,9 +723,10 @@ static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_t tmp_mask = CPU_MASK_ALL;
+	cpumask_t tmp_mask;
 	int i;
 
+	cpus_setall(tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -737,25 +738,27 @@ static void rcu_torture_shuffle_tasks(void)
 	if (rcu_idle_cpu != -1)
 		cpu_clear(rcu_idle_cpu, tmp_mask);
 
-	set_cpus_allowed(current, tmp_mask);
+	set_cpus_allowed_ptr(current, &tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
-				set_cpus_allowed(reader_tasks[i], tmp_mask);
+				set_cpus_allowed_ptr(reader_tasks[i],
+						     &tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
-				set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
+				set_cpus_allowed_ptr(fakewriter_tasks[i],
+						     &tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed(writer_task, tmp_mask);
+		set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed(stats_task, tmp_mask);
+		set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;