 kernel/cpuset.c | 37 +++++++++++++++----------------------
 1 file changed, 15 insertions(+), 22 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f227bc172690..827cd9adccb2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -843,37 +843,25 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
+ * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
  *
  * Called with cgroup_mutex held
  *
  * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
  * calling callback functions for each.
  *
- * Return 0 if successful, -errno if not.
+ * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * if @heap != NULL.
  */
-static int update_tasks_cpumask(struct cpuset *cs)
+static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
 	struct cgroup_scanner scan;
-	struct ptr_heap heap;
-	int retval;
-
-	/*
-	 * cgroup_scan_tasks() will initialize heap->gt for us.
-	 * heap_init() is still needed here for we should not change
-	 * cs->cpus_allowed when heap_init() fails.
-	 */
-	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-	if (retval)
-		return retval;
 
 	scan.cg = cs->css.cgroup;
 	scan.test_task = cpuset_test_cpumask;
 	scan.process_task = cpuset_change_cpumask;
-	scan.heap = &heap;
-	retval = cgroup_scan_tasks(&scan);
-
-	heap_free(&heap);
-	return retval;
+	scan.heap = heap;
+	cgroup_scan_tasks(&scan);
 }
 
 /**
@@ -883,6 +871,7 @@ static int update_tasks_cpumask(struct cpuset *cs)
  */
 static int update_cpumask(struct cpuset *cs, const char *buf)
 {
+	struct ptr_heap heap;
 	struct cpuset trialcs;
 	int retval;
 	int is_load_balanced;
@@ -917,6 +906,10 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
 	if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
 		return 0;
 
+	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+	if (retval)
+		return retval;
+
 	is_load_balanced = is_sched_load_balance(&trialcs);
 
 	mutex_lock(&callback_mutex);
@@ -927,9 +920,9 @@ static int update_cpumask(struct cpuset *cs, const char *buf)
 	 * Scan tasks in the cpuset, and update the cpumasks of any
 	 * that need an update.
 	 */
-	retval = update_tasks_cpumask(cs);
-	if (retval < 0)
-		return retval;
+	update_tasks_cpumask(cs, &heap);
+
+	heap_free(&heap);
 
 	if (is_load_balanced)
 		async_rebuild_sched_domains();
@@ -1965,7 +1958,7 @@ static void scan_for_empty_cpusets(const struct cpuset *root)
 		    nodes_empty(cp->mems_allowed))
 			remove_tasks_in_empty_cpuset(cp);
 		else {
-			update_tasks_cpumask(cp);
+			update_tasks_cpumask(cp, NULL);
 			update_tasks_nodemask(cp, &oldmems);
 		}
 	}
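
The core of the patch is an allocate-before-commit pattern: the ptr_heap that cgroup_scan_tasks() needs is now set up in update_cpumask() before cs->cpus_allowed is modified, so an allocation failure can no longer leave the cpuset's mask changed while its tasks were never updated; update_tasks_cpumask() itself becomes void and just consumes the pre-allocated heap (the hotplug path in scan_for_empty_cpusets() passes NULL and ignores failure). Below is a minimal user-space sketch of the same pattern with hypothetical names; it is an analogue for illustration, not kernel code.

```c
/*
 * Allocate-before-commit sketch (hypothetical names): reserve the scratch
 * memory the follow-up phase needs *before* changing state, so a failed
 * allocation leaves the old state intact.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct config {
	char value[32];
};

/* Phase 2: uses the pre-allocated scratch buffer, so it cannot fail.
 * This mirrors update_tasks_cpumask(cs, &heap) in the patch. */
static void propagate_update(const struct config *cfg, char *scratch, size_t len)
{
	snprintf(scratch, len, "propagating \"%s\"", cfg->value);
	puts(scratch);
}

/* Phase 1: check for a no-op, reserve, commit, then propagate.
 * This mirrors update_cpumask() after the patch. */
static int update_config(struct config *cfg, const char *buf)
{
	size_t len = 128;
	char *scratch;

	if (strcmp(cfg->value, buf) == 0)
		return 0;		/* nothing changed, nothing to do */

	scratch = malloc(len);		/* mirrors heap_init() before the commit */
	if (!scratch)
		return -1;		/* failure: cfg->value is still untouched */

	snprintf(cfg->value, sizeof(cfg->value), "%s", buf);	/* commit the change */
	propagate_update(cfg, scratch, len);			/* cannot fail now */

	free(scratch);			/* mirrors heap_free() */
	return 0;
}

int main(void)
{
	struct config cfg = { .value = "old" };

	return update_config(&cfg, "new") ? EXIT_FAILURE : EXIT_SUCCESS;
}
```

The design point, as the new kernel-doc comment states, is that once the state change has been committed, the task scan is guaranteed to succeed (cgroup_scan_tasks() always returns 0 when given a pre-initialized heap), so no error has to be reported after the fact.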