Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c | 63
1 file changed, 26 insertions(+), 37 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index be4f5036ea5e..6fe23f2ac742 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -830,7 +830,7 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
 /**
  * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
  * @tsk: task to test
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset to @tsk belongs to
  *
  * Called by cgroup_scan_tasks() for each task in a cgroup whose
  * cpus_allowed mask needs to be changed.
@@ -838,12 +838,11 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cpuset_mutex at this point.
  */
-static void cpuset_change_cpumask(struct task_struct *tsk,
-				  struct cgroup_scanner *scan)
+static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
 {
-	struct cpuset *cpus_cs;
+	struct cpuset *cs = data;
+	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 
-	cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cgrp));
 	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
 }
 
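Note on the converted callback shape: each process_task handler in this patch now takes an opaque void *data instead of a struct cgroup_scanner *, and recovers its typed argument by plain assignment (C converts from void * implicitly, so no cast is needed). A minimal sketch with a hypothetical handler name:

	static void example_process_task(struct task_struct *tsk, void *data)
	{
		struct cpuset *cs = data;	/* void * converts implicitly */

		/* ... act on tsk using cs ... */
	}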
@@ -862,13 +861,8 @@ static void cpuset_change_cpumask(struct task_struct *tsk,
  */
 static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 {
-	struct cgroup_scanner scan;
-
-	scan.cgrp = cs->css.cgroup;
-	scan.test_task = NULL;
-	scan.process_task = cpuset_change_cpumask;
-	scan.heap = heap;
-	cgroup_scan_tasks(&scan);
+	cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_cpumask, cs,
+			  heap);
 }
 
 /*
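The fields of the old struct cgroup_scanner (cgrp, test_task, process_task, heap, data) now travel as explicit parameters. Inferred purely from the call sites in this patch, the new cgroup_scan_tasks() declaration would look roughly like the sketch below; the parameter names and the test-callback/return types are assumptions, and the authoritative prototype lives in include/linux/cgroup.h:

	int cgroup_scan_tasks(struct cgroup *cgrp,
			      bool (*test)(struct task_struct *p, void *data),
			      void (*process)(struct task_struct *p, void *data),
			      void *data, struct ptr_heap *heap);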
@@ -1052,20 +1046,24 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 	task_unlock(tsk);
 }
 
+struct cpuset_change_nodemask_arg {
+	struct cpuset *cs;
+	nodemask_t *newmems;
+};
+
 /*
  * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
  * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
  * memory_migrate flag is set. Called with cpuset_mutex held.
  */
-static void cpuset_change_nodemask(struct task_struct *p,
-				   struct cgroup_scanner *scan)
+static void cpuset_change_nodemask(struct task_struct *p, void *data)
 {
-	struct cpuset *cs = cgroup_cs(scan->cgrp);
+	struct cpuset_change_nodemask_arg *arg = data;
+	struct cpuset *cs = arg->cs;
 	struct mm_struct *mm;
 	int migrate;
-	nodemask_t *newmems = scan->data;
 
-	cpuset_change_task_nodemask(p, newmems);
+	cpuset_change_task_nodemask(p, arg->newmems);
 
 	mm = get_task_mm(p);
 	if (!mm)
@@ -1075,7 +1073,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
 
 	mpol_rebind_mm(mm, &cs->mems_allowed);
 	if (migrate)
-		cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
+		cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
 	mmput(mm);
 }
 
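Because only one opaque pointer travels through the scanner, cpuset_change_nodemask() gets its two inputs (the cpuset and the new nodemask) bundled in a small on-stack struct. The idiom is not kernel-specific; a self-contained userspace sketch of the same pattern, with made-up names, is:

	#include <stdio.h>

	/* Generic walker: hands each element plus one opaque argument to a
	 * callback -- the same shape as the reworked cgroup_scan_tasks(). */
	static void for_each_item(const int *items, int n,
				  void (*process)(int item, void *data),
				  void *data)
	{
		int i;

		for (i = 0; i < n; i++)
			process(items[i], data);
	}

	/* Two inputs the callback needs, bundled because only one void *
	 * fits through the walker. */
	struct scale_arg {
		int factor;
		int offset;
	};

	static void print_scaled(int item, void *data)
	{
		struct scale_arg *arg = data;	/* recover the typed bundle */

		printf("%d\n", item * arg->factor + arg->offset);
	}

	int main(void)
	{
		int items[] = { 1, 2, 3 };
		struct scale_arg arg = { .factor = 10, .offset = 5 };

		for_each_item(items, 3, print_scaled, &arg);
		return 0;
	}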
@@ -1093,19 +1091,14 @@ static void *cpuset_being_rebound;
 static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 {
 	static nodemask_t newmems;	/* protected by cpuset_mutex */
-	struct cgroup_scanner scan;
 	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
+	struct cpuset_change_nodemask_arg arg = { .cs = cs,
+						  .newmems = &newmems };
 
 	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */
 
 	guarantee_online_mems(mems_cs, &newmems);
 
-	scan.cgrp = cs->css.cgroup;
-	scan.test_task = NULL;
-	scan.process_task = cpuset_change_nodemask;
-	scan.heap = heap;
-	scan.data = &newmems;
-
 	/*
 	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
 	 * take while holding tasklist_lock. Forks can happen - the
@@ -1116,7 +1109,8 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 	 * is idempotent. Also migrate pages in each mm to new nodes.
 	 */
-	cgroup_scan_tasks(&scan);
+	cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_nodemask, &arg,
+			  heap);
 
 	/*
 	 * All the tasks' nodemasks have been updated, update
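Passing &arg here is safe even though arg lives on update_tasks_nodemask()'s stack: cgroup_scan_tasks() invokes the callback synchronously for every task and only then returns, so the bundled argument outlives all of its uses.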
@@ -1263,17 +1257,18 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 /*
  * cpuset_change_flag - make a task's spread flags the same as its cpuset's
  * @tsk: task to be updated
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset to @tsk belongs to
  *
  * Called by cgroup_scan_tasks() for each task in a cgroup.
  *
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cpuset_mutex at this point.
  */
-static void cpuset_change_flag(struct task_struct *tsk,
-			       struct cgroup_scanner *scan)
+static void cpuset_change_flag(struct task_struct *tsk, void *data)
 {
-	cpuset_update_task_spread_flag(cgroup_cs(scan->cgrp), tsk);
+	struct cpuset *cs = data;
+
+	cpuset_update_task_spread_flag(cs, tsk);
 }
 
 /*
@@ -1291,13 +1286,7 @@ static void cpuset_change_flag(struct task_struct *tsk,
  */
 static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
 {
-	struct cgroup_scanner scan;
-
-	scan.cgrp = cs->css.cgroup;
-	scan.test_task = NULL;
-	scan.process_task = cpuset_change_flag;
-	scan.heap = heap;
-	cgroup_scan_tasks(&scan);
+	cgroup_scan_tasks(cs->css.cgroup, NULL, cpuset_change_flag, cs, heap);
 }
 
 /*