author     Tejun Heo <tj@kernel.org>    2016-01-19 12:18:41 -0500
committer  Tejun Heo <tj@kernel.org>    2016-01-22 10:22:46 -0500
commit     e93ad19d05648397ef3bcb838d26aec06c245dc0 (patch)
tree       9b5587594c7f589c87cf476ae45835d837058a7f
parent     3e1e21c7bfcfa9bf06c07f48a13faca2f62b3339 (diff)

cpuset: make mm migration asynchronous
If "cpuset.memory_migrate" is set, when a process is moved from one cpuset
to another with a different memory node mask, the pages used by the process
are migrated to the new set of nodes.  This was performed synchronously in
the ->attach() callback, which is synchronized against process management.
Recently, the synchronization was changed from a per-process rwsem to a
global percpu rwsem for simplicity and optimization.

Combined with the synchronous mm migration, this led to deadlocks: mm
migration could schedule a work item which may in turn try to create a new
worker that blocks on the process management lock held from the cgroup
process migration path.  Such a heavy operation shouldn't be performed
synchronously from that deep inside cgroup migration in the first place.

This patch punts the actual migration to an ordered workqueue and updates
the cgroup process migration and cpuset config update paths to flush the
workqueue after all locks are released.  This way, the operations still
appear synchronous to userland without entangling mm migration with process
management synchronization.

CPU hotplug can also invoke mm migration, but there is no reason for it to
wait for mm migrations, so it doesn't synchronize against their completion.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-and-tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: stable@vger.kernel.org # v4.4+
-rw-r--r--   include/linux/cpuset.h |  6
-rw-r--r--   kernel/cgroup.c        |  2
-rw-r--r--   kernel/cpuset.c        | 71
3 files changed, 57 insertions(+), 22 deletions(-)
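For context, the following is a minimal, self-contained sketch (not part of
the patch) of the pattern the commit message describes: allocate a work item,
queue it on an ordered workqueue from a path that may hold process-management
locks, and flush the workqueue only after those locks are dropped so the
operation still appears synchronous to the caller.  The demo_* names and the
integer payload are illustrative assumptions, not code from this commit.

        /* sketch: punt heavy work to an ordered wq, flush after unlocking */
        #include <linux/module.h>
        #include <linux/workqueue.h>
        #include <linux/slab.h>

        static struct workqueue_struct *demo_wq;

        struct demo_work {
                struct work_struct work;
                int payload;            /* stands in for the mm/nodemasks */
        };

        static void demo_workfn(struct work_struct *work)
        {
                struct demo_work *dw = container_of(work, struct demo_work, work);

                /* the heavy operation runs here, outside the caller's locks */
                pr_info("processing payload %d\n", dw->payload);
                kfree(dw);
        }

        /* called from a locked path; only allocates and queues */
        static void demo_punt(int payload)
        {
                struct demo_work *dw = kzalloc(sizeof(*dw), GFP_KERNEL);

                if (!dw)
                        return;
                dw->payload = payload;
                INIT_WORK(&dw->work, demo_workfn);
                /* an ordered wq executes items one at a time, in queue order */
                queue_work(demo_wq, &dw->work);
        }

        static int __init demo_init(void)
        {
                demo_wq = alloc_ordered_workqueue("demo_wq", 0);
                if (!demo_wq)
                        return -ENOMEM;
                demo_punt(1);
                demo_punt(2);
                /* after all locks are released: wait, so it looks synchronous */
                flush_workqueue(demo_wq);
                return 0;
        }

        static void __exit demo_exit(void)
        {
                destroy_workqueue(demo_wq);
        }

        module_init(demo_init);
        module_exit(demo_exit);
        MODULE_LICENSE("GPL");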
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868ccb493..fea160ee5803 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 	task_unlock(current);
 }
 
+extern void cpuset_post_attach_flush(void);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
 	return false;
 }
 
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c03a640ef6da..88abd4d076d8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -58,6 +58,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/cpuset.h>
 #include <net/sock.h>
 
 /*
@@ -2739,6 +2740,7 @@ out_unlock_rcu:
 out_unlock_threadgroup:
 	percpu_up_write(&cgroup_threadgroup_rwsem);
 	cgroup_kn_unlock(of->kn);
+	cpuset_post_attach_flush();
 	return ret ?: nbytes;
 }
 
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e945fcd8179..41989ab4db57 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -287,6 +287,8 @@ static struct cpuset top_cpuset = {
 static DEFINE_MUTEX(cpuset_mutex);
 static DEFINE_SPINLOCK(callback_lock);
 
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
 /*
  * CPU / memory hotplug is handled asynchronously.
  */
@@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 }
 
 /*
- * cpuset_migrate_mm
- *
- *    Migrate memory region from one set of nodes to another.
- *
- *    Temporarilly set tasks mems_allowed to target nodes of migration,
- *    so that the migration code can allocate pages on these nodes.
- *
- *    While the mm_struct we are migrating is typically from some
- *    other task, the task_struct mems_allowed that we are hacking
- *    is for our current task, which must allocate new pages for that
- *    migrating memory region.
+ * Migrate memory region from one set of nodes to another.  This is
+ * performed asynchronously as it can be called from process migration path
+ * holding locks involved in process management.  All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
  */
 
+struct cpuset_migrate_mm_work {
+	struct work_struct	work;
+	struct mm_struct	*mm;
+	nodemask_t		from;
+	nodemask_t		to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+	struct cpuset_migrate_mm_work *mwork =
+		container_of(work, struct cpuset_migrate_mm_work, work);
+
+	/* on a wq worker, no need to worry about %current's mems_allowed */
+	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+	mmput(mwork->mm);
+	kfree(mwork);
+}
+
 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 							const nodemask_t *to)
 {
-	struct task_struct *tsk = current;
-
-	tsk->mems_allowed = *to;
+	struct cpuset_migrate_mm_work *mwork;
 
-	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+	if (mwork) {
+		mwork->mm = mm;
+		mwork->from = *from;
+		mwork->to = *to;
+		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+		queue_work(cpuset_migrate_mm_wq, &mwork->work);
+	} else {
+		mmput(mm);
+	}
+}
 
-	rcu_read_lock();
-	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
-	rcu_read_unlock();
-}
+void cpuset_post_attach_flush(void)
+{
+	flush_workqueue(cpuset_migrate_mm_wq);
+}
 
 /*
@@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
 		mpol_rebind_mm(mm, &cs->mems_allowed);
 		if (migrate)
 			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
-		mmput(mm);
+		else
+			mmput(mm);
 	}
 	css_task_iter_end(&it);
 
@@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 			 * @old_mems_allowed is the right nodesets that we
 			 * migrate mm from.
 			 */
-			if (is_memory_migrate(cs)) {
+			if (is_memory_migrate(cs))
 				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
 						  &cpuset_attach_nodemask_to);
-			}
-			mmput(mm);
+			else
+				mmput(mm);
 		}
 	}
 
@@ -1714,6 +1737,7 @@ out_unlock:
 	mutex_unlock(&cpuset_mutex);
 	kernfs_unbreak_active_protection(of->kn);
 	css_put(&cs->css);
+	flush_workqueue(cpuset_migrate_mm_wq);
 	return retval ?: nbytes;
 }
 
@@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void)
 	top_cpuset.effective_mems = node_states[N_MEMORY];
 
 	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+	BUG_ON(!cpuset_migrate_mm_wq);
 }
 
 /**