 kernel/cpuset.c | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d3df02e76643..116a4164720a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1623,7 +1623,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 	 * resources, wait for the previously scheduled operations before
 	 * proceeding, so that we don't end up keep removing tasks added
 	 * after execution capability is restored.
+	 *
+	 * cpuset_hotplug_work calls back into cgroup core via
+	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
+	 * operation like this one can lead to a deadlock through kernfs
+	 * active_ref protection.  Let's break the protection.  Losing the
+	 * protection is okay as we check whether @cs is online after
+	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
+	 * hierarchies.
 	 */
+	css_get(&cs->css);
+	kernfs_break_active_protection(of->kn);
 	flush_work(&cpuset_hotplug_work);
 
 	mutex_lock(&cpuset_mutex);
@@ -1651,6 +1661,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 	free_trial_cpuset(trialcs);
 out_unlock:
 	mutex_unlock(&cpuset_mutex);
+	kernfs_unbreak_active_protection(of->kn);
+	css_put(&cs->css);
 	return retval ?: nbytes;
 }
 
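For context, a condensed sketch of what cpuset_write_resmask() looks like with this change applied is shown below. It is an illustration only: the parts outside the hunks above (the css_cs(of_css(of)) lookup, the is_cpuset_online() check, the actual mask update) are assumed from the surrounding kernel/cpuset.c of this era and are elided or stubbed here.

/*
 * Condensed sketch of the post-patch write path (not the full function).
 * cpuset_hotplug_work, cpuset_mutex, css_cs() and is_cpuset_online() are
 * the file-local definitions in kernel/cpuset.c and are assumed here.
 */
static ssize_t resmask_write_sketch(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct cpuset *cs = css_cs(of_css(of));
	int retval = -ENODEV;

	/*
	 * Pin @cs and drop kernfs active protection so that waiting for
	 * cpuset_hotplug_work, which re-enters cgroup core via
	 * cgroup_transfer_tasks(), cannot deadlock against this kernfs
	 * operation.
	 */
	css_get(&cs->css);
	kernfs_break_active_protection(of->kn);
	flush_work(&cpuset_hotplug_work);

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))
		goto out_unlock;

	/* ... parse @buf and update the "cpus"/"mems" mask here ... */
	retval = 0;

out_unlock:
	mutex_unlock(&cpuset_mutex);
	/* Undo in reverse order: restore active protection, drop the ref. */
	kernfs_unbreak_active_protection(of->kn);
	css_put(&cs->css);
	return retval ?: nbytes;
}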