about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2017-12-04 17:55:59 -0500
committerTejun Heo <tj@kernel.org>2017-12-04 17:55:59 -0500
commitbdfbbda90aeb75ce0951413fd7f495d4d377bd5e (patch)
tree126764843f3c07922b7603dc32216308f44bb4e9
parent11db855c3d06e82f432cb1bafd73296586d5ceec (diff)
Revert "cgroup/cpuset: remove circular dependency deadlock"
This reverts commit aa24163b2ee5c92120e32e99b5a93143a0f4258e. This and the following commit led to another circular locking scenario and the scenario which is fixed by this commit no longer exists after e8b3f8db7aad ("workqueue/hotplug: simplify workqueue_offline_cpu()") which removes work item flushing from hotplug path. Revert it for now. Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--	kernel/cgroup/cpuset.c	53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index cab5fd1ee767..f7efa7b4d825 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -812,18 +812,6 @@ done:
 	return ndoms;
 }
 
-static void cpuset_sched_change_begin(void)
-{
-	cpus_read_lock();
-	mutex_lock(&cpuset_mutex);
-}
-
-static void cpuset_sched_change_end(void)
-{
-	mutex_unlock(&cpuset_mutex);
-	cpus_read_unlock();
-}
-
 /*
  * Rebuild scheduler domains.
  *
@@ -833,14 +821,16 @@ static void cpuset_sched_change_end(void)
  * 'cpus' is removed, then call this routine to rebuild the
  * scheduler's dynamic sched domains.
  *
+ * Call with cpuset_mutex held.  Takes get_online_cpus().
  */
-static void rebuild_sched_domains_cpuslocked(void)
+static void rebuild_sched_domains_locked(void)
 {
 	struct sched_domain_attr *attr;
 	cpumask_var_t *doms;
 	int ndoms;
 
 	lockdep_assert_held(&cpuset_mutex);
+	get_online_cpus();
 
 	/*
 	 * We have raced with CPU hotplug. Don't do anything to avoid
@@ -848,25 +838,27 @@ static void rebuild_sched_domains_cpuslocked(void)
 	 * Anyways, hotplug work item will rebuild sched domains.
 	 */
 	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
-		return;
+		goto out;
 
 	/* Generate domain masks and attrs */
 	ndoms = generate_sched_domains(&doms, &attr);
 
 	/* Have scheduler rebuild the domains */
 	partition_sched_domains(ndoms, doms, attr);
+out:
+	put_online_cpus();
 }
 #else /* !CONFIG_SMP */
-static void rebuild_sched_domains_cpuslocked(void)
+static void rebuild_sched_domains_locked(void)
 {
 }
 #endif /* CONFIG_SMP */
 
 void rebuild_sched_domains(void)
 {
-	cpuset_sched_change_begin();
-	rebuild_sched_domains_cpuslocked();
-	cpuset_sched_change_end();
+	mutex_lock(&cpuset_mutex);
+	rebuild_sched_domains_locked();
+	mutex_unlock(&cpuset_mutex);
 }
 
 /**
@@ -952,7 +944,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 	rcu_read_unlock();
 
 	if (need_rebuild_sched_domains)
-		rebuild_sched_domains_cpuslocked();
+		rebuild_sched_domains_locked();
 }
 
 /**
@@ -1284,7 +1276,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 		cs->relax_domain_level = val;
 		if (!cpumask_empty(cs->cpus_allowed) &&
 		    is_sched_load_balance(cs))
-			rebuild_sched_domains_cpuslocked();
+			rebuild_sched_domains_locked();
 	}
 
 	return 0;
@@ -1317,6 +1309,7 @@ static void update_tasks_flags(struct cpuset *cs)
  *
  * Call with cpuset_mutex held.
  */
+
 static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 		       int turning_on)
 {
@@ -1349,7 +1342,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
-		rebuild_sched_domains_cpuslocked();
+		rebuild_sched_domains_locked();
 
 	if (spread_flag_changed)
 		update_tasks_flags(cs);
@@ -1617,7 +1610,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 	cpuset_filetype_t type = cft->private;
 	int retval = 0;
 
-	cpuset_sched_change_begin();
+	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs)) {
 		retval = -ENODEV;
 		goto out_unlock;
@@ -1653,7 +1646,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
 		break;
 	}
 out_unlock:
-	cpuset_sched_change_end();
+	mutex_unlock(&cpuset_mutex);
 	return retval;
 }
 
@@ -1664,7 +1657,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
 	cpuset_filetype_t type = cft->private;
 	int retval = -ENODEV;
 
-	cpuset_sched_change_begin();
+	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs))
 		goto out_unlock;
 
@@ -1677,7 +1670,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
 		break;
 	}
 out_unlock:
-	cpuset_sched_change_end();
+	mutex_unlock(&cpuset_mutex);
 	return retval;
 }
 
@@ -1716,7 +1709,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 	kernfs_break_active_protection(of->kn);
 	flush_work(&cpuset_hotplug_work);
 
-	cpuset_sched_change_begin();
+	mutex_lock(&cpuset_mutex);
 	if (!is_cpuset_online(cs))
 		goto out_unlock;
 
@@ -1740,7 +1733,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
 
 	free_trial_cpuset(trialcs);
 out_unlock:
-	cpuset_sched_change_end();
+	mutex_unlock(&cpuset_mutex);
 	kernfs_unbreak_active_protection(of->kn);
 	css_put(&cs->css);
 	flush_workqueue(cpuset_migrate_mm_wq);
@@ -2041,14 +2034,14 @@ out_unlock:
 /*
  * If the cpuset being removed has its flag 'sched_load_balance'
  * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains_cpuslocked().
+ * will call rebuild_sched_domains_locked().
  */
 
 static void cpuset_css_offline(struct cgroup_subsys_state *css)
 {
 	struct cpuset *cs = css_cs(css);
 
-	cpuset_sched_change_begin();
+	mutex_lock(&cpuset_mutex);
 
 	if (is_sched_load_balance(cs))
 		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
@@ -2056,7 +2049,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 	cpuset_dec();
 	clear_bit(CS_ONLINE, &cs->flags);
 
-	cpuset_sched_change_end();
+	mutex_unlock(&cpuset_mutex);
 }
 
 static void cpuset_css_free(struct cgroup_subsys_state *css)