author    | Tejun Heo <tj@kernel.org> | 2018-05-23 14:04:54 -0400
committer | Tejun Heo <tj@kernel.org> | 2018-05-23 14:04:54 -0400
commit    | d8742e22902186e30c346b1ba881cb52942ae3e4 (patch)
tree      | a4dd3d558a5eb43dd5ce547ec10a00e9c9bc993a /kernel/cgroup
parent    | cc659e76f375ab158fb682c1e39b2a22bf9f7657 (diff)
cgroup: css_set_lock should nest inside tasklist_lock
cgroup_enable_task_cg_lists() incorrectly nests the non-irq-safe
tasklist_lock inside the irq-safe css_set_lock, triggering the
following lockdep warning:
WARNING: possible irq lock inversion dependency detected
4.17.0-rc1-00027-gb37d049 #6 Not tainted
--------------------------------------------------------
systemd/1 just changed the state of lock:
00000000fe57773b (css_set_lock){..-.}, at: cgroup_free+0xf2/0x12a
but this lock took another, SOFTIRQ-unsafe lock in the past:
(tasklist_lock){.+.+}
and interrupts could create inverse lock ordering between them.
other info that might help us debug this:
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(tasklist_lock);
                               local_irq_disable();
                               lock(css_set_lock);
                               lock(tasklist_lock);
  <Interrupt>
    lock(css_set_lock);

 *** DEADLOCK ***
The condition is highly unlikely to actually happen, especially given
that the path is executed only once per boot. Fix it anyway by making
css_set_lock nest inside tasklist_lock.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Boqun Feng <boqun.feng@gmail.com>
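The subject line states the rule the patch enforces: the irq-safe
css_set_lock must nest inside the softirq-unsafe tasklist_lock, and every
path must take the two locks in that one order. As an illustration only,
here is a minimal userspace analogue of that discipline; the pthread locks
and the names list_lock, set_lock, and walker are hypothetical stand-ins,
not kernel API:

#include <pthread.h>
#include <stdio.h>

/* Stand-ins (hypothetical names): list_lock plays tasklist_lock
 * (outer), set_lock plays css_set_lock (inner).  Because every path
 * takes them in the same order, no thread can hold set_lock while
 * waiting for list_lock, so the inversion lockdep flagged above
 * cannot form. */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  set_lock  = PTHREAD_MUTEX_INITIALIZER;

static void *walker(void *name)
{
	pthread_rwlock_rdlock(&list_lock);	/* outer lock first */
	pthread_mutex_lock(&set_lock);		/* inner lock second */
	printf("%s: walked under both locks\n", (const char *)name);
	pthread_mutex_unlock(&set_lock);	/* release in reverse order */
	pthread_rwlock_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, walker, "thread A");
	pthread_create(&b, NULL, walker, "thread B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with: cc -pthread example.c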
Diffstat (limited to 'kernel/cgroup')
-rw-r--r-- | kernel/cgroup/cgroup.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 04b7e7fad31a..63989cb44566 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1798,13 +1798,6 @@ static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
 
-	spin_lock_irq(&css_set_lock);
-
-	if (use_task_css_set_links)
-		goto out_unlock;
-
-	use_task_css_set_links = true;
-
 	/*
 	 * We need tasklist_lock because RCU is not safe against
 	 * while_each_thread(). Besides, a forking task that has passed
@@ -1813,6 +1806,13 @@ static void cgroup_enable_task_cg_lists(void)
 	 * tasklist if we walk through it with RCU.
 	 */
 	read_lock(&tasklist_lock);
+	spin_lock_irq(&css_set_lock);
+
+	if (use_task_css_set_links)
+		goto out_unlock;
+
+	use_task_css_set_links = true;
+
 	do_each_thread(g, p) {
 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
 			     task_css_set(p) != &init_css_set);
@@ -1840,9 +1840,9 @@ static void cgroup_enable_task_cg_lists(void)
 		}
 		spin_unlock(&p->sighand->siglock);
 	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
 out_unlock:
 	spin_unlock_irq(&css_set_lock);
+	read_unlock(&tasklist_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
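Taken together, the hunks leave cgroup_enable_task_cg_lists() with the
locking shape below; this is a condensed sketch reconstructed from the
diff above, with the per-thread linking body elided:

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	read_lock(&tasklist_lock);	/* outer: softirq-unsafe */
	spin_lock_irq(&css_set_lock);	/* inner: irq-safe */

	if (use_task_css_set_links)
		goto out_unlock;
	use_task_css_set_links = true;

	do_each_thread(g, p) {
		/* ... link each task into its css_set under siglock ... */
	} while_each_thread(g, p);

out_unlock:
	spin_unlock_irq(&css_set_lock);	/* release in reverse order */
	read_unlock(&tasklist_lock);
}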