author    Oleg Nesterov <oleg@redhat.com>    2016-08-02 17:03:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-08-02 19:35:02 -0400
commit    61e96496d3c949701a48b908f99f4ed891cd1101 (patch)
tree      bf282d3036968de0bfd834914d712b6875fff7e3 /kernel
parent    949bed2f5764435715e3d6dd3ab6dd4dbd890a71 (diff)
task_work: use READ_ONCE/lockless_dereference, avoid pi_lock if !task_works
Change task_work_cancel() to use lockless_dereference(); this is what the code really wants, but we didn't have this helper when it was written.

Also add a fast-path task->task_works == NULL check: in the likely case this task has no pending works, we can avoid taking spin_lock(task->pi_lock) altogether.

While at it, change the other users of ACCESS_ONCE() to use READ_ONCE().

Link: http://lkml.kernel.org/r/20160610150042.GA13868@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
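Editorial note on the ACCESS_ONCE() -> READ_ONCE() conversion: both force the compiler to emit exactly one, untorn load, but ACCESS_ONCE() only works for scalar types, which is why new code prefers READ_ONCE(). A simplified sketch (the real READ_ONCE() in include/linux/compiler.h goes through __read_once_size() so that it also handles aggregate types):

#define ACCESS_ONCE(x)  (*(volatile typeof(x) *)&(x))   /* scalars only */
#define READ_ONCE(x)    ACCESS_ONCE(x)                  /* same idea for scalars; the real version is more general */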
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/task_work.c    10
1 file changed, 6 insertions(+), 4 deletions(-)
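For context: lockless_dereference() bundles READ_ONCE() with the data-dependency barrier that the old loop issued by hand, which is why the separate smp_read_barrier_depends() line disappears below. At the time of this commit it was defined in include/linux/compiler.h essentially as:

#define lockless_dereference(p) \
({ \
	typeof(p) _________p1 = READ_ONCE(p); \
	smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
	(_________p1); \
})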
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 6ab4842b00e8..d513051fcca2 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -29,7 +29,7 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
 	struct callback_head *head;
 
 	do {
-		head = ACCESS_ONCE(task->task_works);
+		head = READ_ONCE(task->task_works);
 		if (unlikely(head == &work_exited))
 			return -ESRCH;
 		work->next = head;
@@ -57,6 +57,9 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	struct callback_head **pprev = &task->task_works;
 	struct callback_head *work;
 	unsigned long flags;
+
+	if (likely(!task->task_works))
+		return NULL;
 	/*
 	 * If cmpxchg() fails we continue without updating pprev.
 	 * Either we raced with task_work_add() which added the
@@ -64,8 +67,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 * we raced with task_work_run(), *pprev == NULL/exited.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	while ((work = ACCESS_ONCE(*pprev))) {
-		smp_read_barrier_depends();
+	while ((work = lockless_dereference(*pprev))) {
 		if (work->func != func)
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
@@ -95,7 +97,7 @@ void task_work_run(void)
 	 * work_exited unless the list is empty.
 	 */
 	do {
-		work = ACCESS_ONCE(task->task_works);
+		work = READ_ONCE(task->task_works);
 		head = !work && (task->flags & PF_EXITING) ?
 		       &work_exited : NULL;
 	} while (cmpxchg(&task->task_works, work, head) != work);
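As a rough illustration of the resulting cancel path, here is a minimal user-space analogue in C11; all cb_* names are invented for this sketch, a pthread mutex stands in for pi_lock, and memory_order_consume plays the role of lockless_dereference(). It omits the work_exited handling and sketches the pattern, not the kernel implementation:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* Hypothetical analogue of struct callback_head. */
struct cb_head {
	_Atomic(struct cb_head *) next;
	void (*func)(struct cb_head *);
};

static _Atomic(struct cb_head *) cb_works;              /* task->task_works */
static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER;

static struct cb_head *cb_cancel(void (*func)(struct cb_head *))
{
	_Atomic(struct cb_head *) *pprev = &cb_works;
	struct cb_head *work;

	/* The patch's fast path: nothing queued, never touch the lock. */
	if (!atomic_load_explicit(&cb_works, memory_order_relaxed))
		return NULL;

	pthread_mutex_lock(&cb_lock);   /* serializes concurrent cancels */
	/* consume ordering: loads that depend on 'work' are ordered after
	 * this load, which is what lockless_dereference() guarantees. */
	while ((work = atomic_load_explicit(pprev, memory_order_consume))) {
		if (work->func != func) {
			pprev = &work->next;
		} else {
			/* cmpxchg() analogue; on failure just re-read *pprev. */
			struct cb_head *expected = work;
			if (atomic_compare_exchange_strong(pprev, &expected,
					atomic_load_explicit(&work->next,
							     memory_order_relaxed)))
				break;
		}
	}
	pthread_mutex_unlock(&cb_lock);
	return work;
}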