about summary refs log tree commit diff stats
path: root/kernel/task_work.c
diff options
context:
space:
mode:
authorAl Viro <viro@zeniv.linux.org.uk>2012-06-27 03:33:29 -0400
committerAl Viro <viro@zeniv.linux.org.uk>2012-07-22 15:57:57 -0400
commita2d4c71d1559426155e5da8db3265bfa0d8d398d (patch)
treee00739a549bd68afeff0685cb9998834b5eca877 /kernel/task_work.c
parented3e694d78cc75fa79bf29698631b146fd27aa35 (diff)
deal with task_work callbacks adding more work
It doesn't matter on normal return to userland path (we'll recheck the NOTIFY_RESUME flag anyway), but in case of exit_task_work() we'll need that as soon as we get callbacks capable of triggering more task_work_add(). Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'kernel/task_work.c')
-rw-r--r--kernel/task_work.c26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index fb396089f66..91d4e1742a0 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -60,19 +60,21 @@ void task_work_run(void)
 	struct task_struct *task = current;
 	struct callback_head *p, *q;
 
-	raw_spin_lock_irq(&task->pi_lock);
-	p = task->task_works;
-	task->task_works = NULL;
-	raw_spin_unlock_irq(&task->pi_lock);
+	while (1) {
+		raw_spin_lock_irq(&task->pi_lock);
+		p = task->task_works;
+		task->task_works = NULL;
+		raw_spin_unlock_irq(&task->pi_lock);
 
-	if (unlikely(!p))
-		return;
+		if (unlikely(!p))
+			return;
 
-	q = p->next; /* head */
-	p->next = NULL; /* cut it */
-	while (q) {
-		p = q->next;
-		q->func(q);
-		q = p;
+		q = p->next; /* head */
+		p->next = NULL; /* cut it */
+		while (q) {
+			p = q->next;
+			q->func(q);
+			q = p;
+		}
 	}
 }