Diffstat (limited to 'kernel/task_work.c')
 kernel/task_work.c | 111 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 61 insertions(+), 50 deletions(-)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index d320d44903bd..65bd3c92d6f3 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -2,26 +2,20 @@
 #include <linux/task_work.h>
 #include <linux/tracehook.h>
 
+static struct callback_head work_exited; /* all we need is ->next == NULL */
+
 int
-task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
+task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
 {
-	struct callback_head *last, *first;
-	unsigned long flags;
+	struct callback_head *head;
 
-	/*
-	 * Not inserting the new work if the task has already passed
-	 * exit_task_work() is the responisbility of callers.
-	 */
-	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	last = task->task_works;
-	first = last ? last->next : twork;
-	twork->next = first;
-	if (last)
-		last->next = twork;
-	task->task_works = twork;
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+	do {
+		head = ACCESS_ONCE(task->task_works);
+		if (unlikely(head == &work_exited))
+			return -ESRCH;
+		work->next = head;
+	} while (cmpxchg(&task->task_works, head, work) != head);
 
-	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
 	if (notify)
 		set_notify_resume(task);
 	return 0;
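
The rewritten task_work_add() above is a textbook lock-free LIFO push: snapshot the head, link the new entry in front of it, and publish it with cmpxchg(), retrying if another CPU changed the head in between. The work_exited sentinel makes "the task is past exit" detectable inside the same loop. As a rough user-space sketch of the same loop, with C11 atomics standing in for the kernel's ACCESS_ONCE()/cmpxchg() and made-up names (struct node, list_exited, lifo_push):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node {
	struct node *next;
};

/* Plays the role of work_exited: a sentinel address meaning
 * "no more pushes accepted". */
static struct node list_exited;
static _Atomic(struct node *) list_head = NULL;

static bool lifo_push(struct node *n)
{
	struct node *head = atomic_load(&list_head);

	do {
		if (head == &list_exited)
			return false;	/* like -ESRCH */
		n->next = head;
		/* On failure the CAS reloads the current head into
		 * 'head' and we retry, as the kernel loop does. */
	} while (!atomic_compare_exchange_weak(&list_head, &head, n));

	return true;
}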
@@ -30,52 +24,69 @@ task_work_add(struct task_struct *task, struct callback_head *twork, bool notify
 struct callback_head *
 task_work_cancel(struct task_struct *task, task_work_func_t func)
 {
+	struct callback_head **pprev = &task->task_works;
+	struct callback_head *work = NULL;
 	unsigned long flags;
-	struct callback_head *last, *res = NULL;
-
+	/*
+	 * If cmpxchg() fails we continue without updating pprev.
+	 * Either we raced with task_work_add() which added the
+	 * new entry before this work, we will find it again. Or
+	 * we raced with task_work_run(), *pprev == NULL/exited.
+	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	last = task->task_works;
-	if (last) {
-		struct callback_head *q = last, *p = q->next;
-		while (1) {
-			if (p->func == func) {
-				q->next = p->next;
-				if (p == last)
-					task->task_works = q == p ? NULL : q;
-				res = p;
-				break;
-			}
-			if (p == last)
-				break;
-			q = p;
-			p = q->next;
-		}
+	while ((work = ACCESS_ONCE(*pprev))) {
+		read_barrier_depends();
+		if (work->func != func)
+			pprev = &work->next;
+		else if (cmpxchg(pprev, work, work->next) == work)
+			break;
 	}
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-	return res;
+
+	return work;
 }
 
 void task_work_run(void)
 {
 	struct task_struct *task = current;
-	struct callback_head *p, *q;
+	struct callback_head *work, *head, *next;
+
+	for (;;) {
+		/*
+		 * work->func() can do task_work_add(), do not set
+		 * work_exited unless the list is empty.
+		 */
+		do {
+			work = ACCESS_ONCE(task->task_works);
+			head = !work && (task->flags & PF_EXITING) ?
+				&work_exited : NULL;
+		} while (cmpxchg(&task->task_works, work, head) != work);
 
-	while (1) {
-		raw_spin_lock_irq(&task->pi_lock);
-		p = task->task_works;
-		task->task_works = NULL;
-		raw_spin_unlock_irq(&task->pi_lock);
+		if (!work)
+			break;
+		/*
+		 * Synchronize with task_work_cancel(). It can't remove
+		 * the first entry == work, cmpxchg(task_works) should
+		 * fail, but it can play with *work and other entries.
+		 */
+		raw_spin_unlock_wait(&task->pi_lock);
+		smp_mb();
 
-		if (unlikely(!p))
-			return;
+		/* Reverse the list to run the works in fifo order */
+		head = NULL;
+		do {
+			next = work->next;
+			work->next = head;
+			head = work;
+			work = next;
+		} while (work);
 
-		q = p->next; /* head */
-		p->next = NULL; /* cut it */
-		while (q) {
-			p = q->next;
-			q->func(q);
-			q = p;
+		work = head;
+		do {
+			next = work->next;
+			work->func(work);
+			work = next;
 			cond_resched();
-		}
+		} while (work);
 	}
 }
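
The new task_work_cancel() walks the list through a pointer-to-link (pprev), so removing an entry is a single cmpxchg() on whichever slot points at it, whether that is task->task_works itself or a predecessor's ->next. A rough illustration of that walk, again with hypothetical names (struct cb, cb_head, cb_cancel) and C11 atomics; note that this fragment is only a single-threaded demonstration of the algorithm, since the real function additionally holds ->pi_lock so that a concurrent task_work_run() cannot free entries out from under the walker:

#include <stdatomic.h>
#include <stddef.h>

struct cb {
	_Atomic(struct cb *) next;
	void (*func)(struct cb *);
};

static _Atomic(struct cb *) cb_head = NULL;

/* Remove and return the first entry whose handler is @func. If
 * the CAS fails we retry from the same link, as the kernel
 * comment describes: either an add put a new entry in front of
 * us (we will find ours again) or a drain emptied the list. */
static struct cb *cb_cancel(void (*func)(struct cb *))
{
	_Atomic(struct cb *) *pprev = &cb_head;
	struct cb *work;

	while ((work = atomic_load(pprev)) != NULL) {
		if (work->func != func) {
			pprev = &work->next;
		} else {
			struct cb *old = work;
			if (atomic_compare_exchange_strong(pprev, &old,
					atomic_load(&work->next)))
				break;
		}
	}
	return work;
}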
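
Finally, task_work_run() drains by atomically detaching the whole list, reversing it (entries are pushed LIFO but callbacks should run in queueing order), and only then invoking the handlers. A sketch of that drain, reusing the hypothetical struct cb and cb_head from the previous fragment; a plain atomic exchange suffices here because this sketch does not need the kernel's cmpxchg loop, which exists only to conditionally install the work_exited sentinel on an empty list:

/* Detach everything queued so far, reverse, then run in FIFO order. */
static void cb_run(void)
{
	struct cb *work = atomic_exchange(&cb_head, NULL);
	struct cb *head = NULL, *next;

	/* Reverse: pop from the detached LIFO, push onto head. */
	while (work) {
		next = atomic_load(&work->next);
		atomic_store(&work->next, head);
		head = work;
		work = next;
	}

	/* Invoke each handler in submission order. */
	while (head) {
		next = atomic_load(&head->next);
		head->func(head);
		head = next;
	}
}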