Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/fork.c       |  2 +-
 -rw-r--r--  kernel/task_work.c  | 64 +++++++++++++++++++++++++++++++++-------------------------------
 2 files changed, 34 insertions(+), 32 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index ab5211b9e622..bebabad59202 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1415,7 +1415,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 */
 	p->group_leader = p;
 	INIT_LIST_HEAD(&p->thread_group);
-	INIT_HLIST_HEAD(&p->task_works);
+	p->task_works = NULL;
 
 	/* Now that the task is set up, run cgroup callbacks if
 	 * necessary. We need to run them before the task is visible
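
The fork.c hunk is the whole representation change in miniature: the per-task queue is no longer an hlist head but a single pointer, and an empty queue is plain NULL. The pointer tracks the last node of a circular singly-linked list, so the head stays reachable in O(1) as last->next. A minimal sketch of the node layout this implies (field names follow the patch; everything else here is illustrative, not the kernel's actual header):

struct task_work;
typedef void (*task_work_func_t)(struct task_work *);

struct task_work {
	struct task_work *next;	/* successor in the circle; head == last->next */
	task_work_func_t func;	/* callback invoked by task_work_run() */
};

/* Per-task queue: NULL when empty, else points at the LAST queued node,
 * matching "p->task_works = NULL;" in the hunk above. */
struct task_work *task_works = NULL;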
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 82d1c794066d..9b8948dbdc60 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -19,7 +19,12 @@ task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	if (likely(!(task->flags & PF_EXITING))) {
-		hlist_add_head(&twork->hlist, &task->task_works);
+		struct task_work *last = task->task_works;
+		struct task_work *first = last ? last->next : twork;
+		twork->next = first;
+		if (last)
+			last->next = twork;
+		task->task_works = twork;
 		err = 0;
 	}
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
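
Because ->task_works points at the last node, task_work_add() splices in FIFO position in O(1): the new node's next becomes the old head (or the node itself when the queue was empty), the old last is linked to it, and it becomes the new last. A standalone sketch of just that splice, with ->pi_lock and the PF_EXITING check stripped out (the helper name work_enqueue is mine, not the kernel's):

struct task_work;
typedef void (*task_work_func_t)(struct task_work *);

struct task_work {
	struct task_work *next;
	task_work_func_t func;
};

/* *works points at the last node of a circular list, NULL when empty. */
static void work_enqueue(struct task_work **works, struct task_work *twork)
{
	struct task_work *last = *works;
	struct task_work *first = last ? last->next : twork;

	twork->next = first;		/* new last wraps around to the head */
	if (last)
		last->next = twork;	/* old last now points at the new node */
	*works = twork;			/* the new node becomes the last */
}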
@@ -34,51 +39,48 @@ struct task_work *
 task_work_cancel(struct task_struct *task, task_work_func_t func)
 {
 	unsigned long flags;
-	struct task_work *twork;
-	struct hlist_node *pos;
+	struct task_work *last, *res = NULL;
 
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
-		if (twork->func == func) {
-			hlist_del(&twork->hlist);
-			goto found;
+	last = task->task_works;
+	if (last) {
+		struct task_work *q = last, *p = q->next;
+		while (1) {
+			if (p->func == func) {
+				q->next = p->next;
+				if (p == last)
+					task->task_works = q == p ? NULL : q;
+				res = p;
+				break;
+			}
+			if (p == last)
+				break;
+			q = p;
+			p = q->next;
 		}
 	}
-	twork = NULL;
-found:
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
-	return twork;
+	return res;
 }
 
 void task_work_run(void)
 {
 	struct task_struct *task = current;
-	struct hlist_head task_works;
-	struct hlist_node *pos;
+	struct task_work *p, *q;
 
 	raw_spin_lock_irq(&task->pi_lock);
-	hlist_move_list(&task->task_works, &task_works);
+	p = task->task_works;
+	task->task_works = NULL;
 	raw_spin_unlock_irq(&task->pi_lock);
 
-	if (unlikely(hlist_empty(&task_works)))
+	if (unlikely(!p))
 		return;
-	/*
-	 * We use hlist to save the space in task_struct, but we want fifo.
-	 * Find the last entry, the list should be short, then process them
-	 * in reverse order.
-	 */
-	for (pos = task_works.first; pos->next; pos = pos->next)
-		;
 
-	for (;;) {
-		struct hlist_node **pprev = pos->pprev;
-		struct task_work *twork = container_of(pos, struct task_work,
-						       hlist);
-		twork->func(twork);
 
-		if (pprev == &task_works.first)
-			break;
-		pos = container_of(pprev, struct hlist_node, next);
+	q = p->next;	/* head */
+	p->next = NULL;	/* cut it */
+	while (q) {
+		p = q->next;
+		q->func(q);
+		q = p;
 	}
 }
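
Two details in this hunk deserve a note. In task_work_cancel(), deleting the last node splits into sub-cases: if it was the only node (q == p) the queue becomes NULL, otherwise its predecessor q becomes the new last, which is what the "task->task_works = q == p ? NULL : q;" line encodes. In task_work_run(), the whole circle is detached under ->pi_lock, then broken open (p->next = NULL, the "cut it" comment) so a plain NULL-terminated walk from the head runs the works in FIFO order; the next pointer is saved before each callback because func() may free its node. A standalone sketch of that drain under the same assumptions as the sketches above (the harness in main() is hypothetical):

#include <stdio.h>

struct task_work;
typedef void (*task_work_func_t)(struct task_work *);

struct task_work {
	struct task_work *next;
	task_work_func_t func;
};

/* Drain *works the way task_work_run() does, minus the locking. */
static void work_run(struct task_work **works)
{
	struct task_work *p = *works, *q;

	*works = NULL;		/* detach the whole queue at once */
	if (!p)
		return;

	q = p->next;		/* head: the successor of the last node */
	p->next = NULL;		/* cut the circle so the walk terminates */
	while (q) {
		p = q->next;	/* save next first: func() may free q */
		q->func(q);
		q = p;
	}
}

static void say(struct task_work *w)
{
	printf("ran %p\n", (void *)w);
}

int main(void)
{
	struct task_work a = { .func = say }, b = { .func = say };

	/* Build the circle a -> b -> a by hand; b is the last, so the
	 * head is b->next == &a and FIFO order is a, then b. */
	a.next = &b;
	b.next = &a;
	struct task_work *works = &b;

	work_run(&works);	/* prints a's address, then b's */
	return 0;
}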