author    | Oleg Nesterov <oleg@tv-sign.ru>                | 2008-07-25 04:47:47 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-07-25 13:53:40 -0400
commit    | 1a4d9b0aa0d3c50314e57525a5e5ec2cfc48b4c8 (patch)
tree      | 2ea94cce9fe2cfbcfdc2fd25d33e57f31e0b2699 /kernel/workqueue.c
parent    | 565b9b14e7f48131bca58840aa404bbef058fa89 (diff)
workqueues: insert_work: use "list_head *" instead of "int tail"
insert_work() inserts the new work_struct before or after cwq->worklist,
depending on the "int tail" parameter. Change it to accept a "list_head *"
instead; this shrinks .text a bit and allows us to insert the barrier
after a specific work_struct.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Jarek Poplawski <jarkao2@gmail.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 17
1 file changed, 7 insertions, 10 deletions
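
The commit message's claim that a single "list_head *head" argument can replace both branches of the old "int tail" flag rests on a property of the kernel's circular lists: list_add_tail(e, head) links e immediately before head, so passing the list head itself appends at the tail, while passing list->next inserts at the front, exactly like list_add(e, &list). The standalone userspace sketch below re-implements just enough of the list primitives to demonstrate this; the helpers and the toy struct work are mocks for illustration, mirroring the semantics of include/linux/list.h rather than reproducing the kernel code.

/*
 * Standalone userspace sketch (not kernel code): re-implements the
 * list_head insert primitives with the same semantics as
 * include/linux/list.h, only to show why one list_add_tail(e, head)
 * call covers both branches of the old "int tail" flag:
 *
 *   list_add_tail(e, &list)      == old tail == 1  (append at the end)
 *   list_add_tail(e, list.next)  == list_add(e, &list) == old tail == 0
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

/* Link 'new' between 'prev' and 'next'. */
static void __list_add(struct list_head *new,
			struct list_head *prev, struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

/* Insert right after 'head'. */
static void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* Insert right before 'head'. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

/* Toy stand-in for work_struct; 'entry' is the first member, so a plain
 * cast replaces container_of() in this sketch. */
struct work {
	struct list_head entry;
	const char *name;
};

static void dump(const char *what, struct list_head *list)
{
	struct list_head *pos;

	printf("%-10s:", what);
	for (pos = list->next; pos != list; pos = pos->next)
		printf(" %s", ((struct work *)pos)->name);
	printf("\n");
}

int main(void)
{
	struct list_head worklist;
	struct work a = { .name = "A" }, b = { .name = "B" };
	struct work t = { .name = "T" }, h = { .name = "H" };

	INIT_LIST_HEAD(&worklist);
	list_add_tail(&a.entry, &worklist);
	list_add_tail(&b.entry, &worklist);
	dump("initial", &worklist);		/* A B */

	/* Old tail == 1: append at the end of the queue. */
	list_add_tail(&t.entry, &worklist);
	dump("tail == 1", &worklist);		/* A B T */

	/* Old tail == 0, i.e. list_add(): insert at the front.  Passing
	 * worklist.next links the new entry just before the current first
	 * element -- the same position list_add() would pick. */
	list_add_tail(&h.entry, worklist.next);
	dump("tail == 0", &worklist);		/* H A B T */

	return 0;
}

With that equivalence, the patch below can pass &cwq->worklist where the old callers used tail == 1 and cwq->worklist.next where wait_on_cpu_work() used tail == 0, while a future caller can point head at any entry already on the list.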
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6fd158b21026..d9a2d65cc63e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,7 +125,7 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 }
 
 static void insert_work(struct cpu_workqueue_struct *cwq,
-			struct work_struct *work, int tail)
+			struct work_struct *work, struct list_head *head)
 {
 	set_wq_data(work, cwq);
 	/*
@@ -133,10 +133,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	 * result of list_add() below, see try_to_grab_pending().
 	 */
 	smp_wmb();
-	if (tail)
-		list_add_tail(&work->entry, &cwq->worklist);
-	else
-		list_add(&work->entry, &cwq->worklist);
+	list_add_tail(&work->entry, head);
 	wake_up(&cwq->more_work);
 }
 
@@ -146,7 +143,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	unsigned long flags;
 
 	spin_lock_irqsave(&cwq->lock, flags);
-	insert_work(cwq, work, 1);
+	insert_work(cwq, work, &cwq->worklist);
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
@@ -361,14 +358,14 @@ static void wq_barrier_func(struct work_struct *work)
 }
 
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
-			struct wq_barrier *barr, int tail)
+			struct wq_barrier *barr, struct list_head *head)
 {
 	INIT_WORK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
 
-	insert_work(cwq, &barr->work, tail);
+	insert_work(cwq, &barr->work, head);
 }
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -388,7 +385,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 	active = 0;
 	spin_lock_irq(&cwq->lock);
 	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-		insert_wq_barrier(cwq, &barr, 1);
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
 		active = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
@@ -473,7 +470,7 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
 
 	spin_lock_irq(&cwq->lock);
 	if (unlikely(cwq->current_work == work)) {
-		insert_wq_barrier(cwq, &barr, 0);
+		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
 		running = 1;
 	}
 	spin_unlock_irq(&cwq->lock);
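
For reference, the call sites after this patch map onto the old behaviour as sketched below. The last caller is hypothetical (hinted at by the commit message, not part of this patch) and assumes cwq, barr, and a still-pending work item are in scope; this is an illustrative kernel-context fragment, not standalone code.

	/* __queue_work() and flush_cpu_workqueue(): formerly passed tail == 1,
	 * i.e. append at the tail of the queue.
	 */
	insert_work(cwq, work, &cwq->worklist);
	insert_wq_barrier(cwq, &barr, &cwq->worklist);

	/* wait_on_cpu_work(): formerly insert_wq_barrier(cwq, &barr, 0), i.e.
	 * insert at the head.  list_add_tail() links the barrier just before
	 * cwq->worklist.next, which is exactly where list_add() put it.
	 */
	insert_wq_barrier(cwq, &barr, cwq->worklist.next);

	/* Hypothetical future caller (not in this patch): place the barrier
	 * immediately after a specific pending work item.
	 */
	insert_wq_barrier(cwq, &barr, work->entry.next);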