author    Tejun Heo <tj@kernel.org>    2010-06-29 04:07:14 -0400
committer Tejun Heo <tj@kernel.org>    2010-06-29 04:07:14 -0400
commit    649027d73a6309ac34dc2886362e662bd73456dc (patch)
tree      4faf07773683ff5ec2b120d9070dbbb590199057 /kernel/workqueue.c
parent    dcd989cb73ab0f7b722d64ab6516f101d9f43f88 (diff)
workqueue: implement high priority workqueue
This patch implements a high priority workqueue, which can be requested
with the WQ_HIGHPRI flag on creation.  A high priority workqueue has the
following properties.

* A work queued to it is queued at the head of the worklist of the
  respective gcwq, after other highpri works, while normal works are
  always appended at the end.

* As long as there are highpri works on gcwq->worklist,
  [__]need_more_worker() remains %true and process_one_work() wakes up
  another worker before it starts executing a work.

The above two properties guarantee that works queued to high priority
workqueues are dispatched to workers and start execution as soon as
possible regardless of the state of other works.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
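As background (not part of the patch), below is a minimal sketch of how a
driver might use the new flag, assuming the alloc_workqueue() interface
introduced in the same patch series; the workqueue name, work function and
module boilerplate are illustrative only.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static void urgent_fn(struct work_struct *work)
{
        /* Runs ahead of works queued on normal-priority workqueues
         * that share the same per-cpu gcwq.
         */
        pr_info("highpri work executed\n");
}

static DECLARE_WORK(urgent_work, urgent_fn);
static struct workqueue_struct *highpri_wq;

static int __init highpri_example_init(void)
{
        /* WQ_HIGHPRI: works are inserted at the head of the gcwq
         * worklist (FIFO among highpri works) and GCWQ_HIGHPRI_PENDING
         * is set so an extra worker is woken up promptly.
         */
        highpri_wq = alloc_workqueue("highpri_example", WQ_HIGHPRI, 0);
        if (!highpri_wq)
                return -ENOMEM;

        queue_work(highpri_wq, &urgent_work);
        return 0;
}

static void __exit highpri_example_exit(void)
{
        flush_workqueue(highpri_wq);
        destroy_workqueue(highpri_wq);
}

module_init(highpri_example_init);
module_exit(highpri_example_exit);
MODULE_LICENSE("GPL");

Works queued on such a workqueue land at the head of the shared gcwq
worklist, so they are picked up ahead of normal-priority works already
pending on the same CPU.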
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  70
1 file changed, 64 insertions(+), 6 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c1aa65c2ff38..5775717288d5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -43,6 +43,7 @@ enum {
         GCWQ_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
         GCWQ_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
         GCWQ_FREEZING           = 1 << 3,       /* freeze in progress */
+        GCWQ_HIGHPRI_PENDING    = 1 << 4,       /* highpri works on queue */
 
         /* worker flags */
         WORKER_STARTED          = 1 << 0,       /* started */
@@ -452,15 +453,19 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
  * assume that they're being called with gcwq->lock held.
  */
 
+static bool __need_more_worker(struct global_cwq *gcwq)
+{
+        return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
+                gcwq->flags & GCWQ_HIGHPRI_PENDING;
+}
+
 /*
  * Need to wake up a worker?  Called from anything but currently
  * running workers.
  */
 static bool need_more_worker(struct global_cwq *gcwq)
 {
-        atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
-
-        return !list_empty(&gcwq->worklist) && !atomic_read(nr_running);
+        return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
 }
 
 /* Can I start working?  Called from busy but !running workers. */
@@ -734,6 +739,43 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 }
 
 /**
+ * gcwq_determine_ins_pos - find insertion position
+ * @gcwq: gcwq of interest
+ * @cwq: cwq a work is being queued for
+ *
+ * A work for @cwq is about to be queued on @gcwq, determine insertion
+ * position for the work.  If @cwq is for HIGHPRI wq, the work is
+ * queued at the head of the queue but in FIFO order with respect to
+ * other HIGHPRI works; otherwise, at the end of the queue.  This
+ * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
+ * there are HIGHPRI works pending.
+ *
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock).
+ *
+ * RETURNS:
+ * Pointer to insertion position.
+ */
+static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
+                                        struct cpu_workqueue_struct *cwq)
+{
+        struct work_struct *twork;
+
+        if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
+                return &gcwq->worklist;
+
+        list_for_each_entry(twork, &gcwq->worklist, entry) {
+                struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
+
+                if (!(tcwq->wq->flags & WQ_HIGHPRI))
+                        break;
+        }
+
+        gcwq->flags |= GCWQ_HIGHPRI_PENDING;
+        return &twork->entry;
+}
+
+/**
  * insert_work - insert a work into gcwq
  * @cwq: cwq @work belongs to
  * @work: work to insert
@@ -770,7 +812,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
          */
         smp_mb();
 
-        if (!atomic_read(get_gcwq_nr_running(gcwq->cpu)))
+        if (__need_more_worker(gcwq))
                 wake_up_worker(gcwq);
 }
 
@@ -887,7 +929,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
         if (likely(cwq->nr_active < cwq->max_active)) {
                 cwq->nr_active++;
-                worklist = &gcwq->worklist;
+                worklist = gcwq_determine_ins_pos(gcwq, cwq);
         } else
                 worklist = &cwq->delayed_works;
 
@@ -1526,8 +1568,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 {
         struct work_struct *work = list_first_entry(&cwq->delayed_works,
                                                      struct work_struct, entry);
+        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
-        move_linked_works(work, &cwq->gcwq->worklist, NULL);
+        move_linked_works(work, pos, NULL);
         cwq->nr_active++;
 }
 
@@ -1634,6 +1677,21 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
         set_work_cpu(work, gcwq->cpu);
         list_del_init(&work->entry);
 
+        /*
+         * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
+         * wake up another worker; otherwise, clear HIGHPRI_PENDING.
+         */
+        if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
+                struct work_struct *nwork = list_first_entry(&gcwq->worklist,
+                                                struct work_struct, entry);
+
+                if (!list_empty(&gcwq->worklist) &&
+                    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
+                        wake_up_worker(gcwq);
+                else
+                        gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
+        }
+
         spin_unlock_irq(&gcwq->lock);
 
         work_clear_pending(work);