author     Tejun Heo <tj@kernel.org>	2010-07-02 04:03:51 -0400
committer  Tejun Heo <tj@kernel.org>	2010-07-02 05:00:08 -0400
commit     c7fc77f78f16d138ca997ce096a62f46e2e9420a
tree       0478e5dde66f6ff86d4baa0fe541748e1a6f1ed2	/kernel/workqueue.c
parent     f34217977d717385a3e9fd7018ac39fade3964c0
workqueue: remove WQ_SINGLE_CPU and use WQ_UNBOUND instead
WQ_SINGLE_CPU combined with @max_active of 1 is used to achieve full
ordering among works queued to a workqueue. The same can be achieved
using WQ_UNBOUND as unbound workqueues always use the gcwq for
WORK_CPU_UNBOUND. As @max_active is always one and the benefit of cpu
locality isn't accessible anyway, serving them with unbound workqueues
should be fine.
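
As a rough illustration of that ordering use case, here is a minimal
sketch with made-up names (my_ordered_wq, my_work_fn); alloc_workqueue()
and WQ_UNBOUND are taken from earlier patches in this series, the rest is
assumed, not part of this patch:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_ordered_wq;	/* hypothetical */

	static void my_work_fn(struct work_struct *work)
	{
		/* items queued on my_ordered_wq run one at a time, in queueing order */
	}

	static DECLARE_WORK(my_work, my_work_fn);

	static int __init my_setup(void)
	{
		/*
		 * Before this patch, strict ordering was requested with
		 * WQ_SINGLE_CPU; after it, WQ_UNBOUND with @max_active of 1
		 * gives the same guarantee because all unbound works are
		 * served by the WORK_CPU_UNBOUND gcwq.
		 */
		my_ordered_wq = alloc_workqueue("my_ordered", WQ_UNBOUND, 1);
		if (!my_ordered_wq)
			return -ENOMEM;

		queue_work(my_ordered_wq, &my_work);
		return 0;
	}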
Drop WQ_SINGLE_CPU support and use WQ_UNBOUND instead. Note that most
single thread workqueue users will be converted to use multithreaded or
non-reentrant workqueues instead, and only the ones which require strict
ordering will keep using WQ_UNBOUND + @max_active of 1.
Signed-off-by: Tejun Heo <tj@kernel.org>
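
For the conversion note above, a hedged sketch of what a typical single
thread user that never needed ordering might become; the names
(cleanup_wq, cleanup_wq_init) are invented, WQ_NON_REENTRANT is the flag
introduced earlier in this series, and the choice of @max_active is only
illustrative:

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *cleanup_wq;	/* hypothetical */

	static int cleanup_wq_init(void)
	{
		/*
		 * Old style, which serialized everything on one thread even
		 * though only non-reentrancy was actually wanted:
		 *
		 *	cleanup_wq = create_singlethread_workqueue("cleanup");
		 *
		 * Converted form: a per-cpu workqueue that only guarantees a
		 * given work item is not running on two cpus at once.
		 * @max_active of 1 here limits in-flight works per cpu, not
		 * system wide.
		 */
		cleanup_wq = alloc_workqueue("cleanup", WQ_NON_REENTRANT, 1);
		if (!cleanup_wq)
			return -ENOMEM;
		return 0;
	}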
Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 100
 1 file changed, 18 insertions(+), 82 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4608563cdd63..20d6237d7498 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -206,8 +206,6 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
-	unsigned long		single_cpu;	/* cpu for single cpu wq */
-
 	cpumask_var_t		mayday_mask;	/* cpus requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
@@ -889,34 +887,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 	wake_up_worker(gcwq);
 }
 
-/**
- * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
- * @cwq: cwq to unbind
- *
- * Try to unbind @cwq from single cpu workqueue processing.  If
- * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- */
-static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
-{
-	struct workqueue_struct *wq = cwq->wq;
-	struct global_cwq *gcwq = cwq->gcwq;
-
-	BUG_ON(wq->single_cpu != gcwq->cpu);
-	/*
-	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
-	 * thaw_workqueues() will either restart processing on this
-	 * cpu or unbind if empty.  This keeps works queued while
-	 * frozen fully ordered and flushable.
-	 */
-	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
-		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
-		wq->single_cpu = WORK_CPU_NONE;
-	}
-}
-
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
@@ -924,20 +894,16 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
 	unsigned long flags;
-	bool arbitrate;
 
 	debug_work_activate(work);
 
-	if (unlikely(cpu == WORK_CPU_UNBOUND))
-		cpu = raw_smp_processor_id();
-
-	/*
-	 * Determine gcwq to use.  SINGLE_CPU is inherently
-	 * NON_REENTRANT, so test it first.
-	 */
-	if (!(wq->flags & (WQ_SINGLE_CPU | WQ_UNBOUND))) {
+	/* determine gcwq to use */
+	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
 
+		if (unlikely(cpu == WORK_CPU_UNBOUND))
+			cpu = raw_smp_processor_id();
+
 		/*
 		 * It's multi cpu.  If @wq is non-reentrant and @work
 		 * was previously on a different cpu, it might still
@@ -962,38 +928,6 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			}
 		} else
 			spin_lock_irqsave(&gcwq->lock, flags);
-	} else if (!(wq->flags & WQ_UNBOUND)) {
-		unsigned int req_cpu = cpu;
-
-		/*
-		 * It's a bit more complex for single cpu workqueues.
-		 * We first need to determine which cpu is going to be
-		 * used.  If no cpu is currently serving this
-		 * workqueue, arbitrate using atomic accesses to
-		 * wq->single_cpu; otherwise, use the current one.
-		 */
-	retry:
-		cpu = wq->single_cpu;
-		arbitrate = cpu == WORK_CPU_NONE;
-		if (arbitrate)
-			cpu = req_cpu;
-
-		gcwq = get_gcwq(cpu);
-		spin_lock_irqsave(&gcwq->lock, flags);
-
-		/*
-		 * The following cmpxchg() is a full barrier paired
-		 * with smp_wmb() in cwq_unbind_single_cpu() and
-		 * guarantees that all changes to wq->st_* fields are
-		 * visible on the new cpu after this point.
-		 */
-		if (arbitrate)
-			cmpxchg(&wq->single_cpu, WORK_CPU_NONE, cpu);
-
-		if (unlikely(wq->single_cpu != cpu)) {
-			spin_unlock_irqrestore(&gcwq->lock, flags);
-			goto retry;
-		}
 	} else {
 		gcwq = get_gcwq(WORK_CPU_UNBOUND);
 		spin_lock_irqsave(&gcwq->lock, flags);
@@ -1105,19 +1039,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct work_struct *work = &dwork->work;
 
 	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-		struct global_cwq *gcwq = get_work_gcwq(work);
-		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
+		unsigned int lcpu;
 
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
 		timer_stats_timer_set_start_info(&dwork->timer);
+
 		/*
 		 * This stores cwq for the moment, for the timer_fn.
 		 * Note that the work's gcwq is preserved to allow
 		 * reentrance detection for delayed works.
 		 */
+		if (!(wq->flags & WQ_UNBOUND)) {
+			struct global_cwq *gcwq = get_work_gcwq(work);
+
+			if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
+				lcpu = gcwq->cpu;
+			else
+				lcpu = raw_smp_processor_id();
+		} else
+			lcpu = WORK_CPU_UNBOUND;
+
 		set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
 		timer->expires = jiffies + delay;
 		timer->data = (unsigned long)dwork;
 		timer->function = delayed_work_timer_fn;
@@ -1696,9 +1641,6 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 		/* one down, submit a delayed one */
 		if (cwq->nr_active < cwq->max_active)
 			cwq_activate_first_delayed(cwq);
-	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
-		/* this was the last work, unbind from single cpu */
-		cwq_unbind_single_cpu(cwq);
 	}
 
 	/* is flush in progress and are we at the flushing tip? */
@@ -2751,7 +2693,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);
-	wq->single_cpu = WORK_CPU_NONE;
 
 	wq->name = name;
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
@@ -3513,11 +3454,6 @@ void thaw_workqueues(void)
 			while (!list_empty(&cwq->delayed_works) &&
 			       cwq->nr_active < cwq->max_active)
 				cwq_activate_first_delayed(cwq);
-
-			/* perform delayed unbind from single cpu if empty */
-			if (wq->single_cpu == gcwq->cpu &&
-			    !cwq->nr_active && list_empty(&cwq->delayed_works))
-				cwq_unbind_single_cpu(cwq);
 		}
 
 		wake_up_worker(gcwq);