author    Sasha Levin <sasha.levin@oracle.com>    2012-12-17 10:01:23 -0500
committer Tejun Heo <tj@kernel.org>               2012-12-18 12:21:13 -0500
commit    42f8570f437b65aaf3ef176a38ad7d7fc5847d8b
tree      be5eee8505b195f952afb4d5a7655142a9de1b12 /kernel/workqueue.c
parent    848b81415c42ff3dc9a4204749087b015c37ef66
workqueue: use new hashtable implementation
Switch workqueues to use the new hashtable implementation. This reduces the amount of generic unrelated code in the workqueues.

This patch depends on d9b482c ("hashtable: introduce a small and naive hashtable") which was merged in v3.6.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
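[Annotation, not part of the commit: a minimal sketch of the <linux/hashtable.h> API this patch switches to, using the same call forms that appear in the diff below (DECLARE_HASHTABLE, hash_init, hash_add, hash_for_each_possible, hash_del). The struct and function names here (busy_item, owner, busy_item_find, ...) are invented for illustration; note that this v3.7-era API still passes an explicit struct hlist_node cursor to the iteration macros, exactly as the converted code does.]

#include <linux/hashtable.h>

struct busy_item {
	unsigned long key;		/* what the entry is looked up by */
	struct hlist_node hentry;	/* links the entry into one bucket */
};

struct owner {
	DECLARE_HASHTABLE(busy, 6);	/* 2^6 == 64 buckets, like BUSY_WORKER_HASH_ORDER */
};

static void owner_init(struct owner *o)
{
	hash_init(o->busy);		/* replaces the open-coded INIT_HLIST_HEAD() loop */
}

static void busy_item_insert(struct owner *o, struct busy_item *it)
{
	/* the key is hashed to a bucket internally; no manual shift/fold/mask */
	hash_add(o->busy, &it->hentry, it->key);
}

static struct busy_item *busy_item_find(struct owner *o, unsigned long key)
{
	struct busy_item *it;
	struct hlist_node *tmp;

	/* walks only the bucket the key hashes to */
	hash_for_each_possible(o->busy, it, tmp, hentry, key)
		if (it->key == key)
			return it;
	return NULL;
}

static void busy_item_remove(struct busy_item *it)
{
	hash_del(&it->hentry);
}

Compared with the removed busy_worker_head()/__find_worker_executing_work() pair below, the BUSY_WORKER_HASH_SIZE/BUSY_WORKER_HASH_MASK constants and the hand-rolled shift-and-fold hash disappear because bucket selection happens inside hash_add() and hash_for_each_possible().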
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  |  86
1 file changed, 15 insertions(+), 71 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index fbc6576a83c3..acd417be8199 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
 #include "workqueue_sched.h"
 
@@ -82,8 +83,6 @@ enum {
 	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
 
 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
-	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
-	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
 
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
@@ -180,7 +179,7 @@ struct global_cwq {
 	unsigned int		flags;		/* L: GCWQ_* flags */
 
 	/* workers are chained either in busy_hash or pool idle_list */
-	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
+	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
 	struct worker_pool	pools[NR_WORKER_POOLS];
@@ -285,8 +284,7 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, gcwq)			\
-	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
-		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+	hash_for_each(gcwq->busy_hash, i, pos, worker, hentry)
 
 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 				  unsigned int sw)
@@ -859,63 +857,6 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 }
 
 /**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-					   struct work_struct *work)
-{
-	const int base_shift = ilog2(sizeof(struct work_struct));
-	unsigned long v = (unsigned long)work;
-
-	/* simple shift and fold hash, do we need something better? */
-	v >>= base_shift;
-	v += v >> BUSY_WORKER_HASH_ORDER;
-	v &= BUSY_WORKER_HASH_MASK;
-
-	return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-						   struct hlist_head *bwh,
-						   struct work_struct *work)
-{
-	struct worker *worker;
-	struct hlist_node *tmp;
-
-	hlist_for_each_entry(worker, tmp, bwh, hentry)
-		if (worker->current_work == work)
-			return worker;
-	return NULL;
-}
-
-/**
  * find_worker_executing_work - find worker which is executing a work
  * @gcwq: gcwq of interest
  * @work: work to find worker for
@@ -934,8 +875,14 @@ static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 						 struct work_struct *work)
 {
-	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-					    work);
+	struct worker *worker;
+	struct hlist_node *tmp;
+
+	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry, (unsigned long)work)
+		if (worker->current_work == work)
+			return worker;
+
+	return NULL;
 }
 
 /**
@@ -2166,7 +2113,6 @@ __acquires(&gcwq->lock)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct worker_pool *pool = worker->pool;
 	struct global_cwq *gcwq = pool->gcwq;
-	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
@@ -2198,7 +2144,7 @@ __acquires(&gcwq->lock)
 	 * already processing the work.  If so, defer the work to the
 	 * currently executing one.
 	 */
-	collision = __find_worker_executing_work(gcwq, bwh, work);
+	collision = find_worker_executing_work(gcwq, work);
 	if (unlikely(collision)) {
 		move_linked_works(work, &collision->scheduled, NULL);
 		return;
@@ -2206,7 +2152,7 @@ __acquires(&gcwq->lock)
 
 	/* claim and dequeue */
 	debug_work_deactivate(work);
-	hlist_add_head(&worker->hentry, bwh);
+	hash_add(gcwq->busy_hash, &worker->hentry, (unsigned long)worker);
 	worker->current_work = work;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
@@ -2264,7 +2210,7 @@ __acquires(&gcwq->lock)
 	worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/* we're done with it, release */
-	hlist_del_init(&worker->hentry);
+	hash_del(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color);
@@ -3831,7 +3777,6 @@ out_unlock:
 static int __init init_workqueues(void)
 {
 	unsigned int cpu;
-	int i;
 
 	/* make sure we have enough bits for OFFQ CPU number */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
@@ -3849,8 +3794,7 @@ static int __init init_workqueues(void)
 		gcwq->cpu = cpu;
 		gcwq->flags |= GCWQ_DISASSOCIATED;
 
-		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
+		hash_init(gcwq->busy_hash);
 
 		for_each_worker_pool(pool, gcwq) {
 			pool->gcwq = gcwq;
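[Annotation, not part of the commit: an illustrative, hypothetical caller of the converted for_each_busy_worker() iterator from the hunk above; count_busy_workers() is invented for this example. The bucket index i and the struct hlist_node cursor pos are still required by hash_for_each() in this kernel version, and gcwq->lock must be held, as the "L:" locking comment on busy_hash indicates.]

/* caller must hold gcwq->lock */
static void count_busy_workers(struct global_cwq *gcwq)
{
	struct worker *worker;
	struct hlist_node *pos;
	int i, n = 0;

	/* hash_for_each() underneath: visits every bucket and every entry */
	for_each_busy_worker(worker, i, pos, gcwq)
		n++;

	pr_info("gcwq %d: %d busy workers\n", gcwq->cpu, n);
}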