-rw-r--r--	kernel/workqueue.c		32
-rw-r--r--	kernel/workqueue_internal.h	37
2 files changed, 38 insertions(+), 31 deletions(-)
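In short, this commit moves the definition of struct worker, together with the forward declarations it relies on, from kernel/workqueue.c into kernel/workqueue_internal.h, leaving only a pointer comment behind. The new header comment ("Only to be used in workqueue and async") states the point of the move: a second in-tree user needs to see the struct's members. A minimal user-space sketch of the same internal-header pattern, with hypothetical file and member names rather than the kernel sources, looks like this:

	/* internal.h: visible only to the subsystem's own .c files */
	#ifndef INTERNAL_H
	#define INTERNAL_H

	struct worker {
		int id;				/* worker identity */
		void (*current_func)(void *);	/* callback being run, if any */
	};

	#endif /* INTERNAL_H */

	/* core.c: the original owner; its behavior is unchanged by the move */
	#include "internal.h"

	/* consumer.c: a second .c file that can now inspect the members */
	#include "internal.h"

	int worker_is_running(const struct worker *w)
	{
		return w->current_func != 0;
	}

Nothing under include/ changes; struct worker stays private to the kernel/ directory.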
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b4e92061a934..2ffa240052fa 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -122,37 +122,7 @@ enum {
  * W: workqueue_lock protected.
  */
 
-struct global_cwq;
-struct worker_pool;
-
-/*
- * The poor guys doing the actual heavy lifting. All on-duty workers
- * are either serving the manager role, on idle list or on busy hash.
- */
-struct worker {
-	/* on idle list while idle, on busy hash table while busy */
-	union {
-		struct list_head	entry;		/* L: while idle */
-		struct hlist_node	hentry;		/* L: while busy */
-	};
-
-	struct work_struct	*current_work;	/* L: work being processed */
-	work_func_t		current_func;	/* L: current_work's fn */
-	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
-	struct list_head	scheduled;	/* L: scheduled works */
-	struct task_struct	*task;		/* I: worker task */
-	struct worker_pool	*pool;		/* I: the associated pool */
-	/* 64 bytes boundary on 64bit, 32 on 32bit */
-	unsigned long		last_active;	/* L: last active timestamp */
-	unsigned int		flags;		/* X: flags */
-	int			id;		/* I: worker id */
-
-	/* for rebinding worker to CPU */
-	struct work_struct	rebind_work;	/* L: for busy worker */
-
-	/* used only by rescuers to point to the target workqueue */
-	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
-};
+/* struct worker is defined in workqueue_internal.h */
 
 struct worker_pool {
 	struct global_cwq	*gcwq;		/* I: the owning gcwq */
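Note that the hunk above also drops the forward declarations of struct global_cwq and struct worker_pool from workqueue.c; they reappear in the header below. That only compiles because workqueue.c pulls the header in, an include that sits outside the context shown here, so treat the following line as an assumption about the surrounding file rather than as part of this diff:

	/* near the top of kernel/workqueue.c (not visible in this hunk) */
	#include "workqueue_internal.h"	/* struct worker, forward decls */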
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index b3ea6ad5566b..02549fa04587 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -7,6 +7,43 @@
 #ifndef _KERNEL_WORKQUEUE_INTERNAL_H
 #define _KERNEL_WORKQUEUE_INTERNAL_H
 
+#include <linux/workqueue.h>
+
+struct global_cwq;
+struct worker_pool;
+
+/*
+ * The poor guys doing the actual heavy lifting. All on-duty workers are
+ * either serving the manager role, on idle list or on busy hash. For
+ * details on the locking annotation (L, I, X...), refer to workqueue.c.
+ *
+ * Only to be used in workqueue and async.
+ */
+struct worker {
+	/* on idle list while idle, on busy hash table while busy */
+	union {
+		struct list_head	entry;		/* L: while idle */
+		struct hlist_node	hentry;		/* L: while busy */
+	};
+
+	struct work_struct	*current_work;	/* L: work being processed */
+	work_func_t		current_func;	/* L: current_work's fn */
+	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+	struct list_head	scheduled;	/* L: scheduled works */
+	struct task_struct	*task;		/* I: worker task */
+	struct worker_pool	*pool;		/* I: the associated pool */
+	/* 64 bytes boundary on 64bit, 32 on 32bit */
+	unsigned long		last_active;	/* L: last active timestamp */
+	unsigned int		flags;		/* X: flags */
+	int			id;		/* I: worker id */
+
+	/* for rebinding worker to CPU */
+	struct work_struct	rebind_work;	/* L: for busy worker */
+
+	/* used only by rescuers to point to the target workqueue */
+	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
+};
+
 /*
  * Scheduler hooks for concurrency managed workqueue. Only to be used from
  * sched.c and workqueue.c.
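The "Only to be used in workqueue and async" note above implies a consumer outside workqueue.c that inspects a live worker. A hedged sketch of what such a consumer could look like follows; the helper names, the PF_WQ_WORKER test, and the kthread_data() lookup mirror common kernel practice, but none of this is part of the commit shown here:

	/* hypothetical-consumer.c: not part of this diff */
	#include <linux/kthread.h>	/* kthread_data() */
	#include <linux/sched.h>	/* current, PF_WQ_WORKER */
	#include "workqueue_internal.h"

	/*
	 * Return the struct worker backing current, or NULL when current is
	 * not a workqueue worker. Workers are kthreads, and kthread_data()
	 * hands back the private pointer their kthread was created with.
	 */
	static struct worker *current_wq_worker(void)
	{
		if (current->flags & PF_WQ_WORKER)
			return kthread_data(current);
		return NULL;
	}

	/* Example use: is the code running right now a given work callback? */
	static bool current_work_is(work_func_t fn)
	{
		struct worker *worker = current_wq_worker();

		return worker && worker->current_func == fn;
	}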