diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 20:46:16 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-24 20:46:16 -0400 |
| commit | a08489c569dc174cff97d2cb165aa81e3f1501cc (patch) | |
| tree | c583700a11bab82ea864425004dd5bb03bf8a987 /include/linux | |
| parent | 08d9329c29ec98477e8ac2f7a513f2bfa3e9f3c5 (diff) | |
| parent | 6fec10a1a5866dda3cd6a825a521fc7c2f226ba5 (diff) | |
Merge branch 'for-3.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue changes from Tejun Heo:
"There are three major changes.
- WQ_HIGHPRI has been reimplemented so that high priority work items
are served by worker threads with -20 nice value from dedicated
highpri worker pools.
- CPU hotplug support has been reimplemented such that idle workers
are kept across CPU hotplug events. This makes CPU hotplug cheaper
(for PM) and makes the code simpler.
- flush_kthread_work() has been reimplemented so that a work item can
be freed while executing. This removes an annoying behavior
difference between kthread_worker and workqueue."
* 'for-3.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
workqueue: fix spurious CPU locality WARN from process_one_work()
kthread_worker: reimplement flush_kthread_work() to allow freeing the work item being executed
kthread_worker: reorganize to prepare for flush_kthread_work() reimplementation
workqueue: simplify CPU hotplug code
workqueue: remove CPU offline trustee
workqueue: don't butcher idle workers on an offline CPU
workqueue: reimplement CPU online rebinding to handle idle workers
workqueue: drop @bind from create_worker()
workqueue: use mutex for global_cwq manager exclusion
workqueue: ROGUE workers are UNBOUND workers
workqueue: drop CPU_DYING notifier operation
workqueue: perform cpu down operations from low priority cpu_notifier()
workqueue: reimplement WQ_HIGHPRI using a separate worker_pool
workqueue: introduce NR_WORKER_POOLS and for_each_worker_pool()
workqueue: separate out worker_pool flags
workqueue: use @pool instead of @gcwq or @cpu where applicable
workqueue: factor out worker_pool from global_cwq
workqueue: don't use WQ_HIGHPRI for unbound workqueues
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/cpu.h | 5 | ||||
| -rw-r--r-- | include/linux/kthread.h | 8 |
2 files changed, 5 insertions(+), 8 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 2e9b9ebbeb78..ce7a074f2519 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -73,8 +73,9 @@ enum { | |||
| 73 | /* migration should happen before other stuff but after perf */ | 73 | /* migration should happen before other stuff but after perf */ |
| 74 | CPU_PRI_PERF = 20, | 74 | CPU_PRI_PERF = 20, |
| 75 | CPU_PRI_MIGRATION = 10, | 75 | CPU_PRI_MIGRATION = 10, |
| 76 | /* prepare workqueues for other notifiers */ | 76 | /* bring up workqueues before normal notifiers and down after */ |
| 77 | CPU_PRI_WORKQUEUE = 5, | 77 | CPU_PRI_WORKQUEUE_UP = 5, |
| 78 | CPU_PRI_WORKQUEUE_DOWN = -5, | ||
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| 80 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ | 81 | #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ |
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 0714b24c0e45..22ccf9dee177 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
| @@ -49,8 +49,6 @@ extern int tsk_fork_get_node(struct task_struct *tsk); | |||
| 49 | * can be queued and flushed using queue/flush_kthread_work() | 49 | * can be queued and flushed using queue/flush_kthread_work() |
| 50 | * respectively. Queued kthread_works are processed by a kthread | 50 | * respectively. Queued kthread_works are processed by a kthread |
| 51 | * running kthread_worker_fn(). | 51 | * running kthread_worker_fn(). |
| 52 | * | ||
| 53 | * A kthread_work can't be freed while it is executing. | ||
| 54 | */ | 52 | */ |
| 55 | struct kthread_work; | 53 | struct kthread_work; |
| 56 | typedef void (*kthread_work_func_t)(struct kthread_work *work); | 54 | typedef void (*kthread_work_func_t)(struct kthread_work *work); |
| @@ -59,15 +57,14 @@ struct kthread_worker { | |||
| 59 | spinlock_t lock; | 57 | spinlock_t lock; |
| 60 | struct list_head work_list; | 58 | struct list_head work_list; |
| 61 | struct task_struct *task; | 59 | struct task_struct *task; |
| 60 | struct kthread_work *current_work; | ||
| 62 | }; | 61 | }; |
| 63 | 62 | ||
| 64 | struct kthread_work { | 63 | struct kthread_work { |
| 65 | struct list_head node; | 64 | struct list_head node; |
| 66 | kthread_work_func_t func; | 65 | kthread_work_func_t func; |
| 67 | wait_queue_head_t done; | 66 | wait_queue_head_t done; |
| 68 | atomic_t flushing; | 67 | struct kthread_worker *worker; |
| 69 | int queue_seq; | ||
| 70 | int done_seq; | ||
| 71 | }; | 68 | }; |
| 72 | 69 | ||
| 73 | #define KTHREAD_WORKER_INIT(worker) { \ | 70 | #define KTHREAD_WORKER_INIT(worker) { \ |
| @@ -79,7 +76,6 @@ struct kthread_work { | |||
| 79 | .node = LIST_HEAD_INIT((work).node), \ | 76 | .node = LIST_HEAD_INIT((work).node), \ |
| 80 | .func = (fn), \ | 77 | .func = (fn), \ |
| 81 | .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ | 78 | .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ |
| 82 | .flushing = ATOMIC_INIT(0), \ | ||
| 83 | } | 79 | } |
| 84 | 80 | ||
| 85 | #define DEFINE_KTHREAD_WORKER(worker) \ | 81 | #define DEFINE_KTHREAD_WORKER(worker) \ |
