author     Tejun Heo <tj@kernel.org>    2010-06-08 15:40:37 -0400
committer  Tejun Heo <tj@kernel.org>    2010-06-08 15:40:37 -0400
commit     21aa9af03d06cb1d19a3738e5cf12acff984e69b
tree       e205a742a4a9baf098b4e3d428c04f4d3a065bca /kernel
parent     9ed3811a6c0d6b66e6cd47a5d7b9136386dce743
sched: add hooks for workqueue
Concurrency managed workqueue needs to know when workers are going to
sleep and when they wake up. Using these two hooks, cmwq keeps track of
the current concurrency level, throttles execution of new work items if
the level is too high, and wakes up another worker from the sleep hook
if it becomes too low.
This patch introduces PF_WQ_WORKER to identify workqueue workers and
adds the following two hooks.
* wq_worker_waking_up(): called when a worker is woken up.
* wq_worker_sleeping(): called when a worker is about to sleep; may
  return a pointer to a local task which should be woken up. The
  returned task is woken up using try_to_wake_up_local(), a simplified
  version of try_to_wake_up() which is called with the rq lock held and
  can only wake up local tasks.
Both hooks are currently defined as noops in kernel/workqueue_sched.h;
the later cmwq implementation will replace them with proper
implementations. The hooks are hard coded, as they will always be
enabled.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Ingo Molnar <mingo@elte.hu>
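To make the intent of the two hooks concrete, here is a minimal sketch of how a worker pool could use them to track its concurrency level. It is illustrative only, not the cmwq code that lands in later patches: the sketch_pool and sketch_worker structures and the nr_running/idle_list fields are assumptions introduced purely for this example.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/sched.h>

/* hypothetical per-cpu worker pool; names are assumptions, not cmwq's */
struct sketch_pool {
	atomic_t		nr_running;	/* workers currently runnable */
	struct list_head	idle_list;	/* idle workers waiting for work */
};

struct sketch_worker {
	struct task_struct	*task;
	struct list_head	entry;		/* link on pool->idle_list */
	struct sketch_pool	*pool;
};

/* a worker waking up raises the pool's concurrency level */
static void sketch_worker_waking_up(struct sketch_worker *worker)
{
	atomic_inc(&worker->pool->nr_running);
}

/*
 * A worker going to sleep lowers the concurrency level.  If that leaves
 * nothing runnable, hand back an idle worker for the scheduler to wake
 * (via try_to_wake_up_local()) so the cpu keeps processing work items.
 */
static struct task_struct *sketch_worker_sleeping(struct sketch_worker *worker)
{
	struct sketch_pool *pool = worker->pool;

	if (atomic_dec_and_test(&pool->nr_running) &&
	    !list_empty(&pool->idle_list)) {
		struct sketch_worker *idle;

		idle = list_first_entry(&pool->idle_list,
					struct sketch_worker, entry);
		return idle->task;
	}
	return NULL;
}

Throttling is the same idea in the other direction: before dispatching another work item, the pool would consult nr_running and park the worker on idle_list if the concurrency level is already high enough.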
Diffstat (limited to 'kernel')

 kernel/fork.c            |  2
 kernel/sched.c           | 53
 kernel/workqueue_sched.h | 16
 3 files changed, 68 insertions(+), 3 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b6cce14ba047..a82a65cef741 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -907,7 +907,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
 
-	new_flags &= ~PF_SUPERPRIV;
+	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
 	new_flags |= PF_FORKNOEXEC;
 	new_flags |= PF_STARTING;
 	p->flags = new_flags;
diff --git a/kernel/sched.c b/kernel/sched.c
index 96eafd5f345f..edd5a54b95da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -77,6 +77,7 @@
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
+#include "workqueue_sched.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -2306,6 +2307,9 @@ static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
 		rq->idle_stamp = 0;
 	}
 #endif
+	/* if a worker is waking up, notify workqueue */
+	if ((p->flags & PF_WQ_WORKER) && success)
+		wq_worker_waking_up(p, cpu_of(rq));
 }
 
 /**
@@ -2414,6 +2418,37 @@ out:
 }
 
 /**
+ * try_to_wake_up_local - try to wake up a local task with rq lock held
+ * @p: the thread to be awakened
+ *
+ * Put @p on the run-queue if it's not alredy there. The caller must
+ * ensure that this_rq() is locked, @p is bound to this_rq() and not
+ * the current task. this_rq() stays locked over invocation.
+ */
+static void try_to_wake_up_local(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+	bool success = false;
+
+	BUG_ON(rq != this_rq());
+	BUG_ON(p == current);
+	lockdep_assert_held(&rq->lock);
+
+	if (!(p->state & TASK_NORMAL))
+		return;
+
+	if (!p->se.on_rq) {
+		if (likely(!task_running(rq, p))) {
+			schedstat_inc(rq, ttwu_count);
+			schedstat_inc(rq, ttwu_local);
+		}
+		ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
+		success = true;
+	}
+	ttwu_post_activation(p, rq, 0, success);
+}
+
+/**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
@@ -3618,10 +3653,24 @@ need_resched_nonpreemptible:
 	clear_tsk_need_resched(prev);
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
-		if (unlikely(signal_pending_state(prev->state, prev)))
+		if (unlikely(signal_pending_state(prev->state, prev))) {
 			prev->state = TASK_RUNNING;
-		else
+		} else {
+			/*
+			 * If a worker is going to sleep, notify and
+			 * ask workqueue whether it wants to wake up a
+			 * task to maintain concurrency. If so, wake
+			 * up the task.
+			 */
+			if (prev->flags & PF_WQ_WORKER) {
+				struct task_struct *to_wakeup;
+
+				to_wakeup = wq_worker_sleeping(prev, cpu);
+				if (to_wakeup)
+					try_to_wake_up_local(to_wakeup);
+			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+		}
 		switch_count = &prev->nvcsw;
 	}
 
diff --git a/kernel/workqueue_sched.h b/kernel/workqueue_sched.h
new file mode 100644
index 000000000000..af040babb742
--- /dev/null
+++ b/kernel/workqueue_sched.h
@@ -0,0 +1,16 @@
+/*
+ * kernel/workqueue_sched.h
+ *
+ * Scheduler hooks for concurrency managed workqueue. Only to be
+ * included from sched.c and workqueue.c.
+ */
+static inline void wq_worker_waking_up(struct task_struct *task,
+				       unsigned int cpu)
+{
+}
+
+static inline struct task_struct *wq_worker_sleeping(struct task_struct *task,
+						      unsigned int cpu)
+{
+	return NULL;
+}
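Once the real cmwq implementation arrives in kernel/workqueue.c, the noop stubs above would presumably be reduced to plain declarations. The exact form below is an assumption, sketched for illustration only.

/*
 * Sketch only: what the header could become once workqueue.c provides
 * real implementations of the two hooks.
 */
struct task_struct;

void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task,
				       unsigned int cpu);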