author    Tejun Heo <tj@kernel.org>    2009-12-03 01:08:03 -0500
committer Tejun Heo <tj@kernel.org>    2010-06-08 15:40:36 -0400
commit    9ed3811a6c0d6b66e6cd47a5d7b9136386dce743 (patch)
tree      6a1e92b3e51978c7e32f1afdf98b1966a3d65818 /kernel/sched.c
parent    3a101d0548e925ab16ca6aaa8cf4f767d322ddb0 (diff)
sched: refactor try_to_wake_up()
Factor ttwu_activate() and ttwu_woken_up() out of try_to_wake_up().
The factoring out doesn't affect try_to_wake_up() much
code-generation-wise.  Depending on configuration options, it ends up
generating the same object code as before or a slightly different one
due to different register assignment.

This is to help the future implementation of try_to_wake_up_local().

Mike Galbraith suggested the rename from ttwu_woken_up() to
ttwu_post_activation() and the comment update in try_to_wake_up().

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  83
1 file changed, 49 insertions(+), 34 deletions(-)
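The commit message's code-generation claim rests on the new helpers being
static inline: with optimization enabled the compiler folds them back into
the caller, so splitting them out costs nothing at runtime.  Below is a
minimal userspace analogue of the pattern, not kernel code; all names in
it are hypothetical.  Compiling both variants with gcc -O2 and comparing
the disassembly should show essentially identical object code, modulo
register assignment, as the commit message notes.

/*
 * Userspace sketch of the refactoring pattern: a static inline helper
 * is split out of a larger function.  At -O2 the compiler inlines it
 * back, so both variants compile to the same object code.  All names
 * here are hypothetical; this is not code from kernel/sched.c.
 */
#include <stdio.h>

struct wake_stats { long total, sync, local, remote; };

/* factored-out helper, analogous to ttwu_activate() */
static inline void account_wakeup(struct wake_stats *s, int is_sync,
                                  int is_local)
{
        s->total++;
        if (is_sync)
                s->sync++;
        if (is_local)
                s->local++;
        else
                s->remote++;
}

/* before: accounting open-coded in the caller */
static void wake_monolithic(struct wake_stats *s, int is_sync, int is_local)
{
        s->total++;
        if (is_sync)
                s->sync++;
        if (is_local)
                s->local++;
        else
                s->remote++;
}

/* after: the caller collapses to a single helper call */
static void wake_refactored(struct wake_stats *s, int is_sync, int is_local)
{
        account_wakeup(s, is_sync, is_local);
}

int main(void)
{
        struct wake_stats a = { 0 }, b = { 0 };

        wake_monolithic(&a, 1, 0);
        wake_refactored(&b, 1, 0);
        printf("monolithic: total=%ld sync=%ld remote=%ld\n",
               a.total, a.sync, a.remote);
        printf("refactored: total=%ld sync=%ld remote=%ld\n",
               b.total, b.sync, b.remote);
        return 0;
}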
diff --git a/kernel/sched.c b/kernel/sched.c
index 2b942e49d0fa..96eafd5f345f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2267,11 +2267,52 @@ static void update_avg(u64 *avg, u64 sample)
 }
 #endif
 
+static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
+                                 bool is_sync, bool is_migrate, bool is_local,
+                                 unsigned long en_flags)
+{
+        schedstat_inc(p, se.statistics.nr_wakeups);
+        if (is_sync)
+                schedstat_inc(p, se.statistics.nr_wakeups_sync);
+        if (is_migrate)
+                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
+        if (is_local)
+                schedstat_inc(p, se.statistics.nr_wakeups_local);
+        else
+                schedstat_inc(p, se.statistics.nr_wakeups_remote);
+
+        activate_task(rq, p, en_flags);
+}
+
+static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
+                                        int wake_flags, bool success)
+{
+        trace_sched_wakeup(p, success);
+        check_preempt_curr(rq, p, wake_flags);
+
+        p->state = TASK_RUNNING;
+#ifdef CONFIG_SMP
+        if (p->sched_class->task_woken)
+                p->sched_class->task_woken(rq, p);
+
+        if (unlikely(rq->idle_stamp)) {
+                u64 delta = rq->clock - rq->idle_stamp;
+                u64 max = 2*sysctl_sched_migration_cost;
+
+                if (delta > max)
+                        rq->avg_idle = max;
+                else
+                        update_avg(&rq->avg_idle, delta);
+                rq->idle_stamp = 0;
+        }
+#endif
+}
+
 /**
  * try_to_wake_up - wake up a thread
- * @p: the to-be-woken-up thread
+ * @p: the thread to be awakened
  * @state: the mask of task states that can be woken
- * @sync: do a synchronous wakeup?
+ * @wake_flags: wake modifier flags (WF_*)
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual
@@ -2279,7 +2320,8 @@ static void update_avg(u64 *avg, u64 sample)
  * the simpler "current->state = TASK_RUNNING" to mark yourself
  * runnable without the overhead of this.
  *
- * returns failure only if the task is already active.
+ * Returns %true if @p was woken up, %false if it was already running
+ * or @state didn't match @p's state.
  */
 static int try_to_wake_up(struct task_struct *p, unsigned int state,
                           int wake_flags)
@@ -2359,38 +2401,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 out_activate:
 #endif /* CONFIG_SMP */
-        schedstat_inc(p, se.statistics.nr_wakeups);
-        if (wake_flags & WF_SYNC)
-                schedstat_inc(p, se.statistics.nr_wakeups_sync);
-        if (orig_cpu != cpu)
-                schedstat_inc(p, se.statistics.nr_wakeups_migrate);
-        if (cpu == this_cpu)
-                schedstat_inc(p, se.statistics.nr_wakeups_local);
-        else
-                schedstat_inc(p, se.statistics.nr_wakeups_remote);
-        activate_task(rq, p, en_flags);
+        ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
+                      cpu == this_cpu, en_flags);
         success = 1;
-
 out_running:
-        trace_sched_wakeup(p, success);
-        check_preempt_curr(rq, p, wake_flags);
-
-        p->state = TASK_RUNNING;
-#ifdef CONFIG_SMP
-        if (p->sched_class->task_woken)
-                p->sched_class->task_woken(rq, p);
-
-        if (unlikely(rq->idle_stamp)) {
-                u64 delta = rq->clock - rq->idle_stamp;
-                u64 max = 2*sysctl_sched_migration_cost;
-
-                if (delta > max)
-                        rq->avg_idle = max;
-                else
-                        update_avg(&rq->avg_idle, delta);
-                rq->idle_stamp = 0;
-        }
-#endif
+        ttwu_post_activation(p, rq, wake_flags, success);
 out:
         task_rq_unlock(rq, &flags);
         put_cpu();
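One detail worth calling out from the moved code: ttwu_post_activation()
keeps rq->avg_idle fresh by feeding each observed idle period into
update_avg(), but clamps outliers at 2*sysctl_sched_migration_cost
instead of averaging them in.  update_avg()'s body is not part of this
diff; the 1/8 weighting below is an assumption about the sched.c of this
era, and the sample values are made up.  A standalone sketch of the
arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* illustrative stand-in for sysctl_sched_migration_cost, in ns */
static const uint64_t migration_cost = 500000;

/* assumed body of kernel/sched.c's update_avg(): a 1/8-weight
 * exponential moving average (the real body is not in this diff) */
static void update_avg(uint64_t *avg, uint64_t sample)
{
        int64_t diff = (int64_t)(sample - *avg);

        *avg += diff >> 3;
}

int main(void)
{
        /* made-up idle periods in ns; the third one is an outlier */
        uint64_t samples[] = { 100000, 300000, 5000000, 200000 };
        uint64_t avg_idle = 0;
        uint64_t max = 2 * migration_cost;

        for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                uint64_t delta = samples[i];

                if (delta > max)
                        avg_idle = max; /* clamp instead of averaging in */
                else
                        update_avg(&avg_idle, delta);
                printf("delta=%8" PRIu64 " -> avg_idle=%" PRIu64 "\n",
                       delta, avg_idle);
        }
        return 0;
}

The clamp keeps a single very long idle period from dominating the
average: in the run above the 5 ms outlier pins avg_idle at max rather
than pulling the moving average arbitrarily high.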