author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-24 11:38:48 -0400
committer Ingo Molnar <mingo@elte.hu>                2010-04-02 14:12:05 -0400
commit    371fd7e7a56a5c136d31aa980011bd2f131c3ef5
tree      cf52014018e8258acd8bcfd486d855f098a02c03 /kernel/sched.c
parent    cc87f76a601d2d256118f7bab15e35254356ae21
sched: Add enqueue/dequeue flags
In order to reduce the dependency on TASK_WAKING, rework the enqueue
interface to support a proper flags field.
Replace the int wakeup, bool head arguments with an int flags argument
and create the following flags:
ENQUEUE_WAKEUP - the enqueue is a wakeup of a sleeping task,
ENQUEUE_WAKING - the enqueue has relative vruntime due to
having sched_class::task_waking() called,
ENQUEUE_HEAD - the waking task should be placed on the head
of the priority queue (where appropriate).
For symmetry, also convert sched_class::dequeue() to a flags scheme (see the sketch below the sign-offs).
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
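The three ENQUEUE_* bits and DEQUEUE_SLEEP described above are defined outside this view (the diffstat below is limited to kernel/sched.c). A minimal sketch of the resulting interface, with illustrative one-bit-per-condition values rather than the tree's authoritative definitions:

/*
 * Sketch of the new flag bits; the values here are illustrative
 * assumptions, since the real definitions are not part of the
 * kernel/sched.c diff shown below.
 */
#define ENQUEUE_WAKEUP	1	/* wakeup of a sleeping task */
#define ENQUEUE_WAKING	2	/* vruntime is relative: task_waking() ran */
#define ENQUEUE_HEAD	4	/* queue the task at the head */

#define DEQUEUE_SLEEP	1	/* task is dequeued to go to sleep */

/* The sched_class hooks now take one flags word instead of
 * (int wakeup, bool head) and (int sleep), per the diff below: */
void (*enqueue_task)(struct rq *rq, struct task_struct *p, int flags);
void (*dequeue_task)(struct rq *rq, struct task_struct *p, int flags);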
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c | 32 +++++++++++++++++---------------
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 14c8d2a1b38a..4a57e96dd6c7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1877,44 +1877,43 @@ static void update_avg(u64 *avg, u64 sample)
 	*avg += diff >> 3;
 }
 
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_rq_clock(rq);
 	sched_info_queued(p);
-	p->sched_class->enqueue_task(rq, p, wakeup, head);
+	p->sched_class->enqueue_task(rq, p, flags);
 	p->se.on_rq = 1;
 }
 
-static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_rq_clock(rq);
 	sched_info_dequeued(p);
-	p->sched_class->dequeue_task(rq, p, sleep);
+	p->sched_class->dequeue_task(rq, p, flags);
 	p->se.on_rq = 0;
 }
 
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
-	enqueue_task(rq, p, wakeup, false);
+	enqueue_task(rq, p, flags);
 	inc_nr_running(rq);
 }
 
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible++;
 
-	dequeue_task(rq, p, sleep);
+	dequeue_task(rq, p, flags);
 	dec_nr_running(rq);
 }
 
@@ -2353,6 +2352,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
+	unsigned long en_flags = ENQUEUE_WAKEUP;
 	struct rq *rq;
 
 	this_cpu = get_cpu();
@@ -2386,8 +2386,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	}
 	p->state = TASK_WAKING;
 
-	if (p->sched_class->task_waking)
+	if (p->sched_class->task_waking) {
 		p->sched_class->task_waking(rq, p);
+		en_flags |= ENQUEUE_WAKING;
+	}
 
 	cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
@@ -2432,7 +2434,7 @@ out_activate:
 		schedstat_inc(p, se.statistics.nr_wakeups_local);
 	else
 		schedstat_inc(p, se.statistics.nr_wakeups_remote);
-	activate_task(rq, p, 1);
+	activate_task(rq, p, en_flags);
 	success = 1;
 
 out_running:
@@ -3623,7 +3625,7 @@ need_resched_nonpreemptible:
 		if (unlikely(signal_pending_state(prev->state, prev)))
 			prev->state = TASK_RUNNING;
 		else
-			deactivate_task(rq, prev, 1);
+			deactivate_task(rq, prev, DEQUEUE_SLEEP);
 		switch_count = &prev->nvcsw;
 	}
 
@@ -4193,7 +4195,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (running)
 		p->sched_class->set_curr_task(rq);
 	if (on_rq) {
-		enqueue_task(rq, p, 0, oldprio < prio);
+		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
 		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
@@ -4236,7 +4238,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	delta = p->prio - old_prio;
 
 	if (on_rq) {
-		enqueue_task(rq, p, 0, false);
+		enqueue_task(rq, p, 0);
 		/*
 		 * If the task increased its priority or is running and
 		 * lowered its priority, then reschedule its CPU:
@@ -8180,7 +8182,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
 	if (on_rq)
-		enqueue_task(rq, tsk, 0, false);
+		enqueue_task(rq, tsk, 0);
 
 	task_rq_unlock(rq, &flags);
 }