author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-24 11:38:48 -0400
committer Ingo Molnar <mingo@elte.hu>                2010-04-02 14:12:05 -0400
commit    371fd7e7a56a5c136d31aa980011bd2f131c3ef5 (patch)
tree      cf52014018e8258acd8bcfd486d855f098a02c03
parent    cc87f76a601d2d256118f7bab15e35254356ae21 (diff)
sched: Add enqueue/dequeue flags
In order to reduce the dependency on TASK_WAKING, rework the enqueue
interface to support a proper flags field.

Replace the "int wakeup, bool head" arguments with an int flags argument
and create the following flags:

  ENQUEUE_WAKEUP - the enqueue is a wakeup of a sleeping task,
  ENQUEUE_WAKING - the enqueue has relative vruntime due to having
                   sched_class::task_waking() called,
  ENQUEUE_HEAD   - the waking task should be placed on the head of the
                   priority queue (where appropriate).

For symmetry also convert sched_class::dequeue() to a flags scheme.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   include/linux/sched.h     11
-rw-r--r--   kernel/sched.c            32
-rw-r--r--   kernel/sched_fair.c       25
-rw-r--r--   kernel/sched_idletask.c    2
-rw-r--r--   kernel/sched_rt.c          8
5 files changed, 38 insertions, 40 deletions
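Before the diff itself, here is a minimal stand-alone sketch of the new calling
convention. It is not kernel code: the ENQUEUE_*/DEQUEUE_* values match the
patch, but struct fake_task, build_wakeup_flags() and fake_enqueue() are
hypothetical helpers invented for illustration. They mirror how
try_to_wake_up() assembles en_flags and how enqueue_task_rt() and
enqueue_entity() test the individual bits.

/*
 * Stand-alone illustration (not kernel code) of the flag scheme added by
 * this patch.  The flag values match the patch; the struct and helpers
 * below are made up for the example.
 */
#include <stdio.h>

#define ENQUEUE_WAKEUP  1
#define ENQUEUE_WAKING  2
#define ENQUEUE_HEAD    4

#define DEQUEUE_SLEEP   1

struct fake_task {
        const char *name;
        int has_task_waking;    /* class provides ->task_waking()? */
        int boosted_to_head;    /* e.g. boosted by rt_mutex_setprio() */
};

/* Mirrors how try_to_wake_up() builds en_flags in the patch. */
static int build_wakeup_flags(const struct fake_task *p)
{
        int flags = ENQUEUE_WAKEUP;

        if (p->has_task_waking)
                flags |= ENQUEUE_WAKING;   /* vruntime is relative, not absolute */
        if (p->boosted_to_head)
                flags |= ENQUEUE_HEAD;     /* queue at the head of its prio list */

        return flags;
}

/* Mirrors how enqueue_task_rt()/enqueue_entity() test individual bits. */
static void fake_enqueue(const struct fake_task *p, int flags)
{
        printf("%s: wakeup=%d waking=%d head=%d\n", p->name,
               !!(flags & ENQUEUE_WAKEUP),
               !!(flags & ENQUEUE_WAKING),
               !!(flags & ENQUEUE_HEAD));
}

int main(void)
{
        struct fake_task t = { "demo", 1, 0 };

        fake_enqueue(&t, build_wakeup_flags(&t));
        return 0;
}

Built with any C compiler, it prints the decoded bits for a wakeup of a task
whose class implements ->task_waking(); the same OR-and-test pattern is what
replaces the old "int wakeup, bool head" pair throughout the diff below.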
diff --git a/include/linux/sched.h b/include/linux/sched.h
index fb6c18843ee8..e3e900f318d7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1032,12 +1032,17 @@ struct sched_domain;
 #define WF_SYNC         0x01            /* waker goes to sleep after wakup */
 #define WF_FORK         0x02            /* child wakeup after fork */
 
+#define ENQUEUE_WAKEUP          1
+#define ENQUEUE_WAKING          2
+#define ENQUEUE_HEAD            4
+
+#define DEQUEUE_SLEEP           1
+
 struct sched_class {
         const struct sched_class *next;
 
-        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
-                              bool head);
-        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
+        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
         void (*yield_task) (struct rq *rq);
 
         void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
diff --git a/kernel/sched.c b/kernel/sched.c
index 14c8d2a1b38a..4a57e96dd6c7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1877,44 +1877,43 @@ static void update_avg(u64 *avg, u64 sample)
         *avg += diff >> 3;
 }
 
-static void
-enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 {
         update_rq_clock(rq);
         sched_info_queued(p);
-        p->sched_class->enqueue_task(rq, p, wakeup, head);
+        p->sched_class->enqueue_task(rq, p, flags);
         p->se.on_rq = 1;
 }
 
-static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 {
         update_rq_clock(rq);
         sched_info_dequeued(p);
-        p->sched_class->dequeue_task(rq, p, sleep);
+        p->sched_class->dequeue_task(rq, p, flags);
         p->se.on_rq = 0;
 }
 
 /*
  * activate_task - move a task to the runqueue.
  */
-static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
+static void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
         if (task_contributes_to_load(p))
                 rq->nr_uninterruptible--;
 
-        enqueue_task(rq, p, wakeup, false);
+        enqueue_task(rq, p, flags);
         inc_nr_running(rq);
 }
 
 /*
  * deactivate_task - remove a task from the runqueue.
  */
-static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
+static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
         if (task_contributes_to_load(p))
                 rq->nr_uninterruptible++;
 
-        dequeue_task(rq, p, sleep);
+        dequeue_task(rq, p, flags);
         dec_nr_running(rq);
 }
 
@@ -2353,6 +2352,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
         int cpu, orig_cpu, this_cpu, success = 0;
         unsigned long flags;
+        unsigned long en_flags = ENQUEUE_WAKEUP;
         struct rq *rq;
 
         this_cpu = get_cpu();
@@ -2386,8 +2386,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
         }
         p->state = TASK_WAKING;
 
-        if (p->sched_class->task_waking)
+        if (p->sched_class->task_waking) {
                 p->sched_class->task_waking(rq, p);
+                en_flags |= ENQUEUE_WAKING;
+        }
 
         cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
         if (cpu != orig_cpu)
@@ -2432,7 +2434,7 @@ out_activate:
                 schedstat_inc(p, se.statistics.nr_wakeups_local);
         else
                 schedstat_inc(p, se.statistics.nr_wakeups_remote);
-        activate_task(rq, p, 1);
+        activate_task(rq, p, en_flags);
         success = 1;
 
 out_running:
@@ -3623,7 +3625,7 @@ need_resched_nonpreemptible:
                 if (unlikely(signal_pending_state(prev->state, prev)))
                         prev->state = TASK_RUNNING;
                 else
-                        deactivate_task(rq, prev, 1);
+                        deactivate_task(rq, prev, DEQUEUE_SLEEP);
                 switch_count = &prev->nvcsw;
         }
 
@@ -4193,7 +4195,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         if (running)
                 p->sched_class->set_curr_task(rq);
         if (on_rq) {
-                enqueue_task(rq, p, 0, oldprio < prio);
+                enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
                 check_class_changed(rq, p, prev_class, oldprio, running);
         }
@@ -4236,7 +4238,7 @@ void set_user_nice(struct task_struct *p, long nice)
         delta = p->prio - old_prio;
 
         if (on_rq) {
-                enqueue_task(rq, p, 0, false);
+                enqueue_task(rq, p, 0);
                 /*
                  * If the task increased its priority or is running and
                  * lowered its priority, then reschedule its CPU:
@@ -8180,7 +8182,7 @@ void sched_move_task(struct task_struct *tsk)
         if (unlikely(running))
                 tsk->sched_class->set_curr_task(rq);
         if (on_rq)
-                enqueue_task(rq, tsk, 0, false);
+                enqueue_task(rq, tsk, 0);
 
         task_rq_unlock(rq, &flags);
 }
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8a5e7632d09b..88d3053ac7c2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -757,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
         se->vruntime = vruntime;
 }
 
-#define ENQUEUE_WAKEUP  1
-#define ENQUEUE_MIGRATE 2
-
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -767,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * Update the normalized vruntime before updating min_vruntime
          * through callig update_curr().
          */
-        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                 se->vruntime += cfs_rq->min_vruntime;
 
         /*
@@ -803,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
         /*
          * Update run-time statistics of the 'current'.
@@ -811,7 +808,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
         update_curr(cfs_rq);
 
         update_stats_dequeue(cfs_rq, se);
-        if (sleep) {
+        if (flags & DEQUEUE_SLEEP) {
 #ifdef CONFIG_SCHEDSTATS
                 if (entity_is_task(se)) {
                         struct task_struct *tsk = task_of(se);
@@ -836,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
          * update can refer to the ->curr item and we need to reflect this
          * movement in our normalized position.
          */
-        if (!sleep)
+        if (!(flags & DEQUEUE_SLEEP))
                 se->vruntime -= cfs_rq->min_vruntime;
 }
 
@@ -1045,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
  * then put the task into the rbtree:
  */
 static void
-enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
-        int flags = 0;
-
-        if (wakeup)
-                flags |= ENQUEUE_WAKEUP;
-        if (p->state == TASK_WAKING)
-                flags |= ENQUEUE_MIGRATE;
 
         for_each_sched_entity(se) {
                 if (se->on_rq)
@@ -1072,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  * decreased. We remove the task from the rbtree and
  * update the fair scheduling stats:
  */
-static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
         struct cfs_rq *cfs_rq;
         struct sched_entity *se = &p->se;
 
         for_each_sched_entity(se) {
                 cfs_rq = cfs_rq_of(se);
-                dequeue_entity(cfs_rq, se, sleep);
+                dequeue_entity(cfs_rq, se, flags);
                 /* Don't dequeue parent if it has other entities besides us */
                 if (cfs_rq->load.weight)
                         break;
-                sleep = 1;
+                flags |= DEQUEUE_SLEEP;
         }
 
         hrtick_update(rq);
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 5af709f503b0..bea2b8f12024 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -33,7 +33,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
  * message if some code attempts to do it:
  */
 static void
-dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 {
         raw_spin_unlock_irq(&rq->lock);
         printk(KERN_ERR "bad: scheduling from the idle thread!\n");
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index fde895f8044d..8afb953e31c6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -888,20 +888,20 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
  * Adding/removing a task to/from a priority array:
  */
 static void
-enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
         struct sched_rt_entity *rt_se = &p->rt;
 
-        if (wakeup)
+        if (flags & ENQUEUE_WAKEUP)
                 rt_se->timeout = 0;
 
-        enqueue_rt_entity(rt_se, head);
+        enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
         if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
                 enqueue_pushable_task(rq, p);
 }
 
-static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
+static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
         struct sched_rt_entity *rt_se = &p->rt;
 