Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  63
1 file changed, 57 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 892616bf2c77..c9fbe8e73a45 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -43,6 +43,14 @@ unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;
 unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
 
 /*
+ * sys_sched_yield() compat mode
+ *
+ * This option switches the aggressive yield implementation of the
+ * old scheduler back on.
+ */
+unsigned int __read_mostly sysctl_sched_compat_yield;
+
+/*
  * SCHED_BATCH wake-up granularity.
  * (default: 25 msec, units: nanoseconds)
  *
@@ -897,19 +905,62 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 }
 
 /*
- * sched_yield() support is very simple - we dequeue and enqueue
+ * sched_yield() support is very simple - we dequeue and enqueue.
+ *
+ * If compat_yield is turned on then we requeue to the end of the tree.
  */
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
         struct cfs_rq *cfs_rq = task_cfs_rq(p);
+        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
+        struct sched_entity *rightmost, *se = &p->se;
+        struct rb_node *parent;
 
-        __update_rq_clock(rq);
         /*
-         * Dequeue and enqueue the task to update its
-         * position within the tree:
+         * Are we the only task in the tree?
+         */
+        if (unlikely(cfs_rq->nr_running == 1))
+                return;
+
+        if (likely(!sysctl_sched_compat_yield)) {
+                __update_rq_clock(rq);
+                /*
+                 * Dequeue and enqueue the task to update its
+                 * position within the tree:
+                 */
+                dequeue_entity(cfs_rq, &p->se, 0);
+                enqueue_entity(cfs_rq, &p->se, 0);
+
+                return;
+        }
+        /*
+         * Find the rightmost entry in the rbtree:
          */
-        dequeue_entity(cfs_rq, &p->se, 0);
-        enqueue_entity(cfs_rq, &p->se, 0);
+        do {
+                parent = *link;
+                link = &parent->rb_right;
+        } while (*link);
+
+        rightmost = rb_entry(parent, struct sched_entity, run_node);
+        /*
+         * Already in the rightmost position?
+         */
+        if (unlikely(rightmost == se))
+                return;
+
+        /*
+         * Minimally necessary key value to be last in the tree:
+         */
+        se->fair_key = rightmost->fair_key + 1;
+
+        if (cfs_rq->rb_leftmost == &se->run_node)
+                cfs_rq->rb_leftmost = rb_next(&se->run_node);
+        /*
+         * Relink the task to the rightmost position:
+         */
+        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
+        rb_link_node(&se->run_node, parent, link);
+        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
 }
 
 /*
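
A note on using the new knob: this diff is limited to kernel/sched_fair.c, so the sysctl plumbing that exposes sysctl_sched_compat_yield is not shown here. By the usual naming convention it would surface as /proc/sys/kernel/sched_compat_yield, but treat that path as an assumption. A minimal userspace sketch that flips it on:

/*
 * Sketch: enable the old-style aggressive yield from userspace.
 * Assumes the knob is exposed as /proc/sys/kernel/sched_compat_yield;
 * the sysctl wiring is outside this diff. Needs root.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/sched_compat_yield", "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        fputs("1\n", f);        /* 1 = aggressive yield, 0 = CFS default */
        fclose(f);
        return 0;
}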
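
The heart of the compat path is the do/while that chases rb_right pointers until it falls off the tree; on a search tree ordered by fair_key, that always lands on the maximum, i.e. the task scheduled furthest in the future. The same walk on a plain userspace binary search tree, as a standalone sketch (struct node and tree_rightmost() are made up for illustration, not the kernel's struct rb_node API):

/*
 * Sketch: "walk right until you fall off" maximum-finding, as in the
 * compat-yield path above, but on a hypothetical plain BST node type.
 */
#include <stdio.h>

struct node {
        long key;
        struct node *left, *right;
};

static struct node *tree_rightmost(struct node *root)
{
        struct node *parent = NULL;
        struct node **link = &root;

        /* Same shape as the kernel loop: follow right children to the end. */
        while (*link) {
                parent = *link;
                link = &parent->right;
        }
        return parent;          /* NULL if the tree is empty */
}

int main(void)
{
        struct node a = { 1, NULL, NULL }, b = { 5, NULL, NULL };
        struct node root = { 3, &a, &b };

        printf("rightmost key: %ld\n", tree_rightmost(&root)->key);
        return 0;
}

The kernel variant can use a do/while with no NULL check because the nr_running test above guarantees the tree is non-empty. The remaining bookkeeping in the patch exists because CFS caches the tree's leftmost node in cfs_rq->rb_leftmost for O(1) task picking; if the yielding task is that cached node, the cache is advanced with rb_next() before rb_erase() unlinks it, so the pointer never dangles.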
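
The behavioural difference is visible from userspace. With the knob off, yield_task_fair() only dequeues and enqueues, so a yielding task can stay near the left of the tree and keep running; with the knob on, its fair_key is bumped just past the rightmost entry, so it goes to the back of the line. A rough test sketch follows; run it next to a plain busy loop pinned to the same CPU and compare counts for the two settings (the magnitude of the difference is an assumption about this kernel, not a measured result):

/*
 * Sketch: count sched_yield() iterations over ~5 seconds. Run it
 * alongside a busy loop pinned to the same CPU and compare the count
 * with sched_compat_yield set to 0 vs. 1.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec start, now;
        unsigned long yields = 0;

        clock_gettime(CLOCK_MONOTONIC, &start);
        do {
                sched_yield();  /* compat mode: requeue to the tree's far right */
                yields++;
                clock_gettime(CLOCK_MONOTONIC, &now);
        } while (now.tv_sec - start.tv_sec < 5);

        printf("%lu yields in ~5s\n", yields);
        return 0;
}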