Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c | 28 +++++++++++++++-------------
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 613c1c749677..44ec80ccfa85 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1922,28 +1922,30 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 }
 
 /*
- * Share the fairness runtime between parent and child, thus the
- * total amount of pressure for CPU stays equal - new tasks
- * get a chance to run but frequent forkers are not allowed to
- * monopolize the CPU. Note: the parent runqueue is locked,
- * the child is not running yet.
+ * called on fork with the child task as argument from the parent's context
+ *  - child not yet on the tasklist
+ *  - preemption disabled
  */
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_fork_fair(struct task_struct *p)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(p);
+	struct cfs_rq *cfs_rq = task_cfs_rq(current);
 	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
 	int this_cpu = smp_processor_id();
+	struct rq *rq = this_rq();
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
 
-	sched_info_queued(p);
+	if (unlikely(task_cpu(p) != this_cpu))
+		__set_task_cpu(p, this_cpu);
 
 	update_curr(cfs_rq);
+
 	if (curr)
 		se->vruntime = curr->vruntime;
 	place_entity(cfs_rq, se, 1);
 
-	/* 'curr' will be NULL if the child belongs to a different group */
-	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
-			curr && entity_before(curr, se)) {
+	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
 		/*
 		 * Upon rescheduling, sched_class::put_prev_task() will place
 		 * 'current' within the tree based on its new key value.
@@ -1952,7 +1954,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		resched_task(rq->curr);
 	}
 
-	enqueue_task_fair(rq, p, 0);
+	spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
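The rewritten handler takes only the child task and derives everything else itself: it looks up the runqueue locally and takes rq->lock with interrupts disabled (the old task_new hook ran with the parent's runqueue already locked by the caller), it pins the child to the current CPU before touching per-CPU state, and it no longer enqueues the child here; under this scheme the actual enqueue is presumably left to the wakeup path once the child is first woken. The placement policy itself is unchanged: the child starts at the parent's vruntime, place_entity(cfs_rq, se, 1) applies the initial placement, and sysctl_sched_child_runs_first can force the child ahead of the parent before resched_task() is called. Below is a minimal user-space model of that placement decision; the struct, the constant offset, and the vruntime values are invented for illustration, and only the control flow mirrors task_fork_fair():

/* Toy model of the child-placement logic in task_fork_fair(); the
 * INITIAL_PENALTY stand-in for place_entity() is an assumption. */
#include <stdio.h>
#include <stdbool.h>

#define INITIAL_PENALTY 50ULL	/* hypothetical rightward shift from place_entity(..., 1) */

struct entity {
	unsigned long long vruntime;
};

/* Mirrors the kernel's entity_before(): signed compare tolerates wraparound. */
static bool entity_before(const struct entity *a, const struct entity *b)
{
	return (long long)(a->vruntime - b->vruntime) < 0;
}

int main(void)
{
	bool child_runs_first = true;		/* sysctl_sched_child_runs_first */
	struct entity parent = { .vruntime = 1000 };
	struct entity child = parent;		/* se->vruntime = curr->vruntime */

	child.vruntime += INITIAL_PENALTY;	/* place_entity(cfs_rq, se, 1) */

	/* If the parent would now run before the child, swap keys so the
	 * child wins the next pick; the kernel then reschedules current. */
	if (child_runs_first && entity_before(&parent, &child)) {
		unsigned long long tmp = parent.vruntime;

		parent.vruntime = child.vruntime;
		child.vruntime = tmp;
	}

	printf("parent=%llu child=%llu\n", parent.vruntime, child.vruntime);
	return 0;
}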
@@ -2052,7 +2054,7 @@ static const struct sched_class fair_sched_class = {
 
 	.set_curr_task		= set_curr_task_fair,
 	.task_tick		= task_tick_fair,
-	.task_new		= task_new_fair,
+	.task_fork		= task_fork_fair,
 
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
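With the hook renamed from task_new to task_fork, the fair class simply registers task_fork_fair in its method table. For orientation, here is a rough sketch of how a core scheduler could invoke such an optional per-class fork hook; the function and field names below are assumptions for illustration, not the kernel's exact core code:

/* Hypothetical caller, modeled on the contract stated in the new comment:
 * runs in the parent's context at fork time, before the child is on the
 * tasklist, with preemption disabled. */
struct task_struct;

struct sched_class {
	/* ...other methods elided... */
	void (*task_fork)(struct task_struct *p);
};

static void sched_fork_hook(const struct sched_class *class,
			    struct task_struct *p)
{
	if (class->task_fork)	/* the hook is optional; a class may leave it NULL */
		class->task_fork(p);
}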