Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	26	+++++++++++++++++++++++---
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 501ab637b94b..5280272cce3e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1344,6 +1344,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	hrtick_update(rq);
 }
 
+static void set_next_buddy(struct sched_entity *se);
+
 /*
  * The dequeue_task method is called before nr_running is
  * decreased. We remove the task from the rbtree and
@@ -1353,14 +1355,22 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
+	int task_sleep = flags & DEQUEUE_SLEEP;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
 
 		/* Don't dequeue parent if it has other entities besides us */
-		if (cfs_rq->load.weight)
+		if (cfs_rq->load.weight) {
+			/*
+			 * Bias pick_next to pick a task from this cfs_rq, as
+			 * p is sleeping when it is within its sched_slice.
+			 */
+			if (task_sleep && parent_entity(se))
+				set_next_buddy(parent_entity(se));
 			break;
+		}
 		flags |= DEQUEUE_SLEEP;
 	}
 
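The forward declaration added in the first hunk is needed because set_next_buddy() is defined below dequeue_task_fair() in this file. For reference, a rough sketch of what that helper does in kernels of this vintage: it walks up the scheduling-entity hierarchy and records se as the "next" buddy at every cfs_rq level. (Illustrative only; the real helper also declines to buddy SCHED_IDLE tasks, a guard elided here.)

static void set_next_buddy(struct sched_entity *se)
{
	/* Mark se as the preferred next pick at each level of the hierarchy. */
	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
}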
@@ -1877,12 +1887,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
+	int next_buddy_marked = 0;
 
 	if (unlikely(se == pse))
 		return;
 
-	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
+	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
 		set_next_buddy(pse);
+		next_buddy_marked = 1;
+	}
 
 	/*
 	 * We can come here with TIF_NEED_RESCHED already set from new task
@@ -1910,8 +1923,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	update_curr(cfs_rq);
 	find_matching_se(&se, &pse);
 	BUG_ON(!pse);
-	if (wakeup_preempt_entity(se, pse) == 1)
+	if (wakeup_preempt_entity(se, pse) == 1) {
+		/*
+		 * Bias pick_next to pick the sched entity that is
+		 * triggering this preemption.
+		 */
+		if (!next_buddy_marked)
+			set_next_buddy(pse);
 		goto preempt;
+	}
 
 	return;
 
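Both hints land in cfs_rq->next and take effect in pick_next_entity(), which consults the buddies before defaulting to the leftmost (smallest-vruntime) entity in the rbtree. A simplified sketch of that consumer, paraphrased from the CFS code of this era (skip-buddy handling elided; illustrative, not the verbatim source):

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *left = __pick_first_entity(cfs_rq);
	struct sched_entity *se = left;

	/*
	 * Prefer the last buddy: returning the CPU to a preempted task
	 * keeps its cache footprint warm.
	 */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	/*
	 * The next buddy -- the hint set by the hunks above -- overrides
	 * last, provided picking it would not leave the leftmost entity
	 * behind by more than the wakeup granularity.
	 */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	clear_buddies(cfs_rq, se);
	return se;
}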