-rw-r--r--  include/linux/sched.h   |  3
-rw-r--r--  kernel/sched.c          | 26
-rw-r--r--  kernel/sched_debug.c    |  1
-rw-r--r--  kernel/sched_fair.c     | 31
-rw-r--r--  kernel/sched_features.h |  6

5 files changed, 4 insertions(+), 63 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 13efe7dac5fa..70c560f5ada0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1185,9 +1185,6 @@ struct sched_entity {
 
 	u64			nr_migrations;
 
-	u64			start_runtime;
-	u64			avg_wakeup;
-
 #ifdef CONFIG_SCHEDSTATS
 	struct sched_statistics statistics;
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index 60b1bbe2ad1b..35a8626ace7d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1880,9 +1880,6 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
-	if (wakeup)
-		p->se.start_runtime = p->se.sum_exec_runtime;
-
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
@@ -1890,17 +1887,11 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep) {
-		if (p->se.last_wakeup) {
-			update_avg(&p->se.avg_overlap,
-				p->se.sum_exec_runtime - p->se.last_wakeup);
-			p->se.last_wakeup = 0;
-		} else {
-			update_avg(&p->se.avg_wakeup,
-				sysctl_sched_wakeup_granularity);
-		}
+	if (sleep && p->se.last_wakeup) {
+		update_avg(&p->se.avg_overlap,
+			p->se.sum_exec_runtime - p->se.last_wakeup);
+		p->se.last_wakeup = 0;
 	}
-
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
@@ -2466,13 +2457,6 @@ out_activate:
 	 */
 	if (!in_interrupt()) {
 		struct sched_entity *se = &current->se;
-		u64 sample = se->sum_exec_runtime;
-
-		if (se->last_wakeup)
-			sample -= se->last_wakeup;
-		else
-			sample -= se->start_runtime;
-		update_avg(&se->avg_wakeup, sample);
 
 		se->last_wakeup = se->sum_exec_runtime;
 	}
@@ -2540,8 +2524,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
-	p->se.start_runtime		= 0;
-	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
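
Note: both avg_overlap (kept above) and the removed avg_wakeup were fed through the update_avg() helper named in the first hunk's header. As a reference point, here is a minimal user-space sketch of that averaging, assuming update_avg()'s usual 1/8-weight form (*avg += (sample - *avg) >> 3); the sample values are invented:

	#include <stdio.h>
	#include <stdint.h>

	/* Same shape as kernel/sched.c's update_avg(): each new sample
	 * moves the average one eighth of the way toward itself. */
	static void update_avg(uint64_t *avg, uint64_t sample)
	{
		int64_t diff = (int64_t)(sample - *avg);

		*avg += diff >> 3;
	}

	int main(void)
	{
		uint64_t avg_overlap = 0;
		uint64_t samples[] = { 800000, 600000, 700000, 50000 }; /* invented ns */
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			update_avg(&avg_overlap, samples[i]);
			printf("sample=%-7llu avg=%llu\n",
			       (unsigned long long)samples[i],
			       (unsigned long long)avg_overlap);
		}
		return 0;
	}

Each sample only moves the estimate an eighth of the way, so a task's history decays gradually instead of being overwritten by one outlier.
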
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ad9df4422763..20b95a420fec 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -408,7 +408,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
-	PN(se.avg_wakeup);
 
 	nr_switches = p->nvcsw + p->nivcsw;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8ad164bbdac1..6fc62854422c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1592,42 +1592,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- * degrading latency on load.
- */
-static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
-	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
-	u64 gran = 0;
-
-	if (this_run < expected_wakeup)
-		gran = expected_wakeup - this_run;
-
-	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
 static unsigned long
 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;
 
-	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
-		gran = adaptive_gran(curr, se);
-
 	/*
 	 * Since its curr running now, convert the gran from real-time
 	 * to virtual-time in his units.
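
Note: the adaptive_gran() logic deleted above is self-contained arithmetic, so its behaviour is easy to demonstrate outside the kernel. A standalone sketch with the sched_entity and cfs_rq field accesses replaced by plain parameters (all nanosecond values invented for the demo):

	#include <stdio.h>
	#include <stdint.h>

	/* Standalone rendering of the removed adaptive_gran(): the more of
	 * the wakee's expected wakeup interval the current task has already
	 * run, the less preemption shielding remains, clipped to
	 * [0, gran_limit]. */
	static uint64_t adaptive_gran(uint64_t this_run, uint64_t avg_wakeup,
				      unsigned long nr_running, uint64_t gran_limit)
	{
		uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
		uint64_t gran = 0;

		if (this_run < expected_wakeup)
			gran = expected_wakeup - this_run;

		return gran < gran_limit ? gran : gran_limit;
	}

	int main(void)
	{
		uint64_t limit = 1000000; /* invented 1ms wakeup granularity */

		/* early in the expected window: clipped to the full limit */
		printf("%llu\n", (unsigned long long)
		       adaptive_gran(100000, 300000, 2, limit));
		/* almost past the window: only 100000ns of shielding left */
		printf("%llu\n", (unsigned long long)
		       adaptive_gran(1100000, 300000, 2, limit));
		return 0;
	}

The first call lands well inside the expected-wakeup window, so the shielding is clipped at the full granularity; the second is nearly past it, so only the remainder is granted.
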
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index d5059fd761d9..96ef5dbc66e1 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -31,12 +31,6 @@ SCHED_FEAT(START_DEBIT, 1)
 SCHED_FEAT(WAKEUP_PREEMPT, 1)
 
 /*
- * Compute wakeup_gran based on task behaviour, clipped to
- * [0, sched_wakeup_gran_ns]
- */
-SCHED_FEAT(ADAPTIVE_GRAN, 1)
-
-/*
  * When converting the wakeup granularity to virtual time, do it such
  * that heavier tasks preempting a lighter task have an edge.
  */
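
Note: the surviving comment refers to the real-time to virtual-time conversion still performed in wakeup_gran(). A simplified sketch of that scaling, assuming the standard CFS relation vtime = delta * NICE_0_LOAD / weight (the kernel's calc_delta_fair() computes this in fixed point; the 2048 weight is an invented example):

	#include <stdio.h>
	#include <stdint.h>

	#define NICE_0_LOAD 1024 /* CFS load weight of a nice-0 task */

	/* Simplified real-time -> virtual-time scaling: virtual time passes
	 * more slowly for heavier tasks, so the same wall-clock granularity
	 * becomes a smaller virtual hurdle for a heavy preemptor. */
	static uint64_t to_vtime(uint64_t delta_ns, unsigned long weight)
	{
		return delta_ns * NICE_0_LOAD / weight;
	}

	int main(void)
	{
		uint64_t gran = 1000000; /* 1ms wakeup granularity */

		printf("weight 1024 (nice 0):  %llu ns virtual\n",
		       (unsigned long long)to_vtime(gran, 1024));
		printf("weight 2048 (heavier): %llu ns virtual\n",
		       (unsigned long long)to_vtime(gran, 2048));
		return 0;
	}

The heavier task sees the same 1ms granularity as only 0.5ms of virtual time, which is the "edge" the comment describes.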