Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	9	++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b320753aa6c9..6c84439ce987 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -89,6 +89,13 @@ unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
 
 const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
 
+/*
+ * The exponential sliding window over which load is averaged for shares
+ * distribution.
+ * (default: 10msec)
+ */
+unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
+
 static const struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -688,7 +695,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
 static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
-	u64 period = sched_avg_period();
+	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
 	unsigned long load = cfs_rq->load.weight;
 
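
For readers unfamiliar with the mechanism, the new comment describes an exponential sliding window: load is accumulated per window of sysctl_sched_shares_window nanoseconds, and older windows contribute exponentially less to the average used for shares distribution. The user-space sketch below illustrates that idea only; it is not the kernel's update_cfs_load(). The struct demo_rq, the function demo_update_load(), its fields and the halve-per-window decay are hypothetical stand-ins chosen for this example.

/*
 * Illustrative sketch only -- not kernel code.  Models an exponential
 * sliding window: weight accumulates over time, and each completed
 * window halves the accumulated history.
 */
#include <stdint.h>
#include <stdio.h>

#define SHARES_WINDOW_NS 10000000ULL	/* matches the patch's 10 msec default */

struct demo_rq {
	uint64_t last_update;	/* timestamp of the previous update */
	uint64_t window_start;	/* start of the current averaging window */
	uint64_t load_sum;	/* weight * time, halved once per completed window */
	uint64_t weight;	/* instantaneous load weight */
};

/* Fold the elapsed time into the sum, then decay completed windows. */
static void demo_update_load(struct demo_rq *rq, uint64_t now)
{
	/* Accumulate the current weight over the time since the last update. */
	rq->load_sum += (now - rq->last_update) * rq->weight;
	rq->last_update = now;

	/* Each completed window halves the accumulated history. */
	while (now - rq->window_start >= SHARES_WINDOW_NS) {
		rq->load_sum /= 2;
		rq->window_start += SHARES_WINDOW_NS;
	}
}

int main(void)
{
	struct demo_rq rq = { 0, 0, 0, 1024 };
	uint64_t t;

	/* Advance a simulated clock in 5 msec steps and watch the sum settle. */
	for (t = 5000000ULL; t <= 50000000ULL; t += 5000000ULL) {
		demo_update_load(&rq, t);
		printf("t=%2llu msec  load_sum=%llu\n",
		       (unsigned long long)(t / 1000000ULL),
		       (unsigned long long)rq.load_sum);
	}
	return 0;
}

With a constant weight the sum stops growing: each window's new contribution is balanced by the halving, which is the exponential-decay behaviour the added comment describes, with sysctl_sched_shares_window setting the decay period.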