author		Paul Turner <pjt@google.com>	2011-07-21 12:43:30 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-08-14 06:03:26 -0400
commit		ec12cb7f31e28854efae7dd6f9544e0a66379040 (patch)
tree		30a7293a4f9d566043f524bb4c43d4ae8b0560db /kernel/sched_fair.c
parent		a790de99599a29ad3f18667530cf4b9f4b7e3234 (diff)
sched: Accumulate per-cfs_rq cpu usage and charge against bandwidth
Account bandwidth usage on the cfs_rq level versus the task_groups to
which they belong. Whether we are tracking bandwidth on a given cfs_rq
is maintained under cfs_rq->runtime_enabled.

cfs_rq's which belong to a bandwidth constrained task_group have their
runtime accounted via the update_curr() path, which withdraws bandwidth
from the global pool as desired. Updates involving the global pool are
currently protected under cfs_bandwidth->lock; local runtime is
protected by rq->lock.

This patch only assigns and tracks quota; no action is taken in the case
that cfs_rq->runtime_used exceeds cfs_rq->runtime_assigned.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Nikhil Rao <ncrao@google.com>
Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Reviewed-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110721184757.179386821@google.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
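[Editor's illustration] The sketch below is a minimal user-space C model of the slice-based refill the patch introduces: the local (per-cfs_rq) pool is charged for execution time and, once it reaches zero or goes negative, withdraws one slice (default 5 ms) plus the accumulated deficit from the global (per-task_group) pool. All names here (global_pool, local_pool, refill_local, consume) are invented for illustration only; the real kernel code operates on struct cfs_bandwidth and struct cfs_rq and takes cfs_bandwidth->lock and rq->lock around the corresponding updates.

/*
 * Simplified user-space model of the per-cfs_rq bandwidth accounting
 * described above.  Names are hypothetical and do not exist in the kernel.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC	1000ULL
#define BW_SLICE_US	5000ULL			/* default slice: 5 ms */
#define RUNTIME_INF	((uint64_t)~0ULL)

struct global_pool {			/* models struct cfs_bandwidth */
	uint64_t quota;			/* runtime per period, or RUNTIME_INF */
	uint64_t runtime;		/* runtime left in the current period */
};

struct local_pool {			/* models the cfs_rq-side fields */
	int enabled;			/* models cfs_rq->runtime_enabled */
	int64_t runtime_remaining;	/* may go negative before a refill */
};

/* models assign_cfs_rq_runtime(): withdraw up to one slice plus the deficit */
static void refill_local(struct global_pool *g, struct local_pool *l)
{
	uint64_t slice = BW_SLICE_US * NSEC_PER_USEC;
	/* positive sum, since runtime_remaining <= 0 at this point */
	uint64_t want = slice - l->runtime_remaining;
	uint64_t amount = 0;

	if (g->quota == RUNTIME_INF)
		amount = want;
	else if (g->runtime > 0) {
		amount = want < g->runtime ? want : g->runtime;
		g->runtime -= amount;
	}
	l->runtime_remaining += amount;
}

/* models account_cfs_rq_runtime(): charge delta_exec, refill when exhausted */
static void consume(struct global_pool *g, struct local_pool *l,
		    uint64_t delta_exec)
{
	if (!l->enabled)
		return;
	l->runtime_remaining -= delta_exec;
	if (l->runtime_remaining > 0)
		return;
	refill_local(g, l);
}

int main(void)
{
	/* 20 ms of quota available in the global pool for this period */
	struct global_pool g = { .quota = 20000000ULL, .runtime = 20000000ULL };
	struct local_pool l = { .enabled = 1, .runtime_remaining = 0 };

	consume(&g, &l, 0);		/* initial refill: one 5 ms slice */
	consume(&g, &l, 3000000ULL);	/* run 3 ms: 2 ms still local, no refill */
	consume(&g, &l, 4000000ULL);	/* run 4 ms: 2 ms deficit triggers refill */

	printf("local remaining: %lld ns, global remaining: %llu ns\n",
	       (long long)l.runtime_remaining,
	       (unsigned long long)g.runtime);
	return 0;
}

With a 20 ms global quota, this model ends the three calls with 5 ms left in the local pool and 8 ms left in the global pool: each refill takes at most one slice plus the local deficit, which is the behaviour assign_cfs_rq_runtime() implements below.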
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	79
1 file changed, 77 insertions(+), 2 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f24f4171019d..9502aa899f73 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -89,6 +89,20 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
  */
 unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
 
+#ifdef CONFIG_CFS_BANDWIDTH
+/*
+ * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
+ * each time a cfs_rq requests quota.
+ *
+ * Note: in the case that the slice exceeds the runtime remaining (either due
+ * to consumption or the quota being specified to be smaller than the slice)
+ * we will always only issue the remaining available time.
+ *
+ * default: 5 msec, units: microseconds
+ */
+unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
+#endif
+
 static const struct sched_class fair_sched_class;
 
 /**************************************************************
@@ -292,6 +306,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+				   unsigned long delta_exec);
 
 /**************************************************************
  * Scheduling class tree data structure manipulation methods:
@@ -583,6 +599,8 @@ static void update_curr(struct cfs_rq *cfs_rq)
 		cpuacct_charge(curtask, delta_exec);
 		account_group_exec_runtime(curtask, delta_exec);
 	}
+
+	account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
 static inline void
@@ -1248,6 +1266,58 @@ static inline u64 default_cfs_period(void)
 {
 	return 100000000ULL;
 }
+
+static inline u64 sched_cfs_bandwidth_slice(void)
+{
+	return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
+}
+
+static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+{
+	struct task_group *tg = cfs_rq->tg;
+	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+	u64 amount = 0, min_amount;
+
+	/* note: this is a positive sum as runtime_remaining <= 0 */
+	min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
+
+	raw_spin_lock(&cfs_b->lock);
+	if (cfs_b->quota == RUNTIME_INF)
+		amount = min_amount;
+	else if (cfs_b->runtime > 0) {
+		amount = min(cfs_b->runtime, min_amount);
+		cfs_b->runtime -= amount;
+	}
+	raw_spin_unlock(&cfs_b->lock);
+
+	cfs_rq->runtime_remaining += amount;
+}
+
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+				     unsigned long delta_exec)
+{
+	if (!cfs_rq->runtime_enabled)
+		return;
+
+	cfs_rq->runtime_remaining -= delta_exec;
+	if (cfs_rq->runtime_remaining > 0)
+		return;
+
+	assign_cfs_rq_runtime(cfs_rq);
+}
+
+static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+						   unsigned long delta_exec)
+{
+	if (!cfs_rq->runtime_enabled)
+		return;
+
+	__account_cfs_rq_runtime(cfs_rq, delta_exec);
+}
+
+#else
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
+				   unsigned long delta_exec) {}
 #endif
 
 /**************************************************
@@ -4266,8 +4336,13 @@ static void set_curr_task_fair(struct rq *rq)
 {
 	struct sched_entity *se = &rq->curr->se;
 
-	for_each_sched_entity(se)
-		set_next_entity(cfs_rq_of(se), se);
+	for_each_sched_entity(se) {
+		struct cfs_rq *cfs_rq = cfs_rq_of(se);
+
+		set_next_entity(cfs_rq, se);
+		/* ensure bandwidth has been allocated on our new cfs_rq */
+		account_cfs_rq_runtime(cfs_rq, 0);
+	}
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED