about summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
authorZhihui Zhang <zzhsuny@gmail.com>2014-09-20 21:24:36 -0400
committerIngo Molnar <mingo@kernel.org>2014-09-21 03:00:02 -0400
commit9c58c79a8a76c510cd3a5012c536d4fe3c81ec3b (patch)
treef1ab38bfa70d4f9c2a7ef1008c5de9c7d5729d8f /kernel/sched
parentbd61c98f9b3f142cd63f9e15acfe203bec9e5f5a (diff)
sched: Clean up some typos and grammatical errors in code/comments
Signed-off-by: Zhihui Zhang <zzhsuny@gmail.com>
Cc: peterz@infradead.org
Link: http://lkml.kernel.org/r/1411262676-19928-1-git-send-email-zzhsuny@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c4
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/sched/sched.h2
3 files changed, 6 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 61ee2b327a27..a2841904f2d5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8069,7 +8069,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8069 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth; 8069 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
8070 8070
8071 quota = normalize_cfs_quota(tg, d); 8071 quota = normalize_cfs_quota(tg, d);
8072 parent_quota = parent_b->hierarchal_quota; 8072 parent_quota = parent_b->hierarchical_quota;
8073 8073
8074 /* 8074 /*
8075 * ensure max(child_quota) <= parent_quota, inherit when no 8075 * ensure max(child_quota) <= parent_quota, inherit when no
@@ -8080,7 +8080,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
8080 else if (parent_quota != RUNTIME_INF && quota > parent_quota) 8080 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
8081 return -EINVAL; 8081 return -EINVAL;
8082 } 8082 }
8083 cfs_b->hierarchal_quota = quota; 8083 cfs_b->hierarchical_quota = quota;
8084 8084
8085 return 0; 8085 return 0;
8086} 8086}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 74fa2c210b6d..2a1e6ac6bb32 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2224,8 +2224,8 @@ static __always_inline u64 decay_load(u64 val, u64 n)
2224 2224
2225 /* 2225 /*
2226 * As y^PERIOD = 1/2, we can combine 2226 * As y^PERIOD = 1/2, we can combine
2227 * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD) 2227 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2228 * With a look-up table which covers k^n (n<PERIOD) 2228 * With a look-up table which covers y^n (n<PERIOD)
2229 * 2229 *
2230 * To achieve constant time decay_load. 2230 * To achieve constant time decay_load.
2231 */ 2231 */
@@ -6410,7 +6410,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
6410 goto force_balance; 6410 goto force_balance;
6411 6411
6412 /* 6412 /*
6413 * If the local group is more busy than the selected busiest group 6413 * If the local group is busier than the selected busiest group
6414 * don't try and pull any tasks. 6414 * don't try and pull any tasks.
6415 */ 6415 */
6416 if (local->avg_load >= busiest->avg_load) 6416 if (local->avg_load >= busiest->avg_load)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index aa0f73ba3777..1bc6aad1391a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -188,7 +188,7 @@ struct cfs_bandwidth {
188 raw_spinlock_t lock; 188 raw_spinlock_t lock;
189 ktime_t period; 189 ktime_t period;
190 u64 quota, runtime; 190 u64 quota, runtime;
191 s64 hierarchal_quota; 191 s64 hierarchical_quota;
192 u64 runtime_expires; 192 u64 runtime_expires;
193 193
194 int idle, timer_active; 194 int idle, timer_active;