Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 41 ++++++++++++++++++++++++++++-------------
 1 file changed, 28 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index c62acf45d3b9..2d12893b8b0f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -293,7 +293,7 @@ static DEFINE_SPINLOCK(task_group_lock);
  * limitation from this.)
  */
 #define MIN_SHARES      2
-#define MAX_SHARES      (1UL << 18)
+#define MAX_SHARES      (1UL << (18 + SCHED_LOAD_RESOLUTION))
 
 static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
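
Note: MAX_SHARES grows by SCHED_LOAD_RESOLUTION bits because task-group shares are now stored in the same higher-resolution fixed point as task weights. The helpers this hunk (and the ones below) rely on are defined in the companion sched.h change; as a rough sketch of what they are assumed to look like (the value 10 is illustrative, not taken from this diff):

/* Sketch of the assumed companion definitions -- not part of this diff. */
#define SCHED_LOAD_RESOLUTION   10
#define scale_load(w)           ((w) << SCHED_LOAD_RESOLUTION)
#define scale_load_down(w)      ((w) >> SCHED_LOAD_RESOLUTION)

With these, internal load arithmetic works on weights shifted up by SCHED_LOAD_RESOLUTION bits, while user-visible interfaces convert back with scale_load_down().
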
@@ -1330,13 +1330,25 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 {
         u64 tmp;
 
-        tmp = (u64)delta_exec * weight;
+        /*
+         * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
+         * entities since MIN_SHARES = 2. Treat weight as 1 if less than
+         * 2^SCHED_LOAD_RESOLUTION.
+         */
+        if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
+                tmp = (u64)delta_exec * scale_load_down(weight);
+        else
+                tmp = (u64)delta_exec;
 
         if (!lw->inv_weight) {
-                if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
+                unsigned long w = scale_load_down(lw->weight);
+
+                if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                         lw->inv_weight = 1;
+                else if (unlikely(!w))
+                        lw->inv_weight = WMULT_CONST;
                 else
-                        lw->inv_weight = WMULT_CONST / lw->weight;
+                        lw->inv_weight = WMULT_CONST / w;
         }
 
         /*
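
To make the new arithmetic concrete, here is a minimal userspace sketch of the same calculation. The constants and the plain >> 32 stand in for the kernel's WMULT/SRR rounding machinery, so this is an approximation of the path above, not the real calc_delta_mine():

/*
 * Minimal userspace sketch of the scaled-weight path above; constants
 * are illustrative and overflow handling is simplified.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_LOAD_RESOLUTION   10              /* assumed, see note above */
#define WMULT_CONST             (1ULL << 32)
#define scale_load_down(w)      ((w) >> SCHED_LOAD_RESOLUTION)

static uint64_t calc_delta_sketch(uint64_t delta_exec, unsigned long weight,
                                  unsigned long lw_weight)
{
        unsigned long w = scale_load_down(lw_weight);
        uint64_t inv = w ? WMULT_CONST / w : WMULT_CONST;
        uint64_t tmp;

        /* Down-scale the task weight so the 64-bit product cannot overflow. */
        if (weight > (1UL << SCHED_LOAD_RESOLUTION))
                tmp = delta_exec * scale_load_down(weight);
        else
                tmp = delta_exec;               /* tiny group weights count as 1 */

        return (tmp * inv) >> 32;               /* ~ delta_exec * weight / lw_weight */
}

int main(void)
{
        /* A nice-0 task (weight 1024 << 10) on a queue of two such tasks. */
        unsigned long nice0 = 1024UL << SCHED_LOAD_RESOLUTION;

        printf("%llu\n", (unsigned long long)
               calc_delta_sketch(1000000, nice0, 2 * nice0));
        return 0;
}

For a 1 ms delta_exec this prints 500000, i.e. half the wall time, exactly as with the old resolution: down-scaling the weight before the multiply is what keeps the 64-bit product in range now that weights carry extra bits.
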
@@ -1778,17 +1790,20 @@ static void dec_nr_running(struct rq *rq)
 
 static void set_load_weight(struct task_struct *p)
 {
+        int prio = p->static_prio - MAX_RT_PRIO;
+        struct load_weight *load = &p->se.load;
+
         /*
          * SCHED_IDLE tasks get minimal weight:
          */
         if (p->policy == SCHED_IDLE) {
-                p->se.load.weight = WEIGHT_IDLEPRIO;
-                p->se.load.inv_weight = WMULT_IDLEPRIO;
+                load->weight = scale_load(WEIGHT_IDLEPRIO);
+                load->inv_weight = WMULT_IDLEPRIO;
                 return;
         }
 
-        p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
-        p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
+        load->weight = scale_load(prio_to_weight[prio]);
+        load->inv_weight = prio_to_wmult[prio];
 }
 
 static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
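
set_load_weight() is where the extra resolution actually enters a task's weight: prio_to_weight[] keeps its historical values (1024 for nice 0) and only the stored load->weight is shifted up, while prio_to_wmult[] stays unscaled because calc_delta_mine() above now rebuilds inv_weight from scale_load_down(lw->weight). A tiny self-contained check of that scaling, under the same assumed 10-bit resolution:

#include <assert.h>

#define SCHED_LOAD_RESOLUTION   10                      /* assumed value */
#define scale_load(w)           ((w) << SCHED_LOAD_RESOLUTION)

int main(void)
{
        /* prio_to_weight[20] (nice 0) is 1024; internally it becomes 1024 << 10. */
        assert(scale_load(1024UL) == 1048576UL);
        return 0;
}
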
@@ -2564,7 +2579,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 {
         struct rq *rq = cpu_rq(cpu);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_TTWU_QUEUE)
+#if defined(CONFIG_SMP)
         if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
                 ttwu_queue_remote(p, cpu);
                 return;
@@ -6527,7 +6542,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
         cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
         printk(KERN_CONT " %s", str);
-        if (group->cpu_power != SCHED_LOAD_SCALE) {
+        if (group->cpu_power != SCHED_POWER_SCALE) {
                 printk(KERN_CONT " (cpu_power = %d)",
                         group->cpu_power);
         }
@@ -7902,7 +7917,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
                 rq->sd = NULL;
                 rq->rd = NULL;
-                rq->cpu_power = SCHED_LOAD_SCALE;
+                rq->cpu_power = SCHED_POWER_SCALE;
                 rq->post_schedule = 0;
                 rq->active_balance = 0;
                 rq->next_balance = jiffies;
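
The two hunks above switch cpu_power comparisons and initialisation from SCHED_LOAD_SCALE to SCHED_POWER_SCALE, so CPU/group capacity keeps its own fixed-point scale instead of inheriting the now-larger load resolution. A sketch of the assumed companion definition (not part of this diff):

/* Assumed sketch: capacity keeps the original 10-bit scale. */
#define SCHED_POWER_SHIFT       10
#define SCHED_POWER_SCALE       (1L << SCHED_POWER_SHIFT)
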
@@ -8806,14 +8821,14 @@ cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
 static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
                                 u64 shareval)
 {
-        return sched_group_set_shares(cgroup_tg(cgrp), shareval);
+        return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
 }
 
 static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 {
         struct task_group *tg = cgroup_tg(cgrp);
 
-        return (u64) tg->shares;
+        return (u64) scale_load_down(tg->shares);
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
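
Finally, the cgroup cpu.shares interface scales values up on the write path and back down on the read path, so whatever userspace writes (within the MIN_SHARES/MAX_SHARES clamping done by sched_group_set_shares()) is what it reads back, and existing configurations keep working. A hedged sketch of that round trip, with illustrative names rather than the kernel functions:

#include <stdio.h>

#define SCHED_LOAD_RESOLUTION   10              /* assumed value */
#define scale_load(w)           ((w) << SCHED_LOAD_RESOLUTION)
#define scale_load_down(w)      ((w) >> SCHED_LOAD_RESOLUTION)

/* Mirrors cpu_shares_write_u64() followed by cpu_shares_read_u64(). */
static unsigned long shares_roundtrip(unsigned long shareval)
{
        unsigned long shares = scale_load(shareval);    /* stored in tg->shares */

        return scale_load_down(shares);                 /* reported back unchanged */
}

int main(void)
{
        printf("%lu\n", shares_roundtrip(2048));        /* prints 2048 */
        return 0;
}
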