diff options
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 28 |
1 file changed, 20 insertions, 8 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 375e9c677d58..bb504e1839e5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -293,7 +293,7 @@ static DEFINE_SPINLOCK(task_group_lock); | |||
293 | * limitation from this.) | 293 | * limitation from this.) |
294 | */ | 294 | */ |
295 | #define MIN_SHARES 2 | 295 | #define MIN_SHARES 2 |
296 | #define MAX_SHARES (1UL << 18) | 296 | #define MAX_SHARES (1UL << (18 + SCHED_LOAD_RESOLUTION)) |
297 | 297 | ||
298 | static int root_task_group_load = ROOT_TASK_GROUP_LOAD; | 298 | static int root_task_group_load = ROOT_TASK_GROUP_LOAD; |
299 | #endif | 299 | #endif |
@@ -1330,13 +1330,25 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight, | |||
1330 | { | 1330 | { |
1331 | u64 tmp; | 1331 | u64 tmp; |
1332 | 1332 | ||
1333 | tmp = (u64)delta_exec * weight; | 1333 | /* |
1334 | * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched | ||
1335 | * entities since MIN_SHARES = 2. Treat weight as 1 if less than | ||
1336 | * 2^SCHED_LOAD_RESOLUTION. | ||
1337 | */ | ||
1338 | if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION))) | ||
1339 | tmp = (u64)delta_exec * scale_load_down(weight); | ||
1340 | else | ||
1341 | tmp = (u64)delta_exec; | ||
1334 | 1342 | ||
1335 | if (!lw->inv_weight) { | 1343 | if (!lw->inv_weight) { |
1336 | if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST)) | 1344 | unsigned long w = scale_load_down(lw->weight); |
1345 | |||
1346 | if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST)) | ||
1337 | lw->inv_weight = 1; | 1347 | lw->inv_weight = 1; |
1348 | else if (unlikely(!w)) | ||
1349 | lw->inv_weight = WMULT_CONST; | ||
1338 | else | 1350 | else |
1339 | lw->inv_weight = WMULT_CONST / lw->weight; | 1351 | lw->inv_weight = WMULT_CONST / w; |
1340 | } | 1352 | } |
1341 | 1353 | ||
1342 | /* | 1354 | /* |
@@ -1785,12 +1797,12 @@ static void set_load_weight(struct task_struct *p) | |||
1785 | * SCHED_IDLE tasks get minimal weight: | 1797 | * SCHED_IDLE tasks get minimal weight: |
1786 | */ | 1798 | */ |
1787 | if (p->policy == SCHED_IDLE) { | 1799 | if (p->policy == SCHED_IDLE) { |
1788 | load->weight = WEIGHT_IDLEPRIO; | 1800 | load->weight = scale_load(WEIGHT_IDLEPRIO); |
1789 | load->inv_weight = WMULT_IDLEPRIO; | 1801 | load->inv_weight = WMULT_IDLEPRIO; |
1790 | return; | 1802 | return; |
1791 | } | 1803 | } |
1792 | 1804 | ||
1793 | load->weight = prio_to_weight[prio]; | 1805 | load->weight = scale_load(prio_to_weight[prio]); |
1794 | load->inv_weight = prio_to_wmult[prio]; | 1806 | load->inv_weight = prio_to_wmult[prio]; |
1795 | } | 1807 | } |
1796 | 1808 | ||
@@ -8809,14 +8821,14 @@ cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
8809 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, | 8821 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
8810 | u64 shareval) | 8822 | u64 shareval) |
8811 | { | 8823 | { |
8812 | return sched_group_set_shares(cgroup_tg(cgrp), shareval); | 8824 | return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval)); |
8813 | } | 8825 | } |
8814 | 8826 | ||
8815 | static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) | 8827 | static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) |
8816 | { | 8828 | { |
8817 | struct task_group *tg = cgroup_tg(cgrp); | 8829 | struct task_group *tg = cgroup_tg(cgrp); |
8818 | 8830 | ||
8819 | return (u64) tg->shares; | 8831 | return (u64) scale_load_down(tg->shares); |
8820 | } | 8832 | } |
8821 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 8833 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
8822 | 8834 | ||