summary | refs | log | tree | commit | diff | stats
path: root/kernel/sched
diff options
context:
space:
mode:
author    Linus Torvalds <torvalds@linux-foundation.org>  2019-10-12 18:29:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-10-12 18:29:54 -0400
commit    328fefadd9cfa15cd6ab746553d9ef13303c11a6 (patch)
tree      4e6188b7c3592aa57fd55778cfde5ad42bee0eae /kernel/sched
parent    465a7e291fd4f056d81baf5d5ed557bdb44c5457 (diff)
parent    68e7a4d66b0ce04bf18ff2ffded5596ab3618585 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two fixes: a guest-cputime accounting fix, and a cgroup bandwidth
  quota precision fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/vtime: Fix guest/system mis-accounting on task switch
  sched/fair: Scale bandwidth quota and period without losing quota/period ratio precision
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/cputime.c |  6
-rw-r--r--  kernel/sched/fair.c    | 36
2 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 2305ce89a26c..46ed4e1383e2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	/* We might have scheduled out from guest path */
-	if (current->flags & PF_VCPU)
+	if (tsk->flags & PF_VCPU)
 		vtime_account_guest(tsk, vtime);
 	else
 		__vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
 	 */
 	write_seqcount_begin(&vtime->seqcount);
 	__vtime_account_system(tsk, vtime);
-	current->flags |= PF_VCPU;
+	tsk->flags |= PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 
 	write_seqcount_begin(&vtime->seqcount);
 	vtime_account_guest(tsk, vtime);
-	current->flags &= ~PF_VCPU;
+	tsk->flags &= ~PF_VCPU;
 	write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 83ab35e2374f..682a754ea3e1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 		if (++count > 3) {
 			u64 new, old = ktime_to_ns(cfs_b->period);
 
-			new = (old * 147) / 128; /* ~115% */
-			new = min(new, max_cfs_quota_period);
-
-			cfs_b->period = ns_to_ktime(new);
-
-			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
-			cfs_b->quota *= new;
-			cfs_b->quota = div64_u64(cfs_b->quota, old);
-
-			pr_warn_ratelimited(
-	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
-				smp_processor_id(),
-				div_u64(new, NSEC_PER_USEC),
-				div_u64(cfs_b->quota, NSEC_PER_USEC));
+			/*
+			 * Grow period by a factor of 2 to avoid losing precision.
+			 * Precision loss in the quota/period ratio can cause __cfs_schedulable
+			 * to fail.
+			 */
+			new = old * 2;
+			if (new < max_cfs_quota_period) {
+				cfs_b->period = ns_to_ktime(new);
+				cfs_b->quota *= 2;
+
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(new, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			} else {
+				pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+					smp_processor_id(),
+					div_u64(old, NSEC_PER_USEC),
+					div_u64(cfs_b->quota, NSEC_PER_USEC));
+			}
 
 			/* reset count so we don't come right back in here */
 			count = 0;