Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 70
1 file changed, 45 insertions(+), 25 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 10d218ab69f2..4e777b47eeda 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -384,10 +384,10 @@ static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
 
 #ifdef CONFIG_SCHED_DEBUG
 int sched_nr_latency_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
-	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 
 	if (ret || !write)
 		return ret;
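
The signature change above follows the tree-wide sysctl cleanup that dropped the unused struct file * argument from proc handlers, so proc_dointvec_minmax() now takes five arguments instead of six. As a minimal sketch of a handler written against the new signature (the tunable and its ctl_table wiring are hypothetical, not part of this patch):

static int my_tunable;	/* hypothetical knob */

int my_tunable_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* parse or emit the value with the new five-argument helper */
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/* a real handler would recompute state derived from my_tunable here */
	return 0;
}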
@@ -513,6 +513,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	if (entity_is_task(curr)) {
 		struct task_struct *curtask = task_of(curr);
 
+		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
 		cpuacct_charge(curtask, delta_exec);
 		account_group_exec_runtime(curtask, delta_exec);
 	}
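
trace_sched_stat_runtime() is not defined in this file; like the other sched_stat_* events it is generated from a TRACE_EVENT() description in include/trace/events/sched.h, and the hunk only adds the call site in update_curr(). Roughly (a from-memory sketch, not the verbatim kernel declaration, perf-specific details omitted), the event has this shape:

TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array( char,	comm,	TASK_COMM_LEN	)
		__field( pid_t,	pid			)
		__field( u64,	runtime			)
		__field( u64,	vruntime		)
	),

	TP_fast_assign(
		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
		__entry->pid		= tsk->pid;
		__entry->runtime	= runtime;
		__entry->vruntime	= vruntime;
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		__entry->comm, __entry->pid,
		(unsigned long long)__entry->runtime,
		(unsigned long long)__entry->vruntime)
);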
@@ -709,31 +710,28 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (initial && sched_feat(START_DEBIT))
 		vruntime += sched_vslice(cfs_rq, se);
 
-	if (!initial) {
-		/* sleeps upto a single latency don't count. */
-		if (sched_feat(FAIR_SLEEPERS)) {
-			unsigned long thresh = sysctl_sched_latency;
+	/* sleeps up to a single latency don't count. */
+	if (!initial && sched_feat(FAIR_SLEEPERS)) {
+		unsigned long thresh = sysctl_sched_latency;
 
-			/*
-			 * Convert the sleeper threshold into virtual time.
-			 * SCHED_IDLE is a special sub-class. We care about
-			 * fairness only relative to other SCHED_IDLE tasks,
-			 * all of which have the same weight.
-			 */
-			if (sched_feat(NORMALIZED_SLEEPER) &&
-					(!entity_is_task(se) ||
-					 task_of(se)->policy != SCHED_IDLE))
-				thresh = calc_delta_fair(thresh, se);
+		/*
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
+		 */
+		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
+				 task_of(se)->policy != SCHED_IDLE))
+			thresh = calc_delta_fair(thresh, se);
 
-			/*
-			 * Halve their sleep time's effect, to allow
-			 * for a gentler effect of sleepers:
-			 */
-			if (sched_feat(GENTLE_FAIR_SLEEPERS))
-				thresh >>= 1;
+		/*
+		 * Halve their sleep time's effect, to allow
+		 * for a gentler effect of sleepers:
+		 */
+		if (sched_feat(GENTLE_FAIR_SLEEPERS))
+			thresh >>= 1;
 
-			vruntime -= thresh;
-		}
+		vruntime -= thresh;
 	}
 
 	/* ensure we never gain time by being placed backwards. */
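
The restructuring above is behavior-preserving: the FAIR_SLEEPERS test is folded into the outer !initial condition, which removes one nesting level and re-indents the block. To get a feel for the arithmetic, here is a plain userspace mock-up of the sleeper credit; the 20 ms latency value, the entity weight, and the simplified stand-in for calc_delta_fair() are all illustrative, not the kernel's defaults:

#include <stdio.h>

#define NICE_0_LOAD	1024ULL

/* stand-in for calc_delta_fair(): delta scaled by NICE_0_LOAD/weight */
static unsigned long long scale_by_weight(unsigned long long delta,
					  unsigned long long weight)
{
	return delta * NICE_0_LOAD / weight;
}

int main(void)
{
	int normalized_sleeper = 1, gentle_fair_sleepers = 1;
	unsigned long long weight = 2048;		/* heavier than nice-0 */
	unsigned long long thresh = 20000000ULL;	/* 20 ms latency, in ns */

	/* NORMALIZED_SLEEPER: convert the threshold into virtual time */
	if (normalized_sleeper)
		thresh = scale_by_weight(thresh, weight);

	/* GENTLE_FAIR_SLEEPERS: halve the effect of the sleep */
	if (gentle_fair_sleepers)
		thresh >>= 1;

	/* the waking entity is placed thresh ns before min_vruntime */
	printf("sleeper credit: %llu ns\n", thresh);	/* 5000000 ns here */
	return 0;
}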
@@ -1342,7 +1340,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
-		if (sched_feat(AFFINE_WAKEUPS))
+		if (sched_feat(AFFINE_WAKEUPS) &&
+		    cpumask_test_cpu(cpu, &p->cpus_allowed))
 			want_affine = 1;
 		new_cpu = prev_cpu;
 	}
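
The added cpumask_test_cpu() check makes affine wakeups respect task affinity: a wakeup is only considered for placement near the waking CPU if that CPU is actually set in p->cpus_allowed. Spelled out as a hypothetical helper (not part of the patch):

/* consider an affine wakeup only toward a CPU the task may run on */
static inline int can_wake_affine(struct task_struct *p, int this_cpu)
{
	return sched_feat(AFFINE_WAKEUPS) &&
	       cpumask_test_cpu(this_cpu, &p->cpus_allowed);
}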
@@ -1940,6 +1939,25 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
+unsigned int get_rr_interval_fair(struct task_struct *task)
+{
+	struct sched_entity *se = &task->se;
+	unsigned long flags;
+	struct rq *rq;
+	unsigned int rr_interval = 0;
+
+	/*
+	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
+	 * idle runqueue:
+	 */
+	rq = task_rq_lock(task, &flags);
+	if (rq->cfs.load.weight)
+		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+	task_rq_unlock(rq, &flags);
+
+	return rr_interval;
+}
+
 /*
  * All the scheduling class methods:
  */
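
With get_rr_interval_fair() wired up (see the method-table hunk below), sched_rr_get_interval(2) stops being meaningful only for SCHED_RR tasks: for a SCHED_OTHER task it now reports the task's current CFS slice, or 0 if the runqueue is otherwise idle. A quick userspace probe:

#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means the calling task, SCHED_OTHER by default */
	if (sched_rr_get_interval(0, &ts) != 0) {
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}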
@@ -1968,6 +1986,8 @@ static const struct sched_class fair_sched_class = {
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
 
+	.get_rr_interval	= get_rr_interval_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.moved_group		= moved_group_fair,
 #endif
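
Registering the hook in the sched_class method table keeps the syscall path class-agnostic: sys_sched_rr_get_interval() can ask whichever class the task belongs to for its slice. Schematically (a simplified sketch of the core-side dispatch, omitting the pid lookup and locking done by the real syscall):

	unsigned int time_slice = p->sched_class->get_rr_interval(p);
	struct timespec t;

	jiffies_to_timespec(time_slice, &t);
	/* ...then copy t back to userspace... */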