author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:06:17 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-21 12:06:17 -0400
commit		8e4bc3dd2ca07d77882eba73cea240aba95a1854
tree		da105cd2c4f51c78c401c00d8b399572914694ed /kernel/sched_fair.c
parent		bd4c3a3441144cd46d1f544046523724c5bc6e94
parent		0d721ceadbeaa24d7f9dd41b3e5e29912327a7e1
Merge branch 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: Simplify sys_sched_rr_get_interval() system call
  sched: Fix potential NULL dereference of doms_cur
sched: Fix raciness in runqueue_is_locked()
sched: Re-add lost cpu_allowed check to sched_fair.c::select_task_rq_fair()
sched: Remove unneeded indentation in sched_fair.c::place_entity()
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	65
1 file changed, 42 insertions(+), 23 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 990b188803ce..ecc637a0d591 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -710,31 +710,28 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 	if (initial && sched_feat(START_DEBIT))
 		vruntime += sched_vslice(cfs_rq, se);
 
-	if (!initial) {
-		/* sleeps upto a single latency don't count. */
-		if (sched_feat(FAIR_SLEEPERS)) {
-			unsigned long thresh = sysctl_sched_latency;
+	/* sleeps up to a single latency don't count. */
+	if (!initial && sched_feat(FAIR_SLEEPERS)) {
+		unsigned long thresh = sysctl_sched_latency;
 
-			/*
-			 * Convert the sleeper threshold into virtual time.
-			 * SCHED_IDLE is a special sub-class. We care about
-			 * fairness only relative to other SCHED_IDLE tasks,
-			 * all of which have the same weight.
-			 */
-			if (sched_feat(NORMALIZED_SLEEPER) &&
-					(!entity_is_task(se) ||
-					 task_of(se)->policy != SCHED_IDLE))
-				thresh = calc_delta_fair(thresh, se);
+		/*
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
+		 */
+		if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
+				 task_of(se)->policy != SCHED_IDLE))
+			thresh = calc_delta_fair(thresh, se);
 
-			/*
-			 * Halve their sleep time's effect, to allow
-			 * for a gentler effect of sleepers:
-			 */
-			if (sched_feat(GENTLE_FAIR_SLEEPERS))
-				thresh >>= 1;
+		/*
+		 * Halve their sleep time's effect, to allow
+		 * for a gentler effect of sleepers:
+		 */
+		if (sched_feat(GENTLE_FAIR_SLEEPERS))
+			thresh >>= 1;
 
-			vruntime -= thresh;
-		}
+		vruntime -= thresh;
 	}
 
 	/* ensure we never gain time by being placed backwards. */
@@ -1343,7 +1340,8 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 	int sync = wake_flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
-		if (sched_feat(AFFINE_WAKEUPS))
+		if (sched_feat(AFFINE_WAKEUPS) &&
+		    cpumask_test_cpu(cpu, &p->cpus_allowed))
 			want_affine = 1;
 		new_cpu = prev_cpu;
 	}
@@ -1941,6 +1939,25 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
+unsigned int get_rr_interval_fair(struct task_struct *task)
+{
+	struct sched_entity *se = &task->se;
+	unsigned long flags;
+	struct rq *rq;
+	unsigned int rr_interval = 0;
+
+	/*
+	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
+	 * idle runqueue:
+	 */
+	rq = task_rq_lock(task, &flags);
+	if (rq->cfs.load.weight)
+		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+	task_rq_unlock(rq, &flags);
+
+	return rr_interval;
+}
+
 /*
  * All the scheduling class methods:
  */
@@ -1969,6 +1986,8 @@ static const struct sched_class fair_sched_class = {
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
 
+	.get_rr_interval	= get_rr_interval_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.moved_group		= moved_group_fair,
 #endif
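
Note: the new get_rr_interval_fair() hook backs the "Simplify sys_sched_rr_get_interval() system call" change merged above; for a SCHED_OTHER task the syscall now reports the CFS slice computed by sched_slice(), converted to jiffies, and 0 when the runqueue carries no other load. The small userspace program below is an illustrative sketch, not part of the patch, showing how that value can be observed via the standard sched_rr_get_interval(2) wrapper; the exact number depends on HZ and on how many tasks share the runqueue.

/* Observe the time slice the kernel reports for the calling (SCHED_OTHER) thread. */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 means "the calling thread". */
	if (sched_rr_get_interval(0, &ts) == -1) {
		perror("sched_rr_get_interval");
		return 1;
	}

	/* Prints the slice (possibly 0 on an otherwise idle runqueue). */
	printf("reported time slice: %ld.%09ld s\n",
	       (long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}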