author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-03-07 13:08:17 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-03-07 13:08:17 -0500
commit     1d6789c3bc2b70bed1eb71aa616b1d94f9c23a63 (patch)
tree       eb5bcbae964e99242d54c060085a87fc27c2ddf8
parent     0a9e0703497013cf7a21455e51face5f048a187f (diff)
parent     521f1a2489c41f8b1181b0a8eb52e1c34284d50b (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
sched: don't allow rt_runtime_us to be zero for groups having rt tasks
sched: rt-group: fixup schedulability constraints calculation
sched: fix the wrong time slice value for SCHED_FIFO tasks
sched: export task_nice
sched: balance RT task resched only on runqueue
sched: retain vruntime
-rw-r--r--   include/linux/sched.h  |  4
-rw-r--r--   kernel/sched.c         | 36
-rw-r--r--   kernel/sched_fair.c    | 14
-rw-r--r--   kernel/sched_rt.c      |  6
4 files changed, 49 insertions, 11 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9ae4030067a9..11d8e9a74eff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -899,6 +899,10 @@ struct sched_class {
                      int running);
         void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
                              int oldprio, int running);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        void (*moved_group) (struct task_struct *p);
+#endif
 };
 
 struct load_weight {
diff --git a/kernel/sched.c b/kernel/sched.c
index dcd553cc4ee8..52b98675acb2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4422,7 +4422,7 @@ int task_nice(const struct task_struct *p)
 {
         return TASK_NICE(p);
 }
-EXPORT_SYMBOL_GPL(task_nice);
+EXPORT_SYMBOL(task_nice);
 
 /**
  * idle_cpu - is a given cpu idle currently?
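Dropping the _GPL suffix makes task_nice() available to any loadable module. A minimal sketch, not part of this merge (the module name and message text are invented), of a module that prints the nice value of the task loading it:

/* nice_demo.c: illustrative module using the newly exported task_nice() */
#include <linux/module.h>
#include <linux/sched.h>

static int __init nice_demo_init(void)
{
        /* 'current' is the task running the module load */
        printk(KERN_INFO "nice_demo: task %s has nice %d\n",
               current->comm, task_nice(current));
        return 0;
}

static void __exit nice_demo_exit(void)
{
}

module_init(nice_demo_init);
module_exit(nice_demo_exit);
MODULE_LICENSE("Dual BSD/GPL");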
@@ -5100,7 +5100,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
         time_slice = 0;
         if (p->policy == SCHED_RR) {
                 time_slice = DEF_TIMESLICE;
-        } else {
+        } else if (p->policy != SCHED_FIFO) {
                 struct sched_entity *se = &p->se;
                 unsigned long flags;
                 struct rq *rq;
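With the extra branch, sys_sched_rr_get_interval() reports a zero timeslice for SCHED_FIFO tasks (which run until they block or yield) instead of falling through to the CFS slice computation; SCHED_RR tasks still get DEF_TIMESLICE. A userspace sketch of the matching libc call; run it from a task already switched to SCHED_FIFO to see the zero result (the output format is illustrative):

/* rr_interval.c: query the RR interval for the calling task (pid 0) */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        if (sched_rr_get_interval(0, &ts) == 0)
                printf("timeslice: %ld.%09ld s\n",
                       (long)ts.tv_sec, ts.tv_nsec);
        else
                perror("sched_rr_get_interval");
        return 0;
}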
@@ -7625,6 +7625,11 @@ void sched_move_task(struct task_struct *tsk)
 
         set_task_rq(tsk, task_cpu(tsk));
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        if (tsk->sched_class->moved_group)
+                tsk->sched_class->moved_group(tsk);
+#endif
+
         if (on_rq) {
                 if (unlikely(running))
                         tsk->sched_class->set_curr_task(rq);
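For context (not shown in the patch): sched_move_task() is reached when a task is moved between CPU cgroups, and the new hook lets the task's scheduling class fix up per-group state at that point. A hedged userspace sketch of that trigger; the cgroup mount point, group name and pid are assumptions:

/* move_task.c: move a pid into another CPU cgroup, ending up in sched_move_task() */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *tasks = "/dev/cgroup/newgroup/tasks"; /* assumed mount/group */
        char buf[32];
        int fd, len;

        len = snprintf(buf, sizeof(buf), "%d\n", 1234);   /* pid to move, illustrative */
        fd = open(tasks, O_WRONLY);
        if (fd < 0 || write(fd, buf, len) != len)
                perror("move task");
        if (fd >= 0)
                close(fd);
        return 0;
}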
@@ -7721,9 +7726,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
         if (runtime == RUNTIME_INF)
                 return 1ULL << 16;
 
-        runtime *= (1ULL << 16);
-        div64_64(runtime, period);
-        return runtime;
+        return div64_64(runtime << 16, period);
 }
 
 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
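The old body scaled runtime by 2^16 but discarded the quotient returned by div64_64() and handed back the scaled runtime itself, so the schedulability check compared against a meaningless value. A small userspace sketch of the intended 16.16 fixed-point ratio, with plain 64-bit division standing in for div64_64() and made-up sample values:

/* ratio_demo.c: 16.16 fixed-point utilization ratio, as to_ratio() now computes it */
#include <stdint.h>
#include <stdio.h>

static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        return (runtime << 16) / period;  /* ratio scaled by 2^16 */
}

int main(void)
{
        uint64_t period  = 1000000;  /* e.g. 1s period in us (illustrative) */
        uint64_t runtime =  950000;  /* e.g. 950ms runtime in us (illustrative) */

        /* prints 62259, i.e. roughly 0.95 of the 65536 that means full utilization */
        printf("ratio = %llu of %llu\n",
               (unsigned long long)to_ratio(period, runtime),
               (unsigned long long)(1ULL << 16));
        return 0;
}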
@@ -7747,25 +7750,40 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
         return total + to_ratio(period, runtime) < global_ratio;
 }
 
+/* Must be called with tasklist_lock held */
+static inline int tg_has_rt_tasks(struct task_group *tg)
+{
+        struct task_struct *g, *p;
+        do_each_thread(g, p) {
+                if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
+                        return 1;
+        } while_each_thread(g, p);
+        return 0;
+}
+
 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
 {
         u64 rt_runtime, rt_period;
         int err = 0;
 
-        rt_period = sysctl_sched_rt_period * NSEC_PER_USEC;
+        rt_period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
         if (rt_runtime_us == -1)
-                rt_runtime = rt_period;
+                rt_runtime = RUNTIME_INF;
 
         mutex_lock(&rt_constraints_mutex);
+        read_lock(&tasklist_lock);
+        if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
+                err = -EBUSY;
+                goto unlock;
+        }
         if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
                 err = -EINVAL;
                 goto unlock;
         }
-        if (rt_runtime_us == -1)
-                rt_runtime = RUNTIME_INF;
         tg->rt_runtime = rt_runtime;
  unlock:
+        read_unlock(&tasklist_lock);
         mutex_unlock(&rt_constraints_mutex);
 
         return err;
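The tg_has_rt_tasks() check changes the user-visible behaviour of the group interface: writing a zero runtime for a group that still contains realtime tasks is now rejected with EBUSY rather than silently starving them. A userspace sketch, assuming a 2.6.25-era cgroup cpu controller mounted at /dev/cgroup and an illustrative group name:

/* rt_zero.c: try to set a group's RT runtime to 0 and report the EBUSY refusal */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* mount point and group name are assumptions */
        const char *path = "/dev/cgroup/rtgroup/cpu.rt_runtime_us";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "0", 1) < 0 && errno == EBUSY)
                fprintf(stderr, "refused: group still contains RT tasks\n");
        close(fd);
        return 0;
}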
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3df4d46994ca..e2a530515619 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1353,6 +1353,16 @@ static void set_curr_task_fair(struct rq *rq)
         set_next_entity(cfs_rq_of(se), se);
 }
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static void moved_group_fair(struct task_struct *p)
+{
+        struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+        update_curr(cfs_rq);
+        place_entity(cfs_rq, &p->se, 1);
+}
+#endif
+
 /*
  * All the scheduling class methods:
  */
@@ -1381,6 +1391,10 @@ static const struct sched_class fair_sched_class = {
 
         .prio_changed           = prio_changed_fair,
         .switched_to            = switched_to_fair,
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+        .moved_group            = moved_group_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 76e828517541..0a6d2e516420 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1107,9 +1107,11 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
                 pull_rt_task(rq);
                 /*
                  * If there's a higher priority task waiting to run
-                 * then reschedule.
+                 * then reschedule. Note, the above pull_rt_task
+                 * can release the rq lock and p could migrate.
+                 * Only reschedule if p is still on the same runqueue.
                  */
-                if (p->prio > rq->rt.highest_prio)
+                if (p->prio > rq->rt.highest_prio && rq->curr == p)
                         resched_task(p);
 #else
                 /* For UP simply resched on drop of prio */
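prio_changed_rt() is reached, for instance, when another process lowers a running SCHED_FIFO task's priority via sched_setscheduler(); as the new comment notes, pull_rt_task() can drop the rq lock in that window, so the added rq->curr == p test avoids rescheduling a task that has already migrated away. A userspace sketch of that trigger path (the pid and priority values are illustrative):

/* lower_prio.c: lower another RT task's priority, exercising prio_changed_rt() */
#include <sched.h>
#include <stdio.h>

int main(void)
{
        struct sched_param sp = { .sched_priority = 10 };  /* new, lower RT priority */
        pid_t pid = 1234;                                  /* target task, illustrative */

        if (sched_setscheduler(pid, SCHED_FIFO, &sp) != 0)
                perror("sched_setscheduler");
        return 0;
}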