about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2007-07-09 12:51:58 -0400
committerIngo Molnar <mingo@elte.hu>2007-07-09 12:51:58 -0400
commitf64f61145a38f7039e4f1c0b50dcc3fbe70ec28e (patch)
tree5864c65e858dc320ab16d6025132bd26d4f54e7e /kernel/sched.c
parentc65cc8705256ad7524c97564b4fe3ca9782bf6d1 (diff)
sched: remove sched_exit()
remove sched_exit(): the elaborate dance of us trying to recover timeslices given to child tasks never really worked. CFS does not need it either. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c31
1 file changed, 0 insertions, 31 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d6624978feb2..7090982350d3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1831,37 +1831,6 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1831 task_rq_unlock(this_rq, &flags); 1831 task_rq_unlock(this_rq, &flags);
1832} 1832}
1833 1833
1834/*
1835 * Potentially available exiting-child timeslices are
1836 * retrieved here - this way the parent does not get
1837 * penalized for creating too many threads.
1838 *
1839 * (this cannot be used to 'generate' timeslices
1840 * artificially, because any timeslice recovered here
1841 * was given away by the parent in the first place.)
1842 */
1843void fastcall sched_exit(struct task_struct *p)
1844{
1845 unsigned long flags;
1846 struct rq *rq;
1847
1848 /*
1849 * If the child was a (relative-) CPU hog then decrease
1850 * the sleep_avg of the parent as well.
1851 */
 /* Lock the parent's runqueue; irq state is saved into 'flags'. */
1852 rq = task_rq_lock(p->parent, &flags);
 /*
  * Child is still on the timeslice it inherited at fork and exits on
  * the parent's CPU: return the unused remainder to the parent.
  * NOTE(review): the cap uses task_timeslice(p) — the child's full
  * timeslice, not the parent's — confirm that asymmetry is intended.
  */
1853 if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
1854 p->parent->time_slice += p->time_slice;
1855 if (unlikely(p->parent->time_slice > task_timeslice(p)))
1856 p->parent->time_slice = task_timeslice(p);
1857 }
 /*
  * Only ever lowers the parent's sleep_avg: blend it toward the
  * child's value with weight EXIT_WEIGHT/(EXIT_WEIGHT+1) on the
  * parent's old value and 1/(EXIT_WEIGHT+1) on the child's.
  */
1858 if (p->sleep_avg < p->parent->sleep_avg)
1859 p->parent->sleep_avg = p->parent->sleep_avg /
1860 (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
1861 (EXIT_WEIGHT + 1);
 /* Release the parent's runqueue lock and restore irq state. */
1862 task_rq_unlock(rq, &flags);
1863}
1864
1865/** 1834/**
1866 * prepare_task_switch - prepare to switch tasks 1835 * prepare_task_switch - prepare to switch tasks
1867 * @rq: the runqueue preparing to switch 1836 * @rq: the runqueue preparing to switch