path: root/kernel
author	Dmitry Adamushko <dmitry.adamushko@gmail.com>	2007-10-15 11:00:13 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:13 -0400
commit	a4ec24b48ddef1e93f7578be53270f0b95ad666c (patch)
tree	cad14ba03ec8647761396e23e693328c5db40991 /kernel
parent	a9957449b08ab561a33e1e038df06843b8d8dd9f (diff)
sched: tidy up SCHED_RR
- make timeslices of SCHED_RR tasks constant and not dependent on the
  task's static_prio [1];
- remove obsolete code (timeslice related bits);
- make sched_rr_get_interval() return something more meaningful [2]
  for SCHED_OTHER tasks.

[1] according to the following link, it's not compliant with SUSv3
(not sure though, what is the reference for us :-)
http://lkml.org/lkml/2007/3/7/656

[2] the interval is dynamic and can be depicted as follows: "should a
task be one of the runnable tasks at this particular moment, it would
expect to run for this interval of time before being re-scheduled by
the scheduler tick" (i.e. it's more precise if a task is runnable at
the moment). Yes, this seems to require task_rq_lock/unlock(), but
this is not a hot path.

results:

(SCHED_FIFO)
dimm@earth:~/storage/prog$ sudo chrt -f 10 ./rr_interval
time_slice: 0 : 0

(SCHED_RR)
dimm@earth:~/storage/prog$ sudo chrt 10 ./rr_interval
time_slice: 0 : 99984800

(SCHED_NORMAL)
dimm@earth:~/storage/prog$ ./rr_interval
time_slice: 0 : 19996960

(SCHED_NORMAL + a cpu_hog of similar 'weight' on the same CPU --- so
it should be half of the previous result)
dimm@earth:~/storage/prog$ taskset 1 ./rr_interval
time_slice: 0 : 9998480

Signed-off-by: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
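The rr_interval tool used for the measurements above is not part of this
patch. A minimal sketch of such a test program is shown below; the name
rr_interval and the exact output format are assumptions here, only the
sched_rr_get_interval() call itself is the standard interface the patch
changes the behaviour of:

#include <stdio.h>
#include <time.h>
#include <sched.h>

int main(void)
{
	struct timespec ts;

	/* pid 0 queries the interval of the calling process */
	if (sched_rr_get_interval(0, &ts) == -1) {
		perror("sched_rr_get_interval");
		return 1;
	}

	/* prints e.g. "time_slice: 0 : 99984800" for SCHED_RR */
	printf("time_slice: %ld : %ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}

Built with something like "gcc -o rr_interval rr_interval.c", it can be
run under the different policies exactly as in the log above (sudo chrt
-f 10 ./rr_interval, sudo chrt 10 ./rr_interval, plain ./rr_interval,
taskset 1 ./rr_interval).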
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	41
-rw-r--r--	kernel/sched_rt.c	2
2 files changed, 18 insertions, 25 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index ce9bb7aa7c12..f370f108ed04 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -96,7 +96,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * Some helpers for converting nanosecond timing to jiffy resolution
  */
-#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
+#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (1000000000 / HZ))
 #define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
 
 #define NICE_0_LOAD		SCHED_LOAD_SCALE
@@ -105,11 +105,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 /*
  * These are the 'tuning knobs' of the scheduler:
  *
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
+ * default timeslice is 100 msecs (used only for SCHED_RR tasks).
  * Timeslices get refilled after they expire.
  */
-#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
 #define DEF_TIMESLICE		(100 * HZ / 1000)
 
 #ifdef CONFIG_SMP
@@ -133,24 +131,6 @@ static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
 }
 #endif
 
-#define SCALE_PRIO(x, prio) \
-	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-
-/*
- * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- */
-static unsigned int static_prio_timeslice(int static_prio)
-{
-	if (static_prio == NICE_TO_PRIO(19))
-		return 1;
-
-	if (static_prio < NICE_TO_PRIO(0))
-		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
-	else
-		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
-}
-
 static inline int rt_policy(int policy)
 {
 	if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
@@ -4746,6 +4726,7 @@ asmlinkage
 long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
 	struct task_struct *p;
+	unsigned int time_slice;
 	int retval = -EINVAL;
 	struct timespec t;
 
@@ -4762,9 +4743,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 	if (retval)
 		goto out_unlock;
 
-	jiffies_to_timespec(p->policy == SCHED_FIFO ?
-				 0 : static_prio_timeslice(p->static_prio), &t);
+	if (p->policy == SCHED_FIFO)
+		time_slice = 0;
+	else if (p->policy == SCHED_RR)
+		time_slice = DEF_TIMESLICE;
+	else {
+		struct sched_entity *se = &p->se;
+		unsigned long flags;
+		struct rq *rq;
+
+		rq = task_rq_lock(p, &flags);
+		time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+		task_rq_unlock(rq, &flags);
+	}
 	read_unlock(&tasklist_lock);
+	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
 out_nounlock:
 	return retval;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2f26c3d73506..d0097a0634e5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -206,7 +206,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	if (--p->time_slice)
 		return;
 
-	p->time_slice = static_prio_timeslice(p->static_prio);
+	p->time_slice = DEF_TIMESLICE;
 
 	/*
 	 * Requeue to the end of queue if we are not the only element
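As a rough sanity check of the numbers in the commit message (assuming
HZ=1000 on the test machine, so that jiffies_to_timespec() reports one
jiffy as 999848 ns, which is what the measured values divide out to):

DEF_TIMESLICE       = 100 * HZ / 1000 = 100 jiffies
SCHED_FIFO          -> 0                            (no timeslice at all)
SCHED_RR            -> 100 jiffies                  = 99984800 ns (~100 ms, constant)
SCHED_NORMAL, alone -> sched_slice() ~ 20 jiffies   = 19996960 ns (~20 ms)
SCHED_NORMAL + hog  -> slice split two ways         =  9998480 ns (~10 ms)

In other words, the SCHED_RR interval is now the constant DEF_TIMESLICE
regardless of static_prio, and the SCHED_OTHER interval halves when a
second runnable task of equal weight shares the CPU, as described in [2].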