Diffstat (limited to 'kernel')
 kernel/sched.c          | 17 +----------------
 kernel/sched_fair.c     | 21 +++++++++++++++++++++
 kernel/sched_idletask.c |  7 +++++++
 kernel/sched_rt.c       | 13 +++++++++++++
 4 files changed, 42 insertions(+), 16 deletions(-)
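This patch simplifies sys_sched_rr_get_interval(): the policy-specific time-slice logic moves out of the syscall in kernel/sched.c and into a new per-class get_rr_interval() method, implemented by the fair, idle and RT scheduling classes below.

The diff is limited to kernel/, so the matching method-pointer addition to struct sched_class (include/linux/sched.h) is not shown here. A minimal sketch of what that member presumably looks like, inferred from the function prototypes and the .get_rr_interval initializers in this patch:

	struct sched_class {
		/* ... existing methods ... */

		/* return the task's round-robin time slice, in jiffies */
		unsigned int (*get_rr_interval)(struct task_struct *task);

		/* ... */
	};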
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b900fb1c6e1..830967e18285 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6819,23 +6819,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;
 
-	/*
-	 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
-	 * tasks that are on an otherwise idle runqueue:
-	 */
-	time_slice = 0;
-	if (p->policy == SCHED_RR) {
-		time_slice = DEF_TIMESLICE;
-	} else if (p->policy != SCHED_FIFO) {
-		struct sched_entity *se = &p->se;
-		unsigned long flags;
-		struct rq *rq;
+	time_slice = p->sched_class->get_rr_interval(p);
 
-		rq = task_rq_lock(p, &flags);
-		if (rq->cfs.load.weight)
-			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-		task_rq_unlock(rq, &flags);
-	}
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
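The syscall body thus collapses to a single indirect call. Note the locking shift: the syscall previously took the task's runqueue lock itself for the SCHED_OTHER case; that locking now lives inside the fair class's implementation (below), so here only tasklist_lock remains read-held around the call.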
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 566e3bb78ed9..cd73738f0d5f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1938,6 +1938,25 @@ static void moved_group_fair(struct task_struct *p)
 }
 #endif
 
+unsigned int get_rr_interval_fair(struct task_struct *task)
+{
+	struct sched_entity *se = &task->se;
+	unsigned long flags;
+	struct rq *rq;
+	unsigned int rr_interval = 0;
+
+	/*
+	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
+	 * idle runqueue:
+	 */
+	rq = task_rq_lock(task, &flags);
+	if (rq->cfs.load.weight)
+		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
+	task_rq_unlock(rq, &flags);
+
+	return rr_interval;
+}
+
 /*
  * All the scheduling class methods:
  */
@@ -1966,6 +1985,8 @@ static const struct sched_class fair_sched_class = {
 	.prio_changed		= prio_changed_fair,
 	.switched_to		= switched_to_fair,
 
+	.get_rr_interval	= get_rr_interval_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.moved_group		= moved_group_fair,
 #endif
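get_rr_interval_fair() reports the CFS-computed slice, converted from nanoseconds to jiffies, and 0 for a task on an otherwise idle runqueue (rq->cfs.load.weight == 0). As a worked number, assuming HZ=1000 for illustration: a sched_slice() result of 12,000,000 ns becomes NS_TO_JIFFIES(12000000) = 12 jiffies, which jiffies_to_timespec() in the syscall then reports as a 12 ms interval.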
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a8b448af004b..b133a28fcde3 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -97,6 +97,11 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
 	check_preempt_curr(rq, p, 0);
 }
 
+unsigned int get_rr_interval_idle(struct task_struct *task)
+{
+	return 0;
+}
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -122,6 +127,8 @@ static const struct sched_class idle_sched_class = {
 	.set_curr_task		= set_curr_task_idle,
 	.task_tick		= task_tick_idle,
 
+	.get_rr_interval	= get_rr_interval_idle,
+
 	.prio_changed		= prio_changed_idle,
 	.switched_to		= switched_to_idle,
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 13de7126a6ab..a4d790cddb19 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1734,6 +1734,17 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }
 
+unsigned int get_rr_interval_rt(struct task_struct *task)
+{
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks
+	 */
+	if (task->policy == SCHED_RR)
+		return DEF_TIMESLICE;
+	else
+		return 0;
+}
+
 static const struct sched_class rt_sched_class = {
 	.next			= &fair_sched_class,
 	.enqueue_task		= enqueue_task_rt,
@@ -1762,6 +1773,8 @@ static const struct sched_class rt_sched_class = {
 	.set_curr_task		= set_curr_task_rt,
 	.task_tick		= task_tick_rt,
 
+	.get_rr_interval	= get_rr_interval_rt,
+
 	.prio_changed		= prio_changed_rt,
 	.switched_to		= switched_to_rt,
 };
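From user space the result is visible through sched_rr_get_interval(2). A small self-contained test program (an illustrative sketch, not part of the patch; switching policies requires root, and the exact SCHED_RR value depends on DEF_TIMESLICE, typically 100 ms):

	#include <sched.h>
	#include <stdio.h>
	#include <time.h>

	/* Print the RR interval the kernel reports for the calling thread. */
	static void report(const char *policy)
	{
		struct timespec ts;

		if (sched_rr_get_interval(0, &ts) == 0)
			printf("%s: %ld.%09ld s\n", policy,
			       (long)ts.tv_sec, ts.tv_nsec);
		else
			perror(policy);
	}

	int main(void)
	{
		struct sched_param sp = { .sched_priority = 1 };

		/* CFS slice, or 0 on an otherwise idle runqueue */
		report("SCHED_OTHER");

		if (sched_setscheduler(0, SCHED_RR, &sp) == 0)
			report("SCHED_RR");	/* DEF_TIMESLICE, typically 100 ms */

		if (sched_setscheduler(0, SCHED_FIFO, &sp) == 0)
			report("SCHED_FIFO");	/* always 0 for FIFO tasks */

		return 0;
	}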