Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	48
1 file changed, 16 insertions(+), 32 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 002fcf8d3f64..dcdcad632fd9 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -7,7 +7,7 @@
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
  */
-static inline void update_curr_rt(struct rq *rq, u64 now)
+static inline void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;
@@ -15,18 +15,17 @@ static inline void update_curr_rt(struct rq *rq, u64 now)
 	if (!task_has_rt_policy(curr))
 		return;
 
-	delta_exec = now - curr->se.exec_start;
+	delta_exec = rq->clock - curr->se.exec_start;
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
 	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
-	curr->se.exec_start = now;
+	curr->se.exec_start = rq->clock;
 }
 
-static void
-enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
+static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
@@ -37,12 +36,11 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
 /*
  * Adding/removing a task to/from a priority array:
  */
-static void
-dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep, u64 now)
+static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
-	update_curr_rt(rq, now);
+	update_curr_rt(rq);
 
 	list_del(&p->run_list);
 	if (list_empty(array->queue + p->prio))
@@ -75,7 +73,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
 		resched_task(rq->curr);
 }
 
-static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
+static struct task_struct *pick_next_task_rt(struct rq *rq)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 	struct task_struct *next;
@@ -89,14 +87,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq, u64 now)
 	queue = array->queue + idx;
 	next = list_entry(queue->next, struct task_struct, run_list);
 
-	next->se.exec_start = now;
+	next->se.exec_start = rq->clock;
 
 	return next;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, u64 now)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
-	update_curr_rt(rq, now);
+	update_curr_rt(rq);
 	p->se.exec_start = 0;
 }
 
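The five hunks above are one mechanical substitution: instead of threading a "u64 now" timestamp through every scheduling-class hook, each hook reads the timestamp from rq->clock, which the caller (presumably via update_rq_clock() in kernel/sched.c, outside this diffstat) is expected to refresh before invoking the hook. Below is a minimal userspace sketch of the pattern; the struct and field names are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <stdint.h>

/* Toy runqueue: the 'clock' field replaces the 'now' parameter. */
struct toy_rq {
	uint64_t clock;      /* stand-in for rq->clock */
	uint64_t exec_start; /* stand-in for curr->se.exec_start */
	uint64_t sum_exec;   /* stand-in for curr->se.sum_exec_runtime */
};

/* After the change: no 'now' argument, read the clock off the rq. */
static void toy_update_curr(struct toy_rq *rq)
{
	uint64_t delta_exec = rq->clock - rq->exec_start;

	if ((int64_t)delta_exec < 0)	/* clamp if the clock went backwards */
		delta_exec = 0;
	rq->sum_exec += delta_exec;
	rq->exec_start = rq->clock;
}

int main(void)
{
	struct toy_rq rq = { .exec_start = 40 };

	rq.clock = 100;		/* caller refreshes the clock once... */
	toy_update_curr(&rq);	/* ...then every hook reads the same value */
	printf("sum_exec = %llu\n", (unsigned long long)rq.sum_exec);
	return 0;
}

The upside of this design is that a single clock read per scheduler operation is shared by every hook it calls, instead of each call site sampling or forwarding its own timestamp.
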
@@ -172,28 +170,15 @@ static struct task_struct *load_balance_next_rt(void *arg)
 	return p;
 }
 
-static int
+static unsigned long
 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		unsigned long max_nr_move, unsigned long max_load_move,
 		struct sched_domain *sd, enum cpu_idle_type idle,
-		int *all_pinned, unsigned long *load_moved)
+		int *all_pinned, int *this_best_prio)
 {
-	int this_best_prio, best_prio, best_prio_seen = 0;
 	int nr_moved;
 	struct rq_iterator rt_rq_iterator;
-
-	best_prio = sched_find_first_bit(busiest->rt.active.bitmap);
-	this_best_prio = sched_find_first_bit(this_rq->rt.active.bitmap);
-
-	/*
-	 * Enable handling of the case where there is more than one task
-	 * with the best priority. If the current running task is one
-	 * of those with prio==best_prio we know it won't be moved
-	 * and therefore it's safe to override the skip (based on load)
-	 * of any task we find with that prio.
-	 */
-	if (busiest->curr->prio == best_prio)
-		best_prio_seen = 1;
+	unsigned long load_moved;
 
 	rt_rq_iterator.start = load_balance_start_rt;
 	rt_rq_iterator.next = load_balance_next_rt;
@@ -203,11 +188,10 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	rt_rq_iterator.arg = busiest;
 
 	nr_moved = balance_tasks(this_rq, this_cpu, busiest, max_nr_move,
-			max_load_move, sd, idle, all_pinned, load_moved,
-			this_best_prio, best_prio, best_prio_seen,
-			&rt_rq_iterator);
+			max_load_move, sd, idle, all_pinned, &load_moved,
+			this_best_prio, &rt_rq_iterator);
 
-	return nr_moved;
+	return load_moved;
 }
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
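The last two hunks change the load-balancing contract rather than the clock handling: load_balance_rt() now returns the amount of load moved (an unsigned long) instead of a task count, and it receives this_best_prio from its caller, so the value can be maintained across scheduling classes; the local best_prio/best_prio_seen bookkeeping visible in the removed lines moves out of this function. A toy model of the new contract, with a hypothetical toy_balance() standing in for balance_tasks():

#include <stdio.h>

struct toy_rq {
	unsigned long queued_load; /* stand-in for the busiest rq's load */
};

/* Stand-in for balance_tasks(): moves up to max_load_move worth of
 * load, reports the amount through *load_moved, returns a task count. */
static int toy_balance(struct toy_rq *busiest, unsigned long max_load_move,
		       unsigned long *load_moved, int *this_best_prio)
{
	*load_moved = busiest->queued_load < max_load_move ?
		      busiest->queued_load : max_load_move;
	busiest->queued_load -= *load_moved;
	(void)this_best_prio; /* updated while scanning tasks in the real code */
	return *load_moved ? 1 : 0;
}

/* After the change: the class-level balancer reports load, not tasks,
 * and this_best_prio lives in the caller. */
static unsigned long toy_load_balance(struct toy_rq *busiest,
				      unsigned long max_load_move,
				      int *this_best_prio)
{
	unsigned long load_moved;

	toy_balance(busiest, max_load_move, &load_moved, this_best_prio);
	return load_moved;
}

int main(void)
{
	struct toy_rq busiest = { .queued_load = 300 };
	int this_best_prio = 0;

	printf("load moved: %lu\n",
	       toy_load_balance(&busiest, 128, &this_best_prio));
	return 0;
}

Returning the moved load lets the generic balancer compare progress against max_load_move directly, and hoisting this_best_prio out of the per-class function lets that state persist when the balancer iterates over more than one scheduling class.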