author		Steven Rostedt <srostedt@redhat.com>	2008-01-25 15:08:06 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-25 15:08:06 -0500
commit		4fd29176b7cd24909f8ceba2105cb3ae2857b90c
tree		2d83445de1d500cd3794a73e6d9d35b44444e259 /kernel
parent		e8fa136262e1121288bb93befe2295928ffd240d
sched: add rt-overload tracking
This patch adds an RT overload accounting system. When a runqueue has
more than one RT task queued, it is marked as overloaded, making it a
candidate to have RT tasks pulled from it.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
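
The mechanism is a two-part publication: a cpumask records which runqueues
are overloaded, and an atomic counter lets remote CPUs test for any overload
cheaply before scanning the mask. Below is a minimal userspace sketch of the
same mask-plus-counter pattern, using C11 atomics in place of the kernel
primitives; all names here (overload_mask, overload_count, set_overload,
clear_overload) are invented for illustration, and the fences stand in for
the patch's wmb():

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t overload_mask;	/* one bit per CPU, <= 64 CPUs */
static atomic_int overload_count;

static void set_overload(int cpu)
{
	atomic_fetch_or_explicit(&overload_mask, 1ULL << cpu,
				 memory_order_relaxed);
	/* publish the mask bit before bumping the counter;
	 * this fence plays the role of wmb() in the patch */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&overload_count, 1, memory_order_relaxed);
}

static void clear_overload(int cpu)
{
	/* as the patch's comment says, order doesn't matter here */
	atomic_fetch_sub_explicit(&overload_count, 1, memory_order_relaxed);
	atomic_fetch_and_explicit(&overload_mask, ~(1ULL << cpu),
				  memory_order_relaxed);
}

The clear side can stay unordered because the race is harmless in both
directions: a reader that sees a zero counter simply skips the scan, and a
reader that sees a stale mask bit does one useless scan and finds nothing.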
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched_rt.c | 36
 1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 7815e90b1147..547f858b0752 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,38 @@
  * policies)
  */
 
+#ifdef CONFIG_SMP
+static cpumask_t rt_overload_mask;
+static atomic_t rto_count;
+static inline int rt_overloaded(void)
+{
+	return atomic_read(&rto_count);
+}
+static inline cpumask_t *rt_overload(void)
+{
+	return &rt_overload_mask;
+}
+static inline void rt_set_overload(struct rq *rq)
+{
+	cpu_set(rq->cpu, rt_overload_mask);
+	/*
+	 * Make sure the mask is visible before we set
+	 * the overload count. That is checked to determine
+	 * if we should look at the mask. It would be a shame
+	 * if we looked at the mask, but the mask was not
+	 * updated yet.
+	 */
+	wmb();
+	atomic_inc(&rto_count);
+}
+static inline void rt_clear_overload(struct rq *rq)
+{
+	/* the order here really doesn't matter */
+	atomic_dec(&rto_count);
+	cpu_clear(rq->cpu, rt_overload_mask);
+}
+#endif /* CONFIG_SMP */
+
 /*
  * Update the current task's runtime statistics. Skip current tasks that
  * are not in our scheduling class.
@@ -33,6 +65,8 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
 #ifdef CONFIG_SMP
 	if (p->prio < rq->rt.highest_prio)
 		rq->rt.highest_prio = p->prio;
+	if (rq->rt.rt_nr_running > 1)
+		rt_set_overload(rq);
 #endif /* CONFIG_SMP */
 }
 
@@ -54,6 +88,8 @@ static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
 		} /* otherwise leave rq->highest prio alone */
 	} else
 		rq->rt.highest_prio = MAX_RT_PRIO;
+	if (rq->rt.rt_nr_running < 2)
+		rt_clear_overload(rq);
 #endif /* CONFIG_SMP */
 }
 
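
The comment above wmb() in rt_set_overload() implies a matching read side:
check the counter first, and only then look at the mask. The pull logic that
consumes this state is not part of this patch, but a hypothetical reader,
continuing the userspace sketch above, might look like the following
(find_overloaded_cpu is an invented name, and the acquire fence pairs with
the release fence in set_overload):

/* Returns the lowest-numbered overloaded CPU, or -1 if none. */
static int find_overloaded_cpu(void)
{
	if (!atomic_load_explicit(&overload_count, memory_order_relaxed))
		return -1;	/* cheap test: nothing is overloaded */
	/* pairs with the release fence in set_overload(), so the mask
	 * bit is guaranteed visible once the counter reads non-zero */
	atomic_thread_fence(memory_order_acquire);
	uint64_t mask = atomic_load_explicit(&overload_mask,
					     memory_order_relaxed);
	return mask ? __builtin_ctzll(mask) : -1;
}

Note the final check of the loaded mask: a concurrent clear_overload() may
have emptied the mask after the counter test, so a non-zero counter is only
a hint that a scan is worthwhile, never a guarantee of finding a CPU.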