| author | Sripathi Kodi <sripathik@in.ibm.com> | 2008-11-05 08:27:14 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-11-06 16:12:09 -0500 |
| commit | cf7f8690e864c6fe11e77202dd847fa60f483418 | |
| tree | 9f0e3cca10a550698c3761c3ee5de6496ecf1e78 | |
| parent | e113a745f693af196c8081b328bf42def086989b | |
sched, lockdep: inline double_unlock_balance()
We have a test case which measures the variation in the amount of time
needed to perform a fixed amount of work on the preempt_rt kernel. We
recently started seeing deterioration in its performance. The test
should never take more than 10 microseconds, but we started seeing a
5-10% failure rate.
Using the elimination method, we traced the problem to commit
1b12bbc747560ea68bcc132c3d05699e52271da0 (lockdep: re-annotate
scheduler runqueues).
When LOCKDEP is disabled, that commit only adds an additional function
call to double_unlock_balance(). Hence I inlined double_unlock_balance()
and the problem went away. Here is a patch to make this change.
Signed-off-by: Sripathi Kodi <sripathik@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
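To illustrate why the extra call mattered even with lockdep disabled, here is a minimal userspace sketch (not the kernel code; the names my_unlock_balance, my_lock_annotate and the MY_LOCKDEP switch are hypothetical). It mirrors the pattern in the diff below: a small unlock wrapper whose debug-only annotation compiles away, so marking it inline lets the compiler emit the bare unlock at the call site instead of paying a call/return on every invocation.

```c
/* Minimal sketch only; hypothetical names, not kernel code.
 * Build: cc -O2 -pthread sketch.c   (add -DMY_LOCKDEP for the debug variant) */
#include <pthread.h>
#include <stdio.h>

#ifdef MY_LOCKDEP
/* Stand-in for debug-only lock bookkeeping (e.g. re-annotation). */
static void my_lockdep_note(pthread_mutex_t *lock) { (void)lock; }
# define my_lock_annotate(lock) my_lockdep_note(lock)
#else
/* With the debug config off, the annotation compiles to nothing... */
# define my_lock_annotate(lock) do { } while (0)
#endif

/* ...so without 'inline' the wrapper could still cost a call/return
 * around what is effectively just pthread_mutex_unlock(). Marking it
 * inline lets the compiler collapse it at the call site. */
static inline void my_unlock_balance(pthread_mutex_t *busiest)
{
	pthread_mutex_unlock(busiest);
	my_lock_annotate(busiest);
}

int main(void)
{
	pthread_mutex_t busiest = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&busiest);
	my_unlock_balance(&busiest);	/* inlined: no extra call overhead */
	puts("unlocked");
	return 0;
}
```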
 kernel/sched.c    | 2 +-
 kernel/sched_rt.c | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e8819bc6f462..ad10d0aae1d7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2825,7 +2825,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
 	__releases(busiest->lock)
 {
 	spin_unlock(&busiest->lock);
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c7963d5d0625..2bdd44423599 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -910,7 +910,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+static inline void double_unlock_balance(struct rq *this_rq,
+					 struct rq *busiest);
 
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
