 kernel/sched.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed9..17e4391ec2de 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -119,8 +119,6 @@
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
 static inline int rt_policy(int policy)
 {
 	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -1695,6 +1693,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #ifdef CONFIG_PREEMPT
 
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
  * way at the expense of forcing extra atomic operations in all
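For context: double_rq_lock() and double_lock_balance() take two runqueue locks at once, and the usual way to do that without risking an ABBA deadlock is to acquire the locks in a fixed (address) order. The snippet below is a minimal userspace sketch of that ordering rule using pthread mutexes; it illustrates the general pattern only and is not the kernel's implementation (double_lock, double_unlock, and struct rq_like are invented for this example).

/* Sketch of address-ordered double locking (illustration only, not kernel code). */
#include <pthread.h>
#include <stdio.h>

struct rq_like {
	pthread_mutex_t lock;
	int nr_running;
};

/* Take both locks in a fixed (address) order so two threads calling this
 * with the arguments swapped cannot deadlock against each other. */
static void double_lock(struct rq_like *a, struct rq_like *b)
{
	if (a == b) {
		pthread_mutex_lock(&a->lock);	/* same queue: one lock is enough */
	} else if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void double_unlock(struct rq_like *a, struct rq_like *b)
{
	pthread_mutex_unlock(&a->lock);
	if (a != b)
		pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct rq_like rq1 = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct rq_like rq2 = { PTHREAD_MUTEX_INITIALIZER, 0 };

	double_lock(&rq1, &rq2);
	rq1.nr_running++;		/* pretend to migrate a task between queues */
	rq2.nr_running--;
	double_unlock(&rq1, &rq2);

	printf("rq1=%d rq2=%d\n", rq1.nr_running, rq2.nr_running);
	return 0;
}

The kernel version additionally depends on interrupts being disabled and carries lockdep annotations; the sketch above only shows the lock-ordering idea that makes acquiring both rq->locks safe.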