author     Alexey Dobriyan <adobriyan@gmail.com>    2008-11-28 14:08:00 -0500
committer  Ingo Molnar <mingo@elte.hu>              2008-11-28 14:11:15 -0500
commit     70574a996fc7a70c5586eb56bd92a544eccf18b6
tree       6c0fbb9a9e9b65817316d1765217478bb6c88cbf /kernel
parent     f1860c34b3ed829ac774647f266abf1074cd58cd
sched: move double_unlock_balance() higher
Move double_lock_balance()/double_unlock_balance() higher to fix the
following with gcc-3.4.6:

   CC      kernel/sched.o
 In file included from kernel/sched.c:1605:
 kernel/sched_rt.c: In function `find_lock_lowest_rq':
 kernel/sched_rt.c:914: sorry, unimplemented: inlining failed in call to 'double_unlock_balance': function body not available
 kernel/sched_rt.c:1077: sorry, unimplemented: called from here
 make[2]: *** [kernel/sched.o] Error 1

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
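[Editor's note] The "sorry, unimplemented" diagnostic is a gcc-3.4 limitation: when a call must be inlined but the function body has not yet been parsed at the call site (and the kernel's inline effectively forces always_inline on compilers of this era), gcc-3.4 refuses to fall back to an out-of-line call. A minimal standalone sketch of the failure mode, not from the kernel tree (file and function names here are made up):

/* repro.c - hypothetical reproduction of the gcc-3.4 error (not kernel code) */

/* Forward declaration only: no body available yet. */
static inline __attribute__((always_inline)) void helper(void);

static int caller(void)
{
	helper();	/* gcc-3.4: sorry, unimplemented: inlining failed in
			 * call to 'helper': function body not available */
	return 0;
}

/* The body appears after the call site -- too late.  Hoisting this
 * definition above caller() is the same fix this patch applies to
 * double_lock_balance()/double_unlock_balance(). */
static inline __attribute__((always_inline)) void helper(void)
{
}

int main(void)
{
	return caller();
}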
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c     67
-rw-r--r--  kernel/sched_rt.c   4
2 files changed, 33 insertions, 38 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3d1ee429219b..6a99703e0eb0 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1581,6 +1581,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
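[Editor's note] The double_lock_balance() added above is an instance of a general deadlock-avoidance idiom: two locks of the same class are always acquired in ascending address order, so no two CPUs can each hold one runqueue lock while spinning on the other. A stripped-down sketch of the idiom (illustration only, not kernel code; assumes the two locks are distinct):

#include <linux/spinlock.h>

/* Take two spinlocks of the same lock class without risking an ABBA
 * deadlock: whichever lock has the lower address is taken first, and the
 * second acquisition is marked SINGLE_DEPTH_NESTING so lockdep accepts
 * holding two locks of one class at once. */
static void lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a < b) {
		spin_lock(a);
		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(b);
		spin_lock_nested(a, SINGLE_DEPTH_NESTING);
	}
}

double_lock_balance() differs only in that it starts with this_rq->lock already held: it trylocks busiest, and only when that fails with the ordering wrong does it drop this_rq->lock and reacquire both in address order, reporting the drop through its return value.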
@@ -2781,40 +2814,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
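[Editor's note] That return-value protocol is why callers must be careful: a return of 1 from double_lock_balance() means this_rq->lock was released and retaken, so any task chosen under the old critical section may have moved in the window. A hedged caller sketch, modelled loosely on the push path in sched_rt.c (pick_pushable_task() is a hypothetical helper; the other calls are real 2.6.28-era scheduler internals):

static int push_one_task(struct rq *this_rq, struct rq *target)
{
	/* Candidate chosen while this_rq->lock is held. */
	struct task_struct *p = pick_pushable_task(this_rq);	/* hypothetical */

	if (!p)
		return 0;

	if (double_lock_balance(this_rq, target)) {
		/* this_rq->lock was dropped to honour the locking order:
		 * revalidate the candidate before touching it. */
		if (task_rq(p) != this_rq || !p->se.on_rq) {
			double_unlock_balance(this_rq, target);
			return 0;
		}
	}

	deactivate_task(this_rq, p, 0);
	set_task_cpu(p, cpu_of(target));
	activate_task(target, p, 0);

	double_unlock_balance(this_rq, target);
	return 1;
}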
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2bdd44423599..587a16e2a8f5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -909,10 +909,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static inline void double_unlock_balance(struct rq *this_rq,
-					 struct rq *busiest);
-
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
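[Editor's note] The removed forward declarations existed only because of include order: sched_rt.c is not a separate compilation unit, it is pulled into kernel/sched.c with #include "sched_rt.c" (line 1605 in this tree, as the error output shows), which used to sit above the double_lock_balance()/double_unlock_balance() definitions. Roughly:

/* Pre-patch layout of kernel/sched.c (simplified; line numbers taken
 * from the diff above are approximate):
 *
 *	#include "sched_rt.c"		line 1605: find_lock_lowest_rq()
 *					is compiled here with only the
 *					forward declarations in scope
 *	...
 *	double_lock_balance()		defined at ~line 2786
 *	double_unlock_balance()		defined at ~line 2810
 *
 * Post-patch, both definitions sit at ~lines 1587 and 1611, above the
 * #include, so their bodies are already available at every call site and
 * the forward declarations become dead weight. */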