Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 268a45ea238c..d9d8ece46a15 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2192,7 +2192,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2741,7 +2741,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2751,7 +2751,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2783,7 +2783,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2813,7 +2813,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 