author    Ingo Molnar <mingo@kernel.org>  2014-05-22 04:28:56 -0400
committer Ingo Molnar <mingo@kernel.org>  2014-05-22 04:28:56 -0400
commit    65c2ce70046c779974af8b5dfc25a0df489089b5 (patch)
tree      b16f152eb62b71cf5a1edc51da865b357c989922 /kernel/sched
parent    842514849a616e9b61acad65771c7afe01e651f9 (diff)
parent    4b660a7f5c8099d88d1a43d8ae138965112592c7 (diff)
Merge tag 'v3.15-rc6' into sched/core, to pick up the latest fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4b82622b6252..092e511605ec 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2223,7 +2223,7 @@ static inline void post_schedule(struct rq *rq)
  * schedule_tail - first thing a freshly forked thread must call.
  * @prev: the thread we just switched away from.
  */
-asmlinkage void schedule_tail(struct task_struct *prev)
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
 	__releases(rq->lock)
 {
 	struct rq *rq = this_rq();
@@ -2778,7 +2778,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 	blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void __sched schedule(void)
+asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
@@ -2788,7 +2788,7 @@ asmlinkage void __sched schedule(void)
 EXPORT_SYMBOL(schedule);
 
 #ifdef CONFIG_CONTEXT_TRACKING
-asmlinkage void __sched schedule_user(void)
+asmlinkage __visible void __sched schedule_user(void)
 {
 	/*
 	 * If we come here after a random call to set_need_resched(),
@@ -2820,7 +2820,7 @@ void __sched schedule_preempt_disabled(void)
  * off of preempt_enable. Kernel preemptions off return from interrupt
  * occur there and call schedule directly.
  */
-asmlinkage void __sched notrace preempt_schedule(void)
+asmlinkage __visible void __sched notrace preempt_schedule(void)
 {
 	/*
 	 * If there is a non-zero preempt_count or interrupts are disabled,
@@ -2850,7 +2850,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
  */
-asmlinkage void __sched preempt_schedule_irq(void)
+asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
 
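Every hunk above makes the same one-line change: adding the __visible annotation to an asmlinkage scheduler entry point. As a minimal sketch of that pattern (not kernel code): the macro definitions below are simplified stand-ins for the kernel's own headers, where on gcc __visible expands to __attribute__((externally_visible)), and demo_entry_point is a hypothetical function standing in for schedule() and friends.

/*
 * Minimal sketch of the pattern the diff applies. The macros are
 * simplified stand-ins for the kernel's definitions, and
 * demo_entry_point() is hypothetical.
 *
 * Functions called only from assembly have no C callers the compiler
 * can see, so under -flto / whole-program optimization they could be
 * dropped or given local linkage; marking them externally visible
 * prevents that.
 */
#define __visible __attribute__((externally_visible))
#define asmlinkage /* on x86-32 this forces stack-based args (regparm(0)); elsewhere it is often empty */

/* Invoked from entry assembly, never from C. */
asmlinkage __visible void demo_entry_point(void)
{
	/* ... scheduler entry work would go here ... */
}

For an ordinary build the annotation changes nothing; it only matters once link-time optimization can see the whole program and would otherwise treat the apparently unreferenced function as dead.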