Diffstat (limited to 'kernel/sched.c')

-rw-r--r--  kernel/sched.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ca1f76ba7773..a030d4514cdc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2839,14 +2839,14 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
  * with the lock held can cause deadlocks; see schedule() for
  * details.)
  */
-static void finish_task_switch(struct rq *rq, struct task_struct *prev)
+static int finish_task_switch(struct rq *rq, struct task_struct *prev)
         __releases(rq->lock)
 {
         struct mm_struct *mm = rq->prev_mm;
         long prev_state;
-#ifdef CONFIG_SMP
         int post_schedule = 0;
 
+#ifdef CONFIG_SMP
         if (current->sched_class->needs_post_schedule)
                 post_schedule = current->sched_class->needs_post_schedule(rq);
 #endif
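Note on the first hunk: finish_task_switch() changes from void to int, and the declaration of post_schedule moves out of the CONFIG_SMP block so the flag always exists and can be returned unconditionally; only the needs_post_schedule() query stays SMP-only. The query is made here, while rq->lock is still held, because the state it inspects is only stable under the lock. The userspace sketch below models the optional-hook pattern in play (a NULL-checked predicate plus a heavier callback run later); every name in it is an illustrative stand-in of mine, not kernel code.

#include <stdio.h>

struct rq_model;

/* Stand-in for the two optional hooks in struct sched_class;
 * either pointer may be NULL for classes that never rebalance. */
struct sched_class_model {
        int  (*needs_post_schedule)(struct rq_model *rq);
        void (*post_schedule)(struct rq_model *rq);
};

struct rq_model {
        int overloaded;                 /* illustrative runqueue state */
};

static int rt_needs_post_schedule(struct rq_model *rq)
{
        return rq->overloaded;          /* cheap test, made "under the lock" */
}

static void rt_post_schedule(struct rq_model *rq)
{
        (void)rq;
        printf("pushing tasks to other CPUs\n");
}

static const struct sched_class_model rt_class = {
        .needs_post_schedule = rt_needs_post_schedule,
        .post_schedule       = rt_post_schedule,
};

int main(void)
{
        struct rq_model rq = { 1 };
        int post_schedule = 0;

        /* mirrors the NULL check in the hunk above */
        if (rt_class.needs_post_schedule)
                post_schedule = rt_class.needs_post_schedule(&rq);

        if (post_schedule && rt_class.post_schedule)
                rt_class.post_schedule(&rq);
        return 0;
}

Running it prints the message once because the predicate fired; a class that leaves both hooks NULL costs nothing but the pointer test.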
@@ -2868,10 +2868,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
         finish_arch_switch(prev);
         perf_counter_task_sched_in(current, cpu_of(rq));
         finish_lock_switch(rq, prev);
-#ifdef CONFIG_SMP
-        if (post_schedule)
-                current->sched_class->post_schedule(rq);
-#endif
 
         fire_sched_in_preempt_notifiers(current);
         if (mm)
@@ -2884,6 +2880,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
                 kprobe_flush_task(prev);
                 put_task_struct(prev);
         }
+
+        return post_schedule;
 }
 
 /**
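Hunks two and three complete the move: the direct post_schedule() invocation is deleted from finish_task_switch(), and the function instead returns the flag it sampled at entry. finish_task_switch() now only reports whether the outgoing class wants the hook; actually calling it is the caller's job, which is what allows schedule() to run the hook even on the no-switch path added further down.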
@@ -2894,8 +2892,15 @@ asmlinkage void schedule_tail(struct task_struct *prev)
         __releases(rq->lock)
 {
         struct rq *rq = this_rq();
+        int post_schedule;
+
+        post_schedule = finish_task_switch(rq, prev);
+
+#ifdef CONFIG_SMP
+        if (post_schedule)
+                current->sched_class->post_schedule(rq);
+#endif
 
-        finish_task_switch(rq, prev);
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
         /* In this case, finish_task_switch does not reenable preemption */
         preempt_enable();
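schedule_tail() is the other consumer of the new return value: it runs as the first code of a freshly forked task, where there is no surrounding schedule() invocation to pick the flag up, so it must call post_schedule() itself under the same CONFIG_SMP guard. The existing __ARCH_WANT_UNLOCKED_CTXSW handling, where finish_task_switch() leaves preemption disabled, is kept as-is after the new call.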
@@ -2908,7 +2913,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
  * context_switch - switch to the new MM and the new
  * thread's register state.
  */
-static inline void
+static inline int
 context_switch(struct rq *rq, struct task_struct *prev,
                struct task_struct *next)
 {
@@ -2955,7 +2960,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
          * CPUs since it called schedule(), thus the 'rq' on its stack
          * frame will be invalid.
          */
-        finish_task_switch(this_rq(), prev);
+        return finish_task_switch(this_rq(), prev);
 }
 
 /*
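context_switch() simply plumbs the value through. As the retained comment notes, the task may resume on a different CPU after the switch, so the rq captured earlier in schedule() can be stale and finish_task_switch() must be handed this_rq(); returning the post_schedule flag lets the decision travel back to schedule() as a plain return value rather than through any pre-switch pointer.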
@@ -5366,6 +5371,7 @@ asmlinkage void __sched schedule(void)
 {
         struct task_struct *prev, *next;
         unsigned long *switch_count;
+        int post_schedule = 0;
         struct rq *rq;
         int cpu;
 
@@ -5416,15 +5422,25 @@ need_resched_nonpreemptible:
                 rq->curr = next;
                 ++*switch_count;
 
-                context_switch(rq, prev, next); /* unlocks the rq */
+                post_schedule = context_switch(rq, prev, next); /* unlocks the rq */
                 /*
                  * the context switch might have flipped the stack from under
                  * us, hence refresh the local variables.
                  */
                 cpu = smp_processor_id();
                 rq = cpu_rq(cpu);
-        } else
+        } else {
+#ifdef CONFIG_SMP
+                if (current->sched_class->needs_post_schedule)
+                        post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
                 spin_unlock_irq(&rq->lock);
+        }
+
+#ifdef CONFIG_SMP
+        if (post_schedule)
+                current->sched_class->post_schedule(rq);
+#endif
 
         if (unlikely(reacquire_kernel_lock(current) < 0))
                 goto need_resched_nonpreemptible;
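The last hunk is the behavioral point of the patch. Before, post_schedule() could only run via finish_task_switch(), i.e. when an actual context switch happened; if schedule() picked the same task again (the else branch), the hook was never consulted. Now both branches feed a single guarded call site after the lock is dropped: the switch path gets the flag back from context_switch(), and the no-switch path samples needs_post_schedule() itself right before spin_unlock_irq(). A small runnable model of that converging control flow, with stand-in names of mine rather than the kernel's:

#include <pthread.h>
#include <stdio.h>

struct rq_model {
        pthread_mutex_t lock;
        int overloaded;                  /* only meaningful under the lock */
};

/* Stands in for context_switch(): decides under the lock, then
 * drops it, as the "unlocks the rq" comment in the hunk says. */
static int context_switch_model(struct rq_model *rq)
{
        int post = rq->overloaded;       /* sampled while still locked */

        pthread_mutex_unlock(&rq->lock);
        return post;
}

static void post_schedule_model(struct rq_model *rq)
{
        (void)rq;
        printf("post-schedule balancing\n");
}

static void schedule_model(struct rq_model *rq, int prev_equals_next)
{
        int post_schedule = 0;

        pthread_mutex_lock(&rq->lock);
        if (!prev_equals_next) {
                post_schedule = context_switch_model(rq); /* drops the lock */
        } else {
                post_schedule = rq->overloaded;  /* decide under the lock */
                pthread_mutex_unlock(&rq->lock);
        }

        if (post_schedule)               /* single call site, lock dropped */
                post_schedule_model(rq);
}

int main(void)
{
        struct rq_model rq = { PTHREAD_MUTEX_INITIALIZER, 1 };

        schedule_model(&rq, 0);          /* a real context switch happened */
        schedule_model(&rq, 1);          /* prev == next, no switch */
        return 0;
}

Build with cc -pthread. The property worth noting is that both branches make the decision while the lock is held but act on it only after the unlock, leaving exactly one place where the callback can fire.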