Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 96
 1 file changed, 88 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5757e03cfac0..2325db2be31b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1110,7 +1110,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
         if (rq == this_rq()) {
                 hrtimer_restart(timer);
         } else if (!rq->hrtick_csd_pending) {
-                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd);
+                __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
                 rq->hrtick_csd_pending = 1;
         }
 }
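
The only change in this hunk is the extra third argument to __smp_call_function_single(), which now takes an explicit wait flag; passing 0 keeps the cross-CPU call asynchronous. A minimal sketch of a caller under that assumed three-argument signature is below; the csd variable and handler are illustrative, not part of this diff.

#include <linux/smp.h>

/*
 * Illustrative only (not part of this diff): fire an asynchronous
 * cross-CPU function call with the updated API, where the final
 * argument selects whether to wait for the remote handler.
 */
static void example_ipi_func(void *info)        /* hypothetical handler */
{
}

static struct call_single_data example_csd = {
        .func = example_ipi_func,
        .info = NULL,
};

static void example_kick(int cpu)
{
        /* wait == 0: return immediately, as the hrtick path above does */
        __smp_call_function_single(cpu, &example_csd, 0);
}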
@@ -4942,15 +4942,13 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
         struct task_struct *prev, *next;
         unsigned long *switch_count;
         struct rq *rq;
         int cpu;
 
-need_resched:
-        preempt_disable();
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
         rcu_qsctr_inc(cpu);
@@ -5007,13 +5005,80 @@ need_resched_nonpreemptible:
 
         if (unlikely(reacquire_kernel_lock(current) < 0))
                 goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+        preempt_disable();
+        __schedule();
         preempt_enable_no_resched();
         if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
                 goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
 
+#ifdef CONFIG_SMP
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+{
+        unsigned int cpu;
+        struct rq *rq;
+
+        if (!sched_feat(OWNER_SPIN))
+                return 0;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+        /*
+         * Need to access the cpu field knowing that
+         * DEBUG_PAGEALLOC could have unmapped it if
+         * the mutex owner just released it and exited.
+         */
+        if (probe_kernel_address(&owner->cpu, cpu))
+                goto out;
+#else
+        cpu = owner->cpu;
+#endif
+
+        /*
+         * Even if the access succeeded (likely case),
+         * the cpu field may no longer be valid.
+         */
+        if (cpu >= nr_cpumask_bits)
+                goto out;
+
+        /*
+         * We need to validate that we can do a
+         * get_cpu() and that we have the percpu area.
+         */
+        if (!cpu_online(cpu))
+                goto out;
+
+        rq = cpu_rq(cpu);
+
+        for (;;) {
+                /*
+                 * Owner changed, break to re-assess state.
+                 */
+                if (lock->owner != owner)
+                        break;
+
+                /*
+                 * Is that owner really running on that cpu?
+                 */
+                if (task_thread_info(rq->curr) != owner || need_resched())
+                        return 0;
+
+                cpu_relax();
+        }
+out:
+        return 1;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
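
Taken together, the schedule()/__schedule() split and mutex_spin_on_owner() are the scheduler-side pieces of adaptive mutex spinning: mutex_spin_on_owner() returns 1 for as long as spinning still looks worthwhile (the owner changed or released the lock) and 0 once the owner is no longer running on its CPU or the spinner itself needs to reschedule. A rough, hypothetical sketch of how a mutex lock slowpath could drive it follows; the helper name, the use of lock->count, and the surrounding structure are assumptions for illustration, not the actual kernel/mutex.c code.

#include <linux/mutex.h>
#include <linux/sched.h>

/*
 * Illustrative sketch only: spin while the lock owner runs on another
 * CPU, grab the lock if it becomes free, and report failure so the
 * caller can fall back to sleeping.
 */
static bool example_mutex_try_spin(struct mutex *lock)
{
        for (;;) {
                struct thread_info *owner;

                /* Speculative snapshot; mutex_spin_on_owner() revalidates it. */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        return false;   /* owner not on-CPU: go block instead */

                /* Lock looks free (count 1 -> 0): we own it now. */
                if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
                        return true;

                /* Owner still holds it; be polite to a sibling hardware thread. */
                cpu_relax();
        }
}

On a failed spin, the blocking path can then enter the scheduler core via __schedule(), which is presumably why schedule() is split into a preemption-managing wrapper around __schedule() in this diff.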
@@ -5131,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
         __wake_up_common(q, mode, 1, 0, NULL);
 }
 
+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
+{
+        __wake_up_common(q, mode, 1, 0, key);
+}
+
 /**
- * __wake_up_sync - wake up threads blocked on a waitqueue.
+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  * @q: the waitqueue
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: opaque value to be passed to wakeup targets
  *
  * The sync wakeup differs that the waker knows that it will schedule
  * away soon, so while the target thread will be woken up, it will not
@@ -5144,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void
-__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
+                        int nr_exclusive, void *key)
 {
         unsigned long flags;
         int sync = 1;
@@ -5157,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
                 sync = 0;
 
         spin_lock_irqsave(&q->lock, flags);
-        __wake_up_common(q, mode, nr_exclusive, sync, NULL);
+        __wake_up_common(q, mode, nr_exclusive, sync, key);
         spin_unlock_irqrestore(&q->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
+
+/*
+ * __wake_up_sync - see __wake_up_sync_key()
+ */
+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+{
+        __wake_up_sync_key(q, mode, nr_exclusive, NULL);
+}
 EXPORT_SYMBOL_GPL(__wake_up_sync);      /* For internal use only */
 
 /**
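
The *_key variants added above simply forward an opaque key to __wake_up_common(), where it reaches each waiter's wake function; a waiter registered with a custom wait_queue_t func can inspect the key and decline the wakeup. A hypothetical caller is sketched below, assuming readers register such a filtering wake function; the POLLIN encoding of the key is an illustrative assumption, not something this diff defines.

#include <linux/wait.h>
#include <linux/poll.h>

/*
 * Illustrative only: wake at most one exclusive waiter, handing it a
 * key describing the event. Waiters whose wake function rejects the
 * key stay asleep; plain default_wake_function() waiters ignore it.
 */
static void example_notify_readable(wait_queue_head_t *wq)
{
        void *key = (void *)(unsigned long)POLLIN;

        /* Sync variant: the caller knows it will schedule away shortly. */
        __wake_up_sync_key(wq, TASK_INTERRUPTIBLE, 1, key);
}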
