Diffstat (limited to 'kernel/sched')

 kernel/sched/clock.c    | 26 ++++++++++++++++++++++++++
 kernel/sched/core.c     | 68 ++++++++++----------------------------------------------------------
 kernel/sched/cputime.c  |  2 +-
 kernel/sched/features.h |  7 -------
 kernel/sched/stats.c    |  7 +------
 5 files changed, 38 insertions(+), 72 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c685e31492df..c3ae1446461c 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
 	u64 this_clock, remote_clock;
 	u64 *ptr, old_val, val;
 
+#if BITS_PER_LONG != 64
+again:
+	/*
+	 * Careful here: The local and the remote clock values need to
+	 * be read out atomic as we need to compare the values and
+	 * then update either the local or the remote side. So the
+	 * cmpxchg64 below only protects one readout.
+	 *
+	 * We must reread via sched_clock_local() in the retry case on
+	 * 32bit as an NMI could use sched_clock_local() via the
+	 * tracer and hit between the readout of
+	 * the low32bit and the high 32bit portion.
+	 */
+	this_clock = sched_clock_local(my_scd);
+	/*
+	 * We must enforce atomic readout on 32bit, otherwise the
+	 * update on the remote cpu can hit inbetween the readout of
+	 * the low32bit and the high 32bit portion.
+	 */
+	remote_clock = cmpxchg64(&scd->clock, 0, 0);
+#else
+	/*
+	 * On 64bit the read of [my]scd->clock is atomic versus the
+	 * update, so we can avoid the above 32bit dance.
+	 */
 	sched_clock_local(my_scd);
 again:
 	this_clock = my_scd->clock;
 	remote_clock = scd->clock;
+#endif
 
 	/*
 	 * Use the opportunity that we have both locks
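
The cmpxchg64(&scd->clock, 0, 0) above is the standard trick for an atomic 64-bit read on a 32-bit machine: a compare-and-swap whose old and new values are equal never changes the memory, but always returns its current contents in one atomic operation. A minimal user-space sketch of the same idiom, using GCC's __atomic builtins (illustrative name, not kernel API):

#include <stdint.h>

/* Atomically read a 64-bit value on a machine where a plain load
 * would compile to two 32-bit loads and could observe a torn value. */
static uint64_t atomic_read64(uint64_t *p)
{
        uint64_t expected = 0;

        /* CAS with old == new == 0: only stores if *p was already 0
         * (a no-op), and in every case reports the value it observed
         * back through "expected". */
        __atomic_compare_exchange_n(p, &expected, 0, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
}
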
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cb49b2ab0e16..5662f58f0b69 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -512,11 +512,6 @@ static inline void init_hrtick(void)
  * the target CPU.
  */
 #ifdef CONFIG_SMP
-
-#ifndef tsk_is_polling
-#define tsk_is_polling(t) 0
-#endif
-
 void resched_task(struct task_struct *p)
 {
 	int cpu;
@@ -1498,8 +1493,10 @@ static void try_to_wake_up_local(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
 
-	BUG_ON(rq != this_rq());
-	BUG_ON(p == current);
+	if (WARN_ON_ONCE(rq != this_rq()) ||
+	    WARN_ON_ONCE(p == current))
+		return;
+
 	lockdep_assert_held(&rq->lock);
 
 	if (!raw_spin_trylock(&p->pi_lock)) {
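
The rewrite above relies on WARN_ON_ONCE() both reporting the broken invariant and returning its condition, so a violated assumption becomes an early return instead of a BUG() halt. A rough user-space stand-in for those semantics (the real kernel macro additionally taints the kernel and dumps a stack trace):

#include <stdbool.h>
#include <stdio.h>

/* Evaluate the condition on every call, print a warning only on the
 * first failure, and hand the condition back so it can drive an
 * early return -- the shape try_to_wake_up_local() now uses. */
#define WARN_ON_ONCE(cond) ({                                   \
        static bool warned;                                     \
        bool c = (cond);                                        \
        if (c && !warned) {                                     \
                warned = true;                                  \
                fprintf(stderr, "warning at %s:%d\n",           \
                        __FILE__, __LINE__);                    \
        }                                                       \
        c;                                                      \
})
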
@@ -2997,51 +2994,6 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
-#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-
-static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
-{
-	if (lock->owner != owner)
-		return false;
-
-	/*
-	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
-	 * lock->owner still matches owner, if that fails, owner might
-	 * point to free()d memory, if it still matches, the rcu_read_lock()
-	 * ensures the memory stays valid.
-	 */
-	barrier();
-
-	return owner->on_cpu;
-}
-
-/*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
- */
-int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
-{
-	if (!sched_feat(OWNER_SPIN))
-		return 0;
-
-	rcu_read_lock();
-	while (owner_running(lock, owner)) {
-		if (need_resched())
-			break;
-
-		arch_mutex_cpu_relax();
-	}
-	rcu_read_unlock();
-
-	/*
-	 * We break out the loop above on need_resched() and when the
-	 * owner changed, which is a sign for heavy contention. Return
-	 * success only when lock->owner is NULL.
-	 */
-	return lock->owner == NULL;
-}
-#endif
-
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
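
The comment in the removed owner_running() describes a speculative-dereference pattern worth spelling out: re-validate the possibly-stale pointer, then a compiler barrier, then the dereference, all inside an RCU read-side section so the pointed-to memory cannot be freed underneath. A generic user-space sketch of that shape (illustrative types; barrier() written out as a GCC compiler barrier):

#include <stdbool.h>

struct owner { int on_cpu; };
struct lock  { struct owner *owner; };

static bool owner_running_sketch(struct lock *l, struct owner *o)
{
        /* "o" may be stale: only if it still matches l->owner do we
         * know it points at memory the RCU read-side section keeps
         * alive (rcu_read_lock() in the removed code). */
        if (l->owner != o)
                return false;

        /* Compiler barrier: keep the dereference below from being
         * hoisted above the check. */
        __asm__ __volatile__("" ::: "memory");

        return o->on_cpu;
}
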
@@ -4130,6 +4082,10 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	get_task_struct(p);
 	rcu_read_unlock();
 
+	if (p->flags & PF_NO_SETAFFINITY) {
+		retval = -EINVAL;
+		goto out_put_task;
+	}
 	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
 		retval = -ENOMEM;
 		goto out_put_task;
 	}
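
PF_NO_SETAFFINITY marks tasks (typically per-cpu kthreads) whose affinity must not change; testing it here, before any allocation, pairs with the removal of the old PF_THREAD_BOUND test from set_cpus_allowed_ptr() later in this patch. A minimal sketch of the gate (flag value as in <linux/sched.h> of this era; the struct is an illustrative stand-in):

#include <errno.h>

#define PF_NO_SETAFFINITY 0x04000000    /* task may not change affinity */

struct task { unsigned int flags; };

/* Refuse early, before cpumask allocation, as the hunk above does. */
static int setaffinity_allowed(const struct task *p)
{
        if (p->flags & PF_NO_SETAFFINITY)
                return -EINVAL;
        return 0;
}
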
@@ -4630,6 +4586,7 @@ void sched_show_task(struct task_struct *p)
 		task_pid_nr(p), ppid,
 		(unsigned long)task_thread_info(p)->flags);
 
+	print_worker_info(KERN_INFO, p);
 	show_stack(p, NULL);
 }
 
@@ -4777,11 +4734,6 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}
 
-	if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	do_set_cpus_allowed(p, new_mask);
 
 	/* Can the task run on the task's current CPU? If so, we're done */
@@ -5003,7 +4955,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 }
 
 static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX;
+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
 set_table_entry(struct ctl_table *entry,
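
The "-1" matters because the sysctl value ends up as an index into the cpu_load[CPU_LOAD_IDX_MAX] array and the min/max bounds applied by proc_dointvec_minmax() are inclusive, so accepting CPU_LOAD_IDX_MAX itself allowed an index one past the end. A small stand-alone illustration of the inclusive-bounds check:

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

/* Inclusive-range check, the way proc_dointvec_minmax() applies it. */
static int store_load_idx(int val, int max, int *out)
{
        if (val < 0 || val > max)
                return -1;      /* rejected (-EINVAL in the kernel) */
        *out = val;
        return 0;
}

int main(void)
{
        int idx, load[CPU_LOAD_IDX_MAX];

        /* Old bound: max = CPU_LOAD_IDX_MAX accepted 5, one past the
         * end of load[]; new bound CPU_LOAD_IDX_MAX - 1 rejects it. */
        if (store_load_idx(CPU_LOAD_IDX_MAX, CPU_LOAD_IDX_MAX - 1, &idx) != 0)
                printf("index %d rejected\n", CPU_LOAD_IDX_MAX);
        (void)load;
        return 0;
}
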
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 337a36745800..cc2dc3eea8a3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -294,7 +294,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
 	t = tsk;
 	do {
-		task_cputime(tsk, &utime, &stime);
+		task_cputime(t, &utime, &stime);
 		times->utime += utime;
 		times->stime += stime;
 		times->sum_exec_runtime += task_sched_runtime(t);
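
This one-character fix is a loop-variable bug: the loop walks every thread t in the group, but utime/stime were sampled from tsk, the thread the walk started at, so the sum counted one thread's times once per thread while sum_exec_runtime (already using t) was correct. A user-space sketch of the bug, with a hand-rolled circular list standing in for the kernel's thread-group walk:

#include <stdio.h>

struct task { unsigned long utime; struct task *next; };

static unsigned long group_utime(struct task *tsk, int use_t)
{
        unsigned long sum = 0;
        struct task *t = tsk;

        do {
                /* The buggy form read tsk->utime here: the starting
                 * thread's time, added once per thread in the group. */
                sum += use_t ? t->utime : tsk->utime;
                t = t->next;
        } while (t != tsk);     /* circular list, like the kernel's */

        return sum;
}

int main(void)
{
        struct task a = { 10 }, b = { 1 }, c = { 1 };

        a.next = &b; b.next = &c; c.next = &a;
        printf("fixed=%lu buggy=%lu\n",
               group_utime(&a, 1), group_utime(&a, 0));  /* 12 vs 30 */
        return 0;
}
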
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1ad1d2b5395f..99399f8e4799 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -46,13 +46,6 @@ SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
 
 /*
- * Spin-wait on mutex acquisition when the mutex owner is running on
- * another cpu -- assumes that when the owner is running, it will soon
- * release the lock. Decreases scheduling overhead.
- */
-SCHED_FEAT(OWNER_SPIN, true)
-
-/*
  * Decrement CPU power based on time not spent running tasks
  */
 SCHED_FEAT(NONTASK_POWER, true)
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index e036eda1a9c9..da98af347e8b 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -130,16 +130,11 @@ static int schedstat_open(struct inode *inode, struct file *file)
 	return seq_open(file, &schedstat_sops);
 }
 
-static int schedstat_release(struct inode *inode, struct file *file)
-{
-	return 0;
-};
-
 static const struct file_operations proc_schedstat_operations = {
 	.open = schedstat_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
-	.release = schedstat_release,
+	.release = seq_release,
 };
 
 static int __init proc_schedstat_init(void)
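
seq_open() allocates a struct seq_file and hangs it off file->private_data; seq_release() is its matching destructor. The deleted schedstat_release() returned 0 without freeing anything, so every open/close cycle of /proc/schedstat leaked that allocation. The canonical pairing, sketched for a hypothetical proc file (my_sops left as a stub for illustration; not buildable outside a kernel tree):

#include <linux/fs.h>
#include <linux/seq_file.h>

static const struct seq_operations my_sops;     /* stub for illustration */

static int my_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &my_sops);        /* allocates the seq_file */
}

static const struct file_operations my_fops = {
        .open    = my_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release, /* frees it; a no-op here would leak */
};
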