Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	92
1 file changed, 76 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e2558c2ba67..7299083e69e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4404,10 +4404,7 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
-		defined(CONFIG_PREEMPT_TRACER))
-
-static inline unsigned long get_parent_ip(unsigned long addr)
+unsigned long get_parent_ip(unsigned long addr)
 {
 	if (in_lock_functions(addr)) {
 		addr = CALLER_ADDR2;
@@ -4417,6 +4414,9 @@ static inline unsigned long get_parent_ip(unsigned long addr)
 	return addr;
 }
 
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+		defined(CONFIG_PREEMPT_TRACER))
+
 void __kprobes add_preempt_count(int val)
 {
 #ifdef CONFIG_DEBUG_PREEMPT
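
The two hunks above pull get_parent_ip() out of the
CONFIG_PREEMPT/CONFIG_DEBUG_PREEMPT/CONFIG_PREEMPT_TRACER conditional and
drop the "static inline", so the helper is now built unconditionally and
callable from other translation units. Its existing caller in the preempt
accounting of this file looks roughly like the following (abridged sketch
from the same era of sched.c, shown for orientation; not part of this
patch):

	void __kprobes sub_preempt_count(int val)
	{
		/* ... debug checks elided ... */
		if (preempt_count() == val)
			trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
		preempt_count() -= val;
	}

get_parent_ip() steps past in_lock_functions() addresses so the tracer
records the real call site rather than a locking wrapper.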
@@ -4543,15 +4543,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
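
With the need_resched loop and preempt_disable() hoisted out (next hunk),
__schedule() performs exactly one scheduling pass and relies on its caller
to have disabled preemption. The in-kernel preemption entry point composes
with the schedule() wrapper in the same way; roughly (abridged sketch of
preempt_schedule() from this era, for orientation only):

	asmlinkage void __sched preempt_schedule(void)
	{
		struct thread_info *ti = current_thread_info();

		/* No point in a pass if preemption or irqs are off. */
		if (likely(ti->preempt_count || irqs_disabled()))
			return;

		do {
			add_preempt_count(PREEMPT_ACTIVE);
			schedule();
			sub_preempt_count(PREEMPT_ACTIVE);

			/*
			 * We could have missed a preemption opportunity
			 * between schedule and now.
			 */
			barrier();
		} while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
	}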
@@ -4608,13 +4606,80 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+	preempt_disable();
+	__schedule();
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
 
+#ifdef CONFIG_SMP
+/*
+ * Look out! "owner" is an entirely speculative pointer
+ * access and not reliable.
+ */
+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
+{
+	unsigned int cpu;
+	struct rq *rq;
+
+	if (!sched_feat(OWNER_SPIN))
+		return 0;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * Need to access the cpu field knowing that
+	 * DEBUG_PAGEALLOC could have unmapped it if
+	 * the mutex owner just released it and exited.
+	 */
+	if (probe_kernel_address(&owner->cpu, cpu))
+		goto out;
+#else
+	cpu = owner->cpu;
+#endif
+
+	/*
+	 * Even if the access succeeded (likely case),
+	 * the cpu field may no longer be valid.
+	 */
+	if (cpu >= nr_cpumask_bits)
+		goto out;
+
+	/*
+	 * We need to validate that we can do a
+	 * get_cpu() and that we have the percpu area.
+	 */
+	if (!cpu_online(cpu))
+		goto out;
+
+	rq = cpu_rq(cpu);
+
+	for (;;) {
+		/*
+		 * Owner changed, break to re-assess state.
+		 */
+		if (lock->owner != owner)
+			break;
+
+		/*
+		 * Is that owner really running on that cpu?
+		 */
+		if (task_thread_info(rq->curr) != owner || need_resched())
+			return 0;
+
+		cpu_relax();
+	}
+out:
+	return 1;
+}
+#endif
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
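
mutex_spin_on_owner() returns 1 while spinning still looks worthwhile (the
owner changed, or could not be examined safely) and 0 once the owner stops
running or this task itself needs to reschedule. Its consumer lives in
kernel/mutex.c, outside this diffstat; the adaptive-spin loop there is
approximately the following (hedged sketch; helper and field names as in
the same patch series):

	for (;;) {
		struct thread_info *owner;

		/*
		 * If there is an owner, spin while it runs on its CPU;
		 * once it is scheduled out, sleeping is the better choice.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Opportunistically try to take the lock while spinning. */
		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * No visible owner means an unlock is in flight; do not
		 * keep spinning if we ought to reschedule instead.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		cpu_relax();
	}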
@@ -5944,12 +6009,7 @@ void sched_show_task(struct task_struct *p)
 	printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
-	{
-		unsigned long *n = end_of_stack(p);
-		while (!*n)
-			n++;
-		free = (unsigned long)n - (unsigned long)end_of_stack(p);
-	}
+	free = stack_not_used(p);
 #endif
 	printk(KERN_CONT "%5lu %5d %6d\n", free,
 		task_pid_nr(p), task_pid_nr(p->real_parent));
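
The open-coded scan removed above is replaced by a stack_not_used()
helper. Reconstructed from the deleted lines, it presumably reads close to
this (sketch; the real helper in the headers may differ in detail):

	static inline unsigned long stack_not_used(struct task_struct *p)
	{
		unsigned long *n = end_of_stack(p);

		while (!*n)	/* scan past the still-zero (untouched) words */
			n++;

		return (unsigned long)n - (unsigned long)end_of_stack(p);
	}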
@@ -9490,7 +9550,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 	u64 data;
 
 #ifndef CONFIG_64BIT
@@ -9509,7 +9569,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 
 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 {
-	u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 
 #ifndef CONFIG_64BIT
 	/*
@@ -9605,7 +9665,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 	ca = task_ca(tsk);
 
 	for (; ca; ca = ca->parent) {
-		u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 		*cpuusage += cputime;
 	}
 }
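
The last three hunks are a mechanical rename: percpu_ptr() became
per_cpu_ptr() to match the rest of the percpu API. Usage is unchanged; for
a counter array such as ca->cpuusage the pattern is (illustrative sketch,
allocation as in cpuacct_create() of this era):

	/* one u64 usage counter per possible CPU */
	ca->cpuusage = alloc_percpu(u64);

	/* later, addressing and updating one CPU's slot */
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	*cpuusage += cputime;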