Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	57
1 file changed, 54 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 94ead43eda62..42899dce837d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -70,6 +70,7 @@
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -607,6 +608,24 @@ static inline void update_rq_clock(struct rq *rq)
 # define const_debug static const
 #endif
 
+/**
+ * runqueue_is_locked
+ *
+ * Returns true if the current cpu runqueue is locked.
+ * This interface allows printk to be called with the runqueue lock
+ * held and know whether or not it is OK to wake up the klogd.
+ */
+int runqueue_is_locked(void)
+{
+	int cpu = get_cpu();
+	struct rq *rq = cpu_rq(cpu);
+	int ret;
+
+	ret = spin_is_locked(&rq->lock);
+	put_cpu();
+	return ret;
+}
+
 /*
  * Debugging: various feature bits
  */
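The kernel-doc above names the intended consumer: printk() can run with the runqueue lock held, and waking klogd from that context would deadlock because wake_up() needs the same lock. A minimal caller sketch, assuming printk's log_wait wait queue; the function name and the defer-and-retry policy are illustrative, not part of this patch:

/* Sketch only: skip the klogd wakeup while this CPU's runqueue
 * lock is held; wake_up() would try to retake rq->lock and
 * deadlock. A real caller would retry the wakeup later. */
static void sketch_wake_up_klogd(void)
{
	if (!runqueue_is_locked())
		wake_up_interruptible(&log_wait);
}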
@@ -831,7 +850,7 @@ static unsigned long long __cpu_clock(int cpu)
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
  * clock constructed from sched_clock():
  */
-unsigned long long cpu_clock(int cpu)
+unsigned long long notrace cpu_clock(int cpu)
 {
 	unsigned long long prev_cpu_time, time, delta_time;
 	unsigned long flags;
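cpu_clock() gains notrace because the tracers themselves call it for timestamps; if it were instrumented by the function tracer, every trace event would recurse into another trace event. For context, notrace is roughly the following compiler attribute (quoted from memory of this era's include/linux/linkage.h, so treat it as an approximation):

/* Approximate definition, for reference: */
#define notrace	__attribute__((no_instrument_function))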
@@ -2149,6 +2168,9 @@ out_activate:
 	success = 1;
 
 out_running:
+	trace_mark(kernel_sched_wakeup,
+		"pid %d state %ld ## rq %p task %p rq->curr %p",
+		p->pid, p->state, rq, p, rq->curr);
 	check_preempt_curr(rq, p);
 
 	p->state = TASK_RUNNING;
@@ -2279,6 +2301,9 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(p, rq);
 	}
+	trace_mark(kernel_sched_wakeup_new,
+		"pid %d state %ld ## rq %p task %p rq->curr %p",
+		p->pid, p->state, rq, p, rq->curr);
 	check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2451,6 +2476,11 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
+	trace_mark(kernel_sched_schedule,
+		"prev_pid %d next_pid %d prev_state %ld "
+		"## rq %p prev %p next %p",
+		prev->pid, next->pid, prev->state,
+		rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
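The three trace_mark() sites above (kernel_sched_wakeup, kernel_sched_wakeup_new, kernel_sched_schedule) stay inert until a probe is registered on the marker; the in-tree consumer at the time was the sched_switch tracer. A sketch of hooking the context-switch marker, assuming this kernel generation's markers API (marker_probe_register() with a va_list probe); the probe name and body are illustrative:

#include <linux/marker.h>

/* Illustrative probe: arguments arrive in format-string order. */
static void probe_sched_switch(void *probe_data, void *call_data,
			       const char *format, va_list *args)
{
	int prev_pid = va_arg(*args, int);
	int next_pid = va_arg(*args, int);
	long prev_state = va_arg(*args, long);

	/* record the prev_pid -> next_pid switch somewhere */
	(void)prev_pid; (void)next_pid; (void)prev_state;
}

static int __init sketch_register(void)
{
	/* the format string must match the trace_mark() site exactly */
	return marker_probe_register("kernel_sched_schedule",
		"prev_pid %d next_pid %d prev_state %ld "
		"## rq %p prev %p next %p",
		probe_sched_switch, NULL);
}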
@@ -4021,26 +4051,44 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+				defined(CONFIG_PREEMPT_TRACER))
+
+static inline unsigned long get_parent_ip(unsigned long addr)
+{
+	if (in_lock_functions(addr)) {
+		addr = CALLER_ADDR2;
+		if (in_lock_functions(addr))
+			addr = CALLER_ADDR3;
+	}
+	return addr;
+}
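get_parent_ip() exists so the latency tracer reports a meaningful caller: CALLER_ADDR1 is often a lock primitive (spin_lock() itself bumps the preempt count), and in_lock_functions() lets the code step one or two frames further out to the lock's caller. The CALLER_ADDRn macros wrap __builtin_return_address(n) and are only reliable with frame pointers enabled; approximately, per this era's <linux/ftrace.h>:

#ifdef CONFIG_FRAME_POINTER
# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#endif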
 
 void __kprobes add_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
 	/*
 	 * Underflow?
 	 */
 	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
 		return;
+#endif
 	preempt_count() += val;
+#ifdef CONFIG_DEBUG_PREEMPT
 	/*
 	 * Spinlock count overflowing soon?
 	 */
 	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
 				PREEMPT_MASK - 10);
+#endif
+	if (preempt_count() == val)
+		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 EXPORT_SYMBOL(add_preempt_count);
 
 void __kprobes sub_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
 	/*
 	 * Underflow?
 	 */
@@ -4052,7 +4100,10 @@ void __kprobes sub_preempt_count(int val)
 	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
 			!(preempt_count() & PREEMPT_MASK)))
 		return;
+#endif
 
+	if (preempt_count() == val)
+		trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 	preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
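Both hooks fire only on the outermost transition. In add_preempt_count() the comparison runs after the increment, so preempt_count() == val means the count was zero on entry and this call is what disabled preemption; in sub_preempt_count() it runs before the decrement, so it means the count is about to drop back to zero. A worked sequence (nesting depths hypothetical):

/*
 * preempt_disable()   add_preempt_count(1): 0 -> 1,
 *                     count == val  => trace_preempt_off()
 * spin_lock(&l)       add_preempt_count(1): 1 -> 2,
 *                     count != val  => no event
 * spin_unlock(&l)     sub_preempt_count(1): count 2 != val,
 *                     no event, then 2 -> 1
 * preempt_enable()    sub_preempt_count(1): count 1 == val
 *                     => trace_preempt_on(), then 1 -> 0
 */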
@@ -5384,7 +5435,7 @@ out_unlock:
 	return retval;
 }
 
-static const char stat_nam[] = "RSDTtZX";
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 
 void sched_show_task(struct task_struct *p)
 {
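Replacing the literal "RSDTtZX" with TASK_STATE_TO_CHAR_STR moves the state-letter table into <linux/sched.h> so the tracer can share it instead of duplicating the string. sched_show_task() decodes a task state by indexing with the position of the lowest set state bit plus one (0 for TASK_RUNNING); a sketch of that idiom, assuming the string is still "RSDTtZX" (R running, S sleeping, D disk sleep, T stopped, t traced, Z zombie, X dead):

/* Sketch of sched_show_task()'s decoding, in sched.c's context
 * where stat_nam is visible. */
static char sketch_state_char(long state)
{
	unsigned long idx = state ? __ffs(state) + 1 : 0;

	return idx < sizeof(stat_nam) - 1 ? stat_nam[idx] : '?';
}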