about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--include/linux/sched.h2
-rw-r--r--kernel/sched.c18
-rw-r--r--kernel/trace/trace.c15
3 files changed, 31 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 652d380ae563..a3970b563757 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -246,6 +246,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
246extern void init_idle(struct task_struct *idle, int cpu); 246extern void init_idle(struct task_struct *idle, int cpu);
247extern void init_idle_bootup_task(struct task_struct *idle); 247extern void init_idle_bootup_task(struct task_struct *idle);
248 248
249extern int runqueue_is_locked(void);
250
249extern cpumask_t nohz_cpu_mask; 251extern cpumask_t nohz_cpu_mask;
250#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 252#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
251extern int select_nohz_load_balancer(int cpu); 253extern int select_nohz_load_balancer(int cpu);
diff --git a/kernel/sched.c b/kernel/sched.c
index 673b588b713b..9ca4a2e6a236 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -642,6 +642,24 @@ static inline void update_rq_clock(struct rq *rq)
642# define const_debug static const 642# define const_debug static const
643#endif 643#endif
644 644
645/**
646 * runqueue_is_locked
647 *
648 * Returns true if the current cpu runqueue is locked.
649 * This interface allows printk to be called with the runqueue lock
650 * held and know whether or not it is OK to wake up the klogd.
651 */
652int runqueue_is_locked(void)
653{
654 int cpu = get_cpu();
655 struct rq *rq = cpu_rq(cpu);
656 int ret;
657
658 ret = spin_is_locked(&rq->lock);
659 put_cpu();
660 return ret;
661}
662
645/* 663/*
646 * Debugging: various feature bits 664 * Debugging: various feature bits
647 */ 665 */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 65173b14b914..2ca9d66aa74e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -70,12 +70,13 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
70 70
71unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 71unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
72 72
73/*
74 * FIXME: where should this be called?
75 */
76void trace_wake_up(void) 73void trace_wake_up(void)
77{ 74{
78 if (!(trace_flags & TRACE_ITER_BLOCK)) 75 /*
76 * The runqueue_is_locked() can fail, but this is the best we
77 * have for now:
78 */
79 if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
79 wake_up(&trace_wait); 80 wake_up(&trace_wait);
80} 81}
81 82
@@ -657,6 +658,8 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
657 entry->fn.ip = ip; 658 entry->fn.ip = ip;
658 entry->fn.parent_ip = parent_ip; 659 entry->fn.parent_ip = parent_ip;
659 spin_unlock_irqrestore(&data->lock, irq_flags); 660 spin_unlock_irqrestore(&data->lock, irq_flags);
661
662 trace_wake_up();
660} 663}
661 664
662void 665void
@@ -686,6 +689,8 @@ __trace_special(void *__tr, void *__data,
686 entry->special.arg2 = arg2; 689 entry->special.arg2 = arg2;
687 entry->special.arg3 = arg3; 690 entry->special.arg3 = arg3;
688 spin_unlock_irqrestore(&data->lock, irq_flags); 691 spin_unlock_irqrestore(&data->lock, irq_flags);
692
693 trace_wake_up();
689} 694}
690 695
691#endif 696#endif
@@ -759,6 +764,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
759 entry->ctx.next_prio = wakee->prio; 764 entry->ctx.next_prio = wakee->prio;
760 __trace_stack(tr, data, flags, 5); 765 __trace_stack(tr, data, flags, 5);
761 spin_unlock_irqrestore(&data->lock, irq_flags); 766 spin_unlock_irqrestore(&data->lock, irq_flags);
767
768 trace_wake_up();
762} 769}
763 770
764#ifdef CONFIG_FTRACE 771#ifdef CONFIG_FTRACE