author	Soumya PN <soumya.p.n@hpe.com>	2016-05-17 12:01:14 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2016-05-20 13:19:37 -0400
commit	6112a300c9e41993cc0dc56ac393743d28381284 (patch)
tree	bfc725d59bb98e3726c6884add2445c233c64eb7
parent	91e6f1ce8657795cec83a81090c20cbaa8337c68 (diff)
ftrace: Don't disable irqs when taking the tasklist_lock read_lock
In ftrace.c, inside the function alloc_retstack_tasklist() (which is invoked when
function_graph tracing is on), the tasklist_lock is held as reader while iterating
through a list of threads. Here the lock is held as reader with irqs disabled.
The tasklist_lock is never write-locked in interrupt context, so it is safe not to
disable interrupts for the duration of the read_lock in this block, which can be
significant, given that the block of code iterates through all threads. Hence,
change the code to call read_lock() and read_unlock() instead of
read_lock_irqsave() and read_unlock_irqrestore().

A similar change was made in commits:
8063e41d2ffc ("tracing: Change syscall_*regfunc() to check PF_KTHREAD and
              use for_each_process_thread()")
3472eaa1f12e ("sched: normalize_rt_tasks(): Don't use _irqsave for
              tasklist_lock, use task_rq_lock()")

Link: http://lkml.kernel.org/r/1463500874-77480-1-git-send-email-soumya.p.n@hpe.com

Signed-off-by: Soumya PN <soumya.p.n@hpe.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
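[Editor's illustration, not part of this patch: a minimal kernel-style sketch of the
two rwlock read-side patterns discussed above, using a hypothetical lock named
example_lock. read_lock_irqsave() is only needed when the same rwlock may also be
taken for writing from interrupt context; tasklist_lock never is, which is why the
plain read_lock() form suffices in alloc_retstack_tasklist().]

	/*
	 * Illustration only: compare the irq-disabling and plain read-side
	 * forms of a reader/writer spinlock. example_lock is hypothetical.
	 */
	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(example_lock);

	static void reader_irq_safe(void)
	{
		unsigned long flags;

		/* Disables local interrupts for the whole read section. */
		read_lock_irqsave(&example_lock, flags);
		/* ... walk data protected by example_lock ... */
		read_unlock_irqrestore(&example_lock, flags);
	}

	static void reader_no_irq_disable(void)
	{
		/* Interrupts stay enabled; safe when no writer runs in irq context. */
		read_lock(&example_lock);
		/* ... walk data protected by example_lock ... */
		read_unlock(&example_lock);
	}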
-rw-r--r--	kernel/trace/ftrace.c	5
1 file changed, 2 insertions, 3 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b1870fbd2b67..a6804823a058 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5713,7 +5713,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 {
 	int i;
 	int ret = 0;
-	unsigned long flags;
 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
 	struct task_struct *g, *t;
 
@@ -5729,7 +5728,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 	}
 
-	read_lock_irqsave(&tasklist_lock, flags);
+	read_lock(&tasklist_lock);
 	do_each_thread(g, t) {
 		if (start == end) {
 			ret = -EAGAIN;
@@ -5747,7 +5746,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 	} while_each_thread(g, t);
 
 unlock:
-	read_unlock_irqrestore(&tasklist_lock, flags);
+	read_unlock(&tasklist_lock);
 free:
 	for (i = start; i < end; i++)
 		kfree(ret_stack_list[i]);