author		Steven Rostedt <rostedt@goodmis.org>	2008-05-12 15:20:55 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-23 15:14:11 -0400
commit		92205c2343527a863d660360599a4bf8cede77b0 (patch)
tree		c85cf8495cd2eb52a99062e3505cd81851cd3444 /kernel
parent		c5f888cae49dfe3e86d9d1e0dab2b63ceb057be3 (diff)
ftrace: use raw_spin_lock in tracing
Lock debugging, when enabled, causes huge performance problems for tracing: running the lock verification for every traced function, because mcount calls spin_lock, can cripple the system. This patch converts the spin_locks used by ftrace into raw_spin_locks.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
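The conversion follows one mechanical pattern throughout the patch: the lockdep-tracked spinlock_t becomes a raw_spinlock_t, and each spin_lock_irqsave()/spin_unlock_irqrestore() pair is split into an explicit raw_local_irq_save()/raw_local_irq_restore() around __raw_spin_lock()/__raw_spin_unlock(), since the raw primitives neither disable interrupts nor report to lockdep. A minimal sketch of the before and after shapes, using the locking API as it existed in this kernel era (the lock and function names below are illustrative, not taken from the patch):

#include <linux/spinlock.h>

/* Before: a lockdep-tracked lock.  Every acquisition runs the lock
 * verification machinery, which is crippling when the lock is taken
 * from the mcount hook on every function call. */
static DEFINE_SPINLOCK(example_lock);

static void record_entry_checked(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* lockdep checks here */
	/* ... write one trace entry ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* After: a raw lock, invisible to lockdep.  Interrupts must be
 * disabled by hand because __raw_spin_lock() does not do it. */
static raw_spinlock_t example_raw_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static void record_entry_raw(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* disable irqs before locking */
	__raw_spin_lock(&example_raw_lock);
	/* ... write one trace entry ... */
	__raw_spin_unlock(&example_raw_lock);
	raw_local_irq_restore(flags);
}

Ordering matters: interrupts go off before the raw lock is taken and come back on only after it is released, preserving the semantics of the spin_lock_irqsave() calls being replaced.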
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.c	51
-rw-r--r--	kernel/trace/trace.h	2
2 files changed, 31 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 736dcfb3ed01..3009aafa4dde 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -133,7 +133,8 @@ static const char *trace_options[] = {
 	NULL
 };
 
-static DEFINE_SPINLOCK(ftrace_max_lock);
+static raw_spinlock_t ftrace_max_lock =
+	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -335,7 +336,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	int i;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	spin_lock(&ftrace_max_lock);
+	__raw_spin_lock(&ftrace_max_lock);
 	/* clear out all the previous traces */
 	for_each_possible_cpu(i) {
 		data = tr->data[i];
@@ -344,7 +345,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	}
 
 	__update_max_tr(tr, tsk, cpu);
-	spin_unlock(&ftrace_max_lock);
+	__raw_spin_unlock(&ftrace_max_lock);
 }
 
 /**
@@ -360,7 +361,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	int i;
 
 	WARN_ON_ONCE(!irqs_disabled());
-	spin_lock(&ftrace_max_lock);
+	__raw_spin_lock(&ftrace_max_lock);
 	for_each_possible_cpu(i)
 		tracing_reset(max_tr.data[i]);
 
@@ -368,7 +369,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	tracing_reset(data);
 
 	__update_max_tr(tr, tsk, cpu);
-	spin_unlock(&ftrace_max_lock);
+	__raw_spin_unlock(&ftrace_max_lock);
 }
 
 int register_tracer(struct tracer *type)
@@ -652,13 +653,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_FN;
 	entry->fn.ip = ip;
 	entry->fn.parent_ip = parent_ip;
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 }
 
 void
@@ -678,14 +681,16 @@ __trace_special(void *__tr, void *__data,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, 0);
 	entry->type = TRACE_SPECIAL;
 	entry->special.arg1 = arg1;
 	entry->special.arg2 = arg2;
 	entry->special.arg3 = arg3;
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 
 	trace_wake_up();
 }
@@ -725,7 +730,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_CTX;
@@ -736,7 +742,8 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->ctx.next_prio = next->prio;
 	entry->ctx.next_state = next->state;
 	__trace_stack(tr, data, flags, 4);
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 }
 
 void
@@ -749,7 +756,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	struct trace_entry *entry;
 	unsigned long irq_flags;
 
-	spin_lock_irqsave(&data->lock, irq_flags);
+	raw_local_irq_save(irq_flags);
+	__raw_spin_lock(&data->lock);
 	entry = tracing_get_trace_entry(tr, data);
 	tracing_generic_entry_update(entry, flags);
 	entry->type = TRACE_WAKE;
@@ -760,7 +768,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->ctx.next_prio = wakee->prio;
 	entry->ctx.next_state = wakee->state;
 	__trace_stack(tr, data, flags, 5);
-	spin_unlock_irqrestore(&data->lock, irq_flags);
+	__raw_spin_unlock(&data->lock);
+	raw_local_irq_restore(irq_flags);
 
 	trace_wake_up();
 }
@@ -1824,7 +1833,8 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	if (err)
 		goto err_unlock;
 
-	spin_lock_irq(&ftrace_max_lock);
+	raw_local_irq_disable();
+	__raw_spin_lock(&ftrace_max_lock);
 	for_each_possible_cpu(cpu) {
 		/*
 		 * Increase/decrease the disabled counter if we are
@@ -1839,7 +1849,8 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 			atomic_dec(&global_trace.data[cpu]->disabled);
 		}
 	}
-	spin_unlock_irq(&ftrace_max_lock);
+	__raw_spin_unlock(&ftrace_max_lock);
+	raw_local_irq_enable();
 
 	tracing_cpumask = tracing_cpumask_new;
 
@@ -2299,7 +2310,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
-		spin_lock(&data->lock);
+		__raw_spin_lock(&data->lock);
 	}
 
 	while (find_next_entry_inc(iter) != NULL) {
@@ -2320,7 +2331,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	for_each_cpu_mask(cpu, mask) {
 		data = iter->tr->data[cpu];
-		spin_unlock(&data->lock);
+		__raw_spin_unlock(&data->lock);
 	}
 
 	for_each_cpu_mask(cpu, mask) {
@@ -2538,8 +2549,7 @@ static int trace_alloc_page(void)
 	/* Now that we successfully allocate a page per CPU, add them */
 	for_each_possible_cpu(i) {
 		data = global_trace.data[i];
-		spin_lock_init(&data->lock);
-		lockdep_set_class(&data->lock, &data->lock_key);
+		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
@@ -2547,8 +2557,7 @@ static int trace_alloc_page(void)
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 		data = max_tr.data[i];
-		spin_lock_init(&data->lock);
-		lockdep_set_class(&data->lock, &data->lock_key);
+		data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		page = list_entry(pages.next, struct page, lru);
 		list_del_init(&page->lru);
 		list_add_tail(&page->lru, &data->trace_pages);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 98cbfd05d754..25cba28eb9ba 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -76,7 +76,7 @@ struct trace_entry {
 struct trace_array_cpu {
 	struct list_head trace_pages;
 	atomic_t disabled;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct lock_class_key lock_key;
 
 	/* these fields get copied into max-trace: */