author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
commit     ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree       60962fc9e4021b92f484d1a58e72cd3906d4f3db /kernel/trace/trace_events.c
parent     2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent     5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict in kernel/sched.c as reported by Tejun Heo
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--  kernel/trace/trace_events.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..97e2c4d2e9eb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1432,7 +1432,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1449,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1468,7 +1468,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
 
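
For readers unfamiliar with the pattern whose variable is being renamed above: the function-tracer self-test installs a callback that can end up re-entering itself, so it keeps a per-CPU atomic counter as a recursion guard. The following is a minimal sketch of that guard pattern, not the actual trace_events.c code; the names my_guard_disable and my_traced_hook are hypothetical, and plain preempt_disable_notrace()/preempt_enable_notrace() stand in for the tracer's own ftrace_preempt_disable()/ftrace_preempt_enable() helpers used in the hunks above.

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* One guard counter per CPU; DEFINE_PER_CPU gives every CPU its own copy. */
static DEFINE_PER_CPU(atomic_t, my_guard_disable);

static void my_traced_hook(unsigned long ip, unsigned long parent_ip)
{
	long disabled;
	int cpu;

	/* Pin to one CPU so we increment and decrement the same counter. */
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();

	/* First entry on this CPU reads 1; a nested re-entry reads >1 and bails out. */
	disabled = atomic_inc_return(&per_cpu(my_guard_disable, cpu));
	if (disabled != 1)
		goto out;

	/* ... work that might itself recurse into this hook goes here ... */

 out:
	atomic_dec(&per_cpu(my_guard_disable, cpu));
	preempt_enable_notrace();
}

The rename in the diff simply gives the per-CPU variable an ftrace_-prefixed name; the guard logic itself is unchanged.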