 Makefile                                   |  2 +-
 arch/x86/include/asm/cpufeature.h          |  5 ++++-
 arch/x86/include/asm/msr.h                 |  3 ++-
 arch/x86/kernel/cpu/Makefile               |  5 +++++
 arch/x86/kernel/cpu/addon_cpuid_features.c |  8 ++++++++
 arch/x86/kernel/cpu/amd.c                  |  9 +++++++--
 arch/x86/kernel/cpu/intel.c                | 18 +++++++++++++++++-
 arch/x86/kernel/process.c                  |  2 +-
 arch/x86/kernel/smpboot.c                  |  2 +-
 drivers/acpi/processor_idle.c              |  6 +++---
 include/linux/ftrace.h                     |  2 +-
 include/trace/sched.h                      |  4 ++--
 kernel/sched.c                             |  2 +-
 kernel/trace/ring_buffer.c                 | 19 +++++++++++++++++--
 kernel/trace/trace.c                       | 10 ++++++++++
 kernel/trace/trace.h                       |  1 +
 kernel/trace/trace_boot.c                  | 12 +-----------
 kernel/trace/trace_functions.c             | 14 ++------------
 kernel/trace/trace_hw_branches.c           | 14 ++------------
 kernel/trace/trace_mmiotrace.c             |  6 +-----
 kernel/trace/trace_sched_switch.c          | 16 +++-------------
 kernel/trace/trace_sched_wakeup.c          |  2 +-
 kernel/trace/trace_sysprof.c               | 12 +-----------
 23 files changed, 92 insertions(+), 82 deletions(-)
diff --git a/Makefile b/Makefile
index 4c8d79710b84..71e98e9e6acd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 28
-EXTRAVERSION = -rc9
+EXTRAVERSION =
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index cfdf8c2c5c31..ea408dcba513 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -80,7 +80,6 @@
 #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
 #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
 #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
 #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
 #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
@@ -92,6 +91,8 @@
 #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
 #define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
 #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
@@ -117,6 +118,7 @@
 #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
 #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
 #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */
@@ -237,6 +239,7 @@ extern const char * const x86_power_flags[32];
 #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
 #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
 #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
 
 #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
 # define cpu_has_invlpg 1
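
Note: the new bits land in word 3 (Linux-defined features) and word 4 (CPUID level 1 ECX). Each X86_FEATURE_* value encodes word*32 + bit, which is why NOPL could move from slot (3*32+20) in the first hunk without changing its meaning. A minimal sketch of the lookup this encoding implies, assuming a flat u32 capability array like the one this header describes (not the kernel's actual accessor):

/* Sketch: how (word*32 + bit) indexes the capability bitmap that
 * boot_cpu_has()/cpu_has() consult. */
static inline int has_feature(const unsigned int *caps, int feature)
{
        return (caps[feature / 32] >> (feature % 32)) & 1;
}
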
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index c2a812ebde89..b8a1799ea871 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -85,7 +85,8 @@ static inline void native_write_msr(unsigned int msr,
         asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
 }
 
-static inline int native_write_msr_safe(unsigned int msr,
+/* Can be uninlined because referenced by paravirt */
+notrace static inline int native_write_msr_safe(unsigned int msr,
                                         unsigned low, unsigned high)
 {
         int err;
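
Note: the notrace marker here (and on start_secondary() in smpboot.c below) keeps the function tracer out of code that may run before tracing is safe; paravirt can take this function's address, so if the compiler uninlines it, the out-of-line copy must not call into mcount. In the kernel's compiler headers, notrace is the attribute that suppresses -pg instrumentation:

/* From include/linux/compiler.h; shown for reference. */
#define notrace __attribute__((no_instrument_function))
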
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 82ec6075c057..4ae495a313f3 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -2,6 +2,11 @@
 # Makefile for x86-compatible CPU details and quirks
 #
 
+# Don't trace early stages of a secondary CPU boot
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_common.o = -pg
+endif
+
 obj-y := intel_cacheinfo.o addon_cpuid_features.o
 obj-y += proc.o capflags.o powerflags.o common.o
 
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index ef8f831af823..2cf23634b6d9 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -120,9 +120,17 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
         c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
                                                  & core_select_mask;
         c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+        /*
+         * Reinit the apicid, now that we have extended initial_apicid.
+         */
+        c->apicid = phys_pkg_id(c->initial_apicid, 0);
 #else
         c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
         c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
+        /*
+         * Reinit the apicid, now that we have extended initial_apicid.
+         */
+        c->apicid = phys_pkg_id(0);
 #endif
         c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 8f1e31db2ad5..7c878f6aa919 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -283,9 +283,14 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
         early_init_amd_mc(c);
 
-        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
-        if (c->x86_power & (1<<8))
+        /*
+         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+         * with P/T states and does not stop in deep C-states
+         */
+        if (c->x86_power & (1 << 8)) {
                 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+        }
 
 #ifdef CONFIG_X86_64
         set_cpu_cap(c, X86_FEATURE_SYSCALL32);
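
Note: this hunk and the matching intel.c hunk below key off the same architectural bit: CPUID leaf 0x80000007, EDX bit 8 (the invariant-TSC bit, cached by the kernel in c->x86_power). A standalone userspace sketch of the same probe, using GCC's cpuid.h; illustrative only, not part of the patch:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0x80000007: Advanced Power Management Information. */
        if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx))
                return 1;       /* leaf not supported */

        /* EDX bit 8: TSC rate is invariant across P-, C- and T-states. */
        printf("invariant TSC: %s\n", (edx & (1u << 8)) ? "yes" : "no");
        return 0;
}
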
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index cd413d9a0218..8ea6929e974c 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -40,6 +40,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
         if (c->x86 == 15 && c->x86_cache_alignment == 64)
                 c->x86_cache_alignment = 128;
 #endif
+
+        /*
+         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
+         * with P/T states and does not stop in deep C-states
+         */
+        if (c->x86_power & (1 << 8)) {
+                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
+                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+        }
+
 }
 
 #ifdef CONFIG_X86_32
@@ -241,6 +251,13 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 
         intel_workarounds(c);
 
+        /*
+         * Detect the extended topology information if available. This
+         * will reinitialise the initial_apicid which will be used
+         * in init_intel_cacheinfo()
+         */
+        detect_extended_topology(c);
+
         l2 = init_intel_cacheinfo(c);
         if (c->cpuid_level > 9) {
                 unsigned eax = cpuid_eax(10);
@@ -308,7 +325,6 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
                 set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-        detect_extended_topology(c);
         if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                 /*
                  * let's use the legacy cpuid vector 0x1 and 0x4 for topology
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c27af49a4ede..cff9a50e389d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -286,7 +286,7 @@ static void c1e_idle(void)
                 rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
                 if (lo & K8_INTP_C1E_ACTIVE_MASK) {
                         c1e_detected = 1;
-                        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+                        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                                 mark_tsc_unstable("TSC halt in AMD C1E");
                         printk(KERN_INFO "System has AMD C1E enabled\n");
                         set_cpu_cap(&boot_cpu_data, X86_FEATURE_AMDC1E);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f71f96fc9e62..f6174d229024 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -287,7 +287,7 @@ static int __cpuinitdata unsafe_smp;
 /*
  * Activate a secondary processor.
  */
-static void __cpuinit start_secondary(void *unused)
+notrace static void __cpuinit start_secondary(void *unused)
 {
         /*
          * Don't put *anything* before cpu_init(), SMP booting is too
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5f8d746a9b81..38aca048e951 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -374,15 +374,15 @@ static int tsc_halts_in_c(int state)
 {
         switch (boot_cpu_data.x86_vendor) {
         case X86_VENDOR_AMD:
+        case X86_VENDOR_INTEL:
                 /*
                  * AMD Fam10h TSC will tick in all
                  * C/P/S0/S1 states when this bit is set.
                  */
-                if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+                if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                         return 0;
+
                 /*FALL THROUGH*/
-        case X86_VENDOR_INTEL:
-                /* Several cases known where TSC halts in C2 too */
         default:
                 return state > ACPI_STATE_C1;
         }
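
Note: with Intel folded into the AMD case, the function reduces to a single question: is NONSTOP_TSC set? A simplified sketch of the resulting behaviour, with the vendor switch collapsed for clarity (illustrative, not the patched source):

/* Behaviourally equivalent view of tsc_halts_in_c() after this change. */
static int tsc_halts_in_c_sketch(int state)
{
        if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                return 0;                       /* TSC ticks in all C-states */
        return state > ACPI_STATE_C1;           /* assume it halts past C1 */
}
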
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 04b52e6ebc66..677432b9cb7e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -303,7 +303,7 @@ extern void ftrace_dump(void);
 static inline void
 ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
 static inline int
-ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
+ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
 
 static inline void tracing_start(void) { }
 static inline void tracing_stop(void) { }
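
Note: in GCC's format attribute the first index names the format-string argument and the second names the first argument to check; 0 as the second index is meant for va_list-style functions and disables argument checking. Since ftrace_printk() takes `...`, the stub now gets the same -Wformat coverage as the real implementation:

/* With format(printf, 1, 2), gcc can flag mismatches like this one: */
extern int ftrace_printk(const char *fmt, ...)
        __attribute__ ((format (printf, 1, 2)));

void example(void)
{
        ftrace_printk("count = %d\n", "not an int");    /* -Wformat warning */
}
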
diff --git a/include/trace/sched.h b/include/trace/sched.h
index bc4c9eadc6ba..0d81098ee9fc 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -17,8 +17,8 @@ DECLARE_TRACE(sched_wait_task,
             TPARGS(rq, p));
 
 DECLARE_TRACE(sched_wakeup,
-        TPPROTO(struct rq *rq, struct task_struct *p),
-        TPARGS(rq, p));
+        TPPROTO(struct rq *rq, struct task_struct *p, int success),
+        TPARGS(rq, p, success));
 
 DECLARE_TRACE(sched_wakeup_new,
         TPPROTO(struct rq *rq, struct task_struct *p, int success),
diff --git a/kernel/sched.c b/kernel/sched.c
index ceda5799466e..dcb39bc88f6c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2324,7 +2324,7 @@ out_activate:
         success = 1;
 
 out_running:
-        trace_sched_wakeup(rq, p);
+        trace_sched_wakeup(rq, p, success);
         check_preempt_curr(rq, p, sync);
 
         p->state = TASK_RUNNING;
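
Note: adding `success` to sched_wakeup brings it in line with sched_wakeup_new, and every registered probe must now take the third argument, which is exactly what the probe updates in trace_sched_switch.c and trace_sched_wakeup.c below do. A sketch of a conforming probe (name illustrative):

/* Probes attached to sched_wakeup must match the new 3-arg prototype;
 * success is the value computed in try_to_wake_up() above. */
static void my_wakeup_probe(struct rq *rq, struct task_struct *p, int success)
{
        if (success) {
                /* the task was actually woken, not already running */
        }
}
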
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bb6922a931b1..76f34c0ef29c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -838,6 +838,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
          * back to us). This allows us to do a simple loop to
          * assign the commit to the tail.
          */
+ again:
         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
                 cpu_buffer->commit_page->page->commit =
                         cpu_buffer->commit_page->write;
@@ -853,6 +854,17 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
                         cpu_buffer->commit_page->write;
                 barrier();
         }
+
+        /* again, keep gcc from optimizing */
+        barrier();
+
+        /*
+         * If an interrupt came in just after the first while loop
+         * and pushed the tail page forward, we will be left with
+         * a dangling commit that will never go forward.
+         */
+        if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
+                goto again;
 }
 
 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
@@ -950,12 +962,15 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                   unsigned type, unsigned long length, u64 *ts)
 {
-        struct buffer_page *tail_page, *head_page, *reader_page;
+        struct buffer_page *tail_page, *head_page, *reader_page, *commit_page;
         unsigned long tail, write;
         struct ring_buffer *buffer = cpu_buffer->buffer;
         struct ring_buffer_event *event;
         unsigned long flags;
 
+        commit_page = cpu_buffer->commit_page;
+        /* we just need to protect against interrupts */
+        barrier();
         tail_page = cpu_buffer->tail_page;
         write = local_add_return(length, &tail_page->write);
         tail = write - length;
@@ -981,7 +996,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
          * it all the way around the buffer, bail, and warn
          * about it.
          */
-        if (unlikely(next_page == cpu_buffer->commit_page)) {
+        if (unlikely(next_page == commit_page)) {
                 WARN_ON_ONCE(1);
                 goto out_unlock;
         }
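
Note: both ring-buffer fixes address the same hazard: an interrupt on the same CPU can advance the tail page between two reads of cpu_buffer->commit_page. __rb_reserve_next() therefore snapshots the commit page once, with a compiler barrier so gcc cannot defer or repeat the load. A self-contained sketch of the snapshot-and-recheck idiom, with simplified types (not the kernel's):

/* barrier() as in the kernel: a compiler-only memory barrier. */
#define barrier() __asm__ __volatile__("" : : : "memory")

struct buf {
        void *commit_page;              /* may be advanced by an interrupt */
};

static int wrapped_past_commit(struct buf *b, void *next_page)
{
        void *commit = b->commit_page;  /* snapshot exactly once */

        barrier();                      /* forbid re-reading b->commit_page */
        return next_page == commit;     /* compare against the snapshot */
}
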
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0eb6d48347f7..79db26e8216e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -679,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu)
         ftrace_enable_cpu();
 }
 
+void tracing_reset_online_cpus(struct trace_array *tr)
+{
+        int cpu;
+
+        tr->time_start = ftrace_now(tr->cpu);
+
+        for_each_online_cpu(cpu)
+                tracing_reset(tr, cpu);
+}
+
 #define SAVED_CMDLINES 128
 static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
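
Note: this helper absorbs five byte-for-byte identical reset loops (reset_boot_trace, function_reset, bts_trace_reset, sched_switch_reset, stack_reset) that the tracer diffs below delete. With the declaration added to trace.h, a tracer can wire its callbacks straight to it; a sketch of the resulting pattern (tracer name and init function illustrative):

/* A tracer no longer needs a private per-CPU reset loop: */
static struct tracer example_tracer __read_mostly =
{
        .name   = "example",
        .init   = example_trace_init,           /* defined elsewhere */
        .reset  = tracing_reset_online_cpus,    /* shared helper */
};
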
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fc75dce7a664..cc7a4f864036 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -374,6 +374,7 @@ struct trace_iterator {
 int tracing_is_enabled(void);
 void trace_wake_up(void);
 void tracing_reset(struct trace_array *tr, int cpu);
+void tracing_reset_online_cpus(struct trace_array *tr);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 struct dentry *tracing_init_dentry(void);
 void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a4fa2c57e34e..3ccebde28482 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -37,16 +37,6 @@ void disable_boot_trace(void)
         tracing_stop_sched_switch_record();
 }
 
-static void reset_boot_trace(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static int boot_trace_init(struct trace_array *tr)
 {
         int cpu;
@@ -130,7 +120,7 @@ struct tracer boot_tracer __read_mostly =
 {
         .name           = "initcall",
         .init           = boot_trace_init,
-        .reset          = reset_boot_trace,
+        .reset          = tracing_reset_online_cpus,
         .print_line     = initcall_print_line,
 };
 
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index e74f6d0a3216..9236d7e25a16 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -16,20 +16,10 @@
 
 #include "trace.h"
 
-static void function_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void start_function_trace(struct trace_array *tr)
 {
         tr->cpu = get_cpu();
-        function_reset(tr);
+        tracing_reset_online_cpus(tr);
         put_cpu();
 
         tracing_start_cmdline_record();
@@ -55,7 +45,7 @@ static void function_trace_reset(struct trace_array *tr)
 
 static void function_trace_start(struct trace_array *tr)
 {
-        function_reset(tr);
+        tracing_reset_online_cpus(tr);
 }
 
 static struct tracer function_trace __read_mostly =
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index ee29e012aa97..b6a3e20a49a9 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -25,16 +25,6 @@ static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
 #define this_buffer per_cpu(buffer, smp_processor_id())
 
 
-static void bts_trace_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void bts_trace_start_cpu(void *arg)
 {
         if (this_tracer)
@@ -54,7 +44,7 @@ static void bts_trace_start(struct trace_array *tr)
 {
         int cpu;
 
-        bts_trace_reset(tr);
+        tracing_reset_online_cpus(tr);
 
         for_each_cpu_mask(cpu, cpu_possible_map)
                 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
@@ -78,7 +68,7 @@ static void bts_trace_stop(struct trace_array *tr)
 
 static int bts_trace_init(struct trace_array *tr)
 {
-        bts_trace_reset(tr);
+        tracing_reset_online_cpus(tr);
         bts_trace_start(tr);
 
         return 0;
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 2fb6da6523b3..fffcb069f1dc 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -22,14 +22,10 @@ static unsigned long prev_overruns;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
-        int cpu;
-
         overrun_detected = false;
         prev_overruns = 0;
-        tr->time_start = ftrace_now(tr->cpu);
 
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
+        tracing_reset_online_cpus(tr);
 }
 
 static int mmio_trace_init(struct trace_array *tr)
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 781d72ef873c..df175cb4564f 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -49,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 }
 
 static void
-probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
+probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
         struct trace_array_cpu *data;
         unsigned long flags;
@@ -72,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
         local_irq_restore(flags);
 }
 
-static void sched_switch_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static int tracing_sched_register(void)
 {
         int ret;
@@ -197,7 +187,7 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr)
 
 static void start_sched_trace(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
+        tracing_reset_online_cpus(tr);
         tracing_start_sched_switch_record();
 }
 
@@ -221,7 +211,7 @@ static void sched_switch_trace_reset(struct trace_array *tr)
 
 static void sched_switch_trace_start(struct trace_array *tr)
 {
-        sched_switch_reset(tr);
+        tracing_reset_online_cpus(tr);
         tracing_start_sched_switch();
 }
 
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0067b49746c1..43586b689e31 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -211,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr)
 }
 
 static void
-probe_wakeup(struct rq *rq, struct task_struct *p)
+probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 {
         int cpu = smp_processor_id();
         unsigned long flags;
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 54960edb96d0..01becf1f19ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -234,20 +234,10 @@ static void stop_stack_timers(void)
                 stop_stack_timer(cpu);
 }
 
-static void stack_reset(struct trace_array *tr)
-{
-        int cpu;
-
-        tr->time_start = ftrace_now(tr->cpu);
-
-        for_each_online_cpu(cpu)
-                tracing_reset(tr, cpu);
-}
-
 static void start_stack_trace(struct trace_array *tr)
 {
         mutex_lock(&sample_timer_lock);
-        stack_reset(tr);
+        tracing_reset_online_cpus(tr);
         start_stack_timers();
         tracer_enabled = 1;
         mutex_unlock(&sample_timer_lock);