about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2009-01-23 05:10:03 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-23 05:10:03 -0500
commit9b03638963e4b239dff1b424b91062a710d4b2e9 (patch)
tree184bbcc7160c5d0e304166b1ecb190897e1f0afc /kernel
parent03b30d151a918364c1c7d08bcb3e167be0a3746f (diff)
parent69507c06539332e6e49f83aa478844130233bece (diff)
Merge branch 'tracing/ftrace' into tracing/core
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/ring_buffer.c18
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c78
3 files changed, 61 insertions(+), 37 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0b9de5a3d699..7839280ffcd8 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2166,6 +2166,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2166 2166
2167 cpu_buffer->overrun = 0; 2167 cpu_buffer->overrun = 0;
2168 cpu_buffer->entries = 0; 2168 cpu_buffer->entries = 0;
2169
2170 cpu_buffer->write_stamp = 0;
2171 cpu_buffer->read_stamp = 0;
2169} 2172}
2170 2173
2171/** 2174/**
@@ -2266,9 +2269,24 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2266 if (buffer_a->pages != buffer_b->pages) 2269 if (buffer_a->pages != buffer_b->pages)
2267 return -EINVAL; 2270 return -EINVAL;
2268 2271
2272 if (ring_buffer_flags != RB_BUFFERS_ON)
2273 return -EAGAIN;
2274
2275 if (atomic_read(&buffer_a->record_disabled))
2276 return -EAGAIN;
2277
2278 if (atomic_read(&buffer_b->record_disabled))
2279 return -EAGAIN;
2280
2269 cpu_buffer_a = buffer_a->buffers[cpu]; 2281 cpu_buffer_a = buffer_a->buffers[cpu];
2270 cpu_buffer_b = buffer_b->buffers[cpu]; 2282 cpu_buffer_b = buffer_b->buffers[cpu];
2271 2283
2284 if (atomic_read(&cpu_buffer_a->record_disabled))
2285 return -EAGAIN;
2286
2287 if (atomic_read(&cpu_buffer_b->record_disabled))
2288 return -EAGAIN;
2289
2272 /* 2290 /*
2273 * We can't do a synchronize_sched here because this 2291 * We can't do a synchronize_sched here because this
2274 * function can be called in atomic context. 2292 * function can be called in atomic context.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 220c264e3111..757ae6f7e648 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -415,7 +415,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
415 415
416 ftrace_enable_cpu(); 416 ftrace_enable_cpu();
417 417
418 WARN_ON_ONCE(ret); 418 WARN_ON_ONCE(ret && ret != -EAGAIN);
419 419
420 __update_max_tr(tr, tsk, cpu); 420 __update_max_tr(tr, tsk, cpu);
421 __raw_spin_unlock(&ftrace_max_lock); 421 __raw_spin_unlock(&ftrace_max_lock);
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 42ae1e77b6b3..93cecda650b2 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -25,6 +25,7 @@ static int __read_mostly tracer_enabled;
25static struct task_struct *wakeup_task; 25static struct task_struct *wakeup_task;
26static int wakeup_cpu; 26static int wakeup_cpu;
27static unsigned wakeup_prio = -1; 27static unsigned wakeup_prio = -1;
28static int wakeup_rt;
28 29
29static raw_spinlock_t wakeup_lock = 30static raw_spinlock_t wakeup_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 31 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -152,6 +153,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
152 goto out_unlock; 153 goto out_unlock;
153 154
154 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 155 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
156 tracing_sched_switch_trace(wakeup_trace, data, prev, next, flags, pc);
155 157
156 /* 158 /*
157 * usecs conversion is slow so we try to delay the conversion 159 * usecs conversion is slow so we try to delay the conversion
@@ -213,6 +215,7 @@ static void wakeup_reset(struct trace_array *tr)
213static void 215static void
214probe_wakeup(struct rq *rq, struct task_struct *p, int success) 216probe_wakeup(struct rq *rq, struct task_struct *p, int success)
215{ 217{
218 struct trace_array_cpu *data;
216 int cpu = smp_processor_id(); 219 int cpu = smp_processor_id();
217 unsigned long flags; 220 unsigned long flags;
218 long disabled; 221 long disabled;
@@ -224,7 +227,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
224 tracing_record_cmdline(p); 227 tracing_record_cmdline(p);
225 tracing_record_cmdline(current); 228 tracing_record_cmdline(current);
226 229
227 if (likely(!rt_task(p)) || 230 if ((wakeup_rt && !rt_task(p)) ||
228 p->prio >= wakeup_prio || 231 p->prio >= wakeup_prio ||
229 p->prio >= current->prio) 232 p->prio >= current->prio)
230 return; 233 return;
@@ -252,9 +255,12 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
252 255
253 local_save_flags(flags); 256 local_save_flags(flags);
254 257
255 wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); 258 data = wakeup_trace->data[wakeup_cpu];
256 trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], 259 data->preempt_timestamp = ftrace_now(cpu);
257 CALLER_ADDR1, CALLER_ADDR2, flags, pc); 260 tracing_sched_wakeup_trace(wakeup_trace, data, p, current,
261 flags, pc);
262 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2,
263 flags, pc);
258 264
259out_locked: 265out_locked:
260 __raw_spin_unlock(&wakeup_lock); 266 __raw_spin_unlock(&wakeup_lock);
@@ -262,12 +268,6 @@ out:
262 atomic_dec(&wakeup_trace->data[cpu]->disabled); 268 atomic_dec(&wakeup_trace->data[cpu]->disabled);
263} 269}
264 270
265/*
266 * save_tracer_enabled is used to save the state of the tracer_enabled
267 * variable when we disable it when we open a trace output file.
268 */
269static int save_tracer_enabled;
270
271static void start_wakeup_tracer(struct trace_array *tr) 271static void start_wakeup_tracer(struct trace_array *tr)
272{ 272{
273 int ret; 273 int ret;
@@ -306,13 +306,10 @@ static void start_wakeup_tracer(struct trace_array *tr)
306 306
307 register_ftrace_function(&trace_ops); 307 register_ftrace_function(&trace_ops);
308 308
309 if (tracing_is_enabled()) { 309 if (tracing_is_enabled())
310 tracer_enabled = 1; 310 tracer_enabled = 1;
311 save_tracer_enabled = 1; 311 else
312 } else {
313 tracer_enabled = 0; 312 tracer_enabled = 0;
314 save_tracer_enabled = 0;
315 }
316 313
317 return; 314 return;
318fail_deprobe_wake_new: 315fail_deprobe_wake_new:
@@ -324,14 +321,13 @@ fail_deprobe:
324static void stop_wakeup_tracer(struct trace_array *tr) 321static void stop_wakeup_tracer(struct trace_array *tr)
325{ 322{
326 tracer_enabled = 0; 323 tracer_enabled = 0;
327 save_tracer_enabled = 0;
328 unregister_ftrace_function(&trace_ops); 324 unregister_ftrace_function(&trace_ops);
329 unregister_trace_sched_switch(probe_wakeup_sched_switch); 325 unregister_trace_sched_switch(probe_wakeup_sched_switch);
330 unregister_trace_sched_wakeup_new(probe_wakeup); 326 unregister_trace_sched_wakeup_new(probe_wakeup);
331 unregister_trace_sched_wakeup(probe_wakeup); 327 unregister_trace_sched_wakeup(probe_wakeup);
332} 328}
333 329
334static int wakeup_tracer_init(struct trace_array *tr) 330static int __wakeup_tracer_init(struct trace_array *tr)
335{ 331{
336 tracing_max_latency = 0; 332 tracing_max_latency = 0;
337 wakeup_trace = tr; 333 wakeup_trace = tr;
@@ -339,6 +335,18 @@ static int wakeup_tracer_init(struct trace_array *tr)
339 return 0; 335 return 0;
340} 336}
341 337
338static int wakeup_tracer_init(struct trace_array *tr)
339{
340 wakeup_rt = 0;
341 return __wakeup_tracer_init(tr);
342}
343
344static int wakeup_rt_tracer_init(struct trace_array *tr)
345{
346 wakeup_rt = 1;
347 return __wakeup_tracer_init(tr);
348}
349
342static void wakeup_tracer_reset(struct trace_array *tr) 350static void wakeup_tracer_reset(struct trace_array *tr)
343{ 351{
344 stop_wakeup_tracer(tr); 352 stop_wakeup_tracer(tr);
@@ -350,28 +358,11 @@ static void wakeup_tracer_start(struct trace_array *tr)
350{ 358{
351 wakeup_reset(tr); 359 wakeup_reset(tr);
352 tracer_enabled = 1; 360 tracer_enabled = 1;
353 save_tracer_enabled = 1;
354} 361}
355 362
356static void wakeup_tracer_stop(struct trace_array *tr) 363static void wakeup_tracer_stop(struct trace_array *tr)
357{ 364{
358 tracer_enabled = 0; 365 tracer_enabled = 0;
359 save_tracer_enabled = 0;
360}
361
362static void wakeup_tracer_open(struct trace_iterator *iter)
363{
364 /* stop the trace while dumping */
365 tracer_enabled = 0;
366}
367
368static void wakeup_tracer_close(struct trace_iterator *iter)
369{
370 /* forget about any processes we were recording */
371 if (save_tracer_enabled) {
372 wakeup_reset(iter->tr);
373 tracer_enabled = 1;
374 }
375} 366}
376 367
377static struct tracer wakeup_tracer __read_mostly = 368static struct tracer wakeup_tracer __read_mostly =
@@ -381,8 +372,19 @@ static struct tracer wakeup_tracer __read_mostly =
381 .reset = wakeup_tracer_reset, 372 .reset = wakeup_tracer_reset,
382 .start = wakeup_tracer_start, 373 .start = wakeup_tracer_start,
383 .stop = wakeup_tracer_stop, 374 .stop = wakeup_tracer_stop,
384 .open = wakeup_tracer_open, 375 .print_max = 1,
385 .close = wakeup_tracer_close, 376#ifdef CONFIG_FTRACE_SELFTEST
377 .selftest = trace_selftest_startup_wakeup,
378#endif
379};
380
381static struct tracer wakeup_rt_tracer __read_mostly =
382{
383 .name = "wakeup_rt",
384 .init = wakeup_rt_tracer_init,
385 .reset = wakeup_tracer_reset,
386 .start = wakeup_tracer_start,
387 .stop = wakeup_tracer_stop,
386 .print_max = 1, 388 .print_max = 1,
387#ifdef CONFIG_FTRACE_SELFTEST 389#ifdef CONFIG_FTRACE_SELFTEST
388 .selftest = trace_selftest_startup_wakeup, 390 .selftest = trace_selftest_startup_wakeup,
@@ -397,6 +399,10 @@ __init static int init_wakeup_tracer(void)
397 if (ret) 399 if (ret)
398 return ret; 400 return ret;
399 401
402 ret = register_tracer(&wakeup_rt_tracer);
403 if (ret)
404 return ret;
405
400 return 0; 406 return 0;
401} 407}
402device_initcall(init_wakeup_tracer); 408device_initcall(init_wakeup_tracer);