From 1bc2f5cceedcacb1079687908233a55a1ae681a3 Mon Sep 17 00:00:00 2001
From: "Bjoern B. Brandenburg"
Date: Tue, 24 Jan 2012 09:36:12 +0100
Subject: Feather-Trace: keep track of interrupt-related interference.

Increment a processor-local counter whenever an interrupt is handled.
This allows Feather-Trace to include a (truncated) counter and a flag
to report interference from interrupts. This could be used to filter
samples that were disturbed by interrupts.
---
 include/linux/hardirq.h    |  4 ++++
 include/litmus/trace.h     |  4 +++-
 include/litmus/trace_irq.h | 21 +++++++++++++++++++++
 litmus/trace.c             | 47 ++++++++++++++++++++++++++++++++++++++++++++--
 4 files changed, 73 insertions(+), 3 deletions(-)
 create mode 100644 include/litmus/trace_irq.h

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index ba362171e8ae..e6dd5a456bae 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -6,6 +6,8 @@
 #include
 #include
 
+#include <litmus/trace_irq.h>
+
 /*
  * We put the hardirq and softirq counter into the preemption
  * counter. The bitmask has the following meaning:
@@ -186,6 +188,7 @@ extern void rcu_nmi_exit(void);
 		account_system_vtime(current);		\
 		add_preempt_count(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
+		ft_irq_fired();				\
 	} while (0)
 
 /*
@@ -216,6 +219,7 @@ extern void irq_exit(void);
 		lockdep_off();				\
 		rcu_nmi_enter();			\
 		trace_hardirq_enter();			\
+		ft_irq_fired();				\
 	} while (0)
 
 #define nmi_exit()					\
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e2926a08c2f4..e809376d6487 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -20,7 +20,9 @@ struct timestamp {
 	uint32_t	seq_no;
 	uint8_t		cpu;
 	uint8_t		event;
-	uint8_t		task_type;
+	uint8_t		task_type:2;
+	uint8_t		irq_flag:1;
+	uint8_t		irq_count:5;
 };
 
 /* tracing callbacks */
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h
new file mode 100644
index 000000000000..f18b127a089d
--- /dev/null
+++ b/include/litmus/trace_irq.h
@@ -0,0 +1,21 @@
+#ifndef _LITMUS_TRACE_IRQ_H_
+#define _LITMUS_TRACE_IRQ_H_
+
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+
+extern DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+static inline void ft_irq_fired(void)
+{
+	/* Only called with preemptions disabled. */
+	atomic_inc(&__get_cpu_var(irq_fired_count));
+}
+
+
+#else
+
+#define ft_irq_fired() /* nothing to do */
+
+#endif
+
+#endif
diff --git a/litmus/trace.c b/litmus/trace.c
index 5d77806da647..3c35c527e805 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -16,6 +16,35 @@ static struct ftdev overhead_dev;
 
 static unsigned int ts_seq_no = 0;
 
+DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+static inline void clear_irq_fired(void)
+{
+	atomic_set(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline unsigned int get_and_clear_irq_fired(void)
+{
+	/* This is potentially not atomic since we might migrate if
+	 * preemptions are not disabled. As a tradeoff between
+	 * accuracy and tracing overheads, this seems acceptable.
+	 * If it proves to be a problem, then one could add a callback
+	 * from the migration code to invalidate irq_fired_count.
+	 */
+	return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0);
+}
+
+static inline void __save_irq_flags(struct timestamp *ts)
+{
+	unsigned int irq_count;
+
+	irq_count = get_and_clear_irq_fired();
+	/* Store how many interrupts occurred. */
+	ts->irq_count = irq_count;
+	/* Extra flag because ts->irq_count overflows quickly. */
+	ts->irq_flag = irq_count > 0;
+}
+
 static inline void __save_timestamp_cpu(unsigned long event,
 					uint8_t type, uint8_t cpu)
 {
@@ -24,10 +53,13 @@ static inline void __save_timestamp_cpu(unsigned long event,
 	seq_no = fetch_and_inc((int *) &ts_seq_no);
 	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
 		ts->event = event;
-		ts->timestamp = ft_timestamp();
 		ts->seq_no = seq_no;
 		ts->cpu = cpu;
 		ts->task_type = type;
+		__save_irq_flags(ts);
+		barrier();
+		/* prevent re-ordering of ft_timestamp() */
+		ts->timestamp = ft_timestamp();
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -40,6 +72,7 @@ static void __add_timestamp_user(struct timestamp *pre_recorded)
 	if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) {
 		*ts = *pre_recorded;
 		ts->seq_no = seq_no;
+		__save_irq_flags(ts);
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -90,6 +123,7 @@ feather_callback void save_task_latency(unsigned long event,
 		ts->seq_no = seq_no;
 		ts->cpu = cpu;
 		ts->task_type = TSK_RT;
+		__save_irq_flags(ts);
 		ft_buffer_finish_write(trace_ts_buf, ts);
 	}
 }
@@ -107,6 +141,10 @@ feather_callback void save_task_latency(unsigned long event,
 static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx)
 {
 	unsigned int count = NO_TIMESTAMPS;
+
+	/* An overhead-tracing timestamp should be exactly 16 bytes long. */
+	BUILD_BUG_ON(sizeof(struct timestamp) != 16);
+
 	while (count && !trace_ts_buf) {
 		printk("time stamp buffer: trying to allocate %u time stamps.\n", count);
 		ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp));
@@ -149,7 +187,7 @@ out:
 
 static int __init init_ft_overhead_trace(void)
 {
-	int err;
+	int err, cpu;
 
 	printk("Initializing Feather-Trace overhead tracing device.\n");
 	err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace");
@@ -164,6 +202,11 @@ static int __init init_ft_overhead_trace(void)
 	if (err)
 		goto err_dealloc;
 
+	/* initialize IRQ flags */
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+		clear_irq_fired();
+	}
+
 	return 0;
 
 err_dealloc:
-- 
cgit v1.2.2
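
The commit message suggests that consumers "could" filter samples disturbed by
interrupts, but no such consumer is part of this patch. The following is a
minimal user-space sketch of that idea, assuming raw 16-byte struct timestamp
records (the size enforced by the BUILD_BUG_ON above) are piped in on stdin.
The 64-bit width of the timestamp field is inferred from the 16-byte total;
the field order, the program name, and the record source are illustrative
assumptions, not taken from the patch.

/* filter_irq.c -- hedged sketch, not part of the patch.
 * Reads raw struct timestamp records from stdin and drops samples that
 * were disturbed by interrupts (irq_flag set or irq_count > 0).
 * Assumption: this struct layout matches include/litmus/trace.h as
 * modified above, including the compiler's bit-field packing.
 */
#include <stdint.h>
#include <stdio.h>

struct timestamp {
	uint64_t timestamp;   /* width inferred from the 16-byte BUILD_BUG_ON */
	uint32_t seq_no;
	uint8_t  cpu;
	uint8_t  event;
	uint8_t  task_type:2;
	uint8_t  irq_flag:1;
	uint8_t  irq_count:5;
};

int main(void)
{
	struct timestamp ts;
	unsigned long total = 0, dropped = 0;

	while (fread(&ts, sizeof(ts), 1, stdin) == 1) {
		total++;
		/* irq_count has only 5 bits and may wrap to 0; irq_flag
		 * still records that at least one interrupt fired. */
		if (ts.irq_flag || ts.irq_count > 0) {
			dropped++;
			continue;
		}
		printf("event=%u cpu=%u seq=%u cycles=%llu\n",
		       (unsigned) ts.event, (unsigned) ts.cpu,
		       (unsigned) ts.seq_no,
		       (unsigned long long) ts.timestamp);
	}
	fprintf(stderr, "%lu of %lu samples dropped due to interrupts\n",
		dropped, total);
	return 0;
}

Usage would be along the lines of piping a recorded trace dump into the
program (the exact ftdev device node name is not specified in this patch).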