diff options
author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-03-04 19:47:13 -0500 |
commit | c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch) | |
tree | ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /litmus/trace.c | |
parent | ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff) | |
parent | 6a00f206debf8a5c8899055726ad127dbeeed098 (diff) |
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'litmus/trace.c')
-rw-r--r-- | litmus/trace.c | 109 |
1 files changed, 106 insertions, 3 deletions
diff --git a/litmus/trace.c b/litmus/trace.c
index e7ea1c2ab3e4..3c35c527e805 100644
--- a/litmus/trace.c
+++ b/litmus/trace.c
@@ -1,5 +1,6 @@ | |||
1 | #include <linux/sched.h> | 1 | #include <linux/sched.h> |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/uaccess.h> | ||
3 | 4 | ||
4 | #include <litmus/ftdev.h> | 5 | #include <litmus/ftdev.h> |
5 | #include <litmus/litmus.h> | 6 | #include <litmus/litmus.h> |
@@ -15,6 +16,35 @@ static struct ftdev overhead_dev; | |||
15 | 16 | ||
16 | static unsigned int ts_seq_no = 0; | 17 | static unsigned int ts_seq_no = 0; |
17 | 18 | ||
19 | DEFINE_PER_CPU(atomic_t, irq_fired_count); | ||
20 | |||
21 | static inline void clear_irq_fired(void) | ||
22 | { | ||
23 | atomic_set(&__raw_get_cpu_var(irq_fired_count), 0); | ||
24 | } | ||
25 | |||
26 | static inline unsigned int get_and_clear_irq_fired(void) | ||
27 | { | ||
28 | /* This is potentially not atomic since we might migrate if | ||
29 | * preemptions are not disabled. As a tradeoff between | ||
30 | * accuracy and tracing overheads, this seems acceptable. | ||
31 | * If it proves to be a problem, then one could add a callback | ||
32 | * from the migration code to invalidate irq_fired_count. | ||
33 | */ | ||
34 | return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0); | ||
35 | } | ||
36 | |||
37 | static inline void __save_irq_flags(struct timestamp *ts) | ||
38 | { | ||
39 | unsigned int irq_count; | ||
40 | |||
41 | irq_count = get_and_clear_irq_fired(); | ||
42 | /* Store how many interrupts occurred. */ | ||
43 | ts->irq_count = irq_count; | ||
44 | /* Extra flag because ts->irq_count overflows quickly. */ | ||
45 | ts->irq_flag = irq_count > 0; | ||
46 | } | ||
47 | |||
18 | static inline void __save_timestamp_cpu(unsigned long event, | 48 | static inline void __save_timestamp_cpu(unsigned long event, |
19 | uint8_t type, uint8_t cpu) | 49 | uint8_t type, uint8_t cpu) |
20 | { | 50 | { |
@@ -23,10 +53,26 @@ static inline void __save_timestamp_cpu(unsigned long event, | |||
23 | seq_no = fetch_and_inc((int *) &ts_seq_no); | 53 | seq_no = fetch_and_inc((int *) &ts_seq_no); |
24 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | 54 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { |
25 | ts->event = event; | 55 | ts->event = event; |
26 | ts->timestamp = ft_timestamp(); | ||
27 | ts->seq_no = seq_no; | 56 | ts->seq_no = seq_no; |
28 | ts->cpu = cpu; | 57 | ts->cpu = cpu; |
29 | ts->task_type = type; | 58 | ts->task_type = type; |
59 | __save_irq_flags(ts); | ||
60 | barrier(); | ||
61 | /* prevent re-ordering of ft_timestamp() */ | ||
62 | ts->timestamp = ft_timestamp(); | ||
63 | ft_buffer_finish_write(trace_ts_buf, ts); | ||
64 | } | ||
65 | } | ||
66 | |||
67 | static void __add_timestamp_user(struct timestamp *pre_recorded) | ||
68 | { | ||
69 | unsigned int seq_no; | ||
70 | struct timestamp *ts; | ||
71 | seq_no = fetch_and_inc((int *) &ts_seq_no); | ||
72 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | ||
73 | *ts = *pre_recorded; | ||
74 | ts->seq_no = seq_no; | ||
75 | __save_irq_flags(ts); | ||
30 | ft_buffer_finish_write(trace_ts_buf, ts); | 76 | ft_buffer_finish_write(trace_ts_buf, ts); |
31 | } | 77 | } |
32 | } | 78 | } |
@@ -61,6 +107,27 @@ feather_callback void save_timestamp_cpu(unsigned long event, | |||
61 | __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); | 107 | __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); |
62 | } | 108 | } |
63 | 109 | ||
110 | feather_callback void save_task_latency(unsigned long event, | ||
111 | unsigned long when_ptr) | ||
112 | { | ||
113 | lt_t now = litmus_clock(); | ||
114 | lt_t *when = (lt_t*) when_ptr; | ||
115 | unsigned int seq_no; | ||
116 | int cpu = raw_smp_processor_id(); | ||
117 | struct timestamp *ts; | ||
118 | |||
119 | seq_no = fetch_and_inc((int *) &ts_seq_no); | ||
120 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | ||
121 | ts->event = event; | ||
122 | ts->timestamp = now - *when; | ||
123 | ts->seq_no = seq_no; | ||
124 | ts->cpu = cpu; | ||
125 | ts->task_type = TSK_RT; | ||
126 | __save_irq_flags(ts); | ||
127 | ft_buffer_finish_write(trace_ts_buf, ts); | ||
128 | } | ||
129 | } | ||
130 | |||
64 | /******************************************************************************/ | 131 | /******************************************************************************/ |
65 | /* DEVICE FILE DRIVER */ | 132 | /* DEVICE FILE DRIVER */ |
66 | /******************************************************************************/ | 133 | /******************************************************************************/ |
@@ -69,11 +136,15 @@ feather_callback void save_timestamp_cpu(unsigned long event, | |||
69 | * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) | 136 | * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) |
70 | * and we might not get as much | 137 | * and we might not get as much |
71 | */ | 138 | */ |
72 | #define NO_TIMESTAMPS (2 << 11) | 139 | #define NO_TIMESTAMPS (2 << 16) |
73 | 140 | ||
74 | static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | 141 | static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) |
75 | { | 142 | { |
76 | unsigned int count = NO_TIMESTAMPS; | 143 | unsigned int count = NO_TIMESTAMPS; |
144 | |||
145 | /* An overhead-tracing timestamp should be exactly 16 bytes long. */ | ||
146 | BUILD_BUG_ON(sizeof(struct timestamp) != 16); | ||
147 | |||
77 | while (count && !trace_ts_buf) { | 148 | while (count && !trace_ts_buf) { |
78 | printk("time stamp buffer: trying to allocate %u time stamps.\n", count); | 149 | printk("time stamp buffer: trying to allocate %u time stamps.\n", count); |
79 | ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); | 150 | ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); |
@@ -88,9 +159,35 @@ static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | |||
88 | ftdev->minor[idx].buf = NULL; | 159 | ftdev->minor[idx].buf = NULL; |
89 | } | 160 | } |
90 | 161 | ||
162 | static ssize_t write_timestamp_from_user(struct ft_buffer* buf, size_t len, | ||
163 | const char __user *from) | ||
164 | { | ||
165 | ssize_t consumed = 0; | ||
166 | struct timestamp ts; | ||
167 | |||
168 | /* don't give us partial timestamps */ | ||
169 | if (len % sizeof(ts)) | ||
170 | return -EINVAL; | ||
171 | |||
172 | while (len >= sizeof(ts)) { | ||
173 | if (copy_from_user(&ts, from, sizeof(ts))) { | ||
174 | consumed = -EFAULT; | ||
175 | goto out; | ||
176 | } | ||
177 | len -= sizeof(ts); | ||
178 | from += sizeof(ts); | ||
179 | consumed += sizeof(ts); | ||
180 | |||
181 | __add_timestamp_user(&ts); | ||
182 | } | ||
183 | |||
184 | out: | ||
185 | return consumed; | ||
186 | } | ||
187 | |||
91 | static int __init init_ft_overhead_trace(void) | 188 | static int __init init_ft_overhead_trace(void) |
92 | { | 189 | { |
93 | int err; | 190 | int err, cpu; |
94 | 191 | ||
95 | printk("Initializing Feather-Trace overhead tracing device.\n"); | 192 | printk("Initializing Feather-Trace overhead tracing device.\n"); |
96 | err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace"); | 193 | err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace"); |
@@ -99,11 +196,17 @@ static int __init init_ft_overhead_trace(void) | |||
99 | 196 | ||
100 | overhead_dev.alloc = alloc_timestamp_buffer; | 197 | overhead_dev.alloc = alloc_timestamp_buffer; |
101 | overhead_dev.free = free_timestamp_buffer; | 198 | overhead_dev.free = free_timestamp_buffer; |
199 | overhead_dev.write = write_timestamp_from_user; | ||
102 | 200 | ||
103 | err = register_ftdev(&overhead_dev); | 201 | err = register_ftdev(&overhead_dev); |
104 | if (err) | 202 | if (err) |
105 | goto err_dealloc; | 203 | goto err_dealloc; |
106 | 204 | ||
205 | /* initialize IRQ flags */ | ||
206 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
207 | clear_irq_fired(); | ||
208 | } | ||
209 | |||
107 | return 0; | 210 | return 0; |
108 | 211 | ||
109 | err_dealloc: | 212 | err_dealloc: |