path: root/include
author     Andrea Bastoni <bastoni@cs.unc.edu>  2010-01-19 19:38:14 -0500
committer  Andrea Bastoni <bastoni@cs.unc.edu>  2010-01-19 19:38:14 -0500
commit     ca4a474ff184b93bc1d2c49b1d80edac844e65cf (patch)
tree       62b0da916328d1e974669bcad62c6cc2f7efd4b8 /include
parent     e745cd9f0d056f1d00951c1eecdad2374b343d67 (diff)
Add Feather-Trace x86_32 architecture dependent code
Add the x86_32 architecture-dependent code and the infrastructure for x86_32/x86_64 integration.
Diffstat (limited to 'include')
-rw-r--r--  include/litmus/feather_trace.h  25
-rw-r--r--  include/litmus/sched_trace.h     4
-rw-r--r--  include/litmus/trace.h           4
3 files changed, 16 insertions(+), 17 deletions(-)
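For readers unfamiliar with the helpers this patch hoists into the common part of feather_trace.h: fetch_and_inc() and fetch_and_dec() are fetch-and-add style primitives built on the kernel's atomic_add_return()/atomic_sub_return(), returning the value the counter held before the update. A minimal sketch of the intended semantics follows (claim_slot() and next_slot are hypothetical illustrations, not part of the patch):

#include <asm/atomic.h>	/* atomic_add_return()/atomic_sub_return() */

/* Atomically increment *val and return its previous value. */
static inline int fetch_and_inc(int *val)
{
	return atomic_add_return(1, (atomic_t *) val) - 1;
}

/* Atomically decrement *val and return its previous value. */
static inline int fetch_and_dec(int *val)
{
	return atomic_sub_return(1, (atomic_t *) val) + 1;
}

/* Hypothetical usage: hand out consecutive slots to concurrent callers. */
static int next_slot;

static inline int claim_slot(void)
{
	return fetch_and_inc(&next_slot);	/* first caller gets 0, then 1, ... */
}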
diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h
index eef8af7a414e..7d27e763406f 100644
--- a/include/litmus/feather_trace.h
+++ b/include/litmus/feather_trace.h
@@ -1,6 +1,7 @@
 #ifndef _FEATHER_TRACE_H_
 #define _FEATHER_TRACE_H_
 
+#include <asm/atomic.h>
 #include <asm/feather_trace.h>
 
 int ft_enable_event(unsigned long id);
@@ -8,6 +9,17 @@ int ft_disable_event(unsigned long id);
 int ft_is_event_enabled(unsigned long id);
 int ft_disable_all_events(void);
 
+/* atomic_* functions are inline anyway */
+static inline int fetch_and_inc(int *val)
+{
+	return atomic_add_return(1, (atomic_t*) val) - 1;
+}
+
+static inline int fetch_and_dec(int *val)
+{
+	return atomic_sub_return(1, (atomic_t*) val) + 1;
+}
+
 #ifndef __ARCH_HAS_FEATHER_TRACE
 /* provide default implementation */
 
@@ -32,19 +44,6 @@ extern int ft_events[MAX_EVENTS];
 #define ft_event3(id, callback, p, p2, p3) \
 	if (ft_events[id]) callback(id, p, p2, p3);
 
-#include <asm/atomic.h>
-
-static inline int fetch_and_inc(int *val)
-{
-	return atomic_add_return(1, (atomic_t*) val) - 1;
-}
-
-static inline int fetch_and_dec(int *val)
-{
-	return atomic_sub_return(1, (atomic_t*) val) + 1;
-}
-
 #endif
 
-
 #endif
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index aae6ac27fe1b..e1b0c9712b5f 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -167,12 +167,12 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t)
 #define sched_trace_task_completion(t, forced) \
 	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \
-		forced)
+		(unsigned long) forced)
 #define sched_trace_task_block(t) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t)
 #define sched_trace_task_resume(t) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t)
-
+/* when is a pointer, it does not need an explicit cast to unsigned long */
 #define sched_trace_sys_release(when) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when)
 
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e8e0c7b6cc6a..b32c71180774 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -32,13 +32,13 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 
 #define TIMESTAMP(id) ft_event0(id, save_timestamp)
 
-#define DTIMESTAMP(id, def)  ft_event1(id, save_timestamp_def, def)
+#define DTIMESTAMP(id, def)  ft_event1(id, save_timestamp_def, (unsigned long) def)
 
 #define TTIMESTAMP(id, task) \
 	ft_event1(id, save_timestamp_task, (unsigned long) task)
 
 #define CTIMESTAMP(id, cpu) \
-	ft_event1(id, save_timestamp_cpu, cpu)
+	ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
 
 #else /* !CONFIG_SCHED_OVERHEAD_TRACE */
 
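A note on the casts added in the sched_trace.h and trace.h hunks above: the feather_callback handlers visible in the hunk headers (e.g. save_timestamp_cpu(unsigned long event, unsigned long cpu)) take unsigned long parameters, and with the x86_32/x86_64 integration int and unsigned long no longer share the same width on every supported target, so the macros presumably make the widening explicit at the call site. A reduced, hypothetical illustration (example_callback and EXAMPLE_TIMESTAMP are stand-ins, not the real ft_event1 machinery):

/* Hypothetical stand-in for a Feather-Trace callback: the payload is
 * always carried as an unsigned long, as in the hunk headers above. */
static void example_callback(unsigned long event, unsigned long arg)
{
	(void) event;
	(void) arg;
}

/* Hypothetical event macro: the explicit cast documents the widening of
 * narrower arguments (e.g. an int cpu id) and keeps the argument width
 * uniform across 32-bit and 64-bit builds. */
#define EXAMPLE_TIMESTAMP(id, cpu) \
	example_callback((id), (unsigned long) (cpu))

Invoked as, say, EXAMPLE_TIMESTAMP(100, some_int_cpu_id), an int argument is widened explicitly rather than left to implicit conversion at the (possibly assembly-level) call boundary.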