author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/litmus
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'include/litmus')
-rw-r--r--  include/litmus/affinity.h   80
-rw-r--r--  include/litmus/ftdev.h       5
-rw-r--r--  include/litmus/litmus.h     47
-rw-r--r--  include/litmus/preempt.h     2
-rw-r--r--  include/litmus/rt_param.h   18
-rw-r--r--  include/litmus/trace.h      23
-rw-r--r--  include/litmus/trace_irq.h  21
7 files changed, 174 insertions(+), 22 deletions(-)
diff --git a/include/litmus/affinity.h b/include/litmus/affinity.h
new file mode 100644
index 000000000000..ca2e442eb547
--- /dev/null
+++ b/include/litmus/affinity.h
@@ -0,0 +1,80 @@
+#ifndef __LITMUS_AFFINITY_H
+#define __LITMUS_AFFINITY_H
+
+#include <linux/cpumask.h>
+
+/*
+  L1 (instr) = depth 0
+  L1 (data)  = depth 1
+  L2 = depth 2
+  L3 = depth 3
+ */
+#define NUM_CACHE_LEVELS 4
+
+struct neighborhood
+{
+	unsigned int size[NUM_CACHE_LEVELS];
+	cpumask_var_t neighbors[NUM_CACHE_LEVELS];
+};
+
+/* topology info is stored redundantly in a big array for fast lookups */
+extern struct neighborhood neigh_info[NR_CPUS];
+
+void init_topology(void); /* called by Litmus module's _init_litmus() */
+
+/* Works like:
+void get_nearest_available_cpu(
+	cpu_entry_t **nearest,
+	cpu_entry_t *start,
+	cpu_entry_t *entries,
+	int release_master)
+
+Set release_master = NO_CPU for no Release Master.
+
+We use a macro here to exploit the fact that C-EDF and G-EDF
+have similar structures for their cpu_entry_t structs, even though
+they do not share a common base struct. The macro allows us to
+avoid code duplication.
+
+TODO: Factor out the job-to-processor linking from C/G-EDF into
+a reusable "processor mapping". (See B.B.'s RTSS'09 paper &
+dissertation.)
+ */
+#define get_nearest_available_cpu(nearest, start, entries, release_master) \
+{ \
+	(nearest) = NULL; \
+	if (!(start)->linked) { \
+		(nearest) = (start); \
+	} else { \
+		int __level; \
+		int __cpu; \
+		int __release_master = ((release_master) == NO_CPU) ? -1 : (release_master); \
+		struct neighborhood *__neighbors = &neigh_info[(start)->cpu]; \
+		\
+		for (__level = 0; (__level < NUM_CACHE_LEVELS) && !(nearest); ++__level) { \
+			if (__neighbors->size[__level] > 1) { \
+				for_each_cpu(__cpu, __neighbors->neighbors[__level]) { \
+					if (__cpu != __release_master) { \
+						cpu_entry_t *__entry = &per_cpu((entries), __cpu); \
+						if (!__entry->linked) { \
+							(nearest) = __entry; \
+							break; \
+						} \
+					} \
+				} \
+			} else if (__neighbors->size[__level] == 0) { \
+				break; \
+			} \
+		} \
+	} \
+	\
+	if ((nearest)) { \
+		TRACE("P%d is closest available CPU to P%d\n", \
+			(nearest)->cpu, (start)->cpu); \
+	} else { \
+		TRACE("Could not find an available CPU close to P%d\n", \
+			(start)->cpu); \
+	} \
+}
+
+#endif
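
Usage sketch (not from this commit): the macro is meant to be invoked from a
plugin's CPU-selection path. The sketch below assumes a G-EDF-style plugin
whose per-CPU cpu_entry_t variable is named gsnedf_cpu_entries and whose
cpu_entry_t carries at least a 'cpu' field and a 'linked' task pointer, as
the macro requires; both names are assumptions, not part of this patch.

/* Hypothetical caller: find the closest idle CPU to a preferred CPU. */
static cpu_entry_t *nearest_idle_cpu(cpu_entry_t *preferred)
{
	cpu_entry_t *affinity = NULL;

	/* Pass NO_CPU as release_master when no CPU is reserved
	 * for release processing. */
	get_nearest_available_cpu(affinity, preferred,
				  gsnedf_cpu_entries, NO_CPU);

	return affinity; /* NULL if every nearby CPU is linked */
}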
diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h
index 348387e9adf9..0b959874dd70 100644
--- a/include/litmus/ftdev.h
+++ b/include/litmus/ftdev.h
@@ -16,7 +16,8 @@ typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no);
 /* return 0 on success, otherwise -$REASON */
 typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no);
 typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no);
-
+/* Let devices handle writes from userspace. No synchronization provided. */
+typedef ssize_t (*ftdev_write_t)(struct ft_buffer* buf, size_t len, const char __user *from);
 
 struct ftdev_event;
 
@@ -27,6 +28,7 @@ struct ftdev_minor {
 	/* FIXME: filter for authorized events */
 	struct ftdev_event* events;
 	struct device* device;
+	struct ftdev* ftdev;
 };
 
 struct ftdev {
@@ -39,6 +41,7 @@ struct ftdev {
 	ftdev_alloc_t alloc;
 	ftdev_free_t free;
 	ftdev_can_open_t can_open;
+	ftdev_write_t write;
 };
 
 struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size);
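
The new write hook lets a device accept data from userspace; as the comment
says, synchronization is left entirely to the device. Below is a hedged
sketch of a possible handler, not part of this commit; it assumes the
ft_buffer_start_write()/ft_buffer_finish_write() helpers and the slot_size
field from include/litmus/feather_buffer.h. A device would install it via
the new 'write' field of struct ftdev.

/* Hypothetical ftdev_write_t handler: copy whole records from
 * userspace into a feather-trace buffer. */
static ssize_t demo_ftdev_write(struct ft_buffer *buf, size_t len,
				const char __user *from)
{
	ssize_t written = 0;
	void *slot;

	if (len % buf->slot_size)
		return -EINVAL; /* accept only whole records */

	while (len > 0) {
		if (!ft_buffer_start_write(buf, &slot))
			break; /* buffer full; report a partial write */
		if (copy_from_user(slot, from, buf->slot_size)) {
			ft_buffer_finish_write(buf, slot);
			return -EFAULT;
		}
		ft_buffer_finish_write(buf, slot);
		from    += buf->slot_size;
		len     -= buf->slot_size;
		written += buf->slot_size;
	}
	return written;
}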
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 3df242bf272f..95d0805519de 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -138,7 +138,7 @@ static inline int is_kernel_np(struct task_struct *t)
 
 static inline int is_user_np(struct task_struct *t)
 {
-	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0;
+	return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->sched.np.flag : 0;
 }
 
 static inline void request_exit_np(struct task_struct *t)
@@ -148,17 +148,11 @@ static inline void request_exit_np(struct task_struct *t)
 		 * into the kernel at the end of a critical section. */
 		if (likely(tsk_rt(t)->ctrl_page)) {
 			TRACE_TASK(t, "setting delayed_preemption flag\n");
-			tsk_rt(t)->ctrl_page->delayed_preemption = 1;
+			tsk_rt(t)->ctrl_page->sched.np.preempt = 1;
 		}
 	}
 }
 
-static inline void clear_exit_np(struct task_struct *t)
-{
-	if (likely(tsk_rt(t)->ctrl_page))
-		tsk_rt(t)->ctrl_page->delayed_preemption = 0;
-}
-
 static inline void make_np(struct task_struct *t)
 {
 	tsk_rt(t)->kernel_np++;
@@ -172,6 +166,34 @@ static inline int take_np(struct task_struct *t)
 	return --tsk_rt(t)->kernel_np;
 }
 
+/* returns 0 if remote CPU needs an IPI to preempt, 1 if no IPI is required */
+static inline int request_exit_np_atomic(struct task_struct *t)
+{
+	union np_flag old, new;
+
+	if (tsk_rt(t)->ctrl_page) {
+		old.raw = tsk_rt(t)->ctrl_page->sched.raw;
+		if (old.np.flag == 0) {
+			/* no longer non-preemptive */
+			return 0;
+		} else if (old.np.preempt) {
+			/* already set, nothing for us to do */
+			return 1;
+		} else {
+			/* non-preemptive and flag not set */
+			new.raw = old.raw;
+			new.np.preempt = 1;
+			/* if we get old back, then we atomically set the flag */
+			return cmpxchg(&tsk_rt(t)->ctrl_page->sched.raw, old.raw, new.raw) == old.raw;
+			/* If we raced with a concurrent change, then so be
+			 * it. Deliver it by IPI. We don't want an unbounded
+			 * retry loop here since tasks might exploit that to
+			 * keep the kernel busy indefinitely. */
+		}
+	} else
+		return 0;
+}
+
 #else
 
 static inline int is_kernel_np(struct task_struct* t)
@@ -190,12 +212,19 @@ static inline void request_exit_np(struct task_struct *t)
 	BUG();
 }
 
-static inline void clear_exit_np(struct task_struct* t)
+static inline int request_exit_np_atomic(struct task_struct *t)
 {
+	return 0;
 }
 
 #endif
 
+static inline void clear_exit_np(struct task_struct *t)
+{
+	if (likely(tsk_rt(t)->ctrl_page))
+		tsk_rt(t)->ctrl_page->sched.np.preempt = 0;
+}
+
 static inline int is_np(struct task_struct *t)
 {
 #ifdef CONFIG_SCHED_DEBUG_TRACE
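
The return convention of request_exit_np_atomic() is easiest to see from a
caller. In the hedged sketch below (not from this commit), preempt_check()
is a stand-in name and smp_send_reschedule() stands in for whatever IPI
mechanism the plugin actually uses.

/* Hypothetical caller: preempt the task running on a remote CPU,
 * skipping the IPI when the task promises to call into the kernel
 * itself at the end of its non-preemptive section. */
static void preempt_check(struct task_struct *scheduled, int cpu)
{
	if (scheduled && is_user_np(scheduled) &&
	    request_exit_np_atomic(scheduled))
		return; /* np.preempt set atomically: no IPI required */

	/* Task is preemptable, or we raced: force a reschedule. */
	smp_send_reschedule(cpu);
}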
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 260c6fe17986..380b886d78ff 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -10,7 +10,7 @@
 
 extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
 
-#ifdef CONFIG_DEBUG_KERNEL
+#ifdef CONFIG_PREEMPT_STATE_TRACE
 const char* sched_state_name(int s);
 #define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
 #else
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 5de422c742f6..d6d799174160 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -30,7 +30,7 @@ typedef enum {
 typedef enum {
 	NO_ENFORCEMENT,      /* job may overrun unhindered */
 	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
-	PRECISE_ENFORCEMENT  /* NOT IMPLEMENTED - enforced with hrtimers */
+	PRECISE_ENFORCEMENT  /* budgets are enforced with hrtimers */
 } budget_policy_t;
 
 struct rt_task {
@@ -42,6 +42,16 @@ struct rt_task {
 	budget_policy_t budget_policy; /* ignored by pfair */
 };
 
+union np_flag {
+	uint32_t raw;
+	struct {
+		/* Is the task currently in a non-preemptive section? */
+		uint32_t flag:31;
+		/* Should the task call into the scheduler? */
+		uint32_t preempt:1;
+	} np;
+};
+
 /* The definition of the data that is shared between the kernel and real-time
  * tasks via a shared page (see litmus/ctrldev.c).
  *
@@ -57,11 +67,7 @@ struct rt_task {
  * determining preemption/migration overheads).
  */
 struct control_page {
-	/* Is the task currently in a non-preemptive section? */
-	int np_flag;
-	/* Should the task call into the kernel when it leaves
-	 * its non-preemptive section? */
-	int delayed_preemption;
+	volatile union np_flag sched;
 
 	/* to be extended */
 };
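
For context, the np_flag union encodes a small kernel/userspace protocol
over the shared control page. The userspace side might look like the hedged
sketch below; it is not from this commit, the use of sched_yield() as the
"call into the scheduler" is an assumption, and since 'flag' is 31 bits
wide a real library could maintain it as a nesting counter rather than a
boolean.

#include <sched.h>

/* Hypothetical userspace sketch; ctrl points at this task's mapped
 * struct control_page. */
static void np_enter(volatile struct control_page *ctrl)
{
	ctrl->sched.np.flag = 1;    /* now non-preemptive */
	__sync_synchronize();       /* publish before the critical section */
}

static void np_exit(volatile struct control_page *ctrl)
{
	__sync_synchronize();
	ctrl->sched.np.flag = 0;    /* preemptable again */
	if (ctrl->sched.np.preempt) /* kernel deferred a preemption? */
		sched_yield();      /* call into the scheduler now */
}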
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 05f487263f28..e809376d6487 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -20,7 +20,9 @@ struct timestamp {
 	uint32_t		seq_no;
 	uint8_t			cpu;
 	uint8_t			event;
-	uint8_t			task_type;
+	uint8_t			task_type:2;
+	uint8_t			irq_flag:1;
+	uint8_t			irq_count:5;
 };
 
 /* tracing callbacks */
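
This change narrows task_type to two bits and records interrupt
interference with each sample. A hedged sketch of how an analysis tool
might consume the new bits (sample_is_clean() is an illustrative name,
not from this commit):

/* Hypothetical post-processing check: discard overhead samples whose
 * measurement interval was disturbed by interrupts. */
static int sample_is_clean(const struct timestamp *ts)
{
	return !ts->irq_flag && ts->irq_count == 0;
}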
@@ -28,7 +30,7 @@ feather_callback void save_timestamp(unsigned long event);
 feather_callback void save_timestamp_def(unsigned long event, unsigned long type);
 feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
 feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
-
+feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr);
 
 #define TIMESTAMP(id) ft_event0(id, save_timestamp)
 
@@ -40,6 +42,9 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 #define CTIMESTAMP(id, cpu) \
 	ft_event1(id, save_timestamp_cpu, (unsigned long) cpu)
 
+#define LTIMESTAMP(id, task) \
+	ft_event1(id, save_task_latency, (unsigned long) task)
+
 #else /* !CONFIG_SCHED_OVERHEAD_TRACE */
 
 #define TIMESTAMP(id) /* no tracing */
@@ -50,6 +55,8 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 
 #define CTIMESTAMP(id, cpu) /* no tracing */
 
+#define LTIMESTAMP(id, when_ptr) /* no tracing */
+
 #endif
 
 
@@ -61,6 +68,8 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
  * always the next number after the start time event id.
  */
 
+
+
 #define TS_SCHED_START			DTIMESTAMP(100, TSK_UNKNOWN) /* we only
 								      * care
 								      * about
@@ -92,12 +101,16 @@ feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu)
 #define TS_EXIT_NP_END			TIMESTAMP(151)
 
 #define TS_LOCK_START			TIMESTAMP(170)
-#define TS_LOCK_END			TIMESTAMP(171)
-#define TS_UNLOCK_START			TIMESTAMP(172)
-#define TS_UNLOCK_END			TIMESTAMP(173)
+#define TS_LOCK_SUSPEND			TIMESTAMP(171)
+#define TS_LOCK_RESUME			TIMESTAMP(172)
+#define TS_LOCK_END			TIMESTAMP(173)
+
+#define TS_UNLOCK_START			TIMESTAMP(180)
+#define TS_UNLOCK_END			TIMESTAMP(181)
 
 #define TS_SEND_RESCHED_START(c)	CTIMESTAMP(190, c)
 #define TS_SEND_RESCHED_END		DTIMESTAMP(191, TSK_UNKNOWN)
 
+#define TS_RELEASE_LATENCY(when)	LTIMESTAMP(208, &(when))
 
 #endif /* !_SYS_TRACE_H_ */
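
Splitting the lock probe into suspend/resume events lets analysis separate
time spent blocked from the actual locking overhead. A hypothetical
placement in a suspension-based lock's acquire path is sketched below; all
names are stand-ins, not from this commit.

struct demo_semaphore;				/* stand-in lock type */
extern int try_fast_path(struct demo_semaphore *sem);
extern void wait_for_lock(struct demo_semaphore *sem);

/* Hypothetical instrumentation of a suspension-based lock(). */
static void demo_lock(struct demo_semaphore *sem)
{
	TS_LOCK_START;
	if (!try_fast_path(sem)) {
		TS_LOCK_SUSPEND;	/* about to block */
		wait_for_lock(sem);	/* suspend until ownership passes */
		TS_LOCK_RESUME;		/* woken as the new owner */
	}
	TS_LOCK_END;
}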
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h
new file mode 100644
index 000000000000..f18b127a089d
--- /dev/null
+++ b/include/litmus/trace_irq.h
@@ -0,0 +1,21 @@
+#ifndef _LITMUS_TRACE_IRQ_H_
+#define _LITMUS_TRACE_IRQ_H_
+
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+
+extern DEFINE_PER_CPU(atomic_t, irq_fired_count);
+
+static inline void ft_irq_fired(void)
+{
+	/* Only called with preemptions disabled. */
+	atomic_inc(&__get_cpu_var(irq_fired_count));
+}
+
+
+#else
+
+#define ft_irq_fired() /* nothing to do */
+
+#endif
+
+#endif
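
The header only provides the increment side; a consumer (presumably the
timestamp-recording path) must read and reset the per-CPU counter around
each measured interval. A hedged sketch of such a consumer, not from this
commit:

/* Hypothetical consumer: atomically snapshot and clear this CPU's
 * interrupt count, e.g. when filling irq_count in a struct timestamp.
 * Like ft_irq_fired(), assumes preemptions are disabled. */
static inline unsigned int demo_irq_count_and_reset(void)
{
	return atomic_xchg(&__get_cpu_var(irq_fired_count), 0);
}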