32 files changed, 600 insertions(+), 368 deletions(-)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index ed4c4f54e2ae..7539d84628f7 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -25,7 +25,6 @@
 
 #include <litmus/preempt.h>
 #include <litmus/debug_trace.h>
-#include <litmus/trace.h>
 
 #include <asm/mtrr.h>
 #include <asm/tlbflush.h>
@@ -122,7 +121,6 @@ static void native_smp_send_reschedule(int cpu)
 		WARN_ON(1);
 		return;
 	}
-	TS_SEND_RESCHED_START(cpu);
 	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
@@ -214,18 +212,16 @@ static void native_stop_other_cpus(int wait)
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
-	/* LITMUS^RT: this IPI might need to trigger the sched state machine. */
-	sched_state_ipi();
 	inc_irq_stat(irq_resched_count);
-	/*
-	 * LITMUS^RT: starting from 3.0 schedule_ipi() actually does something.
-	 * This may increase IPI latencies compared with previous versions.
-	 */
 	scheduler_ipi();
-	TS_SEND_RESCHED_END;
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
+
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine.
+	 * Starting from 3.0 scheduler_ipi() actually does something. This may
+	 * increase IPI latencies compared with previous versions. */
+	sched_state_ipi();
 }
 
 void smp_call_function_interrupt(struct pt_regs *regs)
@@ -251,8 +247,10 @@ extern void hrtimer_pull(void);
 void smp_pull_timers_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
+	irq_enter();
 	TRACE("pull timer interrupt\n");
 	hrtimer_pull();
+	irq_exit();
 }
 
 struct smp_ops smp_ops = {
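Note on the smp.c hunks above: the overhead-tracing probes leave the x86 IPI path (TS_SEND_RESCHED_START/_END now live in litmus/preempt.c, further below), sched_state_ipi() moves after scheduler_ipi(), and the pull-timers handler gets bracketed by irq_enter()/irq_exit() so the kernel accounts it as hard-IRQ context and runs any softirqs it raises on exit. A minimal sketch of that bracketing pattern, with hypothetical handler and payload names:

void smp_example_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();		/* acknowledge the local APIC first */
	irq_enter();		/* enter hard-IRQ context: preempt count and
				 * accounting now reflect interrupt execution */
	do_example_work();	/* hypothetical payload, e.g. hrtimer_pull() */
	irq_exit();		/* leave IRQ context; pending softirqs may run here */
}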
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 0cb4373698e7..bd91e647228d 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -358,4 +358,7 @@ ENTRY(sys_call_table)
 	.long sys_wait_for_ts_release
 	.long sys_release_ts /* +10 */
 	.long sys_null_call
-	.long sys_register_nv_device
+	.long sys_litmus_dgl_lock
+	.long sys_litmus_dgl_unlock
+	.long sys_set_aux_tasks
+	.long sys_sched_trace_event /* +15 */
diff --git a/include/linux/completion.h b/include/linux/completion.h
index cff405c4dd3a..a64fb5680400 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -91,7 +91,6 @@ extern bool completion_done(struct completion *x);
 
 extern void complete(struct completion *);
 extern void complete_all(struct completion *);
-extern void complete_n(struct completion *, int n);
 
 /**
  * INIT_COMPLETION - reinitialize a completion structure
diff --git a/include/litmus/fp_common.h b/include/litmus/fp_common.h
index dd1f7bf1e347..19356c0fa6c1 100644
--- a/include/litmus/fp_common.h
+++ b/include/litmus/fp_common.h
@@ -57,7 +57,7 @@ static inline unsigned int fpq_find(struct fp_prio_queue* q)
 
 static inline void fp_prio_add(struct fp_prio_queue* q, struct task_struct* t, unsigned int index)
 {
-
+	BUG_ON(index >= LITMUS_MAX_PRIORITY);
 	BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node));
 
 	fpq_set(q, index);
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
index 04d4bcaeae96..3d545fd2f5c5 100644
--- a/include/litmus/fpmath.h
+++ b/include/litmus/fpmath.h
@@ -1,9 +1,12 @@
 #ifndef __FP_MATH_H__
 #define __FP_MATH_H__
 
-#ifndef __KERNEL__
+#ifdef __KERNEL__
+#include <linux/math64.h>
+#else
 #include <stdint.h>
 #define abs(x) (((x) < 0) ? -(x) : x)
+#define div64_s64(a, b) (a)/(b)
 #endif
 
 // Use 64-bit because we want to track things at the nanosecond scale.
@@ -32,7 +35,7 @@ static inline fp_t FP(fpbuf_t x)
 /* divide two integers to obtain a fixed point value */
 static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
 {
-	return _fp(FP(a).val / (b));
+	return _fp(div64_s64(FP(a).val, (b)));
 }
 
 static inline fpbuf_t _point(fp_t x)
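Note: the fpmath.h change matters on 32-bit kernels, where a native 64-bit division compiles into a call to a libgcc helper that the kernel does not link; div64_s64() from <linux/math64.h> avoids that, while the userspace branch maps it back to plain division. A small userspace sketch of _frac() under the non-__KERNEL__ fallback (the FP_SHIFT value here is illustrative; fpmath.h defines the real one):

#include <stdint.h>
#include <stdio.h>

#define div64_s64(a, b) (a)/(b)	/* plain '/' is fine outside the kernel */

typedef int64_t fpbuf_t;
#define FP_SHIFT 10			/* illustrative shift width */

int main(void)
{
	/* _frac(1, 3): represent 1/3 as a fixed-point value */
	fpbuf_t val = div64_s64(((fpbuf_t)1) << FP_SHIFT, 3);
	printf("1/3 ~= %lld/%d\n", (long long)val, 1 << FP_SHIFT);
	return 0;
}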
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 54f33e835682..2da61fa58bdc 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -50,8 +50,6 @@ void litmus_exit_task(struct task_struct *tsk);
 #define tsk_aux(t) (&(t)->aux_data)
 
 /* Realtime utility macros */
-#define get_rt_flags(t) (tsk_rt(t)->flags)
-#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f))
 #define is_priority_boosted(t) (tsk_rt(t)->priority_boosted)
 #define get_boost_start(t) (tsk_rt(t)->boost_start_time)
 
@@ -69,7 +67,7 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_deadline(t) (tsk_rt(t)->job_params.deadline)
 #define get_period(t) (tsk_rt(t)->task_params.period)
 #define get_release(t) (tsk_rt(t)->job_params.release)
-#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
+#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
 
 #define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
 #define base_priority(t) (t)
@@ -245,6 +243,11 @@ static inline int is_present(struct task_struct* t)
 	return t && tsk_rt(t)->present;
 }
 
+static inline int is_completed(struct task_struct* t)
+{
+	return t && tsk_rt(t)->completed;
+}
+
 
 /* make the unit explicit */
 typedef unsigned long quanta_t;
@@ -272,4 +275,39 @@ static inline quanta_t time2quanta(lt_t time, enum round round)
 /* By how much is cpu staggered behind CPU 0? */
 u64 cpu_stagger_offset(int cpu);
 
+static inline struct control_page* get_control_page(struct task_struct *t)
+{
+	return tsk_rt(t)->ctrl_page;
+}
+
+static inline int has_control_page(struct task_struct* t)
+{
+	return tsk_rt(t)->ctrl_page != NULL;
+}
+
+
+#ifdef CONFIG_SCHED_OVERHEAD_TRACE
+
+#define TS_SYSCALL_IN_START \
+	if (has_control_page(current)) { \
+		__TS_SYSCALL_IN_START(&get_control_page(current)->ts_syscall_start); \
+	}
+
+#define TS_SYSCALL_IN_END \
+	if (has_control_page(current)) { \
+		uint64_t irqs; \
+		local_irq_disable(); \
+		irqs = get_control_page(current)->irq_count - \
+			get_control_page(current)->irq_syscall_start; \
+		__TS_SYSCALL_IN_END(&irqs); \
+		local_irq_enable(); \
+	}
+
+#else
+
+#define TS_SYSCALL_IN_START
+#define TS_SYSCALL_IN_END
+
+#endif
+
 #endif
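Note: TS_SYSCALL_IN_START/TS_SYSCALL_IN_END form one half of a user-to-kernel overhead probe: userspace stores a cycle timestamp and an IRQ-counter snapshot in its control page just before trapping, and the kernel then emits samples covering the syscall entry cost plus the number of IRQs that disturbed it. A hypothetical userspace sketch (cp is the mmap()ed control page; ft_read_tsc() stands in for a cycle-counter read, and neither name comes from this patch):

#include <stdint.h>

extern struct control_page *cp;	/* assumed mapped via the LITMUS^RT ctrl device */

static inline long traced_litmus_lock(int od)
{
	cp->irq_syscall_start = cp->irq_count;	/* IRQ snapshot */
	cp->ts_syscall_start  = ft_read_tsc();	/* Feather-Trace cycles */
	return litmus_lock(od);	/* kernel takes the __TS_SYSCALL_IN_* samples */
}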
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 43daaf84101d..39685a351cb1 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -1,9 +1,9 @@
+#ifndef _LINUX_RT_PARAM_H_
+#define _LINUX_RT_PARAM_H_
 /*
  * Definition of the scheduler plugin interface.
  *
  */
-#ifndef _LINUX_RT_PARAM_H_
-#define _LINUX_RT_PARAM_H_
 
 #include <litmus/fpmath.h>
 
@@ -102,12 +102,12 @@ struct rt_task {
 };
 
 union np_flag {
-	uint32_t raw;
+	uint64_t raw;
 	struct {
 		/* Is the task currently in a non-preemptive section? */
-		uint32_t flag:31;
+		uint64_t flag:31;
 		/* Should the task call into the scheduler? */
-		uint32_t preempt:1;
+		uint64_t preempt:1;
 	} np;
 };
 
@@ -139,11 +139,29 @@ struct gpu_affinity_observer_args
  * determining preemption/migration overheads).
  */
 struct control_page {
+	/* This flag is used by userspace to communicate non-preemptive
+	 * sections. */
 	volatile union np_flag sched;
 
+	volatile uint64_t irq_count; /* Incremented by the kernel each time an IRQ is
+				      * handled. */
+
+	/* Locking overhead tracing: userspace records here the time stamp
+	 * and IRQ counter prior to starting the system call. */
+	uint64_t ts_syscall_start;  /* Feather-Trace cycles */
+	uint64_t irq_syscall_start; /* Snapshot of irq_count when the syscall
+				     * started. */
+
 	/* to be extended */
 };
 
+/* Expected offsets within the control page. */
+
+#define LITMUS_CP_OFFSET_SCHED		0
+#define LITMUS_CP_OFFSET_IRQ_COUNT	8
+#define LITMUS_CP_OFFSET_TS_SC_START	16
+#define LITMUS_CP_OFFSET_IRQ_SC_START	24
+
 /* don't export internal data structures to user space (liblitmus) */
 #ifdef __KERNEL__
 
@@ -234,12 +252,6 @@ typedef struct avg_est{
 
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-//struct tasklet_head
-//{
-//	struct tasklet_struct *head;
-//	struct tasklet_struct **tail;
-//};
-
 struct klmirqd_info
 {
 	struct task_struct* klmirqd;
@@ -277,6 +289,9 @@ struct rt_param {
 	/* is the task present? (true if it can be scheduled) */
 	unsigned int present:1;
 
+	/* has the task completed? */
+	unsigned int completed:1;
+
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	/* proxy threads have minimum priority by default */
 	unsigned int is_interrupt_thread:1;
@@ -415,13 +430,6 @@ struct rt_param {
 	struct control_page * ctrl_page;
 };
 
-//#ifdef CONFIG_LITMUS_SOFTIRQD
-//struct klmirqd_data
-//{
-//	struct binheap klmirqd_users;
-//};
-//#endif
-
 #ifdef CONFIG_REALTIME_AUX_TASKS
 struct aux_data
 {
@@ -432,11 +440,6 @@ struct aux_data
 };
 #endif
 
-/* Possible RT flags */
-#define RT_F_RUNNING	0x00000000
-#define RT_F_SLEEP	0x00000001
-#define RT_F_EXIT_SEM	0x00000008
-
-#endif
+#endif /* __KERNEL__ */
 
 #endif
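Note: the LITMUS_CP_OFFSET_* constants freeze the control-page layout as an ABI, and ctrldev.c (below) enforces them with BUILD_BUG_ON(). Userspace can mirror the same check; a sketch using C11 static_assert (the struct name is illustrative, the offsets are the values defined above):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct control_page_abi {		/* fields as laid out above */
	uint64_t sched_raw;		/* union np_flag, via .raw */
	uint64_t irq_count;
	uint64_t ts_syscall_start;
	uint64_t irq_syscall_start;
};

static_assert(offsetof(struct control_page_abi, sched_raw) == 0, "SCHED");
static_assert(offsetof(struct control_page_abi, irq_count) == 8, "IRQ_COUNT");
static_assert(offsetof(struct control_page_abi, ts_syscall_start) == 16, "TS_SC_START");
static_assert(offsetof(struct control_page_abi, irq_syscall_start) == 24, "IRQ_SC_START");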
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 78004381a6cc..d0e7d74bb45e 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -58,10 +58,12 @@ typedef void (*task_block_t) (struct task_struct *task);
  */
 typedef void (*task_exit_t) (struct task_struct *);
 
+#ifdef CONFIG_LITMUS_LOCKING
 /* Called when the current task attempts to create a new lock of a given
  * protocol type. */
 typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
				  void* __user config);
+#endif
 
 struct affinity_observer;
 typedef long (*allocate_affinity_observer_t) (
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e078aee4234d..15bd645d2466 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -3,6 +3,7 @@
 
 #ifdef CONFIG_SCHED_OVERHEAD_TRACE
 
+
 #include <litmus/feather_trace.h>
 #include <litmus/feather_buffer.h>
 
@@ -16,7 +17,8 @@ enum task_type_marker {
 };
 
 struct timestamp {
-	uint64_t	timestamp;
+	uint64_t	timestamp:48;
+	uint64_t	pid:16;
 	uint32_t	seq_no;
 	uint8_t		cpu;
 	uint8_t		event;
@@ -31,11 +33,16 @@ feather_callback void save_timestamp_def(unsigned long event, unsigned long type
 feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr);
 feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu);
 feather_callback void save_task_latency(unsigned long event, unsigned long when_ptr);
+feather_callback void save_timestamp_time(unsigned long event, unsigned long time_ptr);
+feather_callback void save_timestamp_irq(unsigned long event, unsigned long irq_count_ptr);
+feather_callback void save_timestamp_hide_irq(unsigned long event);
 
 #define TIMESTAMP(id) ft_event0(id, save_timestamp)
 
 #define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def)
 
+#define TIMESTAMP_CUR(id) DTIMESTAMP(id, is_realtime(current) ? TSK_RT : TSK_BE)
+
 #define TTIMESTAMP(id, task) \
 	ft_event1(id, save_timestamp_task, (unsigned long) task)
 
@@ -45,18 +52,35 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
 #define LTIMESTAMP(id, task) \
 	ft_event1(id, save_task_latency, (unsigned long) task)
 
+#define TIMESTAMP_TIME(id, time_ptr) \
+	ft_event1(id, save_timestamp_time, (unsigned long) time_ptr)
+
+#define TIMESTAMP_IRQ(id, irq_count_ptr) \
+	ft_event1(id, save_timestamp_irq, (unsigned long) irq_count_ptr)
+
+#define TIMESTAMP_IN_IRQ(id) \
+	ft_event0(id, save_timestamp_hide_irq)
+
 #else /* !CONFIG_SCHED_OVERHEAD_TRACE */
 
 #define TIMESTAMP(id)        /* no tracing */
 
 #define DTIMESTAMP(id, def)  /* no tracing */
 
+#define TIMESTAMP_CUR(id)    /* no tracing */
+
 #define TTIMESTAMP(id, task) /* no tracing */
 
 #define CTIMESTAMP(id, cpu)  /* no tracing */
 
 #define LTIMESTAMP(id, when_ptr) /* no tracing */
 
+#define TIMESTAMP_TIME(id, time_ptr) /* no tracing */
+
+#define TIMESTAMP_IRQ(id, irq_count_ptr) /* no tracing */
+
+#define TIMESTAMP_IN_IRQ(id) /* no tracing */
+
 #endif
 
 
@@ -68,7 +92,20 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
  * always the next number after the start time event id.
  */
 
+#define __TS_SYSCALL_IN_START(p)	TIMESTAMP_TIME(10, p)
+#define __TS_SYSCALL_IN_END(p)		TIMESTAMP_IRQ(11, p)
+
+#define TS_SYSCALL_OUT_START		TIMESTAMP_CUR(20)
+#define TS_SYSCALL_OUT_END		TIMESTAMP_CUR(21)
+
+#define TS_LOCK_START			TIMESTAMP_CUR(30)
+#define TS_LOCK_END			TIMESTAMP_CUR(31)
+
+#define TS_LOCK_SUSPEND			TIMESTAMP_CUR(38)
+#define TS_LOCK_RESUME			TIMESTAMP_CUR(39)
 
+#define TS_UNLOCK_START			TIMESTAMP_CUR(40)
+#define TS_UNLOCK_END			TIMESTAMP_CUR(41)
 
 #define TS_SCHED_START			DTIMESTAMP(100, TSK_UNKNOWN) /* we only
								      * care
@@ -100,34 +137,19 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
 #define TS_EXIT_NP_START		TIMESTAMP(150)
 #define TS_EXIT_NP_END			TIMESTAMP(151)
 
-#define TS_LOCK_START			TIMESTAMP(170)
-#define TS_LOCK_SUSPEND			TIMESTAMP(171)
-#define TS_LOCK_RESUME			TIMESTAMP(172)
-#define TS_LOCK_END			TIMESTAMP(173)
-
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 #define TS_DGL_LOCK_START		TIMESTAMP(175)
 #define TS_DGL_LOCK_SUSPEND		TIMESTAMP(176)
 #define TS_DGL_LOCK_RESUME		TIMESTAMP(177)
 #define TS_DGL_LOCK_END			TIMESTAMP(178)
-#endif
-
-#define TS_UNLOCK_START			TIMESTAMP(180)
-#define TS_UNLOCK_END			TIMESTAMP(181)
-
-#ifdef CONFIG_LITMUS_DGL_SUPPORT
 #define TS_DGL_UNLOCK_START		TIMESTAMP(185)
 #define TS_DGL_UNLOCK_END		TIMESTAMP(186)
 #endif
 
 #define TS_SEND_RESCHED_START(c)	CTIMESTAMP(190, c)
-#define TS_SEND_RESCHED_END		DTIMESTAMP(191, TSK_UNKNOWN)
-
-#define TS_RELEASE_LATENCY(when)	LTIMESTAMP(208, &(when))
-
+#define TS_SEND_RESCHED_END		TIMESTAMP_IN_IRQ(191)
 
 #ifdef CONFIG_LITMUS_NVIDIA
-
 #define TS_NV_TOPISR_START		TIMESTAMP(200)
 #define TS_NV_TOPISR_END		TIMESTAMP(201)
 
@@ -136,7 +158,6 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
 
 #define TS_NV_RELEASE_BOTISR_START	TIMESTAMP(204)
 #define TS_NV_RELEASE_BOTISR_END	TIMESTAMP(205)
-
 #endif
 
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
@@ -144,5 +165,6 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
 #define TS_NV_SCHED_BOTISR_END		TIMESTAMP(207)
 #endif
 
+#define TS_RELEASE_LATENCY(when)	LTIMESTAMP(208, &(when))
 
 #endif /* !_SYS_TRACE_H_ */
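Note: narrowing the timestamp to 48 bits frees 16 bits for a PID without growing the trace record; 2^48 TSC cycles is roughly a day at 3 GHz, far longer than any trace run. A sketch checking the layout for just the fields visible in the hunk above (the real struct may carry further flag bits):

#include <assert.h>
#include <stdint.h>

struct timestamp_sketch {
	uint64_t timestamp:48;	/* TSC cycles; wraps after ~a day at 3 GHz */
	uint64_t pid:16;	/* who was running when the event fired */
	uint32_t seq_no;
	uint8_t  cpu;
	uint8_t  event;
};

static_assert(sizeof(struct timestamp_sketch) == 16, "same 16-byte record as before");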
diff --git a/include/litmus/trace_irq.h b/include/litmus/trace_irq.h
index f18b127a089d..0d0c042ba9c3 100644
--- a/include/litmus/trace_irq.h
+++ b/include/litmus/trace_irq.h
@@ -3,14 +3,7 @@
 
 #ifdef CONFIG_SCHED_OVERHEAD_TRACE
 
-extern DEFINE_PER_CPU(atomic_t, irq_fired_count);
-
-static inline void ft_irq_fired(void)
-{
-	/* Only called with preemptions disabled. */
-	atomic_inc(&__get_cpu_var(irq_fired_count));
-}
-
+void ft_irq_fired(void);
 
 #else
 
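Note: ft_irq_fired() is un-inlined here so callers (e.g. __do_softirq() and scheduler_ipi() below) need no per-CPU machinery from the header; the removed inline shows the underlying idiom: every interrupt bumps a counter, and overhead measurements discard samples during which the counter moved. A standalone sketch of that idiom (read_cycles() is a hypothetical stand-in for a TSC read):

static volatile unsigned long irq_fired_count;	/* per-CPU atomic in the kernel */

void example_irq_fired(void)
{
	irq_fired_count++;	/* called from every IRQ/softirq entry path */
}

unsigned long measure_overhead(void (*op)(void))
{
	unsigned long before = irq_fired_count;
	unsigned long start = read_cycles();
	op();
	unsigned long cycles = read_cycles() - start;
	/* keep the sample only if no interrupt disturbed it */
	return (irq_fired_count == before) ? cycles : 0;
}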
diff --git a/kernel/sched.c b/kernel/sched.c
index a1f10984adb3..e29a97235f26 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2601,8 +2601,12 @@ void scheduler_ipi(void)
 	struct rq *rq = this_rq();
 	struct task_struct *list = xchg(&rq->wake_list, NULL);
 
-	if (!list)
+	if (!list) {
+		/* If we don't call irq_enter(), we need to trigger the IRQ
+		 * tracing manually. */
+		ft_irq_fired();
 		return;
+	}
 
 	/*
 	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
@@ -4420,23 +4424,20 @@ litmus_need_resched_nonpreemptible:
 		raw_spin_unlock_irq(&rq->lock);
 	}
 
+	TS_SCHED2_START(prev);
 	sched_trace_task_switch_to(current);
 
 	post_schedule(rq);
 
 	if (sched_state_validate_switch()) {
-		TRACE_CUR("cpu %d: have to redo scheduling decision!\n", cpu);
+		TS_SCHED2_END(prev);
 		goto litmus_need_resched_nonpreemptible;
 	}
-	else if (current->policy == SCHED_LITMUS) {
-		TRACE_CUR("cpu %d: valid switch to rt task %s/%d.\n", cpu, current->comm, current->pid);
-	}
-	else {
-		// TRACE_CUR("cpu %d: switch: %s/%d\n", cpu, current->comm, current->pid);
-	}
 
 	preempt_enable_no_resched();
 
+	TS_SCHED2_END(prev);
+
 	if (need_resched())
 		goto need_resched;
 
@@ -4717,17 +4718,6 @@ void complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
-void complete_n(struct completion *x, int n)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&x->wait.lock, flags);
-	x->done += n;
-	__wake_up_common(&x->wait, TASK_NORMAL, n, 0, NULL);
-	spin_unlock_irqrestore(&x->wait.lock, flags);
-}
-EXPORT_SYMBOL(complete_n);
-
 static inline long __sched
 do_wait_for_common(struct completion *x, long timeout, int state)
 {
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 58cf5d18dfdc..db04161fe37c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,8 @@
  * policies)
  */
 
+#include <litmus/litmus.h>
+
 #ifdef CONFIG_RT_GROUP_SCHED
 
 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
@@ -228,8 +230,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
-		if (rt_rq->highest_prio.curr < curr->prio)
+		if (rt_rq->highest_prio.curr < curr->prio &&
+		    /* Don't subject LITMUS tasks to remote reschedules */
+		    !is_realtime(curr)) {
 			resched_task(curr);
+		}
 	}
 }
 
@@ -322,8 +327,10 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
-		resched_task(rq_of_rt_rq(rt_rq)->curr);
+	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+
+	if (rt_rq->rt_nr_running && !is_realtime(curr))
+		resched_task(curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 053aec196a50..ea438a8635d0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -221,8 +221,11 @@ asmlinkage void __do_softirq(void)
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	int cpu;
 
-	pending = local_softirq_pending();
-	account_system_vtime(current);
+	/* Mark Feather-Trace samples as "disturbed". */
+	ft_irq_fired();
+
+	pending = local_softirq_pending();
+	account_system_vtime(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
@@ -403,13 +406,6 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 /*
  * Tasklets
  */
-/*
-struct tasklet_head
-{
-	struct tasklet_struct *head;
-	struct tasklet_struct **tail;
-};
-*/
 
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
diff --git a/litmus/Kconfig b/litmus/Kconfig
index c05405094ea4..594c54342bdc 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -210,6 +210,14 @@ choice
	  Break ties between two jobs, A and B, with equal deadlines by using a
	  uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num). Job
	  A has a ~50% chance of winning a given tie-break.
+
+	  NOTES:
+	  * This method doesn't work very well if a tied job has a low-valued
+	    hash while the jobs it ties with do not make progress (that is,
+	    they don't increment to new job numbers). The job with the
+	    low-valued hash will lose most tie-breaks. This is usually not a
+	    problem unless you are doing something funky in Litmus (e.g.,
+	    worker threads that do not increment job numbers).
 
 config EDF_PID_TIE_BREAK
	bool "PID-based Tie Breaks"
@@ -363,7 +371,7 @@ config SCHED_DEBUG_TRACE_CALLER
 
 config PREEMPT_STATE_TRACE
	bool "Trace preemption state machine transitions"
-	depends on SCHED_DEBUG_TRACE
+	depends on SCHED_DEBUG_TRACE && DEBUG_KERNEL
	default n
	help
	  With this option enabled, each CPU will log when it transitions
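Note: the hashed tie-break compares hash(pid, job_num) of the contending jobs, so a job that never advances its job number keeps the same hash forever, which is exactly the pathological case the new help text warns about. An illustrative sketch (the real edf_hash() in litmus/edf_common.c builds on <linux/hash.h>; the mixing constant here is the 64-bit golden-ratio multiplier):

#include <stdint.h>

static inline uint64_t example_hash(uint32_t pid, uint32_t job_num)
{
	uint64_t key = ((uint64_t)pid << 32) | job_num;
	return key * 0x61C8864680B583EBull;	/* uniform mixer */
}

/* nonzero if A wins the deadline tie against B */
static inline int wins_tie_break(uint32_t a_pid, uint32_t a_job,
				 uint32_t b_pid, uint32_t b_job)
{
	return example_hash(a_pid, a_job) < example_hash(b_pid, b_job);
}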
diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c
index 9969ab17c190..41919b2714cb 100644
--- a/litmus/ctrldev.c
+++ b/litmus/ctrldev.c
@@ -133,6 +133,17 @@ static int __init init_litmus_ctrl_dev(void)
 
 	BUILD_BUG_ON(sizeof(struct control_page) > PAGE_SIZE);
 
+	BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint64_t));
+
+	BUILD_BUG_ON(offsetof(struct control_page, sched.raw)
+		     != LITMUS_CP_OFFSET_SCHED);
+	BUILD_BUG_ON(offsetof(struct control_page, irq_count)
+		     != LITMUS_CP_OFFSET_IRQ_COUNT);
+	BUILD_BUG_ON(offsetof(struct control_page, ts_syscall_start)
+		     != LITMUS_CP_OFFSET_TS_SC_START);
+	BUILD_BUG_ON(offsetof(struct control_page, irq_syscall_start)
+		     != LITMUS_CP_OFFSET_IRQ_SC_START);
+
 	printk("Initializing LITMUS^RT control device.\n");
 	err = misc_register(&litmus_ctrl_dev);
 	if (err)
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 255e4f36e413..441fbfddf0c2 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -22,7 +22,7 @@
 #include <litmus/fpmath.h>
 #endif
 
-#if defined(CONFIG_EDF_TIE_BREAK_HASH)
+#ifdef CONFIG_EDF_TIE_BREAK_HASH
 #include <linux/hash.h>
 static inline long edf_hash(struct task_struct *t)
 {
@@ -134,7 +134,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	}
 
 	if (first->rt_param.is_aux_task && second->rt_param.is_aux_task &&
-	    first->rt_param.inh_task == second->rt_param.inh_task) { // inh_task is !NULL for both tasks since neither was a lo_aux task
+	    first->rt_param.inh_task == second->rt_param.inh_task) {
+		// inh_task is !NULL for both tasks since neither was a lo_aux task.
 		// Both aux tasks inherit from the same task, so tie-break
 		// by base priority of the aux tasks.
 		TRACE_CUR("aux tie break!\n");
@@ -171,9 +172,10 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 	}
 
 	if (first->rt_param.is_interrupt_thread && second->rt_param.is_interrupt_thread &&
-	    first->rt_param.inh_task == second->rt_param.inh_task) { // inh_task is !NULL for both tasks since neither was a lo_klmirqd task
-		// Both klmirqd tasks inherit from the same task, so tie-break
-		// by base priority of the klmirqd tasks.
+	    first->rt_param.inh_task == second->rt_param.inh_task) {
+		// inh_task is !NULL for both tasks since neither was a lo_klmirqd task.
+		// Both klmirqd tasks inherit from the same task, so tie-break
+		// by base priority of the klmirqd tasks.
 		TRACE_CUR("klmirqd tie break!\n");
 		goto klmirqd_tie_break;
 	}
@@ -220,8 +222,12 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
 
 #endif
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
 aux_tie_break:
+#endif
+#ifdef CONFIG_LITMUS_SOFTIRQD
 klmirqd_tie_break:
+#endif
 
 	if (!is_realtime(second_task)) {
 		return 1;
@@ -236,7 +242,7 @@ klmirqd_tie_break:
 	int pid_break;
 
 #if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
-	/* Tie break by lateness. Jobs with greater lateness get
+	/* Tie break by lateness. Jobs with greater lateness get
 	 * priority. This should spread tardiness across all tasks,
 	 * especially in task sets where all tasks have the same
 	 * period and relative deadlines.
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 2411d16ba486..709be3cc8992 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -181,6 +181,18 @@ static int put_od_entry(struct od_table_entry* od)
 	return 0;
 }
 
+static long close_od_entry(struct od_table_entry *od)
+{
+	long ret;
+
+	/* Give the class a chance to reject the close. */
+	ret = fdso_close(od);
+	if (ret == 0)
+		ret = put_od_entry(od);
+
+	return ret;
+}
+
 void exit_od_table(struct task_struct* t)
 {
 	int i;
@@ -188,7 +200,7 @@ void exit_od_table(struct task_struct* t)
 	if (t->od_table) {
 		for (i = 0; i < MAX_OBJECT_DESCRIPTORS; i++)
 			if (t->od_table[i].used)
-				put_od_entry(t->od_table + i);
+				close_od_entry(t->od_table + i);
 		kfree(t->od_table);
 		t->od_table = NULL;
 	}
@@ -302,11 +314,7 @@ asmlinkage long sys_od_close(int od)
 		return ret;
 
 
-	/* give the class a chance to reject the close
-	 */
-	ret = fdso_close(t->od_table + od);
-	if (ret == 0)
-		ret = put_od_entry(t->od_table + od);
+	ret = close_od_entry(t->od_table + od);
 
 	return ret;
 }
diff --git a/litmus/fp_common.c b/litmus/fp_common.c
index 31fc2db20adf..964a4729deff 100644
--- a/litmus/fp_common.c
+++ b/litmus/fp_common.c
@@ -15,7 +15,7 @@
 #include <litmus/fp_common.h>
 
 /* fp_higher_prio -  returns true if first has a higher static priority
- * than second. Deadline ties are broken by PID.
+ * than second. Ties are broken by PID.
  *
  * both first and second may be NULL
  */
@@ -37,6 +37,9 @@ int fp_higher_prio(struct task_struct* first,
 	if (!first || !second)
 		return first && !second;
 
+	if (!is_realtime(second_task))
+		return 1;
+
 #ifdef CONFIG_LITMUS_LOCKING
 
 	/* Check for inherited priorities. Change task
@@ -51,33 +54,30 @@ int fp_higher_prio(struct task_struct* first,
 	 */
 	if (unlikely(is_priority_boosted(first_task))) {
 		/* first_task is boosted, how about second_task? */
-		if (!is_priority_boosted(second_task) ||
-		    lt_before(get_boost_start(first_task),
-			      get_boost_start(second_task)))
-			return 1;
+		if (is_priority_boosted(second_task))
+			/* break by priority point */
+			return lt_before(get_boost_start(first_task),
+					 get_boost_start(second_task));
 		else
-			return 0;
+			/* priority boosting wins. */
+			return 1;
 	} else if (unlikely(is_priority_boosted(second_task)))
 		/* second_task is boosted, first is not*/
 		return 0;
 
 #endif
 
+	/* Comparisons to itself are not expected; priority inheritance
+	 * should also not cause this to happen. */
+	BUG_ON(first_task == second_task);
 
-	return !is_realtime(second_task)  ||
-
-	       get_priority(first_task) < get_priority(second_task) ||
-
-	       /* Break by PID.
-		*/
-	       (get_priority(first_task) == get_priority(second_task) &&
-	        (first_task->pid < second_task->pid ||
-
-	       /* If the PIDs are the same then the task with the inherited
-		* priority wins.
-		*/
-	       (first_task->pid == second_task->pid &&
-		!second->rt_param.inh_task)));
+	if (get_priority(first_task) < get_priority(second_task))
+		return 1;
+	else if (get_priority(first_task) == get_priority(second_task))
+		/* Break by PID. */
+		return first_task->pid < second_task->pid;
+	else
+		return 0;
 }
 
 int fp_ready_order(struct bheap_node* a, struct bheap_node* b)
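Note: the rewritten fp_higher_prio() is now a straight cascade: non-real-time loses outright, priority boosting beats non-boosted, an earlier boost start wins among boosted tasks, then the numerically smaller (i.e. higher) fixed priority, then the lower PID; the old "same PID, inherited priority wins" clause is gone, replaced by the BUG_ON against self-comparison. A standalone restatement of the ordering with simplified stand-in fields:

struct tsk_sketch {
	int realtime, boosted;
	unsigned long long boost_start;
	unsigned int prio;	/* smaller value = higher priority */
	int pid;
};

int example_fp_higher_prio(const struct tsk_sketch *a, const struct tsk_sketch *b)
{
	if (!b->realtime)
		return 1;			/* RT beats non-RT */
	if (a->boosted)
		return b->boosted ? a->boost_start < b->boost_start : 1;
	if (b->boosted)
		return 0;
	if (a->prio != b->prio)
		return a->prio < b->prio;
	return a->pid < b->pid;			/* final tie-break: PID */
}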
diff --git a/litmus/ftdev.c b/litmus/ftdev.c
index 06fcf4cf77dc..99bc39ffbcef 100644
--- a/litmus/ftdev.c
+++ b/litmus/ftdev.c
@@ -230,13 +230,20 @@ static ssize_t ftdev_read(struct file *filp,
 			 * here with copied data because that data would get
 			 * lost if the task is interrupted (e.g., killed).
 			 */
+			mutex_unlock(&ftdm->lock);
 			set_current_state(TASK_INTERRUPTIBLE);
+
 			schedule_timeout(50);
+
 			if (signal_pending(current)) {
 				if (err == 0)
 					/* nothing read yet, signal problem */
 					err = -ERESTARTSYS;
-				break;
+				goto out;
+			}
+			if (mutex_lock_interruptible(&ftdm->lock)) {
+				err = -ERESTARTSYS;
+				goto out;
 			}
 		} else if (copied < 0) {
 			/* page fault */
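Note: the ftdev_read() fix applies the standard drop-lock/sleep/reacquire discipline: holding ftdm->lock across schedule_timeout() would stall writers (and other readers) for the whole sleep, and the reacquisition must itself be interruptible. The pattern in isolation (predicate and names are illustrative):

static ssize_t example_read_wait(struct mutex *lock)
{
	while (!data_available()) {	/* hypothetical readiness predicate */
		mutex_unlock(lock);	/* never sleep while holding it */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(50);
		if (signal_pending(current))
			return -ERESTARTSYS;	/* no lock held here */
		if (mutex_lock_interruptible(lock))
			return -ERESTARTSYS;
	}
	return 0;	/* lock held again, data ready */
}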
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 8593a8d2f107..e25854e1d143 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -30,7 +30,7 @@ void prepare_for_next_period(struct task_struct *t)
 	 * release and deadline. Lateness may be negative.
 	 */
 	t->rt_param.job_params.lateness =
-		(long long)litmus_clock() -
+		(long long)litmus_clock() -
 		(long long)t->rt_param.job_params.deadline;
 
 	setup_release(t, get_release(t) + get_rt_period(t));
@@ -40,7 +40,7 @@ void release_at(struct task_struct *t, lt_t start)
 {
 	BUG_ON(!t);
 	setup_release(t, start);
-	set_rt_flags(t, RT_F_RUNNING);
+	tsk_rt(t)->completed = 0;
 }
 
 
@@ -50,7 +50,7 @@ void release_at(struct task_struct *t, lt_t start)
 long complete_job(void)
 {
 	/* Mark that we do not execute anymore */
-	set_rt_flags(current, RT_F_SLEEP);
+	tsk_rt(current)->completed = 1;
 	/* call schedule, this will return when a new job arrives
 	 * it also takes care of preparing for the next release
 	 */
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6a1095aa7725..2911e7ec7029 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/reboot.h>
+#include <linux/stop_machine.h>
 
 #include <litmus/litmus.h>
 #include <litmus/bheap.h>
@@ -32,9 +33,6 @@
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_RAW_SPINLOCK(task_transition_lock);
-/* synchronize plugin switching */
-atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
 /* Give log messages sequential IDs. */
 atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -479,13 +477,9 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 long __litmus_admit_task(struct task_struct* tsk)
 {
 	long retval = 0;
-	unsigned long flags;
 
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
-	/* avoid scheduler plugin changing underneath us */
-	raw_spin_lock_irqsave(&task_transition_lock, flags);
-
 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
@@ -493,11 +487,8 @@ long __litmus_admit_task(struct task_struct* tsk)
 	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
 		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
 
-		bheap_node_free(tsk_rt(tsk)->heap_node);
-		release_heap_free(tsk_rt(tsk)->rel_heap);
-
 		retval = -ENOMEM;
-		goto out_unlock;
+		goto out;
 	} else {
 		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
 	}
@@ -511,14 +502,10 @@ long __litmus_admit_task(struct task_struct* tsk)
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	tsk_rt(tsk)->blocked_lock = NULL;
 	raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock);
-	//INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, prio_order); // done by scheduler
-#endif
-#ifdef CONFIG_LITMUS_SOFTIRQD
-	/* not an interrupt thread by default */
-	//tsk_rt(tsk)->is_interrupt_thread = 0;
-	//tsk_rt(tsk)->klmirqd_info = NULL;
 #endif
 
+	preempt_disable();
+
 	retval = litmus->admit_task(tsk);
 
 	if (!retval) {
@@ -527,9 +514,14 @@ long __litmus_admit_task(struct task_struct* tsk)
 		atomic_inc(&rt_task_count);
 	}
 
-out_unlock:
-	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
+	preempt_enable();
+
+	if (retval) {
+		bheap_node_free(tsk_rt(tsk)->heap_node);
+		release_heap_free(tsk_rt(tsk)->rel_heap);
+	}
 
+out:
 	return retval;
 }
 
@@ -590,39 +582,10 @@ void litmus_exit_task(struct task_struct* tsk)
 	}
 }
 
-/* IPI callback to synchronize plugin switching */
-static void synch_on_plugin_switch(void* info)
-{
-	atomic_inc(&cannot_use_plugin);
-	while (atomic_read(&cannot_use_plugin) > 0)
-		cpu_relax();
-}
-
-int switch_sched_plugin(struct sched_plugin* plugin)
+static int do_plugin_switch(void *_plugin)
 {
-	//unsigned long flags;
-	int ret = 0;
-
-	BUG_ON(!plugin);
-
-#ifdef CONFIG_LITMUS_SOFTIRQD
-	if (!klmirqd_is_dead()) {
-		kill_klmirqd();
-	}
-#endif
-
-	/* forbid other cpus to use the plugin */
-	atomic_set(&cannot_use_plugin, 1);
-	/* send IPI to force other CPUs to synch with us */
-	smp_call_function(synch_on_plugin_switch, NULL, 0);
-
-	/* wait until all other CPUs have started synch */
-	while (atomic_read(&cannot_use_plugin) < num_online_cpus()) {
-		cpu_relax();
-	}
-
-	/* stop task transitions */
-	//raw_spin_lock_irqsave(&task_transition_lock, flags);
+	int ret;
+	struct sched_plugin* plugin = _plugin;
 
 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
@@ -630,24 +593,45 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 		if (0 != ret)
 			goto out;
 
-		litmus = plugin; // switch
-		mb(); // make sure it's seen everywhere.
+		litmus = plugin; /* optimistic switch */
+		mb();
+
 		ret = litmus->activate_plugin();
 		if (0 != ret) {
 			printk(KERN_INFO "Can't activate %s (%d).\n",
 			       litmus->plugin_name, ret);
-			litmus = &linux_sched_plugin;
+			litmus = &linux_sched_plugin; /* fail to Linux */
+			ret = litmus->activate_plugin();
+			BUG_ON(ret);
 		}
-		printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", litmus->plugin_name);
+		printk(KERN_INFO "Switched to LITMUS^RT plugin %s.\n", litmus->plugin_name);
 	} else
 		ret = -EBUSY;
 out:
-	//raw_spin_unlock_irqrestore(&task_transition_lock, flags);
-	atomic_set(&cannot_use_plugin, 0);
-
 	return ret;
 }
 
+/* Switching a plugin in use is tricky.
+ * We must watch out that no real-time tasks exist
+ * (and that none is created in parallel) and that the plugin is not
+ * currently in use on any processor (in theory).
+ */
+int switch_sched_plugin(struct sched_plugin* plugin)
+{
+	BUG_ON(!plugin);
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	if (!klmirqd_is_dead()) {
+		kill_klmirqd();
+	}
+#endif
+
+	if (atomic_read(&rt_task_count) == 0)
+		return stop_machine(do_plugin_switch, plugin, NULL);
+	else
+		return -EBUSY;
+}
+
 /* Called upon fork.
  * p is the newly forked task.
  */
@@ -772,8 +756,6 @@ static int __init _init_litmus(void)
 	 */
 	printk("Starting LITMUS^RT kernel\n");
 
-	BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t));
-
 	register_sched_plugin(&linux_sched_plugin);
 
 	bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
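Note: stop_machine() replaces the hand-rolled IPI-and-spin barrier: the callback runs on a single CPU while every other online CPU spins with interrupts disabled, so do_plugin_switch() executes with no task in a syscall, no scheduler activity, and no plugin code running anywhere, which is the guarantee the removed synch_on_plugin_switch() dance only approximated. The idiom in isolation (the global and the callback are hypothetical):

#include <linux/stop_machine.h>

static int example_mode;

static int do_state_flip(void *data)
{
	/* system-wide quiescent state: nothing else runs on any CPU */
	example_mode = *(int *)data;
	return 0;
}

static int flip_mode(int new_mode)
{
	/* third argument selects the CPU set; NULL means "any one CPU" */
	return stop_machine(do_state_flip, &new_mode, NULL);
}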
diff --git a/litmus/locking.c b/litmus/locking.c
index 7af1dd69a079..c21ec1ae36d7 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -1,7 +1,11 @@
+#include <linux/sched.h>
+#include <litmus/litmus.h>
 #include <litmus/fdso.h>
 
 #ifdef CONFIG_LITMUS_LOCKING
 
+#include <linux/sched.h>
+#include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/trace.h>
 #include <litmus/litmus.h>
@@ -95,6 +99,10 @@ asmlinkage long sys_litmus_lock(int lock_od)
 	struct od_table_entry* entry;
 	struct litmus_lock* l;
 
+	TS_SYSCALL_IN_START;
+
+	TS_SYSCALL_IN_END;
+
 	TS_LOCK_START;
 
 	entry = get_entry_for_od(lock_od);
@@ -109,6 +117,8 @@ asmlinkage long sys_litmus_lock(int lock_od)
 	 * this into account when computing overheads. */
 	TS_LOCK_END;
 
+	TS_SYSCALL_OUT_START;
+
 	return err;
 }
 
@@ -118,6 +128,10 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	struct od_table_entry* entry;
 	struct litmus_lock* l;
 
+	TS_SYSCALL_IN_START;
+
+	TS_SYSCALL_IN_END;
+
 	TS_UNLOCK_START;
 
 	entry = get_entry_for_od(lock_od);
@@ -132,6 +146,8 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	 * account when computing overheads. */
 	TS_UNLOCK_END;
 
+	TS_SYSCALL_OUT_START;
+
 	return err;
 }
 
@@ -557,7 +573,6 @@ void suspend_for_lock(void)
 	unsigned int gpu_hide;
 #endif
 
-
 	//#ifdef CONFIG_REALTIME_AUX_TASKS
 	//	if (tsk_rt(t)->has_aux_tasks) {
 	//		/* hide from aux tasks so they can't inherit our priority when we block
diff --git a/litmus/preempt.c b/litmus/preempt.c index c9ccc80c1df9..86ad2efb591a 100644 --- a/litmus/preempt.c +++ b/litmus/preempt.c | |||
@@ -2,6 +2,7 @@ | |||
2 | 2 | ||
3 | #include <litmus/litmus.h> | 3 | #include <litmus/litmus.h> |
4 | #include <litmus/preempt.h> | 4 | #include <litmus/preempt.h> |
5 | #include <litmus/trace.h> | ||
5 | 6 | ||
6 | /* The rescheduling state of each processor. | 7 | /* The rescheduling state of each processor. |
7 | */ | 8 | */ |
@@ -51,6 +52,7 @@ void sched_state_ipi(void) | |||
51 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", | 52 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", |
52 | current->comm, current->pid); | 53 | current->comm, current->pid); |
53 | */ | 54 | */ |
55 | TS_SEND_RESCHED_END; | ||
54 | } else { | 56 | } else { |
55 | /* ignore */ | 57 | /* ignore */ |
56 | /* | 58 | /* |
@@ -103,6 +105,7 @@ void litmus_reschedule(int cpu) | |||
103 | set_tsk_need_resched(current); | 105 | set_tsk_need_resched(current); |
104 | } | 106 | } |
105 | else { | 107 | else { |
108 | TS_SEND_RESCHED_START(cpu); | ||
106 | smp_send_reschedule(cpu); | 109 | smp_send_reschedule(cpu); |
107 | } | 110 | } |
108 | } | 111 | } |
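With TS_SEND_RESCHED_START now taken immediately before smp_send_reschedule() and TS_SEND_RESCHED_END issued from sched_state_ipi() on the remote CPU, the pair spans the actual flight time of the reschedule IPI. In outline:

	/* sending CPU (litmus_reschedule() above) */
	TS_SEND_RESCHED_START(cpu);   /* stamp, then kick the remote CPU */
	smp_send_reschedule(cpu);

	/* receiving CPU (sched_state_ipi(), called from the resched
	 * handler): close the pair. The handler's own IRQ is expected,
	 * so it is hidden from the sample's interrupt count; see
	 * save_timestamp_hide_irq() in litmus/trace.c below. */
	TS_SEND_RESCHED_END;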
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index d4f030728d3c..54322e278a1e 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
@@ -336,12 +336,7 @@ void __add_release_on(rt_domain_t* rt, struct task_struct *task, | |||
336 | list_add(&tsk_rt(task)->list, &rt->tobe_released); | 336 | list_add(&tsk_rt(task)->list, &rt->tobe_released); |
337 | task->rt_param.domain = rt; | 337 | task->rt_param.domain = rt; |
338 | 338 | ||
339 | /* start release timer */ | ||
340 | TS_SCHED2_START(task); | ||
341 | |||
342 | arm_release_timer_on(rt, target_cpu); | 339 | arm_release_timer_on(rt, target_cpu); |
343 | |||
344 | TS_SCHED2_END(task); | ||
345 | } | 340 | } |
346 | #endif | 341 | #endif |
347 | 342 | ||
@@ -354,11 +349,6 @@ void __add_release(rt_domain_t* rt, struct task_struct *task) | |||
354 | list_add(&tsk_rt(task)->list, &rt->tobe_released); | 349 | list_add(&tsk_rt(task)->list, &rt->tobe_released); |
355 | task->rt_param.domain = rt; | 350 | task->rt_param.domain = rt; |
356 | 351 | ||
357 | /* start release timer */ | ||
358 | TS_SCHED2_START(task); | ||
359 | |||
360 | arm_release_timer(rt); | 352 | arm_release_timer(rt); |
361 | |||
362 | TS_SCHED2_END(task); | ||
363 | } | 353 | } |
364 | 354 | ||
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index a454832b2aa8..db47f4413329 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -232,7 +232,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
232 | 232 | ||
233 | /* Link new task to CPU. */ | 233 | /* Link new task to CPU. */ |
234 | if (linked) { | 234 | if (linked) { |
235 | set_rt_flags(linked, RT_F_RUNNING); | 235 | tsk_rt(linked)->completed = 0; |
236 | /* handle task is already scheduled somewhere! */ | 236 | /* handle task is already scheduled somewhere! */ |
237 | on_cpu = linked->rt_param.scheduled_on; | 237 | on_cpu = linked->rt_param.scheduled_on; |
238 | if (on_cpu != NO_CPU) { | 238 | if (on_cpu != NO_CPU) { |
@@ -423,7 +423,7 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
423 | TRACE_TASK(t, "job_completion().\n"); | 423 | TRACE_TASK(t, "job_completion().\n"); |
424 | 424 | ||
425 | /* set flags */ | 425 | /* set flags */ |
426 | set_rt_flags(t, RT_F_SLEEP); | 426 | tsk_rt(t)->completed = 1; |
427 | /* prepare for next period */ | 427 | /* prepare for next period */ |
428 | prepare_for_next_period(t); | 428 | prepare_for_next_period(t); |
429 | if (is_released(t, litmus_clock())) | 429 | if (is_released(t, litmus_clock())) |
@@ -793,7 +793,7 @@ static void cedf_change_prio_pai_tasklet(struct task_struct *old_prio, | |||
793 | * | 793 | * |
794 | * - !is_running(scheduled) // the job blocks | 794 | * - !is_running(scheduled) // the job blocks |
795 | * - scheduled->timeslice == 0 // the job completed (forcefully) | 795 | * - scheduled->timeslice == 0 // the job completed (forcefully) |
796 | * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | 796 | * - is_completed() // the job completed (by syscall) |
797 | * - linked != scheduled // we need to reschedule (for any reason) | 797 | * - linked != scheduled // we need to reschedule (for any reason) |
798 | * - is_np(scheduled) // rescheduling must be delayed, | 798 | * - is_np(scheduled) // rescheduling must be delayed, |
799 | * sys_exit_np must be requested | 799 | * sys_exit_np must be requested |
@@ -836,7 +836,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
836 | budget_exhausted(entry->scheduled) && | 836 | budget_exhausted(entry->scheduled) && |
837 | !sigbudget_sent(entry->scheduled); | 837 | !sigbudget_sent(entry->scheduled); |
838 | np = exists && is_np(entry->scheduled); | 838 | np = exists && is_np(entry->scheduled); |
839 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | 839 | sleep = exists && is_completed(entry->scheduled); |
840 | preempt = entry->scheduled != entry->linked; | 840 | preempt = entry->scheduled != entry->linked; |
841 | 841 | ||
842 | #ifdef WANT_ALL_SCHED_EVENTS | 842 | #ifdef WANT_ALL_SCHED_EVENTS |
@@ -1004,30 +1004,25 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
1004 | 1004 | ||
1005 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); | 1005 | raw_spin_lock_irqsave(&cluster->cluster_lock, flags); |
1006 | 1006 | ||
1007 | #if 0 // sporadic task model | 1007 | #if 0 |
1008 | /* We need to take suspensions because of semaphores into | 1008 | /* sporadic task model. will increment job numbers automatically */ |
1009 | * account! If a job resumes after being suspended due to acquiring | 1009 | now = litmus_clock(); |
1010 | * a semaphore, it should never be treated as a new job release. | 1010 | if (is_tardy(task, now)) { |
1011 | */ | 1011 | /* new sporadic release */ |
1012 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | 1012 | release_at(task, now); |
1013 | set_rt_flags(task, RT_F_RUNNING); | 1013 | sched_trace_task_release(task); |
1014 | } else { | 1014 | } |
1015 | now = litmus_clock(); | 1015 | else { |
1016 | if (is_tardy(task, now)) { | 1016 | if (task->rt.time_slice) { |
1017 | /* new sporadic release */ | 1017 | /* came back in time before deadline |
1018 | release_at(task, now); | 1018 | */ |
1019 | sched_trace_task_release(task); | 1019 | tsk_rt(task)->completed = 0; |
1020 | } | ||
1021 | else { | ||
1022 | if (task->rt.time_slice) { | ||
1023 | /* came back in time before deadline | ||
1024 | */ | ||
1025 | set_rt_flags(task, RT_F_RUNNING); | ||
1026 | } | ||
1027 | } | 1020 | } |
1028 | } | 1021 | } |
1029 | #else | 1022 | #else |
1030 | set_rt_flags(task, RT_F_RUNNING); // periodic model | 1023 | /* periodic task model. don't force job to end. |
1024 | * rely on user to say when jobs complete or when budget expires. */ | ||
1025 | tsk_rt(task)->completed = 0; | ||
1031 | #endif | 1026 | #endif |
1032 | 1027 | ||
1033 | #ifdef CONFIG_REALTIME_AUX_TASKS | 1028 | #ifdef CONFIG_REALTIME_AUX_TASKS |
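Throughout the plugins, the RT_F_RUNNING/RT_F_SLEEP flag word is retired in favor of a dedicated completion bit. A plausible sketch of the accessor this diff relies on (presumably added to the litmus headers, which are not shown here):

	/* Sketch: assumes rt_param gained an 'unsigned int completed'. */
	#define is_completed(t)  (tsk_rt(t)->completed)

	/* Mapping applied throughout this patch:
	 *   set_rt_flags(t, RT_F_SLEEP)    ->  tsk_rt(t)->completed = 1;
	 *   set_rt_flags(t, RT_F_RUNNING)  ->  tsk_rt(t)->completed = 0;
	 * The bit is set on (forced or voluntary) job completion and
	 * cleared whenever the job is (re)linked, requeued, or scheduled. */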
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 7eb44fee1861..01791a18e8f3 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -74,7 +74,7 @@ | |||
74 | * (thereby removing its association with this | 74 | * (thereby removing its association with this |
75 | * CPU). However, it will not requeue the | 75 | * CPU). However, it will not requeue the |
76 | * previously linked task (if any). It will set | 76 | * previously linked task (if any). It will set |
77 | * T's state to RT_F_RUNNING and check whether | 77 | * T's state to not completed and check whether |
78 | * it is already running somewhere else. If T | 78 | * it is already running somewhere else. If T |
79 | * is scheduled somewhere else it will link | 79 | * is scheduled somewhere else it will link |
80 | * it to that CPU instead (and pull the linked | 80 | * it to that CPU instead (and pull the linked |
@@ -224,7 +224,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked, | |||
224 | 224 | ||
225 | /* Link new task to CPU. */ | 225 | /* Link new task to CPU. */ |
226 | if (linked) { | 226 | if (linked) { |
227 | set_rt_flags(linked, RT_F_RUNNING); | 227 | tsk_rt(linked)->completed = 0; |
228 | /* handle task is already scheduled somewhere! */ | 228 | /* handle task is already scheduled somewhere! */ |
229 | on_cpu = linked->rt_param.scheduled_on; | 229 | on_cpu = linked->rt_param.scheduled_on; |
230 | if (on_cpu != NO_CPU) { | 230 | if (on_cpu != NO_CPU) { |
@@ -412,7 +412,7 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
412 | TRACE_TASK(t, "job_completion().\n"); | 412 | TRACE_TASK(t, "job_completion().\n"); |
413 | 413 | ||
414 | /* set flags */ | 414 | /* set flags */ |
415 | set_rt_flags(t, RT_F_SLEEP); | 415 | tsk_rt(t)->completed = 1; |
416 | /* prepare for next period */ | 416 | /* prepare for next period */ |
417 | prepare_for_next_period(t); | 417 | prepare_for_next_period(t); |
418 | 418 | ||
@@ -792,7 +792,7 @@ static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio, | |||
792 | * | 792 | * |
793 | * - !is_running(scheduled) // the job blocks | 793 | * - !is_running(scheduled) // the job blocks |
794 | * - scheduled->timeslice == 0 // the job completed (forcefully) | 794 | * - scheduled->timeslice == 0 // the job completed (forcefully) |
795 | * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | 795 | * - is_completed() // the job completed (by syscall) |
796 | * - linked != scheduled // we need to reschedule (for any reason) | 796 | * - linked != scheduled // we need to reschedule (for any reason) |
797 | * - is_np(scheduled) // rescheduling must be delayed, | 797 | * - is_np(scheduled) // rescheduling must be delayed, |
798 | * sys_exit_np must be requested | 798 | * sys_exit_np must be requested |
@@ -835,7 +835,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
835 | budget_exhausted(entry->scheduled) && | 835 | budget_exhausted(entry->scheduled) && |
836 | !sigbudget_sent(entry->scheduled); | 836 | !sigbudget_sent(entry->scheduled); |
837 | np = exists && is_np(entry->scheduled); | 837 | np = exists && is_np(entry->scheduled); |
838 | sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | 838 | sleep = exists && is_completed(entry->scheduled); |
839 | preempt = entry->scheduled != entry->linked; | 839 | preempt = entry->scheduled != entry->linked; |
840 | 840 | ||
841 | #ifdef WANT_ALL_SCHED_EVENTS | 841 | #ifdef WANT_ALL_SCHED_EVENTS |
@@ -1009,31 +1009,24 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
1009 | 1009 | ||
1010 | raw_spin_lock_irqsave(&gsnedf_lock, flags); | 1010 | raw_spin_lock_irqsave(&gsnedf_lock, flags); |
1011 | 1011 | ||
1012 | 1012 | #if 0 | |
1013 | #if 0 // sporadic task model | 1013 | /* sporadic task model. will increment job numbers automatically */ |
1014 | /* We need to take suspensions because of semaphores into | 1014 | now = litmus_clock(); |
1015 | * account! If a job resumes after being suspended due to acquiring | 1015 | if (is_tardy(task, now)) { |
1016 | * a semaphore, it should never be treated as a new job release. | 1016 | /* new sporadic release */ |
1017 | */ | 1017 | release_at(task, now); |
1018 | if (get_rt_flags(task) == RT_F_EXIT_SEM) { | 1018 | sched_trace_task_release(task); |
1019 | set_rt_flags(task, RT_F_RUNNING); | 1019 | } |
1020 | } else { | 1020 | else { |
1021 | now = litmus_clock(); | 1021 | if (task->rt.time_slice) { |
1022 | if (is_tardy(task, now)) { | 1022 | /* came back in time before deadline |
1023 | /* new sporadic release */ | 1023 | */ |
1024 | release_at(task, now); | 1024 | tsk_rt(task)->completed = 0; |
1025 | sched_trace_task_release(task); | ||
1026 | } | ||
1027 | else { | ||
1028 | if (task->rt.time_slice) { | ||
1029 | /* came back in time before deadline | ||
1030 | */ | ||
1031 | set_rt_flags(task, RT_F_RUNNING); | ||
1032 | } | ||
1033 | } | 1025 | } |
1034 | } | 1026 | } |
1035 | #else // periodic task model | 1027 | #else |
1036 | set_rt_flags(task, RT_F_RUNNING); | 1028 | /* don't force job to end. rely on user to say when jobs complete */ |
1029 | tsk_rt(task)->completed = 0; | ||
1037 | #endif | 1030 | #endif |
1038 | 1031 | ||
1039 | #ifdef CONFIG_REALTIME_AUX_TASKS | 1032 | #ifdef CONFIG_REALTIME_AUX_TASKS |
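Both wake-up paths keep the sporadic-model branch compiled out under #if 0. Were it enabled, a job waking after its deadline would get a fresh sporadic release via release_at(); the is_tardy() test presumably reduces to a deadline comparison along these lines (sketch, assumed from the litmus headers):

	/* Sketch: a job is tardy once 'now' is at or past its current
	 * deadline. */
	#define is_tardy(t, now) \
		lt_before_eq(tsk_rt(t)->job_params.deadline, (now))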
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 62854b576796..9de03c95b825 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -102,11 +102,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
102 | } | 102 | } |
103 | } | 103 | } |
104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW | 104 | #ifdef __ARCH_WANT_UNLOCKED_CTXSW |
105 | if (next->oncpu) | 105 | if (next->on_cpu) |
106 | { | ||
107 | TRACE_TASK(next, "waiting for !oncpu"); | 106 | TRACE_TASK(next, "waiting for !oncpu"); |
108 | } | 107 | while (next->on_cpu) { |
109 | while (next->oncpu) { | ||
110 | cpu_relax(); | 108 | cpu_relax(); |
111 | mb(); | 109 | mb(); |
112 | } | 110 | } |
@@ -198,6 +196,9 @@ static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, | |||
198 | 196 | ||
199 | static void yield_task_litmus(struct rq *rq) | 197 | static void yield_task_litmus(struct rq *rq) |
200 | { | 198 | { |
199 | TS_SYSCALL_IN_START; | ||
200 | TS_SYSCALL_IN_END; | ||
201 | |||
201 | BUG_ON(rq->curr != current); | 202 | BUG_ON(rq->curr != current); |
202 | /* sched_yield() is called to trigger delayed preemptions. | 203 | /* sched_yield() is called to trigger delayed preemptions. |
203 | * Thus, mark the current task as needing to be rescheduled. | 204 | * Thus, mark the current task as needing to be rescheduled. |
@@ -206,6 +207,8 @@ static void yield_task_litmus(struct rq *rq) | |||
206 | */ | 207 | */ |
207 | clear_exit_np(current); | 208 | clear_exit_np(current); |
208 | litmus_reschedule_local(); | 209 | litmus_reschedule_local(); |
210 | |||
211 | TS_SYSCALL_OUT_START; | ||
209 | } | 212 | } |
210 | 213 | ||
211 | /* Plugins are responsible for this. | 214 | /* Plugins are responsible for this. |
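As in the locking syscalls, yield_task_litmus() opens and immediately closes a SYSCALL_IN pair on entry and opens a SYSCALL_OUT pair before returning; paired with a user-side cycle-counter read around the syscall, this isolates the kernel entry/exit cost of sched_yield(). A hypothetical user-side counterpart (get_cycles() stands in for a raw TSC read; not part of this patch):

	#include <stdint.h>

	extern uint64_t get_cycles(void);   /* hypothetical TSC wrapper */

	static uint64_t yield_round_trip(void)
	{
		uint64_t start = get_cycles();
		sched_yield();                  /* hits yield_task_litmus() */
		/* includes entry, the SYSCALL_IN/OUT window, and exit */
		return get_cycles() - start;
	}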
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index 72c06a492ef9..6a89b003306c 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c | |||
@@ -254,7 +254,7 @@ static void check_preempt(struct task_struct* t) | |||
254 | { | 254 | { |
255 | int cpu = NO_CPU; | 255 | int cpu = NO_CPU; |
256 | if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on && | 256 | if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on && |
257 | tsk_rt(t)->present) { | 257 | is_present(t)) { |
258 | /* the task can be scheduled and | 258 | /* the task can be scheduled and |
259 | * is not scheduled where it ought to be scheduled | 259 | * is not scheduled where it ought to be scheduled |
260 | */ | 260 | */ |
@@ -299,7 +299,7 @@ static void pfair_prepare_next_period(struct task_struct* t) | |||
299 | struct pfair_param* p = tsk_pfair(t); | 299 | struct pfair_param* p = tsk_pfair(t); |
300 | 300 | ||
301 | prepare_for_next_period(t); | 301 | prepare_for_next_period(t); |
302 | get_rt_flags(t) = RT_F_RUNNING; | 302 | tsk_rt(t)->completed = 0; |
303 | p->release += p->period; | 303 | p->release += p->period; |
304 | } | 304 | } |
305 | 305 | ||
@@ -310,7 +310,7 @@ static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) | |||
310 | int to_relq; | 310 | int to_relq; |
311 | p->cur = (p->cur + 1) % p->quanta; | 311 | p->cur = (p->cur + 1) % p->quanta; |
312 | if (!p->cur) { | 312 | if (!p->cur) { |
313 | if (tsk_rt(t)->present) { | 313 | if (is_present(t)) { |
314 | /* The job overran; we start a new budget allocation. */ | 314 | /* The job overran; we start a new budget allocation. */ |
315 | pfair_prepare_next_period(t); | 315 | pfair_prepare_next_period(t); |
316 | } else { | 316 | } else { |
@@ -598,7 +598,7 @@ static int safe_to_schedule(struct task_struct* t, int cpu) | |||
598 | "scheduled already on %d.\n", cpu, where); | 598 | "scheduled already on %d.\n", cpu, where); |
599 | return 0; | 599 | return 0; |
600 | } else | 600 | } else |
601 | return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING; | 601 | return is_present(t) && !is_completed(t); |
602 | } | 602 | } |
603 | 603 | ||
604 | static struct task_struct* pfair_schedule(struct task_struct * prev) | 604 | static struct task_struct* pfair_schedule(struct task_struct * prev) |
@@ -621,7 +621,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) | |||
621 | raw_spin_lock(cpu_lock(state)); | 621 | raw_spin_lock(cpu_lock(state)); |
622 | 622 | ||
623 | blocks = is_realtime(prev) && !is_running(prev); | 623 | blocks = is_realtime(prev) && !is_running(prev); |
624 | completion = is_realtime(prev) && get_rt_flags(prev) == RT_F_SLEEP; | 624 | completion = is_realtime(prev) && is_completed(prev); |
625 | out_of_time = is_realtime(prev) && time_after(cur_release(prev), | 625 | out_of_time = is_realtime(prev) && time_after(cur_release(prev), |
626 | state->local_tick); | 626 | state->local_tick); |
627 | 627 | ||
@@ -720,7 +720,7 @@ static void pfair_task_wake_up(struct task_struct *t) | |||
720 | /* only add to ready queue if the task isn't still linked somewhere */ | 720 | /* only add to ready queue if the task isn't still linked somewhere */ |
721 | if (requeue) { | 721 | if (requeue) { |
722 | TRACE_TASK(t, "requeueing required\n"); | 722 | TRACE_TASK(t, "requeueing required\n"); |
723 | tsk_rt(t)->flags = RT_F_RUNNING; | 723 | tsk_rt(t)->completed = 0; |
724 | __add_ready(&cluster->pfair, t); | 724 | __add_ready(&cluster->pfair, t); |
725 | } | 725 | } |
726 | 726 | ||
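sched_pfair.c likewise switches from poking tsk_rt(t)->present and the flag word directly to the shared predicates. A sketch of is_present() as presumably defined in the litmus headers:

	/* Sketch: a task participates in scheduling decisions only while
	 * its 'present' bit is set (cleared across self-suspensions). */
	#define is_present(t)  ((t) && tsk_rt(t)->present)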
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c index 6129eb94d3ea..a96c2b1aa26f 100644 --- a/litmus/sched_pfp.c +++ b/litmus/sched_pfp.c | |||
@@ -55,7 +55,7 @@ static void preempt(pfp_domain_t *pfp) | |||
55 | 55 | ||
56 | static unsigned int priority_index(struct task_struct* t) | 56 | static unsigned int priority_index(struct task_struct* t) |
57 | { | 57 | { |
58 | #ifdef CONFIG_LOCKING | 58 | #ifdef CONFIG_LITMUS_LOCKING |
59 | if (unlikely(t->rt_param.inh_task)) | 59 | if (unlikely(t->rt_param.inh_task)) |
60 | /* use effective priority */ | 60 | /* use effective priority */ |
61 | t = t->rt_param.inh_task; | 61 | t = t->rt_param.inh_task; |
@@ -95,6 +95,12 @@ static void pfp_release_jobs(rt_domain_t* rt, struct bheap* tasks) | |||
95 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | 95 | raw_spin_unlock_irqrestore(&pfp->slock, flags); |
96 | } | 96 | } |
97 | 97 | ||
98 | static void pfp_preempt_check(pfp_domain_t *pfp) | ||
99 | { | ||
100 | if (fp_higher_prio(fp_prio_peek(&pfp->ready_queue), pfp->scheduled)) | ||
101 | preempt(pfp); | ||
102 | } | ||
103 | |||
98 | static void pfp_domain_init(pfp_domain_t* pfp, | 104 | static void pfp_domain_init(pfp_domain_t* pfp, |
99 | int cpu) | 105 | int cpu) |
100 | { | 106 | { |
@@ -106,10 +112,9 @@ static void pfp_domain_init(pfp_domain_t* pfp, | |||
106 | 112 | ||
107 | static void requeue(struct task_struct* t, pfp_domain_t *pfp) | 113 | static void requeue(struct task_struct* t, pfp_domain_t *pfp) |
108 | { | 114 | { |
109 | if (t->state != TASK_RUNNING) | 115 | BUG_ON(!is_running(t)); |
110 | TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); | ||
111 | 116 | ||
112 | set_rt_flags(t, RT_F_RUNNING); | 117 | tsk_rt(t)->completed = 0; |
113 | if (is_released(t, litmus_clock())) | 118 | if (is_released(t, litmus_clock())) |
114 | fp_prio_add(&pfp->ready_queue, t, priority_index(t)); | 119 | fp_prio_add(&pfp->ready_queue, t, priority_index(t)); |
115 | else | 120 | else |
@@ -121,8 +126,10 @@ static void job_completion(struct task_struct* t, int forced) | |||
121 | sched_trace_task_completion(t,forced); | 126 | sched_trace_task_completion(t,forced); |
122 | TRACE_TASK(t, "job_completion().\n"); | 127 | TRACE_TASK(t, "job_completion().\n"); |
123 | 128 | ||
124 | set_rt_flags(t, RT_F_SLEEP); | 129 | tsk_rt(t)->completed = 1; |
125 | prepare_for_next_period(t); | 130 | prepare_for_next_period(t); |
131 | if (is_released(t, litmus_clock())) | ||
132 | sched_trace_task_release(t); | ||
126 | } | 133 | } |
127 | 134 | ||
128 | static void pfp_tick(struct task_struct *t) | 135 | static void pfp_tick(struct task_struct *t) |
@@ -185,7 +192,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev) | |||
185 | budget_exhausted(pfp->scheduled) && | 192 | budget_exhausted(pfp->scheduled) && |
186 | !sigbudget_sent(pfp->scheduled); | 193 | !sigbudget_sent(pfp->scheduled); |
187 | np = exists && is_np(pfp->scheduled); | 194 | np = exists && is_np(pfp->scheduled); |
188 | sleep = exists && get_rt_flags(pfp->scheduled) == RT_F_SLEEP; | 195 | sleep = exists && is_completed(pfp->scheduled); |
189 | migrate = exists && get_partition(pfp->scheduled) != pfp->cpu; | 196 | migrate = exists && get_partition(pfp->scheduled) != pfp->cpu; |
190 | preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev); | 197 | preempt = migrate || fp_preemption_needed(&pfp->ready_queue, prev); |
191 | 198 | ||
@@ -233,6 +240,26 @@ static struct task_struct* pfp_schedule(struct task_struct * prev) | |||
233 | if (pfp->scheduled && !blocks && !migrate) | 240 | if (pfp->scheduled && !blocks && !migrate) |
234 | requeue(pfp->scheduled, pfp); | 241 | requeue(pfp->scheduled, pfp); |
235 | next = fp_prio_take(&pfp->ready_queue); | 242 | next = fp_prio_take(&pfp->ready_queue); |
243 | if (next == prev) { | ||
244 | struct task_struct *t = fp_prio_peek(&pfp->ready_queue); | ||
245 | TRACE_TASK(next, "next==prev sleep=%d oot=%d np=%d preempt=%d migrate=%d " | ||
246 | "boost=%d empty=%d prio-idx=%u prio=%u\n", | ||
247 | sleep, out_of_time, np, preempt, migrate, | ||
248 | is_priority_boosted(next), | ||
249 | t == NULL, | ||
250 | priority_index(next), | ||
251 | get_priority(next)); | ||
252 | if (t) | ||
253 | TRACE_TASK(t, "waiter boost=%d prio-idx=%u prio=%u\n", | ||
254 | is_priority_boosted(t), | ||
255 | priority_index(t), | ||
256 | get_priority(t)); | ||
257 | } | ||
258 | /* If preempt is set, we should not see the same task again. */ | ||
259 | BUG_ON(preempt && next == prev); | ||
260 | /* Similarly, if preempt is set, then next may not be NULL, | ||
261 | * unless it's a migration. */ | ||
262 | BUG_ON(preempt && !migrate && next == NULL); | ||
236 | } else | 263 | } else |
237 | /* Only override Linux scheduler if we have a real-time task | 264 | /* Only override Linux scheduler if we have a real-time task |
238 | * scheduled that needs to continue. | 265 | * scheduled that needs to continue. |
@@ -242,7 +269,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev) | |||
242 | 269 | ||
243 | if (next) { | 270 | if (next) { |
244 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | 271 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); |
245 | set_rt_flags(next, RT_F_RUNNING); | 272 | tsk_rt(next)->completed = 0; |
246 | } else { | 273 | } else { |
247 | TRACE("becoming idle at %llu\n", litmus_clock()); | 274 | TRACE("becoming idle at %llu\n", litmus_clock()); |
248 | } | 275 | } |
@@ -307,7 +334,7 @@ static void pfp_task_new(struct task_struct * t, int on_rq, int running) | |||
307 | } else { | 334 | } else { |
308 | requeue(t, pfp); | 335 | requeue(t, pfp); |
309 | /* maybe we have to reschedule */ | 336 | /* maybe we have to reschedule */ |
310 | preempt(pfp); | 337 | pfp_preempt_check(pfp); |
311 | } | 338 | } |
312 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | 339 | raw_spin_unlock_irqrestore(&pfp->slock, flags); |
313 | } | 340 | } |
@@ -353,10 +380,14 @@ static void pfp_task_wake_up(struct task_struct *task) | |||
353 | * and won. Also, don't requeue if it is still queued, which can | 380 | * and won. Also, don't requeue if it is still queued, which can |
354 | * happen under the DPCP due to wake-ups racing with migrations. | 381 | * happen under the DPCP due to wake-ups racing with migrations. |
355 | */ | 382 | */ |
356 | if (pfp->scheduled != task) | 383 | if (pfp->scheduled != task) { |
357 | requeue(task, pfp); | 384 | requeue(task, pfp); |
385 | pfp_preempt_check(pfp); | ||
386 | } | ||
358 | 387 | ||
388 | #ifdef CONFIG_LITMUS_LOCKING | ||
359 | out_unlock: | 389 | out_unlock: |
390 | #endif | ||
360 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | 391 | raw_spin_unlock_irqrestore(&pfp->slock, flags); |
361 | TRACE_TASK(task, "wake up done\n"); | 392 | TRACE_TASK(task, "wake up done\n"); |
362 | } | 393 | } |
@@ -471,17 +502,10 @@ static void boost_priority(struct task_struct* t, lt_t priority_point) | |||
471 | /* tie-break by protocol-specific priority point */ | 502 | /* tie-break by protocol-specific priority point */ |
472 | tsk_rt(t)->boost_start_time = priority_point; | 503 | tsk_rt(t)->boost_start_time = priority_point; |
473 | 504 | ||
474 | if (pfp->scheduled != t) { | 505 | /* Priority boosting currently only takes effect for already-scheduled |
475 | /* holder may be queued: first stop queue changes */ | 506 | * tasks. This is sufficient since priority boosting only kicks in as |
476 | raw_spin_lock(&pfp->domain.release_lock); | 507 | * part of lock acquisitions. */ |
477 | if (is_queued(t) && | 508 | BUG_ON(pfp->scheduled != t); |
478 | /* If it is queued, then we need to re-order. */ | ||
479 | bheap_decrease(fp_ready_order, tsk_rt(t)->heap_node) && | ||
480 | /* If we bubbled to the top, then we need to check for preemptions. */ | ||
481 | fp_preemption_needed(&pfp->ready_queue, pfp->scheduled)) | ||
482 | preempt(pfp); | ||
483 | raw_spin_unlock(&pfp->domain.release_lock); | ||
484 | } /* else: nothing to do since the job is not queued while scheduled */ | ||
485 | 509 | ||
486 | raw_spin_unlock_irqrestore(&pfp->slock, flags); | 510 | raw_spin_unlock_irqrestore(&pfp->slock, flags); |
487 | } | 511 | } |
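The simplified boost_priority() now asserts that only the currently scheduled job boosts itself, which matches how the P-FP locking protocols use it. A hypothetical call shape for orientation (the lock/unlock paths themselves are not part of this hunk):

	/* Sketch: a job boosts itself while scheduled, before it may
	 * suspend waiting for the resource, and unboosts on release. */
	boost_priority(current, litmus_clock()); /* tie-break: request time */
	/* ... block until granted, run the critical section ... */
	unboost_priority(current);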
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index a5fda133bad9..63fa6103882a 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c | |||
@@ -60,7 +60,7 @@ static void requeue(struct task_struct* t, rt_domain_t *edf) | |||
60 | if (t->state != TASK_RUNNING) | 60 | if (t->state != TASK_RUNNING) |
61 | TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); | 61 | TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); |
62 | 62 | ||
63 | set_rt_flags(t, RT_F_RUNNING); | 63 | tsk_rt(t)->completed = 0; |
64 | if (is_released(t, litmus_clock())) | 64 | if (is_released(t, litmus_clock())) |
65 | __add_ready(edf, t); | 65 | __add_ready(edf, t); |
66 | else | 66 | else |
@@ -133,6 +133,15 @@ static void unboost_priority(struct task_struct* t) | |||
133 | 133 | ||
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | static int psnedf_preempt_check(psnedf_domain_t *pedf) | ||
137 | { | ||
138 | if (edf_preemption_needed(&pedf->domain, pedf->scheduled)) { | ||
139 | preempt(pedf); | ||
140 | return 1; | ||
141 | } else | ||
142 | return 0; | ||
143 | } | ||
144 | |||
136 | /* This check is trivial in partitioned systems as we only have to consider | 145 | /* This check is trivial in partitioned systems as we only have to consider |
137 | * the CPU of the partition. | 146 | * the CPU of the partition. |
138 | */ | 147 | */ |
@@ -143,11 +152,7 @@ static int psnedf_check_resched(rt_domain_t *edf) | |||
143 | /* because this is a callback from rt_domain_t we already hold | 152 | /* because this is a callback from rt_domain_t we already hold |
144 | * the necessary lock for the ready queue | 153 | * the necessary lock for the ready queue |
145 | */ | 154 | */ |
146 | if (edf_preemption_needed(edf, pedf->scheduled)) { | 155 | return psnedf_preempt_check(pedf); |
147 | preempt(pedf); | ||
148 | return 1; | ||
149 | } else | ||
150 | return 0; | ||
151 | } | 156 | } |
152 | 157 | ||
153 | static void job_completion(struct task_struct* t, int forced) | 158 | static void job_completion(struct task_struct* t, int forced) |
@@ -155,7 +160,7 @@ static void job_completion(struct task_struct* t, int forced) | |||
155 | sched_trace_task_completion(t,forced); | 160 | sched_trace_task_completion(t,forced); |
156 | TRACE_TASK(t, "job_completion().\n"); | 161 | TRACE_TASK(t, "job_completion().\n"); |
157 | 162 | ||
158 | set_rt_flags(t, RT_F_SLEEP); | 163 | tsk_rt(t)->completed = 1; |
159 | prepare_for_next_period(t); | 164 | prepare_for_next_period(t); |
160 | } | 165 | } |
161 | 166 | ||
@@ -220,7 +225,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) | |||
220 | budget_exhausted(pedf->scheduled) && | 225 | budget_exhausted(pedf->scheduled) && |
221 | !sigbudget_sent(pedf->scheduled); | 226 | !sigbudget_sent(pedf->scheduled); |
222 | np = exists && is_np(pedf->scheduled); | 227 | np = exists && is_np(pedf->scheduled); |
223 | sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP; | 228 | sleep = exists && is_completed(pedf->scheduled); |
224 | preempt = edf_preemption_needed(edf, prev); | 229 | preempt = edf_preemption_needed(edf, prev); |
225 | 230 | ||
226 | /* If we need to preempt do so. | 231 | /* If we need to preempt do so. |
@@ -276,7 +281,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) | |||
276 | 281 | ||
277 | if (next) { | 282 | if (next) { |
278 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | 283 | TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); |
279 | set_rt_flags(next, RT_F_RUNNING); | 284 | tsk_rt(next)->completed = 0; |
280 | } else { | 285 | } else { |
281 | TRACE("becoming idle at %llu\n", litmus_clock()); | 286 | TRACE("becoming idle at %llu\n", litmus_clock()); |
282 | } | 287 | } |
@@ -314,7 +319,7 @@ static void psnedf_task_new(struct task_struct * t, int on_rq, int running) | |||
314 | } else { | 319 | } else { |
315 | requeue(t, edf); | 320 | requeue(t, edf); |
316 | /* maybe we have to reschedule */ | 321 | /* maybe we have to reschedule */ |
317 | preempt(pedf); | 322 | psnedf_preempt_check(pedf); |
318 | } | 323 | } |
319 | raw_spin_unlock_irqrestore(&pedf->slock, flags); | 324 | raw_spin_unlock_irqrestore(&pedf->slock, flags); |
320 | } | 325 | } |
@@ -350,8 +355,10 @@ static void psnedf_task_wake_up(struct task_struct *task) | |||
350 | * de-scheduling the task, i.e., wake_up() raced with schedule() | 355 | * de-scheduling the task, i.e., wake_up() raced with schedule() |
351 | * and won. | 356 | * and won. |
352 | */ | 357 | */ |
353 | if (pedf->scheduled != task) | 358 | if (pedf->scheduled != task) { |
354 | requeue(task, edf); | 359 | requeue(task, edf); |
360 | psnedf_preempt_check(pedf); | ||
361 | } | ||
355 | 362 | ||
356 | raw_spin_unlock_irqrestore(&pedf->slock, flags); | 363 | raw_spin_unlock_irqrestore(&pedf->slock, flags); |
357 | TRACE_TASK(task, "wake up done\n"); | 364 | TRACE_TASK(task, "wake up done\n"); |
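PSN-EDF gets the same treatment as P-FP above: the preemption test is factored into a helper so that task_new() and the wake-up path check for a needed preemption right after requeueing, instead of preempting unconditionally (task_new) or not at all (wake-up). The shared pattern, schematically (placeholder names):

	/* After putting a job back into the ready queue, immediately test
	 * it against the currently scheduled job. */
	requeue(task, domain);
	if (preemption_needed(domain, scheduled))
		preempt(domain);   /* sets need_resched or sends an IPI */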
diff --git a/litmus/sync.c b/litmus/sync.c index 9fb6366f002f..908bf21dc210 100644 --- a/litmus/sync.c +++ b/litmus/sync.c | |||
@@ -16,67 +16,111 @@ | |||
16 | 16 | ||
17 | #include <litmus/sched_trace.h> | 17 | #include <litmus/sched_trace.h> |
18 | 18 | ||
19 | static DECLARE_COMPLETION(ts_release); | 19 | struct ts_release_wait { |
20 | struct list_head list; | ||
21 | struct completion completion; | ||
22 | lt_t ts_release_time; | ||
23 | }; | ||
24 | |||
25 | #define DECLARE_TS_RELEASE_WAIT(symb) \ | ||
26 | struct ts_release_wait symb = \ | ||
27 | { \ | ||
28 | LIST_HEAD_INIT(symb.list), \ | ||
29 | COMPLETION_INITIALIZER_ONSTACK(symb.completion), \ | ||
30 | 0 \ | ||
31 | } | ||
32 | |||
33 | static LIST_HEAD(task_release_list); | ||
34 | static DEFINE_MUTEX(task_release_lock); | ||
20 | 35 | ||
21 | static long do_wait_for_ts_release(void) | 36 | static long do_wait_for_ts_release(void) |
22 | { | 37 | { |
23 | long ret = 0; | 38 | DECLARE_TS_RELEASE_WAIT(wait); |
39 | |||
40 | long ret = -ERESTARTSYS; | ||
41 | |||
42 | if (mutex_lock_interruptible(&task_release_lock)) | ||
43 | goto out; | ||
44 | |||
45 | list_add(&wait.list, &task_release_list); | ||
24 | 46 | ||
25 | /* If the interruption races with a release, the completion object | 47 | mutex_unlock(&task_release_lock); |
26 | * may have a non-zero counter. To avoid this problem, this should | ||
27 | * be replaced by wait_for_completion(). | ||
28 | * | ||
29 | * For debugging purposes, this is interruptible for now. | ||
30 | */ | ||
31 | ret = wait_for_completion_interruptible(&ts_release); | ||
32 | 48 | ||
49 | /* We are enqueued, now we wait for someone to wake us up. */ | ||
50 | ret = wait_for_completion_interruptible(&wait.completion); | ||
51 | |||
52 | if (!ret) { | ||
53 | if (is_realtime(current)) { | ||
54 | /* Completion succeeded, setup release. */ | ||
55 | litmus->release_at(current, wait.ts_release_time | ||
56 | + current->rt_param.task_params.phase | ||
57 | - current->rt_param.task_params.period); | ||
58 | /* trigger advance to next job release at the programmed time */ | ||
59 | ret = complete_job(); | ||
60 | } | ||
61 | } else { | ||
62 | /* We were interrupted, must cleanup list. */ | ||
63 | mutex_lock(&task_release_lock); | ||
64 | if (!wait.completion.done) | ||
65 | list_del(&wait.list); | ||
66 | mutex_unlock(&task_release_lock); | ||
67 | } | ||
68 | |||
69 | out: | ||
33 | return ret; | 70 | return ret; |
34 | } | 71 | } |
35 | 72 | ||
36 | int count_tasks_waiting_for_release(void) | 73 | int count_tasks_waiting_for_release(void) |
37 | { | 74 | { |
38 | unsigned long flags; | ||
39 | int task_count = 0; | 75 | int task_count = 0; |
40 | struct list_head *pos; | 76 | struct list_head *pos; |
41 | 77 | ||
42 | spin_lock_irqsave(&ts_release.wait.lock, flags); | 78 | mutex_lock(&task_release_lock); |
43 | list_for_each(pos, &ts_release.wait.task_list) { | 79 | |
80 | list_for_each(pos, &task_release_list) { | ||
44 | task_count++; | 81 | task_count++; |
45 | } | 82 | } |
46 | spin_unlock_irqrestore(&ts_release.wait.lock, flags); | 83 | |
84 | mutex_unlock(&task_release_lock); | ||
85 | |||
47 | 86 | ||
48 | return task_count; | 87 | return task_count; |
49 | } | 88 | } |
50 | 89 | ||
51 | static long do_release_ts(lt_t start) | 90 | static long do_release_ts(lt_t start) |
52 | { | 91 | { |
53 | int task_count = 0; | 92 | long task_count = 0; |
54 | unsigned long flags; | ||
55 | struct list_head *pos; | ||
56 | struct task_struct *t; | ||
57 | 93 | ||
94 | struct list_head *pos, *safe; | ||
95 | struct ts_release_wait *wait; | ||
58 | 96 | ||
59 | spin_lock_irqsave(&ts_release.wait.lock, flags); | 97 | if (mutex_lock_interruptible(&task_release_lock)) { |
60 | TRACE("<<<<<< synchronous task system release >>>>>>\n"); | 98 | task_count = -ERESTARTSYS; |
99 | goto out; | ||
100 | } | ||
61 | 101 | ||
102 | TRACE("<<<<<< synchronous task system release >>>>>>\n"); | ||
62 | sched_trace_sys_release(&start); | 103 | sched_trace_sys_release(&start); |
63 | list_for_each(pos, &ts_release.wait.task_list) { | 104 | |
64 | t = (struct task_struct*) list_entry(pos, | 105 | task_count = 0; |
65 | struct __wait_queue, | 106 | list_for_each_safe(pos, safe, &task_release_list) { |
66 | task_list)->private; | 107 | wait = (struct ts_release_wait*) |
108 | list_entry(pos, struct ts_release_wait, list); | ||
109 | |||
67 | task_count++; | 110 | task_count++; |
111 | |||
68 | /* RT tasks can be delayed. Non-RT tasks are released | 112 | /* RT tasks can be delayed. Non-RT tasks are released |
69 | immediately. */ | 113 | immediately. */ |
70 | if (is_realtime(t)) { | 114 | wait->ts_release_time = start; |
71 | litmus->release_at(t, start + t->rt_param.task_params.phase); | 115 | complete(&wait->completion); |
72 | } | ||
73 | sched_trace_task_release(t); | ||
74 | } | 116 | } |
75 | 117 | ||
76 | spin_unlock_irqrestore(&ts_release.wait.lock, flags); | 118 | /* clear stale list */ |
119 | INIT_LIST_HEAD(&task_release_list); | ||
77 | 120 | ||
78 | complete_n(&ts_release, task_count); | 121 | mutex_unlock(&task_release_lock); |
79 | 122 | ||
123 | out: | ||
80 | return task_count; | 124 | return task_count; |
81 | } | 125 | } |
82 | 126 | ||
@@ -90,19 +134,26 @@ asmlinkage long sys_wait_for_ts_release(void) | |||
90 | return ret; | 134 | return ret; |
91 | } | 135 | } |
92 | 136 | ||
137 | #define ONE_MS 1000000 | ||
93 | 138 | ||
94 | asmlinkage long sys_release_ts(lt_t __user *__delay) | 139 | asmlinkage long sys_release_ts(lt_t __user *__delay) |
95 | { | 140 | { |
96 | long ret; | 141 | long ret = 0; |
97 | lt_t delay; | 142 | lt_t delay = 0; |
143 | lt_t start_time; | ||
98 | 144 | ||
99 | /* FIXME: check capabilities... */ | 145 | /* FIXME: check capabilities... */ |
100 | 146 | ||
101 | ret = copy_from_user(&delay, __delay, sizeof(delay)); | 147 | if (__delay) |
148 | ret = copy_from_user(&delay, __delay, sizeof(delay)); | ||
149 | |||
102 | if (ret == 0) { | 150 | if (ret == 0) { |
151 | /* round up to next larger integral millisecond */ | ||
152 | start_time = ((litmus_clock() / ONE_MS) + 1) * ONE_MS; | ||
153 | |||
103 | /* Note: Non-rt tasks that participate in a sync release cannot be | 154 | /* Note: Non-rt tasks that participate in a sync release cannot be |
104 | delayed. They will be released immediately. */ | 155 | delayed. They will be released immediately. */ |
105 | ret = do_release_ts(litmus_clock() + delay); | 156 | ret = do_release_ts(start_time + delay); |
106 | } | 157 | } |
107 | 158 | ||
108 | return ret; | 159 | return ret; |
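The sync-release rework replaces the single shared completion, whose counter could go stale when an interrupted waiter raced with a release, with a per-waiter completion on a mutex-protected list; each released task then programs its own staggered release (start + phase - period) and advances to it via complete_job(). Note also that the release time is now rounded up to an integral millisecond before the delay is added: with litmus_clock() = 5,300,000 ns, the base becomes (5 + 1) * ONE_MS = 6,000,000 ns. From user space the pair is driven roughly like this (liblitmus-style sketch; wrapper names assumed):

	#include <litmus.h>              /* liblitmus, assumed available */

	static void rt_task_body(void)   /* in each real-time task */
	{
		wait_for_ts_release();   /* enqueue and block until released */
	}

	static void coordinator(void)    /* in the releasing process */
	{
		lt_t delay_ns = 1000000; /* 1 ms past the rounded-up 'now' */
		release_ts(&delay_ns);   /* returns # of tasks released */
	}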
diff --git a/litmus/trace.c b/litmus/trace.c index 3c35c527e805..7dbb98e4a3cd 100644 --- a/litmus/trace.c +++ b/litmus/trace.c | |||
@@ -18,6 +18,15 @@ static unsigned int ts_seq_no = 0; | |||
18 | 18 | ||
19 | DEFINE_PER_CPU(atomic_t, irq_fired_count); | 19 | DEFINE_PER_CPU(atomic_t, irq_fired_count); |
20 | 20 | ||
21 | void ft_irq_fired(void) | ||
22 | { | ||
23 | /* Only called with preemptions disabled. */ | ||
24 | atomic_inc(&__get_cpu_var(irq_fired_count)); | ||
25 | |||
26 | if (has_control_page(current)) | ||
27 | get_control_page(current)->irq_count++; | ||
28 | } | ||
29 | |||
21 | static inline void clear_irq_fired(void) | 30 | static inline void clear_irq_fired(void) |
22 | { | 31 | { |
23 | atomic_set(&__raw_get_cpu_var(irq_fired_count), 0); | 32 | atomic_set(&__raw_get_cpu_var(irq_fired_count), 0); |
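ft_irq_fired() now also mirrors the interrupt count into the task's control page, so user space can detect that a measured code section was disturbed. A hypothetical user-side retry idiom built on that counter (struct layout reduced to the one field this hunk touches; names assumed):

	#include <stdint.h>

	struct control_page { uint64_t irq_count; /* ... */ };  /* assumed */

	extern uint64_t timed_section(void);   /* hypothetical workload */

	/* Re-run the measured section until no interrupt fired during it. */
	static uint64_t measure_undisturbed(volatile struct control_page *cp)
	{
		uint64_t sample, before, after;
		do {
			before = cp->irq_count;
			sample = timed_section();
			after  = cp->irq_count;
		} while (before != after);
		return sample;
	}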
@@ -34,77 +43,119 @@ static inline unsigned int get_and_clear_irq_fired(void) | |||
34 | return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0); | 43 | return atomic_xchg(&__raw_get_cpu_var(irq_fired_count), 0); |
35 | } | 44 | } |
36 | 45 | ||
37 | static inline void __save_irq_flags(struct timestamp *ts) | 46 | static inline void save_irq_flags(struct timestamp *ts, unsigned int irq_count) |
38 | { | 47 | { |
39 | unsigned int irq_count; | ||
40 | |||
41 | irq_count = get_and_clear_irq_fired(); | ||
42 | /* Store how many interrupts occurred. */ | 48 | /* Store how many interrupts occurred. */ |
43 | ts->irq_count = irq_count; | 49 | ts->irq_count = irq_count; |
44 | /* Extra flag because ts->irq_count overflows quickly. */ | 50 | /* Extra flag because ts->irq_count overflows quickly. */ |
45 | ts->irq_flag = irq_count > 0; | 51 | ts->irq_flag = irq_count > 0; |
52 | |||
46 | } | 53 | } |
47 | 54 | ||
48 | static inline void __save_timestamp_cpu(unsigned long event, | 55 | static inline void write_timestamp(uint8_t event, |
49 | uint8_t type, uint8_t cpu) | 56 | uint8_t type, |
57 | uint8_t cpu, | ||
58 | uint16_t pid_fragment, | ||
59 | unsigned int irq_count, | ||
60 | int record_irq, | ||
61 | int hide_irq, | ||
62 | uint64_t timestamp, | ||
63 | int record_timestamp) | ||
50 | { | 64 | { |
65 | unsigned long flags; | ||
51 | unsigned int seq_no; | 66 | unsigned int seq_no; |
52 | struct timestamp *ts; | 67 | struct timestamp *ts; |
68 | |||
69 | /* Avoid preemptions while recording the timestamp. This reduces the | ||
70 | * number of "out of order" timestamps in the stream and makes | ||
71 | * post-processing easier. */ | ||
72 | |||
73 | local_irq_save(flags); | ||
74 | |||
53 | seq_no = fetch_and_inc((int *) &ts_seq_no); | 75 | seq_no = fetch_and_inc((int *) &ts_seq_no); |
54 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | 76 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { |
55 | ts->event = event; | 77 | ts->event = event; |
56 | ts->seq_no = seq_no; | 78 | ts->seq_no = seq_no; |
57 | ts->cpu = cpu; | 79 | |
58 | ts->task_type = type; | 80 | ts->task_type = type; |
59 | __save_irq_flags(ts); | 81 | ts->pid = pid_fragment; |
60 | barrier(); | 82 | |
61 | /* prevent re-ordering of ft_timestamp() */ | 83 | ts->cpu = cpu; |
62 | ts->timestamp = ft_timestamp(); | 84 | |
85 | if (record_irq) | ||
86 | irq_count = get_and_clear_irq_fired(); | ||
87 | |||
88 | save_irq_flags(ts, irq_count - hide_irq); | ||
89 | |||
90 | if (record_timestamp) | ||
91 | timestamp = ft_timestamp(); | ||
92 | |||
93 | ts->timestamp = timestamp; | ||
63 | ft_buffer_finish_write(trace_ts_buf, ts); | 94 | ft_buffer_finish_write(trace_ts_buf, ts); |
64 | } | 95 | } |
96 | |||
97 | local_irq_restore(flags); | ||
65 | } | 98 | } |
66 | 99 | ||
67 | static void __add_timestamp_user(struct timestamp *pre_recorded) | 100 | static void __add_timestamp_user(struct timestamp *pre_recorded) |
68 | { | 101 | { |
102 | unsigned long flags; | ||
69 | unsigned int seq_no; | 103 | unsigned int seq_no; |
70 | struct timestamp *ts; | 104 | struct timestamp *ts; |
105 | |||
106 | |||
107 | local_irq_save(flags); | ||
108 | |||
71 | seq_no = fetch_and_inc((int *) &ts_seq_no); | 109 | seq_no = fetch_and_inc((int *) &ts_seq_no); |
72 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | 110 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { |
73 | *ts = *pre_recorded; | 111 | *ts = *pre_recorded; |
74 | ts->seq_no = seq_no; | 112 | ts->seq_no = seq_no; |
75 | __save_irq_flags(ts); | 113 | ts->cpu = raw_smp_processor_id(); |
114 | save_irq_flags(ts, get_and_clear_irq_fired()); | ||
76 | ft_buffer_finish_write(trace_ts_buf, ts); | 115 | ft_buffer_finish_write(trace_ts_buf, ts); |
77 | } | 116 | } |
78 | } | ||
79 | 117 | ||
80 | static inline void __save_timestamp(unsigned long event, | 118 | local_irq_restore(flags); |
81 | uint8_t type) | ||
82 | { | ||
83 | __save_timestamp_cpu(event, type, raw_smp_processor_id()); | ||
84 | } | 119 | } |
85 | 120 | ||
86 | feather_callback void save_timestamp(unsigned long event) | 121 | feather_callback void save_timestamp(unsigned long event) |
87 | { | 122 | { |
88 | __save_timestamp(event, TSK_UNKNOWN); | 123 | write_timestamp(event, TSK_UNKNOWN, |
124 | raw_smp_processor_id(), | ||
125 | current->pid, | ||
126 | 0, 1, 0, | ||
127 | 0, 1); | ||
89 | } | 128 | } |
90 | 129 | ||
91 | feather_callback void save_timestamp_def(unsigned long event, | 130 | feather_callback void save_timestamp_def(unsigned long event, |
92 | unsigned long type) | 131 | unsigned long type) |
93 | { | 132 | { |
94 | __save_timestamp(event, (uint8_t) type); | 133 | write_timestamp(event, type, |
134 | raw_smp_processor_id(), | ||
135 | current->pid, | ||
136 | 0, 1, 0, | ||
137 | 0, 1); | ||
95 | } | 138 | } |
96 | 139 | ||
97 | feather_callback void save_timestamp_task(unsigned long event, | 140 | feather_callback void save_timestamp_task(unsigned long event, |
98 | unsigned long t_ptr) | 141 | unsigned long t_ptr) |
99 | { | 142 | { |
100 | int rt = is_realtime((struct task_struct *) t_ptr); | 143 | struct task_struct *t = (struct task_struct *) t_ptr; |
101 | __save_timestamp(event, rt ? TSK_RT : TSK_BE); | 144 | int rt = is_realtime(t); |
145 | |||
146 | write_timestamp(event, rt ? TSK_RT : TSK_BE, | ||
147 | raw_smp_processor_id(), | ||
148 | t->pid, | ||
149 | 0, 1, 0, | ||
150 | 0, 1); | ||
102 | } | 151 | } |
103 | 152 | ||
104 | feather_callback void save_timestamp_cpu(unsigned long event, | 153 | feather_callback void save_timestamp_cpu(unsigned long event, |
105 | unsigned long cpu) | 154 | unsigned long cpu) |
106 | { | 155 | { |
107 | __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); | 156 | write_timestamp(event, TSK_UNKNOWN, cpu, current->pid, |
157 | 0, 1, 0, | ||
158 | 0, 1); | ||
108 | } | 159 | } |
109 | 160 | ||
110 | feather_callback void save_task_latency(unsigned long event, | 161 | feather_callback void save_task_latency(unsigned long event, |
@@ -112,20 +163,44 @@ feather_callback void save_task_latency(unsigned long event, | |||
112 | { | 163 | { |
113 | lt_t now = litmus_clock(); | 164 | lt_t now = litmus_clock(); |
114 | lt_t *when = (lt_t*) when_ptr; | 165 | lt_t *when = (lt_t*) when_ptr; |
115 | unsigned int seq_no; | ||
116 | int cpu = raw_smp_processor_id(); | ||
117 | struct timestamp *ts; | ||
118 | 166 | ||
119 | seq_no = fetch_and_inc((int *) &ts_seq_no); | 167 | write_timestamp(event, TSK_RT, raw_smp_processor_id(), 0, |
120 | if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | 168 | 0, 1, 0, |
121 | ts->event = event; | 169 | now - *when, 0); |
122 | ts->timestamp = now - *when; | 170 | } |
123 | ts->seq_no = seq_no; | 171 | |
124 | ts->cpu = cpu; | 172 | /* fake timestamp to user-reported time */ |
125 | ts->task_type = TSK_RT; | 173 | feather_callback void save_timestamp_time(unsigned long event, |
126 | __save_irq_flags(ts); | 174 | unsigned long ptr) |
127 | ft_buffer_finish_write(trace_ts_buf, ts); | 175 | { |
128 | } | 176 | uint64_t* time = (uint64_t*) ptr; |
177 | |||
178 | write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE, | ||
179 | raw_smp_processor_id(), current->pid, | ||
180 | 0, 1, 0, | ||
181 | *time, 0); | ||
182 | } | ||
183 | |||
184 | /* Record user-reported IRQ count */ | ||
185 | feather_callback void save_timestamp_irq(unsigned long event, | ||
186 | unsigned long irq_counter_ptr) | ||
187 | { | ||
188 | uint64_t* irqs = (uint64_t*) irq_counter_ptr; | ||
189 | |||
190 | write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE, | ||
191 | raw_smp_processor_id(), current->pid, | ||
192 | *irqs, 0, 0, | ||
193 | 0, 1); | ||
194 | } | ||
195 | |||
196 | /* Suppress one IRQ from the irq count. Used by TS_SEND_RESCHED_END, which is | ||
197 | * called from within an interrupt that is expected. */ | ||
198 | feather_callback void save_timestamp_hide_irq(unsigned long event) | ||
199 | { | ||
200 | write_timestamp(event, is_realtime(current) ? TSK_RT : TSK_BE, | ||
201 | raw_smp_processor_id(), current->pid, | ||
202 | 0, 1, 1, | ||
203 | 0, 1); | ||
129 | } | 204 | } |
130 | 205 | ||
131 | /******************************************************************************/ | 206 | /******************************************************************************/ |
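All feather callbacks now funnel into the single write_timestamp() helper; the scalar arguments select, per call site, whether the IRQ count and the cycle timestamp are sampled in-kernel or supplied by the caller:

	/* record_irq = 1: sample get_and_clear_irq_fired() in-kernel;
	 *            = 0: trust the caller's irq_count (save_timestamp_irq).
	 * hide_irq   = 1: subtract the one expected interrupt
	 *                 (save_timestamp_hide_irq, TS_SEND_RESCHED_END).
	 * record_timestamp = 1: take ft_timestamp() now;
	 *                  = 0: use the caller-supplied value
	 *                       (save_task_latency, save_timestamp_time). */

	/* e.g., the plain save_timestamp() case reduces to: */
	write_timestamp(event, TSK_UNKNOWN, raw_smp_processor_id(),
	                current->pid,
	                0 /* irq_count */, 1 /* record_irq */,
	                0 /* hide_irq */,
	                0 /* timestamp */, 1 /* record_timestamp */);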