author		Glenn <gelliott@bonham.cs.unc.edu>	2010-11-16 15:41:21 -0500
committer	Glenn <gelliott@bonham.cs.unc.edu>	2010-11-16 15:41:21 -0500
commit		1b9caf2f95c97e27c91372bd41f79d3e96a99e62 (patch)
tree		a02ed0f31998ede99372964549e00216f0733e57
parent		9a4b9c5c9b2af2f69e7eb6d69893a767701b781b (diff)
parent		1726017e944d0086f14f867befbf5ebf07adc7dd (diff)

Merge branch 'wip-merge-2.6.36' of ssh://cvs.cs.unc.edu/cvs/proj/litmus/repo/litmus2010 into wip-merge-2.6.36
-rw-r--r--	arch/arm/Kconfig                             |   8
-rw-r--r--	arch/arm/include/asm/timex.h                 |   2
-rw-r--r--	arch/arm/include/asm/unistd.h                |   3
-rw-r--r--	arch/arm/kernel/calls.S                      |  14
-rw-r--r--	arch/arm/kernel/smp.c                        |   4
-rw-r--r--	arch/arm/mach-realview/include/mach/timex.h  |  27
-rw-r--r--	arch/x86/kernel/smp.c                        |   9
-rw-r--r--	include/linux/sched.h                        |   2
-rw-r--r--	include/litmus/debug_trace.h                 |  25
-rw-r--r--	include/litmus/litmus.h                      |  28
-rw-r--r--	include/litmus/preempt.h                     | 164
-rw-r--r--	include/litmus/sched_trace.h                 |   9
-rw-r--r--	kernel/sched.c                               |  12
-rw-r--r--	litmus/Kconfig                               |  45
-rw-r--r--	litmus/Makefile                              |   1
-rw-r--r--	litmus/budget.c                              |   4
-rw-r--r--	litmus/litmus.c                              |   8
-rw-r--r--	litmus/litmus_proc.c                         |   1
-rw-r--r--	litmus/preempt.c                             | 131
-rw-r--r--	litmus/sched_cedf.c                          |  10
-rw-r--r--	litmus/sched_gsn_edf.c                       |  24
-rw-r--r--	litmus/sched_litmus.c                        |   5
-rw-r--r--	litmus/sched_pfair.c                         |  11
-rw-r--r--	litmus/sched_plugin.c                        |  46
-rw-r--r--	litmus/sched_psn_edf.c                       |   7
-rw-r--r--	litmus/sched_task_trace.c                    |   5
-rw-r--r--	litmus/sched_trace.c                         | 190
-rw-r--r--	litmus/srp.c                                 |   4
-rw-r--r--	litmus/trace.c                               |   1
29 files changed, 528 insertions(+), 272 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9c26ba7244fb..babad6d7681a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1808,3 +1808,11 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+config ARCH_HAS_SEND_PULL_TIMERS
+	def_bool n
+
+config ARCH_HAS_FEATHER_TRACE
+	def_bool n
+
+source "litmus/Kconfig"
diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h
index 3be8de3adaba..8a102a383a36 100644
--- a/arch/arm/include/asm/timex.h
+++ b/arch/arm/include/asm/timex.h
@@ -16,9 +16,11 @@
 
 typedef unsigned long cycles_t;
 
+#ifndef get_cycles
 static inline cycles_t get_cycles (void)
 {
 	return 0;
 }
+#endif
 
 #endif
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c891eb76c0e3..625b30490624 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -397,6 +397,9 @@
 #define __NR_fanotify_mark	(__NR_SYSCALL_BASE+368)
 #define __NR_prlimit64		(__NR_SYSCALL_BASE+369)
 
+#define __NR_LITMUS		(__NR_SYSCALL_BASE+370)
+#include <litmus/unistd_32.h>
+
 /*
  * The following SWIs are ARM private.
  */
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 5c26eccef998..584a68349a41 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -379,6 +379,20 @@
 		CALL(sys_fanotify_init)
 		CALL(sys_fanotify_mark)
 		CALL(sys_prlimit64)
+/* 370 */	CALL(sys_set_rt_task_param)
+		CALL(sys_get_rt_task_param)
+		CALL(sys_complete_job)
+		CALL(sys_od_open)
+		CALL(sys_od_close)
+/* 375 */	CALL(sys_fmlp_down)
+		CALL(sys_fmlp_up)
+		CALL(sys_srp_down)
+		CALL(sys_srp_up)
+		CALL(sys_query_job_no)
+/* 380 */	CALL(sys_wait_for_job_release)
+		CALL(sys_wait_for_ts_release)
+		CALL(sys_release_ts)
+		CALL(sys_null_call)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 40dc74f2b27f..b72fbf3d043c 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -38,6 +38,8 @@
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
 
+#include <litmus/preempt.h>
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
@@ -533,6 +535,8 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 			 * nothing more to do - eveything is
 			 * done on the interrupt return path
 			 */
+			/* LITMUS^RT: take action based on scheduler state */
+			sched_state_ipi();
 			break;
 
 		case IPI_CALL_FUNC:
diff --git a/arch/arm/mach-realview/include/mach/timex.h b/arch/arm/mach-realview/include/mach/timex.h
index 4eeb069373c2..e8bcc40d1f08 100644
--- a/arch/arm/mach-realview/include/mach/timex.h
+++ b/arch/arm/mach-realview/include/mach/timex.h
@@ -21,3 +21,30 @@
  */
 
 #define CLOCK_TICK_RATE		(50000000 / 16)
+
+#if defined(CONFIG_MACH_REALVIEW_PB11MP) || defined(CONFIG_MACH_REALVIEW_PB1176)
+
+static inline unsigned long realview_get_arm11_cp15_ccnt(void)
+{
+	unsigned long cycles;
+	/* Read CP15 CCNT register. */
+	asm volatile ("mrc p15, 0, %0, c15, c12, 1" : "=r" (cycles));
+	return cycles;
+}
+
+#define get_cycles realview_get_arm11_cp15_ccnt
+
+#elif defined(CONFIG_MACH_REALVIEW_PBA8)
+
+
+static inline unsigned long realview_get_a8_cp15_ccnt(void)
+{
+	unsigned long cycles;
+	/* Read CP15 CCNT register. */
+	asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r" (cycles));
+	return cycles;
+}
+
+#define get_cycles realview_get_a8_cp15_ccnt
+
+#endif
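Both helpers read the CP15 cycle counter (CCNT); they differ only in the register encoding used by the ARM11 MPCore versus the Cortex-A8 PMU. A minimal sketch of the delta idiom that consumers such as Feather-Trace build on (illustrative only; it assumes a get_cycles() definition like the ones above, and unsigned arithmetic makes a single counter wrap-around harmless):

	/* Hedged sketch: measuring an interval with a raw cycle counter. */
	static inline unsigned long cycles_elapsed(unsigned long start,
						   unsigned long end)
	{
		/* unsigned subtraction tolerates one 32-bit wrap-around */
		return end - start;
	}

	static void example_measurement(void)
	{
		unsigned long t0, t1;

		t0 = get_cycles();
		/* ... code under measurement ... */
		t1 = get_cycles();
		printk(KERN_INFO "took %lu cycles\n", cycles_elapsed(t0, t1));
	}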
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 97af589a5c0c..74cca6014c0e 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -23,7 +23,8 @@
 #include <linux/cpu.h>
 #include <linux/gfp.h>
 
-#include <litmus/litmus.h>
+#include <litmus/preempt.h>
+#include <litmus/debug_trace.h>
 #include <litmus/trace.h>
 
 #include <asm/mtrr.h>
@@ -212,10 +213,8 @@ static void native_smp_send_stop(void)
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
-	/* LITMUS^RT needs this interrupt to proper reschedule
-	 * on this cpu
-	 */
-	set_tsk_need_resched(current);
+	/* LITMUS^RT: this IPI might need to trigger the sched state machine. */
+	sched_state_ipi();
 	inc_irq_stat(irq_resched_count);
 	TS_SEND_RESCHED_END;
 	/*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 708523b06920..c9ac4fc837ba 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct sched_param {
 #include <asm/processor.h>
 
 #include <litmus/rt_param.h>
+#include <litmus/preempt.h>
 
 struct exec_domain;
 struct futex_pi_state;
@@ -2301,6 +2302,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
 static inline void set_tsk_need_resched(struct task_struct *tsk)
 {
 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	sched_state_will_schedule(tsk);
 }
 
 static inline void clear_tsk_need_resched(struct task_struct *tsk)
diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h
new file mode 100644
index 000000000000..b743aa96de79
--- /dev/null
+++ b/include/litmus/debug_trace.h
@@ -0,0 +1,25 @@
+#ifndef LITMUS_DEBUG_TRACE_H
+#define LITMUS_DEBUG_TRACE_H
+
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+void sched_trace_log_message(const char* fmt, ...);
+void dump_trace_buffer(int max);
+#else
+
+#define sched_trace_log_message(fmt, ...)
+
+#endif
+
+extern atomic_t __log_seq_no;
+
+#define TRACE(fmt, args...)						\
+	sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \
+				raw_smp_processor_id(), ## args)
+
+#define TRACE_TASK(t, fmt, args...)			\
+	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)
+
+#define TRACE_CUR(fmt, args...) \
+	TRACE_TASK(current, fmt, ## args)
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 5d20276e44f4..30f41869b455 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -6,38 +6,12 @@
 #ifndef _LINUX_LITMUS_H_
 #define _LINUX_LITMUS_H_
 
-#include <linux/jiffies.h>
-#include <litmus/sched_trace.h>
+#include <litmus/debug_trace.h>
 
 #ifdef CONFIG_RELEASE_MASTER
 extern atomic_t release_master_cpu;
 #endif
 
-extern atomic_t __log_seq_no;
-
-#define TRACE(fmt, args...) \
-	sched_trace_log_message("%d P%d: " fmt, atomic_add_return(1, &__log_seq_no), \
-				raw_smp_processor_id(), ## args)
-
-#define TRACE_TASK(t, fmt, args...) \
-	TRACE("(%s/%d) " fmt, (t)->comm, (t)->pid, ##args)
-
-#define TRACE_CUR(fmt, args...) \
-	TRACE_TASK(current, fmt, ## args)
-
-#define TRACE_BUG_ON(cond) \
-	do { if (cond) TRACE("BUG_ON(%s) at %s:%d " \
-			     "called from %p current=%s/%d state=%d " \
-			     "flags=%x partition=%d cpu=%d rtflags=%d"\
-			     " job=%u timeslice=%u\n", \
-			     #cond, __FILE__, __LINE__, __builtin_return_address(0), current->comm, \
-			     current->pid, current->state, current->flags, \
-			     get_partition(current), smp_processor_id(), get_rt_flags(current), \
-			     current->rt_param.job_params.job_no, \
-			     current->rt.time_slice\
-			     ); } while(0);
-
-
 /* in_list - is a given list_head queued on some list?
  */
 static inline int in_list(struct list_head* list)
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
new file mode 100644
index 000000000000..260c6fe17986
--- /dev/null
+++ b/include/litmus/preempt.h
@@ -0,0 +1,164 @@
+#ifndef LITMUS_PREEMPT_H
+#define LITMUS_PREEMPT_H
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+#include <litmus/debug_trace.h>
+
+extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
+
+#ifdef CONFIG_DEBUG_KERNEL
+const char* sched_state_name(int s);
+#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args)
+#else
+#define TRACE_STATE(fmt, args...) /* ignore */
+#endif
+
+#define VERIFY_SCHED_STATE(x)						\
+	do { int __s = get_sched_state();				\
+		if ((__s & (x)) == 0)					\
+			TRACE_STATE("INVALID s=0x%x (%s) not "		\
+				    "in 0x%x (%s) [%s]\n",		\
+				    __s, sched_state_name(__s),		\
+				    (x), #x, __FUNCTION__);		\
+	} while (0);
+
+#define TRACE_SCHED_STATE_CHANGE(x, y, cpu)				\
+	TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n",			\
+		    cpu, (x), sched_state_name(x),			\
+		    (y), sched_state_name(y))
+
+
+typedef enum scheduling_state {
+	TASK_SCHEDULED    = (1 << 0),  /* The currently scheduled task is the one that
+					* should be scheduled, and the processor does not
+					* plan to invoke schedule(). */
+	SHOULD_SCHEDULE   = (1 << 1),  /* A remote processor has determined that the
+					* processor should reschedule, but this has not
+					* been communicated yet (IPI still pending). */
+	WILL_SCHEDULE     = (1 << 2),  /* The processor has noticed that it has to
+					* reschedule and will do so shortly. */
+	TASK_PICKED       = (1 << 3),  /* The processor is currently executing schedule(),
+					* has selected a new task to schedule, but has not
+					* yet performed the actual context switch. */
+	PICKED_WRONG_TASK = (1 << 4),  /* The processor has not yet performed the context
+					* switch, but a remote processor has already
+					* determined that a higher-priority task became
+					* eligible after the task was picked. */
+} sched_state_t;
+
+static inline sched_state_t get_sched_state_on(int cpu)
+{
+	return atomic_read(&per_cpu(resched_state, cpu));
+}
+
+static inline sched_state_t get_sched_state(void)
+{
+	return atomic_read(&__get_cpu_var(resched_state));
+}
+
+static inline int is_in_sched_state(int possible_states)
+{
+	return get_sched_state() & possible_states;
+}
+
+static inline int cpu_is_in_sched_state(int cpu, int possible_states)
+{
+	return get_sched_state_on(cpu) & possible_states;
+}
+
+static inline void set_sched_state(sched_state_t s)
+{
+	TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id());
+	atomic_set(&__get_cpu_var(resched_state), s);
+}
+
+static inline int sched_state_transition(sched_state_t from, sched_state_t to)
+{
+	sched_state_t old_state;
+
+	old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to);
+	if (old_state == from) {
+		TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id());
+		return 1;
+	} else
+		return 0;
+}
+
+static inline int sched_state_transition_on(int cpu,
+					    sched_state_t from,
+					    sched_state_t to)
+{
+	sched_state_t old_state;
+
+	old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to);
+	if (old_state == from) {
+		TRACE_SCHED_STATE_CHANGE(from, to, cpu);
+		return 1;
+	} else
+		return 0;
+}
+
+/* Plugins must call this function after they have decided which job to
+ * schedule next.  IMPORTANT: this function must be called while still holding
+ * the lock that is used to serialize scheduling decisions.
+ *
+ * (Ideally, we would like to use runqueue locks for this purpose, but that
+ * would lead to deadlocks with the migration code.)
+ */
+static inline void sched_state_task_picked(void)
+{
+	VERIFY_SCHED_STATE(WILL_SCHEDULE);
+
+	/* WILL_SCHEDULE has only a local transition => simple store is ok */
+	set_sched_state(TASK_PICKED);
+}
+
+static inline void sched_state_entered_schedule(void)
+{
+	/* Update state for the case that we entered schedule() not due to
+	 * set_tsk_need_resched() */
+	set_sched_state(WILL_SCHEDULE);
+}
+
+/* Called by schedule() to check if the scheduling decision is still valid
+ * after a context switch. Returns 1 if the CPU needs to reschedule. */
+static inline int sched_state_validate_switch(void)
+{
+	int left_state_ok = 0;
+
+	VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED);
+
+	if (is_in_sched_state(TASK_PICKED)) {
+		/* Might be good; let's try to transition out of this
+		 * state. This must be done atomically since remote processors
+		 * may try to change the state, too. */
+		left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED);
+	}
+
+	if (!left_state_ok) {
+		/* We raced with a higher-priority task arrival => not
+		 * valid. The CPU needs to reschedule. */
+		set_sched_state(WILL_SCHEDULE);
+		return 1;
+	} else
+		return 0;
+}
+
+/* State transition events. See litmus/preempt.c for details. */
+void sched_state_will_schedule(struct task_struct* tsk);
+void sched_state_ipi(void);
+/* Cause a CPU (remote or local) to reschedule. */
+void litmus_reschedule(int cpu);
+void litmus_reschedule_local(void);
+
+#ifdef CONFIG_DEBUG_KERNEL
+void sched_state_plugin_check(void);
+#else
+#define sched_state_plugin_check() /* no check */
+#endif
+
+#endif
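The state machine above is the core of this merge: each CPU's resched_state moves along TASK_SCHEDULED -> SHOULD_SCHEDULE -> WILL_SCHEDULE -> TASK_PICKED -> TASK_SCHEDULED, with PICKED_WRONG_TASK as the detour for late job arrivals. As a rough illustration of the calling convention the header imposes on plugins (a hedged sketch: my_lock, my_pick_next_job() and my_cpu_should_run() are hypothetical placeholders, not part of this patch; the real adopters are the sched_*.c changes further down):

	/* Sketch of the convention preempt.h establishes for plugins. */
	static DEFINE_RAW_SPINLOCK(my_lock);	/* serializes scheduling decisions */

	static struct task_struct* my_plugin_schedule(struct task_struct *prev)
	{
		struct task_struct *next;

		raw_spin_lock(&my_lock);
		next = my_pick_next_job(prev);	/* hypothetical helper */
		/* Mandatory: record the decision while the plugin lock is
		 * still held, so a racing remote litmus_reschedule() lands
		 * in PICKED_WRONG_TASK instead of being lost. */
		sched_state_task_picked();
		raw_spin_unlock(&my_lock);
		return next;
	}

	/* A remote release path then reduces to: */
	static void my_on_job_release(int cpu)
	{
		raw_spin_lock(&my_lock);
		if (my_cpu_should_run(cpu))	/* higher-priority job arrived */
			litmus_reschedule(cpu);	/* note: lock still held */
		raw_spin_unlock(&my_lock);
	}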
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index e1b0c9712b5f..a5f73736f7e8 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -178,15 +178,6 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
-#ifdef CONFIG_SCHED_DEBUG_TRACE
-void sched_trace_log_message(const char* fmt, ...);
-void dump_trace_buffer(int max);
-#else
-
-#define sched_trace_log_message(fmt, ...)
-
-#endif
-
 #endif /* __KERNEL__ */
 
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index 6777dc7942a0..1b13c8e1cfc2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -82,6 +82,8 @@
 #include <litmus/sched_trace.h>
 #include <litmus/trace.h>
 
+static void litmus_tick(struct rq*, struct task_struct*);
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
@@ -576,8 +578,14 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule.  In
 	 * this case, we can save a useless back to back clock update.
 	 */
+	/* LITMUS^RT: turning off the clock update is buggy in Linux 2.6.36;
+	 * the scheduler can "forget" to re-enable the runqueue clock in some
+	 * cases. LITMUS^RT amplifies the effects of this problem. Hence, we
+	 * turn it off to avoid stalling clocks. */
+	/*
 	if (test_tsk_need_resched(p))
 		rq->skip_clock_update = 1;
+	*/
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -1052,6 +1060,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 	raw_spin_lock(&rq->lock);
 	update_rq_clock(rq);
 	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
+	litmus_tick(rq, rq->curr);
 	raw_spin_unlock(&rq->lock);
 
 	return HRTIMER_NORESTART;
@@ -3791,6 +3800,7 @@ asmlinkage void __sched schedule(void)
 
 need_resched:
 	preempt_disable();
+	sched_state_entered_schedule();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_note_context_switch(cpu);
@@ -3869,7 +3879,7 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(prev)))
+	if (sched_state_validate_switch() || unlikely(reacquire_kernel_lock(prev)))
 		goto need_resched_nonpreemptible;
 
 	preempt_enable_no_resched();
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 9888589ef126..d62c417f261e 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -108,6 +108,26 @@ config SCHED_TASK_TRACE
 	  Say Yes for debugging.
 	  Say No for overhead tracing.
 
+config SCHED_TASK_TRACE_SHIFT
+	int "Buffer size for sched_trace_xxx() events"
+	depends on SCHED_TASK_TRACE
+	range 8 13
+	default 9
+	help
+
+	  Select the buffer size of sched_trace_xxx() events as a power of two.
+	  These buffers are statically allocated as per-CPU data. Each event
+	  requires 24 bytes storage plus one additional flag byte. Too large
+	  buffers can cause issues with the per-cpu allocator (and waste
+	  memory). Too small buffers can cause scheduling events to be lost. The
+	  "right" size is workload dependent and depends on the number of tasks,
+	  each task's period, each task's number of suspensions, and how often
+	  the buffer is flushed.
+
+	  Examples: 12 =>  4k events
+		    10 =>  1k events
+		     8 => 256 events
+
 config SCHED_OVERHEAD_TRACE
 	bool "Record timestamps for overhead measurements"
 	depends on FEATHER_TRACE
@@ -129,6 +149,31 @@ config SCHED_DEBUG_TRACE
 	  Say Yes for debugging.
 	  Say No for overhead tracing.
 
+config SCHED_DEBUG_TRACE_SHIFT
+	int "Buffer size for TRACE() buffer"
+	depends on SCHED_DEBUG_TRACE
+	range 14 22
+	default 18
+	help
+
+	  Select the amount of memory needed for the TRACE() buffer, as a
+	  power of two. The TRACE() buffer is global and statically allocated. If
+	  the buffer is too small, there will be holes in the TRACE() log if the
+	  buffer-flushing task is starved.
+
+	  The default should be sufficient for most systems. Increase the buffer
+	  size if the log contains holes. Reduce the buffer size when running on
+	  a memory-constrained system.
+
+	  Examples: 14 => 16KB
+		    18 => 256KB
+		    20 => 1MB
+
+	  This buffer is exported to userspace using a misc device as
+	  'litmus/log'. On a system with default udev rules, a corresponding
+	  character device node should be created at /dev/litmus/log. The buffer
+	  can be flushed using cat, e.g., 'cat /dev/litmus/log > my_log_file.txt'.
+
 endmenu
 
 endmenu
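Both new options trade memory for lossless tracing. The arithmetic behind the quoted example values can be checked with a few lines of C (a userspace illustration only; the 24 + 1 bytes per event figure is taken from the help text above):

	#include <stdio.h>

	int main(void)
	{
		const unsigned event_bytes = 24 + 1;	/* payload + flag byte */
		unsigned shift;

		/* per-CPU sched_trace buffers: 2^shift events */
		for (shift = 8; shift <= 13; shift++)
			printf("SCHED_TASK_TRACE_SHIFT=%u -> %u events, ~%u bytes/CPU\n",
			       shift, 1u << shift, (1u << shift) * event_bytes);

		/* global TRACE() ring: 2^shift bytes */
		for (shift = 14; shift <= 22; shift += 2)
			printf("SCHED_DEBUG_TRACE_SHIFT=%u -> %u KB\n",
			       shift, (1u << shift) >> 10);
		return 0;
	}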
diff --git a/litmus/Makefile b/litmus/Makefile
index 7bd1abdcb84a..b7366b530749 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -3,6 +3,7 @@
 #
 
 obj-y     = sched_plugin.o litmus.o \
+	    preempt.o \
 	    litmus_proc.o \
 	    budget.o \
 	    jobs.o \
diff --git a/litmus/budget.c b/litmus/budget.c
index b99177a26313..310e9a3d4172 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -1,7 +1,9 @@
+#include <linux/sched.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
 
 #include <litmus/litmus.h>
+#include <litmus/preempt.h>
 
 struct enforcement_timer {
 	/* The enforcement timer is used to accurately police
@@ -23,7 +25,7 @@ static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
 	TRACE("enforcement timer fired.\n");
 	et->armed = 0;
 	/* activate scheduler */
-	set_tsk_need_resched(current);
+	litmus_reschedule_local();
 	local_irq_restore(flags);
 
 	return HRTIMER_NORESTART;
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 99c35ac99870..0756d0156f8f 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -6,20 +6,16 @@
 #include <asm/uaccess.h>
 #include <linux/uaccess.h>
 #include <linux/sysrq.h>
-
+#include <linux/sched.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 
 #include <litmus/litmus.h>
-#include <linux/sched.h>
-
 #include <litmus/bheap.h>
-
 #include <litmus/trace.h>
-
 #include <litmus/rt_domain.h>
-
 #include <litmus/litmus_proc.h>
+#include <litmus/sched_trace.h>
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index c10a6a6d3975..81ea5c35d291 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -2,6 +2,7 @@
  * litmus_proc.c -- Implementation of the /proc/litmus directory tree.
  */
 
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 
 #include <litmus/litmus.h>
diff --git a/litmus/preempt.c b/litmus/preempt.c
new file mode 100644
index 000000000000..ebe2e3461895
--- /dev/null
+++ b/litmus/preempt.c
@@ -0,0 +1,131 @@
+#include <linux/sched.h>
+
+#include <litmus/litmus.h>
+#include <litmus/preempt.h>
+
+/* The rescheduling state of each processor.
+ */
+DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state);
+
+void sched_state_will_schedule(struct task_struct* tsk)
+{
+	/* Litmus hack: we only care about processor-local invocations of
+	 * set_tsk_need_resched(). We can't reliably set the flag remotely
+	 * since it might race with other updates to the scheduling state. We
+	 * can't rely on the runqueue lock protecting updates to the sched
+	 * state since processors do not acquire the runqueue locks for all
+	 * updates to the sched state (to avoid acquiring two runqueue locks at
+	 * the same time). Further, if tsk is residing on a remote processor,
+	 * then that processor doesn't actually know yet that it is going to
+	 * reschedule; it still must receive an IPI (unless a local invocation
+	 * races).
+	 */
+	if (likely(task_cpu(tsk) == smp_processor_id())) {
+		VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE);
+		if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK))
+			set_sched_state(PICKED_WRONG_TASK);
+		else
+			set_sched_state(WILL_SCHEDULE);
+	} else
+		/* Litmus tasks should never be subject to a remote
+		 * set_tsk_need_resched(). */
+		BUG_ON(is_realtime(tsk));
+	TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
+		   __builtin_return_address(0));
+}
+
+/* Called by the IPI handler after another CPU called smp_send_reschedule(). */
+void sched_state_ipi(void)
+{
+	/* If the IPI was slow, we might be in any state right now. The IPI is
+	 * only meaningful if we are in SHOULD_SCHEDULE. */
+	if (is_in_sched_state(SHOULD_SCHEDULE)) {
+		/* Cause scheduler to be invoked.
+		 * This will cause a transition to WILL_SCHEDULE. */
+		set_tsk_need_resched(current);
+		TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n",
+			    current->comm, current->pid);
+	} else {
+		/* ignore */
+		TRACE_STATE("ignoring IPI in state %x (%s)\n",
+			    get_sched_state(),
+			    sched_state_name(get_sched_state()));
+	}
+}
+
+/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must
+ * hold the lock that is used to serialize scheduling decisions. */
+void litmus_reschedule(int cpu)
+{
+	int picked_transition_ok = 0;
+	int scheduled_transition_ok = 0;
+
+	/* The (remote) CPU could be in any state. */
+
+	/* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU
+	 * is not aware of the need to reschedule at this point. */
+
+	/* is a context switch in progress? */
+	if (cpu_is_in_sched_state(cpu, TASK_PICKED))
+		picked_transition_ok = sched_state_transition_on(
+			cpu, TASK_PICKED, PICKED_WRONG_TASK);
+
+	if (!picked_transition_ok &&
+	    cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) {
+		/* We either raced with the end of the context switch, or the
+		 * CPU was in TASK_SCHEDULED anyway. */
+		scheduled_transition_ok = sched_state_transition_on(
+			cpu, TASK_SCHEDULED, SHOULD_SCHEDULE);
+	}
+
+	/* If the CPU was in state TASK_SCHEDULED, then we need to cause the
+	 * scheduler to be invoked. */
+	if (scheduled_transition_ok) {
+		if (smp_processor_id() == cpu)
+			set_tsk_need_resched(current);
+		else
+			smp_send_reschedule(cpu);
+	}
+
+	TRACE_STATE("%s picked-ok:%d sched-ok:%d\n",
+		    __FUNCTION__,
+		    picked_transition_ok,
+		    scheduled_transition_ok);
+}
+
+void litmus_reschedule_local(void)
+{
+	if (is_in_sched_state(TASK_PICKED))
+		set_sched_state(PICKED_WRONG_TASK);
+	else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) {
+		set_sched_state(WILL_SCHEDULE);
+		set_tsk_need_resched(current);
+	}
+}
+
+#ifdef CONFIG_DEBUG_KERNEL
+
+void sched_state_plugin_check(void)
+{
+	if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) {
+		TRACE("!!!! plugin did not call sched_state_task_picked()!"
+		      "Calling sched_state_task_picked() is mandatory---fix this.\n");
+		set_sched_state(TASK_PICKED);
+	}
+}
+
+#define NAME_CHECK(x) case x: return #x
+const char* sched_state_name(int s)
+{
+	switch (s) {
+		NAME_CHECK(TASK_SCHEDULED);
+		NAME_CHECK(SHOULD_SCHEDULE);
+		NAME_CHECK(WILL_SCHEDULE);
+		NAME_CHECK(TASK_PICKED);
+		NAME_CHECK(PICKED_WRONG_TASK);
+	default:
+		return "UNKNOWN";
+	};
+}
+
+#endif
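litmus_reschedule() and sched_state_validate_switch() together close the window in which a remote job release could be lost between picking a task and switching to it. The following standalone userspace model (illustrative only; C11 atomics stand in for the kernel's atomic_cmpxchg()) shows why the TASK_PICKED -> TASK_SCHEDULED step must be a compare-and-swap rather than a plain store:

	#include <stdatomic.h>
	#include <stdio.h>

	enum { TASK_SCHEDULED = 1, SHOULD_SCHEDULE = 2, WILL_SCHEDULE = 4,
	       TASK_PICKED = 8, PICKED_WRONG_TASK = 16 };

	static _Atomic int resched_state = TASK_PICKED;

	/* models sched_state_transition(): succeeds only if nobody raced us */
	static int transition(int from, int to)
	{
		return atomic_compare_exchange_strong(&resched_state, &from, to);
	}

	int main(void)
	{
		/* remote CPU: a better job arrived after the pick */
		transition(TASK_PICKED, PICKED_WRONG_TASK);

		/* local CPU: validate the switch; the CAS fails, so
		 * schedule() must loop and pick again */
		if (!transition(TASK_PICKED, TASK_SCHEDULED))
			printf("validation failed -> WILL_SCHEDULE, retry schedule()\n");
		return 0;
	}

With a plain store, the local CPU would overwrite PICKED_WRONG_TASK with TASK_SCHEDULED and the remote release would go unnoticed until the next scheduling event.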
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index a729d97535e9..111e4fb1c62b 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -34,6 +34,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
@@ -209,12 +210,6 @@ static noinline void unlink(struct task_struct* t)
 {
 	cpu_entry_t *entry;
 
-	if (unlikely(!t)) {
-		TRACE_BUG_ON(!t);
-		return;
-	}
-
-
 	if (t->rt_param.linked_on != NO_CPU) {
 		/* unlink */
 		entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on);
@@ -341,7 +336,7 @@ static void cedf_tick(struct task_struct* t)
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
-			set_tsk_need_resched(t);
+			litmus_reschedule_local();
 			set_will_schedule();
 			TRACE("cedf_scheduler_tick: "
 			      "%d is preemptable "
@@ -466,6 +461,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	sched_state_task_picked();
 	raw_spin_unlock(&cluster->lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index e101768740ad..e9c5e531b1ae 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -18,6 +18,8 @@
 #include <litmus/edf_common.h>
 #include <litmus/sched_trace.h>
 
+#include <litmus/preempt.h>
+
 #include <litmus/bheap.h>
 
 #include <linux/module.h>
@@ -95,21 +97,12 @@ typedef struct {
 	int 			cpu;
 	struct task_struct*	linked;		/* only RT tasks */
 	struct task_struct*	scheduled;	/* only RT tasks */
-	atomic_t		will_schedule;	/* prevent unneeded IPIs */
 	struct bheap_node*	hn;
 } cpu_entry_t;
 DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries);
 
 cpu_entry_t* gsnedf_cpus[NR_CPUS];
 
-#define set_will_schedule() \
-	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1))
-#define clear_will_schedule() \
-	(atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 0))
-#define test_will_schedule(cpu) \
-	(atomic_read(&per_cpu(gsnedf_cpu_entries, cpu).will_schedule))
-
-
 /* the cpus queue themselves according to priority in here */
 static struct bheap_node gsnedf_heap_node[NR_CPUS];
 static struct bheap      gsnedf_cpu_heap;
@@ -217,11 +210,6 @@ static noinline void unlink(struct task_struct* t)
 {
 	cpu_entry_t *entry;
 
-	if (unlikely(!t)) {
-		TRACE_BUG_ON(!t);
-		return;
-	}
-
 	if (t->rt_param.linked_on != NO_CPU) {
 		/* unlink */
 		entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on);
@@ -341,8 +329,7 @@ static void gsnedf_tick(struct task_struct* t)
 			/* np tasks will be preempted when they become
 			 * preemptable again
 			 */
-			set_tsk_need_resched(t);
-			set_will_schedule();
+			litmus_reschedule_local();
 			TRACE("gsnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
@@ -391,7 +378,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 #endif
 
 	raw_spin_lock(&gsnedf_lock);
-	clear_will_schedule();
 
 	/* sanity checking */
 	BUG_ON(entry->scheduled && entry->scheduled != prev);
@@ -473,6 +459,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
 	if (exists)
 		next = prev;
 
+	sched_state_task_picked();
+
 	raw_spin_unlock(&gsnedf_lock);
 
 #ifdef WANT_ALL_SCHED_EVENTS
@@ -780,7 +768,6 @@ static long gsnedf_activate_plugin(void)
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		bheap_node_init(&entry->hn, entry);
-		atomic_set(&entry->will_schedule, 0);
 		entry->linked    = NULL;
 		entry->scheduled = NULL;
 #ifdef CONFIG_RELEASE_MASTER
@@ -829,7 +816,6 @@ static int __init init_gsn_edf(void)
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		entry = &per_cpu(gsnedf_cpu_entries, cpu);
 		gsnedf_cpus[cpu] = entry;
-		atomic_set(&entry->will_schedule, 0);
 		entry->cpu = cpu;
 		entry->hn  = &gsnedf_heap_node[cpu];
 		bheap_node_init(&entry->hn, entry);
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 65873152e68f..e6952896dc4b 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -3,6 +3,7 @@
 #include <litmus/litmus.h>
 #include <litmus/budget.h>
 #include <litmus/sched_plugin.h>
+#include <litmus/preempt.h>
 
 static void update_time_litmus(struct rq *rq, struct task_struct *p)
 {
@@ -52,6 +53,8 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
 	/* let the plugin schedule */
 	next = litmus->schedule(prev);
 
+	sched_state_plugin_check();
+
 	/* check if a global plugin pulled a task from a different RQ */
 	if (next && task_rq(next) != rq) {
 		/* we need to migrate the task */
@@ -198,7 +201,7 @@ static void yield_task_litmus(struct rq *rq)
 	 * then determine if a preemption is still required.
 	 */
 	clear_exit_np(current);
-	set_tsk_need_resched(current);
+	litmus_reschedule_local();
 }
 
 /* Plugins are responsible for this.
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index ea77d3295290..c7d5cf7aa2b3 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -16,6 +16,7 @@
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/rt_domain.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/sched_trace.h>
@@ -241,11 +242,7 @@ static void check_preempt(struct task_struct* t)
 		PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n",
 			    tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on);
 		/* preempt */
-		if (cpu == smp_processor_id())
-			set_tsk_need_resched(current);
-		else {
-			smp_send_reschedule(cpu);
-		}
+		litmus_reschedule(cpu);
 	}
 }
 
@@ -545,7 +542,7 @@ static void pfair_tick(struct task_struct* t)
 
 	if (state->local != current
 	    && (is_realtime(current) || is_present(state->local)))
-		set_tsk_need_resched(current);
+		litmus_reschedule_local();
 }
 
 static int safe_to_schedule(struct task_struct* t, int cpu)
@@ -577,7 +574,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev)
 		if (next)
 			tsk_rt(next)->scheduled_on = state->cpu;
 	}
-
+	sched_state_task_picked();
 	raw_spin_unlock(&pfair_lock);
 
 	if (next)
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index d706a08fb313..d912a6494d20 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -6,10 +6,11 @@
 
 #include <linux/list.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
-
+#include <litmus/preempt.h>
 #include <litmus/jobs.h>
 
 /*
@@ -18,36 +19,30 @@
 * non-preemptive section aware and does not invoke the scheduler / send
 * IPIs if the to-be-preempted task is actually non-preemptive.
 */
-void preempt_if_preemptable(struct task_struct* t, int on_cpu)
+void preempt_if_preemptable(struct task_struct* t, int cpu)
 {
	/* t is the real-time task executing on CPU on_cpu If t is NULL, then
	 * on_cpu is currently scheduling background work.
	 */
 
-	int send_ipi;
+	int reschedule = 0;
 
-	if (smp_processor_id() == on_cpu) {
-		/* local CPU case */
-		if (t) {
+	if (!t)
+		/* move non-real-time task out of the way */
+		reschedule = 1;
+	else {
+		if (smp_processor_id() == cpu) {
+			/* local CPU case */
 			/* check if we need to poke userspace */
 			if (is_user_np(t))
 				/* yes, poke it */
 				request_exit_np(t);
-			else
-				/* no, see if we are allowed to preempt the
+			else if (!is_kernel_np(t))
+				/* only if we are allowed to preempt the
				 * currently-executing task */
-				if (!is_kernel_np(t))
-					set_tsk_need_resched(t);
-		} else
-			/* move non-real-time task out of the way */
-			set_tsk_need_resched(current);
-	} else {
-		/* remote CPU case */
-		if (!t)
-			/* currently schedules non-real-time work */
-			send_ipi = 1;
-		else {
-			/* currently schedules real-time work */
+				reschedule = 1;
+		} else {
+			/* remote CPU case */
 			if (is_user_np(t)) {
 				/* need to notify user space of delayed
				 * preemption */
@@ -59,14 +54,14 @@ void preempt_if_preemptable(struct task_struct* t, int cpu)
 				mb();
 			}
 			/* Only send an ipi if remote task might have raced our
-			 * request, i.e., send an IPI to make sure if it exited
-			 * its critical section.
+			 * request, i.e., send an IPI to make sure in case it
+			 * exited its critical section.
			 */
-			send_ipi = !is_np(t) && !is_kernel_np(t);
+			reschedule = !is_np(t) && !is_kernel_np(t);
 		}
-		if (likely(send_ipi))
-			smp_send_reschedule(on_cpu);
 	}
+	if (likely(reschedule))
+		litmus_reschedule(cpu);
 }
 
 
@@ -80,6 +75,7 @@ static void litmus_dummy_finish_switch(struct task_struct * prev)
 
 static struct task_struct* litmus_dummy_schedule(struct task_struct * prev)
 {
+	sched_state_task_picked();
 	return NULL;
 }
 
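The rewritten preempt_if_preemptable() funnels every outcome through a single reschedule flag and litmus_reschedule(). Its decision logic condenses to the following side-effect-annotated sketch (an illustrative re-statement of the function above, not a replacement for it; the is_*_np() predicates are the LITMUS^RT non-preemptive-section checks):

	/* Returns 1 iff litmus_reschedule(cpu) should be invoked. */
	static int should_reschedule(struct task_struct *t, int local)
	{
		if (!t)
			return 1;	/* CPU runs background work: always preempt */

		if (local) {
			if (is_user_np(t)) {
				request_exit_np(t);	/* poke userspace np-section */
				return 0;
			}
			/* preempt unless in a kernel-level np-section */
			return !is_kernel_np(t);
		}

		/* remote: notify a user-level np-section, then IPI only if
		 * the task might have raced out of its critical section */
		if (is_user_np(t)) {
			request_exit_np(t);
			mb();
		}
		return !is_np(t) && !is_kernel_np(t);
	}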
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index 73f64730bd5e..b89823d5c026 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -12,14 +12,14 @@
 #include <linux/sched.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/module.h>
 
 #include <litmus/litmus.h>
 #include <litmus/jobs.h>
+#include <litmus/preempt.h>
 #include <litmus/sched_plugin.h>
 #include <litmus/edf_common.h>
-
+#include <litmus/sched_trace.h>
 
 typedef struct {
 	rt_domain_t 		domain;
@@ -109,7 +109,7 @@ static void psnedf_tick(struct task_struct *t)
 
 	if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
 		if (!is_np(t)) {
-			set_tsk_need_resched(t);
+			litmus_reschedule_local();
 			TRACE("psnedf_scheduler_tick: "
 			      "%d is preemptable "
 			      " => FORCE_RESCHED\n", t->pid);
@@ -205,6 +205,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
 	}
 
 	pedf->scheduled = next;
+	sched_state_task_picked();
 	raw_spin_unlock(&pedf->slock);
 
 	return next;
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 39a543e22d41..e5098ddb1ec9 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -18,7 +18,7 @@
 
 /* set MAJOR to 0 to have it dynamically assigned */
 #define FT_TASK_TRACE_MAJOR	253
-#define NO_EVENTS		4096 /* this is a buffer of 12 4k pages per CPU */
+#define NO_EVENTS		(1 << CONFIG_SCHED_TASK_TRACE_SHIFT)
 
 #define now() litmus_clock()
 
@@ -41,6 +41,9 @@ static int __init init_sched_task_trace(void)
 {
 	struct local_buffer* buf;
 	int i, ok = 0;
+	printk("Allocated %u sched_trace_xxx() events per CPU "
+	       "(buffer size: %d bytes)\n",
+	       NO_EVENTS, (int) sizeof(struct local_buffer));
 	ftdev_init(&st_dev, THIS_MODULE);
 	for (i = 0; i < NR_CPUS; i++) {
 		buf = &per_cpu(st_event_buffer, i);
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c index 8051b51a2022..f4171fddbbb1 100644 --- a/litmus/sched_trace.c +++ b/litmus/sched_trace.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * sched_trace.c -- record scheduling events to a byte stream. | 2 | * sched_trace.c -- record scheduling events to a byte stream. |
3 | */ | 3 | */ |
4 | #include <linux/spinlock.h> | 4 | #include <linux/spinlock.h> |
5 | #include <linux/semaphore.h> | 5 | #include <linux/mutex.h> |
6 | 6 | ||
7 | #include <linux/fs.h> | 7 | #include <linux/fs.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
@@ -18,133 +18,23 @@ | |||
18 | 18 | ||
19 | #define SCHED_TRACE_NAME "litmus/log" | 19 | #define SCHED_TRACE_NAME "litmus/log" |
20 | 20 | ||
21 | /* Allocate a buffer of about 32k per CPU */ | 21 | /* Compute size of TRACE() buffer */ |
22 | #define LITMUS_TRACE_BUF_PAGES 8 | 22 | #define LITMUS_TRACE_BUF_SIZE (1 << CONFIG_SCHED_DEBUG_TRACE_SHIFT) |
23 | #define LITMUS_TRACE_BUF_SIZE (PAGE_SIZE * LITMUS_TRACE_BUF_PAGES * NR_CPUS) | ||
24 | 23 | ||
25 | /* Max length of one read from the buffer */ | 24 | /* Max length of one read from the buffer */ |
26 | #define MAX_READ_LEN (64 * 1024) | 25 | #define MAX_READ_LEN (64 * 1024) |
27 | 26 | ||
28 | /* Max length for one write --- from kernel --- to the buffer */ | 27 | /* Max length for one write --- by TRACE() --- to the buffer. This is used to |
28 | * allocate a per-cpu buffer for printf() formatting. */ | ||
29 | #define MSG_SIZE 255 | 29 | #define MSG_SIZE 255 |
30 | 30 | ||
31 | /* Inner ring buffer structure */ | ||
32 | typedef struct { | ||
33 | rwlock_t del_lock; | ||
34 | 31 | ||
35 | /* the buffer */ | 32 | static DEFINE_MUTEX(reader_mutex); |
36 | struct kfifo kfifo; | 33 | static atomic_t reader_cnt = ATOMIC_INIT(0); |
37 | } ring_buffer_t; | 34 | static DEFINE_KFIFO(debug_buffer, char, LITMUS_TRACE_BUF_SIZE); |
38 | 35 | ||
39 | /* Main buffer structure */ | ||
40 | typedef struct { | ||
41 | ring_buffer_t buf; | ||
42 | atomic_t reader_cnt; | ||
43 | struct semaphore reader_mutex; | ||
44 | } trace_buffer_t; | ||
45 | 36 | ||
46 | |||
47 | /* | ||
48 | * Inner buffer management functions | ||
49 | */ | ||
50 | void rb_init(ring_buffer_t* buf) | ||
51 | { | ||
52 | rwlock_init(&buf->del_lock); | ||
53 | } | ||
54 | |||
55 | int rb_alloc_buf(ring_buffer_t* buf, unsigned int size) | ||
56 | { | ||
57 | unsigned long flags; | ||
58 | int ret = 0; | ||
59 | |||
60 | write_lock_irqsave(&buf->del_lock, flags); | ||
61 | |||
62 | /* kfifo size must be a power of 2 | ||
63 | * atm kfifo alloc is automatically rounding the size | ||
64 | */ | ||
65 | ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC); | ||
66 | |||
67 | write_unlock_irqrestore(&buf->del_lock, flags); | ||
68 | |||
69 | if(ret < 0) | ||
70 | printk(KERN_ERR "kfifo_alloc failed\n"); | ||
71 | |||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | int rb_free_buf(ring_buffer_t* buf) | ||
76 | { | ||
77 | unsigned long flags; | ||
78 | |||
79 | write_lock_irqsave(&buf->del_lock, flags); | ||
80 | |||
81 | BUG_ON(!kfifo_initialized(&buf->kfifo)); | ||
82 | kfifo_free(&buf->kfifo); | ||
83 | |||
84 | write_unlock_irqrestore(&buf->del_lock, flags); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Assumption: concurrent writes are serialized externally | ||
91 | * | ||
92 | * Will only succeed if there is enough space for all len bytes. | ||
93 | */ | ||
94 | int rb_put(ring_buffer_t* buf, char* mem, size_t len) | ||
95 | { | ||
96 | unsigned long flags; | ||
97 | int error = 0; | ||
98 | |||
99 | read_lock_irqsave(&buf->del_lock, flags); | ||
100 | |||
101 | if (!kfifo_initialized(&buf->kfifo)) { | ||
102 | error = -ENODEV; | ||
103 | goto out; | ||
104 | } | ||
105 | |||
106 | if((kfifo_in(&buf->kfifo, mem, len)) < len) { | ||
107 | error = -ENOMEM; | ||
108 | goto out; | ||
109 | } | ||
110 | |||
111 | out: | ||
112 | read_unlock_irqrestore(&buf->del_lock, flags); | ||
113 | return error; | ||
114 | } | ||
115 | |||
116 | /* Assumption: concurrent reads are serialized externally */ | ||
117 | int rb_get(ring_buffer_t* buf, char* mem, size_t len) | ||
118 | { | ||
119 | unsigned long flags; | ||
120 | int error = 0; | ||
121 | |||
122 | read_lock_irqsave(&buf->del_lock, flags); | ||
123 | if (!kfifo_initialized(&buf->kfifo)) { | ||
124 | error = -ENODEV; | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len); | ||
129 | |||
130 | out: | ||
131 | read_unlock_irqrestore(&buf->del_lock, flags); | ||
132 | return error; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Device Driver management | ||
137 | */ | ||
138 | static DEFINE_RAW_SPINLOCK(log_buffer_lock); | 37 | static DEFINE_RAW_SPINLOCK(log_buffer_lock); |
139 | static trace_buffer_t log_buffer; | ||
140 | |||
141 | static void init_log_buffer(void) | ||
142 | { | ||
143 | rb_init(&log_buffer.buf); | ||
144 | atomic_set(&log_buffer.reader_cnt,0); | ||
145 | init_MUTEX(&log_buffer.reader_mutex); | ||
146 | } | ||
147 | |||
148 | static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); | 38 | static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); |
149 | 39 | ||
150 | /* | 40 | /* |
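The hunk above is the heart of this cleanup: the hand-rolled ring_buffer_t/trace_buffer_t wrapper, its del_lock, and the alloc/free pair are all replaced by a single statically declared kfifo. A minimal sketch of the same pattern, assuming the 2.6.36 kfifo API (names here are illustrative, not from the patch):

    #include <linux/kfifo.h>
    #include <linux/spinlock.h>

    #define EXAMPLE_BUF_SIZE 4096    /* DEFINE_KFIFO requires a power of 2 */

    /* statically allocated: nothing to kfifo_alloc() on first open or
     * kfifo_free() on last release, so no del_lock is needed either */
    static DEFINE_KFIFO(example_buf, char, EXAMPLE_BUF_SIZE);
    static DEFINE_SPINLOCK(example_lock);

    static void example_put(const char *msg, size_t len)
    {
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* kfifo_in() copies at most len bytes; excess is silently dropped */
        kfifo_in(&example_buf, msg, len);
        spin_unlock_irqrestore(&example_lock, flags);
    }

The power-of-2 requirement the old code left to kfifo_alloc() at runtime is now caught when the buffer is declared.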
@@ -163,6 +53,10 @@ void sched_trace_log_message(const char* fmt, ...) | |||
163 | size_t len; | 53 | size_t len; |
164 | char* buf; | 54 | char* buf; |
165 | 55 | ||
56 | /* early exit if nobody is listening */ | ||
57 | if (!atomic_read(&reader_cnt)) | ||
58 | return; | ||
59 | |||
166 | va_start(args, fmt); | 60 | va_start(args, fmt); |
167 | local_irq_save(flags); | 61 | local_irq_save(flags); |
168 | 62 | ||
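The new reader_cnt check gives TRACE() a cheap early exit: when no reader has litmus/log open, messages are dropped before any formatting or locking happens. A sketch of the gate with illustrative names; the check is intentionally racy, so a message logged while a reader is still opening the device may be lost, which is acceptable for a debug channel:

    static atomic_t example_readers = ATOMIC_INIT(0);

    void example_log(const char *fmt, ...)
    {
        /* nobody listening: skip vscnprintf() and the spinlock entirely */
        if (!atomic_read(&example_readers))
            return;

        /* ... format and enqueue as sched_trace_log_message() does ... */
    }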
@@ -171,32 +65,33 @@ void sched_trace_log_message(const char* fmt, ...) | |||
171 | len = vscnprintf(buf, MSG_SIZE, fmt, args); | 65 | len = vscnprintf(buf, MSG_SIZE, fmt, args); |
172 | 66 | ||
173 | raw_spin_lock(&log_buffer_lock); | 67 | raw_spin_lock(&log_buffer_lock); |
174 | /* Don't copy the trailing null byte; we don't want null bytes | 68 | /* Don't copy the trailing null byte; we don't want null bytes in a
175 | * in a text file. | 69 | * text file. |
176 | */ | 70 | */ |
177 | rb_put(&log_buffer.buf, buf, len); | 71 | kfifo_in(&debug_buffer, buf, len); |
178 | raw_spin_unlock(&log_buffer_lock); | 72 | raw_spin_unlock(&log_buffer_lock); |
179 | 73 | ||
180 | local_irq_restore(flags); | 74 | local_irq_restore(flags); |
181 | va_end(args); | 75 | va_end(args); |
182 | } | 76 | } |
183 | 77 | ||
78 | |||
184 | /* | 79 | /* |
185 | * log_read - Read the trace buffer | 80 | * log_read - Read the trace buffer |
186 | * | 81 | * |
187 | * This function is called as a file operation from userspace. | 82 | * This function is called as a file operation from userspace. |
188 | * Readers can sleep. Access is serialized through reader_mutex | 83 | * Readers can sleep. Access is serialized through reader_mutex |
189 | */ | 84 | */ |
190 | static ssize_t log_read(struct file *filp, char __user *to, size_t len, | 85 | static ssize_t log_read(struct file *filp, |
191 | loff_t *f_pos) | 86 | char __user *to, size_t len, |
87 | loff_t *f_pos) | ||
192 | { | 88 | { |
193 | /* we ignore f_pos; this is strictly sequential */ | 89 | /* we ignore f_pos; this is strictly sequential */
194 | 90 | ||
195 | ssize_t error = -EINVAL; | 91 | ssize_t error = -EINVAL; |
196 | char* mem; | 92 | char* mem; |
197 | trace_buffer_t *tbuf = filp->private_data; | ||
198 | 93 | ||
199 | if (down_interruptible(&tbuf->reader_mutex)) { | 94 | if (mutex_lock_interruptible(&reader_mutex)) { |
200 | error = -ERESTARTSYS; | 95 | error = -ERESTARTSYS; |
201 | goto out; | 96 | goto out; |
202 | } | 97 | } |
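Note the lock ordering in the rewritten write path: local_irq_save() comes before the per-CPU buffer is touched, not just before the kfifo. A sketch of why, assuming the 2.6.36 per-CPU API (the function name is illustrative):

    static void example_log_locked(const char *fmt, va_list args)
    {
        unsigned long flags;
        size_t len;
        char *buf;

        /* IRQs must be off before fmt_buffer is used: an interrupt
         * handler calling TRACE() on this CPU would otherwise scribble
         * over the buffer in the middle of vscnprintf() */
        local_irq_save(flags);
        buf = __get_cpu_var(fmt_buffer);
        len = vscnprintf(buf, MSG_SIZE, fmt, args);

        /* the raw spinlock only needs to cover the shared kfifo */
        raw_spin_lock(&log_buffer_lock);
        kfifo_in(&debug_buffer, buf, len);
        raw_spin_unlock(&log_buffer_lock);

        local_irq_restore(flags);
    }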
@@ -210,14 +105,14 @@ static ssize_t log_read(struct file *filp, char __user *to, size_t len, | |||
210 | goto out_unlock; | 105 | goto out_unlock; |
211 | } | 106 | } |
212 | 107 | ||
213 | error = rb_get(&tbuf->buf, mem, len); | 108 | error = kfifo_out(&debug_buffer, mem, len); |
214 | while (!error) { | 109 | while (!error) { |
215 | set_current_state(TASK_INTERRUPTIBLE); | 110 | set_current_state(TASK_INTERRUPTIBLE); |
216 | schedule_timeout(110); | 111 | schedule_timeout(110); |
217 | if (signal_pending(current)) | 112 | if (signal_pending(current)) |
218 | error = -ERESTARTSYS; | 113 | error = -ERESTARTSYS; |
219 | else | 114 | else |
220 | error = rb_get(&tbuf->buf, mem, len); | 115 | error = kfifo_out(&debug_buffer, mem, len); |
221 | } | 116 | } |
222 | 117 | ||
223 | if (error > 0 && copy_to_user(to, mem, error)) | 118 | if (error > 0 && copy_to_user(to, mem, error)) |
@@ -225,7 +120,7 @@ static ssize_t log_read(struct file *filp, char __user *to, size_t len, | |||
225 | 120 | ||
226 | kfree(mem); | 121 | kfree(mem); |
227 | out_unlock: | 122 | out_unlock: |
228 | up(&tbuf->reader_mutex); | 123 | mutex_unlock(&reader_mutex); |
229 | out: | 124 | out: |
230 | return error; | 125 | return error; |
231 | } | 126 | } |
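log_read() polls the kfifo in ~110-jiffy intervals rather than blocking on a wait queue, which keeps the TRACE() hot path free of wake_up() calls. For contrast, a hypothetical wait-queue variant might look like the sketch below; debug_wait and read_blocking are illustrative, not part of the patch, and it assumes readers remain serialized by reader_mutex:

    static DECLARE_WAIT_QUEUE_HEAD(debug_wait);

    static ssize_t read_blocking(char *mem, size_t len)
    {
        /* sleep until at least one byte is buffered or a signal arrives */
        if (wait_event_interruptible(debug_wait, !kfifo_is_empty(&debug_buffer)))
            return -ERESTARTSYS;

        return kfifo_out(&debug_buffer, mem, len);
    }

Every writer would then need a wake_up(&debug_wait) after kfifo_in(), which is exactly the cost the polling design avoids.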
@@ -243,36 +138,23 @@ extern int trace_recurse; | |||
243 | static int log_open(struct inode *in, struct file *filp) | 138 | static int log_open(struct inode *in, struct file *filp) |
244 | { | 139 | { |
245 | int error = -EINVAL; | 140 | int error = -EINVAL; |
246 | trace_buffer_t* tbuf; | ||
247 | |||
248 | tbuf = &log_buffer; | ||
249 | 141 | ||
250 | if (down_interruptible(&tbuf->reader_mutex)) { | 142 | if (mutex_lock_interruptible(&reader_mutex)) { |
251 | error = -ERESTARTSYS; | 143 | error = -ERESTARTSYS; |
252 | goto out; | 144 | goto out; |
253 | } | 145 | } |
254 | 146 | ||
255 | /* first open must allocate buffers */ | 147 | atomic_inc(&reader_cnt); |
256 | if (atomic_inc_return(&tbuf->reader_cnt) == 1) { | ||
257 | if ((error = rb_alloc_buf(&tbuf->buf, LITMUS_TRACE_BUF_SIZE))) | ||
258 | { | ||
259 | atomic_dec(&tbuf->reader_cnt); | ||
260 | goto out_unlock; | ||
261 | } | ||
262 | } | ||
263 | |||
264 | error = 0; | 148 | error = 0; |
265 | filp->private_data = tbuf; | ||
266 | 149 | ||
267 | printk(KERN_DEBUG | 150 | printk(KERN_DEBUG |
268 | "sched_trace kfifo with buffer starting at: 0x%p\n", | 151 | "sched_trace kfifo with buffer starting at: 0x%p\n", |
269 | (tbuf->buf.kfifo).buf); | 152 | debug_buffer.buf); |
270 | 153 | ||
271 | /* override printk() */ | 154 | /* override printk() */ |
272 | trace_override++; | 155 | trace_override++; |
273 | 156 | ||
274 | out_unlock: | 157 | mutex_unlock(&reader_mutex); |
275 | up(&tbuf->reader_mutex); | ||
276 | out: | 158 | out: |
277 | return error; | 159 | return error; |
278 | } | 160 | } |
@@ -280,26 +162,20 @@ static int log_open(struct inode *in, struct file *filp) | |||
280 | static int log_release(struct inode *in, struct file *filp) | 162 | static int log_release(struct inode *in, struct file *filp) |
281 | { | 163 | { |
282 | int error = -EINVAL; | 164 | int error = -EINVAL; |
283 | trace_buffer_t* tbuf = filp->private_data; | ||
284 | |||
285 | BUG_ON(!filp->private_data); | ||
286 | 165 | ||
287 | if (down_interruptible(&tbuf->reader_mutex)) { | 166 | if (mutex_lock_interruptible(&reader_mutex)) { |
288 | error = -ERESTARTSYS; | 167 | error = -ERESTARTSYS; |
289 | goto out; | 168 | goto out; |
290 | } | 169 | } |
291 | 170 | ||
292 | /* last release must deallocate buffers */ | 171 | atomic_dec(&reader_cnt); |
293 | if (atomic_dec_return(&tbuf->reader_cnt) == 0) { | ||
294 | error = rb_free_buf(&tbuf->buf); | ||
295 | } | ||
296 | 172 | ||
297 | /* release printk() overriding */ | 173 | /* release printk() overriding */ |
298 | trace_override--; | 174 | trace_override--; |
299 | 175 | ||
300 | printk(KERN_DEBUG "sched_trace kfifo released\n"); | 176 | printk(KERN_DEBUG "sched_trace kfifo released\n"); |
301 | 177 | ||
302 | up(&tbuf->reader_mutex); | 178 | mutex_unlock(&reader_mutex); |
303 | out: | 179 | out: |
304 | return error; | 180 | return error; |
305 | } | 181 | } |
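With the statically sized kfifo there is nothing left to allocate on first open or free on last release, so open/release shrink to bookkeeping: bump reader_cnt for the writers' early-exit check and toggle the printk() override. A condensed sketch of the pairing, with error handling as in the patch:

    static int example_open(struct inode *in, struct file *filp)
    {
        if (mutex_lock_interruptible(&reader_mutex))
            return -ERESTARTSYS;
        atomic_inc(&reader_cnt);    /* writers start enqueueing */
        trace_override++;           /* redirect printk() into TRACE() */
        mutex_unlock(&reader_mutex);
        return 0;
    }

    static int example_release(struct inode *in, struct file *filp)
    {
        if (mutex_lock_interruptible(&reader_mutex))
            return -ERESTARTSYS;
        atomic_dec(&reader_cnt);
        trace_override--;
        mutex_unlock(&reader_mutex);
        return 0;
    }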
@@ -333,7 +209,7 @@ void dump_trace_buffer(int max) | |||
333 | /* potential, but very unlikely, race... */ | 209 | /* potential, but very unlikely, race... */ |
334 | trace_recurse = 1; | 210 | trace_recurse = 1; |
335 | while ((max == 0 || count++ < max) && | 211 | while ((max == 0 || count++ < max) && |
336 | (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) { | 212 | (len = kfifo_out(&debug_buffer, line, sizeof(line) - 1)) > 0) {
337 | line[len] = '\0'; | 213 | line[len] = '\0'; |
338 | printk("%s", line); | 214 | printk("%s", line); |
339 | } | 215 | } |
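The sizeof expression in the drain loop above deserves care: the - 1 must stay outside the parentheses, since sizeof(line - 1) would measure a decayed char * rather than the array. A compile-time illustration; the helper name and the 80-byte size are assumptions for the example:

    static inline void sizeof_pitfall(void)
    {
        char line[80];

        /* parenthesized array name: size of the buffer itself */
        BUILD_BUG_ON(sizeof(line) - 1 != 79);
        /* (line - 1) decays to char *, so only a pointer is measured */
        BUILD_BUG_ON(sizeof(line - 1) != sizeof(char *));
    }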
@@ -355,7 +231,6 @@ static struct sysrq_key_op sysrq_dump_trace_buffer_op = { | |||
355 | static int __init init_sched_trace(void) | 231 | static int __init init_sched_trace(void) |
356 | { | 232 | { |
357 | printk("Initializing TRACE() device\n"); | 233 | printk("Initializing TRACE() device\n"); |
358 | init_log_buffer(); | ||
359 | 234 | ||
360 | #ifdef CONFIG_MAGIC_SYSRQ | 235 | #ifdef CONFIG_MAGIC_SYSRQ |
361 | /* offer some debugging help */ | 236 | /* offer some debugging help */ |
@@ -365,7 +240,6 @@ static int __init init_sched_trace(void) | |||
365 | printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); | 240 | printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); |
366 | #endif | 241 | #endif |
367 | 242 | ||
368 | |||
369 | return misc_register(&litmus_log_dev); | 243 | return misc_register(&litmus_log_dev); |
370 | } | 244 | } |
371 | 245 | ||
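For reference, the registration pattern init_sched_trace() uses: the TRACE() channel is exposed as a misc character device, plus an optional magic-sysrq key so the buffer can be dumped from the console even when userspace is wedged. A sketch with illustrative field values; log_fops stands in for the file_operations wired to log_open/log_read/log_release:

    static struct miscdevice example_log_dev = {
        .name  = SCHED_TRACE_NAME,    /* "litmus/log" */
        .minor = MISC_DYNAMIC_MINOR,
        .fops  = &log_fops,
    };

    static int __init example_init(void)
    {
    #ifdef CONFIG_MAGIC_SYSRQ
        if (register_sysrq_key('y', &sysrq_dump_trace_buffer_op))
            printk("Could not register dump-trace-buffer(Y) magic sysrq.\n");
    #endif
        return misc_register(&example_log_dev);
    }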
diff --git a/litmus/srp.c b/litmus/srp.c index 71639b991630..cb577598ce3e 100644 --- a/litmus/srp.c +++ b/litmus/srp.c | |||
@@ -3,12 +3,12 @@ | |||
3 | /* ************************************************************************** */ | 3 | /* ************************************************************************** */ |
4 | 4 | ||
5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
6 | #include <linux/sched.h> | ||
6 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
8 | |||
7 | #include <litmus/litmus.h> | 9 | #include <litmus/litmus.h> |
8 | #include <litmus/sched_plugin.h> | 10 | #include <litmus/sched_plugin.h> |
9 | |||
10 | #include <litmus/fdso.h> | 11 | #include <litmus/fdso.h> |
11 | |||
12 | #include <litmus/trace.h> | 12 | #include <litmus/trace.h> |
13 | 13 | ||
14 | 14 | ||
diff --git a/litmus/trace.c b/litmus/trace.c index 440376998dc9..b3a6b47aad6e 100644 --- a/litmus/trace.c +++ b/litmus/trace.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/sched.h> | ||
1 | #include <linux/module.h> | 2 | #include <linux/module.h> |
2 | 3 | ||
3 | #include <litmus/ftdev.h> | 4 | #include <litmus/ftdev.h> |