diff options
author | Glenn <gelliott@bonham.cs.unc.edu> | 2010-11-16 15:41:21 -0500 |
---|---|---|
committer | Glenn <gelliott@bonham.cs.unc.edu> | 2010-11-16 15:41:21 -0500 |
commit | 1b9caf2f95c97e27c91372bd41f79d3e96a99e62 (patch) | |
tree | a02ed0f31998ede99372964549e00216f0733e57 /litmus | |
parent | 9a4b9c5c9b2af2f69e7eb6d69893a767701b781b (diff) | |
parent | 1726017e944d0086f14f867befbf5ebf07adc7dd (diff) |
Merge branch 'wip-merge-2.6.36' of ssh://cvs.cs.unc.edu/cvs/proj/litmus/repo/litmus2010 into wip-merge-2.6.36
Diffstat (limited to 'litmus')
-rw-r--r-- | litmus/Kconfig | 45 | ||||
-rw-r--r-- | litmus/Makefile | 1 | ||||
-rw-r--r-- | litmus/budget.c | 4 | ||||
-rw-r--r-- | litmus/litmus.c | 8 | ||||
-rw-r--r-- | litmus/litmus_proc.c | 1 | ||||
-rw-r--r-- | litmus/preempt.c | 131 | ||||
-rw-r--r-- | litmus/sched_cedf.c | 10 | ||||
-rw-r--r-- | litmus/sched_gsn_edf.c | 24 | ||||
-rw-r--r-- | litmus/sched_litmus.c | 5 | ||||
-rw-r--r-- | litmus/sched_pfair.c | 11 | ||||
-rw-r--r-- | litmus/sched_plugin.c | 46 | ||||
-rw-r--r-- | litmus/sched_psn_edf.c | 7 | ||||
-rw-r--r-- | litmus/sched_task_trace.c | 5 | ||||
-rw-r--r-- | litmus/sched_trace.c | 190 | ||||
-rw-r--r-- | litmus/srp.c | 4 | ||||
-rw-r--r-- | litmus/trace.c | 1 |
16 files changed, 263 insertions, 230 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig index 9888589ef126..d62c417f261e 100644 --- a/litmus/Kconfig +++ b/litmus/Kconfig | |||
@@ -108,6 +108,26 @@ config SCHED_TASK_TRACE | |||
108 | Say Yes for debugging. | 108 | Say Yes for debugging. |
109 | Say No for overhead tracing. | 109 | Say No for overhead tracing. |
110 | 110 | ||
111 | config SCHED_TASK_TRACE_SHIFT | ||
112 | int "Buffer size for sched_trace_xxx() events" | ||
113 | depends on SCHED_TASK_TRACE | ||
114 | range 8 13 | ||
115 | default 9 | ||
116 | help | ||
117 | |||
118 | Select the buffer size of sched_trace_xxx() events as a power of two. | ||
119 | These buffers are statically allocated as per-CPU data. Each event | ||
120 | requires 24 bytes storage plus one additional flag byte. Too large | ||
121 | buffers can cause issues with the per-cpu allocator (and waste | ||
122 | memory). Too small buffers can cause scheduling events to be lost. The | ||
123 | "right" size is workload dependent and depends on the number of tasks, | ||
124 | each task's period, each task's number of suspensions, and how often | ||
125 | the buffer is flushed. | ||
126 | |||
127 | Examples: 12 => 4k events | ||
128 | 10 => 1k events | ||
129 | 8 => 512 events | ||
130 | |||
111 | config SCHED_OVERHEAD_TRACE | 131 | config SCHED_OVERHEAD_TRACE |
112 | bool "Record timestamps for overhead measurements" | 132 | bool "Record timestamps for overhead measurements" |
113 | depends on FEATHER_TRACE | 133 | depends on FEATHER_TRACE |
@@ -129,6 +149,31 @@ config SCHED_DEBUG_TRACE | |||
129 | Say Yes for debugging. | 149 | Say Yes for debugging. |
130 | Say No for overhead tracing. | 150 | Say No for overhead tracing. |
131 | 151 | ||
152 | config SCHED_DEBUG_TRACE_SHIFT | ||
153 | int "Buffer size for TRACE() buffer" | ||
154 | depends on SCHED_DEBUG_TRACE | ||
155 | range 14 22 | ||
156 | default 18 | ||
157 | help | ||
158 | |||
159 | Select the amount of memory needed per for the TRACE() buffer, as a | ||
160 | power of two. The TRACE() buffer is global and statically allocated. If | ||
161 | the buffer is too small, there will be holes in the TRACE() log if the | ||
162 | buffer-flushing task is starved. | ||
163 | |||
164 | The default should be sufficient for most systems. Increase the buffer | ||
165 | size if the log contains holes. Reduce the buffer size when running on | ||
166 | a memory-constrained system. | ||
167 | |||
168 | Examples: 14 => 16KB | ||
169 | 18 => 256KB | ||
170 | 20 => 1MB | ||
171 | |||
172 | This buffer is exported to usespace using a misc device as | ||
173 | 'litmus/log'. On a system with default udev rules, a corresponding | ||
174 | character device node should be created at /dev/litmus/log. The buffer | ||
175 | can be flushed using cat, e.g., 'cat /dev/litmus/log > my_log_file.txt'. | ||
176 | |||
132 | endmenu | 177 | endmenu |
133 | 178 | ||
134 | endmenu | 179 | endmenu |
diff --git a/litmus/Makefile b/litmus/Makefile index 7bd1abdcb84a..b7366b530749 100644 --- a/litmus/Makefile +++ b/litmus/Makefile | |||
@@ -3,6 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y = sched_plugin.o litmus.o \ | 5 | obj-y = sched_plugin.o litmus.o \ |
6 | preempt.o \ | ||
6 | litmus_proc.o \ | 7 | litmus_proc.o \ |
7 | budget.o \ | 8 | budget.o \ |
8 | jobs.o \ | 9 | jobs.o \ |
diff --git a/litmus/budget.c b/litmus/budget.c index b99177a26313..310e9a3d4172 100644 --- a/litmus/budget.c +++ b/litmus/budget.c | |||
@@ -1,7 +1,9 @@ | |||
1 | #include <linux/sched.h> | ||
1 | #include <linux/percpu.h> | 2 | #include <linux/percpu.h> |
2 | #include <linux/hrtimer.h> | 3 | #include <linux/hrtimer.h> |
3 | 4 | ||
4 | #include <litmus/litmus.h> | 5 | #include <litmus/litmus.h> |
6 | #include <litmus/preempt.h> | ||
5 | 7 | ||
6 | struct enforcement_timer { | 8 | struct enforcement_timer { |
7 | /* The enforcement timer is used to accurately police | 9 | /* The enforcement timer is used to accurately police |
@@ -23,7 +25,7 @@ static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer) | |||
23 | TRACE("enforcement timer fired.\n"); | 25 | TRACE("enforcement timer fired.\n"); |
24 | et->armed = 0; | 26 | et->armed = 0; |
25 | /* activate scheduler */ | 27 | /* activate scheduler */ |
26 | set_tsk_need_resched(current); | 28 | litmus_reschedule_local(); |
27 | local_irq_restore(flags); | 29 | local_irq_restore(flags); |
28 | 30 | ||
29 | return HRTIMER_NORESTART; | 31 | return HRTIMER_NORESTART; |
diff --git a/litmus/litmus.c b/litmus/litmus.c index 99c35ac99870..0756d0156f8f 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -6,20 +6,16 @@ | |||
6 | #include <asm/uaccess.h> | 6 | #include <asm/uaccess.h> |
7 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
8 | #include <linux/sysrq.h> | 8 | #include <linux/sysrq.h> |
9 | 9 | #include <linux/sched.h> | |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | 12 | ||
13 | #include <litmus/litmus.h> | 13 | #include <litmus/litmus.h> |
14 | #include <linux/sched.h> | ||
15 | |||
16 | #include <litmus/bheap.h> | 14 | #include <litmus/bheap.h> |
17 | |||
18 | #include <litmus/trace.h> | 15 | #include <litmus/trace.h> |
19 | |||
20 | #include <litmus/rt_domain.h> | 16 | #include <litmus/rt_domain.h> |
21 | |||
22 | #include <litmus/litmus_proc.h> | 17 | #include <litmus/litmus_proc.h> |
18 | #include <litmus/sched_trace.h> | ||
23 | 19 | ||
24 | /* Number of RT tasks that exist in the system */ | 20 | /* Number of RT tasks that exist in the system */ |
25 | atomic_t rt_task_count = ATOMIC_INIT(0); | 21 | atomic_t rt_task_count = ATOMIC_INIT(0); |
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c index c10a6a6d3975..81ea5c35d291 100644 --- a/litmus/litmus_proc.c +++ b/litmus/litmus_proc.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * litmus_proc.c -- Implementation of the /proc/litmus directory tree. | 2 | * litmus_proc.c -- Implementation of the /proc/litmus directory tree. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/sched.h> | ||
5 | #include <linux/uaccess.h> | 6 | #include <linux/uaccess.h> |
6 | 7 | ||
7 | #include <litmus/litmus.h> | 8 | #include <litmus/litmus.h> |
diff --git a/litmus/preempt.c b/litmus/preempt.c new file mode 100644 index 000000000000..ebe2e3461895 --- /dev/null +++ b/litmus/preempt.c | |||
@@ -0,0 +1,131 @@ | |||
1 | #include <linux/sched.h> | ||
2 | |||
3 | #include <litmus/litmus.h> | ||
4 | #include <litmus/preempt.h> | ||
5 | |||
6 | /* The rescheduling state of each processor. | ||
7 | */ | ||
8 | DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); | ||
9 | |||
10 | void sched_state_will_schedule(struct task_struct* tsk) | ||
11 | { | ||
12 | /* Litmus hack: we only care about processor-local invocations of | ||
13 | * set_tsk_need_resched(). We can't reliably set the flag remotely | ||
14 | * since it might race with other updates to the scheduling state. We | ||
15 | * can't rely on the runqueue lock protecting updates to the sched | ||
16 | * state since processors do not acquire the runqueue locks for all | ||
17 | * updates to the sched state (to avoid acquiring two runqueue locks at | ||
18 | * the same time). Further, if tsk is residing on a remote processor, | ||
19 | * then that processor doesn't actually know yet that it is going to | ||
20 | * reschedule; it still must receive an IPI (unless a local invocation | ||
21 | * races). | ||
22 | */ | ||
23 | if (likely(task_cpu(tsk) == smp_processor_id())) { | ||
24 | VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE); | ||
25 | if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) | ||
26 | set_sched_state(PICKED_WRONG_TASK); | ||
27 | else | ||
28 | set_sched_state(WILL_SCHEDULE); | ||
29 | } else | ||
30 | /* Litmus tasks should never be subject to a remote | ||
31 | * set_tsk_need_resched(). */ | ||
32 | BUG_ON(is_realtime(tsk)); | ||
33 | TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", | ||
34 | __builtin_return_address(0)); | ||
35 | } | ||
36 | |||
37 | /* Called by the IPI handler after another CPU called smp_send_resched(). */ | ||
38 | void sched_state_ipi(void) | ||
39 | { | ||
40 | /* If the IPI was slow, we might be in any state right now. The IPI is | ||
41 | * only meaningful if we are in SHOULD_SCHEDULE. */ | ||
42 | if (is_in_sched_state(SHOULD_SCHEDULE)) { | ||
43 | /* Cause scheduler to be invoked. | ||
44 | * This will cause a transition to WILL_SCHEDULE. */ | ||
45 | set_tsk_need_resched(current); | ||
46 | TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", | ||
47 | current->comm, current->pid); | ||
48 | } else { | ||
49 | /* ignore */ | ||
50 | TRACE_STATE("ignoring IPI in state %x (%s)\n", | ||
51 | get_sched_state(), | ||
52 | sched_state_name(get_sched_state())); | ||
53 | } | ||
54 | } | ||
55 | |||
56 | /* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must | ||
57 | * hold the lock that is used to serialize scheduling decisions. */ | ||
58 | void litmus_reschedule(int cpu) | ||
59 | { | ||
60 | int picked_transition_ok = 0; | ||
61 | int scheduled_transition_ok = 0; | ||
62 | |||
63 | /* The (remote) CPU could be in any state. */ | ||
64 | |||
65 | /* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU | ||
66 | * is not aware of the need to reschedule at this point. */ | ||
67 | |||
68 | /* is a context switch in progress? */ | ||
69 | if (cpu_is_in_sched_state(cpu, TASK_PICKED)) | ||
70 | picked_transition_ok = sched_state_transition_on( | ||
71 | cpu, TASK_PICKED, PICKED_WRONG_TASK); | ||
72 | |||
73 | if (!picked_transition_ok && | ||
74 | cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) { | ||
75 | /* We either raced with the end of the context switch, or the | ||
76 | * CPU was in TASK_SCHEDULED anyway. */ | ||
77 | scheduled_transition_ok = sched_state_transition_on( | ||
78 | cpu, TASK_SCHEDULED, SHOULD_SCHEDULE); | ||
79 | } | ||
80 | |||
81 | /* If the CPU was in state TASK_SCHEDULED, then we need to cause the | ||
82 | * scheduler to be invoked. */ | ||
83 | if (scheduled_transition_ok) { | ||
84 | if (smp_processor_id() == cpu) | ||
85 | set_tsk_need_resched(current); | ||
86 | else | ||
87 | smp_send_reschedule(cpu); | ||
88 | } | ||
89 | |||
90 | TRACE_STATE("%s picked-ok:%d sched-ok:%d\n", | ||
91 | __FUNCTION__, | ||
92 | picked_transition_ok, | ||
93 | scheduled_transition_ok); | ||
94 | } | ||
95 | |||
96 | void litmus_reschedule_local(void) | ||
97 | { | ||
98 | if (is_in_sched_state(TASK_PICKED)) | ||
99 | set_sched_state(PICKED_WRONG_TASK); | ||
100 | else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { | ||
101 | set_sched_state(WILL_SCHEDULE); | ||
102 | set_tsk_need_resched(current); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | #ifdef CONFIG_DEBUG_KERNEL | ||
107 | |||
108 | void sched_state_plugin_check(void) | ||
109 | { | ||
110 | if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) { | ||
111 | TRACE("!!!! plugin did not call sched_state_task_picked()!" | ||
112 | "Calling sched_state_task_picked() is mandatory---fix this.\n"); | ||
113 | set_sched_state(TASK_PICKED); | ||
114 | } | ||
115 | } | ||
116 | |||
117 | #define NAME_CHECK(x) case x: return #x | ||
118 | const char* sched_state_name(int s) | ||
119 | { | ||
120 | switch (s) { | ||
121 | NAME_CHECK(TASK_SCHEDULED); | ||
122 | NAME_CHECK(SHOULD_SCHEDULE); | ||
123 | NAME_CHECK(WILL_SCHEDULE); | ||
124 | NAME_CHECK(TASK_PICKED); | ||
125 | NAME_CHECK(PICKED_WRONG_TASK); | ||
126 | default: | ||
127 | return "UNKNOWN"; | ||
128 | }; | ||
129 | } | ||
130 | |||
131 | #endif | ||
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index a729d97535e9..111e4fb1c62b 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <litmus/litmus.h> | 35 | #include <litmus/litmus.h> |
36 | #include <litmus/jobs.h> | 36 | #include <litmus/jobs.h> |
37 | #include <litmus/preempt.h> | ||
37 | #include <litmus/sched_plugin.h> | 38 | #include <litmus/sched_plugin.h> |
38 | #include <litmus/edf_common.h> | 39 | #include <litmus/edf_common.h> |
39 | #include <litmus/sched_trace.h> | 40 | #include <litmus/sched_trace.h> |
@@ -209,12 +210,6 @@ static noinline void unlink(struct task_struct* t) | |||
209 | { | 210 | { |
210 | cpu_entry_t *entry; | 211 | cpu_entry_t *entry; |
211 | 212 | ||
212 | if (unlikely(!t)) { | ||
213 | TRACE_BUG_ON(!t); | ||
214 | return; | ||
215 | } | ||
216 | |||
217 | |||
218 | if (t->rt_param.linked_on != NO_CPU) { | 213 | if (t->rt_param.linked_on != NO_CPU) { |
219 | /* unlink */ | 214 | /* unlink */ |
220 | entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); | 215 | entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); |
@@ -341,7 +336,7 @@ static void cedf_tick(struct task_struct* t) | |||
341 | /* np tasks will be preempted when they become | 336 | /* np tasks will be preempted when they become |
342 | * preemptable again | 337 | * preemptable again |
343 | */ | 338 | */ |
344 | set_tsk_need_resched(t); | 339 | litmus_reschedule_local(); |
345 | set_will_schedule(); | 340 | set_will_schedule(); |
346 | TRACE("cedf_scheduler_tick: " | 341 | TRACE("cedf_scheduler_tick: " |
347 | "%d is preemptable " | 342 | "%d is preemptable " |
@@ -466,6 +461,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
466 | if (exists) | 461 | if (exists) |
467 | next = prev; | 462 | next = prev; |
468 | 463 | ||
464 | sched_state_task_picked(); | ||
469 | raw_spin_unlock(&cluster->lock); | 465 | raw_spin_unlock(&cluster->lock); |
470 | 466 | ||
471 | #ifdef WANT_ALL_SCHED_EVENTS | 467 | #ifdef WANT_ALL_SCHED_EVENTS |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index e101768740ad..e9c5e531b1ae 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <litmus/edf_common.h> | 18 | #include <litmus/edf_common.h> |
19 | #include <litmus/sched_trace.h> | 19 | #include <litmus/sched_trace.h> |
20 | 20 | ||
21 | #include <litmus/preempt.h> | ||
22 | |||
21 | #include <litmus/bheap.h> | 23 | #include <litmus/bheap.h> |
22 | 24 | ||
23 | #include <linux/module.h> | 25 | #include <linux/module.h> |
@@ -95,21 +97,12 @@ typedef struct { | |||
95 | int cpu; | 97 | int cpu; |
96 | struct task_struct* linked; /* only RT tasks */ | 98 | struct task_struct* linked; /* only RT tasks */ |
97 | struct task_struct* scheduled; /* only RT tasks */ | 99 | struct task_struct* scheduled; /* only RT tasks */ |
98 | atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
99 | struct bheap_node* hn; | 100 | struct bheap_node* hn; |
100 | } cpu_entry_t; | 101 | } cpu_entry_t; |
101 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | 102 | DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); |
102 | 103 | ||
103 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; | 104 | cpu_entry_t* gsnedf_cpus[NR_CPUS]; |
104 | 105 | ||
105 | #define set_will_schedule() \ | ||
106 | (atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 1)) | ||
107 | #define clear_will_schedule() \ | ||
108 | (atomic_set(&__get_cpu_var(gsnedf_cpu_entries).will_schedule, 0)) | ||
109 | #define test_will_schedule(cpu) \ | ||
110 | (atomic_read(&per_cpu(gsnedf_cpu_entries, cpu).will_schedule)) | ||
111 | |||
112 | |||
113 | /* the cpus queue themselves according to priority in here */ | 106 | /* the cpus queue themselves according to priority in here */ |
114 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; | 107 | static struct bheap_node gsnedf_heap_node[NR_CPUS]; |
115 | static struct bheap gsnedf_cpu_heap; | 108 | static struct bheap gsnedf_cpu_heap; |
@@ -217,11 +210,6 @@ static noinline void unlink(struct task_struct* t) | |||
217 | { | 210 | { |
218 | cpu_entry_t *entry; | 211 | cpu_entry_t *entry; |
219 | 212 | ||
220 | if (unlikely(!t)) { | ||
221 | TRACE_BUG_ON(!t); | ||
222 | return; | ||
223 | } | ||
224 | |||
225 | if (t->rt_param.linked_on != NO_CPU) { | 213 | if (t->rt_param.linked_on != NO_CPU) { |
226 | /* unlink */ | 214 | /* unlink */ |
227 | entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); | 215 | entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); |
@@ -341,8 +329,7 @@ static void gsnedf_tick(struct task_struct* t) | |||
341 | /* np tasks will be preempted when they become | 329 | /* np tasks will be preempted when they become |
342 | * preemptable again | 330 | * preemptable again |
343 | */ | 331 | */ |
344 | set_tsk_need_resched(t); | 332 | litmus_reschedule_local(); |
345 | set_will_schedule(); | ||
346 | TRACE("gsnedf_scheduler_tick: " | 333 | TRACE("gsnedf_scheduler_tick: " |
347 | "%d is preemptable " | 334 | "%d is preemptable " |
348 | " => FORCE_RESCHED\n", t->pid); | 335 | " => FORCE_RESCHED\n", t->pid); |
@@ -391,7 +378,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
391 | #endif | 378 | #endif |
392 | 379 | ||
393 | raw_spin_lock(&gsnedf_lock); | 380 | raw_spin_lock(&gsnedf_lock); |
394 | clear_will_schedule(); | ||
395 | 381 | ||
396 | /* sanity checking */ | 382 | /* sanity checking */ |
397 | BUG_ON(entry->scheduled && entry->scheduled != prev); | 383 | BUG_ON(entry->scheduled && entry->scheduled != prev); |
@@ -473,6 +459,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
473 | if (exists) | 459 | if (exists) |
474 | next = prev; | 460 | next = prev; |
475 | 461 | ||
462 | sched_state_task_picked(); | ||
463 | |||
476 | raw_spin_unlock(&gsnedf_lock); | 464 | raw_spin_unlock(&gsnedf_lock); |
477 | 465 | ||
478 | #ifdef WANT_ALL_SCHED_EVENTS | 466 | #ifdef WANT_ALL_SCHED_EVENTS |
@@ -780,7 +768,6 @@ static long gsnedf_activate_plugin(void) | |||
780 | for_each_online_cpu(cpu) { | 768 | for_each_online_cpu(cpu) { |
781 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 769 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
782 | bheap_node_init(&entry->hn, entry); | 770 | bheap_node_init(&entry->hn, entry); |
783 | atomic_set(&entry->will_schedule, 0); | ||
784 | entry->linked = NULL; | 771 | entry->linked = NULL; |
785 | entry->scheduled = NULL; | 772 | entry->scheduled = NULL; |
786 | #ifdef CONFIG_RELEASE_MASTER | 773 | #ifdef CONFIG_RELEASE_MASTER |
@@ -829,7 +816,6 @@ static int __init init_gsn_edf(void) | |||
829 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 816 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
830 | entry = &per_cpu(gsnedf_cpu_entries, cpu); | 817 | entry = &per_cpu(gsnedf_cpu_entries, cpu); |
831 | gsnedf_cpus[cpu] = entry; | 818 | gsnedf_cpus[cpu] = entry; |
832 | atomic_set(&entry->will_schedule, 0); | ||
833 | entry->cpu = cpu; | 819 | entry->cpu = cpu; |
834 | entry->hn = &gsnedf_heap_node[cpu]; | 820 | entry->hn = &gsnedf_heap_node[cpu]; |
835 | bheap_node_init(&entry->hn, entry); | 821 | bheap_node_init(&entry->hn, entry); |
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index 65873152e68f..e6952896dc4b 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <litmus/litmus.h> | 3 | #include <litmus/litmus.h> |
4 | #include <litmus/budget.h> | 4 | #include <litmus/budget.h> |
5 | #include <litmus/sched_plugin.h> | 5 | #include <litmus/sched_plugin.h> |
6 | #include <litmus/preempt.h> | ||
6 | 7 | ||
7 | static void update_time_litmus(struct rq *rq, struct task_struct *p) | 8 | static void update_time_litmus(struct rq *rq, struct task_struct *p) |
8 | { | 9 | { |
@@ -52,6 +53,8 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
52 | /* let the plugin schedule */ | 53 | /* let the plugin schedule */ |
53 | next = litmus->schedule(prev); | 54 | next = litmus->schedule(prev); |
54 | 55 | ||
56 | sched_state_plugin_check(); | ||
57 | |||
55 | /* check if a global plugin pulled a task from a different RQ */ | 58 | /* check if a global plugin pulled a task from a different RQ */ |
56 | if (next && task_rq(next) != rq) { | 59 | if (next && task_rq(next) != rq) { |
57 | /* we need to migrate the task */ | 60 | /* we need to migrate the task */ |
@@ -198,7 +201,7 @@ static void yield_task_litmus(struct rq *rq) | |||
198 | * then determine if a preemption is still required. | 201 | * then determine if a preemption is still required. |
199 | */ | 202 | */ |
200 | clear_exit_np(current); | 203 | clear_exit_np(current); |
201 | set_tsk_need_resched(current); | 204 | litmus_reschedule_local(); |
202 | } | 205 | } |
203 | 206 | ||
204 | /* Plugins are responsible for this. | 207 | /* Plugins are responsible for this. |
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c index ea77d3295290..c7d5cf7aa2b3 100644 --- a/litmus/sched_pfair.c +++ b/litmus/sched_pfair.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <litmus/litmus.h> | 17 | #include <litmus/litmus.h> |
18 | #include <litmus/jobs.h> | 18 | #include <litmus/jobs.h> |
19 | #include <litmus/preempt.h> | ||
19 | #include <litmus/rt_domain.h> | 20 | #include <litmus/rt_domain.h> |
20 | #include <litmus/sched_plugin.h> | 21 | #include <litmus/sched_plugin.h> |
21 | #include <litmus/sched_trace.h> | 22 | #include <litmus/sched_trace.h> |
@@ -241,11 +242,7 @@ static void check_preempt(struct task_struct* t) | |||
241 | PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n", | 242 | PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n", |
242 | tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on); | 243 | tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on); |
243 | /* preempt */ | 244 | /* preempt */ |
244 | if (cpu == smp_processor_id()) | 245 | litmus_reschedule(cpu); |
245 | set_tsk_need_resched(current); | ||
246 | else { | ||
247 | smp_send_reschedule(cpu); | ||
248 | } | ||
249 | } | 246 | } |
250 | } | 247 | } |
251 | 248 | ||
@@ -545,7 +542,7 @@ static void pfair_tick(struct task_struct* t) | |||
545 | 542 | ||
546 | if (state->local != current | 543 | if (state->local != current |
547 | && (is_realtime(current) || is_present(state->local))) | 544 | && (is_realtime(current) || is_present(state->local))) |
548 | set_tsk_need_resched(current); | 545 | litmus_reschedule_local(); |
549 | } | 546 | } |
550 | 547 | ||
551 | static int safe_to_schedule(struct task_struct* t, int cpu) | 548 | static int safe_to_schedule(struct task_struct* t, int cpu) |
@@ -577,7 +574,7 @@ static struct task_struct* pfair_schedule(struct task_struct * prev) | |||
577 | if (next) | 574 | if (next) |
578 | tsk_rt(next)->scheduled_on = state->cpu; | 575 | tsk_rt(next)->scheduled_on = state->cpu; |
579 | } | 576 | } |
580 | 577 | sched_state_task_picked(); | |
581 | raw_spin_unlock(&pfair_lock); | 578 | raw_spin_unlock(&pfair_lock); |
582 | 579 | ||
583 | if (next) | 580 | if (next) |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index d706a08fb313..d912a6494d20 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -6,10 +6,11 @@ | |||
6 | 6 | ||
7 | #include <linux/list.h> | 7 | #include <linux/list.h> |
8 | #include <linux/spinlock.h> | 8 | #include <linux/spinlock.h> |
9 | #include <linux/sched.h> | ||
9 | 10 | ||
10 | #include <litmus/litmus.h> | 11 | #include <litmus/litmus.h> |
11 | #include <litmus/sched_plugin.h> | 12 | #include <litmus/sched_plugin.h> |
12 | 13 | #include <litmus/preempt.h> | |
13 | #include <litmus/jobs.h> | 14 | #include <litmus/jobs.h> |
14 | 15 | ||
15 | /* | 16 | /* |
@@ -18,36 +19,30 @@ | |||
18 | * non-preemptive section aware and does not invoke the scheduler / send | 19 | * non-preemptive section aware and does not invoke the scheduler / send |
19 | * IPIs if the to-be-preempted task is actually non-preemptive. | 20 | * IPIs if the to-be-preempted task is actually non-preemptive. |
20 | */ | 21 | */ |
21 | void preempt_if_preemptable(struct task_struct* t, int on_cpu) | 22 | void preempt_if_preemptable(struct task_struct* t, int cpu) |
22 | { | 23 | { |
23 | /* t is the real-time task executing on CPU on_cpu If t is NULL, then | 24 | /* t is the real-time task executing on CPU on_cpu If t is NULL, then |
24 | * on_cpu is currently scheduling background work. | 25 | * on_cpu is currently scheduling background work. |
25 | */ | 26 | */ |
26 | 27 | ||
27 | int send_ipi; | 28 | int reschedule = 0; |
28 | 29 | ||
29 | if (smp_processor_id() == on_cpu) { | 30 | if (!t) |
30 | /* local CPU case */ | 31 | /* move non-real-time task out of the way */ |
31 | if (t) { | 32 | reschedule = 1; |
33 | else { | ||
34 | if (smp_processor_id() == cpu) { | ||
35 | /* local CPU case */ | ||
32 | /* check if we need to poke userspace */ | 36 | /* check if we need to poke userspace */ |
33 | if (is_user_np(t)) | 37 | if (is_user_np(t)) |
34 | /* yes, poke it */ | 38 | /* yes, poke it */ |
35 | request_exit_np(t); | 39 | request_exit_np(t); |
36 | else | 40 | else if (!is_kernel_np(t)) |
37 | /* no, see if we are allowed to preempt the | 41 | /* only if we are allowed to preempt the |
38 | * currently-executing task */ | 42 | * currently-executing task */ |
39 | if (!is_kernel_np(t)) | 43 | reschedule = 1; |
40 | set_tsk_need_resched(t); | 44 | } else { |
41 | } else | 45 | /* remote CPU case */ |
42 | /* move non-real-time task out of the way */ | ||
43 | set_tsk_need_resched(current); | ||
44 | } else { | ||
45 | /* remote CPU case */ | ||
46 | if (!t) | ||
47 | /* currently schedules non-real-time work */ | ||
48 | send_ipi = 1; | ||
49 | else { | ||
50 | /* currently schedules real-time work */ | ||
51 | if (is_user_np(t)) { | 46 | if (is_user_np(t)) { |
52 | /* need to notify user space of delayed | 47 | /* need to notify user space of delayed |
53 | * preemption */ | 48 | * preemption */ |
@@ -59,14 +54,14 @@ void preempt_if_preemptable(struct task_struct* t, int on_cpu) | |||
59 | mb(); | 54 | mb(); |
60 | } | 55 | } |
61 | /* Only send an ipi if remote task might have raced our | 56 | /* Only send an ipi if remote task might have raced our |
62 | * request, i.e., send an IPI to make sure if it exited | 57 | * request, i.e., send an IPI to make sure in case it |
63 | * its critical section. | 58 | * exited its critical section. |
64 | */ | 59 | */ |
65 | send_ipi = !is_np(t) && !is_kernel_np(t); | 60 | reschedule = !is_np(t) && !is_kernel_np(t); |
66 | } | 61 | } |
67 | if (likely(send_ipi)) | ||
68 | smp_send_reschedule(on_cpu); | ||
69 | } | 62 | } |
63 | if (likely(reschedule)) | ||
64 | litmus_reschedule(cpu); | ||
70 | } | 65 | } |
71 | 66 | ||
72 | 67 | ||
@@ -80,6 +75,7 @@ static void litmus_dummy_finish_switch(struct task_struct * prev) | |||
80 | 75 | ||
81 | static struct task_struct* litmus_dummy_schedule(struct task_struct * prev) | 76 | static struct task_struct* litmus_dummy_schedule(struct task_struct * prev) |
82 | { | 77 | { |
78 | sched_state_task_picked(); | ||
83 | return NULL; | 79 | return NULL; |
84 | } | 80 | } |
85 | 81 | ||
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index 73f64730bd5e..b89823d5c026 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c | |||
@@ -12,14 +12,14 @@ | |||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | |||
16 | #include <linux/module.h> | 15 | #include <linux/module.h> |
17 | 16 | ||
18 | #include <litmus/litmus.h> | 17 | #include <litmus/litmus.h> |
19 | #include <litmus/jobs.h> | 18 | #include <litmus/jobs.h> |
19 | #include <litmus/preempt.h> | ||
20 | #include <litmus/sched_plugin.h> | 20 | #include <litmus/sched_plugin.h> |
21 | #include <litmus/edf_common.h> | 21 | #include <litmus/edf_common.h> |
22 | 22 | #include <litmus/sched_trace.h> | |
23 | 23 | ||
24 | typedef struct { | 24 | typedef struct { |
25 | rt_domain_t domain; | 25 | rt_domain_t domain; |
@@ -109,7 +109,7 @@ static void psnedf_tick(struct task_struct *t) | |||
109 | 109 | ||
110 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | 110 | if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { |
111 | if (!is_np(t)) { | 111 | if (!is_np(t)) { |
112 | set_tsk_need_resched(t); | 112 | litmus_reschedule_local(); |
113 | TRACE("psnedf_scheduler_tick: " | 113 | TRACE("psnedf_scheduler_tick: " |
114 | "%d is preemptable " | 114 | "%d is preemptable " |
115 | " => FORCE_RESCHED\n", t->pid); | 115 | " => FORCE_RESCHED\n", t->pid); |
@@ -205,6 +205,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev) | |||
205 | } | 205 | } |
206 | 206 | ||
207 | pedf->scheduled = next; | 207 | pedf->scheduled = next; |
208 | sched_state_task_picked(); | ||
208 | raw_spin_unlock(&pedf->slock); | 209 | raw_spin_unlock(&pedf->slock); |
209 | 210 | ||
210 | return next; | 211 | return next; |
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c index 39a543e22d41..e5098ddb1ec9 100644 --- a/litmus/sched_task_trace.c +++ b/litmus/sched_task_trace.c | |||
@@ -18,7 +18,7 @@ | |||
18 | 18 | ||
19 | /* set MAJOR to 0 to have it dynamically assigned */ | 19 | /* set MAJOR to 0 to have it dynamically assigned */ |
20 | #define FT_TASK_TRACE_MAJOR 253 | 20 | #define FT_TASK_TRACE_MAJOR 253 |
21 | #define NO_EVENTS 4096 /* this is a buffer of 12 4k pages per CPU */ | 21 | #define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT) |
22 | 22 | ||
23 | #define now() litmus_clock() | 23 | #define now() litmus_clock() |
24 | 24 | ||
@@ -41,6 +41,9 @@ static int __init init_sched_task_trace(void) | |||
41 | { | 41 | { |
42 | struct local_buffer* buf; | 42 | struct local_buffer* buf; |
43 | int i, ok = 0; | 43 | int i, ok = 0; |
44 | printk("Allocated %u sched_trace_xxx() events per CPU " | ||
45 | "(buffer size: %d bytes)\n", | ||
46 | NO_EVENTS, (int) sizeof(struct local_buffer)); | ||
44 | ftdev_init(&st_dev, THIS_MODULE); | 47 | ftdev_init(&st_dev, THIS_MODULE); |
45 | for (i = 0; i < NR_CPUS; i++) { | 48 | for (i = 0; i < NR_CPUS; i++) { |
46 | buf = &per_cpu(st_event_buffer, i); | 49 | buf = &per_cpu(st_event_buffer, i); |
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c index 8051b51a2022..f4171fddbbb1 100644 --- a/litmus/sched_trace.c +++ b/litmus/sched_trace.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * sched_trace.c -- record scheduling events to a byte stream. | 2 | * sched_trace.c -- record scheduling events to a byte stream. |
3 | */ | 3 | */ |
4 | #include <linux/spinlock.h> | 4 | #include <linux/spinlock.h> |
5 | #include <linux/semaphore.h> | 5 | #include <linux/mutex.h> |
6 | 6 | ||
7 | #include <linux/fs.h> | 7 | #include <linux/fs.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
@@ -18,133 +18,23 @@ | |||
18 | 18 | ||
19 | #define SCHED_TRACE_NAME "litmus/log" | 19 | #define SCHED_TRACE_NAME "litmus/log" |
20 | 20 | ||
21 | /* Allocate a buffer of about 32k per CPU */ | 21 | /* Compute size of TRACE() buffer */ |
22 | #define LITMUS_TRACE_BUF_PAGES 8 | 22 | #define LITMUS_TRACE_BUF_SIZE (1 << CONFIG_SCHED_DEBUG_TRACE_SHIFT) |
23 | #define LITMUS_TRACE_BUF_SIZE (PAGE_SIZE * LITMUS_TRACE_BUF_PAGES * NR_CPUS) | ||
24 | 23 | ||
25 | /* Max length of one read from the buffer */ | 24 | /* Max length of one read from the buffer */ |
26 | #define MAX_READ_LEN (64 * 1024) | 25 | #define MAX_READ_LEN (64 * 1024) |
27 | 26 | ||
28 | /* Max length for one write --- from kernel --- to the buffer */ | 27 | /* Max length for one write --- by TRACE() --- to the buffer. This is used to |
28 | * allocate a per-cpu buffer for printf() formatting. */ | ||
29 | #define MSG_SIZE 255 | 29 | #define MSG_SIZE 255 |
30 | 30 | ||
31 | /* Inner ring buffer structure */ | ||
32 | typedef struct { | ||
33 | rwlock_t del_lock; | ||
34 | 31 | ||
35 | /* the buffer */ | 32 | static DEFINE_MUTEX(reader_mutex); |
36 | struct kfifo kfifo; | 33 | static atomic_t reader_cnt = ATOMIC_INIT(0); |
37 | } ring_buffer_t; | 34 | static DEFINE_KFIFO(debug_buffer, char, LITMUS_TRACE_BUF_SIZE); |
38 | 35 | ||
39 | /* Main buffer structure */ | ||
40 | typedef struct { | ||
41 | ring_buffer_t buf; | ||
42 | atomic_t reader_cnt; | ||
43 | struct semaphore reader_mutex; | ||
44 | } trace_buffer_t; | ||
45 | 36 | ||
46 | |||
47 | /* | ||
48 | * Inner buffer management functions | ||
49 | */ | ||
50 | void rb_init(ring_buffer_t* buf) | ||
51 | { | ||
52 | rwlock_init(&buf->del_lock); | ||
53 | } | ||
54 | |||
55 | int rb_alloc_buf(ring_buffer_t* buf, unsigned int size) | ||
56 | { | ||
57 | unsigned long flags; | ||
58 | int ret = 0; | ||
59 | |||
60 | write_lock_irqsave(&buf->del_lock, flags); | ||
61 | |||
62 | /* kfifo size must be a power of 2 | ||
63 | * atm kfifo alloc is automatically rounding the size | ||
64 | */ | ||
65 | ret = kfifo_alloc(&buf->kfifo, size, GFP_ATOMIC); | ||
66 | |||
67 | write_unlock_irqrestore(&buf->del_lock, flags); | ||
68 | |||
69 | if(ret < 0) | ||
70 | printk(KERN_ERR "kfifo_alloc failed\n"); | ||
71 | |||
72 | return ret; | ||
73 | } | ||
74 | |||
75 | int rb_free_buf(ring_buffer_t* buf) | ||
76 | { | ||
77 | unsigned long flags; | ||
78 | |||
79 | write_lock_irqsave(&buf->del_lock, flags); | ||
80 | |||
81 | BUG_ON(!kfifo_initialized(&buf->kfifo)); | ||
82 | kfifo_free(&buf->kfifo); | ||
83 | |||
84 | write_unlock_irqrestore(&buf->del_lock, flags); | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Assumption: concurrent writes are serialized externally | ||
91 | * | ||
92 | * Will only succeed if there is enough space for all len bytes. | ||
93 | */ | ||
94 | int rb_put(ring_buffer_t* buf, char* mem, size_t len) | ||
95 | { | ||
96 | unsigned long flags; | ||
97 | int error = 0; | ||
98 | |||
99 | read_lock_irqsave(&buf->del_lock, flags); | ||
100 | |||
101 | if (!kfifo_initialized(&buf->kfifo)) { | ||
102 | error = -ENODEV; | ||
103 | goto out; | ||
104 | } | ||
105 | |||
106 | if((kfifo_in(&buf->kfifo, mem, len)) < len) { | ||
107 | error = -ENOMEM; | ||
108 | goto out; | ||
109 | } | ||
110 | |||
111 | out: | ||
112 | read_unlock_irqrestore(&buf->del_lock, flags); | ||
113 | return error; | ||
114 | } | ||
115 | |||
116 | /* Assumption: concurrent reads are serialized externally */ | ||
117 | int rb_get(ring_buffer_t* buf, char* mem, size_t len) | ||
118 | { | ||
119 | unsigned long flags; | ||
120 | int error = 0; | ||
121 | |||
122 | read_lock_irqsave(&buf->del_lock, flags); | ||
123 | if (!kfifo_initialized(&buf->kfifo)) { | ||
124 | error = -ENODEV; | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | error = kfifo_out(&buf->kfifo, (unsigned char*)mem, len); | ||
129 | |||
130 | out: | ||
131 | read_unlock_irqrestore(&buf->del_lock, flags); | ||
132 | return error; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Device Driver management | ||
137 | */ | ||
138 | static DEFINE_RAW_SPINLOCK(log_buffer_lock); | 37 | static DEFINE_RAW_SPINLOCK(log_buffer_lock); |
139 | static trace_buffer_t log_buffer; | ||
140 | |||
141 | static void init_log_buffer(void) | ||
142 | { | ||
143 | rb_init(&log_buffer.buf); | ||
144 | atomic_set(&log_buffer.reader_cnt,0); | ||
145 | init_MUTEX(&log_buffer.reader_mutex); | ||
146 | } | ||
147 | |||
148 | static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); | 38 | static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); |
149 | 39 | ||
150 | /* | 40 | /* |
@@ -163,6 +53,10 @@ void sched_trace_log_message(const char* fmt, ...) | |||
163 | size_t len; | 53 | size_t len; |
164 | char* buf; | 54 | char* buf; |
165 | 55 | ||
56 | if (!atomic_read(&reader_cnt)) | ||
57 | /* early exit if nobody is listening */ | ||
58 | return; | ||
59 | |||
166 | va_start(args, fmt); | 60 | va_start(args, fmt); |
167 | local_irq_save(flags); | 61 | local_irq_save(flags); |
168 | 62 | ||
@@ -171,32 +65,33 @@ void sched_trace_log_message(const char* fmt, ...) | |||
171 | len = vscnprintf(buf, MSG_SIZE, fmt, args); | 65 | len = vscnprintf(buf, MSG_SIZE, fmt, args); |
172 | 66 | ||
173 | raw_spin_lock(&log_buffer_lock); | 67 | raw_spin_lock(&log_buffer_lock); |
174 | /* Don't copy the trailing null byte, we don't want null bytes | 68 | /* Don't copy the trailing null byte, we don't want null bytes in a |
175 | * in a text file. | 69 | * text file. |
176 | */ | 70 | */ |
177 | rb_put(&log_buffer.buf, buf, len); | 71 | kfifo_in(&debug_buffer, buf, len); |
178 | raw_spin_unlock(&log_buffer_lock); | 72 | raw_spin_unlock(&log_buffer_lock); |
179 | 73 | ||
180 | local_irq_restore(flags); | 74 | local_irq_restore(flags); |
181 | va_end(args); | 75 | va_end(args); |
182 | } | 76 | } |
183 | 77 | ||
78 | |||
184 | /* | 79 | /* |
185 | * log_read - Read the trace buffer | 80 | * log_read - Read the trace buffer |
186 | * | 81 | * |
187 | * This function is called as a file operation from userspace. | 82 | * This function is called as a file operation from userspace. |
188 | * Readers can sleep. Access is serialized through reader_mutex | 83 | * Readers can sleep. Access is serialized through reader_mutex |
189 | */ | 84 | */ |
190 | static ssize_t log_read(struct file *filp, char __user *to, size_t len, | 85 | static ssize_t log_read(struct file *filp, |
191 | loff_t *f_pos) | 86 | char __user *to, size_t len, |
87 | loff_t *f_pos) | ||
192 | { | 88 | { |
193 | /* we ignore f_pos, this is strictly sequential */ | 89 | /* we ignore f_pos, this is strictly sequential */ |
194 | 90 | ||
195 | ssize_t error = -EINVAL; | 91 | ssize_t error = -EINVAL; |
196 | char* mem; | 92 | char* mem; |
197 | trace_buffer_t *tbuf = filp->private_data; | ||
198 | 93 | ||
199 | if (down_interruptible(&tbuf->reader_mutex)) { | 94 | if (mutex_lock_interruptible(&reader_mutex)) { |
200 | error = -ERESTARTSYS; | 95 | error = -ERESTARTSYS; |
201 | goto out; | 96 | goto out; |
202 | } | 97 | } |
@@ -210,14 +105,14 @@ static ssize_t log_read(struct file *filp, char __user *to, size_t len, | |||
210 | goto out_unlock; | 105 | goto out_unlock; |
211 | } | 106 | } |
212 | 107 | ||
213 | error = rb_get(&tbuf->buf, mem, len); | 108 | error = kfifo_out(&debug_buffer, mem, len); |
214 | while (!error) { | 109 | while (!error) { |
215 | set_current_state(TASK_INTERRUPTIBLE); | 110 | set_current_state(TASK_INTERRUPTIBLE); |
216 | schedule_timeout(110); | 111 | schedule_timeout(110); |
217 | if (signal_pending(current)) | 112 | if (signal_pending(current)) |
218 | error = -ERESTARTSYS; | 113 | error = -ERESTARTSYS; |
219 | else | 114 | else |
220 | error = rb_get(&tbuf->buf, mem, len); | 115 | error = kfifo_out(&debug_buffer, mem, len); |
221 | } | 116 | } |
222 | 117 | ||
223 | if (error > 0 && copy_to_user(to, mem, error)) | 118 | if (error > 0 && copy_to_user(to, mem, error)) |
@@ -225,7 +120,7 @@ static ssize_t log_read(struct file *filp, char __user *to, size_t len, | |||
225 | 120 | ||
226 | kfree(mem); | 121 | kfree(mem); |
227 | out_unlock: | 122 | out_unlock: |
228 | up(&tbuf->reader_mutex); | 123 | mutex_unlock(&reader_mutex); |
229 | out: | 124 | out: |
230 | return error; | 125 | return error; |
231 | } | 126 | } |
@@ -243,36 +138,23 @@ extern int trace_recurse; | |||
243 | static int log_open(struct inode *in, struct file *filp) | 138 | static int log_open(struct inode *in, struct file *filp) |
244 | { | 139 | { |
245 | int error = -EINVAL; | 140 | int error = -EINVAL; |
246 | trace_buffer_t* tbuf; | ||
247 | |||
248 | tbuf = &log_buffer; | ||
249 | 141 | ||
250 | if (down_interruptible(&tbuf->reader_mutex)) { | 142 | if (mutex_lock_interruptible(&reader_mutex)) { |
251 | error = -ERESTARTSYS; | 143 | error = -ERESTARTSYS; |
252 | goto out; | 144 | goto out; |
253 | } | 145 | } |
254 | 146 | ||
255 | /* first open must allocate buffers */ | 147 | atomic_inc(&reader_cnt); |
256 | if (atomic_inc_return(&tbuf->reader_cnt) == 1) { | ||
257 | if ((error = rb_alloc_buf(&tbuf->buf, LITMUS_TRACE_BUF_SIZE))) | ||
258 | { | ||
259 | atomic_dec(&tbuf->reader_cnt); | ||
260 | goto out_unlock; | ||
261 | } | ||
262 | } | ||
263 | |||
264 | error = 0; | 148 | error = 0; |
265 | filp->private_data = tbuf; | ||
266 | 149 | ||
267 | printk(KERN_DEBUG | 150 | printk(KERN_DEBUG |
268 | "sched_trace kfifo with buffer starting at: 0x%p\n", | 151 | "sched_trace kfifo with buffer starting at: 0x%p\n", |
269 | (tbuf->buf.kfifo).buf); | 152 | debug_buffer.buf); |
270 | 153 | ||
271 | /* override printk() */ | 154 | /* override printk() */ |
272 | trace_override++; | 155 | trace_override++; |
273 | 156 | ||
274 | out_unlock: | 157 | mutex_unlock(&reader_mutex); |
275 | up(&tbuf->reader_mutex); | ||
276 | out: | 158 | out: |
277 | return error; | 159 | return error; |
278 | } | 160 | } |
@@ -280,26 +162,20 @@ static int log_open(struct inode *in, struct file *filp) | |||
280 | static int log_release(struct inode *in, struct file *filp) | 162 | static int log_release(struct inode *in, struct file *filp) |
281 | { | 163 | { |
282 | int error = -EINVAL; | 164 | int error = -EINVAL; |
283 | trace_buffer_t* tbuf = filp->private_data; | ||
284 | |||
285 | BUG_ON(!filp->private_data); | ||
286 | 165 | ||
287 | if (down_interruptible(&tbuf->reader_mutex)) { | 166 | if (mutex_lock_interruptible(&reader_mutex)) { |
288 | error = -ERESTARTSYS; | 167 | error = -ERESTARTSYS; |
289 | goto out; | 168 | goto out; |
290 | } | 169 | } |
291 | 170 | ||
292 | /* last release must deallocate buffers */ | 171 | atomic_dec(&reader_cnt); |
293 | if (atomic_dec_return(&tbuf->reader_cnt) == 0) { | ||
294 | error = rb_free_buf(&tbuf->buf); | ||
295 | } | ||
296 | 172 | ||
297 | /* release printk() overriding */ | 173 | /* release printk() overriding */ |
298 | trace_override--; | 174 | trace_override--; |
299 | 175 | ||
300 | printk(KERN_DEBUG "sched_trace kfifo released\n"); | 176 | printk(KERN_DEBUG "sched_trace kfifo released\n"); |
301 | 177 | ||
302 | up(&tbuf->reader_mutex); | 178 | mutex_unlock(&reader_mutex); |
303 | out: | 179 | out: |
304 | return error; | 180 | return error; |
305 | } | 181 | } |
@@ -333,7 +209,7 @@ void dump_trace_buffer(int max) | |||
333 | /* potential, but very unlikely, race... */ | 209 | /* potential, but very unlikely, race... */ |
334 | trace_recurse = 1; | 210 | trace_recurse = 1; |
335 | while ((max == 0 || count++ < max) && | 211 | while ((max == 0 || count++ < max) && |
336 | (len = rb_get(&log_buffer.buf, line, sizeof(line) - 1)) > 0) { | 212 | (len = kfifo_out(&debug_buffer, line, sizeof(line) - 1)) > 0) {
337 | line[len] = '\0'; | 213 | line[len] = '\0'; |
338 | printk("%s", line); | 214 | printk("%s", line); |
339 | } | 215 | } |
@@ -355,7 +231,6 @@ static struct sysrq_key_op sysrq_dump_trace_buffer_op = { | |||
355 | static int __init init_sched_trace(void) | 231 | static int __init init_sched_trace(void) |
356 | { | 232 | { |
357 | printk("Initializing TRACE() device\n"); | 233 | printk("Initializing TRACE() device\n"); |
358 | init_log_buffer(); | ||
359 | 234 | ||
360 | #ifdef CONFIG_MAGIC_SYSRQ | 235 | #ifdef CONFIG_MAGIC_SYSRQ |
361 | /* offer some debugging help */ | 236 | /* offer some debugging help */ |
@@ -365,7 +240,6 @@ static int __init init_sched_trace(void) | |||
365 | printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); | 240 | printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); |
366 | #endif | 241 | #endif |
367 | 242 | ||
368 | |||
369 | return misc_register(&litmus_log_dev); | 243 | return misc_register(&litmus_log_dev); |
370 | } | 244 | } |
371 | 245 | ||
diff --git a/litmus/srp.c b/litmus/srp.c index 71639b991630..cb577598ce3e 100644 --- a/litmus/srp.c +++ b/litmus/srp.c | |||
@@ -3,12 +3,12 @@ | |||
3 | /* ************************************************************************** */ | 3 | /* ************************************************************************** */ |
4 | 4 | ||
5 | #include <asm/atomic.h> | 5 | #include <asm/atomic.h> |
6 | #include <linux/sched.h> | ||
6 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
8 | |||
7 | #include <litmus/litmus.h> | 9 | #include <litmus/litmus.h> |
8 | #include <litmus/sched_plugin.h> | 10 | #include <litmus/sched_plugin.h> |
9 | |||
10 | #include <litmus/fdso.h> | 11 | #include <litmus/fdso.h> |
11 | |||
12 | #include <litmus/trace.h> | 12 | #include <litmus/trace.h> |
13 | 13 | ||
14 | 14 | ||
diff --git a/litmus/trace.c b/litmus/trace.c index 440376998dc9..b3a6b47aad6e 100644 --- a/litmus/trace.c +++ b/litmus/trace.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/sched.h> | ||
1 | #include <linux/module.h> | 2 | #include <linux/module.h> |
2 | 3 | ||
3 | #include <litmus/ftdev.h> | 4 | #include <litmus/ftdev.h> |