-rw-r--r--  download/2011.1/SHA256SUMS              |     3
-rw-r--r--  download/2011.1/ft_tools-2011.1.tgz     |   bin  (0 -> 5511 bytes)
-rw-r--r--  download/2011.1/ft_tools-config         |     2
-rw-r--r--  download/2011.1/liblitmus-2011.1.tgz    |   bin  (0 -> 18647 bytes)
-rw-r--r--  download/2011.1/liblitmus-config        |    10
-rw-r--r--  download/2011.1/litmus-rt-2011.1.patch  | 11754
-rw-r--r--  download/2011.1/x86_64-config           |  2030
-rw-r--r--  index.html                              |  1298
-rw-r--r--  litmus2010.html                         |   674
9 files changed, 15100 insertions, 671 deletions
diff --git a/download/2011.1/SHA256SUMS b/download/2011.1/SHA256SUMS
new file mode 100644
index 0000000..8738e71
--- /dev/null
+++ b/download/2011.1/SHA256SUMS
@@ -0,0 +1,3 @@
1 | 04d093ea79394bac8e236c171dcbe3c341d447a2b03d0221128b10c4dfb49187 ft_tools-2011.1.tgz | ||
2 | ed3950e9338ed5b82384085298e1b46f24f0efcea95724945694f47b20f156f8 liblitmus-2011.1.tgz | ||
3 | bf3398c1fce779c528fef99698dbd957a682cedc6fe0e02c861160f2549fd791 litmus-rt-2011.1.patch | ||
diff --git a/download/2011.1/ft_tools-2011.1.tgz b/download/2011.1/ft_tools-2011.1.tgz
new file mode 100644
index 0000000..5c9e6f6
--- /dev/null
+++ b/download/2011.1/ft_tools-2011.1.tgz
Binary files differ
diff --git a/download/2011.1/ft_tools-config b/download/2011.1/ft_tools-config
new file mode 100644
index 0000000..972c198
--- /dev/null
+++ b/download/2011.1/ft_tools-config
@@ -0,0 +1,2 @@
1 | # Where can we find the liblitmus library and build system? | ||
2 | LIBLITMUS = ../liblitmus | ||
diff --git a/download/2011.1/liblitmus-2011.1.tgz b/download/2011.1/liblitmus-2011.1.tgz
new file mode 100644
index 0000000..0fb89ca
--- /dev/null
+++ b/download/2011.1/liblitmus-2011.1.tgz
Binary files differ
diff --git a/download/2011.1/liblitmus-config b/download/2011.1/liblitmus-config
new file mode 100644
index 0000000..92942ed
--- /dev/null
+++ b/download/2011.1/liblitmus-config
@@ -0,0 +1,10 @@
1 | # Where can we find the LITMUS^RT kernel source? | ||
2 | LITMUS_KERNEL = ../litmus-rt | ||
3 | |||
4 | # Which architecture should we build for? | ||
5 | # If unset, the library will be built for the host architecture. | ||
6 | # ARCH = | ||
7 | |||
8 | # Should we use a prefix for compiler invocations (to use a cross | ||
9 | # compiler instead of gcc)? | ||
10 | # CROSS_COMPILE = | ||
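For concreteness, a filled-in liblitmus-config for a cross-build might look as follows; the toolchain prefix is a hypothetical example and should match whatever ARM-targeting GCC is installed:

    # Example (hypothetical values): cross-compile liblitmus for ARM
    LITMUS_KERNEL = ../litmus-rt
    ARCH = arm
    CROSS_COMPILE = arm-none-linux-gnueabi-

With both variables left unset, as shipped, the library simply builds natively with the host gcc.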
diff --git a/download/2011.1/litmus-rt-2011.1.patch b/download/2011.1/litmus-rt-2011.1.patch
new file mode 100644
index 0000000..636a26f
--- /dev/null
+++ b/download/2011.1/litmus-rt-2011.1.patch
@@ -0,0 +1,11754 @@
1 | Makefile | 4 +- | ||
2 | arch/arm/Kconfig | 8 + | ||
3 | arch/arm/include/asm/timex.h | 2 + | ||
4 | arch/arm/include/asm/unistd.h | 3 + | ||
5 | arch/arm/kernel/calls.S | 14 + | ||
6 | arch/arm/kernel/smp.c | 4 + | ||
7 | arch/arm/mach-realview/include/mach/timex.h | 27 + | ||
8 | arch/x86/Kconfig | 8 + | ||
9 | arch/x86/include/asm/entry_arch.h | 1 + | ||
10 | arch/x86/include/asm/feather_trace.h | 17 + | ||
11 | arch/x86/include/asm/feather_trace_32.h | 79 +++ | ||
12 | arch/x86/include/asm/feather_trace_64.h | 67 ++ | ||
13 | arch/x86/include/asm/hw_irq.h | 3 + | ||
14 | arch/x86/include/asm/irq_vectors.h | 5 + | ||
15 | arch/x86/include/asm/processor.h | 4 + | ||
16 | arch/x86/include/asm/unistd_32.h | 6 +- | ||
17 | arch/x86/include/asm/unistd_64.h | 4 + | ||
18 | arch/x86/kernel/Makefile | 2 + | ||
19 | arch/x86/kernel/cpu/intel_cacheinfo.c | 17 + | ||
20 | arch/x86/kernel/entry_64.S | 2 + | ||
21 | arch/x86/kernel/ft_event.c | 118 ++++ | ||
22 | arch/x86/kernel/irqinit.c | 3 + | ||
23 | arch/x86/kernel/smp.c | 27 + | ||
24 | arch/x86/kernel/syscall_table_32.S | 14 + | ||
25 | fs/exec.c | 13 +- | ||
26 | fs/inode.c | 2 + | ||
27 | include/linux/completion.h | 1 + | ||
28 | include/linux/fs.h | 21 +- | ||
29 | include/linux/hrtimer.h | 32 + | ||
30 | include/linux/sched.h | 19 +- | ||
31 | include/linux/smp.h | 5 + | ||
32 | include/linux/tick.h | 5 + | ||
33 | include/litmus/bheap.h | 77 +++ | ||
34 | include/litmus/budget.h | 8 + | ||
35 | include/litmus/debug_trace.h | 37 ++ | ||
36 | include/litmus/edf_common.h | 27 + | ||
37 | include/litmus/fdso.h | 70 +++ | ||
38 | include/litmus/feather_buffer.h | 94 +++ | ||
39 | include/litmus/feather_trace.h | 65 ++ | ||
40 | include/litmus/ftdev.h | 52 ++ | ||
41 | include/litmus/jobs.h | 9 + | ||
42 | include/litmus/litmus.h | 241 +++++++ | ||
43 | include/litmus/litmus_proc.h | 19 + | ||
44 | include/litmus/preempt.h | 164 +++++ | ||
45 | include/litmus/rt_domain.h | 182 ++++++ | ||
46 | include/litmus/rt_param.h | 196 ++++++ | ||
47 | include/litmus/sched_plugin.h | 159 +++++ | ||
48 | include/litmus/sched_trace.h | 183 ++++++ | ||
49 | include/litmus/trace.h | 113 ++++ | ||
50 | include/litmus/unistd_32.h | 23 + | ||
51 | include/litmus/unistd_64.h | 37 ++ | ||
52 | kernel/exit.c | 4 + | ||
53 | kernel/fork.c | 7 + | ||
54 | kernel/hrtimer.c | 95 +++ | ||
55 | kernel/printk.c | 14 +- | ||
56 | kernel/sched.c | 118 ++++- | ||
57 | kernel/sched_fair.c | 2 +- | ||
58 | kernel/sched_rt.c | 2 +- | ||
59 | kernel/time/tick-sched.c | 47 ++ | ||
60 | litmus/Kconfig | 194 ++++++ | ||
61 | litmus/Makefile | 27 + | ||
62 | litmus/bheap.c | 314 ++++++++++ | ||
63 | litmus/budget.c | 111 ++++ | ||
64 | litmus/ctrldev.c | 150 +++++ | ||
65 | litmus/edf_common.c | 102 +++ | ||
66 | litmus/fdso.c | 281 +++++++++ | ||
67 | litmus/fmlp.c | 268 ++++++++ | ||
68 | litmus/ft_event.c | 43 ++ | ||
69 | litmus/ftdev.c | 440 +++++++++++++ | ||
70 | litmus/jobs.c | 43 ++ | ||
71 | litmus/litmus.c | 547 ++++++++++++++++ | ||
72 | litmus/litmus_proc.c | 259 ++++++++ | ||
73 | litmus/preempt.c | 131 ++++ | ||
74 | litmus/rt_domain.c | 355 +++++++++++ | ||
75 | litmus/sched_cedf.c | 873 ++++++++++++++++++++++++++ | ||
76 | litmus/sched_gsn_edf.c | 828 +++++++++++++++++++++++++ | ||
77 | litmus/sched_litmus.c | 320 ++++++++++ | ||
78 | litmus/sched_pfair.c | 894 +++++++++++++++++++++++++++ | ||
79 | litmus/sched_plugin.c | 253 ++++++++ | ||
80 | litmus/sched_psn_edf.c | 483 +++++++++++++++ | ||
81 | litmus/sched_task_trace.c | 226 +++++++ | ||
82 | litmus/sched_trace.c | 252 ++++++++ | ||
83 | litmus/srp.c | 318 ++++++++++ | ||
84 | litmus/sync.c | 104 +++ | ||
85 | litmus/trace.c | 122 ++++ | ||
86 | 85 files changed, 10485 insertions(+), 35 deletions(-) | ||
87 | |||
88 | diff --git a/Makefile b/Makefile | ||
89 | index 860c26a..8e53f47 100644 | ||
90 | --- a/Makefile | ||
91 | +++ b/Makefile | ||
92 | @@ -1,7 +1,7 @@ | ||
93 | VERSION = 2 | ||
94 | PATCHLEVEL = 6 | ||
95 | SUBLEVEL = 36 | ||
96 | -EXTRAVERSION = | ||
97 | +EXTRAVERSION =-litmus2010 | ||
98 | NAME = Flesh-Eating Bats with Fangs | ||
99 | |||
100 | # *DOCUMENTATION* | ||
101 | @@ -659,7 +659,7 @@ export mod_strip_cmd | ||
102 | |||
103 | |||
104 | ifeq ($(KBUILD_EXTMOD),) | ||
105 | -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ | ||
106 | +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ litmus/ | ||
107 | |||
108 | vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ | ||
109 | $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ | ||
110 | diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig | ||
111 | index 9c26ba7..babad6d 100644 | ||
112 | --- a/arch/arm/Kconfig | ||
113 | +++ b/arch/arm/Kconfig | ||
114 | @@ -1808,3 +1808,11 @@ source "security/Kconfig" | ||
115 | source "crypto/Kconfig" | ||
116 | |||
117 | source "lib/Kconfig" | ||
118 | + | ||
119 | +config ARCH_HAS_SEND_PULL_TIMERS | ||
120 | + def_bool n | ||
121 | + | ||
122 | +config ARCH_HAS_FEATHER_TRACE | ||
123 | + def_bool n | ||
124 | + | ||
125 | +source "litmus/Kconfig" | ||
126 | diff --git a/arch/arm/include/asm/timex.h b/arch/arm/include/asm/timex.h | ||
127 | index 3be8de3..8a102a3 100644 | ||
128 | --- a/arch/arm/include/asm/timex.h | ||
129 | +++ b/arch/arm/include/asm/timex.h | ||
130 | @@ -16,9 +16,11 @@ | ||
131 | |||
132 | typedef unsigned long cycles_t; | ||
133 | |||
134 | +#ifndef get_cycles | ||
135 | static inline cycles_t get_cycles (void) | ||
136 | { | ||
137 | return 0; | ||
138 | } | ||
139 | +#endif | ||
140 | |||
141 | #endif | ||
142 | diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h | ||
143 | index c891eb7..625b304 100644 | ||
144 | --- a/arch/arm/include/asm/unistd.h | ||
145 | +++ b/arch/arm/include/asm/unistd.h | ||
146 | @@ -397,6 +397,9 @@ | ||
147 | #define __NR_fanotify_mark (__NR_SYSCALL_BASE+368) | ||
148 | #define __NR_prlimit64 (__NR_SYSCALL_BASE+369) | ||
149 | |||
150 | +#define __NR_LITMUS (__NR_SYSCALL_BASE+370) | ||
151 | +#include <litmus/unistd_32.h> | ||
152 | + | ||
153 | /* | ||
154 | * The following SWIs are ARM private. | ||
155 | */ | ||
156 | diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S | ||
157 | index 5c26ecc..584a683 100644 | ||
158 | --- a/arch/arm/kernel/calls.S | ||
159 | +++ b/arch/arm/kernel/calls.S | ||
160 | @@ -379,6 +379,20 @@ | ||
161 | CALL(sys_fanotify_init) | ||
162 | CALL(sys_fanotify_mark) | ||
163 | CALL(sys_prlimit64) | ||
164 | +/* 370 */ CALL(sys_set_rt_task_param) | ||
165 | + CALL(sys_get_rt_task_param) | ||
166 | + CALL(sys_complete_job) | ||
167 | + CALL(sys_od_open) | ||
168 | + CALL(sys_od_close) | ||
169 | +/* 375 */ CALL(sys_fmlp_down) | ||
170 | + CALL(sys_fmlp_up) | ||
171 | + CALL(sys_srp_down) | ||
172 | + CALL(sys_srp_up) | ||
173 | + CALL(sys_query_job_no) | ||
174 | +/* 380 */ CALL(sys_wait_for_job_release) | ||
175 | + CALL(sys_wait_for_ts_release) | ||
176 | + CALL(sys_release_ts) | ||
177 | + CALL(sys_null_call) | ||
178 | #ifndef syscalls_counted | ||
179 | .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls | ||
180 | #define syscalls_counted | ||
181 | diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c | ||
182 | index 40dc74f..b72fbf3 100644 | ||
183 | --- a/arch/arm/kernel/smp.c | ||
184 | +++ b/arch/arm/kernel/smp.c | ||
185 | @@ -38,6 +38,8 @@ | ||
186 | #include <asm/localtimer.h> | ||
187 | #include <asm/smp_plat.h> | ||
188 | |||
189 | +#include <litmus/preempt.h> | ||
190 | + | ||
191 | /* | ||
192 | * as from 2.5, kernels no longer have an init_tasks structure | ||
193 | * so we need some other way of telling a new secondary core | ||
194 | @@ -533,6 +535,8 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs) | ||
195 | * nothing more to do - eveything is | ||
196 | * done on the interrupt return path | ||
197 | */ | ||
198 | + /* LITMUS^RT: take action based on scheduler state */ | ||
199 | + sched_state_ipi(); | ||
200 | break; | ||
201 | |||
202 | case IPI_CALL_FUNC: | ||
203 | diff --git a/arch/arm/mach-realview/include/mach/timex.h b/arch/arm/mach-realview/include/mach/timex.h | ||
204 | index 4eeb069..e8bcc40 100644 | ||
205 | --- a/arch/arm/mach-realview/include/mach/timex.h | ||
206 | +++ b/arch/arm/mach-realview/include/mach/timex.h | ||
207 | @@ -21,3 +21,30 @@ | ||
208 | */ | ||
209 | |||
210 | #define CLOCK_TICK_RATE (50000000 / 16) | ||
211 | + | ||
212 | +#if defined(CONFIG_MACH_REALVIEW_PB11MP) || defined(CONFIG_MACH_REALVIEW_PB1176) | ||
213 | + | ||
214 | +static inline unsigned long realview_get_arm11_cp15_ccnt(void) | ||
215 | +{ | ||
216 | + unsigned long cycles; | ||
217 | + /* Read CP15 CCNT register. */ | ||
218 | + asm volatile ("mrc p15, 0, %0, c15, c12, 1" : "=r" (cycles)); | ||
219 | + return cycles; | ||
220 | +} | ||
221 | + | ||
222 | +#define get_cycles realview_get_arm11_cp15_ccnt | ||
223 | + | ||
224 | +#elif defined(CONFIG_MACH_REALVIEW_PBA8) | ||
225 | + | ||
226 | + | ||
227 | +static inline unsigned long realview_get_a8_cp15_ccnt(void) | ||
228 | +{ | ||
229 | + unsigned long cycles; | ||
230 | + /* Read CP15 CCNT register. */ | ||
231 | + asm volatile ("mrc p15, 0, %0, c9, c13, 0" : "=r" (cycles)); | ||
232 | + return cycles; | ||
233 | +} | ||
234 | + | ||
235 | +#define get_cycles realview_get_a8_cp15_ccnt | ||
236 | + | ||
237 | +#endif | ||
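As a sketch of how these per-platform cycle counters get used, callers bracket a region of interest with get_cycles() and subtract; the helper below is illustrative only, and assumes the CCNT has been enabled and that 32-bit wrap-around during the measured region can be ignored:

    /* Illustrative sketch: time a region with the CCNT-backed get_cycles().
     * Unsigned subtraction yields the correct delta across a single wrap.
     */
    static inline cycles_t cycles_elapsed(void (*fn)(void))
    {
        cycles_t start = get_cycles();
        fn();
        return get_cycles() - start;
    }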
238 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig | ||
239 | index cea0cd9..5181ed3 100644 | ||
240 | --- a/arch/x86/Kconfig | ||
241 | +++ b/arch/x86/Kconfig | ||
242 | @@ -2142,3 +2142,11 @@ source "crypto/Kconfig" | ||
243 | source "arch/x86/kvm/Kconfig" | ||
244 | |||
245 | source "lib/Kconfig" | ||
246 | + | ||
247 | +config ARCH_HAS_FEATHER_TRACE | ||
248 | + def_bool y | ||
249 | + | ||
250 | +config ARCH_HAS_SEND_PULL_TIMERS | ||
251 | + def_bool y | ||
252 | + | ||
253 | +source "litmus/Kconfig" | ||
254 | diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h | ||
255 | index 8e8ec66..5d07dea 100644 | ||
256 | --- a/arch/x86/include/asm/entry_arch.h | ||
257 | +++ b/arch/x86/include/asm/entry_arch.h | ||
258 | @@ -13,6 +13,7 @@ | ||
259 | BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR) | ||
260 | BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR) | ||
261 | BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR) | ||
262 | +BUILD_INTERRUPT(pull_timers_interrupt,PULL_TIMERS_VECTOR) | ||
263 | BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR) | ||
264 | BUILD_INTERRUPT(reboot_interrupt,REBOOT_VECTOR) | ||
265 | |||
266 | diff --git a/arch/x86/include/asm/feather_trace.h b/arch/x86/include/asm/feather_trace.h | ||
267 | new file mode 100644 | ||
268 | index 0000000..4fd3163 | ||
269 | --- /dev/null | ||
270 | +++ b/arch/x86/include/asm/feather_trace.h | ||
271 | @@ -0,0 +1,17 @@ | ||
272 | +#ifndef _ARCH_FEATHER_TRACE_H | ||
273 | +#define _ARCH_FEATHER_TRACE_H | ||
274 | + | ||
275 | +#include <asm/msr.h> | ||
276 | + | ||
277 | +static inline unsigned long long ft_timestamp(void) | ||
278 | +{ | ||
279 | + return __native_read_tsc(); | ||
280 | +} | ||
281 | + | ||
282 | +#ifdef CONFIG_X86_32 | ||
283 | +#include "feather_trace_32.h" | ||
284 | +#else | ||
285 | +#include "feather_trace_64.h" | ||
286 | +#endif | ||
287 | + | ||
288 | +#endif | ||
289 | diff --git a/arch/x86/include/asm/feather_trace_32.h b/arch/x86/include/asm/feather_trace_32.h | ||
290 | new file mode 100644 | ||
291 | index 0000000..70202f9 | ||
292 | --- /dev/null | ||
293 | +++ b/arch/x86/include/asm/feather_trace_32.h | ||
294 | @@ -0,0 +1,79 @@ | ||
295 | +/* Do not directly include this file. Include feather_trace.h instead */ | ||
296 | + | ||
297 | +#define feather_callback __attribute__((regparm(0))) | ||
298 | + | ||
299 | +/* | ||
300 | + * make the compiler reload any register that is not saved in | ||
301 | + * a cdecl function call | ||
302 | + */ | ||
303 | +#define CLOBBER_LIST "memory", "cc", "eax", "ecx", "edx" | ||
304 | + | ||
305 | +#define ft_event(id, callback) \ | ||
306 | + __asm__ __volatile__( \ | ||
307 | + "1: jmp 2f \n\t" \ | ||
308 | + " call " #callback " \n\t" \ | ||
309 | + ".section __event_table, \"aw\" \n\t" \ | ||
310 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
311 | + ".previous \n\t" \ | ||
312 | + "2: \n\t" \ | ||
313 | + : : : CLOBBER_LIST) | ||
314 | + | ||
315 | +#define ft_event0(id, callback) \ | ||
316 | + __asm__ __volatile__( \ | ||
317 | + "1: jmp 2f \n\t" \ | ||
318 | + " subl $4, %%esp \n\t" \ | ||
319 | + " movl $" #id ", (%%esp) \n\t" \ | ||
320 | + " call " #callback " \n\t" \ | ||
321 | + " addl $4, %%esp \n\t" \ | ||
322 | + ".section __event_table, \"aw\" \n\t" \ | ||
323 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
324 | + ".previous \n\t" \ | ||
325 | + "2: \n\t" \ | ||
326 | + : : : CLOBBER_LIST) | ||
327 | + | ||
328 | +#define ft_event1(id, callback, param) \ | ||
329 | + __asm__ __volatile__( \ | ||
330 | + "1: jmp 2f \n\t" \ | ||
331 | + " subl $8, %%esp \n\t" \ | ||
332 | + " movl %0, 4(%%esp) \n\t" \ | ||
333 | + " movl $" #id ", (%%esp) \n\t" \ | ||
334 | + " call " #callback " \n\t" \ | ||
335 | + " addl $8, %%esp \n\t" \ | ||
336 | + ".section __event_table, \"aw\" \n\t" \ | ||
337 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
338 | + ".previous \n\t" \ | ||
339 | + "2: \n\t" \ | ||
340 | + : : "r" (param) : CLOBBER_LIST) | ||
341 | + | ||
342 | +#define ft_event2(id, callback, param, param2) \ | ||
343 | + __asm__ __volatile__( \ | ||
344 | + "1: jmp 2f \n\t" \ | ||
345 | + " subl $12, %%esp \n\t" \ | ||
346 | + " movl %1, 8(%%esp) \n\t" \ | ||
347 | + " movl %0, 4(%%esp) \n\t" \ | ||
348 | + " movl $" #id ", (%%esp) \n\t" \ | ||
349 | + " call " #callback " \n\t" \ | ||
350 | + " addl $12, %%esp \n\t" \ | ||
351 | + ".section __event_table, \"aw\" \n\t" \ | ||
352 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
353 | + ".previous \n\t" \ | ||
354 | + "2: \n\t" \ | ||
355 | + : : "r" (param), "r" (param2) : CLOBBER_LIST) | ||
356 | + | ||
357 | + | ||
358 | +#define ft_event3(id, callback, p, p2, p3) \ | ||
359 | + __asm__ __volatile__( \ | ||
360 | + "1: jmp 2f \n\t" \ | ||
361 | + " subl $16, %%esp \n\t" \ | ||
362 | + " movl %2, 12(%%esp) \n\t" \ | ||
363 | + " movl %1, 8(%%esp) \n\t" \ | ||
364 | + " movl %0, 4(%%esp) \n\t" \ | ||
365 | + " movl $" #id ", (%%esp) \n\t" \ | ||
366 | + " call " #callback " \n\t" \ | ||
367 | + " addl $16, %%esp \n\t" \ | ||
368 | + ".section __event_table, \"aw\" \n\t" \ | ||
369 | + ".long " #id ", 0, 1b, 2f \n\t" \ | ||
370 | + ".previous \n\t" \ | ||
371 | + "2: \n\t" \ | ||
372 | + : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) | ||
373 | + | ||
374 | diff --git a/arch/x86/include/asm/feather_trace_64.h b/arch/x86/include/asm/feather_trace_64.h | ||
375 | new file mode 100644 | ||
376 | index 0000000..54ac2ae | ||
377 | --- /dev/null | ||
378 | +++ b/arch/x86/include/asm/feather_trace_64.h | ||
379 | @@ -0,0 +1,67 @@ | ||
380 | +/* Do not directly include this file. Include feather_trace.h instead */ | ||
381 | + | ||
382 | +/* regparm is the default on x86_64 */ | ||
383 | +#define feather_callback | ||
384 | + | ||
385 | +# define _EVENT_TABLE(id,from,to) \ | ||
386 | + ".section __event_table, \"aw\"\n\t" \ | ||
387 | + ".balign 8\n\t" \ | ||
388 | + ".quad " #id ", 0, " #from ", " #to " \n\t" \ | ||
389 | + ".previous \n\t" | ||
390 | + | ||
391 | +/* | ||
392 | + * x86_64 callee only owns rbp, rbx, r12 -> r15 | ||
393 | + * the callee can freely modify the others | ||
394 | + */ | ||
395 | +#define CLOBBER_LIST "memory", "cc", "rdi", "rsi", "rdx", "rcx", \ | ||
396 | + "r8", "r9", "r10", "r11", "rax" | ||
397 | + | ||
398 | +#define ft_event(id, callback) \ | ||
399 | + __asm__ __volatile__( \ | ||
400 | + "1: jmp 2f \n\t" \ | ||
401 | + " call " #callback " \n\t" \ | ||
402 | + _EVENT_TABLE(id,1b,2f) \ | ||
403 | + "2: \n\t" \ | ||
404 | + : : : CLOBBER_LIST) | ||
405 | + | ||
406 | +#define ft_event0(id, callback) \ | ||
407 | + __asm__ __volatile__( \ | ||
408 | + "1: jmp 2f \n\t" \ | ||
409 | + " movq $" #id ", %%rdi \n\t" \ | ||
410 | + " call " #callback " \n\t" \ | ||
411 | + _EVENT_TABLE(id,1b,2f) \ | ||
412 | + "2: \n\t" \ | ||
413 | + : : : CLOBBER_LIST) | ||
414 | + | ||
415 | +#define ft_event1(id, callback, param) \ | ||
416 | + __asm__ __volatile__( \ | ||
417 | + "1: jmp 2f \n\t" \ | ||
418 | + " movq %0, %%rsi \n\t" \ | ||
419 | + " movq $" #id ", %%rdi \n\t" \ | ||
420 | + " call " #callback " \n\t" \ | ||
421 | + _EVENT_TABLE(id,1b,2f) \ | ||
422 | + "2: \n\t" \ | ||
423 | + : : "r" (param) : CLOBBER_LIST) | ||
424 | + | ||
425 | +#define ft_event2(id, callback, param, param2) \ | ||
426 | + __asm__ __volatile__( \ | ||
427 | + "1: jmp 2f \n\t" \ | ||
428 | + " movq %1, %%rdx \n\t" \ | ||
429 | + " movq %0, %%rsi \n\t" \ | ||
430 | + " movq $" #id ", %%rdi \n\t" \ | ||
431 | + " call " #callback " \n\t" \ | ||
432 | + _EVENT_TABLE(id,1b,2f) \ | ||
433 | + "2: \n\t" \ | ||
434 | + : : "r" (param), "r" (param2) : CLOBBER_LIST) | ||
435 | + | ||
436 | +#define ft_event3(id, callback, p, p2, p3) \ | ||
437 | + __asm__ __volatile__( \ | ||
438 | + "1: jmp 2f \n\t" \ | ||
439 | + " movq %2, %%rcx \n\t" \ | ||
440 | + " movq %1, %%rdx \n\t" \ | ||
441 | + " movq %0, %%rsi \n\t" \ | ||
442 | + " movq $" #id ", %%rdi \n\t" \ | ||
443 | + " call " #callback " \n\t" \ | ||
444 | + _EVENT_TABLE(id,1b,2f) \ | ||
445 | + "2: \n\t" \ | ||
446 | + : : "r" (p), "r" (p2), "r" (p3) : CLOBBER_LIST) | ||
447 | diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h | ||
448 | index 46c0fe0..c174115 100644 | ||
449 | --- a/arch/x86/include/asm/hw_irq.h | ||
450 | +++ b/arch/x86/include/asm/hw_irq.h | ||
451 | @@ -53,6 +53,8 @@ extern void threshold_interrupt(void); | ||
452 | extern void call_function_interrupt(void); | ||
453 | extern void call_function_single_interrupt(void); | ||
454 | |||
455 | +extern void pull_timers_interrupt(void); | ||
456 | + | ||
457 | /* IOAPIC */ | ||
458 | #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs)) | ||
459 | extern unsigned long io_apic_irqs; | ||
460 | @@ -122,6 +124,7 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void); | ||
461 | extern void smp_reschedule_interrupt(struct pt_regs *); | ||
462 | extern void smp_call_function_interrupt(struct pt_regs *); | ||
463 | extern void smp_call_function_single_interrupt(struct pt_regs *); | ||
464 | +extern void smp_pull_timers_interrupt(struct pt_regs *); | ||
465 | #ifdef CONFIG_X86_32 | ||
466 | extern void smp_invalidate_interrupt(struct pt_regs *); | ||
467 | #else | ||
468 | diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h | ||
469 | index e2ca300..6143ebe 100644 | ||
470 | --- a/arch/x86/include/asm/irq_vectors.h | ||
471 | +++ b/arch/x86/include/asm/irq_vectors.h | ||
472 | @@ -109,6 +109,11 @@ | ||
473 | #define LOCAL_TIMER_VECTOR 0xef | ||
474 | |||
475 | /* | ||
476 | + * LITMUS^RT pull timers IRQ vector | ||
477 | + */ | ||
478 | +#define PULL_TIMERS_VECTOR 0xee | ||
479 | + | ||
480 | +/* | ||
481 | * Generic system vector for platform specific use | ||
482 | */ | ||
483 | #define X86_PLATFORM_IPI_VECTOR 0xed | ||
484 | diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h | ||
485 | index 325b7bd..ebaa04a 100644 | ||
486 | --- a/arch/x86/include/asm/processor.h | ||
487 | +++ b/arch/x86/include/asm/processor.h | ||
488 | @@ -169,6 +169,10 @@ extern void print_cpu_info(struct cpuinfo_x86 *); | ||
489 | extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | ||
490 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | ||
491 | extern unsigned short num_cache_leaves; | ||
492 | +#ifdef CONFIG_SYSFS | ||
493 | +extern int get_shared_cpu_map(cpumask_var_t mask, | ||
494 | + unsigned int cpu, int index); | ||
495 | +#endif | ||
496 | |||
497 | extern void detect_extended_topology(struct cpuinfo_x86 *c); | ||
498 | extern void detect_ht(struct cpuinfo_x86 *c); | ||
499 | diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h | ||
500 | index b766a5e..b7ba19a 100644 | ||
501 | --- a/arch/x86/include/asm/unistd_32.h | ||
502 | +++ b/arch/x86/include/asm/unistd_32.h | ||
503 | @@ -347,9 +347,13 @@ | ||
504 | #define __NR_fanotify_mark 339 | ||
505 | #define __NR_prlimit64 340 | ||
506 | |||
507 | +#define __NR_LITMUS 341 | ||
508 | + | ||
509 | +#include "litmus/unistd_32.h" | ||
510 | + | ||
511 | #ifdef __KERNEL__ | ||
512 | |||
513 | -#define NR_syscalls 341 | ||
514 | +#define NR_syscalls 341 + NR_litmus_syscalls | ||
515 | |||
516 | #define __ARCH_WANT_IPC_PARSE_VERSION | ||
517 | #define __ARCH_WANT_OLD_READDIR | ||
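The arithmetic behind the new NR_syscalls: this patch appends 14 LITMUS^RT entries to the 32-bit syscall table (sys_set_rt_task_param through sys_null_call, numbers 341-354, as listed in arch/x86/kernel/syscall_table_32.S below). Assuming litmus/unistd_32.h accordingly defines NR_litmus_syscalls as 14:

    /* Worked example (assuming NR_litmus_syscalls == 14):
     *   NR_syscalls = 341 + NR_litmus_syscalls = 341 + 14 = 355
     * so valid syscall numbers run 0..354, with 354 = sys_null_call.
     */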
518 | diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h | ||
519 | index 363e9b8..332bf3c 100644 | ||
520 | --- a/arch/x86/include/asm/unistd_64.h | ||
521 | +++ b/arch/x86/include/asm/unistd_64.h | ||
522 | @@ -670,6 +670,10 @@ __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) | ||
523 | #define __NR_prlimit64 302 | ||
524 | __SYSCALL(__NR_prlimit64, sys_prlimit64) | ||
525 | |||
526 | +#define __NR_LITMUS 303 | ||
527 | + | ||
528 | +#include "litmus/unistd_64.h" | ||
529 | + | ||
530 | #ifndef __NO_STUBS | ||
531 | #define __ARCH_WANT_OLD_READDIR | ||
532 | #define __ARCH_WANT_OLD_STAT | ||
533 | diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile | ||
534 | index fedf32a..6890dbb 100644 | ||
535 | --- a/arch/x86/kernel/Makefile | ||
536 | +++ b/arch/x86/kernel/Makefile | ||
537 | @@ -118,6 +118,8 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o | ||
538 | |||
539 | obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o | ||
540 | |||
541 | +obj-$(CONFIG_FEATHER_TRACE) += ft_event.o | ||
542 | + | ||
543 | ### | ||
544 | # 64 bit specific files | ||
545 | ifeq ($(CONFIG_X86_64),y) | ||
546 | diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c | ||
547 | index 898c2f4..3fec7d9 100644 | ||
548 | --- a/arch/x86/kernel/cpu/intel_cacheinfo.c | ||
549 | +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | ||
550 | @@ -758,6 +758,23 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | ||
551 | static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); | ||
552 | #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) | ||
553 | |||
554 | +/* returns the CPUs that share the index-th cache leaf with cpu */ | ||
555 | +int get_shared_cpu_map(cpumask_var_t mask, unsigned int cpu, int index) | ||
556 | +{ | ||
557 | + int ret = 0; | ||
558 | + struct _cpuid4_info *this_leaf; | ||
559 | + | ||
560 | + if (index >= num_cache_leaves) { | ||
561 | + index = num_cache_leaves - 1; | ||
562 | + ret = index; | ||
563 | + } | ||
564 | + | ||
565 | + this_leaf = CPUID4_INFO_IDX(cpu,index); | ||
566 | + cpumask_copy(mask, to_cpumask(this_leaf->shared_cpu_map)); | ||
567 | + | ||
568 | + return ret; | ||
569 | +} | ||
570 | + | ||
571 | #ifdef CONFIG_SMP | ||
572 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | ||
573 | { | ||
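A sketch of how a client might call the newly exported helper; names and the choice of leaf index are illustrative, and per the implementation above an out-of-range index is clamped to the last cache leaf (with the clamped index returned):

    /* Illustrative sketch: report which CPUs share cache leaf 'index'
     * with 'cpu' (e.g. the L2 or L3 leaf; the numbering is CPU-specific).
     */
    static int report_shared_cache(unsigned int cpu, int index)
    {
        cpumask_var_t mask;
        char buf[128];
        int ret;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;
        ret = get_shared_cpu_map(mask, cpu, index);
        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_INFO "cpu %u, leaf %d: shared with %s\n", cpu, index, buf);
        free_cpumask_var(mask);
        return ret;
    }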
574 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S | ||
575 | index 17be5ec..115e895 100644 | ||
576 | --- a/arch/x86/kernel/entry_64.S | ||
577 | +++ b/arch/x86/kernel/entry_64.S | ||
578 | @@ -1016,6 +1016,8 @@ apicinterrupt CALL_FUNCTION_VECTOR \ | ||
579 | call_function_interrupt smp_call_function_interrupt | ||
580 | apicinterrupt RESCHEDULE_VECTOR \ | ||
581 | reschedule_interrupt smp_reschedule_interrupt | ||
582 | +apicinterrupt PULL_TIMERS_VECTOR \ | ||
583 | + pull_timers_interrupt smp_pull_timers_interrupt | ||
584 | #endif | ||
585 | |||
586 | apicinterrupt ERROR_APIC_VECTOR \ | ||
587 | diff --git a/arch/x86/kernel/ft_event.c b/arch/x86/kernel/ft_event.c | ||
588 | new file mode 100644 | ||
589 | index 0000000..37cc332 | ||
590 | --- /dev/null | ||
591 | +++ b/arch/x86/kernel/ft_event.c | ||
592 | @@ -0,0 +1,118 @@ | ||
593 | +#include <linux/types.h> | ||
594 | + | ||
595 | +#include <litmus/feather_trace.h> | ||
596 | + | ||
597 | +/* the feather trace management functions assume | ||
598 | + * exclusive access to the event table | ||
599 | + */ | ||
600 | + | ||
601 | +#ifndef CONFIG_DEBUG_RODATA | ||
602 | + | ||
603 | +#define BYTE_JUMP 0xeb | ||
604 | +#define BYTE_JUMP_LEN 0x02 | ||
605 | + | ||
606 | +/* for each event, there is an entry in the event table */ | ||
607 | +struct trace_event { | ||
608 | + long id; | ||
609 | + long count; | ||
610 | + long start_addr; | ||
611 | + long end_addr; | ||
612 | +}; | ||
613 | + | ||
614 | +extern struct trace_event __start___event_table[]; | ||
615 | +extern struct trace_event __stop___event_table[]; | ||
616 | + | ||
617 | +/* Workaround: if no events are defined, then the event_table section does not | ||
618 | + * exist and the above references cause linker errors. This could probably be | ||
619 | + * fixed by adjusting the linker script, but it is easier to maintain for us if | ||
620 | + * we simply create a dummy symbol in the event table section. | ||
621 | + */ | ||
622 | +int __event_table_dummy[0] __attribute__ ((section("__event_table"))); | ||
623 | + | ||
624 | +int ft_enable_event(unsigned long id) | ||
625 | +{ | ||
626 | + struct trace_event* te = __start___event_table; | ||
627 | + int count = 0; | ||
628 | + char* delta; | ||
629 | + unsigned char* instr; | ||
630 | + | ||
631 | + while (te < __stop___event_table) { | ||
632 | + if (te->id == id && ++te->count == 1) { | ||
633 | + instr = (unsigned char*) te->start_addr; | ||
634 | + /* make sure we don't clobber something wrong */ | ||
635 | + if (*instr == BYTE_JUMP) { | ||
636 | + delta = (((unsigned char*) te->start_addr) + 1); | ||
637 | + *delta = 0; | ||
638 | + } | ||
639 | + } | ||
640 | + if (te->id == id) | ||
641 | + count++; | ||
642 | + te++; | ||
643 | + } | ||
644 | + | ||
645 | + printk(KERN_DEBUG "ft_enable_event: enabled %d events\n", count); | ||
646 | + return count; | ||
647 | +} | ||
648 | + | ||
649 | +int ft_disable_event(unsigned long id) | ||
650 | +{ | ||
651 | + struct trace_event* te = __start___event_table; | ||
652 | + int count = 0; | ||
653 | + char* delta; | ||
654 | + unsigned char* instr; | ||
655 | + | ||
656 | + while (te < __stop___event_table) { | ||
657 | + if (te->id == id && --te->count == 0) { | ||
658 | + instr = (unsigned char*) te->start_addr; | ||
659 | + if (*instr == BYTE_JUMP) { | ||
660 | + delta = (((unsigned char*) te->start_addr) + 1); | ||
661 | + *delta = te->end_addr - te->start_addr - | ||
662 | + BYTE_JUMP_LEN; | ||
663 | + } | ||
664 | + } | ||
665 | + if (te->id == id) | ||
666 | + count++; | ||
667 | + te++; | ||
668 | + } | ||
669 | + | ||
670 | + printk(KERN_DEBUG "ft_disable_event: disabled %d events\n", count); | ||
671 | + return count; | ||
672 | +} | ||
673 | + | ||
674 | +int ft_disable_all_events(void) | ||
675 | +{ | ||
676 | + struct trace_event* te = __start___event_table; | ||
677 | + int count = 0; | ||
678 | + char* delta; | ||
679 | + unsigned char* instr; | ||
680 | + | ||
681 | + while (te < __stop___event_table) { | ||
682 | + if (te->count) { | ||
683 | + instr = (unsigned char*) te->start_addr; | ||
684 | + if (*instr == BYTE_JUMP) { | ||
685 | + delta = (((unsigned char*) te->start_addr) | ||
686 | + + 1); | ||
687 | + *delta = te->end_addr - te->start_addr - | ||
688 | + BYTE_JUMP_LEN; | ||
689 | + te->count = 0; | ||
690 | + count++; | ||
691 | + } | ||
692 | + } | ||
693 | + te++; | ||
694 | + } | ||
695 | + return count; | ||
696 | +} | ||
697 | + | ||
698 | +int ft_is_event_enabled(unsigned long id) | ||
699 | +{ | ||
700 | + struct trace_event* te = __start___event_table; | ||
701 | + | ||
702 | + while (te < __stop___event_table) { | ||
703 | + if (te->id == id) | ||
704 | + return te->count; | ||
705 | + te++; | ||
706 | + } | ||
707 | + return 0; | ||
708 | +} | ||
709 | + | ||
710 | +#endif | ||
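The patching scheme deserves a worked example. Every ft_event* macro plants a two-byte short jump (0xeb, displacement) at the event's start address; ft_enable_event() zeroes the displacement so execution falls through into the call, and ft_disable_event() restores a displacement that skips it. With hypothetical addresses:

    /* Worked example of the displacement arithmetic (addresses invented):
     *   start_addr = 0x1000:  eb XX      <- the "1:" label
     *   0x1002 ...         :  call ...   (argument set-up + call)
     *   end_addr   = 0x1007:  ...        <- the "2:" label
     *
     * Disabled: XX = end_addr - start_addr - BYTE_JUMP_LEN
     *              = 0x1007 - 0x1000 - 2 = 5, i.e. "eb 05" skips the call.
     * Enabled:  XX = 0, i.e. "eb 00" falls through into the call.
     */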
711 | diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c | ||
712 | index 990ae7c..9772b1a 100644 | ||
713 | --- a/arch/x86/kernel/irqinit.c | ||
714 | +++ b/arch/x86/kernel/irqinit.c | ||
715 | @@ -189,6 +189,9 @@ static void __init smp_intr_init(void) | ||
716 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
717 | call_function_single_interrupt); | ||
718 | |||
719 | + /* IPI for hrtimer pulling on remote cpus */ | ||
720 | + alloc_intr_gate(PULL_TIMERS_VECTOR, pull_timers_interrupt); | ||
721 | + | ||
722 | /* Low priority IPI to cleanup after moving an irq */ | ||
723 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | ||
724 | set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors); | ||
725 | diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c | ||
726 | index d801210..74cca60 100644 | ||
727 | --- a/arch/x86/kernel/smp.c | ||
728 | +++ b/arch/x86/kernel/smp.c | ||
729 | @@ -23,6 +23,10 @@ | ||
730 | #include <linux/cpu.h> | ||
731 | #include <linux/gfp.h> | ||
732 | |||
733 | +#include <litmus/preempt.h> | ||
734 | +#include <litmus/debug_trace.h> | ||
735 | +#include <litmus/trace.h> | ||
736 | + | ||
737 | #include <asm/mtrr.h> | ||
738 | #include <asm/tlbflush.h> | ||
739 | #include <asm/mmu_context.h> | ||
740 | @@ -118,6 +122,7 @@ static void native_smp_send_reschedule(int cpu) | ||
741 | WARN_ON(1); | ||
742 | return; | ||
743 | } | ||
744 | + TS_SEND_RESCHED_START(cpu); | ||
745 | apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR); | ||
746 | } | ||
747 | |||
748 | @@ -147,6 +152,16 @@ void native_send_call_func_ipi(const struct cpumask *mask) | ||
749 | free_cpumask_var(allbutself); | ||
750 | } | ||
751 | |||
752 | +/* trigger timers on remote cpu */ | ||
753 | +void smp_send_pull_timers(int cpu) | ||
754 | +{ | ||
755 | + if (unlikely(cpu_is_offline(cpu))) { | ||
756 | + WARN_ON(1); | ||
757 | + return; | ||
758 | + } | ||
759 | + apic->send_IPI_mask(cpumask_of(cpu), PULL_TIMERS_VECTOR); | ||
760 | +} | ||
761 | + | ||
762 | /* | ||
763 | * this function calls the 'stop' function on all other CPUs in the system. | ||
764 | */ | ||
765 | @@ -198,7 +213,10 @@ static void native_smp_send_stop(void) | ||
766 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
767 | { | ||
768 | ack_APIC_irq(); | ||
769 | + /* LITMUS^RT: this IPI might need to trigger the sched state machine. */ | ||
770 | + sched_state_ipi(); | ||
771 | inc_irq_stat(irq_resched_count); | ||
772 | + TS_SEND_RESCHED_END; | ||
773 | /* | ||
774 | * KVM uses this interrupt to force a cpu out of guest mode | ||
775 | */ | ||
776 | @@ -222,6 +240,15 @@ void smp_call_function_single_interrupt(struct pt_regs *regs) | ||
777 | irq_exit(); | ||
778 | } | ||
779 | |||
780 | +extern void hrtimer_pull(void); | ||
781 | + | ||
782 | +void smp_pull_timers_interrupt(struct pt_regs *regs) | ||
783 | +{ | ||
784 | + ack_APIC_irq(); | ||
785 | + TRACE("pull timer interrupt\n"); | ||
786 | + hrtimer_pull(); | ||
787 | +} | ||
788 | + | ||
789 | struct smp_ops smp_ops = { | ||
790 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
791 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
792 | diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S | ||
793 | index b35786d..d78c5ed 100644 | ||
794 | --- a/arch/x86/kernel/syscall_table_32.S | ||
795 | +++ b/arch/x86/kernel/syscall_table_32.S | ||
796 | @@ -340,3 +340,17 @@ ENTRY(sys_call_table) | ||
797 | .long sys_fanotify_init | ||
798 | .long sys_fanotify_mark | ||
799 | .long sys_prlimit64 /* 340 */ | ||
800 | + .long sys_set_rt_task_param /* LITMUS^RT 341 */ | ||
801 | + .long sys_get_rt_task_param | ||
802 | + .long sys_complete_job | ||
803 | + .long sys_od_open | ||
804 | + .long sys_od_close | ||
805 | + .long sys_fmlp_down | ||
806 | + .long sys_fmlp_up | ||
807 | + .long sys_srp_down | ||
808 | + .long sys_srp_up | ||
809 | + .long sys_query_job_no | ||
810 | + .long sys_wait_for_job_release | ||
811 | + .long sys_wait_for_ts_release | ||
812 | + .long sys_release_ts | ||
813 | + .long sys_null_call | ||
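From userspace these entries are reachable through the generic syscall(2) interface even before the liblitmus wrappers are installed. A hedged sketch for x86_32; the number follows from the table above, and the argument convention of sys_null_call (a single, here-unused pointer) is an assumption:

    /* Hypothetical standalone test: invoke the LITMUS^RT null syscall. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define NR_null_call 354  /* 341 + 13: last entry in the table above */

    int main(void)
    {
        long ret = syscall(NR_null_call, NULL);
        printf("sys_null_call returned %ld\n", ret);
        return ret < 0;
    }

In practice, liblitmus (see liblitmus-2011.1.tgz above) wraps all of these calls.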
814 | diff --git a/fs/exec.c b/fs/exec.c | ||
815 | index 6d2b6f9..56536ad 100644 | ||
816 | --- a/fs/exec.c | ||
817 | +++ b/fs/exec.c | ||
818 | @@ -19,7 +19,7 @@ | ||
819 | * current->executable is only used by the procfs. This allows a dispatch | ||
820 | * table to check for several different types of binary formats. We keep | ||
821 | * trying until we recognize the file or we run out of supported binary | ||
822 | - * formats. | ||
823 | + * formats. | ||
824 | */ | ||
825 | |||
826 | #include <linux/slab.h> | ||
827 | @@ -55,6 +55,8 @@ | ||
828 | #include <linux/fs_struct.h> | ||
829 | #include <linux/pipe_fs_i.h> | ||
830 | |||
831 | +#include <litmus/litmus.h> | ||
832 | + | ||
833 | #include <asm/uaccess.h> | ||
834 | #include <asm/mmu_context.h> | ||
835 | #include <asm/tlb.h> | ||
836 | @@ -78,7 +80,7 @@ int __register_binfmt(struct linux_binfmt * fmt, int insert) | ||
837 | insert ? list_add(&fmt->lh, &formats) : | ||
838 | list_add_tail(&fmt->lh, &formats); | ||
839 | write_unlock(&binfmt_lock); | ||
840 | - return 0; | ||
841 | + return 0; | ||
842 | } | ||
843 | |||
844 | EXPORT_SYMBOL(__register_binfmt); | ||
845 | @@ -1064,7 +1066,7 @@ void setup_new_exec(struct linux_binprm * bprm) | ||
846 | group */ | ||
847 | |||
848 | current->self_exec_id++; | ||
849 | - | ||
850 | + | ||
851 | flush_signal_handlers(current, 0); | ||
852 | flush_old_files(current->files); | ||
853 | } | ||
854 | @@ -1154,8 +1156,8 @@ int check_unsafe_exec(struct linux_binprm *bprm) | ||
855 | return res; | ||
856 | } | ||
857 | |||
858 | -/* | ||
859 | - * Fill the binprm structure from the inode. | ||
860 | +/* | ||
861 | + * Fill the binprm structure from the inode. | ||
862 | * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes | ||
863 | * | ||
864 | * This may be called multiple times for binary chains (scripts for example). | ||
865 | @@ -1367,6 +1369,7 @@ int do_execve(const char * filename, | ||
866 | goto out_unmark; | ||
867 | |||
868 | sched_exec(); | ||
869 | + litmus_exec(); | ||
870 | |||
871 | bprm->file = file; | ||
872 | bprm->filename = filename; | ||
873 | diff --git a/fs/inode.c b/fs/inode.c | ||
874 | index 8646433..d4fe9c0 100644 | ||
875 | --- a/fs/inode.c | ||
876 | +++ b/fs/inode.c | ||
877 | @@ -266,6 +266,8 @@ void inode_init_once(struct inode *inode) | ||
878 | #ifdef CONFIG_FSNOTIFY | ||
879 | INIT_HLIST_HEAD(&inode->i_fsnotify_marks); | ||
880 | #endif | ||
881 | + INIT_LIST_HEAD(&inode->i_obj_list); | ||
882 | + mutex_init(&inode->i_obj_mutex); | ||
883 | } | ||
884 | EXPORT_SYMBOL(inode_init_once); | ||
885 | |||
886 | diff --git a/include/linux/completion.h b/include/linux/completion.h | ||
887 | index 51e3145..c63950e 100644 | ||
888 | --- a/include/linux/completion.h | ||
889 | +++ b/include/linux/completion.h | ||
890 | @@ -90,6 +90,7 @@ extern bool completion_done(struct completion *x); | ||
891 | |||
892 | extern void complete(struct completion *); | ||
893 | extern void complete_all(struct completion *); | ||
894 | +extern void complete_n(struct completion *, int n); | ||
895 | |||
896 | /** | ||
897 | * INIT_COMPLETION: - reinitialize a completion structure | ||
898 | diff --git a/include/linux/fs.h b/include/linux/fs.h | ||
899 | index 63d069b..29a6724 100644 | ||
900 | --- a/include/linux/fs.h | ||
901 | +++ b/include/linux/fs.h | ||
902 | @@ -16,8 +16,8 @@ | ||
903 | * nr_file rlimit, so it's safe to set up a ridiculously high absolute | ||
904 | * upper limit on files-per-process. | ||
905 | * | ||
906 | - * Some programs (notably those using select()) may have to be | ||
907 | - * recompiled to take full advantage of the new limits.. | ||
908 | + * Some programs (notably those using select()) may have to be | ||
909 | + * recompiled to take full advantage of the new limits.. | ||
910 | */ | ||
911 | |||
912 | /* Fixed constants first: */ | ||
913 | @@ -172,7 +172,7 @@ struct inodes_stat_t { | ||
914 | #define SEL_EX 4 | ||
915 | |||
916 | /* public flags for file_system_type */ | ||
917 | -#define FS_REQUIRES_DEV 1 | ||
918 | +#define FS_REQUIRES_DEV 1 | ||
919 | #define FS_BINARY_MOUNTDATA 2 | ||
920 | #define FS_HAS_SUBTYPE 4 | ||
921 | #define FS_REVAL_DOT 16384 /* Check the paths ".", ".." for staleness */ | ||
922 | @@ -470,7 +470,7 @@ struct iattr { | ||
923 | */ | ||
924 | #include <linux/quota.h> | ||
925 | |||
926 | -/** | ||
927 | +/** | ||
928 | * enum positive_aop_returns - aop return codes with specific semantics | ||
929 | * | ||
930 | * @AOP_WRITEPAGE_ACTIVATE: Informs the caller that page writeback has | ||
931 | @@ -480,7 +480,7 @@ struct iattr { | ||
932 | * be a candidate for writeback again in the near | ||
933 | * future. Other callers must be careful to unlock | ||
934 | * the page if they get this return. Returned by | ||
935 | - * writepage(); | ||
936 | + * writepage(); | ||
937 | * | ||
938 | * @AOP_TRUNCATED_PAGE: The AOP method that was handed a locked page has | ||
939 | * unlocked it and the page might have been truncated. | ||
940 | @@ -721,6 +721,7 @@ static inline int mapping_writably_mapped(struct address_space *mapping) | ||
941 | |||
942 | struct posix_acl; | ||
943 | #define ACL_NOT_CACHED ((void *)(-1)) | ||
944 | +struct inode_obj_id_table; | ||
945 | |||
946 | struct inode { | ||
947 | struct hlist_node i_hash; | ||
948 | @@ -784,6 +785,8 @@ struct inode { | ||
949 | struct posix_acl *i_acl; | ||
950 | struct posix_acl *i_default_acl; | ||
951 | #endif | ||
952 | + struct list_head i_obj_list; | ||
953 | + struct mutex i_obj_mutex; | ||
954 | void *i_private; /* fs or device private pointer */ | ||
955 | }; | ||
956 | |||
957 | @@ -997,10 +1000,10 @@ static inline int file_check_writeable(struct file *filp) | ||
958 | |||
959 | #define MAX_NON_LFS ((1UL<<31) - 1) | ||
960 | |||
961 | -/* Page cache limit. The filesystems should put that into their s_maxbytes | ||
962 | - limits, otherwise bad things can happen in VM. */ | ||
963 | +/* Page cache limit. The filesystems should put that into their s_maxbytes | ||
964 | + limits, otherwise bad things can happen in VM. */ | ||
965 | #if BITS_PER_LONG==32 | ||
966 | -#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) | ||
967 | +#define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) | ||
968 | #elif BITS_PER_LONG==64 | ||
969 | #define MAX_LFS_FILESIZE 0x7fffffffffffffffUL | ||
970 | #endif | ||
971 | @@ -2145,7 +2148,7 @@ extern int may_open(struct path *, int, int); | ||
972 | |||
973 | extern int kernel_read(struct file *, loff_t, char *, unsigned long); | ||
974 | extern struct file * open_exec(const char *); | ||
975 | - | ||
976 | + | ||
977 | /* fs/dcache.c -- generic fs support functions */ | ||
978 | extern int is_subdir(struct dentry *, struct dentry *); | ||
979 | extern int path_is_under(struct path *, struct path *); | ||
980 | diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h | ||
981 | index fd0c1b8..76da541 100644 | ||
982 | --- a/include/linux/hrtimer.h | ||
983 | +++ b/include/linux/hrtimer.h | ||
984 | @@ -167,6 +167,7 @@ struct hrtimer_clock_base { | ||
985 | * @nr_retries: Total number of hrtimer interrupt retries | ||
986 | * @nr_hangs: Total number of hrtimer interrupt hangs | ||
987 | * @max_hang_time: Maximum time spent in hrtimer_interrupt | ||
988 | + * @to_pull: LITMUS^RT list of timers to be pulled on this cpu | ||
989 | */ | ||
990 | struct hrtimer_cpu_base { | ||
991 | raw_spinlock_t lock; | ||
992 | @@ -180,8 +181,32 @@ struct hrtimer_cpu_base { | ||
993 | unsigned long nr_hangs; | ||
994 | ktime_t max_hang_time; | ||
995 | #endif | ||
996 | + struct list_head to_pull; | ||
997 | }; | ||
998 | |||
999 | +#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS | ||
1000 | + | ||
1001 | +#define HRTIMER_START_ON_INACTIVE 0 | ||
1002 | +#define HRTIMER_START_ON_QUEUED 1 | ||
1003 | + | ||
1004 | +/* | ||
1005 | + * struct hrtimer_start_on_info - save timer info on remote cpu | ||
1006 | + * @list: list of hrtimer_start_on_info on remote cpu (to_pull) | ||
1007 | + * @timer: timer to be triggered on remote cpu | ||
1008 | + * @time: time event | ||
1009 | + * @mode: timer mode | ||
1010 | + * @state: activity flag | ||
1011 | + */ | ||
1012 | +struct hrtimer_start_on_info { | ||
1013 | + struct list_head list; | ||
1014 | + struct hrtimer *timer; | ||
1015 | + ktime_t time; | ||
1016 | + enum hrtimer_mode mode; | ||
1017 | + atomic_t state; | ||
1018 | +}; | ||
1019 | + | ||
1020 | +#endif | ||
1021 | + | ||
1022 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | ||
1023 | { | ||
1024 | timer->_expires = time; | ||
1025 | @@ -348,6 +373,13 @@ __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | ||
1026 | unsigned long delta_ns, | ||
1027 | const enum hrtimer_mode mode, int wakeup); | ||
1028 | |||
1029 | +#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS | ||
1030 | +extern void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info); | ||
1031 | +extern int hrtimer_start_on(int cpu, struct hrtimer_start_on_info *info, | ||
1032 | + struct hrtimer *timer, ktime_t time, | ||
1033 | + const enum hrtimer_mode mode); | ||
1034 | +#endif | ||
1035 | + | ||
1036 | extern int hrtimer_cancel(struct hrtimer *timer); | ||
1037 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | ||
1038 | |||
1039 | diff --git a/include/linux/sched.h b/include/linux/sched.h | ||
1040 | index 1e2a6db..c9ac4fc 100644 | ||
1041 | --- a/include/linux/sched.h | ||
1042 | +++ b/include/linux/sched.h | ||
1043 | @@ -38,6 +38,7 @@ | ||
1044 | #define SCHED_BATCH 3 | ||
1045 | /* SCHED_ISO: reserved but not implemented yet */ | ||
1046 | #define SCHED_IDLE 5 | ||
1047 | +#define SCHED_LITMUS 6 | ||
1048 | /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ | ||
1049 | #define SCHED_RESET_ON_FORK 0x40000000 | ||
1050 | |||
1051 | @@ -94,6 +95,9 @@ struct sched_param { | ||
1052 | |||
1053 | #include <asm/processor.h> | ||
1054 | |||
1055 | +#include <litmus/rt_param.h> | ||
1056 | +#include <litmus/preempt.h> | ||
1057 | + | ||
1058 | struct exec_domain; | ||
1059 | struct futex_pi_state; | ||
1060 | struct robust_list_head; | ||
1061 | @@ -1159,6 +1163,7 @@ struct sched_rt_entity { | ||
1062 | }; | ||
1063 | |||
1064 | struct rcu_node; | ||
1065 | +struct od_table_entry; | ||
1066 | |||
1067 | struct task_struct { | ||
1068 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | ||
1069 | @@ -1243,9 +1248,9 @@ struct task_struct { | ||
1070 | unsigned long stack_canary; | ||
1071 | #endif | ||
1072 | |||
1073 | - /* | ||
1074 | + /* | ||
1075 | * pointers to (original) parent process, youngest child, younger sibling, | ||
1076 | - * older sibling, respectively. (p->father can be replaced with | ||
1077 | + * older sibling, respectively. (p->father can be replaced with | ||
1078 | * p->real_parent->pid) | ||
1079 | */ | ||
1080 | struct task_struct *real_parent; /* real parent process */ | ||
1081 | @@ -1453,6 +1458,13 @@ struct task_struct { | ||
1082 | int make_it_fail; | ||
1083 | #endif | ||
1084 | struct prop_local_single dirties; | ||
1085 | + | ||
1086 | + /* LITMUS RT parameters and state */ | ||
1087 | + struct rt_param rt_param; | ||
1088 | + | ||
1089 | + /* references to PI semaphores, etc. */ | ||
1090 | + struct od_table_entry *od_table; | ||
1091 | + | ||
1092 | #ifdef CONFIG_LATENCYTOP | ||
1093 | int latency_record_count; | ||
1094 | struct latency_record latency_record[LT_SAVECOUNT]; | ||
1095 | @@ -2014,7 +2026,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s | ||
1096 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | ||
1097 | |||
1098 | return ret; | ||
1099 | -} | ||
1100 | +} | ||
1101 | |||
1102 | extern void block_all_signals(int (*notifier)(void *priv), void *priv, | ||
1103 | sigset_t *mask); | ||
1104 | @@ -2290,6 +2302,7 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) | ||
1105 | static inline void set_tsk_need_resched(struct task_struct *tsk) | ||
1106 | { | ||
1107 | set_tsk_thread_flag(tsk,TIF_NEED_RESCHED); | ||
1108 | + sched_state_will_schedule(tsk); | ||
1109 | } | ||
1110 | |||
1111 | static inline void clear_tsk_need_resched(struct task_struct *tsk) | ||
1112 | diff --git a/include/linux/smp.h b/include/linux/smp.h | ||
1113 | index cfa2d20..f86d407 100644 | ||
1114 | --- a/include/linux/smp.h | ||
1115 | +++ b/include/linux/smp.h | ||
1116 | @@ -80,6 +80,11 @@ int smp_call_function_any(const struct cpumask *mask, | ||
1117 | void (*func)(void *info), void *info, int wait); | ||
1118 | |||
1119 | /* | ||
1120 | + * sends a 'pull timer' event to a remote CPU | ||
1121 | + */ | ||
1122 | +extern void smp_send_pull_timers(int cpu); | ||
1123 | + | ||
1124 | +/* | ||
1125 | * Generic and arch helpers | ||
1126 | */ | ||
1127 | #ifdef CONFIG_USE_GENERIC_SMP_HELPERS | ||
1128 | diff --git a/include/linux/tick.h b/include/linux/tick.h | ||
1129 | index b232ccc..1e29bd5 100644 | ||
1130 | --- a/include/linux/tick.h | ||
1131 | +++ b/include/linux/tick.h | ||
1132 | @@ -74,6 +74,11 @@ extern int tick_is_oneshot_available(void); | ||
1133 | extern struct tick_device *tick_get_device(int cpu); | ||
1134 | |||
1135 | # ifdef CONFIG_HIGH_RES_TIMERS | ||
1136 | +/* LITMUS^RT tick alignment */ | ||
1137 | +#define LINUX_DEFAULT_TICKS 0 | ||
1138 | +#define LITMUS_ALIGNED_TICKS 1 | ||
1139 | +#define LITMUS_STAGGERED_TICKS 2 | ||
1140 | + | ||
1141 | extern int tick_init_highres(void); | ||
1142 | extern int tick_program_event(ktime_t expires, int force); | ||
1143 | extern void tick_setup_sched_timer(void); | ||
1144 | diff --git a/include/litmus/bheap.h b/include/litmus/bheap.h | ||
1145 | new file mode 100644 | ||
1146 | index 0000000..cf4864a | ||
1147 | --- /dev/null | ||
1148 | +++ b/include/litmus/bheap.h | ||
1149 | @@ -0,0 +1,77 @@ | ||
1150 | +/* bheap.h -- Binomial Heaps | ||
1151 | + * | ||
1152 | + * (c) 2008, 2009 Bjoern Brandenburg | ||
1153 | + */ | ||
1154 | + | ||
1155 | +#ifndef BHEAP_H | ||
1156 | +#define BHEAP_H | ||
1157 | + | ||
1158 | +#define NOT_IN_HEAP UINT_MAX | ||
1159 | + | ||
1160 | +struct bheap_node { | ||
1161 | + struct bheap_node* parent; | ||
1162 | + struct bheap_node* next; | ||
1163 | + struct bheap_node* child; | ||
1164 | + | ||
1165 | + unsigned int degree; | ||
1166 | + void* value; | ||
1167 | + struct bheap_node** ref; | ||
1168 | +}; | ||
1169 | + | ||
1170 | +struct bheap { | ||
1171 | + struct bheap_node* head; | ||
1172 | + /* We cache the minimum of the heap. | ||
1173 | + * This speeds up repeated peek operations. | ||
1174 | + */ | ||
1175 | + struct bheap_node* min; | ||
1176 | +}; | ||
1177 | + | ||
1178 | +typedef int (*bheap_prio_t)(struct bheap_node* a, struct bheap_node* b); | ||
1179 | + | ||
1180 | +void bheap_init(struct bheap* heap); | ||
1181 | +void bheap_node_init(struct bheap_node** ref_to_bheap_node_ptr, void* value); | ||
1182 | + | ||
1183 | +static inline int bheap_node_in_heap(struct bheap_node* h) | ||
1184 | +{ | ||
1185 | + return h->degree != NOT_IN_HEAP; | ||
1186 | +} | ||
1187 | + | ||
1188 | +static inline int bheap_empty(struct bheap* heap) | ||
1189 | +{ | ||
1190 | + return heap->head == NULL && heap->min == NULL; | ||
1191 | +} | ||
1192 | + | ||
1193 | +/* insert (and reinitialize) a node into the heap */ | ||
1194 | +void bheap_insert(bheap_prio_t higher_prio, | ||
1195 | + struct bheap* heap, | ||
1196 | + struct bheap_node* node); | ||
1197 | + | ||
1198 | +/* merge addition into target */ | ||
1199 | +void bheap_union(bheap_prio_t higher_prio, | ||
1200 | + struct bheap* target, | ||
1201 | + struct bheap* addition); | ||
1202 | + | ||
1203 | +struct bheap_node* bheap_peek(bheap_prio_t higher_prio, | ||
1204 | + struct bheap* heap); | ||
1205 | + | ||
1206 | +struct bheap_node* bheap_take(bheap_prio_t higher_prio, | ||
1207 | + struct bheap* heap); | ||
1208 | + | ||
1209 | +void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap); | ||
1210 | +int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node); | ||
1211 | + | ||
1212 | +void bheap_delete(bheap_prio_t higher_prio, | ||
1213 | + struct bheap* heap, | ||
1214 | + struct bheap_node* node); | ||
1215 | + | ||
1216 | +/* allocate from memcache */ | ||
1217 | +struct bheap_node* bheap_node_alloc(int gfp_flags); | ||
1218 | +void bheap_node_free(struct bheap_node* hn); | ||
1219 | + | ||
1220 | +/* allocate a heap node for value and insert into the heap */ | ||
1221 | +int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, | ||
1222 | + void* value, int gfp_flags); | ||
1223 | + | ||
1224 | +void* bheap_take_del(bheap_prio_t higher_prio, | ||
1225 | + struct bheap* heap); | ||
1226 | +#endif | ||
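To make the heap API concrete, a hedged sketch of a min-heap ordered by deadline; the element type is invented for illustration (the plugins below order task_structs this way via edf_ready_order):

    /* Hypothetical element with a back-reference to its heap node. */
    struct demo_item {
        unsigned long long deadline;
        struct bheap_node *hn;
    };

    /* bheap_prio_t comparator: non-zero iff 'a' has higher priority. */
    static int earlier_deadline(struct bheap_node *a, struct bheap_node *b)
    {
        struct demo_item *x = a->value, *y = b->value;
        return x->deadline < y->deadline;
    }

    /* Usage sketch:
     *   struct bheap h;
     *   bheap_init(&h);
     *   item->hn = bheap_node_alloc(GFP_ATOMIC);
     *   bheap_node_init(&item->hn, item);
     *   bheap_insert(earlier_deadline, &h, item->hn);
     *   ...
     *   struct bheap_node *min = bheap_take(earlier_deadline, &h);
     */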
1227 | diff --git a/include/litmus/budget.h b/include/litmus/budget.h | ||
1228 | new file mode 100644 | ||
1229 | index 0000000..732530e | ||
1230 | --- /dev/null | ||
1231 | +++ b/include/litmus/budget.h | ||
1232 | @@ -0,0 +1,8 @@ | ||
1233 | +#ifndef _LITMUS_BUDGET_H_ | ||
1234 | +#define _LITMUS_BUDGET_H_ | ||
1235 | + | ||
1236 | +/* Update the per-processor enforcement timer (arm/reprogram/cancel) for | ||
1237 | + * the next task. */ | ||
1238 | +void update_enforcement_timer(struct task_struct* t); | ||
1239 | + | ||
1240 | +#endif | ||
1241 | diff --git a/include/litmus/debug_trace.h b/include/litmus/debug_trace.h | ||
1242 | new file mode 100644 | ||
1243 | index 0000000..48d086d | ||
1244 | --- /dev/null | ||
1245 | +++ b/include/litmus/debug_trace.h | ||
1246 | @@ -0,0 +1,37 @@ | ||
1247 | +#ifndef LITMUS_DEBUG_TRACE_H | ||
1248 | +#define LITMUS_DEBUG_TRACE_H | ||
1249 | + | ||
1250 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
1251 | +void sched_trace_log_message(const char* fmt, ...); | ||
1252 | +void dump_trace_buffer(int max); | ||
1253 | +#else | ||
1254 | + | ||
1255 | +#define sched_trace_log_message(fmt, ...) | ||
1256 | + | ||
1257 | +#endif | ||
1258 | + | ||
1259 | +extern atomic_t __log_seq_no; | ||
1260 | + | ||
1261 | +#ifdef CONFIG_SCHED_DEBUG_TRACE_CALLER | ||
1262 | +#define TRACE_PREFIX "%d P%d [%s@%s:%d]: " | ||
1263 | +#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ | ||
1264 | + raw_smp_processor_id(), \ | ||
1265 | + __FUNCTION__, __FILE__, __LINE__ | ||
1266 | +#else | ||
1267 | +#define TRACE_PREFIX "%d P%d: " | ||
1268 | +#define TRACE_ARGS atomic_add_return(1, &__log_seq_no), \ | ||
1269 | + raw_smp_processor_id() | ||
1270 | +#endif | ||
1271 | + | ||
1272 | +#define TRACE(fmt, args...) \ | ||
1273 | + sched_trace_log_message(TRACE_PREFIX fmt, \ | ||
1274 | + TRACE_ARGS, ## args) | ||
1275 | + | ||
1276 | +#define TRACE_TASK(t, fmt, args...) \ | ||
1277 | + TRACE("(%s/%d:%d) " fmt, (t)->comm, (t)->pid, \ | ||
1278 | + (t)->rt_param.job_params.job_no, ##args) | ||
1279 | + | ||
1280 | +#define TRACE_CUR(fmt, args...) \ | ||
1281 | + TRACE_TASK(current, fmt, ## args) | ||
1282 | + | ||
1283 | +#endif | ||
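Usage is printf-like; a few invented examples of the macros above (TRACE_TASK additionally prints the task's name, pid, and current job number):

    /* Illustrative only; messages and variables are invented. */
    static void trace_examples(struct task_struct *t, unsigned long long dl)
    {
        TRACE("plugin switch complete\n");
        TRACE_TASK(t, "job released, deadline=%llu\n", dl);
        TRACE_CUR("blocking on FMLP semaphore\n");
    }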
1284 | diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h | ||
1285 | new file mode 100644 | ||
1286 | index 0000000..80d4321 | ||
1287 | --- /dev/null | ||
1288 | +++ b/include/litmus/edf_common.h | ||
1289 | @@ -0,0 +1,27 @@ | ||
1290 | +/* | ||
1291 | + * EDF common data structures and utility functions shared by all EDF | ||
1292 | + * based scheduler plugins | ||
1293 | + */ | ||
1294 | + | ||
1295 | +/* CLEANUP: Add comments and make it less messy. | ||
1296 | + * | ||
1297 | + */ | ||
1298 | + | ||
1299 | +#ifndef __UNC_EDF_COMMON_H__ | ||
1300 | +#define __UNC_EDF_COMMON_H__ | ||
1301 | + | ||
1302 | +#include <litmus/rt_domain.h> | ||
1303 | + | ||
1304 | +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
1305 | + release_jobs_t release); | ||
1306 | + | ||
1307 | +int edf_higher_prio(struct task_struct* first, | ||
1308 | + struct task_struct* second); | ||
1309 | + | ||
1310 | +int edf_ready_order(struct bheap_node* a, struct bheap_node* b); | ||
1311 | + | ||
1312 | +int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t); | ||
1313 | + | ||
1314 | +int edf_set_hp_task(struct pi_semaphore *sem); | ||
1315 | +int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu); | ||
1316 | +#endif | ||
1317 | diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h | ||
1318 | new file mode 100644 | ||
1319 | index 0000000..61f1b5b | ||
1320 | --- /dev/null | ||
1321 | +++ b/include/litmus/fdso.h | ||
1322 | @@ -0,0 +1,70 @@ | ||
1323 | +/* fdso.h - file descriptor attached shared objects | ||
1324 | + * | ||
1325 | + * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
1326 | + */ | ||
1327 | + | ||
1328 | +#ifndef _LINUX_FDSO_H_ | ||
1329 | +#define _LINUX_FDSO_H_ | ||
1330 | + | ||
1331 | +#include <linux/list.h> | ||
1332 | +#include <asm/atomic.h> | ||
1333 | + | ||
1334 | +#include <linux/fs.h> | ||
1335 | +#include <linux/slab.h> | ||
1336 | + | ||
1337 | +#define MAX_OBJECT_DESCRIPTORS 32 | ||
1338 | + | ||
1339 | +typedef enum { | ||
1340 | + MIN_OBJ_TYPE = 0, | ||
1341 | + | ||
1342 | + FMLP_SEM = 0, | ||
1343 | + SRP_SEM = 1, | ||
1344 | + | ||
1345 | + MAX_OBJ_TYPE = 1 | ||
1346 | +} obj_type_t; | ||
1347 | + | ||
1348 | +struct inode_obj_id { | ||
1349 | + struct list_head list; | ||
1350 | + atomic_t count; | ||
1351 | + struct inode* inode; | ||
1352 | + | ||
1353 | + obj_type_t type; | ||
1354 | + void* obj; | ||
1355 | + unsigned int id; | ||
1356 | +}; | ||
1357 | + | ||
1358 | + | ||
1359 | +struct od_table_entry { | ||
1360 | + unsigned int used; | ||
1361 | + | ||
1362 | + struct inode_obj_id* obj; | ||
1363 | + void* extra; | ||
1364 | +}; | ||
1365 | + | ||
1366 | +struct fdso_ops { | ||
1367 | + void* (*create) (void); | ||
1368 | + void (*destroy)(void*); | ||
1369 | + int (*open) (struct od_table_entry*, void* __user); | ||
1370 | + int (*close) (struct od_table_entry*); | ||
1371 | +}; | ||
1372 | + | ||
1373 | +/* translate a userspace supplied od into the raw table entry | ||
1374 | + * returns NULL if od is invalid | ||
1375 | + */ | ||
1376 | +struct od_table_entry* __od_lookup(int od); | ||
1377 | + | ||
1378 | +/* translate a userspace supplied od into the associated object | ||
1379 | + * returns NULL if od is invalid | ||
1380 | + */ | ||
1381 | +static inline void* od_lookup(int od, obj_type_t type) | ||
1382 | +{ | ||
1383 | + struct od_table_entry* e = __od_lookup(od); | ||
1384 | + return e && e->obj->type == type ? e->obj->obj : NULL; | ||
1385 | +} | ||
1386 | + | ||
1387 | +#define lookup_fmlp_sem(od) ((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) | ||
1388 | +#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) | ||
1389 | +#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) | ||
1390 | + | ||
1391 | + | ||
1392 | +#endif | ||
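
User space refers to a shared object through an object descriptor ("od"); kernel code resolves it with od_lookup(), which also verifies the object type. A hypothetical syscall backend (not the actual FMLP implementation) might use the helper like so:

    /* hypothetical sketch -- not part of the patch */
    asmlinkage long sys_demo_fmlp_down(int od)
    {
        struct pi_semaphore *sem = lookup_fmlp_sem(od);

        if (!sem)
            return -EINVAL; /* invalid od, or od names a different object type */

        /* ... suspend on sem, applying priority inheritance ... */
        return 0;
    }
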
1393 | diff --git a/include/litmus/feather_buffer.h b/include/litmus/feather_buffer.h | ||
1394 | new file mode 100644 | ||
1395 | index 0000000..6c18277 | ||
1396 | --- /dev/null | ||
1397 | +++ b/include/litmus/feather_buffer.h | ||
1398 | @@ -0,0 +1,94 @@ | ||
1399 | +#ifndef _FEATHER_BUFFER_H_ | ||
1400 | +#define _FEATHER_BUFFER_H_ | ||
1401 | + | ||
1402 | +/* requires UINT_MAX and memcpy */ | ||
1403 | + | ||
1404 | +#define SLOT_FREE 0 | ||
1405 | +#define SLOT_BUSY 1 | ||
1406 | +#define SLOT_READY 2 | ||
1407 | + | ||
1408 | +struct ft_buffer { | ||
1409 | + unsigned int slot_count; | ||
1410 | + unsigned int slot_size; | ||
1411 | + | ||
1412 | + int free_count; | ||
1413 | + unsigned int write_idx; | ||
1414 | + unsigned int read_idx; | ||
1415 | + | ||
1416 | + char* slots; | ||
1417 | + void* buffer_mem; | ||
1418 | + unsigned int failed_writes; | ||
1419 | +}; | ||
1420 | + | ||
1421 | +static inline int init_ft_buffer(struct ft_buffer* buf, | ||
1422 | + unsigned int slot_count, | ||
1423 | + unsigned int slot_size, | ||
1424 | + char* slots, | ||
1425 | + void* buffer_mem) | ||
1426 | +{ | ||
1427 | + int i = 0; | ||
1428 | + if (!slot_count || UINT_MAX % slot_count != slot_count - 1) { | ||
1429 | + /* The slot count must divide UINT_MAX + 1 so that when it | ||
1430 | + * wraps around, the index correctly points to 0. | ||
1431 | + */ | ||
1432 | + return 0; | ||
1433 | + } else { | ||
1434 | + buf->slot_count = slot_count; | ||
1435 | + buf->slot_size = slot_size; | ||
1436 | + buf->slots = slots; | ||
1437 | + buf->buffer_mem = buffer_mem; | ||
1438 | + buf->free_count = slot_count; | ||
1439 | + buf->write_idx = 0; | ||
1440 | + buf->read_idx = 0; | ||
1441 | + buf->failed_writes = 0; | ||
1442 | + for (i = 0; i < slot_count; i++) | ||
1443 | + buf->slots[i] = SLOT_FREE; | ||
1444 | + return 1; | ||
1445 | + } | ||
1446 | +} | ||
1447 | + | ||
1448 | +static inline int ft_buffer_start_write(struct ft_buffer* buf, void **ptr) | ||
1449 | +{ | ||
1450 | + int free = fetch_and_dec(&buf->free_count); | ||
1451 | + unsigned int idx; | ||
1452 | + if (free <= 0) { | ||
1453 | + fetch_and_inc(&buf->free_count); | ||
1454 | + *ptr = 0; | ||
1455 | + fetch_and_inc(&buf->failed_writes); | ||
1456 | + return 0; | ||
1457 | + } else { | ||
1458 | + idx = fetch_and_inc((int*) &buf->write_idx) % buf->slot_count; | ||
1459 | + buf->slots[idx] = SLOT_BUSY; | ||
1460 | + *ptr = ((char*) buf->buffer_mem) + idx * buf->slot_size; | ||
1461 | + return 1; | ||
1462 | + } | ||
1463 | +} | ||
1464 | + | ||
1465 | +static inline void ft_buffer_finish_write(struct ft_buffer* buf, void *ptr) | ||
1466 | +{ | ||
1467 | + unsigned int idx = ((char*) ptr - (char*) buf->buffer_mem) / buf->slot_size; | ||
1468 | + buf->slots[idx] = SLOT_READY; | ||
1469 | +} | ||
1470 | + | ||
1471 | + | ||
1472 | +/* exclusive reader access is assumed */ | ||
1473 | +static inline int ft_buffer_read(struct ft_buffer* buf, void* dest) | ||
1474 | +{ | ||
1475 | + unsigned int idx; | ||
1476 | + if (buf->free_count == buf->slot_count) | ||
1477 | + /* nothing available */ | ||
1478 | + return 0; | ||
1479 | + idx = buf->read_idx % buf->slot_count; | ||
1480 | + if (buf->slots[idx] == SLOT_READY) { | ||
1481 | + memcpy(dest, ((char*) buf->buffer_mem) + idx * buf->slot_size, | ||
1482 | + buf->slot_size); | ||
1483 | + buf->slots[idx] = SLOT_FREE; | ||
1484 | + buf->read_idx++; | ||
1485 | + fetch_and_inc(&buf->free_count); | ||
1486 | + return 1; | ||
1487 | + } else | ||
1488 | + return 0; | ||
1489 | +} | ||
1490 | + | ||
1491 | + | ||
1492 | +#endif | ||
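
Note that init_ft_buffer() rejects any slot_count that does not divide UINT_MAX + 1, so in practice the slot count must be a power of two. A minimal usage sketch (names and sizes made up) with any number of concurrent writers and a single reader:

    /* hypothetical sketch -- not part of the patch */
    #define DEMO_SLOTS 32 /* power of two, so it divides UINT_MAX + 1 */

    static char demo_status[DEMO_SLOTS];
    static u64  demo_mem[DEMO_SLOTS];
    static struct ft_buffer demo_buf;

    static int demo_setup(void)
    {
        /* returns 1 on success, 0 on an invalid slot count */
        return init_ft_buffer(&demo_buf, DEMO_SLOTS, sizeof(u64),
                              demo_status, demo_mem);
    }

    static void demo_write(u64 sample)
    {
        u64 *slot;
        if (ft_buffer_start_write(&demo_buf, (void **) &slot)) {
            *slot = sample;
            ft_buffer_finish_write(&demo_buf, slot);
        } /* else: buffer full; failed_writes was incremented */
    }

    static int demo_read_one(u64 *dest) /* single reader only */
    {
        return ft_buffer_read(&demo_buf, dest);
    }
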
1493 | diff --git a/include/litmus/feather_trace.h b/include/litmus/feather_trace.h | ||
1494 | new file mode 100644 | ||
1495 | index 0000000..028dfb2 | ||
1496 | --- /dev/null | ||
1497 | +++ b/include/litmus/feather_trace.h | ||
1498 | @@ -0,0 +1,65 @@ | ||
1499 | +#ifndef _FEATHER_TRACE_H_ | ||
1500 | +#define _FEATHER_TRACE_H_ | ||
1501 | + | ||
1502 | +#include <asm/atomic.h> | ||
1503 | + | ||
1504 | +int ft_enable_event(unsigned long id); | ||
1505 | +int ft_disable_event(unsigned long id); | ||
1506 | +int ft_is_event_enabled(unsigned long id); | ||
1507 | +int ft_disable_all_events(void); | ||
1508 | + | ||
1509 | +/* atomic_* functions are inline anyway */ | ||
1510 | +static inline int fetch_and_inc(int *val) | ||
1511 | +{ | ||
1512 | + return atomic_add_return(1, (atomic_t*) val) - 1; | ||
1513 | +} | ||
1514 | + | ||
1515 | +static inline int fetch_and_dec(int *val) | ||
1516 | +{ | ||
1517 | + return atomic_sub_return(1, (atomic_t*) val) + 1; | ||
1518 | +} | ||
1519 | + | ||
1520 | +/* Don't use rewriting implementation if kernel text pages are read-only. | ||
1521 | + * Ftrace gets around this by using the identity mapping, but that's more | ||
1522 | + * effort than is warranted right now for Feather-Trace. | ||
1523 | + * Eventually, it may make sense to replace Feather-Trace with ftrace. | ||
1524 | + */ | ||
1525 | +#if defined(CONFIG_ARCH_HAS_FEATHER_TRACE) && !defined(CONFIG_DEBUG_RODATA) | ||
1526 | + | ||
1527 | +#include <asm/feather_trace.h> | ||
1528 | + | ||
1529 | +#else /* !__ARCH_HAS_FEATHER_TRACE */ | ||
1530 | + | ||
1531 | +/* provide default implementation */ | ||
1532 | + | ||
1533 | +#include <asm/timex.h> /* for get_cycles() */ | ||
1534 | + | ||
1535 | +static inline unsigned long long ft_timestamp(void) | ||
1536 | +{ | ||
1537 | + return get_cycles(); | ||
1538 | +} | ||
1539 | + | ||
1540 | +#define feather_callback | ||
1541 | + | ||
1542 | +#define MAX_EVENTS 1024 | ||
1543 | + | ||
1544 | +extern int ft_events[MAX_EVENTS]; | ||
1545 | + | ||
1546 | +#define ft_event(id, callback) \ | ||
1547 | + if (ft_events[id]) callback(); | ||
1548 | + | ||
1549 | +#define ft_event0(id, callback) \ | ||
1550 | + if (ft_events[id]) callback(id); | ||
1551 | + | ||
1552 | +#define ft_event1(id, callback, param) \ | ||
1553 | + if (ft_events[id]) callback(id, param); | ||
1554 | + | ||
1555 | +#define ft_event2(id, callback, param, param2) \ | ||
1556 | + if (ft_events[id]) callback(id, param, param2); | ||
1557 | + | ||
1558 | +#define ft_event3(id, callback, p, p2, p3) \ | ||
1559 | + if (ft_events[id]) callback(id, p, p2, p3); | ||
1560 | + | ||
1561 | +#endif /* __ARCH_HAS_FEATHER_TRACE */ | ||
1562 | + | ||
1563 | +#endif | ||
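
In the fallback path above, each ft_eventN() macro compiles to a simple guarded call: the callback fires only while ft_events[id] is nonzero, which ft_enable_event() and ft_disable_event() toggle at runtime. A hypothetical probe point (not part of the patch):

    /* hypothetical sketch -- not part of the patch */
    static void demo_callback(unsigned long id, unsigned long cycles)
    {
        /* runs only while event 300 is enabled */
    }

    static void demo_probe_point(void)
    {
        /* expands to: if (ft_events[300]) demo_callback(300, ...); */
        ft_event1(300, demo_callback, (unsigned long) ft_timestamp());
    }
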
1564 | diff --git a/include/litmus/ftdev.h b/include/litmus/ftdev.h | ||
1565 | new file mode 100644 | ||
1566 | index 0000000..348387e | ||
1567 | --- /dev/null | ||
1568 | +++ b/include/litmus/ftdev.h | ||
1569 | @@ -0,0 +1,52 @@ | ||
1570 | +#ifndef _LITMUS_FTDEV_H_ | ||
1571 | +#define _LITMUS_FTDEV_H_ | ||
1572 | + | ||
1573 | +#include <litmus/feather_trace.h> | ||
1574 | +#include <litmus/feather_buffer.h> | ||
1575 | +#include <linux/mutex.h> | ||
1576 | +#include <linux/cdev.h> | ||
1577 | + | ||
1578 | +#define FTDEV_ENABLE_CMD 0 | ||
1579 | +#define FTDEV_DISABLE_CMD 1 | ||
1580 | + | ||
1581 | +struct ftdev; | ||
1582 | + | ||
1583 | +/* return 0 if buffer can be opened, otherwise -$REASON */ | ||
1584 | +typedef int (*ftdev_can_open_t)(struct ftdev* dev, unsigned int buf_no); | ||
1585 | +/* return 0 on success, otherwise -$REASON */ | ||
1586 | +typedef int (*ftdev_alloc_t)(struct ftdev* dev, unsigned int buf_no); | ||
1587 | +typedef void (*ftdev_free_t)(struct ftdev* dev, unsigned int buf_no); | ||
1588 | + | ||
1589 | + | ||
1590 | +struct ftdev_event; | ||
1591 | + | ||
1592 | +struct ftdev_minor { | ||
1593 | + struct ft_buffer* buf; | ||
1594 | + unsigned int readers; | ||
1595 | + struct mutex lock; | ||
1596 | + /* FIXME: filter for authorized events */ | ||
1597 | + struct ftdev_event* events; | ||
1598 | + struct device* device; | ||
1599 | +}; | ||
1600 | + | ||
1601 | +struct ftdev { | ||
1602 | + dev_t major; | ||
1603 | + struct cdev cdev; | ||
1604 | + struct class* class; | ||
1605 | + const char* name; | ||
1606 | + struct ftdev_minor* minor; | ||
1607 | + unsigned int minor_cnt; | ||
1608 | + ftdev_alloc_t alloc; | ||
1609 | + ftdev_free_t free; | ||
1610 | + ftdev_can_open_t can_open; | ||
1611 | +}; | ||
1612 | + | ||
1613 | +struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size); | ||
1614 | +void free_ft_buffer(struct ft_buffer* buf); | ||
1615 | + | ||
1616 | +int ftdev_init( struct ftdev* ftdev, struct module* owner, | ||
1617 | + const int minor_cnt, const char* name); | ||
1618 | +void ftdev_exit(struct ftdev* ftdev); | ||
1619 | +int register_ftdev(struct ftdev* ftdev); | ||
1620 | + | ||
1621 | +#endif | ||
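
A trace device exposes one Feather-Trace buffer per character-device minor. A hedged registration sketch follows; the buffer count and size are made up, struct timestamp is defined in litmus/trace.h below, ftdev_init() is assumed to allocate the minor table, and error unwinding is elided:

    /* hypothetical sketch -- not part of the patch */
    static struct ftdev demo_dev;

    static int demo_alloc(struct ftdev *dev, unsigned int buf_no)
    {
        dev->minor[buf_no].buf = alloc_ft_buffer(4096, sizeof(struct timestamp));
        return dev->minor[buf_no].buf ? 0 : -ENOMEM;
    }

    static void demo_free(struct ftdev *dev, unsigned int buf_no)
    {
        free_ft_buffer(dev->minor[buf_no].buf);
        dev->minor[buf_no].buf = NULL;
    }

    static int __init demo_dev_init(void)
    {
        int err = ftdev_init(&demo_dev, THIS_MODULE, 2, "demo_trace");
        if (err)
            return err;
        demo_dev.alloc = demo_alloc;
        demo_dev.free  = demo_free;
        return register_ftdev(&demo_dev);
    }
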
1622 | diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h | ||
1623 | new file mode 100644 | ||
1624 | index 0000000..9bd361e | ||
1625 | --- /dev/null | ||
1626 | +++ b/include/litmus/jobs.h | ||
1627 | @@ -0,0 +1,9 @@ | ||
1628 | +#ifndef __LITMUS_JOBS_H__ | ||
1629 | +#define __LITMUS_JOBS_H__ | ||
1630 | + | ||
1631 | +void prepare_for_next_period(struct task_struct *t); | ||
1632 | +void release_at(struct task_struct *t, lt_t start); | ||
1633 | +long complete_job(void); | ||
1634 | + | ||
1635 | +#endif | ||
1636 | + | ||
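
These three helpers carry the periodic-task model: prepare_for_next_period() computes the next release and deadline and increments job_no, release_at() (re)sets a task's first release, and complete_job() backs the sleep-until-next-release system call. A hypothetical plugin completion path (not part of the patch; set_rt_flags() and RT_F_SLEEP come from litmus.h/rt_param.h below):

    /* hypothetical sketch -- not part of the patch */
    static void demo_job_completion(struct task_struct *t)
    {
        set_rt_flags(t, RT_F_SLEEP); /* mark the current job as done */
        prepare_for_next_period(t);  /* advance release/deadline/job_no */
    }
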
1637 | diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h | ||
1638 | new file mode 100644 | ||
1639 | index 0000000..2464837 | ||
1640 | --- /dev/null | ||
1641 | +++ b/include/litmus/litmus.h | ||
1642 | @@ -0,0 +1,241 @@ | ||
1643 | +/* | ||
1644 | + * Constant definitions related to | ||
1645 | + * scheduling policy. | ||
1646 | + */ | ||
1647 | + | ||
1648 | +#ifndef _LINUX_LITMUS_H_ | ||
1649 | +#define _LINUX_LITMUS_H_ | ||
1650 | + | ||
1651 | +#include <litmus/debug_trace.h> | ||
1652 | + | ||
1653 | +#ifdef CONFIG_RELEASE_MASTER | ||
1654 | +extern atomic_t release_master_cpu; | ||
1655 | +#endif | ||
1656 | + | ||
1657 | +/* in_list - is a given list_head queued on some list? | ||
1658 | + */ | ||
1659 | +static inline int in_list(struct list_head* list) | ||
1660 | +{ | ||
1661 | + return !( /* case 1: deleted */ | ||
1662 | + (list->next == LIST_POISON1 && | ||
1663 | + list->prev == LIST_POISON2) | ||
1664 | + || | ||
1665 | + /* case 2: initialized */ | ||
1666 | + (list->next == list && | ||
1667 | + list->prev == list) | ||
1668 | + ); | ||
1669 | +} | ||
1670 | + | ||
1671 | +#define NO_CPU 0xffffffff | ||
1672 | + | ||
1673 | +void litmus_fork(struct task_struct *tsk); | ||
1674 | +void litmus_exec(void); | ||
1675 | +/* clean up real-time state of a task */ | ||
1676 | +void exit_litmus(struct task_struct *dead_tsk); | ||
1677 | + | ||
1678 | +long litmus_admit_task(struct task_struct *tsk); | ||
1679 | +void litmus_exit_task(struct task_struct *tsk); | ||
1680 | + | ||
1681 | +#define is_realtime(t) ((t)->policy == SCHED_LITMUS) | ||
1682 | +#define rt_transition_pending(t) \ | ||
1683 | + ((t)->rt_param.transition_pending) | ||
1684 | + | ||
1685 | +#define tsk_rt(t) (&(t)->rt_param) | ||
1686 | + | ||
1687 | +/* Realtime utility macros */ | ||
1688 | +#define get_rt_flags(t) (tsk_rt(t)->flags) | ||
1689 | +#define set_rt_flags(t,f) (tsk_rt(t)->flags=(f)) | ||
1690 | +#define get_exec_cost(t) (tsk_rt(t)->task_params.exec_cost) | ||
1691 | +#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) | ||
1692 | +#define get_rt_period(t) (tsk_rt(t)->task_params.period) | ||
1693 | +#define get_rt_phase(t) (tsk_rt(t)->task_params.phase) | ||
1694 | +#define get_partition(t) (tsk_rt(t)->task_params.cpu) | ||
1695 | +#define get_deadline(t) (tsk_rt(t)->job_params.deadline) | ||
1696 | +#define get_release(t) (tsk_rt(t)->job_params.release) | ||
1697 | +#define get_class(t) (tsk_rt(t)->task_params.cls) | ||
1698 | + | ||
1699 | +inline static int budget_exhausted(struct task_struct* t) | ||
1700 | +{ | ||
1701 | + return get_exec_time(t) >= get_exec_cost(t); | ||
1702 | +} | ||
1703 | + | ||
1704 | +inline static lt_t budget_remaining(struct task_struct* t) | ||
1705 | +{ | ||
1706 | + if (!budget_exhausted(t)) | ||
1707 | + return get_exec_cost(t) - get_exec_time(t); | ||
1708 | + else | ||
1709 | + /* avoid overflow */ | ||
1710 | + return 0; | ||
1711 | +} | ||
1712 | + | ||
1713 | +#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) | ||
1714 | + | ||
1715 | +#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \ | ||
1716 | + == PRECISE_ENFORCEMENT) | ||
1717 | + | ||
1718 | +#define is_hrt(t) \ | ||
1719 | + (tsk_rt(t)->task_params.class == RT_CLASS_HARD) | ||
1720 | +#define is_srt(t) \ | ||
1721 | + (tsk_rt(t)->task_params.class == RT_CLASS_SOFT) | ||
1722 | +#define is_be(t) \ | ||
1723 | + (tsk_rt(t)->task_params.class == RT_CLASS_BEST_EFFORT) | ||
1724 | + | ||
1725 | +/* Our notion of time within LITMUS: kernel monotonic time. */ | ||
1726 | +static inline lt_t litmus_clock(void) | ||
1727 | +{ | ||
1728 | + return ktime_to_ns(ktime_get()); | ||
1729 | +} | ||
1730 | + | ||
1731 | +/* A macro to convert from nanoseconds to ktime_t. */ | ||
1732 | +#define ns_to_ktime(t) ktime_add_ns(ktime_set(0, 0), t) | ||
1733 | + | ||
1734 | +#define get_domain(t) (tsk_rt(t)->domain) | ||
1735 | + | ||
1736 | +/* Honor the flag in the preempt_count variable that is set | ||
1737 | + * when scheduling is in progress. | ||
1738 | + */ | ||
1739 | +#define is_running(t) \ | ||
1740 | + ((t)->state == TASK_RUNNING || \ | ||
1741 | + task_thread_info(t)->preempt_count & PREEMPT_ACTIVE) | ||
1742 | + | ||
1743 | +#define is_blocked(t) \ | ||
1744 | + (!is_running(t)) | ||
1745 | +#define is_released(t, now) \ | ||
1746 | + (lt_before_eq(get_release(t), now)) | ||
1747 | +#define is_tardy(t, now) \ | ||
1748 | + (lt_before_eq(tsk_rt(t)->job_params.deadline, now)) | ||
1749 | + | ||
1750 | +/* real-time comparison macros */ | ||
1751 | +#define earlier_deadline(a, b) (lt_before(\ | ||
1752 | + (a)->rt_param.job_params.deadline,\ | ||
1753 | + (b)->rt_param.job_params.deadline)) | ||
1754 | +#define earlier_release(a, b) (lt_before(\ | ||
1755 | + (a)->rt_param.job_params.release,\ | ||
1756 | + (b)->rt_param.job_params.release)) | ||
1757 | + | ||
1758 | +void preempt_if_preemptable(struct task_struct* t, int on_cpu); | ||
1759 | + | ||
1760 | +#ifdef CONFIG_SRP | ||
1761 | +void srp_ceiling_block(void); | ||
1762 | +#else | ||
1763 | +#define srp_ceiling_block() /* nothing */ | ||
1764 | +#endif | ||
1765 | + | ||
1766 | +#define bheap2task(hn) ((struct task_struct*) hn->value) | ||
1767 | + | ||
1768 | +#ifdef CONFIG_NP_SECTION | ||
1769 | + | ||
1770 | +static inline int is_kernel_np(struct task_struct *t) | ||
1771 | +{ | ||
1772 | + return tsk_rt(t)->kernel_np; | ||
1773 | +} | ||
1774 | + | ||
1775 | +static inline int is_user_np(struct task_struct *t) | ||
1776 | +{ | ||
1777 | + return tsk_rt(t)->ctrl_page ? tsk_rt(t)->ctrl_page->np_flag : 0; | ||
1778 | +} | ||
1779 | + | ||
1780 | +static inline void request_exit_np(struct task_struct *t) | ||
1781 | +{ | ||
1782 | + if (is_user_np(t)) { | ||
1783 | + /* Set the flag that tells user space to call | ||
1784 | + * into the kernel at the end of a critical section. */ | ||
1785 | + if (likely(tsk_rt(t)->ctrl_page)) { | ||
1786 | + TRACE_TASK(t, "setting delayed_preemption flag\n"); | ||
1787 | + tsk_rt(t)->ctrl_page->delayed_preemption = 1; | ||
1788 | + } | ||
1789 | + } | ||
1790 | +} | ||
1791 | + | ||
1792 | +static inline void clear_exit_np(struct task_struct *t) | ||
1793 | +{ | ||
1794 | + if (likely(tsk_rt(t)->ctrl_page)) | ||
1795 | + tsk_rt(t)->ctrl_page->delayed_preemption = 0; | ||
1796 | +} | ||
1797 | + | ||
1798 | +static inline void make_np(struct task_struct *t) | ||
1799 | +{ | ||
1800 | + tsk_rt(t)->kernel_np++; | ||
1801 | +} | ||
1802 | + | ||
1803 | +/* Caller should check if preemption is necessary when | ||
1804 | + * the function return 0. | ||
1805 | + */ | ||
1806 | +static inline int take_np(struct task_struct *t) | ||
1807 | +{ | ||
1808 | + return --tsk_rt(t)->kernel_np; | ||
1809 | +} | ||
1810 | + | ||
1811 | +#else | ||
1812 | + | ||
1813 | +static inline int is_kernel_np(struct task_struct* t) | ||
1814 | +{ | ||
1815 | + return 0; | ||
1816 | +} | ||
1817 | + | ||
1818 | +static inline int is_user_np(struct task_struct* t) | ||
1819 | +{ | ||
1820 | + return 0; | ||
1821 | +} | ||
1822 | + | ||
1823 | +static inline void request_exit_np(struct task_struct *t) | ||
1824 | +{ | ||
1825 | + /* request_exit_np() shouldn't be called if !CONFIG_NP_SECTION */ | ||
1826 | + BUG(); | ||
1827 | +} | ||
1828 | + | ||
1829 | +static inline void clear_exit_np(struct task_struct* t) | ||
1830 | +{ | ||
1831 | +} | ||
1832 | + | ||
1833 | +#endif | ||
1834 | + | ||
1835 | +static inline int is_np(struct task_struct *t) | ||
1836 | +{ | ||
1837 | +#ifdef CONFIG_SCHED_DEBUG_TRACE | ||
1838 | + int kernel, user; | ||
1839 | + kernel = is_kernel_np(t); | ||
1840 | + user = is_user_np(t); | ||
1841 | + if (kernel || user) | ||
1842 | + TRACE_TASK(t, " is non-preemptive: kernel=%d user=%d\n", | ||
1843 | + kernel, user); | ||
1844 | + | ||
1845 | + return kernel || user; | ||
1846 | +#else | ||
1847 | + return unlikely(is_kernel_np(t) || is_user_np(t)); | ||
1848 | +#endif | ||
1849 | +} | ||
1850 | + | ||
1851 | +static inline int is_present(struct task_struct* t) | ||
1852 | +{ | ||
1853 | + return t && tsk_rt(t)->present; | ||
1854 | +} | ||
1855 | + | ||
1856 | + | ||
1857 | +/* make the unit explicit */ | ||
1858 | +typedef unsigned long quanta_t; | ||
1859 | + | ||
1860 | +enum round { | ||
1861 | + FLOOR, | ||
1862 | + CEIL | ||
1863 | +}; | ||
1864 | + | ||
1865 | + | ||
1866 | +/* Tick period is used to convert ns-specified execution | ||
1867 | + * costs and periods into tick-based equivalents. | ||
1868 | + */ | ||
1869 | +extern ktime_t tick_period; | ||
1870 | + | ||
1871 | +static inline quanta_t time2quanta(lt_t time, enum round round) | ||
1872 | +{ | ||
1873 | + s64 quantum_length = ktime_to_ns(tick_period); | ||
1874 | + | ||
1875 | + if (do_div(time, quantum_length) && round == CEIL) | ||
1876 | + time++; | ||
1877 | + return (quanta_t) time; | ||
1878 | +} | ||
1879 | + | ||
1880 | +/* By how much is cpu staggered behind CPU 0? */ | ||
1881 | +u64 cpu_stagger_offset(int cpu); | ||
1882 | + | ||
1883 | +#endif | ||
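
Tying the budget helpers together: under QUANTUM_ENFORCEMENT, a plugin's tick callback can catch an exhausted budget on a quantum boundary and force a preemption, deferring it while the task is in a non-preemptive section. A hypothetical sketch (not part of the patch; litmus_reschedule_local() is declared in litmus/preempt.h below):

    /* hypothetical sketch -- not part of the patch */
    static void demo_tick(struct task_struct *t)
    {
        if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) {
            if (!is_np(t))
                litmus_reschedule_local();
            else
                request_exit_np(t); /* preempt once the NP section ends */
        }
    }
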
1884 | diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h | ||
1885 | new file mode 100644 | ||
1886 | index 0000000..fbc0082 | ||
1887 | --- /dev/null | ||
1888 | +++ b/include/litmus/litmus_proc.h | ||
1889 | @@ -0,0 +1,19 @@ | ||
1890 | +#include <litmus/sched_plugin.h> | ||
1891 | +#include <linux/proc_fs.h> | ||
1892 | + | ||
1893 | +int __init init_litmus_proc(void); | ||
1894 | +void exit_litmus_proc(void); | ||
1895 | + | ||
1896 | +/* | ||
1897 | + * On success, returns 0 and sets the pointer to the location of the new | ||
1898 | + * proc dir entry, otherwise returns an error code and sets pde to NULL. | ||
1899 | + */ | ||
1900 | +long make_plugin_proc_dir(struct sched_plugin* plugin, | ||
1901 | + struct proc_dir_entry** pde); | ||
1902 | + | ||
1903 | +/* | ||
1904 | + * Plugins should deallocate all child proc directory entries before | ||
1905 | + * calling this, to avoid memory leaks. | ||
1906 | + */ | ||
1907 | +void remove_plugin_proc_dir(struct sched_plugin* plugin); | ||
1908 | + | ||
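
A plugin would typically create its proc directory when it is activated and remove it when it is deactivated. A hypothetical pairing (not part of the patch; demo_plugin is assumed to be the plugin's struct sched_plugin, as in the skeleton shown after sched_plugin.h below):

    /* hypothetical sketch -- not part of the patch */
    static struct proc_dir_entry *demo_proc_dir;

    static long demo_activate_plugin(void)
    {
        return make_plugin_proc_dir(&demo_plugin, &demo_proc_dir);
    }

    static long demo_deactivate_plugin(void)
    {
        /* child entries must already be removed to avoid leaks */
        remove_plugin_proc_dir(&demo_plugin);
        demo_proc_dir = NULL;
        return 0;
    }
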
1909 | diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h | ||
1910 | new file mode 100644 | ||
1911 | index 0000000..260c6fe | ||
1912 | --- /dev/null | ||
1913 | +++ b/include/litmus/preempt.h | ||
1914 | @@ -0,0 +1,164 @@ | ||
1915 | +#ifndef LITMUS_PREEMPT_H | ||
1916 | +#define LITMUS_PREEMPT_H | ||
1917 | + | ||
1918 | +#include <linux/types.h> | ||
1919 | +#include <linux/cache.h> | ||
1920 | +#include <linux/percpu.h> | ||
1921 | +#include <asm/atomic.h> | ||
1922 | + | ||
1923 | +#include <litmus/debug_trace.h> | ||
1924 | + | ||
1925 | +extern DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); | ||
1926 | + | ||
1927 | +#ifdef CONFIG_DEBUG_KERNEL | ||
1928 | +const char* sched_state_name(int s); | ||
1929 | +#define TRACE_STATE(fmt, args...) TRACE("SCHED_STATE " fmt, args) | ||
1930 | +#else | ||
1931 | +#define TRACE_STATE(fmt, args...) /* ignore */ | ||
1932 | +#endif | ||
1933 | + | ||
1934 | +#define VERIFY_SCHED_STATE(x) \ | ||
1935 | + do { int __s = get_sched_state(); \ | ||
1936 | + if ((__s & (x)) == 0) \ | ||
1937 | + TRACE_STATE("INVALID s=0x%x (%s) not " \ | ||
1938 | + "in 0x%x (%s) [%s]\n", \ | ||
1939 | + __s, sched_state_name(__s), \ | ||
1940 | + (x), #x, __FUNCTION__); \ | ||
1941 | + } while (0) | ||
1942 | + | ||
1943 | +#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ | ||
1944 | + TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ | ||
1945 | + cpu, (x), sched_state_name(x), \ | ||
1946 | + (y), sched_state_name(y)) | ||
1947 | + | ||
1948 | + | ||
1949 | +typedef enum scheduling_state { | ||
1950 | + TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that | ||
1951 | + * should be scheduled, and the processor does not | ||
1952 | + * plan to invoke schedule(). */ | ||
1953 | + SHOULD_SCHEDULE = (1 << 1), /* A remote processor has determined that the | ||
1954 | + * processor should reschedule, but this has not | ||
1955 | + * been communicated yet (IPI still pending). */ | ||
1956 | + WILL_SCHEDULE = (1 << 2), /* The processor has noticed that it has to | ||
1957 | + * reschedule and will do so shortly. */ | ||
1958 | + TASK_PICKED = (1 << 3), /* The processor is currently executing schedule(), | ||
1959 | + * has selected a new task to schedule, but has not | ||
1960 | + * yet performed the actual context switch. */ | ||
1961 | + PICKED_WRONG_TASK = (1 << 4), /* The processor has not yet performed the context | ||
1962 | + * switch, but a remote processor has already | ||
1963 | + * determined that a higher-priority task became | ||
1964 | + * eligible after the task was picked. */ | ||
1965 | +} sched_state_t; | ||
1966 | + | ||
1967 | +static inline sched_state_t get_sched_state_on(int cpu) | ||
1968 | +{ | ||
1969 | + return atomic_read(&per_cpu(resched_state, cpu)); | ||
1970 | +} | ||
1971 | + | ||
1972 | +static inline sched_state_t get_sched_state(void) | ||
1973 | +{ | ||
1974 | + return atomic_read(&__get_cpu_var(resched_state)); | ||
1975 | +} | ||
1976 | + | ||
1977 | +static inline int is_in_sched_state(int possible_states) | ||
1978 | +{ | ||
1979 | + return get_sched_state() & possible_states; | ||
1980 | +} | ||
1981 | + | ||
1982 | +static inline int cpu_is_in_sched_state(int cpu, int possible_states) | ||
1983 | +{ | ||
1984 | + return get_sched_state_on(cpu) & possible_states; | ||
1985 | +} | ||
1986 | + | ||
1987 | +static inline void set_sched_state(sched_state_t s) | ||
1988 | +{ | ||
1989 | + TRACE_SCHED_STATE_CHANGE(get_sched_state(), s, smp_processor_id()); | ||
1990 | + atomic_set(&__get_cpu_var(resched_state), s); | ||
1991 | +} | ||
1992 | + | ||
1993 | +static inline int sched_state_transition(sched_state_t from, sched_state_t to) | ||
1994 | +{ | ||
1995 | + sched_state_t old_state; | ||
1996 | + | ||
1997 | + old_state = atomic_cmpxchg(&__get_cpu_var(resched_state), from, to); | ||
1998 | + if (old_state == from) { | ||
1999 | + TRACE_SCHED_STATE_CHANGE(from, to, smp_processor_id()); | ||
2000 | + return 1; | ||
2001 | + } else | ||
2002 | + return 0; | ||
2003 | +} | ||
2004 | + | ||
2005 | +static inline int sched_state_transition_on(int cpu, | ||
2006 | + sched_state_t from, | ||
2007 | + sched_state_t to) | ||
2008 | +{ | ||
2009 | + sched_state_t old_state; | ||
2010 | + | ||
2011 | + old_state = atomic_cmpxchg(&per_cpu(resched_state, cpu), from, to); | ||
2012 | + if (old_state == from) { | ||
2013 | + TRACE_SCHED_STATE_CHANGE(from, to, cpu); | ||
2014 | + return 1; | ||
2015 | + } else | ||
2016 | + return 0; | ||
2017 | +} | ||
2018 | + | ||
2019 | +/* Plugins must call this function after they have decided which job to | ||
2020 | + * schedule next. IMPORTANT: this function must be called while still holding | ||
2021 | + * the lock that is used to serialize scheduling decisions. | ||
2022 | + * | ||
2023 | + * (Ideally, we would like to use runqueue locks for this purpose, but that | ||
2024 | + * would lead to deadlocks with the migration code.) | ||
2025 | + */ | ||
2026 | +static inline void sched_state_task_picked(void) | ||
2027 | +{ | ||
2028 | + VERIFY_SCHED_STATE(WILL_SCHEDULE); | ||
2029 | + | ||
2030 | + /* WILL_SCHEDULE has only a local transition => simple store is ok */ | ||
2031 | + set_sched_state(TASK_PICKED); | ||
2032 | +} | ||
2033 | + | ||
2034 | +static inline void sched_state_entered_schedule(void) | ||
2035 | +{ | ||
2036 | + /* Update state for the case that we entered schedule() not due to | ||
2037 | + * set_tsk_need_resched() */ | ||
2038 | + set_sched_state(WILL_SCHEDULE); | ||
2039 | +} | ||
2040 | + | ||
2041 | +/* Called by schedule() to check if the scheduling decision is still valid | ||
2042 | + * after a context switch. Returns 1 if the CPU needs to reschedule. */ | ||
2043 | +static inline int sched_state_validate_switch(void) | ||
2044 | +{ | ||
2045 | + int left_state_ok = 0; | ||
2046 | + | ||
2047 | + VERIFY_SCHED_STATE(PICKED_WRONG_TASK | TASK_PICKED); | ||
2048 | + | ||
2049 | + if (is_in_sched_state(TASK_PICKED)) { | ||
2050 | + /* Might be good; let's try to transition out of this | ||
2051 | + * state. This must be done atomically since remote processors | ||
2052 | + * may try to change the state, too. */ | ||
2053 | + left_state_ok = sched_state_transition(TASK_PICKED, TASK_SCHEDULED); | ||
2054 | + } | ||
2055 | + | ||
2056 | + if (!left_state_ok) { | ||
2057 | + /* We raced with a higher-priority task arrival => not | ||
2058 | + * valid. The CPU needs to reschedule. */ | ||
2059 | + set_sched_state(WILL_SCHEDULE); | ||
2060 | + return 1; | ||
2061 | + } else | ||
2062 | + return 0; | ||
2063 | +} | ||
2064 | + | ||
2065 | +/* State transition events. See litmus/preempt.c for details. */ | ||
2066 | +void sched_state_will_schedule(struct task_struct* tsk); | ||
2067 | +void sched_state_ipi(void); | ||
2068 | +/* Cause a CPU (remote or local) to reschedule. */ | ||
2069 | +void litmus_reschedule(int cpu); | ||
2070 | +void litmus_reschedule_local(void); | ||
2071 | + | ||
2072 | +#ifdef CONFIG_DEBUG_KERNEL | ||
2073 | +void sched_state_plugin_check(void); | ||
2074 | +#else | ||
2075 | +#define sched_state_plugin_check() /* no check */ | ||
2076 | +#endif | ||
2077 | + | ||
2078 | +#endif | ||
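
The contract spelled out above, in code: a plugin reports its decision with sched_state_task_picked() while still holding its scheduling lock, so that a racing remote release can flip the CPU from TASK_PICKED to PICKED_WRONG_TASK and sched_state_validate_switch() will catch the stale decision. A hypothetical sketch (not part of the patch; __take_ready() and rt_domain_t come from rt_domain.h below):

    /* hypothetical sketch -- not part of the patch */
    static raw_spinlock_t demo_lock;
    static rt_domain_t    demo_domain;

    static struct task_struct* demo_schedule(struct task_struct *prev)
    {
        struct task_struct *next;

        raw_spin_lock(&demo_lock);
        next = __take_ready(&demo_domain); /* plugin-specific choice */
        sched_state_task_picked();         /* while demo_lock is still held */
        raw_spin_unlock(&demo_lock);
        return next;
    }
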
2079 | diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h | ||
2080 | new file mode 100644 | ||
2081 | index 0000000..ac24929 | ||
2082 | --- /dev/null | ||
2083 | +++ b/include/litmus/rt_domain.h | ||
2084 | @@ -0,0 +1,182 @@ | ||
2085 | +/* CLEANUP: Add comments and make it less messy. | ||
2086 | + * | ||
2087 | + */ | ||
2088 | + | ||
2089 | +#ifndef __UNC_RT_DOMAIN_H__ | ||
2090 | +#define __UNC_RT_DOMAIN_H__ | ||
2091 | + | ||
2092 | +#include <litmus/bheap.h> | ||
2093 | + | ||
2094 | +#define RELEASE_QUEUE_SLOTS 127 /* prime */ | ||
2095 | + | ||
2096 | +struct _rt_domain; | ||
2097 | + | ||
2098 | +typedef int (*check_resched_needed_t)(struct _rt_domain *rt); | ||
2099 | +typedef void (*release_jobs_t)(struct _rt_domain *rt, struct bheap* tasks); | ||
2100 | + | ||
2101 | +struct release_queue { | ||
2102 | + /* each slot maintains a list of release heaps sorted | ||
2103 | + * by release time */ | ||
2104 | + struct list_head slot[RELEASE_QUEUE_SLOTS]; | ||
2105 | +}; | ||
2106 | + | ||
2107 | +typedef struct _rt_domain { | ||
2108 | + /* runnable rt tasks are in here */ | ||
2109 | + raw_spinlock_t ready_lock; | ||
2110 | + struct bheap ready_queue; | ||
2111 | + | ||
2112 | + /* real-time tasks waiting for release are in here */ | ||
2113 | + raw_spinlock_t release_lock; | ||
2114 | + struct release_queue release_queue; | ||
2115 | + | ||
2116 | +#ifdef CONFIG_RELEASE_MASTER | ||
2117 | + int release_master; | ||
2118 | +#endif | ||
2119 | + | ||
2120 | + /* for moving tasks to the release queue */ | ||
2121 | + raw_spinlock_t tobe_lock; | ||
2122 | + struct list_head tobe_released; | ||
2123 | + | ||
2124 | + /* how do we check if we need to kick another CPU? */ | ||
2125 | + check_resched_needed_t check_resched; | ||
2126 | + | ||
2127 | + /* how do we release jobs? */ | ||
2128 | + release_jobs_t release_jobs; | ||
2129 | + | ||
2130 | + /* how are tasks ordered in the ready queue? */ | ||
2131 | + bheap_prio_t order; | ||
2132 | +} rt_domain_t; | ||
2133 | + | ||
2134 | +struct release_heap { | ||
2135 | + /* list_head for per-time-slot list */ | ||
2136 | + struct list_head list; | ||
2137 | + lt_t release_time; | ||
2138 | + /* all tasks to be released at release_time */ | ||
2139 | + struct bheap heap; | ||
2140 | + /* used to trigger the release */ | ||
2141 | + struct hrtimer timer; | ||
2142 | + | ||
2143 | +#ifdef CONFIG_RELEASE_MASTER | ||
2144 | + /* used to delegate releases */ | ||
2145 | + struct hrtimer_start_on_info info; | ||
2146 | +#endif | ||
2147 | + /* required for the timer callback */ | ||
2148 | + rt_domain_t* dom; | ||
2149 | +}; | ||
2150 | + | ||
2151 | + | ||
2152 | +static inline struct task_struct* __next_ready(rt_domain_t* rt) | ||
2153 | +{ | ||
2154 | + struct bheap_node *hn = bheap_peek(rt->order, &rt->ready_queue); | ||
2155 | + if (hn) | ||
2156 | + return bheap2task(hn); | ||
2157 | + else | ||
2158 | + return NULL; | ||
2159 | +} | ||
2160 | + | ||
2161 | +void rt_domain_init(rt_domain_t *rt, bheap_prio_t order, | ||
2162 | + check_resched_needed_t check, | ||
2163 | + release_jobs_t release); | ||
2164 | + | ||
2165 | +void __add_ready(rt_domain_t* rt, struct task_struct *new); | ||
2166 | +void __merge_ready(rt_domain_t* rt, struct bheap *tasks); | ||
2167 | +void __add_release(rt_domain_t* rt, struct task_struct *task); | ||
2168 | + | ||
2169 | +static inline struct task_struct* __take_ready(rt_domain_t* rt) | ||
2170 | +{ | ||
2171 | + struct bheap_node* hn = bheap_take(rt->order, &rt->ready_queue); | ||
2172 | + if (hn) | ||
2173 | + return bheap2task(hn); | ||
2174 | + else | ||
2175 | + return NULL; | ||
2176 | +} | ||
2177 | + | ||
2178 | +static inline struct task_struct* __peek_ready(rt_domain_t* rt) | ||
2179 | +{ | ||
2180 | + struct bheap_node* hn = bheap_peek(rt->order, &rt->ready_queue); | ||
2181 | + if (hn) | ||
2182 | + return bheap2task(hn); | ||
2183 | + else | ||
2184 | + return NULL; | ||
2185 | +} | ||
2186 | + | ||
2187 | +static inline int is_queued(struct task_struct *t) | ||
2188 | +{ | ||
2189 | + BUG_ON(!tsk_rt(t)->heap_node); | ||
2190 | + return bheap_node_in_heap(tsk_rt(t)->heap_node); | ||
2191 | +} | ||
2192 | + | ||
2193 | +static inline void remove(rt_domain_t* rt, struct task_struct *t) | ||
2194 | +{ | ||
2195 | + bheap_delete(rt->order, &rt->ready_queue, tsk_rt(t)->heap_node); | ||
2196 | +} | ||
2197 | + | ||
2198 | +static inline void add_ready(rt_domain_t* rt, struct task_struct *new) | ||
2199 | +{ | ||
2200 | + unsigned long flags; | ||
2201 | + /* first we need the write lock for rt_ready_queue */ | ||
2202 | + raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
2203 | + __add_ready(rt, new); | ||
2204 | + raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
2205 | +} | ||
2206 | + | ||
2207 | +static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks) | ||
2208 | +{ | ||
2209 | + unsigned long flags; | ||
2210 | + raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
2211 | + __merge_ready(rt, tasks); | ||
2212 | + raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
2213 | +} | ||
2214 | + | ||
2215 | +static inline struct task_struct* take_ready(rt_domain_t* rt) | ||
2216 | +{ | ||
2217 | + unsigned long flags; | ||
2218 | + struct task_struct* ret; | ||
2219 | + /* first we need the write lock for rt_ready_queue */ | ||
2220 | + raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
2221 | + ret = __take_ready(rt); | ||
2222 | + raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
2223 | + return ret; | ||
2224 | +} | ||
2225 | + | ||
2226 | + | ||
2227 | +static inline void add_release(rt_domain_t* rt, struct task_struct *task) | ||
2228 | +{ | ||
2229 | + unsigned long flags; | ||
2230 | + raw_spin_lock_irqsave(&rt->tobe_lock, flags); | ||
2231 | + __add_release(rt, task); | ||
2232 | + raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
2233 | +} | ||
2234 | + | ||
2235 | +#ifdef CONFIG_RELEASE_MASTER | ||
2236 | +void __add_release_on(rt_domain_t* rt, struct task_struct *task, | ||
2237 | + int target_cpu); | ||
2238 | + | ||
2239 | +static inline void add_release_on(rt_domain_t* rt, | ||
2240 | + struct task_struct *task, | ||
2241 | + int target_cpu) | ||
2242 | +{ | ||
2243 | + unsigned long flags; | ||
2244 | + raw_spin_lock_irqsave(&rt->tobe_lock, flags); | ||
2245 | + __add_release_on(rt, task, target_cpu); | ||
2246 | + raw_spin_unlock_irqrestore(&rt->tobe_lock, flags); | ||
2247 | +} | ||
2248 | +#endif | ||
2249 | + | ||
2250 | +static inline int __jobs_pending(rt_domain_t* rt) | ||
2251 | +{ | ||
2252 | + return !bheap_empty(&rt->ready_queue); | ||
2253 | +} | ||
2254 | + | ||
2255 | +static inline int jobs_pending(rt_domain_t* rt) | ||
2256 | +{ | ||
2257 | + unsigned long flags; | ||
2258 | + int ret; | ||
2259 | + /* first we need the write lock for rt_ready_queue */ | ||
2260 | + raw_spin_lock_irqsave(&rt->ready_lock, flags); | ||
2261 | + ret = !bheap_empty(&rt->ready_queue); | ||
2262 | + raw_spin_unlock_irqrestore(&rt->ready_lock, flags); | ||
2263 | + return ret; | ||
2264 | +} | ||
2265 | + | ||
2266 | +#endif | ||
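
Lifecycle in brief: plugins feed future jobs in via add_release(), the hrtimer of the matching release heap fires at the release time, the domain's release_jobs callback merges the released jobs into the ready queue, and the plugin pulls them out with take_ready(). A hypothetical EDF-ordered initialization, reusing the demo_check_resched sketch from edf_common.h above and assuming a NULL release callback selects the built-in default:

    /* hypothetical sketch -- not part of the patch */
    static rt_domain_t demo_domain;

    static void demo_domain_setup(void)
    {
        /* edf_ready_order is declared in litmus/edf_common.h */
        rt_domain_init(&demo_domain, edf_ready_order,
                       demo_check_resched, NULL);
    }
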
2267 | diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h | ||
2268 | new file mode 100644 | ||
2269 | index 0000000..a7a183f | ||
2270 | --- /dev/null | ||
2271 | +++ b/include/litmus/rt_param.h | ||
2272 | @@ -0,0 +1,196 @@ | ||
2273 | +/* | ||
2274 | + * Definition of the scheduler plugin interface. | ||
2275 | + * | ||
2276 | + */ | ||
2277 | +#ifndef _LINUX_RT_PARAM_H_ | ||
2278 | +#define _LINUX_RT_PARAM_H_ | ||
2279 | + | ||
2280 | +/* Litmus time type. */ | ||
2281 | +typedef unsigned long long lt_t; | ||
2282 | + | ||
2283 | +static inline int lt_after(lt_t a, lt_t b) | ||
2284 | +{ | ||
2285 | + return ((long long) b) - ((long long) a) < 0; | ||
2286 | +} | ||
2287 | +#define lt_before(a, b) lt_after(b, a) | ||
2288 | + | ||
2289 | +static inline int lt_after_eq(lt_t a, lt_t b) | ||
2290 | +{ | ||
2291 | + return ((long long) a) - ((long long) b) >= 0; | ||
2292 | +} | ||
2293 | +#define lt_before_eq(a, b) lt_after_eq(b, a) | ||
2294 | + | ||
2295 | +/* different types of clients */ | ||
2296 | +typedef enum { | ||
2297 | + RT_CLASS_HARD, | ||
2298 | + RT_CLASS_SOFT, | ||
2299 | + RT_CLASS_BEST_EFFORT | ||
2300 | +} task_class_t; | ||
2301 | + | ||
2302 | +typedef enum { | ||
2303 | + NO_ENFORCEMENT, /* job may overrun unhindered */ | ||
2304 | + QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */ | ||
2305 | + PRECISE_ENFORCEMENT /* NOT IMPLEMENTED - enforced with hrtimers */ | ||
2306 | +} budget_policy_t; | ||
2307 | + | ||
2308 | +struct rt_task { | ||
2309 | + lt_t exec_cost; | ||
2310 | + lt_t period; | ||
2311 | + lt_t phase; | ||
2312 | + unsigned int cpu; | ||
2313 | + task_class_t cls; | ||
2314 | + budget_policy_t budget_policy; /* ignored by pfair */ | ||
2315 | +}; | ||
2316 | + | ||
2317 | +/* The definition of the data that is shared between the kernel and real-time | ||
2318 | + * tasks via a shared page (see litmus/ctrldev.c). | ||
2319 | + * | ||
2320 | + * WARNING: User space can write to this, so don't trust | ||
2321 | + * the correctness of the fields! | ||
2322 | + * | ||
2323 | + * This serves two purposes: to enable efficient signaling | ||
2324 | + * of non-preemptive sections (user->kernel) and | ||
2325 | + * delayed preemptions (kernel->user), and to export | ||
2326 | + * some real-time relevant statistics such as preemption and | ||
2327 | + * migration data to user space. We can't use a device to export | ||
2328 | + * statistics because we want to avoid system call overhead when | ||
2329 | + * determining preemption/migration overheads. | ||
2330 | + */ | ||
2331 | +struct control_page { | ||
2332 | + /* Is the task currently in a non-preemptive section? */ | ||
2333 | + int np_flag; | ||
2334 | + /* Should the task call into the kernel when it leaves | ||
2335 | + * its non-preemptive section? */ | ||
2336 | + int delayed_preemption; | ||
2337 | + | ||
2338 | + /* to be extended */ | ||
2339 | +}; | ||
2340 | + | ||
2341 | +/* don't export internal data structures to user space (liblitmus) */ | ||
2342 | +#ifdef __KERNEL__ | ||
2343 | + | ||
2344 | +struct _rt_domain; | ||
2345 | +struct bheap_node; | ||
2346 | +struct release_heap; | ||
2347 | + | ||
2348 | +struct rt_job { | ||
2349 | + /* Time instant the job was or will be released. */ | ||
2350 | + lt_t release; | ||
2351 | + /* What is the current deadline? */ | ||
2352 | + lt_t deadline; | ||
2353 | + | ||
2354 | + /* How much service has this job received so far? */ | ||
2355 | + lt_t exec_time; | ||
2356 | + | ||
2357 | + /* Which job is this? This is used to let user space | ||
2358 | + * specify which job to wait for, which is important if jobs | ||
2359 | + * overrun. If we just call sys_sleep_next_period() then we | ||
2360 | + * will unintentionally miss jobs after an overrun. | ||
2361 | + * | ||
2362 | + * Increase this sequence number when a job is released. | ||
2363 | + */ | ||
2364 | + unsigned int job_no; | ||
2365 | +}; | ||
2366 | + | ||
2367 | +struct pfair_param; | ||
2368 | + | ||
2369 | +/* RT task parameters for scheduling extensions | ||
2370 | + * These parameters are inherited during clone and therefore must | ||
2371 | + * be explicitly set up before the task set is launched. | ||
2372 | + */ | ||
2373 | +struct rt_param { | ||
2374 | + /* is the task sleeping? */ | ||
2375 | + unsigned int flags:8; | ||
2376 | + | ||
2377 | + /* do we need to check for srp blocking? */ | ||
2378 | + unsigned int srp_non_recurse:1; | ||
2379 | + | ||
2380 | + /* is the task present? (true if it can be scheduled) */ | ||
2381 | + unsigned int present:1; | ||
2382 | + | ||
2383 | + /* user controlled parameters */ | ||
2384 | + struct rt_task task_params; | ||
2385 | + | ||
2386 | + /* timing parameters */ | ||
2387 | + struct rt_job job_params; | ||
2388 | + | ||
2389 | + /* Task representing the current "inherited" task | ||
2390 | + * priority, assigned by inherit_priority() and | ||
2391 | + * return_priority() in the scheduler plugins. | ||
2392 | + * May point to self if PI does not result in | ||
2393 | + * an increased task priority. | ||
2394 | + */ | ||
2395 | + struct task_struct* inh_task; | ||
2396 | + | ||
2397 | +#ifdef CONFIG_NP_SECTION | ||
2398 | + /* For the FMLP under PSN-EDF, it is required to make the task | ||
2399 | + * non-preemptive from kernel space. In order not to interfere with | ||
2400 | + * user space, this counter indicates the kernel space np setting. | ||
2401 | + * kernel_np > 0 => task is non-preemptive | ||
2402 | + */ | ||
2403 | + unsigned int kernel_np; | ||
2404 | +#endif | ||
2405 | + | ||
2406 | + /* This field can be used by plugins to store where the task | ||
2407 | + * is currently scheduled. It is the responsibility of the | ||
2408 | + * plugin to avoid race conditions. | ||
2409 | + * | ||
2410 | + * This is used by GSN-EDF and PFAIR. | ||
2411 | + */ | ||
2412 | + volatile int scheduled_on; | ||
2413 | + | ||
2414 | + /* Is the stack of the task currently in use? This is updated by | ||
2415 | + * the LITMUS core. | ||
2416 | + * | ||
2417 | + * Be careful to avoid deadlocks! | ||
2418 | + */ | ||
2419 | + volatile int stack_in_use; | ||
2420 | + | ||
2421 | + /* This field can be used by plugins to store where the task | ||
2422 | + * is currently linked. It is the responsibility of the plugin | ||
2423 | + * to avoid race conditions. | ||
2424 | + * | ||
2425 | + * Used by GSN-EDF. | ||
2426 | + */ | ||
2427 | + volatile int linked_on; | ||
2428 | + | ||
2429 | + /* PFAIR/PD^2 state. Allocated on demand. */ | ||
2430 | + struct pfair_param* pfair; | ||
2431 | + | ||
2432 | + /* Fields saved before BE->RT transition. | ||
2433 | + */ | ||
2434 | + int old_policy; | ||
2435 | + int old_prio; | ||
2436 | + | ||
2437 | + /* ready queue for this task */ | ||
2438 | + struct _rt_domain* domain; | ||
2439 | + | ||
2440 | + /* heap element for this task | ||
2441 | + * | ||
2442 | + * Warning: Don't statically allocate this node. The heap | ||
2443 | + * implementation swaps these between tasks, thus after | ||
2444 | + * dequeuing from a heap you may end up with a different node | ||
2445 | + * than the one you had when enqueuing the task. For the same | ||
2446 | + * reason, don't obtain and store references to this node | ||
2447 | + * other than this pointer (which is updated by the heap | ||
2448 | + * implementation). | ||
2449 | + */ | ||
2450 | + struct bheap_node* heap_node; | ||
2451 | + struct release_heap* rel_heap; | ||
2452 | + | ||
2453 | + /* Used by rt_domain to queue task in release list. | ||
2454 | + */ | ||
2455 | + struct list_head list; | ||
2456 | + | ||
2457 | + /* Pointer to the page shared between userspace and kernel. */ | ||
2458 | + struct control_page * ctrl_page; | ||
2459 | +}; | ||
2460 | + | ||
2461 | +/* Possible RT flags */ | ||
2462 | +#define RT_F_RUNNING 0x00000000 | ||
2463 | +#define RT_F_SLEEP 0x00000001 | ||
2464 | +#define RT_F_EXIT_SEM 0x00000008 | ||
2465 | + | ||
2466 | +#endif | ||
2467 | + | ||
2468 | +#endif | ||
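
For reference, struct rt_task above is the block a user-space launcher (e.g., via liblitmus) fills in before a task is admitted; all times are in nanoseconds. The values below are arbitrary examples, not defaults:

    /* illustrative values only -- not part of the patch */
    struct rt_task demo_params = {
        .exec_cost     = 10000000ULL,  /* 10 ms worst-case execution cost */
        .period        = 100000000ULL, /* released every 100 ms */
        .phase         = 0,            /* no release offset */
        .cpu           = 0,            /* partition, if the plugin is partitioned */
        .cls           = RT_CLASS_SOFT,
        .budget_policy = QUANTUM_ENFORCEMENT,
    };
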
2469 | diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h | ||
2470 | new file mode 100644 | ||
2471 | index 0000000..2d856d5 | ||
2472 | --- /dev/null | ||
2473 | +++ b/include/litmus/sched_plugin.h | ||
2474 | @@ -0,0 +1,159 @@ | ||
2475 | +/* | ||
2476 | + * Definition of the scheduler plugin interface. | ||
2477 | + * | ||
2478 | + */ | ||
2479 | +#ifndef _LINUX_SCHED_PLUGIN_H_ | ||
2480 | +#define _LINUX_SCHED_PLUGIN_H_ | ||
2481 | + | ||
2482 | +#include <linux/sched.h> | ||
2483 | + | ||
2484 | +/* struct for semaphore with priority inheritance */ | ||
2485 | +struct pi_semaphore { | ||
2486 | + atomic_t count; | ||
2487 | + int sleepers; | ||
2488 | + wait_queue_head_t wait; | ||
2489 | + struct { | ||
2490 | + /* highest-prio holder/waiter */ | ||
2491 | + struct task_struct *task; | ||
2492 | + struct task_struct* cpu_task[NR_CPUS]; | ||
2493 | + } hp; | ||
2494 | + /* current lock holder */ | ||
2495 | + struct task_struct *holder; | ||
2496 | +}; | ||
2497 | + | ||
2498 | +/************************ setup/tear down ********************/ | ||
2499 | + | ||
2500 | +typedef long (*activate_plugin_t) (void); | ||
2501 | +typedef long (*deactivate_plugin_t) (void); | ||
2502 | + | ||
2503 | + | ||
2504 | + | ||
2505 | +/********************* scheduler invocation ******************/ | ||
2506 | + | ||
2507 | +/* Plugin-specific realtime tick handler */ | ||
2508 | +typedef void (*scheduler_tick_t) (struct task_struct *cur); | ||
2509 | +/* Plugin-specific scheduling decision function */ | ||
2510 | +typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | ||
2511 | +/* Clean up after the task switch has occurred. | ||
2512 | + * This function is called after every (even non-rt) task switch. | ||
2513 | + */ | ||
2514 | +typedef void (*finish_switch_t)(struct task_struct *prev); | ||
2515 | + | ||
2516 | + | ||
2517 | +/********************* task state changes ********************/ | ||
2518 | + | ||
2519 | +/* Called to set up a new real-time task. | ||
2520 | + * Release the first job, enqueue, etc. | ||
2521 | + * Task may already be running. | ||
2522 | + */ | ||
2523 | +typedef void (*task_new_t) (struct task_struct *task, | ||
2524 | + int on_rq, | ||
2525 | + int running); | ||
2526 | + | ||
2527 | +/* Called to re-introduce a task after blocking. | ||
2528 | + * Can potentially be called multiple times. | ||
2529 | + */ | ||
2530 | +typedef void (*task_wake_up_t) (struct task_struct *task); | ||
2531 | +/* Called to notify the plugin of a blocking real-time task. | ||
2532 | + * It will only be called for real-time tasks and before schedule() is called. */ | ||
2533 | +typedef void (*task_block_t) (struct task_struct *task); | ||
2534 | +/* Called when a real-time task exits or changes to a different scheduling | ||
2535 | + * class. | ||
2536 | + * Free any allocated resources | ||
2537 | + */ | ||
2538 | +typedef void (*task_exit_t) (struct task_struct *); | ||
2539 | + | ||
2540 | +/* Called when the new_owner is released from the wait queue. | ||
2541 | + * It should now inherit the priority from sem, _before_ it gets re-added | ||
2542 | + * to any queue. | ||
2543 | + */ | ||
2544 | +typedef long (*inherit_priority_t) (struct pi_semaphore *sem, | ||
2545 | + struct task_struct *new_owner); | ||
2546 | + | ||
2547 | +/* Called when the current task releases a semaphore from which it might | ||
2548 | + * have inherited a priority. | ||
2549 | + */ | ||
2550 | +typedef long (*return_priority_t) (struct pi_semaphore *sem); | ||
2551 | + | ||
2552 | +/* Called when a task tries to acquire a semaphore and fails. Check if its | ||
2553 | + * priority is higher than that of the current holder. | ||
2554 | + */ | ||
2555 | +typedef long (*pi_block_t) (struct pi_semaphore *sem, struct task_struct *t); | ||
2556 | + | ||
2557 | + | ||
2558 | + | ||
2559 | + | ||
2560 | +/********************* sys call backends ********************/ | ||
2561 | +/* This function causes the caller to sleep until the next release */ | ||
2562 | +typedef long (*complete_job_t) (void); | ||
2563 | + | ||
2564 | +typedef long (*admit_task_t)(struct task_struct* tsk); | ||
2565 | + | ||
2566 | +typedef void (*release_at_t)(struct task_struct *t, lt_t start); | ||
2567 | + | ||
2568 | +struct sched_plugin { | ||
2569 | + struct list_head list; | ||
2570 | + /* basic info */ | ||
2571 | + char *plugin_name; | ||
2572 | + | ||
2573 | + /* setup */ | ||
2574 | + activate_plugin_t activate_plugin; | ||
2575 | + deactivate_plugin_t deactivate_plugin; | ||
2576 | + | ||
2577 | +#ifdef CONFIG_SRP | ||
2578 | + unsigned int srp_active; | ||
2579 | +#endif | ||
2580 | + | ||
2581 | + /* scheduler invocation */ | ||
2582 | + scheduler_tick_t tick; | ||
2583 | + schedule_t schedule; | ||
2584 | + finish_switch_t finish_switch; | ||
2585 | + | ||
2586 | + /* syscall backend */ | ||
2587 | + complete_job_t complete_job; | ||
2588 | + release_at_t release_at; | ||
2589 | + | ||
2590 | + /* task state changes */ | ||
2591 | + admit_task_t admit_task; | ||
2592 | + | ||
2593 | + task_new_t task_new; | ||
2594 | + task_wake_up_t task_wake_up; | ||
2595 | + task_block_t task_block; | ||
2596 | + task_exit_t task_exit; | ||
2597 | + | ||
2598 | +#ifdef CONFIG_FMLP | ||
2599 | + /* priority inheritance */ | ||
2600 | + unsigned int fmlp_active; | ||
2601 | + inherit_priority_t inherit_priority; | ||
2602 | + return_priority_t return_priority; | ||
2603 | + pi_block_t pi_block; | ||
2604 | +#endif | ||
2605 | +} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
2606 | + | ||
2607 | + | ||
2608 | +extern struct sched_plugin *litmus; | ||
2609 | + | ||
2610 | +int register_sched_plugin(struct sched_plugin* plugin); | ||
2611 | +struct sched_plugin* find_sched_plugin(const char* name); | ||
2612 | +int print_sched_plugins(char* buf, int max); | ||
2613 | + | ||
2614 | +static inline int srp_active(void) | ||
2615 | +{ | ||
2616 | +#ifdef CONFIG_SRP | ||
2617 | + return litmus->srp_active; | ||
2618 | +#else | ||
2619 | + return 0; | ||
2620 | +#endif | ||
2621 | +} | ||
2622 | +static inline int fmlp_active(void) | ||
2623 | +{ | ||
2624 | +#ifdef CONFIG_FMLP | ||
2625 | + return litmus->fmlp_active; | ||
2626 | +#else | ||
2627 | + return 0; | ||
2628 | +#endif | ||
2629 | +} | ||
2630 | + | ||
2631 | +extern struct sched_plugin linux_sched_plugin; | ||
2632 | + | ||
2633 | +#endif | ||
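
A plugin is a statically allocated struct sched_plugin handed to register_sched_plugin(). A skeleton with the callback bodies elided ("DEMO" and the demo_* callbacks are made up; complete_job() from litmus/jobs.h is commonly reused verbatim as the syscall backend):

    /* hypothetical sketch -- not part of the patch */
    static struct sched_plugin demo_plugin = {
        .plugin_name  = "DEMO",
        .tick         = demo_tick,     /* see the litmus.h sketch above */
        .schedule     = demo_schedule, /* see the preempt.h sketch above */
        .task_new     = demo_task_new, /* defined elsewhere */
        .task_wake_up = demo_task_wake_up,
        .task_block   = demo_task_block,
        .task_exit    = demo_task_exit,
        .complete_job = complete_job,  /* generic helper from litmus/jobs.h */
        .admit_task   = demo_admit_task,
    };

    static int __init demo_plugin_init(void)
    {
        return register_sched_plugin(&demo_plugin);
    }
    module_init(demo_plugin_init);
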
2634 | diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h | ||
2635 | new file mode 100644 | ||
2636 | index 0000000..a5f7373 | ||
2637 | --- /dev/null | ||
2638 | +++ b/include/litmus/sched_trace.h | ||
2639 | @@ -0,0 +1,183 @@ | ||
2640 | +/* | ||
2641 | + * sched_trace.h -- record scheduler events to a byte stream for offline analysis. | ||
2642 | + */ | ||
2643 | +#ifndef _LINUX_SCHED_TRACE_H_ | ||
2644 | +#define _LINUX_SCHED_TRACE_H_ | ||
2645 | + | ||
2646 | +/* all times in nanoseconds */ | ||
2647 | + | ||
2648 | +struct st_trace_header { | ||
2649 | + u8 type; /* Of what type is this record? */ | ||
2650 | + u8 cpu; /* On which CPU was it recorded? */ | ||
2651 | + u16 pid; /* PID of the task. */ | ||
2652 | + u32 job; /* The job sequence number. */ | ||
2653 | +}; | ||
2654 | + | ||
2655 | +#define ST_NAME_LEN 16 | ||
2656 | +struct st_name_data { | ||
2657 | + char cmd[ST_NAME_LEN]; /* The name of the executable of this process. */ | ||
2658 | +}; | ||
2659 | + | ||
2660 | +struct st_param_data { /* regular params */ | ||
2661 | + u32 wcet; | ||
2662 | + u32 period; | ||
2663 | + u32 phase; | ||
2664 | + u8 partition; | ||
2665 | + u8 __unused[3]; | ||
2666 | +}; | ||
2667 | + | ||
2668 | +struct st_release_data { /* A job was/is going to be released. */ | ||
2669 | + u64 release; /* What's the release time? */ | ||
2670 | + u64 deadline; /* By when must it finish? */ | ||
2671 | +}; | ||
2672 | + | ||
2673 | +struct st_assigned_data { /* A job was assigned to a CPU. */ | ||
2674 | + u64 when; | ||
2675 | + u8 target; /* Where should it execute? */ | ||
2676 | + u8 __unused[3]; | ||
2677 | +}; | ||
2678 | + | ||
2679 | +struct st_switch_to_data { /* A process was switched to on a given CPU. */ | ||
2680 | + u64 when; /* When did this occur? */ | ||
2681 | + u32 exec_time; /* Time the current job has executed. */ | ||
2682 | + | ||
2683 | +}; | ||
2684 | + | ||
2685 | +struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | ||
2686 | + u64 when; | ||
2687 | + u64 exec_time; | ||
2688 | +}; | ||
2689 | + | ||
2690 | +struct st_completion_data { /* A job completed. */ | ||
2691 | + u64 when; | ||
2692 | + u8 forced:1; /* Set to 1 if job overran and kernel advanced to the | ||
2693 | + * next task automatically; set to 0 otherwise. | ||
2694 | + */ | ||
2695 | + u8 __uflags:7; | ||
2696 | + u8 __unused[3]; | ||
2697 | +}; | ||
2698 | + | ||
2699 | +struct st_block_data { /* A task blocks. */ | ||
2700 | + u64 when; | ||
2701 | + u64 __unused; | ||
2702 | +}; | ||
2703 | + | ||
2704 | +struct st_resume_data { /* A task resumes. */ | ||
2705 | + u64 when; | ||
2706 | + u64 __unused; | ||
2707 | +}; | ||
2708 | + | ||
2709 | +struct st_sys_release_data { | ||
2710 | + u64 when; | ||
2711 | + u64 release; | ||
2712 | +}; | ||
2713 | + | ||
2714 | +#define DATA(x) struct st_ ## x ## _data x; | ||
2715 | + | ||
2716 | +typedef enum { | ||
2717 | + ST_NAME = 1, /* Start at one, so that we can spot | ||
2718 | + * uninitialized records. */ | ||
2719 | + ST_PARAM, | ||
2720 | + ST_RELEASE, | ||
2721 | + ST_ASSIGNED, | ||
2722 | + ST_SWITCH_TO, | ||
2723 | + ST_SWITCH_AWAY, | ||
2724 | + ST_COMPLETION, | ||
2725 | + ST_BLOCK, | ||
2726 | + ST_RESUME, | ||
2727 | + ST_SYS_RELEASE, | ||
2728 | +} st_event_record_type_t; | ||
2729 | + | ||
2730 | +struct st_event_record { | ||
2731 | + struct st_trace_header hdr; | ||
2732 | + union { | ||
2733 | + u64 raw[2]; | ||
2734 | + | ||
2735 | + DATA(name); | ||
2736 | + DATA(param); | ||
2737 | + DATA(release); | ||
2738 | + DATA(assigned); | ||
2739 | + DATA(switch_to); | ||
2740 | + DATA(switch_away); | ||
2741 | + DATA(completion); | ||
2742 | + DATA(block); | ||
2743 | + DATA(resume); | ||
2744 | + DATA(sys_release); | ||
2745 | + | ||
2746 | + } data; | ||
2747 | +}; | ||
2748 | + | ||
2749 | +#undef DATA | ||
2750 | + | ||
2751 | +#ifdef __KERNEL__ | ||
2752 | + | ||
2753 | +#include <linux/sched.h> | ||
2754 | +#include <litmus/feather_trace.h> | ||
2755 | + | ||
2756 | +#ifdef CONFIG_SCHED_TASK_TRACE | ||
2757 | + | ||
2758 | +#define SCHED_TRACE(id, callback, task) \ | ||
2759 | + ft_event1(id, callback, task) | ||
2760 | +#define SCHED_TRACE2(id, callback, task, xtra) \ | ||
2761 | + ft_event2(id, callback, task, xtra) | ||
2762 | + | ||
2763 | +/* provide prototypes; needed on sparc64 */ | ||
2764 | +#ifndef NO_TASK_TRACE_DECLS | ||
2765 | +feather_callback void do_sched_trace_task_name(unsigned long id, | ||
2766 | + struct task_struct* task); | ||
2767 | +feather_callback void do_sched_trace_task_param(unsigned long id, | ||
2768 | + struct task_struct* task); | ||
2769 | +feather_callback void do_sched_trace_task_release(unsigned long id, | ||
2770 | + struct task_struct* task); | ||
2771 | +feather_callback void do_sched_trace_task_switch_to(unsigned long id, | ||
2772 | + struct task_struct* task); | ||
2773 | +feather_callback void do_sched_trace_task_switch_away(unsigned long id, | ||
2774 | + struct task_struct* task); | ||
2775 | +feather_callback void do_sched_trace_task_completion(unsigned long id, | ||
2776 | + struct task_struct* task, | ||
2777 | + unsigned long forced); | ||
2778 | +feather_callback void do_sched_trace_task_block(unsigned long id, | ||
2779 | + struct task_struct* task); | ||
2780 | +feather_callback void do_sched_trace_task_resume(unsigned long id, | ||
2781 | + struct task_struct* task); | ||
2782 | +feather_callback void do_sched_trace_sys_release(unsigned long id, | ||
2783 | + lt_t* start); | ||
2784 | +#endif | ||
2785 | + | ||
2786 | +#else | ||
2787 | + | ||
2788 | +#define SCHED_TRACE(id, callback, task) /* no tracing */ | ||
2789 | +#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | ||
2790 | + | ||
2791 | +#endif | ||
2792 | + | ||
2793 | + | ||
2794 | +#define SCHED_TRACE_BASE_ID 500 | ||
2795 | + | ||
2796 | + | ||
2797 | +#define sched_trace_task_name(t) \ | ||
2798 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 1, do_sched_trace_task_name, t) | ||
2799 | +#define sched_trace_task_param(t) \ | ||
2800 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 2, do_sched_trace_task_param, t) | ||
2801 | +#define sched_trace_task_release(t) \ | ||
2802 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 3, do_sched_trace_task_release, t) | ||
2803 | +#define sched_trace_task_switch_to(t) \ | ||
2804 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 4, do_sched_trace_task_switch_to, t) | ||
2805 | +#define sched_trace_task_switch_away(t) \ | ||
2806 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 5, do_sched_trace_task_switch_away, t) | ||
2807 | +#define sched_trace_task_completion(t, forced) \ | ||
2808 | + SCHED_TRACE2(SCHED_TRACE_BASE_ID + 6, do_sched_trace_task_completion, t, \ | ||
2809 | + (unsigned long) forced) | ||
2810 | +#define sched_trace_task_block(t) \ | ||
2811 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 7, do_sched_trace_task_block, t) | ||
2812 | +#define sched_trace_task_resume(t) \ | ||
2813 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 8, do_sched_trace_task_resume, t) | ||
2814 | +/* 'when' is a pointer; it does not need an explicit cast to unsigned long */ | ||
2815 | +#define sched_trace_sys_release(when) \ | ||
2816 | + SCHED_TRACE(SCHED_TRACE_BASE_ID + 9, do_sched_trace_sys_release, when) | ||
2817 | + | ||
2818 | +#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | ||
2819 | + | ||
2820 | +#endif /* __KERNEL__ */ | ||
2821 | + | ||
2822 | +#endif | ||
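
Every record is 24 bytes (an 8-byte header plus a 16-byte payload union), so an offline tool can read the stream as a flat array of struct st_event_record. A hypothetical user-space decoder (not part of the patch; file I/O elided):

    /* hypothetical sketch -- not part of the patch (user space) */
    #include <stdio.h>

    static void demo_decode(struct st_event_record *rec, size_t n)
    {
        size_t i;
        for (i = 0; i < n; i++)
            if (rec[i].hdr.type == ST_COMPLETION)
                printf("pid=%u job=%u completed at %llu%s\n",
                       rec[i].hdr.pid, rec[i].hdr.job,
                       (unsigned long long) rec[i].data.completion.when,
                       rec[i].data.completion.forced ? " (forced)" : "");
    }
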
2823 | diff --git a/include/litmus/trace.h b/include/litmus/trace.h | ||
2824 | new file mode 100644 | ||
2825 | index 0000000..b32c711 | ||
2826 | --- /dev/null | ||
2827 | +++ b/include/litmus/trace.h | ||
2828 | @@ -0,0 +1,113 @@ | ||
2829 | +#ifndef _SYS_TRACE_H_ | ||
2830 | +#define _SYS_TRACE_H_ | ||
2831 | + | ||
2832 | +#ifdef CONFIG_SCHED_OVERHEAD_TRACE | ||
2833 | + | ||
2834 | +#include <litmus/feather_trace.h> | ||
2835 | +#include <litmus/feather_buffer.h> | ||
2836 | + | ||
2837 | + | ||
2838 | +/*********************** TIMESTAMPS ************************/ | ||
2839 | + | ||
2840 | +enum task_type_marker { | ||
2841 | + TSK_BE, | ||
2842 | + TSK_RT, | ||
2843 | + TSK_UNKNOWN | ||
2844 | +}; | ||
2845 | + | ||
2846 | +struct timestamp { | ||
2847 | + uint64_t timestamp; | ||
2848 | + uint32_t seq_no; | ||
2849 | + uint8_t cpu; | ||
2850 | + uint8_t event; | ||
2851 | + uint8_t task_type; | ||
2852 | +}; | ||
2853 | + | ||
2854 | +/* tracing callbacks */ | ||
2855 | +feather_callback void save_timestamp(unsigned long event); | ||
2856 | +feather_callback void save_timestamp_def(unsigned long event, unsigned long type); | ||
2857 | +feather_callback void save_timestamp_task(unsigned long event, unsigned long t_ptr); | ||
2858 | +feather_callback void save_timestamp_cpu(unsigned long event, unsigned long cpu); | ||
2859 | + | ||
2860 | + | ||
2861 | +#define TIMESTAMP(id) ft_event0(id, save_timestamp) | ||
2862 | + | ||
2863 | +#define DTIMESTAMP(id, def) ft_event1(id, save_timestamp_def, (unsigned long) def) | ||
2864 | + | ||
2865 | +#define TTIMESTAMP(id, task) \ | ||
2866 | + ft_event1(id, save_timestamp_task, (unsigned long) task) | ||
2867 | + | ||
2868 | +#define CTIMESTAMP(id, cpu) \ | ||
2869 | + ft_event1(id, save_timestamp_cpu, (unsigned long) cpu) | ||
2870 | + | ||
2871 | +#else /* !CONFIG_SCHED_OVERHEAD_TRACE */ | ||
2872 | + | ||
2873 | +#define TIMESTAMP(id) /* no tracing */ | ||
2874 | + | ||
2875 | +#define DTIMESTAMP(id, def) /* no tracing */ | ||
2876 | + | ||
2877 | +#define TTIMESTAMP(id, task) /* no tracing */ | ||
2878 | + | ||
2879 | +#define CTIMESTAMP(id, cpu) /* no tracing */ | ||
2880 | + | ||
2881 | +#endif | ||
2882 | + | ||
2883 | + | ||
2884 | +/* Convention for timestamps | ||
2885 | + * ========================= | ||
2886 | + * | ||
2887 | + * In order to process the trace files with a common tool, we use the following | ||
2888 | + * convention to measure execution times: The end time id of a code segment is | ||
2889 | + * always the next number after the start time event id. | ||
2890 | + */ | ||
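To make the pairing concrete, here is how an overhead pair defined below is placed around a measured code segment; this mirrors the actual use of the context-switch pair (ids 104/105) in the kernel/sched.c hunk later in this patch:

    TS_CXS_START(next);              /* start event, id 104 */
    context_switch(rq, prev, next);  /* the measured code segment */
    TS_CXS_END(current);             /* end event, id 105 = start id + 1 */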
2891 | + | ||
2892 | +/* we only care about 'next' */ | ||
2893 | +#define TS_SCHED_START DTIMESTAMP(100, TSK_UNKNOWN) | ||
2896 | +#define TS_SCHED_END(t) TTIMESTAMP(101, t) | ||
2897 | +#define TS_SCHED2_START(t) TTIMESTAMP(102, t) | ||
2898 | +#define TS_SCHED2_END(t) TTIMESTAMP(103, t) | ||
2899 | + | ||
2900 | +#define TS_CXS_START(t) TTIMESTAMP(104, t) | ||
2901 | +#define TS_CXS_END(t) TTIMESTAMP(105, t) | ||
2902 | + | ||
2903 | +#define TS_RELEASE_START DTIMESTAMP(106, TSK_RT) | ||
2904 | +#define TS_RELEASE_END DTIMESTAMP(107, TSK_RT) | ||
2905 | + | ||
2906 | +#define TS_TICK_START(t) TTIMESTAMP(110, t) | ||
2907 | +#define TS_TICK_END(t) TTIMESTAMP(111, t) | ||
2908 | + | ||
2909 | + | ||
2910 | +#define TS_PLUGIN_SCHED_START /* TIMESTAMP(120) */ /* currently unused */ | ||
2911 | +#define TS_PLUGIN_SCHED_END /* TIMESTAMP(121) */ | ||
2912 | + | ||
2913 | +#define TS_PLUGIN_TICK_START /* TIMESTAMP(130) */ | ||
2914 | +#define TS_PLUGIN_TICK_END /* TIMESTAMP(131) */ | ||
2915 | + | ||
2916 | +#define TS_ENTER_NP_START TIMESTAMP(140) | ||
2917 | +#define TS_ENTER_NP_END TIMESTAMP(141) | ||
2918 | + | ||
2919 | +#define TS_EXIT_NP_START TIMESTAMP(150) | ||
2920 | +#define TS_EXIT_NP_END TIMESTAMP(151) | ||
2921 | + | ||
2922 | +#define TS_SRP_UP_START TIMESTAMP(160) | ||
2923 | +#define TS_SRP_UP_END TIMESTAMP(161) | ||
2924 | +#define TS_SRP_DOWN_START TIMESTAMP(162) | ||
2925 | +#define TS_SRP_DOWN_END TIMESTAMP(163) | ||
2926 | + | ||
2927 | +#define TS_PI_UP_START TIMESTAMP(170) | ||
2928 | +#define TS_PI_UP_END TIMESTAMP(171) | ||
2929 | +#define TS_PI_DOWN_START TIMESTAMP(172) | ||
2930 | +#define TS_PI_DOWN_END TIMESTAMP(173) | ||
2931 | + | ||
2932 | +#define TS_FIFO_UP_START TIMESTAMP(180) | ||
2933 | +#define TS_FIFO_UP_END TIMESTAMP(181) | ||
2934 | +#define TS_FIFO_DOWN_START TIMESTAMP(182) | ||
2935 | +#define TS_FIFO_DOWN_END TIMESTAMP(183) | ||
2936 | + | ||
2937 | +#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) | ||
2938 | +#define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) | ||
2939 | + | ||
2940 | + | ||
2941 | +#endif /* !_SYS_TRACE_H_ */ | ||
2942 | diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h | ||
2943 | new file mode 100644 | ||
2944 | index 0000000..dbddc65 | ||
2945 | --- /dev/null | ||
2946 | +++ b/include/litmus/unistd_32.h | ||
2947 | @@ -0,0 +1,23 @@ | ||
2948 | +/* | ||
2949 | + * included from arch/x86/include/asm/unistd_32.h | ||
2950 | + * | ||
2951 | + * LITMUS^RT syscalls with "relative" numbers | ||
2952 | + */ | ||
2953 | +#define __LSC(x) (__NR_LITMUS + x) | ||
2954 | + | ||
2955 | +#define __NR_set_rt_task_param __LSC(0) | ||
2956 | +#define __NR_get_rt_task_param __LSC(1) | ||
2957 | +#define __NR_complete_job __LSC(2) | ||
2958 | +#define __NR_od_open __LSC(3) | ||
2959 | +#define __NR_od_close __LSC(4) | ||
2960 | +#define __NR_fmlp_down __LSC(5) | ||
2961 | +#define __NR_fmlp_up __LSC(6) | ||
2962 | +#define __NR_srp_down __LSC(7) | ||
2963 | +#define __NR_srp_up __LSC(8) | ||
2964 | +#define __NR_query_job_no __LSC(9) | ||
2965 | +#define __NR_wait_for_job_release __LSC(10) | ||
2966 | +#define __NR_wait_for_ts_release __LSC(11) | ||
2967 | +#define __NR_release_ts __LSC(12) | ||
2968 | +#define __NR_null_call __LSC(13) | ||
2969 | + | ||
2970 | +#define NR_litmus_syscalls 14 | ||
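For illustration, a hedged userspace sketch of invoking one of these syscalls by its relative number; the base value of __NR_LITMUS below is hypothetical (the real value is assigned by the arch header that includes this file), and passing NULL to null_call is an assumption:

    #include <unistd.h>
    #include <sys/syscall.h>

    #define __NR_LITMUS    333                  /* hypothetical base number */
    #define __NR_null_call (__NR_LITMUS + 13)   /* i.e., __LSC(13) */

    /* null_call is a no-op syscall, useful for probing syscall overhead */
    int probe_null_call(void)
    {
            return syscall(__NR_null_call, NULL);
    }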
2971 | diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h | ||
2972 | new file mode 100644 | ||
2973 | index 0000000..f0618e7 | ||
2974 | --- /dev/null | ||
2975 | +++ b/include/litmus/unistd_64.h | ||
2976 | @@ -0,0 +1,37 @@ | ||
2977 | +/* | ||
2978 | + * included from arch/x86/include/asm/unistd_64.h | ||
2979 | + * | ||
2980 | + * LITMUS^RT syscalls with "relative" numbers | ||
2981 | + */ | ||
2982 | +#define __LSC(x) (__NR_LITMUS + x) | ||
2983 | + | ||
2984 | +#define __NR_set_rt_task_param __LSC(0) | ||
2985 | +__SYSCALL(__NR_set_rt_task_param, sys_set_rt_task_param) | ||
2986 | +#define __NR_get_rt_task_param __LSC(1) | ||
2987 | +__SYSCALL(__NR_get_rt_task_param, sys_get_rt_task_param) | ||
2988 | +#define __NR_complete_job __LSC(2) | ||
2989 | +__SYSCALL(__NR_complete_job, sys_complete_job) | ||
2990 | +#define __NR_od_open __LSC(3) | ||
2991 | +__SYSCALL(__NR_od_open, sys_od_open) | ||
2992 | +#define __NR_od_close __LSC(4) | ||
2993 | +__SYSCALL(__NR_od_close, sys_od_close) | ||
2994 | +#define __NR_fmlp_down __LSC(5) | ||
2995 | +__SYSCALL(__NR_fmlp_down, sys_fmlp_down) | ||
2996 | +#define __NR_fmlp_up __LSC(6) | ||
2997 | +__SYSCALL(__NR_fmlp_up, sys_fmlp_up) | ||
2998 | +#define __NR_srp_down __LSC(7) | ||
2999 | +__SYSCALL(__NR_srp_down, sys_srp_down) | ||
3000 | +#define __NR_srp_up __LSC(8) | ||
3001 | +__SYSCALL(__NR_srp_up, sys_srp_up) | ||
3002 | +#define __NR_query_job_no __LSC(9) | ||
3003 | +__SYSCALL(__NR_query_job_no, sys_query_job_no) | ||
3004 | +#define __NR_wait_for_job_release __LSC(10) | ||
3005 | +__SYSCALL(__NR_wait_for_job_release, sys_wait_for_job_release) | ||
3006 | +#define __NR_wait_for_ts_release __LSC(11) | ||
3007 | +__SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | ||
3008 | +#define __NR_release_ts __LSC(12) | ||
3009 | +__SYSCALL(__NR_release_ts, sys_release_ts) | ||
3010 | +#define __NR_null_call __LSC(13) | ||
3011 | +__SYSCALL(__NR_null_call, sys_null_call) | ||
3012 | + | ||
3013 | +#define NR_litmus_syscalls 14 | ||
3014 | diff --git a/kernel/exit.c b/kernel/exit.c | ||
3015 | index 0312022..b9d3bc6 100644 | ||
3016 | --- a/kernel/exit.c | ||
3017 | +++ b/kernel/exit.c | ||
3018 | @@ -56,6 +56,8 @@ | ||
3019 | #include <asm/pgtable.h> | ||
3020 | #include <asm/mmu_context.h> | ||
3021 | |||
3022 | +extern void exit_od_table(struct task_struct *t); | ||
3023 | + | ||
3024 | static void exit_mm(struct task_struct * tsk); | ||
3025 | |||
3026 | static void __unhash_process(struct task_struct *p, bool group_dead) | ||
3027 | @@ -960,6 +962,8 @@ NORET_TYPE void do_exit(long code) | ||
3028 | if (unlikely(tsk->audit_context)) | ||
3029 | audit_free(tsk); | ||
3030 | |||
3031 | + exit_od_table(tsk); | ||
3032 | + | ||
3033 | tsk->exit_code = code; | ||
3034 | taskstats_exit(tsk, group_dead); | ||
3035 | |||
3036 | diff --git a/kernel/fork.c b/kernel/fork.c | ||
3037 | index c445f8c..ab7f29d 100644 | ||
3038 | --- a/kernel/fork.c | ||
3039 | +++ b/kernel/fork.c | ||
3040 | @@ -75,6 +75,9 @@ | ||
3041 | |||
3042 | #include <trace/events/sched.h> | ||
3043 | |||
3044 | +#include <litmus/litmus.h> | ||
3045 | +#include <litmus/sched_plugin.h> | ||
3046 | + | ||
3047 | /* | ||
3048 | * Protected counters by write_lock_irq(&tasklist_lock) | ||
3049 | */ | ||
3050 | @@ -183,6 +186,7 @@ void __put_task_struct(struct task_struct *tsk) | ||
3051 | WARN_ON(atomic_read(&tsk->usage)); | ||
3052 | WARN_ON(tsk == current); | ||
3053 | |||
3054 | + exit_litmus(tsk); | ||
3055 | exit_creds(tsk); | ||
3056 | delayacct_tsk_free(tsk); | ||
3057 | put_signal_struct(tsk->signal); | ||
3058 | @@ -266,6 +270,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | ||
3059 | |||
3060 | tsk->stack = ti; | ||
3061 | |||
3062 | + /* Don't let the new task be a real-time task. */ | ||
3063 | + litmus_fork(tsk); | ||
3064 | + | ||
3065 | err = prop_local_init_single(&tsk->dirties); | ||
3066 | if (err) | ||
3067 | goto out; | ||
3068 | diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c | ||
3069 | index 72206cf..cb49883 100644 | ||
3070 | --- a/kernel/hrtimer.c | ||
3071 | +++ b/kernel/hrtimer.c | ||
3072 | @@ -46,6 +46,8 @@ | ||
3073 | #include <linux/sched.h> | ||
3074 | #include <linux/timer.h> | ||
3075 | |||
3076 | +#include <litmus/litmus.h> | ||
3077 | + | ||
3078 | #include <asm/uaccess.h> | ||
3079 | |||
3080 | #include <trace/events/timer.h> | ||
3081 | @@ -1042,6 +1044,98 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode) | ||
3082 | } | ||
3083 | EXPORT_SYMBOL_GPL(hrtimer_start); | ||
3084 | |||
3085 | +#ifdef CONFIG_ARCH_HAS_SEND_PULL_TIMERS | ||
3086 | + | ||
3087 | +/** | ||
3088 | + * hrtimer_start_on_info_init - Initialize hrtimer_start_on_info | ||
3089 | + */ | ||
3090 | +void hrtimer_start_on_info_init(struct hrtimer_start_on_info *info) | ||
3091 | +{ | ||
3092 | + memset(info, 0, sizeof(struct hrtimer_start_on_info)); | ||
3093 | + atomic_set(&info->state, HRTIMER_START_ON_INACTIVE); | ||
3094 | +} | ||
3095 | + | ||
3096 | +/** | ||
3097 | + * hrtimer_pull - PULL_TIMERS_VECTOR callback on remote cpu | ||
3098 | + */ | ||
3099 | +void hrtimer_pull(void) | ||
3100 | +{ | ||
3101 | + struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); | ||
3102 | + struct hrtimer_start_on_info *info; | ||
3103 | + struct list_head *pos, *safe, list; | ||
3104 | + | ||
3105 | + raw_spin_lock(&base->lock); | ||
3106 | + list_replace_init(&base->to_pull, &list); | ||
3107 | + raw_spin_unlock(&base->lock); | ||
3108 | + | ||
3109 | + list_for_each_safe(pos, safe, &list) { | ||
3110 | + info = list_entry(pos, struct hrtimer_start_on_info, list); | ||
3111 | + TRACE("pulled timer %p\n", info->timer); | ||
3112 | + list_del(pos); | ||
3113 | + hrtimer_start(info->timer, info->time, info->mode); | ||
3114 | + } | ||
3115 | +} | ||
3116 | + | ||
3117 | +/** | ||
3118 | + * hrtimer_start_on - trigger timer arming on remote cpu | ||
3119 | + * @cpu: remote cpu | ||
3120 | + * @info: save timer information for enqueuing on remote cpu | ||
3121 | + * @timer: timer to be pulled | ||
3122 | + * @time: expire time | ||
3123 | + * @mode: timer mode | ||
3124 | + */ | ||
3125 | +int hrtimer_start_on(int cpu, struct hrtimer_start_on_info* info, | ||
3126 | + struct hrtimer *timer, ktime_t time, | ||
3127 | + const enum hrtimer_mode mode) | ||
3128 | +{ | ||
3129 | + unsigned long flags; | ||
3130 | + struct hrtimer_cpu_base* base; | ||
3131 | + int in_use = 0, was_empty; | ||
3132 | + | ||
3133 | + /* serialize access to info through the timer base */ | ||
3134 | + lock_hrtimer_base(timer, &flags); | ||
3135 | + | ||
3136 | + in_use = (atomic_read(&info->state) != HRTIMER_START_ON_INACTIVE); | ||
3137 | + if (!in_use) { | ||
3138 | + INIT_LIST_HEAD(&info->list); | ||
3139 | + info->timer = timer; | ||
3140 | + info->time = time; | ||
3141 | + info->mode = mode; | ||
3142 | + /* mark as in use */ | ||
3143 | + atomic_set(&info->state, HRTIMER_START_ON_QUEUED); | ||
3144 | + } | ||
3145 | + | ||
3146 | + unlock_hrtimer_base(timer, &flags); | ||
3147 | + | ||
3148 | + if (!in_use) { | ||
3149 | + /* initiate pull */ | ||
3150 | + preempt_disable(); | ||
3151 | + if (cpu == smp_processor_id()) { | ||
3152 | + /* start timer locally; we may get called | ||
3153 | + * with rq->lock held, do not wake up anything | ||
3154 | + */ | ||
3155 | + TRACE("hrtimer_start_on: starting on local CPU\n"); | ||
3156 | + __hrtimer_start_range_ns(info->timer, info->time, | ||
3157 | + 0, info->mode, 0); | ||
3158 | + } else { | ||
3159 | + TRACE("hrtimer_start_on: pulling to remote CPU\n"); | ||
3160 | + base = &per_cpu(hrtimer_bases, cpu); | ||
3161 | + raw_spin_lock_irqsave(&base->lock, flags); | ||
3162 | + was_empty = list_empty(&base->to_pull); | ||
3163 | + list_add(&info->list, &base->to_pull); | ||
3164 | + raw_spin_unlock_irqrestore(&base->lock, flags); | ||
3165 | + if (was_empty) | ||
3166 | + /* only send an IPI if no one else | ||
3167 | + * has done so already | ||
3168 | + */ | ||
3169 | + smp_send_pull_timers(cpu); | ||
3170 | + } | ||
3171 | + preempt_enable(); | ||
3172 | + } | ||
3173 | + return in_use; | ||
3174 | +} | ||
3175 | + | ||
3176 | +#endif | ||
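A hedged sketch of the intended calling pattern for hrtimer_start_on(); the timer, info record, and variables below are illustrative. The info record is initialized once and reused; if it is still in use from a previous request, the call arms nothing and returns nonzero:

    static struct hrtimer my_timer;                  /* illustrative */
    static struct hrtimer_start_on_info my_info;

    hrtimer_start_on_info_init(&my_info);            /* one-time setup */

    /* arm my_timer on target_cpu (e.g., a dedicated release master) */
    if (hrtimer_start_on(target_cpu, &my_info, &my_timer,
                         ns_to_ktime(when_to_fire), HRTIMER_MODE_ABS))
            TRACE("info record still in use; timer not armed\n");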
3177 | |||
3178 | /** | ||
3179 | * hrtimer_try_to_cancel - try to deactivate a timer | ||
3180 | @@ -1634,6 +1728,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | ||
3181 | cpu_base->clock_base[i].cpu_base = cpu_base; | ||
3182 | |||
3183 | hrtimer_init_hres(cpu_base); | ||
3184 | + INIT_LIST_HEAD(&cpu_base->to_pull); | ||
3185 | } | ||
3186 | |||
3187 | #ifdef CONFIG_HOTPLUG_CPU | ||
3188 | diff --git a/kernel/printk.c b/kernel/printk.c | ||
3189 | index 8fe465a..9dc8ea1 100644 | ||
3190 | --- a/kernel/printk.c | ||
3191 | +++ b/kernel/printk.c | ||
3192 | @@ -74,6 +74,13 @@ int console_printk[4] = { | ||
3193 | }; | ||
3194 | |||
3195 | /* | ||
3196 | + * divert printk() messages when there is a LITMUS^RT debug listener | ||
3197 | + */ | ||
3198 | +#include <litmus/litmus.h> | ||
3199 | +int trace_override = 0; | ||
3200 | +int trace_recurse = 0; | ||
3201 | + | ||
3202 | +/* | ||
3203 | * Low level drivers may need that to know if they can schedule in | ||
3204 | * their unblank() callback or not. So let's export it. | ||
3205 | */ | ||
3206 | @@ -735,6 +742,9 @@ asmlinkage int vprintk(const char *fmt, va_list args) | ||
3207 | /* Emit the output into the temporary buffer */ | ||
3208 | printed_len += vscnprintf(printk_buf + printed_len, | ||
3209 | sizeof(printk_buf) - printed_len, fmt, args); | ||
3210 | + /* if the LITMUS^RT tracer is active, divert printk() messages */ | ||
3211 | + if (trace_override && !trace_recurse) | ||
3212 | + TRACE("%s", printk_buf); | ||
3213 | |||
3214 | |||
3215 | p = printk_buf; | ||
3216 | @@ -804,7 +814,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) | ||
3217 | * Try to acquire and then immediately release the | ||
3218 | * console semaphore. The release will do all the | ||
3219 | * actual magic (print out buffers, wake up klogd, | ||
3220 | - * etc). | ||
3221 | + * etc). | ||
3222 | * | ||
3223 | * The acquire_console_semaphore_for_printk() function | ||
3224 | * will release 'logbuf_lock' regardless of whether it | ||
3225 | @@ -1067,7 +1077,7 @@ int printk_needs_cpu(int cpu) | ||
3226 | |||
3227 | void wake_up_klogd(void) | ||
3228 | { | ||
3229 | - if (waitqueue_active(&log_wait)) | ||
3230 | + if (!trace_override && waitqueue_active(&log_wait)) | ||
3231 | __raw_get_cpu_var(printk_pending) = 1; | ||
3232 | } | ||
3233 | |||
3234 | diff --git a/kernel/sched.c b/kernel/sched.c | ||
3235 | index dc85ceb..1b13c8e 100644 | ||
3236 | --- a/kernel/sched.c | ||
3237 | +++ b/kernel/sched.c | ||
3238 | @@ -79,6 +79,11 @@ | ||
3239 | #include "sched_cpupri.h" | ||
3240 | #include "workqueue_sched.h" | ||
3241 | |||
3242 | +#include <litmus/sched_trace.h> | ||
3243 | +#include <litmus/trace.h> | ||
3244 | + | ||
3245 | +static void litmus_tick(struct rq*, struct task_struct*); | ||
3246 | + | ||
3247 | #define CREATE_TRACE_POINTS | ||
3248 | #include <trace/events/sched.h> | ||
3249 | |||
3250 | @@ -405,6 +410,12 @@ struct rt_rq { | ||
3251 | #endif | ||
3252 | }; | ||
3253 | |||
3254 | +/* Litmus related fields in a runqueue */ | ||
3255 | +struct litmus_rq { | ||
3256 | + unsigned long nr_running; | ||
3257 | + struct task_struct *prev; | ||
3258 | +}; | ||
3259 | + | ||
3260 | #ifdef CONFIG_SMP | ||
3261 | |||
3262 | /* | ||
3263 | @@ -471,6 +482,7 @@ struct rq { | ||
3264 | |||
3265 | struct cfs_rq cfs; | ||
3266 | struct rt_rq rt; | ||
3267 | + struct litmus_rq litmus; | ||
3268 | |||
3269 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
3270 | /* list of leaf cfs_rq on this cpu: */ | ||
3271 | @@ -566,8 +578,14 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | ||
3272 | * A queue event has occurred, and we're going to schedule. In | ||
3273 | * this case, we can save a useless back to back clock update. | ||
3274 | */ | ||
3275 | + /* LITMUS^RT: turning off the clock update is buggy in Linux 2.6.36; | ||
3276 | + * the scheduler can "forget" to re-enable the runqueue clock in some | ||
3277 | + * cases. LITMUS^RT amplifies the effects of this problem. Hence, we | ||
3278 | + * disable this optimization to avoid stalling clocks. */ | ||
3279 | + /* | ||
3280 | if (test_tsk_need_resched(p)) | ||
3281 | rq->skip_clock_update = 1; | ||
3282 | + */ | ||
3283 | } | ||
3284 | |||
3285 | static inline int cpu_of(struct rq *rq) | ||
3286 | @@ -1042,6 +1060,7 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer) | ||
3287 | raw_spin_lock(&rq->lock); | ||
3288 | update_rq_clock(rq); | ||
3289 | rq->curr->sched_class->task_tick(rq, rq->curr, 1); | ||
3290 | + litmus_tick(rq, rq->curr); | ||
3291 | raw_spin_unlock(&rq->lock); | ||
3292 | |||
3293 | return HRTIMER_NORESTART; | ||
3294 | @@ -1840,7 +1859,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) | ||
3295 | |||
3296 | static const struct sched_class rt_sched_class; | ||
3297 | |||
3298 | -#define sched_class_highest (&rt_sched_class) | ||
3299 | +#define sched_class_highest (&litmus_sched_class) | ||
3300 | #define for_each_class(class) \ | ||
3301 | for (class = sched_class_highest; class; class = class->next) | ||
3302 | |||
3303 | @@ -1920,6 +1939,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) | ||
3304 | #include "sched_idletask.c" | ||
3305 | #include "sched_fair.c" | ||
3306 | #include "sched_rt.c" | ||
3307 | +#include "../litmus/sched_litmus.c" | ||
3308 | #ifdef CONFIG_SCHED_DEBUG | ||
3309 | # include "sched_debug.c" | ||
3310 | #endif | ||
3311 | @@ -2352,6 +2372,9 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | ||
3312 | unsigned long en_flags = ENQUEUE_WAKEUP; | ||
3313 | struct rq *rq; | ||
3314 | |||
3315 | + if (is_realtime(p)) | ||
3316 | + TRACE_TASK(p, "try_to_wake_up() state:%d\n", p->state); | ||
3317 | + | ||
3318 | this_cpu = get_cpu(); | ||
3319 | |||
3320 | smp_wmb(); | ||
3321 | @@ -2366,7 +2389,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, | ||
3322 | orig_cpu = cpu; | ||
3323 | |||
3324 | #ifdef CONFIG_SMP | ||
3325 | - if (unlikely(task_running(rq, p))) | ||
3326 | + if (unlikely(task_running(rq, p)) || is_realtime(p)) | ||
3327 | goto out_activate; | ||
3328 | |||
3329 | /* | ||
3330 | @@ -2428,6 +2451,8 @@ out_activate: | ||
3331 | out_running: | ||
3332 | ttwu_post_activation(p, rq, wake_flags, success); | ||
3333 | out: | ||
3334 | + if (is_realtime(p)) | ||
3335 | + TRACE_TASK(p, "try_to_wake_up() done state:%d\n", p->state); | ||
3336 | task_rq_unlock(rq, &flags); | ||
3337 | put_cpu(); | ||
3338 | |||
3339 | @@ -2748,6 +2773,8 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | ||
3340 | */ | ||
3341 | prev_state = prev->state; | ||
3342 | finish_arch_switch(prev); | ||
3343 | + litmus->finish_switch(prev); | ||
3344 | + prev->rt_param.stack_in_use = NO_CPU; | ||
3345 | #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW | ||
3346 | local_irq_disable(); | ||
3347 | #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ | ||
3348 | @@ -2777,6 +2804,15 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev) | ||
3349 | { | ||
3350 | if (prev->sched_class->pre_schedule) | ||
3351 | prev->sched_class->pre_schedule(rq, prev); | ||
3352 | + | ||
3353 | + /* LITMUS^RT, a not-very-clean hack: we need to save the prev task | ||
3354 | + * as our scheduling decisions rely on it (once we drop the rq lock, | ||
3355 | + * something in prev can change...); there is no way to escape | ||
3356 | + * this hack apart from modifying pick_next_task(rq, _prev_) or | ||
3357 | + * falling back on the previous solution of decoupling | ||
3358 | + * scheduling decisions. | ||
3359 | + */ | ||
3360 | + rq->litmus.prev = prev; | ||
3361 | } | ||
3362 | |||
3363 | /* rq->lock is NOT held, but preemption is disabled */ | ||
3364 | @@ -3578,18 +3614,26 @@ void scheduler_tick(void) | ||
3365 | |||
3366 | sched_clock_tick(); | ||
3367 | |||
3368 | + TS_TICK_START(current); | ||
3369 | + | ||
3370 | raw_spin_lock(&rq->lock); | ||
3371 | update_rq_clock(rq); | ||
3372 | update_cpu_load_active(rq); | ||
3373 | curr->sched_class->task_tick(rq, curr, 0); | ||
3374 | + | ||
3375 | + /* litmus_tick may force current to resched */ | ||
3376 | + litmus_tick(rq, curr); | ||
3377 | + | ||
3378 | raw_spin_unlock(&rq->lock); | ||
3379 | |||
3380 | perf_event_task_tick(curr); | ||
3381 | |||
3382 | #ifdef CONFIG_SMP | ||
3383 | rq->idle_at_tick = idle_cpu(cpu); | ||
3384 | - trigger_load_balance(rq, cpu); | ||
3385 | + if (!is_realtime(current)) | ||
3386 | + trigger_load_balance(rq, cpu); | ||
3387 | #endif | ||
3388 | + TS_TICK_END(current); | ||
3389 | } | ||
3390 | |||
3391 | notrace unsigned long get_parent_ip(unsigned long addr) | ||
3392 | @@ -3716,12 +3760,20 @@ pick_next_task(struct rq *rq) | ||
3393 | /* | ||
3394 | * Optimization: we know that if all tasks are in | ||
3395 | * the fair class we can call that function directly: | ||
3396 | - */ | ||
3397 | - if (likely(rq->nr_running == rq->cfs.nr_running)) { | ||
3398 | + | ||
3399 | + * NOT IN LITMUS^RT! | ||
3400 | + | ||
3401 | + * This breaks many assumptions in the plugins. | ||
3402 | + * Do not uncomment without thinking long and hard | ||
3403 | + * about how this affects global plugins such as GSN-EDF. | ||
3404 | + | ||
3405 | + if (rq->nr_running == rq->cfs.nr_running) { | ||
3406 | + TRACE("taking shortcut in pick_next_task()\n"); | ||
3407 | p = fair_sched_class.pick_next_task(rq); | ||
3408 | if (likely(p)) | ||
3409 | return p; | ||
3410 | } | ||
3411 | + */ | ||
3412 | |||
3413 | class = sched_class_highest; | ||
3414 | for ( ; ; ) { | ||
3415 | @@ -3748,6 +3800,7 @@ asmlinkage void __sched schedule(void) | ||
3416 | |||
3417 | need_resched: | ||
3418 | preempt_disable(); | ||
3419 | + sched_state_entered_schedule(); | ||
3420 | cpu = smp_processor_id(); | ||
3421 | rq = cpu_rq(cpu); | ||
3422 | rcu_note_context_switch(cpu); | ||
3423 | @@ -3755,6 +3808,8 @@ need_resched: | ||
3424 | |||
3425 | release_kernel_lock(prev); | ||
3426 | need_resched_nonpreemptible: | ||
3427 | + TS_SCHED_START; | ||
3428 | + sched_trace_task_switch_away(prev); | ||
3429 | |||
3430 | schedule_debug(prev); | ||
3431 | |||
3432 | @@ -3803,7 +3858,10 @@ need_resched_nonpreemptible: | ||
3433 | rq->curr = next; | ||
3434 | ++*switch_count; | ||
3435 | |||
3436 | + TS_SCHED_END(next); | ||
3437 | + TS_CXS_START(next); | ||
3438 | context_switch(rq, prev, next); /* unlocks the rq */ | ||
3439 | + TS_CXS_END(current); | ||
3440 | /* | ||
3441 | * The context switch have flipped the stack from under us | ||
3442 | * and restored the local variables which were saved when | ||
3443 | @@ -3812,17 +3870,24 @@ need_resched_nonpreemptible: | ||
3444 | */ | ||
3445 | cpu = smp_processor_id(); | ||
3446 | rq = cpu_rq(cpu); | ||
3447 | - } else | ||
3448 | + } else { | ||
3449 | + TS_SCHED_END(prev); | ||
3450 | raw_spin_unlock_irq(&rq->lock); | ||
3451 | + } | ||
3452 | + | ||
3453 | + sched_trace_task_switch_to(current); | ||
3454 | |||
3455 | post_schedule(rq); | ||
3456 | |||
3457 | - if (unlikely(reacquire_kernel_lock(prev))) | ||
3458 | + if (sched_state_validate_switch() || unlikely(reacquire_kernel_lock(prev))) | ||
3459 | goto need_resched_nonpreemptible; | ||
3460 | |||
3461 | preempt_enable_no_resched(); | ||
3462 | if (need_resched()) | ||
3463 | goto need_resched; | ||
3464 | + | ||
3465 | + if (srp_active()) | ||
3466 | + srp_ceiling_block(); | ||
3467 | } | ||
3468 | EXPORT_SYMBOL(schedule); | ||
3469 | |||
3470 | @@ -4108,6 +4173,17 @@ void complete_all(struct completion *x) | ||
3471 | } | ||
3472 | EXPORT_SYMBOL(complete_all); | ||
3473 | |||
3474 | +void complete_n(struct completion *x, int n) | ||
3475 | +{ | ||
3476 | + unsigned long flags; | ||
3477 | + | ||
3478 | + spin_lock_irqsave(&x->wait.lock, flags); | ||
3479 | + x->done += n; | ||
3480 | + __wake_up_common(&x->wait, TASK_NORMAL, n, 0, NULL); | ||
3481 | + spin_unlock_irqrestore(&x->wait.lock, flags); | ||
3482 | +} | ||
3483 | +EXPORT_SYMBOL(complete_n); | ||
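A hedged sketch of how complete_n() is meant to be used (LITMUS^RT employs it for synchronous task-set releases, where exactly n waiting tasks must be woken at once):

    static DECLARE_COMPLETION(ts_release);   /* illustrative */

    /* each of the n participating tasks blocks here ... */
    wait_for_completion(&ts_release);

    /* ... until the releasing context wakes all n waiters in one shot */
    complete_n(&ts_release, n);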
3484 | + | ||
3485 | static inline long __sched | ||
3486 | do_wait_for_common(struct completion *x, long timeout, int state) | ||
3487 | { | ||
3488 | @@ -4550,7 +4626,9 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | ||
3489 | p->normal_prio = normal_prio(p); | ||
3490 | /* we are holding p->pi_lock already */ | ||
3491 | p->prio = rt_mutex_getprio(p); | ||
3492 | - if (rt_prio(p->prio)) | ||
3493 | + if (p->policy == SCHED_LITMUS) | ||
3494 | + p->sched_class = &litmus_sched_class; | ||
3495 | + else if (rt_prio(p->prio)) | ||
3496 | p->sched_class = &rt_sched_class; | ||
3497 | else | ||
3498 | p->sched_class = &fair_sched_class; | ||
3499 | @@ -4595,7 +4673,7 @@ recheck: | ||
3500 | |||
3501 | if (policy != SCHED_FIFO && policy != SCHED_RR && | ||
3502 | policy != SCHED_NORMAL && policy != SCHED_BATCH && | ||
3503 | - policy != SCHED_IDLE) | ||
3504 | + policy != SCHED_IDLE && policy != SCHED_LITMUS) | ||
3505 | return -EINVAL; | ||
3506 | } | ||
3507 | |||
3508 | @@ -4610,6 +4688,8 @@ recheck: | ||
3509 | return -EINVAL; | ||
3510 | if (rt_policy(policy) != (param->sched_priority != 0)) | ||
3511 | return -EINVAL; | ||
3512 | + if (policy == SCHED_LITMUS && policy == p->policy) | ||
3513 | + return -EINVAL; | ||
3514 | |||
3515 | /* | ||
3516 | * Allow unprivileged RT tasks to decrease priority: | ||
3517 | @@ -4650,6 +4730,12 @@ recheck: | ||
3518 | return retval; | ||
3519 | } | ||
3520 | |||
3521 | + if (policy == SCHED_LITMUS) { | ||
3522 | + retval = litmus_admit_task(p); | ||
3523 | + if (retval) | ||
3524 | + return retval; | ||
3525 | + } | ||
3526 | + | ||
3527 | /* | ||
3528 | * make sure no PI-waiters arrive (or leave) while we are | ||
3529 | * changing the priority of the task: | ||
3530 | @@ -4692,10 +4778,19 @@ recheck: | ||
3531 | |||
3532 | p->sched_reset_on_fork = reset_on_fork; | ||
3533 | |||
3534 | + if (p->policy == SCHED_LITMUS) | ||
3535 | + litmus_exit_task(p); | ||
3536 | + | ||
3537 | oldprio = p->prio; | ||
3538 | prev_class = p->sched_class; | ||
3539 | __setscheduler(rq, p, policy, param->sched_priority); | ||
3540 | |||
3541 | + if (policy == SCHED_LITMUS) { | ||
3542 | + p->rt_param.stack_in_use = running ? rq->cpu : NO_CPU; | ||
3543 | + p->rt_param.present = running; | ||
3544 | + litmus->task_new(p, on_rq, running); | ||
3545 | + } | ||
3546 | + | ||
3547 | if (running) | ||
3548 | p->sched_class->set_curr_task(rq); | ||
3549 | if (on_rq) { | ||
3550 | @@ -4865,10 +4960,11 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) | ||
3551 | rcu_read_lock(); | ||
3552 | |||
3553 | p = find_process_by_pid(pid); | ||
3554 | - if (!p) { | ||
3555 | + /* Don't set affinity if the task was not found or is a LITMUS^RT task */ | ||
3556 | + if (!p || is_realtime(p)) { | ||
3557 | rcu_read_unlock(); | ||
3558 | put_online_cpus(); | ||
3559 | - return -ESRCH; | ||
3560 | + return p ? -EPERM : -ESRCH; | ||
3561 | } | ||
3562 | |||
3563 | /* Prevent p going away */ | ||
3564 | diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c | ||
3565 | index db3f674..e0e8d5c 100644 | ||
3566 | --- a/kernel/sched_fair.c | ||
3567 | +++ b/kernel/sched_fair.c | ||
3568 | @@ -1654,7 +1654,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ | ||
3569 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
3570 | int scale = cfs_rq->nr_running >= sched_nr_latency; | ||
3571 | |||
3572 | - if (unlikely(rt_prio(p->prio))) | ||
3573 | + if (unlikely(rt_prio(p->prio)) || p->policy == SCHED_LITMUS) | ||
3574 | goto preempt; | ||
3575 | |||
3576 | if (unlikely(p->sched_class != &fair_sched_class)) | ||
3577 | diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c | ||
3578 | index d10c80e..e40e7fe 100644 | ||
3579 | --- a/kernel/sched_rt.c | ||
3580 | +++ b/kernel/sched_rt.c | ||
3581 | @@ -1013,7 +1013,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | ||
3582 | */ | ||
3583 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags) | ||
3584 | { | ||
3585 | - if (p->prio < rq->curr->prio) { | ||
3586 | + if (p->prio < rq->curr->prio || p->policy == SCHED_LITMUS) { | ||
3587 | resched_task(rq->curr); | ||
3588 | return; | ||
3589 | } | ||
3590 | diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c | ||
3591 | index 3e216e0..bb2d8b7 100644 | ||
3592 | --- a/kernel/time/tick-sched.c | ||
3593 | +++ b/kernel/time/tick-sched.c | ||
3594 | @@ -768,12 +768,53 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | ||
3595 | } | ||
3596 | |||
3597 | /** | ||
3598 | + * tick_set_quanta_type - set the quanta type via the "quanta=" boot option. | ||
3599 | + * The default is the standard setup with ticks staggered over the first | ||
3600 | + * half of the tick period. | ||
3601 | + */ | ||
3602 | +int quanta_type = LINUX_DEFAULT_TICKS; | ||
3603 | +static int __init tick_set_quanta_type(char *str) | ||
3604 | +{ | ||
3605 | + if (strcmp("aligned", str) == 0) { | ||
3606 | + quanta_type = LITMUS_ALIGNED_TICKS; | ||
3607 | + printk(KERN_INFO "LITMUS^RT: setting aligned quanta\n"); | ||
3608 | + } | ||
3609 | + else if (strcmp("staggered", str) == 0) { | ||
3610 | + quanta_type = LITMUS_STAGGERED_TICKS; | ||
3611 | + printk(KERN_INFO "LITMUS^RT: setting staggered quanta\n"); | ||
3612 | + } | ||
3613 | + return 1; | ||
3614 | +} | ||
3615 | +__setup("quanta=", tick_set_quanta_type); | ||
3616 | + | ||
3617 | +u64 cpu_stagger_offset(int cpu) | ||
3618 | +{ | ||
3619 | + u64 offset = 0; | ||
3620 | + switch (quanta_type) { | ||
3621 | + case LITMUS_ALIGNED_TICKS: | ||
3622 | + offset = 0; | ||
3623 | + break; | ||
3624 | + case LITMUS_STAGGERED_TICKS: | ||
3625 | + offset = ktime_to_ns(tick_period); | ||
3626 | + do_div(offset, num_possible_cpus()); | ||
3627 | + offset *= cpu; | ||
3628 | + break; | ||
3629 | + default: | ||
3630 | + offset = ktime_to_ns(tick_period) >> 1; | ||
3631 | + do_div(offset, num_possible_cpus()); | ||
3632 | + offset *= cpu; | ||
3633 | + } | ||
3634 | + return offset; | ||
3635 | +} | ||
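A worked example, assuming HZ = 1000 (i.e., tick_period = 1 ms) on a four-CPU system: booting with quanta=aligned gives every CPU an offset of 0; quanta=staggered spreads the offsets over the full period, i.e., 0, 250 us, 500 us, and 750 us for CPUs 0-3; the Linux default staggers ticks over only the first half of the period, i.e., 0, 125 us, 250 us, and 375 us.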
3636 | + | ||
3637 | +/** | ||
3638 | * tick_setup_sched_timer - setup the tick emulation timer | ||
3639 | */ | ||
3640 | void tick_setup_sched_timer(void) | ||
3641 | { | ||
3642 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | ||
3643 | ktime_t now = ktime_get(); | ||
3644 | + u64 offset; | ||
3645 | |||
3646 | /* | ||
3647 | * Emulate tick processing via per-CPU hrtimers: | ||
3648 | @@ -784,6 +825,12 @@ void tick_setup_sched_timer(void) | ||
3649 | /* Get the next period (per cpu) */ | ||
3650 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); | ||
3651 | |||
3652 | + /* Offset must be set correctly to achieve desired quanta type. */ | ||
3653 | + offset = cpu_stagger_offset(smp_processor_id()); | ||
3654 | + | ||
3655 | + /* Add the correct offset to expiration time */ | ||
3656 | + hrtimer_add_expires_ns(&ts->sched_timer, offset); | ||
3657 | + | ||
3658 | for (;;) { | ||
3659 | hrtimer_forward(&ts->sched_timer, now, tick_period); | ||
3660 | hrtimer_start_expires(&ts->sched_timer, | ||
3661 | diff --git a/litmus/Kconfig b/litmus/Kconfig | ||
3662 | new file mode 100644 | ||
3663 | index 0000000..a2f2678 | ||
3664 | --- /dev/null | ||
3665 | +++ b/litmus/Kconfig | ||
3666 | @@ -0,0 +1,194 @@ | ||
3667 | +menu "LITMUS^RT" | ||
3668 | + | ||
3669 | +menu "Scheduling" | ||
3670 | + | ||
3671 | +config PLUGIN_CEDF | ||
3672 | + bool "Clustered-EDF" | ||
3673 | + depends on X86 && SYSFS | ||
3674 | + default y | ||
3675 | + help | ||
3676 | + Include the Clustered EDF (C-EDF) plugin in the kernel. | ||
3677 | + This is appropriate for large platforms with shared caches. | ||
3678 | + On smaller platforms (e.g., ARM PB11MPCore), using C-EDF | ||
3679 | + makes little sense since there aren't any shared caches. | ||
3680 | + | ||
3681 | +config PLUGIN_PFAIR | ||
3682 | + bool "PFAIR" | ||
3683 | + depends on HIGH_RES_TIMERS && !NO_HZ | ||
3684 | + default y | ||
3685 | + help | ||
3686 | + Include the PFAIR plugin (i.e., the PD^2 scheduler) in the kernel. | ||
3687 | + The PFAIR plugin requires high resolution timers (for staggered quanta) | ||
3688 | + and does not support NO_HZ (quanta could be missed when the system is idle). | ||
3689 | + | ||
3690 | + If unsure, say Yes. | ||
3691 | + | ||
3692 | +config RELEASE_MASTER | ||
3693 | + bool "Release-master Support" | ||
3694 | + depends on ARCH_HAS_SEND_PULL_TIMERS | ||
3695 | + default n | ||
3696 | + help | ||
3697 | + Allow one processor to act as a dedicated interrupt processor | ||
3698 | + that services all timer interrupts, but that does not schedule | ||
3699 | + real-time tasks. See RTSS'09 paper for details | ||
3700 | + (http://www.cs.unc.edu/~anderson/papers.html). | ||
3701 | + Currently only supported by GSN-EDF. | ||
3702 | + | ||
3703 | +endmenu | ||
3704 | + | ||
3705 | +menu "Real-Time Synchronization" | ||
3706 | + | ||
3707 | +config NP_SECTION | ||
3708 | + bool "Non-preemptive section support" | ||
3709 | + default n | ||
3710 | + help | ||
3711 | + Allow tasks to become non-preemptive. | ||
3712 | + Note that plugins still need to explicitly support non-preemptivity. | ||
3713 | + Currently, only GSN-EDF and PSN-EDF have such support. | ||
3714 | + | ||
3715 | + This is required to support the FMLP. | ||
3716 | + If disabled, all tasks will be considered preemptable at all times. | ||
3717 | + | ||
3718 | +config SRP | ||
3719 | + bool "Stack Resource Policy (SRP)" | ||
3720 | + default n | ||
3721 | + help | ||
3722 | + Include support for Baker's Stack Resource Policy. | ||
3723 | + | ||
3724 | + Say Yes if you want local long critical section | ||
3725 | + synchronization support for the FMLP. | ||
3726 | + | ||
3727 | +config FMLP | ||
3728 | + bool "FMLP support" | ||
3729 | + depends on NP_SECTION | ||
3730 | + default n | ||
3731 | + help | ||
3732 | + Include support for deterministic multiprocessor real-time | ||
3733 | + synchronization (the FMLP). | ||
3734 | + | ||
3735 | + Say Yes if you want FMLP long critical section | ||
3736 | + synchronization support. | ||
3737 | + | ||
3738 | +endmenu | ||
3739 | + | ||
3740 | +menu "Tracing" | ||
3741 | + | ||
3742 | +config FEATHER_TRACE | ||
3743 | + bool "Feather-Trace Infrastructure" | ||
3744 | + default y | ||
3745 | + help | ||
3746 | + Feather-Trace basic tracing infrastructure. Includes device file | ||
3747 | + driver and instrumentation point support. | ||
3748 | + | ||
3749 | + There are actually two implementations of Feather-Trace. | ||
3750 | + 1) A slower, but portable, default implementation. | ||
3751 | + 2) Architecture-specific implementations that rewrite kernel .text at runtime. | ||
3752 | + | ||
3753 | + If enabled, Feather-Trace will be based on 2) if available (currently only for x86). | ||
3754 | + However, if DEBUG_RODATA=y, then Feather-Trace will choose option 1) in any case | ||
3755 | + to avoid problems with write-protected .text pages. | ||
3756 | + | ||
3757 | + Bottom line: to avoid increased overheads, choose DEBUG_RODATA=n. | ||
3758 | + | ||
3759 | + Note that this option only enables the basic Feather-Trace infrastructure; | ||
3760 | + you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to | ||
3761 | + actually enable any events. | ||
3762 | + | ||
3763 | +config SCHED_TASK_TRACE | ||
3764 | + bool "Trace real-time tasks" | ||
3765 | + depends on FEATHER_TRACE | ||
3766 | + default y | ||
3767 | + help | ||
3768 | + Include support for the sched_trace_XXX() tracing functions. This | ||
3769 | + allows the collection of real-time task events such as job | ||
3770 | + completions, job releases, early completions, etc. This results in a | ||
3771 | + small overhead in the scheduling code. Disable if the overhead is not | ||
3772 | + acceptable (e.g., benchmarking). | ||
3773 | + | ||
3774 | + Say Yes for debugging. | ||
3775 | + Say No for overhead tracing. | ||
3776 | + | ||
3777 | +config SCHED_TASK_TRACE_SHIFT | ||
3778 | + int "Buffer size for sched_trace_xxx() events" | ||
3779 | + depends on SCHED_TASK_TRACE | ||
3780 | + range 8 13 | ||
3781 | + default 9 | ||
3782 | + help | ||
3783 | + | ||
3784 | + Select the buffer size of sched_trace_xxx() events as a power of two. | ||
3785 | + These buffers are statically allocated as per-CPU data. Each event | ||
3786 | + requires 24 bytes storage plus one additional flag byte. Too large | ||
3787 | + buffers can cause issues with the per-cpu allocator (and waste | ||
3788 | + memory). Too small buffers can cause scheduling events to be lost. The | ||
3789 | + "right" size is workload dependent and depends on the number of tasks, | ||
3790 | + each task's period, each task's number of suspensions, and how often | ||
3791 | + the buffer is flushed. | ||
3792 | + | ||
3793 | + Examples: 12 => 4k events | ||
3794 | + 10 => 1k events | ||
3795 | + 8 => 512 events | ||
3796 | + | ||
3797 | +config SCHED_OVERHEAD_TRACE | ||
3798 | + bool "Record timestamps for overhead measurements" | ||
3799 | + depends on FEATHER_TRACE | ||
3800 | + default n | ||
3801 | + help | ||
3802 | + Export event stream for overhead tracing. | ||
3803 | + Say Yes for overhead tracing. | ||
3804 | + | ||
3805 | +config SCHED_DEBUG_TRACE | ||
3806 | + bool "TRACE() debugging" | ||
3807 | + default y | ||
3808 | + help | ||
3809 | + Include support for sched_trace_log_message(), which is used to | ||
3810 | + implement TRACE(). If disabled, no TRACE() messages will be included | ||
3811 | + in the kernel, and no overheads due to debugging statements will be | ||
3812 | + incurred by the scheduler. Disable if the overhead is not acceptable | ||
3813 | + (e.g. benchmarking). | ||
3814 | + | ||
3815 | + Say Yes for debugging. | ||
3816 | + Say No for overhead tracing. | ||
3817 | + | ||
3818 | +config SCHED_DEBUG_TRACE_SHIFT | ||
3819 | + int "Buffer size for TRACE() buffer" | ||
3820 | + depends on SCHED_DEBUG_TRACE | ||
3821 | + range 14 22 | ||
3822 | + default 18 | ||
3823 | + help | ||
3824 | + | ||
3825 | + Select the amount of memory needed for the TRACE() buffer, as a | ||
3826 | + power of two. The TRACE() buffer is global and statically allocated. If | ||
3827 | + the buffer is too small, there will be holes in the TRACE() log if the | ||
3828 | + buffer-flushing task is starved. | ||
3829 | + | ||
3830 | + The default should be sufficient for most systems. Increase the buffer | ||
3831 | + size if the log contains holes. Reduce the buffer size when running on | ||
3832 | + a memory-constrained system. | ||
3833 | + | ||
3834 | + Examples: 14 => 16KB | ||
3835 | + 18 => 256KB | ||
3836 | + 20 => 1MB | ||
3837 | + | ||
3838 | + This buffer is exported to userspace using a misc device as | ||
3839 | + 'litmus/log'. On a system with default udev rules, a corresponding | ||
3840 | + character device node should be created at /dev/litmus/log. The buffer | ||
3841 | + can be flushed using cat, e.g., 'cat /dev/litmus/log > my_log_file.txt'. | ||
3842 | + | ||
3843 | +config SCHED_DEBUG_TRACE_CALLER | ||
3844 | + bool "Include [function@file:line] tag in TRACE() log" | ||
3845 | + depends on SCHED_DEBUG_TRACE | ||
3846 | + default n | ||
3847 | + help | ||
3848 | + With this option enabled, TRACE() prepends | ||
3849 | + | ||
3850 | + "[<function name>@<filename>:<line number>]" | ||
3851 | + | ||
3852 | + to each message in the debug log. Enable this to aid in figuring out | ||
3853 | + what was called in which order. The downside is that it adds a lot of | ||
3854 | + clutter. | ||
3855 | + | ||
3856 | + If unsure, say No. | ||
3857 | + | ||
3858 | +endmenu | ||
3859 | + | ||
3860 | +endmenu | ||
3861 | diff --git a/litmus/Makefile b/litmus/Makefile | ||
3862 | new file mode 100644 | ||
3863 | index 0000000..b7366b5 | ||
3864 | --- /dev/null | ||
3865 | +++ b/litmus/Makefile | ||
3866 | @@ -0,0 +1,27 @@ | ||
3867 | +# | ||
3868 | +# Makefile for LITMUS^RT | ||
3869 | +# | ||
3870 | + | ||
3871 | +obj-y = sched_plugin.o litmus.o \ | ||
3872 | + preempt.o \ | ||
3873 | + litmus_proc.o \ | ||
3874 | + budget.o \ | ||
3875 | + jobs.o \ | ||
3876 | + sync.o \ | ||
3877 | + rt_domain.o \ | ||
3878 | + edf_common.o \ | ||
3879 | + fdso.o \ | ||
3880 | + srp.o \ | ||
3881 | + fmlp.o \ | ||
3882 | + bheap.o \ | ||
3883 | + ctrldev.o \ | ||
3884 | + sched_gsn_edf.o \ | ||
3885 | + sched_psn_edf.o | ||
3886 | + | ||
3887 | +obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o | ||
3888 | +obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o | ||
3889 | + | ||
3890 | +obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o | ||
3891 | +obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o | ||
3892 | +obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o | ||
3893 | +obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o | ||
3894 | diff --git a/litmus/bheap.c b/litmus/bheap.c | ||
3895 | new file mode 100644 | ||
3896 | index 0000000..528af97 | ||
3897 | --- /dev/null | ||
3898 | +++ b/litmus/bheap.c | ||
3899 | @@ -0,0 +1,314 @@ | ||
3900 | +#include "linux/kernel.h" | ||
3901 | +#include "litmus/bheap.h" | ||
3902 | + | ||
3903 | +void bheap_init(struct bheap* heap) | ||
3904 | +{ | ||
3905 | + heap->head = NULL; | ||
3906 | + heap->min = NULL; | ||
3907 | +} | ||
3908 | + | ||
3909 | +void bheap_node_init(struct bheap_node** _h, void* value) | ||
3910 | +{ | ||
3911 | + struct bheap_node* h = *_h; | ||
3912 | + h->parent = NULL; | ||
3913 | + h->next = NULL; | ||
3914 | + h->child = NULL; | ||
3915 | + h->degree = NOT_IN_HEAP; | ||
3916 | + h->value = value; | ||
3917 | + h->ref = _h; | ||
3918 | +} | ||
3919 | + | ||
3920 | + | ||
3921 | +/* make child a subtree of root */ | ||
3922 | +static void __bheap_link(struct bheap_node* root, | ||
3923 | + struct bheap_node* child) | ||
3924 | +{ | ||
3925 | + child->parent = root; | ||
3926 | + child->next = root->child; | ||
3927 | + root->child = child; | ||
3928 | + root->degree++; | ||
3929 | +} | ||
3930 | + | ||
3931 | +/* merge root lists */ | ||
3932 | +static struct bheap_node* __bheap_merge(struct bheap_node* a, | ||
3933 | + struct bheap_node* b) | ||
3934 | +{ | ||
3935 | + struct bheap_node* head = NULL; | ||
3936 | + struct bheap_node** pos = &head; | ||
3937 | + | ||
3938 | + while (a && b) { | ||
3939 | + if (a->degree < b->degree) { | ||
3940 | + *pos = a; | ||
3941 | + a = a->next; | ||
3942 | + } else { | ||
3943 | + *pos = b; | ||
3944 | + b = b->next; | ||
3945 | + } | ||
3946 | + pos = &(*pos)->next; | ||
3947 | + } | ||
3948 | + if (a) | ||
3949 | + *pos = a; | ||
3950 | + else | ||
3951 | + *pos = b; | ||
3952 | + return head; | ||
3953 | +} | ||
3954 | + | ||
3955 | +/* reverse a linked list of nodes. also clears parent pointer */ | ||
3956 | +static struct bheap_node* __bheap_reverse(struct bheap_node* h) | ||
3957 | +{ | ||
3958 | + struct bheap_node* tail = NULL; | ||
3959 | + struct bheap_node* next; | ||
3960 | + | ||
3961 | + if (!h) | ||
3962 | + return h; | ||
3963 | + | ||
3964 | + h->parent = NULL; | ||
3965 | + while (h->next) { | ||
3966 | + next = h->next; | ||
3967 | + h->next = tail; | ||
3968 | + tail = h; | ||
3969 | + h = next; | ||
3970 | + h->parent = NULL; | ||
3971 | + } | ||
3972 | + h->next = tail; | ||
3973 | + return h; | ||
3974 | +} | ||
3975 | + | ||
3976 | +static void __bheap_min(bheap_prio_t higher_prio, struct bheap* heap, | ||
3977 | + struct bheap_node** prev, struct bheap_node** node) | ||
3978 | +{ | ||
3979 | + struct bheap_node *_prev, *cur; | ||
3980 | + *prev = NULL; | ||
3981 | + | ||
3982 | + if (!heap->head) { | ||
3983 | + *node = NULL; | ||
3984 | + return; | ||
3985 | + } | ||
3986 | + | ||
3987 | + *node = heap->head; | ||
3988 | + _prev = heap->head; | ||
3989 | + cur = heap->head->next; | ||
3990 | + while (cur) { | ||
3991 | + if (higher_prio(cur, *node)) { | ||
3992 | + *node = cur; | ||
3993 | + *prev = _prev; | ||
3994 | + } | ||
3995 | + _prev = cur; | ||
3996 | + cur = cur->next; | ||
3997 | + } | ||
3998 | +} | ||
3999 | + | ||
4000 | +static void __bheap_union(bheap_prio_t higher_prio, struct bheap* heap, | ||
4001 | + struct bheap_node* h2) | ||
4002 | +{ | ||
4003 | + struct bheap_node* h1; | ||
4004 | + struct bheap_node *prev, *x, *next; | ||
4005 | + if (!h2) | ||
4006 | + return; | ||
4007 | + h1 = heap->head; | ||
4008 | + if (!h1) { | ||
4009 | + heap->head = h2; | ||
4010 | + return; | ||
4011 | + } | ||
4012 | + h1 = __bheap_merge(h1, h2); | ||
4013 | + prev = NULL; | ||
4014 | + x = h1; | ||
4015 | + next = x->next; | ||
4016 | + while (next) { | ||
4017 | + if (x->degree != next->degree || | ||
4018 | + (next->next && next->next->degree == x->degree)) { | ||
4019 | + /* nothing to do, advance */ | ||
4020 | + prev = x; | ||
4021 | + x = next; | ||
4022 | + } else if (higher_prio(x, next)) { | ||
4023 | + /* x becomes the root of next */ | ||
4024 | + x->next = next->next; | ||
4025 | + __bheap_link(x, next); | ||
4026 | + } else { | ||
4027 | + /* next becomes the root of x */ | ||
4028 | + if (prev) | ||
4029 | + prev->next = next; | ||
4030 | + else | ||
4031 | + h1 = next; | ||
4032 | + __bheap_link(next, x); | ||
4033 | + x = next; | ||
4034 | + } | ||
4035 | + next = x->next; | ||
4036 | + } | ||
4037 | + heap->head = h1; | ||
4038 | +} | ||
4039 | + | ||
4040 | +static struct bheap_node* __bheap_extract_min(bheap_prio_t higher_prio, | ||
4041 | + struct bheap* heap) | ||
4042 | +{ | ||
4043 | + struct bheap_node *prev, *node; | ||
4044 | + __bheap_min(higher_prio, heap, &prev, &node); | ||
4045 | + if (!node) | ||
4046 | + return NULL; | ||
4047 | + if (prev) | ||
4048 | + prev->next = node->next; | ||
4049 | + else | ||
4050 | + heap->head = node->next; | ||
4051 | + __bheap_union(higher_prio, heap, __bheap_reverse(node->child)); | ||
4052 | + return node; | ||
4053 | +} | ||
4054 | + | ||
4055 | +/* insert (and reinitialize) a node into the heap */ | ||
4056 | +void bheap_insert(bheap_prio_t higher_prio, struct bheap* heap, | ||
4057 | + struct bheap_node* node) | ||
4058 | +{ | ||
4059 | + struct bheap_node *min; | ||
4060 | + node->child = NULL; | ||
4061 | + node->parent = NULL; | ||
4062 | + node->next = NULL; | ||
4063 | + node->degree = 0; | ||
4064 | + if (heap->min && higher_prio(node, heap->min)) { | ||
4065 | + /* swap min cache */ | ||
4066 | + min = heap->min; | ||
4067 | + min->child = NULL; | ||
4068 | + min->parent = NULL; | ||
4069 | + min->next = NULL; | ||
4070 | + min->degree = 0; | ||
4071 | + __bheap_union(higher_prio, heap, min); | ||
4072 | + heap->min = node; | ||
4073 | + } else | ||
4074 | + __bheap_union(higher_prio, heap, node); | ||
4075 | +} | ||
4076 | + | ||
4077 | +void bheap_uncache_min(bheap_prio_t higher_prio, struct bheap* heap) | ||
4078 | +{ | ||
4079 | + struct bheap_node* min; | ||
4080 | + if (heap->min) { | ||
4081 | + min = heap->min; | ||
4082 | + heap->min = NULL; | ||
4083 | + bheap_insert(higher_prio, heap, min); | ||
4084 | + } | ||
4085 | +} | ||
4086 | + | ||
4087 | +/* merge addition into target */ | ||
4088 | +void bheap_union(bheap_prio_t higher_prio, | ||
4089 | + struct bheap* target, struct bheap* addition) | ||
4090 | +{ | ||
4091 | + /* first insert any cached minima, if necessary */ | ||
4092 | + bheap_uncache_min(higher_prio, target); | ||
4093 | + bheap_uncache_min(higher_prio, addition); | ||
4094 | + __bheap_union(higher_prio, target, addition->head); | ||
4095 | + /* this is a destructive merge */ | ||
4096 | + addition->head = NULL; | ||
4097 | +} | ||
4098 | + | ||
4099 | +struct bheap_node* bheap_peek(bheap_prio_t higher_prio, | ||
4100 | + struct bheap* heap) | ||
4101 | +{ | ||
4102 | + if (!heap->min) | ||
4103 | + heap->min = __bheap_extract_min(higher_prio, heap); | ||
4104 | + return heap->min; | ||
4105 | +} | ||
4106 | + | ||
4107 | +struct bheap_node* bheap_take(bheap_prio_t higher_prio, | ||
4108 | + struct bheap* heap) | ||
4109 | +{ | ||
4110 | + struct bheap_node *node; | ||
4111 | + if (!heap->min) | ||
4112 | + heap->min = __bheap_extract_min(higher_prio, heap); | ||
4113 | + node = heap->min; | ||
4114 | + heap->min = NULL; | ||
4115 | + if (node) | ||
4116 | + node->degree = NOT_IN_HEAP; | ||
4117 | + return node; | ||
4118 | +} | ||
4119 | + | ||
4120 | +int bheap_decrease(bheap_prio_t higher_prio, struct bheap_node* node) | ||
4121 | +{ | ||
4122 | + struct bheap_node *parent; | ||
4123 | + struct bheap_node** tmp_ref; | ||
4124 | + void* tmp; | ||
4125 | + | ||
4126 | + /* bubble up */ | ||
4127 | + parent = node->parent; | ||
4128 | + while (parent && higher_prio(node, parent)) { | ||
4129 | + /* swap parent and node */ | ||
4130 | + tmp = parent->value; | ||
4131 | + parent->value = node->value; | ||
4132 | + node->value = tmp; | ||
4133 | + /* swap references */ | ||
4134 | + *(parent->ref) = node; | ||
4135 | + *(node->ref) = parent; | ||
4136 | + tmp_ref = parent->ref; | ||
4137 | + parent->ref = node->ref; | ||
4138 | + node->ref = tmp_ref; | ||
4139 | + /* step up */ | ||
4140 | + node = parent; | ||
4141 | + parent = node->parent; | ||
4142 | + } | ||
4143 | + | ||
4144 | + return parent != NULL; | ||
4145 | +} | ||
4146 | + | ||
4147 | +void bheap_delete(bheap_prio_t higher_prio, struct bheap* heap, | ||
4148 | + struct bheap_node* node) | ||
4149 | +{ | ||
4150 | + struct bheap_node *parent, *prev, *pos; | ||
4151 | + struct bheap_node** tmp_ref; | ||
4152 | + void* tmp; | ||
4153 | + | ||
4154 | + if (heap->min != node) { | ||
4155 | + /* bubble up */ | ||
4156 | + parent = node->parent; | ||
4157 | + while (parent) { | ||
4158 | + /* swap parent and node */ | ||
4159 | + tmp = parent->value; | ||
4160 | + parent->value = node->value; | ||
4161 | + node->value = tmp; | ||
4162 | + /* swap references */ | ||
4163 | + *(parent->ref) = node; | ||
4164 | + *(node->ref) = parent; | ||
4165 | + tmp_ref = parent->ref; | ||
4166 | + parent->ref = node->ref; | ||
4167 | + node->ref = tmp_ref; | ||
4168 | + /* step up */ | ||
4169 | + node = parent; | ||
4170 | + parent = node->parent; | ||
4171 | + } | ||
4172 | + /* now delete: | ||
4173 | + * first find prev */ | ||
4174 | + prev = NULL; | ||
4175 | + pos = heap->head; | ||
4176 | + while (pos != node) { | ||
4177 | + prev = pos; | ||
4178 | + pos = pos->next; | ||
4179 | + } | ||
4180 | + /* we have prev, now remove node */ | ||
4181 | + if (prev) | ||
4182 | + prev->next = node->next; | ||
4183 | + else | ||
4184 | + heap->head = node->next; | ||
4185 | + __bheap_union(higher_prio, heap, __bheap_reverse(node->child)); | ||
4186 | + } else | ||
4187 | + heap->min = NULL; | ||
4188 | + node->degree = NOT_IN_HEAP; | ||
4189 | +} | ||
4190 | + | ||
4191 | +/* allocate a heap node for value and insert into the heap */ | ||
4192 | +int bheap_add(bheap_prio_t higher_prio, struct bheap* heap, | ||
4193 | + void* value, int gfp_flags) | ||
4194 | +{ | ||
4195 | + struct bheap_node* hn = bheap_node_alloc(gfp_flags); | ||
4196 | + if (likely(hn)) { | ||
4197 | + bheap_node_init(&hn, value); | ||
4198 | + bheap_insert(higher_prio, heap, hn); | ||
4199 | + } | ||
4200 | + return hn != NULL; | ||
4201 | +} | ||
4202 | + | ||
4203 | +void* bheap_take_del(bheap_prio_t higher_prio, | ||
4204 | + struct bheap* heap) | ||
4205 | +{ | ||
4206 | + struct bheap_node* hn = bheap_take(higher_prio, heap); | ||
4207 | + void* ret = NULL; | ||
4208 | + if (hn) { | ||
4209 | + ret = hn->value; | ||
4210 | + bheap_node_free(hn); | ||
4211 | + } | ||
4212 | + return ret; | ||
4213 | +} | ||
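A hedged usage sketch of the bheap API; the job structure and ordering below are illustrative (a bheap_prio_t callback returns nonzero iff its first argument has higher priority):

    struct my_job {                          /* hypothetical payload */
            unsigned long long deadline;
    };

    static int earlier_deadline(struct bheap_node* a, struct bheap_node* b)
    {
            return ((struct my_job*) a->value)->deadline <
                   ((struct my_job*) b->value)->deadline;
    }

    struct bheap queue;
    struct my_job *job, *next;

    bheap_init(&queue);
    bheap_add(earlier_deadline, &queue, job, GFP_ATOMIC); /* allocates a node */
    next = bheap_take_del(earlier_deadline, &queue);      /* frees the node */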
4214 | diff --git a/litmus/budget.c b/litmus/budget.c | ||
4215 | new file mode 100644 | ||
4216 | index 0000000..310e9a3 | ||
4217 | --- /dev/null | ||
4218 | +++ b/litmus/budget.c | ||
4219 | @@ -0,0 +1,111 @@ | ||
4220 | +#include <linux/sched.h> | ||
4221 | +#include <linux/percpu.h> | ||
4222 | +#include <linux/hrtimer.h> | ||
4223 | + | ||
4224 | +#include <litmus/litmus.h> | ||
4225 | +#include <litmus/preempt.h> | ||
4226 | + | ||
4227 | +struct enforcement_timer { | ||
4228 | + /* The enforcement timer is used to accurately police | ||
4229 | + * slice budgets. */ | ||
4230 | + struct hrtimer timer; | ||
4231 | + int armed; | ||
4232 | +}; | ||
4233 | + | ||
4234 | +DEFINE_PER_CPU(struct enforcement_timer, budget_timer); | ||
4235 | + | ||
4236 | +static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer) | ||
4237 | +{ | ||
4238 | + struct enforcement_timer* et = container_of(timer, | ||
4239 | + struct enforcement_timer, | ||
4240 | + timer); | ||
4241 | + unsigned long flags; | ||
4242 | + | ||
4243 | + local_irq_save(flags); | ||
4244 | + TRACE("enforcement timer fired.\n"); | ||
4245 | + et->armed = 0; | ||
4246 | + /* activate scheduler */ | ||
4247 | + litmus_reschedule_local(); | ||
4248 | + local_irq_restore(flags); | ||
4249 | + | ||
4250 | + return HRTIMER_NORESTART; | ||
4251 | +} | ||
4252 | + | ||
4253 | +/* assumes called with IRQs off */ | ||
4254 | +static void cancel_enforcement_timer(struct enforcement_timer* et) | ||
4255 | +{ | ||
4256 | + int ret; | ||
4257 | + | ||
4258 | + TRACE("cancelling enforcement timer.\n"); | ||
4259 | + | ||
4260 | + /* Since interrupts are disabled and et->armed is only | ||
4261 | + * modified locally, we do not need any locks. | ||
4262 | + */ | ||
4263 | + | ||
4264 | + if (et->armed) { | ||
4265 | + ret = hrtimer_try_to_cancel(&et->timer); | ||
4266 | + /* Should never be inactive. */ | ||
4267 | + BUG_ON(ret == 0); | ||
4268 | + /* Should never be running concurrently. */ | ||
4269 | + BUG_ON(ret == -1); | ||
4270 | + | ||
4271 | + et->armed = 0; | ||
4272 | + } | ||
4273 | +} | ||
4274 | + | ||
4275 | +/* assumes called with IRQs off */ | ||
4276 | +static void arm_enforcement_timer(struct enforcement_timer* et, | ||
4277 | + struct task_struct* t) | ||
4278 | +{ | ||
4279 | + lt_t when_to_fire; | ||
4280 | + TRACE_TASK(t, "arming enforcement timer.\n"); | ||
4281 | + | ||
4282 | + /* Calling this when there is no budget left for the task | ||
4283 | + * makes no sense, unless the task is non-preemptive. */ | ||
4284 | + BUG_ON(budget_exhausted(t) && (!is_np(t))); | ||
4285 | + | ||
4286 | + /* __hrtimer_start_range_ns() cancels the timer | ||
4287 | + * anyway, so we don't have to check whether it is still armed */ | ||
4288 | + | ||
4289 | + if (likely(!is_np(t))) { | ||
4290 | + when_to_fire = litmus_clock() + budget_remaining(t); | ||
4291 | + __hrtimer_start_range_ns(&et->timer, | ||
4292 | + ns_to_ktime(when_to_fire), | ||
4293 | + 0 /* delta */, | ||
4294 | + HRTIMER_MODE_ABS_PINNED, | ||
4295 | + 0 /* no wakeup */); | ||
4296 | + et->armed = 1; | ||
4297 | + } | ||
4298 | +} | ||
4299 | + | ||
4300 | + | ||
4301 | +/* expects to be called with IRQs off */ | ||
4302 | +void update_enforcement_timer(struct task_struct* t) | ||
4303 | +{ | ||
4304 | + struct enforcement_timer* et = &__get_cpu_var(budget_timer); | ||
4305 | + | ||
4306 | + if (t && budget_precisely_enforced(t)) { | ||
4307 | + /* Make sure we call into the scheduler when this budget | ||
4308 | + * expires. */ | ||
4309 | + arm_enforcement_timer(et, t); | ||
4310 | + } else if (et->armed) { | ||
4311 | + /* Make sure we don't cause unnecessary interrupts. */ | ||
4312 | + cancel_enforcement_timer(et); | ||
4313 | + } | ||
4314 | +} | ||
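A hedged sketch of the intended call site; the plugin invocation below is simplified. The LITMUS^RT scheduler calls update_enforcement_timer() with interrupts off after each scheduling decision, so the per-CPU timer always tracks the task that was just picked:

    /* inside the scheduler path, IRQs off; next may be NULL */
    next = litmus->schedule(prev);     /* plugin picks the next task */
    update_enforcement_timer(next);    /* arms or cancels as appropriate */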
4315 | + | ||
4316 | + | ||
4317 | +static int __init init_budget_enforcement(void) | ||
4318 | +{ | ||
4319 | + int cpu; | ||
4320 | + struct enforcement_timer* et; | ||
4321 | + | ||
4322 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
4323 | + et = &per_cpu(budget_timer, cpu); | ||
4324 | + hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
4325 | + et->timer.function = on_enforcement_timeout; | ||
4326 | + } | ||
4327 | + return 0; | ||
4328 | +} | ||
4329 | + | ||
4330 | +module_init(init_budget_enforcement); | ||
4331 | diff --git a/litmus/ctrldev.c b/litmus/ctrldev.c | ||
4332 | new file mode 100644 | ||
4333 | index 0000000..6677a67 | ||
4334 | --- /dev/null | ||
4335 | +++ b/litmus/ctrldev.c | ||
4336 | @@ -0,0 +1,150 @@ | ||
4337 | +#include <linux/sched.h> | ||
4338 | +#include <linux/mm.h> | ||
4339 | +#include <linux/fs.h> | ||
4340 | +#include <linux/miscdevice.h> | ||
4341 | +#include <linux/module.h> | ||
4342 | + | ||
4343 | +#include <litmus/litmus.h> | ||
4344 | + | ||
4345 | +/* only one page for now, but we might want to add a RO version at some point */ | ||
4346 | + | ||
4347 | +#define CTRL_NAME "litmus/ctrl" | ||
4348 | + | ||
4349 | +/* allocate t->rt_param.ctrl_page*/ | ||
4350 | +static int alloc_ctrl_page(struct task_struct *t) | ||
4351 | +{ | ||
4352 | + int err = 0; | ||
4353 | + | ||
4354 | + /* only allocate if the task doesn't have one yet */ | ||
4355 | + if (!tsk_rt(t)->ctrl_page) { | ||
4356 | + tsk_rt(t)->ctrl_page = (void*) get_zeroed_page(GFP_KERNEL); | ||
4357 | + if (!tsk_rt(t)->ctrl_page) | ||
4358 | + err = -ENOMEM; | ||
4359 | + /* will get de-allocated in task teardown */ | ||
4360 | + TRACE_TASK(t, "%s ctrl_page = %p\n", __FUNCTION__, | ||
4361 | + tsk_rt(t)->ctrl_page); | ||
4362 | + } | ||
4363 | + return err; | ||
4364 | +} | ||
4365 | + | ||
4366 | +static int map_ctrl_page(struct task_struct *t, struct vm_area_struct* vma) | ||
4367 | +{ | ||
4368 | + int err; | ||
4369 | + unsigned long pfn; | ||
4370 | + | ||
4371 | + struct page* ctrl = virt_to_page(tsk_rt(t)->ctrl_page); | ||
4372 | + | ||
4373 | + /* Increase ref count. Is decreased when vma is destroyed. */ | ||
4374 | + get_page(ctrl); | ||
4375 | + | ||
4376 | + /* compute page frame number */ | ||
4377 | + pfn = page_to_pfn(ctrl); | ||
4378 | + | ||
4379 | + TRACE_CUR(CTRL_NAME | ||
4380 | + ": mapping %p (pfn:%lx, %lx) to 0x%lx (prot:%lx)\n", | ||
4381 | + tsk_rt(t)->ctrl_page, pfn, page_to_pfn(ctrl), vma->vm_start, | ||
4382 | + vma->vm_page_prot); | ||
4383 | + | ||
4384 | + /* Map it into the vma. Make sure to use PAGE_SHARED, otherwise | ||
4385 | + * userspace actually gets a copy-on-write page. */ | ||
4386 | + err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, PAGE_SHARED); | ||
4387 | + | ||
4388 | + if (err) | ||
4389 | + TRACE_CUR(CTRL_NAME ": remap_pfn_range() failed (%d)\n", err); | ||
4390 | + | ||
4391 | + return err; | ||
4392 | +} | ||
4393 | + | ||
4394 | +static void litmus_ctrl_vm_close(struct vm_area_struct* vma) | ||
4395 | +{ | ||
4396 | + TRACE_CUR("%s flags=0x%x prot=0x%x\n", __FUNCTION__, | ||
4397 | + vma->vm_flags, vma->vm_page_prot); | ||
4398 | + | ||
4399 | + TRACE_CUR(CTRL_NAME | ||
4400 | +		": %p:%p vma:%p vma->vm_private_data:%p closed by %s/%d.\n", | ||
4401 | + (void*) vma->vm_start, (void*) vma->vm_end, vma, | ||
4402 | + vma->vm_private_data, current->comm, | ||
4403 | + current->pid); | ||
4404 | +} | ||
4405 | + | ||
4406 | +static int litmus_ctrl_vm_fault(struct vm_area_struct* vma, | ||
4407 | + struct vm_fault* vmf) | ||
4408 | +{ | ||
4409 | + /* This function should never be called, since | ||
4410 | + * all pages should have been mapped by mmap() | ||
4411 | + * already. */ | ||
4412 | + TRACE_CUR("%s flags=0x%x\n", __FUNCTION__, vma->vm_flags); | ||
4413 | + | ||
4414 | + /* nope, you only get one page */ | ||
4415 | + return VM_FAULT_SIGBUS; | ||
4416 | +} | ||
4417 | + | ||
4418 | +static struct vm_operations_struct litmus_ctrl_vm_ops = { | ||
4419 | + .close = litmus_ctrl_vm_close, | ||
4420 | + .fault = litmus_ctrl_vm_fault, | ||
4421 | +}; | ||
4422 | + | ||
4423 | +static int litmus_ctrl_mmap(struct file* filp, struct vm_area_struct* vma) | ||
4424 | +{ | ||
4425 | + int err = 0; | ||
4426 | + | ||
4427 | + /* first make sure mapper knows what he's doing */ | ||
4428 | + | ||
4429 | + /* you can only get one page */ | ||
4430 | + if (vma->vm_end - vma->vm_start != PAGE_SIZE) | ||
4431 | + return -EINVAL; | ||
4432 | + | ||
4433 | + /* you can only map the "first" page */ | ||
4434 | + if (vma->vm_pgoff != 0) | ||
4435 | + return -EINVAL; | ||
4436 | + | ||
4437 | + /* you can't share it with anyone */ | ||
4438 | + if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) | ||
4439 | + return -EINVAL; | ||
4440 | + | ||
4441 | + vma->vm_ops = &litmus_ctrl_vm_ops; | ||
4442 | + /* this mapping should not be kept across forks, | ||
4443 | + * and cannot be expanded */ | ||
4444 | + vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | ||
4445 | + | ||
4446 | + err = alloc_ctrl_page(current); | ||
4447 | + if (!err) | ||
4448 | + err = map_ctrl_page(current, vma); | ||
4449 | + | ||
4450 | + TRACE_CUR("%s flags=0x%x prot=0x%lx\n", | ||
4451 | + __FUNCTION__, vma->vm_flags, vma->vm_page_prot); | ||
4452 | + | ||
4453 | + return err; | ||
4454 | +} | ||
4455 | + | ||
4456 | +static struct file_operations litmus_ctrl_fops = { | ||
4457 | + .owner = THIS_MODULE, | ||
4458 | + .mmap = litmus_ctrl_mmap, | ||
4459 | +}; | ||
4460 | + | ||
4461 | +static struct miscdevice litmus_ctrl_dev = { | ||
4462 | + .name = CTRL_NAME, | ||
4463 | + .minor = MISC_DYNAMIC_MINOR, | ||
4464 | + .fops = &litmus_ctrl_fops, | ||
4465 | +}; | ||
4466 | + | ||
4467 | +static int __init init_litmus_ctrl_dev(void) | ||
4468 | +{ | ||
4469 | + int err; | ||
4470 | + | ||
4471 | + BUILD_BUG_ON(sizeof(struct control_page) > PAGE_SIZE); | ||
4472 | + | ||
4473 | + printk("Initializing LITMUS^RT control device.\n"); | ||
4474 | + err = misc_register(&litmus_ctrl_dev); | ||
4475 | + if (err) | ||
4476 | + printk("Could not allocate %s device (%d).\n", CTRL_NAME, err); | ||
4477 | + return err; | ||
4478 | +} | ||
4479 | + | ||
4480 | +static void __exit exit_litmus_ctrl_dev(void) | ||
4481 | +{ | ||
4482 | + misc_deregister(&litmus_ctrl_dev); | ||
4483 | +} | ||
4484 | + | ||
4485 | +module_init(init_litmus_ctrl_dev); | ||
4486 | +module_exit(exit_litmus_ctrl_dev); | ||
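
From user space, the control device above is consumed with a single one-page mmap(). A minimal sketch follows; the /dev/litmus/ctrl path is an assumption about how the misc device is exposed, and liblitmus normally wraps this.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        int fd = open("/dev/litmus/ctrl", O_RDWR);   /* assumed device path */
        if (fd < 0) { perror("open"); return 1; }

        /* Exactly one page at offset 0, and MAP_PRIVATE rather than
         * MAP_SHARED: litmus_ctrl_mmap() rejects shared mappings and maps
         * the page with PAGE_SHARED itself, so user space does not end up
         * with a copy-on-write copy. */
        void *ctrl = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE, fd, 0);
        if (ctrl == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

        printf("control page mapped at %p\n", ctrl);
        munmap(ctrl, page_size);
        close(fd);
        return 0;
    }
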
4487 | diff --git a/litmus/edf_common.c b/litmus/edf_common.c | ||
4488 | new file mode 100644 | ||
4489 | index 0000000..06daec6 | ||
4490 | --- /dev/null | ||
4491 | +++ b/litmus/edf_common.c | ||
4492 | @@ -0,0 +1,102 @@ | ||
4493 | +/* | ||
4494 | + * litmus/edf_common.c | ||
4495 | + * | ||
4496 | + * Common functions for EDF-based schedulers. | ||
4497 | + */ | ||
4498 | + | ||
4499 | +#include <linux/percpu.h> | ||
4500 | +#include <linux/sched.h> | ||
4501 | +#include <linux/list.h> | ||
4502 | + | ||
4503 | +#include <litmus/litmus.h> | ||
4504 | +#include <litmus/sched_plugin.h> | ||
4505 | +#include <litmus/sched_trace.h> | ||
4506 | + | ||
4507 | +#include <litmus/edf_common.h> | ||
4508 | + | ||
4509 | +/* edf_higher_prio - returns true if first has a higher EDF priority | ||
4510 | + * than second. Deadline ties are broken by PID. | ||
4511 | + * | ||
4512 | + * Both first and second may be NULL. | ||
4513 | + */ | ||
4514 | +int edf_higher_prio(struct task_struct* first, | ||
4515 | + struct task_struct* second) | ||
4516 | +{ | ||
4517 | + struct task_struct *first_task = first; | ||
4518 | + struct task_struct *second_task = second; | ||
4519 | + | ||
4520 | + /* There is no point in comparing a task to itself. */ | ||
4521 | + if (first && first == second) { | ||
4522 | + TRACE_TASK(first, | ||
4523 | + "WARNING: pointless edf priority comparison.\n"); | ||
4524 | + return 0; | ||
4525 | + } | ||
4526 | + | ||
4527 | + | ||
4528 | + /* Check for inherited priorities. Change task | ||
4529 | + * used for comparison in such a case. | ||
4530 | + */ | ||
4531 | + if (first && first->rt_param.inh_task) | ||
4532 | + first_task = first->rt_param.inh_task; | ||
4533 | + if (second && second->rt_param.inh_task) | ||
4534 | + second_task = second->rt_param.inh_task; | ||
4535 | + | ||
4536 | + return | ||
4537 | + /* it has to exist in order to have higher priority */ | ||
4538 | + first_task && ( | ||
4539 | + /* does the second task exist and is it a real-time task? If | ||
4540 | + * not, the first task (which is a RT task) has higher | ||
4541 | + * priority. | ||
4542 | + */ | ||
4543 | + !second_task || !is_realtime(second_task) || | ||
4544 | + | ||
4545 | + /* is the deadline of the first task earlier? | ||
4546 | + * Then it has higher priority. | ||
4547 | + */ | ||
4548 | + earlier_deadline(first_task, second_task) || | ||
4549 | + | ||
4550 | + /* Do we have a deadline tie? | ||
4551 | + * Then break by PID. | ||
4552 | + */ | ||
4553 | + (get_deadline(first_task) == get_deadline(second_task) && | ||
4554 | + (first_task->pid < second_task->pid || | ||
4555 | + | ||
4556 | + /* If the PIDs are the same then the task with the inherited | ||
4557 | + * priority wins. | ||
4558 | + */ | ||
4559 | + (first_task->pid == second_task->pid && | ||
4560 | + !second->rt_param.inh_task)))); | ||
4561 | +} | ||
4562 | + | ||
4563 | +int edf_ready_order(struct bheap_node* a, struct bheap_node* b) | ||
4564 | +{ | ||
4565 | + return edf_higher_prio(bheap2task(a), bheap2task(b)); | ||
4566 | +} | ||
4567 | + | ||
4568 | +void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched, | ||
4569 | + release_jobs_t release) | ||
4570 | +{ | ||
4571 | + rt_domain_init(rt, edf_ready_order, resched, release); | ||
4572 | +} | ||
4573 | + | ||
4574 | +/* need_to_preempt - check whether the task t needs to be preempted | ||
4575 | + * call only with irqs disabled and with ready_lock acquired | ||
4576 | + * THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT! | ||
4577 | + */ | ||
4578 | +int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t) | ||
4579 | +{ | ||
4580 | + /* we need the read lock for edf_ready_queue */ | ||
4581 | + /* no need to preempt if there is nothing pending */ | ||
4582 | + if (!__jobs_pending(rt)) | ||
4583 | + return 0; | ||
4584 | + /* we need to reschedule if t doesn't exist */ | ||
4585 | + if (!t) | ||
4586 | + return 1; | ||
4587 | + | ||
4588 | + /* NOTE: We cannot check for non-preemptibility since we | ||
4589 | + * don't know what address space we're currently in. | ||
4590 | + */ | ||
4591 | + | ||
4592 | + /* make sure to get non-rt stuff out of the way */ | ||
4593 | + return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t); | ||
4594 | +} | ||
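
The nested boolean expression in edf_higher_prio() is easier to audit as a flat restatement. The sketch below models the same decision order (inheritance resolution, existence, real-time status, earlier deadline, lower PID, and the final inheritance tie-break) on a made-up task record; it is a model of the predicate, not the kernel's types.

    #include <stdbool.h>
    #include <stddef.h>

    struct model_task {                  /* only the fields the predicate reads */
        bool real_time;
        unsigned long long deadline;
        int pid;
        struct model_task *inh_task;     /* inherited priority, may be NULL */
    };

    static bool model_edf_higher_prio(struct model_task *first,
                                      struct model_task *second)
    {
        if (first && first == second)    /* pointless self-comparison */
            return false;

        /* resolve priority inheritance first, as the patch does */
        struct model_task *f = (first && first->inh_task) ? first->inh_task : first;
        struct model_task *s = (second && second->inh_task) ? second->inh_task : second;

        if (!f)
            return false;                /* must exist to have higher priority */
        if (!s || !s->real_time)
            return true;                 /* any RT task beats non-RT/none */
        if (f->deadline < s->deadline)
            return true;                 /* earlier deadline wins */
        return f->deadline == s->deadline &&
               (f->pid < s->pid ||       /* deadline tie: lower PID wins */
                (f->pid == s->pid && !second->inh_task));
    }
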
4595 | diff --git a/litmus/fdso.c b/litmus/fdso.c | ||
4596 | new file mode 100644 | ||
4597 | index 0000000..85be716 | ||
4598 | --- /dev/null | ||
4599 | +++ b/litmus/fdso.c | ||
4600 | @@ -0,0 +1,281 @@ | ||
4601 | +/* fdso.c - file descriptor attached shared objects | ||
4602 | + * | ||
4603 | + * (c) 2007 B. Brandenburg, LITMUS^RT project | ||
4604 | + * | ||
4605 | + * Notes: | ||
4606 | + * - object descriptor (OD) tables are not cloned during a fork. | ||
4607 | + * - objects are created on-demand, and freed after the last reference | ||
4608 | + * is dropped. | ||
4609 | + * - for now, object types are hard coded. | ||
4610 | + * - As long as we have live objects, we keep a reference to the inode. | ||
4611 | + */ | ||
4612 | + | ||
4613 | +#include <linux/errno.h> | ||
4614 | +#include <linux/sched.h> | ||
4615 | +#include <linux/mutex.h> | ||
4616 | +#include <linux/file.h> | ||
4617 | +#include <asm/uaccess.h> | ||
4618 | + | ||
4619 | +#include <litmus/fdso.h> | ||
4620 | + | ||
4621 | +extern struct fdso_ops fmlp_sem_ops; | ||
4622 | +extern struct fdso_ops srp_sem_ops; | ||
4623 | + | ||
4624 | +static const struct fdso_ops* fdso_ops[] = { | ||
4625 | + &fmlp_sem_ops, | ||
4626 | + &srp_sem_ops, | ||
4627 | +}; | ||
4628 | + | ||
4629 | +static void* fdso_create(obj_type_t type) | ||
4630 | +{ | ||
4631 | + if (fdso_ops[type]->create) | ||
4632 | + return fdso_ops[type]->create(); | ||
4633 | + else | ||
4634 | + return NULL; | ||
4635 | +} | ||
4636 | + | ||
4637 | +static void fdso_destroy(obj_type_t type, void* obj) | ||
4638 | +{ | ||
4639 | + fdso_ops[type]->destroy(obj); | ||
4640 | +} | ||
4641 | + | ||
4642 | +static int fdso_open(struct od_table_entry* entry, void* __user config) | ||
4643 | +{ | ||
4644 | + if (fdso_ops[entry->obj->type]->open) | ||
4645 | + return fdso_ops[entry->obj->type]->open(entry, config); | ||
4646 | + else | ||
4647 | + return 0; | ||
4648 | +} | ||
4649 | + | ||
4650 | +static int fdso_close(struct od_table_entry* entry) | ||
4651 | +{ | ||
4652 | + if (fdso_ops[entry->obj->type]->close) | ||
4653 | + return fdso_ops[entry->obj->type]->close(entry); | ||
4654 | + else | ||
4655 | + return 0; | ||
4656 | +} | ||
4657 | + | ||
4658 | +/* inode must be locked already */ | ||
4659 | +static struct inode_obj_id* alloc_inode_obj(struct inode* inode, | ||
4660 | + obj_type_t type, | ||
4661 | + unsigned int id) | ||
4662 | +{ | ||
4663 | + struct inode_obj_id* obj; | ||
4664 | + void* raw_obj; | ||
4665 | + | ||
4666 | + raw_obj = fdso_create(type); | ||
4667 | + if (!raw_obj) | ||
4668 | + return NULL; | ||
4669 | + | ||
4670 | + obj = kmalloc(sizeof(*obj), GFP_KERNEL); | ||
4671 | +	if (!obj) {	/* also free raw_obj to avoid leaking it */ | ||
4672 | +		fdso_destroy(type, raw_obj); return NULL; } | ||
4673 | + INIT_LIST_HEAD(&obj->list); | ||
4674 | + atomic_set(&obj->count, 1); | ||
4675 | + obj->type = type; | ||
4676 | + obj->id = id; | ||
4677 | + obj->obj = raw_obj; | ||
4678 | + obj->inode = inode; | ||
4679 | + | ||
4680 | + list_add(&obj->list, &inode->i_obj_list); | ||
4681 | + atomic_inc(&inode->i_count); | ||
4682 | + | ||
4683 | + printk(KERN_DEBUG "alloc_inode_obj(%p, %d, %d): object created\n", inode, type, id); | ||
4684 | + return obj; | ||
4685 | +} | ||
4686 | + | ||
4687 | +/* inode must be locked already */ | ||
4688 | +static struct inode_obj_id* get_inode_obj(struct inode* inode, | ||
4689 | + obj_type_t type, | ||
4690 | + unsigned int id) | ||
4691 | +{ | ||
4692 | + struct list_head* pos; | ||
4693 | + struct inode_obj_id* obj = NULL; | ||
4694 | + | ||
4695 | + list_for_each(pos, &inode->i_obj_list) { | ||
4696 | + obj = list_entry(pos, struct inode_obj_id, list); | ||
4697 | + if (obj->id == id && obj->type == type) { | ||
4698 | + atomic_inc(&obj->count); | ||
4699 | + return obj; | ||
4700 | + } | ||
4701 | + } | ||
4702 | + printk(KERN_DEBUG "get_inode_obj(%p, %d, %d): couldn't find object\n", inode, type, id); | ||
4703 | + return NULL; | ||
4704 | +} | ||
4705 | + | ||
4706 | + | ||
4707 | +static void put_inode_obj(struct inode_obj_id* obj) | ||
4708 | +{ | ||
4709 | + struct inode* inode; | ||
4710 | + int let_go = 0; | ||
4711 | + | ||
4712 | + inode = obj->inode; | ||
4713 | + if (atomic_dec_and_test(&obj->count)) { | ||
4714 | + | ||
4715 | + mutex_lock(&inode->i_obj_mutex); | ||
4716 | + /* no new references can be obtained */ | ||
4717 | + if (!atomic_read(&obj->count)) { | ||
4718 | + list_del(&obj->list); | ||
4719 | + fdso_destroy(obj->type, obj->obj); | ||
4720 | + kfree(obj); | ||
4721 | + let_go = 1; | ||
4722 | + } | ||
4723 | + mutex_unlock(&inode->i_obj_mutex); | ||
4724 | + if (let_go) | ||
4725 | + iput(inode); | ||
4726 | + } | ||
4727 | +} | ||
4728 | + | ||
4729 | +static struct od_table_entry* get_od_entry(struct task_struct* t) | ||
4730 | +{ | ||
4731 | + struct od_table_entry* table; | ||
4732 | + int i; | ||
4733 | + | ||
4734 | + | ||
4735 | + table = t->od_table; | ||
4736 | + if (!table) { | ||
4737 | + table = kzalloc(sizeof(*table) * MAX_OBJECT_DESCRIPTORS, | ||
4738 | + GFP_KERNEL); | ||
4739 | + t->od_table = table; | ||
4740 | + } | ||
4741 | + | ||
4742 | + for (i = 0; table && i < MAX_OBJECT_DESCRIPTORS; i++) | ||
4743 | + if (!table[i].used) { | ||
4744 | + table[i].used = 1; | ||
4745 | + return table + i; | ||
4746 | + } | ||
4747 | + return NULL; | ||
4748 | +} | ||
4749 | + | ||
4750 | +static int put_od_entry(struct od_table_entry* od) | ||
4751 | +{ | ||
4752 | + put_inode_obj(od->obj); | ||
4753 | + od->used = 0; | ||
4754 | + return 0; | ||
4755 | +} | ||
4756 | + | ||
4757 | +void exit_od_table(struct task_struct* t) | ||
4758 | +{ | ||
4759 | + int i; | ||
4760 | + | ||
4761 | + if (t->od_table) { | ||
4762 | + for (i = 0; i < MAX_OBJECT_DESCRIPTORS; i++) | ||
4763 | + if (t->od_table[i].used) | ||
4764 | + put_od_entry(t->od_table + i); | ||
4765 | + kfree(t->od_table); | ||
4766 | + t->od_table = NULL; | ||
4767 | + } | ||
4768 | +} | ||
4769 | + | ||
4770 | +static int do_sys_od_open(struct file* file, obj_type_t type, int id, | ||
4771 | + void* __user config) | ||
4772 | +{ | ||
4773 | + int idx = 0, err; | ||
4774 | + struct inode* inode; | ||
4775 | + struct inode_obj_id* obj = NULL; | ||
4776 | + struct od_table_entry* entry; | ||
4777 | + | ||
4778 | + inode = file->f_dentry->d_inode; | ||
4779 | + | ||
4780 | + entry = get_od_entry(current); | ||
4781 | + if (!entry) | ||
4782 | + return -ENOMEM; | ||
4783 | + | ||
4784 | + mutex_lock(&inode->i_obj_mutex); | ||
4785 | + obj = get_inode_obj(inode, type, id); | ||
4786 | + if (!obj) | ||
4787 | + obj = alloc_inode_obj(inode, type, id); | ||
4788 | + if (!obj) { | ||
4789 | + idx = -ENOMEM; | ||
4790 | + entry->used = 0; | ||
4791 | + } else { | ||
4792 | + entry->obj = obj; | ||
4793 | + entry->extra = NULL; | ||
4794 | + idx = entry - current->od_table; | ||
4795 | + } | ||
4796 | + | ||
4797 | + mutex_unlock(&inode->i_obj_mutex); | ||
4798 | + | ||
4799 | + err = fdso_open(entry, config); | ||
4800 | + if (err < 0) { | ||
4801 | + /* The class rejected the open call. | ||
4802 | + * We need to clean up and tell user space. | ||
4803 | + */ | ||
4804 | + put_od_entry(entry); | ||
4805 | + idx = err; | ||
4806 | + } | ||
4807 | + | ||
4808 | + return idx; | ||
4809 | +} | ||
4810 | + | ||
4811 | + | ||
4812 | +struct od_table_entry* __od_lookup(int od) | ||
4813 | +{ | ||
4814 | + struct task_struct *t = current; | ||
4815 | + | ||
4816 | + if (!t->od_table) | ||
4817 | + return NULL; | ||
4818 | + if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS) | ||
4819 | + return NULL; | ||
4820 | + if (!t->od_table[od].used) | ||
4821 | + return NULL; | ||
4822 | + return t->od_table + od; | ||
4823 | +} | ||
4824 | + | ||
4825 | + | ||
4826 | +asmlinkage long sys_od_open(int fd, int type, int obj_id, void* __user config) | ||
4827 | +{ | ||
4828 | + int ret = 0; | ||
4829 | + struct file* file; | ||
4830 | + | ||
4831 | + /* | ||
4832 | + 1) get file from fd, get inode from file | ||
4833 | + 2) lock inode | ||
4834 | + 3) try to lookup object | ||
4835 | + 4) if not present create and enqueue object, inc inode refcnt | ||
4836 | + 5) increment refcnt of object | ||
4837 | + 6) alloc od_table_entry, setup ptrs | ||
4838 | + 7) unlock inode | ||
4839 | + 8) return offset in od_table as OD | ||
4840 | + */ | ||
4841 | + | ||
4842 | + if (type < MIN_OBJ_TYPE || type > MAX_OBJ_TYPE) { | ||
4843 | + ret = -EINVAL; | ||
4844 | + goto out; | ||
4845 | + } | ||
4846 | + | ||
4847 | + file = fget(fd); | ||
4848 | + if (!file) { | ||
4849 | + ret = -EBADF; | ||
4850 | + goto out; | ||
4851 | + } | ||
4852 | + | ||
4853 | + ret = do_sys_od_open(file, type, obj_id, config); | ||
4854 | + | ||
4855 | + fput(file); | ||
4856 | + | ||
4857 | +out: | ||
4858 | + return ret; | ||
4859 | +} | ||
4860 | + | ||
4861 | + | ||
4862 | +asmlinkage long sys_od_close(int od) | ||
4863 | +{ | ||
4864 | + int ret = -EINVAL; | ||
4865 | + struct task_struct *t = current; | ||
4866 | + | ||
4867 | + if (od < 0 || od >= MAX_OBJECT_DESCRIPTORS) | ||
4868 | + return ret; | ||
4869 | + | ||
4870 | + if (!t->od_table || !t->od_table[od].used) | ||
4871 | + return ret; | ||
4872 | + | ||
4873 | + | ||
4874 | + /* give the class a chance to reject the close | ||
4875 | + */ | ||
4876 | + ret = fdso_close(t->od_table + od); | ||
4877 | + if (ret == 0) | ||
4878 | + ret = put_od_entry(t->od_table + od); | ||
4879 | + | ||
4880 | + return ret; | ||
4881 | +} | ||
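
The intended usage pattern of the FDSO layer is: open any shared file to name an (inode, type, id) triple, exchange the file descriptor for an object descriptor, then close the fd. A hedged sketch, assuming liblitmus-style wrappers od_open()/od_close() and an FMLP_SEM type constant (neither is shown in this hunk):

    #include <fcntl.h>
    #include <unistd.h>

    extern int od_open(int fd, int type, int obj_id);   /* assumed wrappers around  */
    extern int od_close(int od);                        /* sys_od_open/sys_od_close */
    #define FMLP_SEM 0                                  /* assumed type constant    */

    int attach_shared_object(const char *ns_file)
    {
        /* 1) Any shared file provides the namespace (its inode). */
        int fd = open(ns_file, O_RDONLY);
        if (fd < 0)
            return -1;

        /* 2) The first caller creates the object; later callers that name
         *    the same (inode, type, id) share it via reference counting. */
        int od = od_open(fd, FMLP_SEM, 0 /* id */);

        /* 3) The object holds its own inode reference, so the fd itself
         *    may be closed immediately. */
        close(fd);
        return od;   /* index into the task's od_table, or negative errno */
    }
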
4882 | diff --git a/litmus/fmlp.c b/litmus/fmlp.c | ||
4883 | new file mode 100644 | ||
4884 | index 0000000..a9a6385 | ||
4885 | --- /dev/null | ||
4886 | +++ b/litmus/fmlp.c | ||
4887 | @@ -0,0 +1,268 @@ | ||
4888 | +/* | ||
4889 | + * FMLP implementation. | ||
4890 | + * Much of the code here is borrowed from include/asm-i386/semaphore.h | ||
4891 | + */ | ||
4892 | + | ||
4893 | +#include <asm/atomic.h> | ||
4894 | + | ||
4895 | +#include <linux/semaphore.h> | ||
4896 | +#include <linux/sched.h> | ||
4897 | +#include <linux/wait.h> | ||
4898 | +#include <linux/spinlock.h> | ||
4899 | + | ||
4900 | +#include <litmus/litmus.h> | ||
4901 | +#include <litmus/sched_plugin.h> | ||
4902 | +#include <litmus/edf_common.h> | ||
4903 | + | ||
4904 | +#include <litmus/fdso.h> | ||
4905 | + | ||
4906 | +#include <litmus/trace.h> | ||
4907 | + | ||
4908 | +#ifdef CONFIG_FMLP | ||
4909 | + | ||
4910 | +static void* create_fmlp_semaphore(void) | ||
4911 | +{ | ||
4912 | + struct pi_semaphore* sem; | ||
4913 | + int i; | ||
4914 | + | ||
4915 | + sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
4916 | + if (!sem) | ||
4917 | + return NULL; | ||
4918 | + atomic_set(&sem->count, 1); | ||
4919 | + sem->sleepers = 0; | ||
4920 | + init_waitqueue_head(&sem->wait); | ||
4921 | + sem->hp.task = NULL; | ||
4922 | + sem->holder = NULL; | ||
4923 | + for (i = 0; i < NR_CPUS; i++) | ||
4924 | + sem->hp.cpu_task[i] = NULL; | ||
4925 | + return sem; | ||
4926 | +} | ||
4927 | + | ||
4928 | +static int open_fmlp_semaphore(struct od_table_entry* entry, void* __user arg) | ||
4929 | +{ | ||
4930 | + if (!fmlp_active()) | ||
4931 | + return -EBUSY; | ||
4932 | + return 0; | ||
4933 | +} | ||
4934 | + | ||
4935 | +static void destroy_fmlp_semaphore(void* sem) | ||
4936 | +{ | ||
4937 | + /* XXX assert invariants */ | ||
4938 | + kfree(sem); | ||
4939 | +} | ||
4940 | + | ||
4941 | +struct fdso_ops fmlp_sem_ops = { | ||
4942 | + .create = create_fmlp_semaphore, | ||
4943 | + .open = open_fmlp_semaphore, | ||
4944 | + .destroy = destroy_fmlp_semaphore | ||
4945 | +}; | ||
4946 | + | ||
4947 | +struct wq_pair { | ||
4948 | + struct task_struct* tsk; | ||
4949 | + struct pi_semaphore* sem; | ||
4950 | +}; | ||
4951 | + | ||
4952 | +static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync, | ||
4953 | + void *key) | ||
4954 | +{ | ||
4955 | + struct wq_pair* wqp = (struct wq_pair*) wait->private; | ||
4956 | + set_rt_flags(wqp->tsk, RT_F_EXIT_SEM); | ||
4957 | + litmus->inherit_priority(wqp->sem, wqp->tsk); | ||
4958 | + TRACE_TASK(wqp->tsk, | ||
4959 | + "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n"); | ||
4960 | + /* point to task for default_wake_function() */ | ||
4961 | + wait->private = wqp->tsk; | ||
4962 | + default_wake_function(wait, mode, sync, key); | ||
4963 | + | ||
4964 | + /* Always return true since we know that if we encountered a task | ||
4965 | +	 * that was already running, the wake_up raced with the schedule in | ||
4966 | +	 * do_fmlp_down(). In that case the task in do_fmlp_down() will be scheduled | ||
4967 | + * immediately and own the lock. We must not wake up another task in | ||
4968 | + * any case. | ||
4969 | + */ | ||
4970 | + return 1; | ||
4971 | +} | ||
4972 | + | ||
4973 | +/* caller is responsible for locking */ | ||
4974 | +int edf_set_hp_task(struct pi_semaphore *sem) | ||
4975 | +{ | ||
4976 | + struct list_head *tmp, *next; | ||
4977 | + struct task_struct *queued; | ||
4978 | + int ret = 0; | ||
4979 | + | ||
4980 | + sem->hp.task = NULL; | ||
4981 | + list_for_each_safe(tmp, next, &sem->wait.task_list) { | ||
4982 | + queued = ((struct wq_pair*) | ||
4983 | + list_entry(tmp, wait_queue_t, | ||
4984 | + task_list)->private)->tsk; | ||
4985 | + | ||
4986 | + /* Compare task prios, find high prio task. */ | ||
4987 | + if (edf_higher_prio(queued, sem->hp.task)) { | ||
4988 | + sem->hp.task = queued; | ||
4989 | + ret = 1; | ||
4990 | + } | ||
4991 | + } | ||
4992 | + return ret; | ||
4993 | +} | ||
4994 | + | ||
4995 | +/* caller is responsible for locking */ | ||
4996 | +int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu) | ||
4997 | +{ | ||
4998 | + struct list_head *tmp, *next; | ||
4999 | + struct task_struct *queued; | ||
5000 | + int ret = 0; | ||
5001 | + | ||
5002 | + sem->hp.cpu_task[cpu] = NULL; | ||
5003 | + list_for_each_safe(tmp, next, &sem->wait.task_list) { | ||
5004 | + queued = ((struct wq_pair*) | ||
5005 | + list_entry(tmp, wait_queue_t, | ||
5006 | + task_list)->private)->tsk; | ||
5007 | + | ||
5008 | + /* Compare task prios, find high prio task. */ | ||
5009 | + if (get_partition(queued) == cpu && | ||
5010 | + edf_higher_prio(queued, sem->hp.cpu_task[cpu])) { | ||
5011 | + sem->hp.cpu_task[cpu] = queued; | ||
5012 | + ret = 1; | ||
5013 | + } | ||
5014 | + } | ||
5015 | + return ret; | ||
5016 | +} | ||
5017 | + | ||
5018 | +static int do_fmlp_down(struct pi_semaphore* sem) | ||
5019 | +{ | ||
5020 | + unsigned long flags; | ||
5021 | + struct task_struct *tsk = current; | ||
5022 | + struct wq_pair pair; | ||
5023 | + int suspended = 1; | ||
5024 | + wait_queue_t wait = { | ||
5025 | + .private = &pair, | ||
5026 | + .func = rt_pi_wake_up, | ||
5027 | + .task_list = {NULL, NULL} | ||
5028 | + }; | ||
5029 | + | ||
5030 | + pair.tsk = tsk; | ||
5031 | + pair.sem = sem; | ||
5032 | + spin_lock_irqsave(&sem->wait.lock, flags); | ||
5033 | + | ||
5034 | + if (atomic_dec_return(&sem->count) < 0 || | ||
5035 | + waitqueue_active(&sem->wait)) { | ||
5036 | + /* we need to suspend */ | ||
5037 | + tsk->state = TASK_UNINTERRUPTIBLE; | ||
5038 | + __add_wait_queue_tail_exclusive(&sem->wait, &wait); | ||
5039 | + | ||
5040 | + TRACE_CUR("suspends on PI lock %p\n", sem); | ||
5041 | + litmus->pi_block(sem, tsk); | ||
5042 | + | ||
5043 | + /* release lock before sleeping */ | ||
5044 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
5045 | + | ||
5046 | + TS_PI_DOWN_END; | ||
5047 | + preempt_enable_no_resched(); | ||
5048 | + | ||
5049 | + | ||
5050 | +		/* We depend on the FIFO order. | ||
5051 | +		 * Thus, we don't need to recheck when we wake up; we | ||
5052 | + * are guaranteed to have the lock since there is only one | ||
5053 | + * wake up per release | ||
5054 | + */ | ||
5055 | + schedule(); | ||
5056 | + | ||
5057 | + TRACE_CUR("woke up, now owns PI lock %p\n", sem); | ||
5058 | + | ||
5059 | + /* try_to_wake_up() set our state to TASK_RUNNING, | ||
5060 | + * all we need to do is to remove our wait queue entry | ||
5061 | + */ | ||
5062 | + remove_wait_queue(&sem->wait, &wait); | ||
5063 | + } else { | ||
5064 | + /* no priority inheritance necessary, since there are no queued | ||
5065 | + * tasks. | ||
5066 | + */ | ||
5067 | + suspended = 0; | ||
5068 | + TRACE_CUR("acquired PI lock %p, no contention\n", sem); | ||
5069 | + sem->holder = tsk; | ||
5070 | + | ||
5071 | + /* don't know if we're global or partitioned. */ | ||
5072 | + sem->hp.task = tsk; | ||
5073 | + sem->hp.cpu_task[get_partition(tsk)] = tsk; | ||
5074 | + | ||
5075 | + litmus->inherit_priority(sem, tsk); | ||
5076 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
5077 | + } | ||
5078 | + return suspended; | ||
5079 | +} | ||
5080 | + | ||
5081 | +static void do_fmlp_up(struct pi_semaphore* sem) | ||
5082 | +{ | ||
5083 | + unsigned long flags; | ||
5084 | + | ||
5085 | + spin_lock_irqsave(&sem->wait.lock, flags); | ||
5086 | + | ||
5087 | + TRACE_CUR("releases PI lock %p\n", sem); | ||
5088 | + litmus->return_priority(sem); | ||
5089 | + sem->holder = NULL; | ||
5090 | + if (atomic_inc_return(&sem->count) < 1) | ||
5091 | + /* there is a task queued */ | ||
5092 | + wake_up_locked(&sem->wait); | ||
5093 | + | ||
5094 | + spin_unlock_irqrestore(&sem->wait.lock, flags); | ||
5095 | +} | ||
5096 | + | ||
5097 | +asmlinkage long sys_fmlp_down(int sem_od) | ||
5098 | +{ | ||
5099 | + long ret = 0; | ||
5100 | + struct pi_semaphore * sem; | ||
5101 | + int suspended = 0; | ||
5102 | + | ||
5103 | + preempt_disable(); | ||
5104 | + TS_PI_DOWN_START; | ||
5105 | + | ||
5106 | + sem = lookup_fmlp_sem(sem_od); | ||
5107 | + if (sem) | ||
5108 | + suspended = do_fmlp_down(sem); | ||
5109 | + else | ||
5110 | + ret = -EINVAL; | ||
5111 | + | ||
5112 | + if (!suspended) { | ||
5113 | + TS_PI_DOWN_END; | ||
5114 | + preempt_enable(); | ||
5115 | + } | ||
5116 | + | ||
5117 | + return ret; | ||
5118 | +} | ||
5119 | + | ||
5120 | +asmlinkage long sys_fmlp_up(int sem_od) | ||
5121 | +{ | ||
5122 | + long ret = 0; | ||
5123 | + struct pi_semaphore * sem; | ||
5124 | + | ||
5125 | + preempt_disable(); | ||
5126 | + TS_PI_UP_START; | ||
5127 | + | ||
5128 | + sem = lookup_fmlp_sem(sem_od); | ||
5129 | + if (sem) | ||
5130 | + do_fmlp_up(sem); | ||
5131 | + else | ||
5132 | + ret = -EINVAL; | ||
5133 | + | ||
5134 | + | ||
5135 | + TS_PI_UP_END; | ||
5136 | + preempt_enable(); | ||
5137 | + | ||
5138 | + return ret; | ||
5139 | +} | ||
5140 | + | ||
5141 | +#else | ||
5142 | + | ||
5143 | +struct fdso_ops fmlp_sem_ops = {}; | ||
5144 | + | ||
5145 | +asmlinkage long sys_fmlp_down(int sem_od) | ||
5146 | +{ | ||
5147 | + return -ENOSYS; | ||
5148 | +} | ||
5149 | + | ||
5150 | +asmlinkage long sys_fmlp_up(int sem_od) | ||
5151 | +{ | ||
5152 | + return -ENOSYS; | ||
5153 | +} | ||
5154 | + | ||
5155 | +#endif | ||
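
A critical section under the FMLP then brackets the shared resource with the two syscalls above. A minimal sketch, assuming user-space wrappers named fmlp_down()/fmlp_up() exist (liblitmus provides the stable entry points):

    extern int fmlp_down(int sem_od);   /* assumed wrappers around the syscalls */
    extern int fmlp_up(int sem_od);

    void with_resource(int sem_od)
    {
        /* Contended calls suspend in FIFO order; the holder inherits the
         * highest priority among the queued tasks until fmlp_up(). */
        if (fmlp_down(sem_od) == 0) {
            /* ... access the shared resource ... */
            fmlp_up(sem_od);
        }
    }
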
5156 | diff --git a/litmus/ft_event.c b/litmus/ft_event.c | ||
5157 | new file mode 100644 | ||
5158 | index 0000000..399a07b | ||
5159 | --- /dev/null | ||
5160 | +++ b/litmus/ft_event.c | ||
5161 | @@ -0,0 +1,43 @@ | ||
5162 | +#include <linux/types.h> | ||
5163 | + | ||
5164 | +#include <litmus/feather_trace.h> | ||
5165 | + | ||
5166 | +#if !defined(CONFIG_ARCH_HAS_FEATHER_TRACE) || defined(CONFIG_DEBUG_RODATA) | ||
5167 | +/* provide dummy implementation */ | ||
5168 | + | ||
5169 | +int ft_events[MAX_EVENTS]; | ||
5170 | + | ||
5171 | +int ft_enable_event(unsigned long id) | ||
5172 | +{ | ||
5173 | + if (id < MAX_EVENTS) { | ||
5174 | + ft_events[id]++; | ||
5175 | + return 1; | ||
5176 | + } else | ||
5177 | + return 0; | ||
5178 | +} | ||
5179 | + | ||
5180 | +int ft_disable_event(unsigned long id) | ||
5181 | +{ | ||
5182 | + if (id < MAX_EVENTS && ft_events[id]) { | ||
5183 | + ft_events[id]--; | ||
5184 | + return 1; | ||
5185 | + } else | ||
5186 | + return 0; | ||
5187 | +} | ||
5188 | + | ||
5189 | +int ft_disable_all_events(void) | ||
5190 | +{ | ||
5191 | + int i; | ||
5192 | + | ||
5193 | + for (i = 0; i < MAX_EVENTS; i++) | ||
5194 | + ft_events[i] = 0; | ||
5195 | + | ||
5196 | + return MAX_EVENTS; | ||
5197 | +} | ||
5198 | + | ||
5199 | +int ft_is_event_enabled(unsigned long id) | ||
5200 | +{ | ||
5201 | + return id < MAX_EVENTS && ft_events[id]; | ||
5202 | +} | ||
5203 | + | ||
5204 | +#endif | ||
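
Note that the dummy implementation gives each event a reference count rather than a flag: enables nest, and an event stays active until every enable has been matched by a disable. A tiny model of that invariant:

    #include <assert.h>

    int main(void)
    {
        int count = 0;        /* plays the role of ft_events[id] for one id */

        count++;              /* ft_enable_event(id)      -> enabled        */
        count++;              /* a second, nested enable  -> enabled        */
        count--;              /* one ft_disable_event(id) -> still enabled  */
        assert(count == 1);   /* ft_is_event_enabled(id) remains true       */

        count--;              /* matching second disable                    */
        assert(count == 0);   /* only now is the event disabled             */
        return 0;
    }
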
5205 | diff --git a/litmus/ftdev.c b/litmus/ftdev.c | ||
5206 | new file mode 100644 | ||
5207 | index 0000000..4a4b2e3 | ||
5208 | --- /dev/null | ||
5209 | +++ b/litmus/ftdev.c | ||
5210 | @@ -0,0 +1,440 @@ | ||
5211 | +#include <linux/sched.h> | ||
5212 | +#include <linux/fs.h> | ||
5213 | +#include <linux/slab.h> | ||
5214 | +#include <linux/cdev.h> | ||
5215 | +#include <asm/uaccess.h> | ||
5216 | +#include <linux/module.h> | ||
5217 | +#include <linux/device.h> | ||
5218 | + | ||
5219 | +#include <litmus/litmus.h> | ||
5220 | +#include <litmus/feather_trace.h> | ||
5221 | +#include <litmus/ftdev.h> | ||
5222 | + | ||
5223 | +struct ft_buffer* alloc_ft_buffer(unsigned int count, size_t size) | ||
5224 | +{ | ||
5225 | + struct ft_buffer* buf; | ||
5226 | + size_t total = (size + 1) * count; | ||
5227 | + char* mem; | ||
5228 | + int order = 0, pages = 1; | ||
5229 | + | ||
5230 | + buf = kmalloc(sizeof(*buf), GFP_KERNEL); | ||
5231 | + if (!buf) | ||
5232 | + return NULL; | ||
5233 | + | ||
5234 | + total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0); | ||
5235 | + while (pages < total) { | ||
5236 | + order++; | ||
5237 | + pages *= 2; | ||
5238 | + } | ||
5239 | + | ||
5240 | + mem = (char*) __get_free_pages(GFP_KERNEL, order); | ||
5241 | + if (!mem) { | ||
5242 | + kfree(buf); | ||
5243 | + return NULL; | ||
5244 | + } | ||
5245 | + | ||
5246 | + if (!init_ft_buffer(buf, count, size, | ||
5247 | + mem + (count * size), /* markers at the end */ | ||
5248 | + mem)) { /* buffer objects */ | ||
5249 | + free_pages((unsigned long) mem, order); | ||
5250 | + kfree(buf); | ||
5251 | + return NULL; | ||
5252 | + } | ||
5253 | + return buf; | ||
5254 | +} | ||
5255 | + | ||
5256 | +void free_ft_buffer(struct ft_buffer* buf) | ||
5257 | +{ | ||
5258 | + int order = 0, pages = 1; | ||
5259 | + size_t total; | ||
5260 | + | ||
5261 | + if (buf) { | ||
5262 | + total = (buf->slot_size + 1) * buf->slot_count; | ||
5263 | + total = (total / PAGE_SIZE) + (total % PAGE_SIZE != 0); | ||
5264 | + while (pages < total) { | ||
5265 | + order++; | ||
5266 | + pages *= 2; | ||
5267 | + } | ||
5268 | + free_pages((unsigned long) buf->buffer_mem, order); | ||
5269 | + kfree(buf); | ||
5270 | + } | ||
5271 | +} | ||
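
The sizing arithmetic in alloc_ft_buffer() is worth a worked example: each slot needs its payload plus one status byte, the byte total is rounded up to whole pages, and the page count is then rounded up to a power of two because __get_free_pages() hands out power-of-two blocks. For instance, 1000 slots of 16 bytes:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t count = 1000, size = 16, page = 4096;   /* example parameters */
        size_t total = (size + 1) * count;             /* payload + status bytes */
        size_t needed = total / page + (total % page != 0);
        int order = 0;
        size_t pages = 1;

        while (pages < needed) {                       /* round up to 2^order */
            order++;
            pages *= 2;
        }
        /* prints: total=17000 needed=5 order=3 (i.e., 8 pages allocated) */
        printf("total=%zu needed=%zu order=%d\n", total, needed, order);
        return 0;
    }
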
5272 | + | ||
5273 | +struct ftdev_event { | ||
5274 | + int id; | ||
5275 | + struct ftdev_event* next; | ||
5276 | +}; | ||
5277 | + | ||
5278 | +static int activate(struct ftdev_event** chain, int id) | ||
5279 | +{ | ||
5280 | + struct ftdev_event* ev = kmalloc(sizeof(*ev), GFP_KERNEL); | ||
5281 | + if (ev) { | ||
5282 | + printk(KERN_INFO | ||
5283 | + "Enabling feather-trace event %d.\n", (int) id); | ||
5284 | + ft_enable_event(id); | ||
5285 | + ev->id = id; | ||
5286 | + ev->next = *chain; | ||
5287 | + *chain = ev; | ||
5288 | + } | ||
5289 | + return ev ? 0 : -ENOMEM; | ||
5290 | +} | ||
5291 | + | ||
5292 | +static void deactivate(struct ftdev_event** chain, int id) | ||
5293 | +{ | ||
5294 | + struct ftdev_event **cur = chain; | ||
5295 | + struct ftdev_event *nxt; | ||
5296 | + while (*cur) { | ||
5297 | + if ((*cur)->id == id) { | ||
5298 | + nxt = (*cur)->next; | ||
5299 | + kfree(*cur); | ||
5300 | + *cur = nxt; | ||
5301 | + printk(KERN_INFO | ||
5302 | + "Disabling feather-trace event %d.\n", (int) id); | ||
5303 | + ft_disable_event(id); | ||
5304 | + break; | ||
5305 | + } | ||
5306 | + cur = &(*cur)->next; | ||
5307 | + } | ||
5308 | +} | ||
5309 | + | ||
5310 | +static int ftdev_open(struct inode *in, struct file *filp) | ||
5311 | +{ | ||
5312 | + struct ftdev* ftdev; | ||
5313 | + struct ftdev_minor* ftdm; | ||
5314 | + unsigned int buf_idx = iminor(in); | ||
5315 | + int err = 0; | ||
5316 | + | ||
5317 | + ftdev = container_of(in->i_cdev, struct ftdev, cdev); | ||
5318 | + | ||
5319 | + if (buf_idx >= ftdev->minor_cnt) { | ||
5320 | + err = -ENODEV; | ||
5321 | + goto out; | ||
5322 | + } | ||
5323 | + if (ftdev->can_open && (err = ftdev->can_open(ftdev, buf_idx))) | ||
5324 | + goto out; | ||
5325 | + | ||
5326 | + ftdm = ftdev->minor + buf_idx; | ||
5327 | + filp->private_data = ftdm; | ||
5328 | + | ||
5329 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
5330 | + err = -ERESTARTSYS; | ||
5331 | + goto out; | ||
5332 | + } | ||
5333 | + | ||
5334 | + if (!ftdm->readers && ftdev->alloc) | ||
5335 | + err = ftdev->alloc(ftdev, buf_idx); | ||
5336 | + if (0 == err) | ||
5337 | + ftdm->readers++; | ||
5338 | + | ||
5339 | + mutex_unlock(&ftdm->lock); | ||
5340 | +out: | ||
5341 | + return err; | ||
5342 | +} | ||
5343 | + | ||
5344 | +static int ftdev_release(struct inode *in, struct file *filp) | ||
5345 | +{ | ||
5346 | + struct ftdev* ftdev; | ||
5347 | + struct ftdev_minor* ftdm; | ||
5348 | + unsigned int buf_idx = iminor(in); | ||
5349 | + int err = 0; | ||
5350 | + | ||
5351 | + ftdev = container_of(in->i_cdev, struct ftdev, cdev); | ||
5352 | + | ||
5353 | + if (buf_idx >= ftdev->minor_cnt) { | ||
5354 | + err = -ENODEV; | ||
5355 | + goto out; | ||
5356 | + } | ||
5357 | + ftdm = ftdev->minor + buf_idx; | ||
5358 | + | ||
5359 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
5360 | + err = -ERESTARTSYS; | ||
5361 | + goto out; | ||
5362 | + } | ||
5363 | + | ||
5364 | + if (ftdm->readers == 1) { | ||
5365 | + while (ftdm->events) | ||
5366 | + deactivate(&ftdm->events, ftdm->events->id); | ||
5367 | + | ||
5368 | + /* wait for any pending events to complete */ | ||
5369 | + set_current_state(TASK_UNINTERRUPTIBLE); | ||
5370 | + schedule_timeout(HZ); | ||
5371 | + | ||
5372 | + printk(KERN_ALERT "Failed trace writes: %u\n", | ||
5373 | + ftdm->buf->failed_writes); | ||
5374 | + | ||
5375 | + if (ftdev->free) | ||
5376 | + ftdev->free(ftdev, buf_idx); | ||
5377 | + } | ||
5378 | + | ||
5379 | + ftdm->readers--; | ||
5380 | + mutex_unlock(&ftdm->lock); | ||
5381 | +out: | ||
5382 | + return err; | ||
5383 | +} | ||
5384 | + | ||
5385 | +/* based on ft_buffer_read | ||
5386 | + * @returns < 0 : page fault | ||
5387 | + * = 0 : no data available | ||
5388 | + * = 1 : one slot copied | ||
5389 | + */ | ||
5390 | +static int ft_buffer_copy_to_user(struct ft_buffer* buf, char __user *dest) | ||
5391 | +{ | ||
5392 | + unsigned int idx; | ||
5393 | + int err = 0; | ||
5394 | + if (buf->free_count != buf->slot_count) { | ||
5395 | + /* data available */ | ||
5396 | + idx = buf->read_idx % buf->slot_count; | ||
5397 | + if (buf->slots[idx] == SLOT_READY) { | ||
5398 | + err = copy_to_user(dest, ((char*) buf->buffer_mem) + | ||
5399 | + idx * buf->slot_size, | ||
5400 | + buf->slot_size); | ||
5401 | + if (err == 0) { | ||
5402 | + /* copy ok */ | ||
5403 | + buf->slots[idx] = SLOT_FREE; | ||
5404 | + buf->read_idx++; | ||
5405 | + fetch_and_inc(&buf->free_count); | ||
5406 | + err = 1; | ||
5407 | + } | ||
5408 | + } | ||
5409 | + } | ||
5410 | + return err; | ||
5411 | +} | ||
5412 | + | ||
5413 | +static ssize_t ftdev_read(struct file *filp, | ||
5414 | + char __user *to, size_t len, loff_t *f_pos) | ||
5415 | +{ | ||
5416 | + /* we ignore f_pos, this is strictly sequential */ | ||
5417 | + | ||
5418 | + ssize_t err = 0; | ||
5419 | + size_t chunk; | ||
5420 | + int copied; | ||
5421 | + struct ftdev_minor* ftdm = filp->private_data; | ||
5422 | + | ||
5423 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
5424 | + err = -ERESTARTSYS; | ||
5425 | + goto out; | ||
5426 | + } | ||
5427 | + | ||
5428 | + | ||
5429 | + chunk = ftdm->buf->slot_size; | ||
5430 | + while (len >= chunk) { | ||
5431 | + copied = ft_buffer_copy_to_user(ftdm->buf, to); | ||
5432 | + if (copied == 1) { | ||
5433 | + len -= chunk; | ||
5434 | + to += chunk; | ||
5435 | + err += chunk; | ||
5436 | + } else if (err == 0 && copied == 0 && ftdm->events) { | ||
5437 | + /* Only wait if there are any events enabled and only | ||
5438 | + * if we haven't copied some data yet. We cannot wait | ||
5439 | + * here with copied data because that data would get | ||
5440 | + * lost if the task is interrupted (e.g., killed). | ||
5441 | + */ | ||
5442 | + set_current_state(TASK_INTERRUPTIBLE); | ||
5443 | + schedule_timeout(50); | ||
5444 | + if (signal_pending(current)) { | ||
5445 | + if (err == 0) | ||
5446 | + /* nothing read yet, signal problem */ | ||
5447 | + err = -ERESTARTSYS; | ||
5448 | + break; | ||
5449 | + } | ||
5450 | + } else if (copied < 0) { | ||
5451 | + /* page fault */ | ||
5452 | + err = copied; | ||
5453 | + break; | ||
5454 | + } else | ||
5455 | + /* nothing left to get, return to user space */ | ||
5456 | + break; | ||
5457 | + } | ||
5458 | + mutex_unlock(&ftdm->lock); | ||
5459 | +out: | ||
5460 | + return err; | ||
5461 | +} | ||
5462 | + | ||
5463 | +typedef uint32_t cmd_t; | ||
5464 | + | ||
5465 | +static ssize_t ftdev_write(struct file *filp, const char __user *from, | ||
5466 | + size_t len, loff_t *f_pos) | ||
5467 | +{ | ||
5468 | + struct ftdev_minor* ftdm = filp->private_data; | ||
5469 | + ssize_t err = -EINVAL; | ||
5470 | + cmd_t cmd; | ||
5471 | + cmd_t id; | ||
5472 | + | ||
5473 | + if (len % sizeof(cmd) || len < 2 * sizeof(cmd)) | ||
5474 | + goto out; | ||
5475 | + | ||
5476 | + if (copy_from_user(&cmd, from, sizeof(cmd))) { | ||
5477 | + err = -EFAULT; | ||
5478 | + goto out; | ||
5479 | + } | ||
5480 | + len -= sizeof(cmd); | ||
5481 | + from += sizeof(cmd); | ||
5482 | + | ||
5483 | + if (cmd != FTDEV_ENABLE_CMD && cmd != FTDEV_DISABLE_CMD) | ||
5484 | + goto out; | ||
5485 | + | ||
5486 | + if (mutex_lock_interruptible(&ftdm->lock)) { | ||
5487 | + err = -ERESTARTSYS; | ||
5488 | + goto out; | ||
5489 | + } | ||
5490 | + | ||
5491 | + err = sizeof(cmd); | ||
5492 | + while (len) { | ||
5493 | + if (copy_from_user(&id, from, sizeof(cmd))) { | ||
5494 | + err = -EFAULT; | ||
5495 | + goto out_unlock; | ||
5496 | + } | ||
5497 | + /* FIXME: check id against list of acceptable events */ | ||
5498 | + len -= sizeof(cmd); | ||
5499 | + from += sizeof(cmd); | ||
5500 | + if (cmd == FTDEV_DISABLE_CMD) | ||
5501 | + deactivate(&ftdm->events, id); | ||
5502 | + else if (activate(&ftdm->events, id) != 0) { | ||
5503 | + err = -ENOMEM; | ||
5504 | + goto out_unlock; | ||
5505 | + } | ||
5506 | + err += sizeof(cmd); | ||
5507 | + } | ||
5508 | + | ||
5509 | +out_unlock: | ||
5510 | + mutex_unlock(&ftdm->lock); | ||
5511 | +out: | ||
5512 | + return err; | ||
5513 | +} | ||
5514 | + | ||
5515 | +struct file_operations ftdev_fops = { | ||
5516 | + .owner = THIS_MODULE, | ||
5517 | + .open = ftdev_open, | ||
5518 | + .release = ftdev_release, | ||
5519 | + .write = ftdev_write, | ||
5520 | + .read = ftdev_read, | ||
5521 | +}; | ||
5522 | + | ||
5523 | +int ftdev_init( struct ftdev* ftdev, struct module* owner, | ||
5524 | + const int minor_cnt, const char* name) | ||
5525 | +{ | ||
5526 | + int i, err; | ||
5527 | + | ||
5528 | + BUG_ON(minor_cnt < 1); | ||
5529 | + | ||
5530 | + cdev_init(&ftdev->cdev, &ftdev_fops); | ||
5531 | + ftdev->name = name; | ||
5532 | + ftdev->minor_cnt = minor_cnt; | ||
5533 | + ftdev->cdev.owner = owner; | ||
5534 | + ftdev->cdev.ops = &ftdev_fops; | ||
5535 | + ftdev->alloc = NULL; | ||
5536 | + ftdev->free = NULL; | ||
5537 | + ftdev->can_open = NULL; | ||
5538 | + | ||
5539 | + ftdev->minor = kcalloc(ftdev->minor_cnt, sizeof(*ftdev->minor), | ||
5540 | + GFP_KERNEL); | ||
5541 | + if (!ftdev->minor) { | ||
5542 | + printk(KERN_WARNING "ftdev(%s): Could not allocate memory\n", | ||
5543 | + ftdev->name); | ||
5544 | + err = -ENOMEM; | ||
5545 | + goto err_out; | ||
5546 | + } | ||
5547 | + | ||
5548 | + for (i = 0; i < ftdev->minor_cnt; i++) { | ||
5549 | + mutex_init(&ftdev->minor[i].lock); | ||
5550 | + ftdev->minor[i].readers = 0; | ||
5551 | + ftdev->minor[i].buf = NULL; | ||
5552 | + ftdev->minor[i].events = NULL; | ||
5553 | + } | ||
5554 | + | ||
5555 | + ftdev->class = class_create(owner, ftdev->name); | ||
5556 | + if (IS_ERR(ftdev->class)) { | ||
5557 | + err = PTR_ERR(ftdev->class); | ||
5558 | + printk(KERN_WARNING "ftdev(%s): " | ||
5559 | + "Could not create device class.\n", ftdev->name); | ||
5560 | + goto err_dealloc; | ||
5561 | + } | ||
5562 | + | ||
5563 | + return 0; | ||
5564 | + | ||
5565 | +err_dealloc: | ||
5566 | + kfree(ftdev->minor); | ||
5567 | +err_out: | ||
5568 | + return err; | ||
5569 | +} | ||
5570 | + | ||
5571 | +/* | ||
5572 | + * Destroy minor devices up to, but not including, up_to. | ||
5573 | + */ | ||
5574 | +static void ftdev_device_destroy(struct ftdev* ftdev, unsigned int up_to) | ||
5575 | +{ | ||
5576 | + dev_t minor_cntr; | ||
5577 | + | ||
5578 | + if (up_to < 1) | ||
5579 | + up_to = (ftdev->minor_cnt < 1) ? 0 : ftdev->minor_cnt; | ||
5580 | + | ||
5581 | + for (minor_cntr = 0; minor_cntr < up_to; ++minor_cntr) | ||
5582 | + device_destroy(ftdev->class, MKDEV(ftdev->major, minor_cntr)); | ||
5583 | +} | ||
5584 | + | ||
5585 | +void ftdev_exit(struct ftdev* ftdev) | ||
5586 | +{ | ||
5587 | + printk("ftdev(%s): Exiting\n", ftdev->name); | ||
5588 | +	ftdev_device_destroy(ftdev, 0); /* 0 means "all minors" here */ | ||
5589 | + cdev_del(&ftdev->cdev); | ||
5590 | + unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt); | ||
5591 | + class_destroy(ftdev->class); | ||
5592 | + kfree(ftdev->minor); | ||
5593 | +} | ||
5594 | + | ||
5595 | +int register_ftdev(struct ftdev* ftdev) | ||
5596 | +{ | ||
5597 | + struct device **device; | ||
5598 | + dev_t trace_dev_tmp, minor_cntr; | ||
5599 | + int err; | ||
5600 | + | ||
5601 | + err = alloc_chrdev_region(&trace_dev_tmp, 0, ftdev->minor_cnt, | ||
5602 | + ftdev->name); | ||
5603 | + if (err) { | ||
5604 | + printk(KERN_WARNING "ftdev(%s): " | ||
5605 | + "Could not allocate char. device region (%d minors)\n", | ||
5606 | + ftdev->name, ftdev->minor_cnt); | ||
5607 | + goto err_out; | ||
5608 | + } | ||
5609 | + | ||
5610 | + ftdev->major = MAJOR(trace_dev_tmp); | ||
5611 | + | ||
5612 | + err = cdev_add(&ftdev->cdev, trace_dev_tmp, ftdev->minor_cnt); | ||
5613 | + if (err) { | ||
5614 | + printk(KERN_WARNING "ftdev(%s): " | ||
5615 | + "Could not add cdev for major %u with %u minor(s).\n", | ||
5616 | + ftdev->name, ftdev->major, ftdev->minor_cnt); | ||
5617 | + goto err_unregister; | ||
5618 | + } | ||
5619 | + | ||
5620 | + /* create the minor device(s) */ | ||
5621 | + for (minor_cntr = 0; minor_cntr < ftdev->minor_cnt; ++minor_cntr) | ||
5622 | + { | ||
5623 | + trace_dev_tmp = MKDEV(ftdev->major, minor_cntr); | ||
5624 | + device = &ftdev->minor[minor_cntr].device; | ||
5625 | + | ||
5626 | + *device = device_create(ftdev->class, NULL, trace_dev_tmp, NULL, | ||
5627 | + "litmus/%s%d", ftdev->name, minor_cntr); | ||
5628 | + if (IS_ERR(*device)) { | ||
5629 | + err = PTR_ERR(*device); | ||
5630 | + printk(KERN_WARNING "ftdev(%s): " | ||
5631 | + "Could not create device major/minor number " | ||
5632 | + "%u/%u\n", ftdev->name, ftdev->major, | ||
5633 | + minor_cntr); | ||
5634 | + printk(KERN_WARNING "ftdev(%s): " | ||
5635 | + "will attempt deletion of allocated devices.\n", | ||
5636 | + ftdev->name); | ||
5637 | + goto err_minors; | ||
5638 | + } | ||
5639 | + } | ||
5640 | + | ||
5641 | + return 0; | ||
5642 | + | ||
5643 | +err_minors: | ||
5644 | + ftdev_device_destroy(ftdev, minor_cntr); | ||
5645 | + cdev_del(&ftdev->cdev); | ||
5646 | +err_unregister: | ||
5647 | + unregister_chrdev_region(MKDEV(ftdev->major, 0), ftdev->minor_cnt); | ||
5648 | +err_out: | ||
5649 | + return err; | ||
5650 | +} | ||
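
The control protocol that ftdev_write() implements is a flat stream of 32-bit words: one command word (enable or disable), followed by one or more event ids. A hedged user-space sketch; the device path and the FTDEV_ENABLE_CMD value are placeholders (the real constants live in litmus/ftdev.h):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    #define FTDEV_ENABLE_CMD 0   /* placeholder; see litmus/ftdev.h */

    int enable_events(const char *dev, const uint32_t *ids, int n)
    {
        uint32_t buf[1 + 8];
        int fd, i;

        if (n < 1 || n > 8)      /* ftdev_write() wants >= 2 words total */
            return -1;

        fd = open(dev, O_RDWR);
        if (fd < 0)
            return -1;

        buf[0] = FTDEV_ENABLE_CMD;            /* command word */
        for (i = 0; i < n; i++)
            buf[1 + i] = ids[i];              /* event ids to activate */

        /* length must be a multiple of the 4-byte word size */
        if (write(fd, buf, (1 + n) * sizeof(uint32_t)) < 0) {
            close(fd);
            return -1;
        }
        /* keep fd open: the last release disables all remaining events */
        return fd;
    }
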
5651 | diff --git a/litmus/jobs.c b/litmus/jobs.c | ||
5652 | new file mode 100644 | ||
5653 | index 0000000..36e3146 | ||
5654 | --- /dev/null | ||
5655 | +++ b/litmus/jobs.c | ||
5656 | @@ -0,0 +1,43 @@ | ||
5657 | +/* litmus/jobs.c - common job control code | ||
5658 | + */ | ||
5659 | + | ||
5660 | +#include <linux/sched.h> | ||
5661 | + | ||
5662 | +#include <litmus/litmus.h> | ||
5663 | +#include <litmus/jobs.h> | ||
5664 | + | ||
5665 | +void prepare_for_next_period(struct task_struct *t) | ||
5666 | +{ | ||
5667 | + BUG_ON(!t); | ||
5668 | + /* prepare next release */ | ||
5669 | + t->rt_param.job_params.release = t->rt_param.job_params.deadline; | ||
5670 | + t->rt_param.job_params.deadline += get_rt_period(t); | ||
5671 | + t->rt_param.job_params.exec_time = 0; | ||
5672 | + /* update job sequence number */ | ||
5673 | + t->rt_param.job_params.job_no++; | ||
5674 | + | ||
5675 | + /* don't confuse Linux */ | ||
5676 | + t->rt.time_slice = 1; | ||
5677 | +} | ||
5678 | + | ||
5679 | +void release_at(struct task_struct *t, lt_t start) | ||
5680 | +{ | ||
5681 | + t->rt_param.job_params.deadline = start; | ||
5682 | + prepare_for_next_period(t); | ||
5683 | + set_rt_flags(t, RT_F_RUNNING); | ||
5684 | +} | ||
5685 | + | ||
5686 | + | ||
5687 | +/* | ||
5688 | + * Deactivate current task until the beginning of the next period. | ||
5689 | + */ | ||
5690 | +long complete_job(void) | ||
5691 | +{ | ||
5692 | +	/* Mark that we do not execute anymore */ | ||
5693 | + set_rt_flags(current, RT_F_SLEEP); | ||
5694 | +	/* Call schedule; this will return when a new job arrives. | ||
5695 | +	 * It also takes care of preparing for the next release. | ||
5696 | + */ | ||
5697 | + schedule(); | ||
5698 | + return 0; | ||
5699 | +} | ||
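
The arithmetic in prepare_for_next_period() is the textbook sporadic-job update: the next release is the old deadline, and the new deadline lies one period further out. A short trace for an implicit-deadline task with period 10 (arbitrary time units):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long release = 0, deadline = 10, period = 10;
        unsigned int job_no = 1;
        int i;

        for (i = 0; i < 3; i++) {
            release  = deadline;       /* next release = previous deadline */
            deadline += period;        /* deadline moves one period ahead  */
            job_no++;
            printf("job %u: release=%llu deadline=%llu\n",
                   job_no, release, deadline);
        }
        return 0;   /* jobs 2..4 at (10,20), (20,30), (30,40) */
    }
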
5700 | diff --git a/litmus/litmus.c b/litmus/litmus.c | ||
5701 | new file mode 100644 | ||
5702 | index 0000000..8efd3f9 | ||
5703 | --- /dev/null | ||
5704 | +++ b/litmus/litmus.c | ||
5705 | @@ -0,0 +1,547 @@ | ||
5706 | +/* | ||
5707 | + * litmus.c -- Implementation of the LITMUS syscalls, | ||
5708 | + *             the LITMUS initialization code, | ||
5709 | + *             and the procfs interface. | ||
5710 | + */ | ||
5711 | +#include <asm/uaccess.h> | ||
5712 | +#include <linux/uaccess.h> | ||
5713 | +#include <linux/sysrq.h> | ||
5714 | +#include <linux/sched.h> | ||
5715 | +#include <linux/module.h> | ||
5716 | +#include <linux/slab.h> | ||
5717 | + | ||
5718 | +#include <litmus/litmus.h> | ||
5719 | +#include <litmus/bheap.h> | ||
5720 | +#include <litmus/trace.h> | ||
5721 | +#include <litmus/rt_domain.h> | ||
5722 | +#include <litmus/litmus_proc.h> | ||
5723 | +#include <litmus/sched_trace.h> | ||
5724 | + | ||
5725 | +/* Number of RT tasks that exist in the system */ | ||
5726 | +atomic_t rt_task_count = ATOMIC_INIT(0); | ||
5727 | +static DEFINE_RAW_SPINLOCK(task_transition_lock); | ||
5728 | +/* synchronize plugin switching */ | ||
5729 | +atomic_t cannot_use_plugin = ATOMIC_INIT(0); | ||
5730 | + | ||
5731 | +/* Give log messages sequential IDs. */ | ||
5732 | +atomic_t __log_seq_no = ATOMIC_INIT(0); | ||
5733 | + | ||
5734 | +#ifdef CONFIG_RELEASE_MASTER | ||
5735 | +/* current master CPU for handling timer IRQs */ | ||
5736 | +atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU); | ||
5737 | +#endif | ||
5738 | + | ||
5739 | +static struct kmem_cache * bheap_node_cache; | ||
5740 | +extern struct kmem_cache * release_heap_cache; | ||
5741 | + | ||
5742 | +struct bheap_node* bheap_node_alloc(int gfp_flags) | ||
5743 | +{ | ||
5744 | + return kmem_cache_alloc(bheap_node_cache, gfp_flags); | ||
5745 | +} | ||
5746 | + | ||
5747 | +void bheap_node_free(struct bheap_node* hn) | ||
5748 | +{ | ||
5749 | + kmem_cache_free(bheap_node_cache, hn); | ||
5750 | +} | ||
5751 | + | ||
5752 | +struct release_heap* release_heap_alloc(int gfp_flags); | ||
5753 | +void release_heap_free(struct release_heap* rh); | ||
5754 | + | ||
5755 | +/* | ||
5756 | + * sys_set_rt_task_param | ||
5757 | + * @pid: Pid of the task whose scheduling parameters are to be changed | ||
5758 | + * @param: New real-time extension parameters such as the execution cost and | ||
5759 | + *         period | ||
5760 | + * Syscall for manipulating a task's RT extension parameters | ||
5761 | + * Returns EFAULT if param is NULL. | ||
5762 | + *         ESRCH  if pid does not correspond | ||
5763 | + *         to a valid task. | ||
5764 | + *         EINVAL if either period or execution cost is <=0 | ||
5765 | + *         EBUSY  if pid refers to a task that is already real-time | ||
5766 | + *         0      if success | ||
5767 | + * | ||
5768 | + * Only non-real-time tasks may be configured with this system call | ||
5769 | + * to avoid races with the scheduler. In practice, this means that a | ||
5770 | + * task's parameters must be set _before_ calling sys_prepare_rt_task() | ||
5771 | + * | ||
5772 | + * find_task_by_vpid() assumes that we are in the same namespace of the | ||
5773 | + * target. | ||
5774 | + */ | ||
5775 | +asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | ||
5776 | +{ | ||
5777 | + struct rt_task tp; | ||
5778 | + struct task_struct *target; | ||
5779 | + int retval = -EINVAL; | ||
5780 | + | ||
5781 | + printk("Setting up rt task parameters for process %d.\n", pid); | ||
5782 | + | ||
5783 | + if (pid < 0 || param == 0) { | ||
5784 | + goto out; | ||
5785 | + } | ||
5786 | + if (copy_from_user(&tp, param, sizeof(tp))) { | ||
5787 | + retval = -EFAULT; | ||
5788 | + goto out; | ||
5789 | + } | ||
5790 | + | ||
5791 | + /* Task search and manipulation must be protected */ | ||
5792 | + read_lock_irq(&tasklist_lock); | ||
5793 | + if (!(target = find_task_by_vpid(pid))) { | ||
5794 | + retval = -ESRCH; | ||
5795 | + goto out_unlock; | ||
5796 | + } | ||
5797 | + | ||
5798 | + if (is_realtime(target)) { | ||
5799 | + /* The task is already a real-time task. | ||
5800 | +		 * We cannot allow parameter changes at this point. | ||
5801 | + */ | ||
5802 | + retval = -EBUSY; | ||
5803 | + goto out_unlock; | ||
5804 | + } | ||
5805 | + | ||
5806 | + if (tp.exec_cost <= 0) | ||
5807 | + goto out_unlock; | ||
5808 | + if (tp.period <= 0) | ||
5809 | + goto out_unlock; | ||
5810 | + if (!cpu_online(tp.cpu)) | ||
5811 | + goto out_unlock; | ||
5812 | + if (tp.period < tp.exec_cost) | ||
5813 | + { | ||
5814 | + printk(KERN_INFO "litmus: real-time task %d rejected " | ||
5815 | + "because wcet > period\n", pid); | ||
5816 | + goto out_unlock; | ||
5817 | + } | ||
5818 | + if (tp.budget_policy != NO_ENFORCEMENT && | ||
5819 | + tp.budget_policy != QUANTUM_ENFORCEMENT && | ||
5820 | + tp.budget_policy != PRECISE_ENFORCEMENT) | ||
5821 | + { | ||
5822 | + printk(KERN_INFO "litmus: real-time task %d rejected " | ||
5823 | + "because unsupported budget enforcement policy " | ||
5824 | + "specified (%d)\n", | ||
5825 | + pid, tp.budget_policy); | ||
5826 | + goto out_unlock; | ||
5827 | + } | ||
5828 | + | ||
5829 | + target->rt_param.task_params = tp; | ||
5830 | + | ||
5831 | + retval = 0; | ||
5832 | + out_unlock: | ||
5833 | + read_unlock_irq(&tasklist_lock); | ||
5834 | + out: | ||
5835 | + return retval; | ||
5836 | +} | ||
5837 | + | ||
5838 | +/* | ||
5839 | + * Getter of task's RT params | ||
5840 | + * returns EINVAL if param is NULL or pid is negative | ||
5841 | + * returns ESRCH if pid does not correspond to a valid task | ||
5842 | + * returns EFAULT if copying of parameters has failed. | ||
5843 | + * | ||
5844 | + * find_task_by_vpid() assumes that we are in the same namespace of the | ||
5845 | + * target. | ||
5846 | + */ | ||
5847 | +asmlinkage long sys_get_rt_task_param(pid_t pid, struct rt_task __user * param) | ||
5848 | +{ | ||
5849 | + int retval = -EINVAL; | ||
5850 | + struct task_struct *source; | ||
5851 | + struct rt_task lp; | ||
5852 | + if (param == 0 || pid < 0) | ||
5853 | + goto out; | ||
5854 | + read_lock(&tasklist_lock); | ||
5855 | + if (!(source = find_task_by_vpid(pid))) { | ||
5856 | + retval = -ESRCH; | ||
5857 | + goto out_unlock; | ||
5858 | + } | ||
5859 | + lp = source->rt_param.task_params; | ||
5860 | + read_unlock(&tasklist_lock); | ||
5861 | + /* Do copying outside the lock */ | ||
5862 | + retval = | ||
5863 | + copy_to_user(param, &lp, sizeof(lp)) ? -EFAULT : 0; | ||
5864 | + return retval; | ||
5865 | + out_unlock: | ||
5866 | + read_unlock(&tasklist_lock); | ||
5867 | + out: | ||
5868 | + return retval; | ||
5869 | + | ||
5870 | +} | ||
5871 | + | ||
5872 | +/* | ||
5873 | + * This is the crucial function for the periodic task implementation. | ||
5874 | + * It checks if a task is periodic, checks whether such a sleep | ||
5875 | + * is permitted, and calls the plugin-specific sleep, which puts the | ||
5876 | + * task into a wait array. | ||
5877 | + * returns 0 on successful wakeup | ||
5878 | + * returns EPERM if current conditions do not permit such sleep | ||
5879 | + * returns EINVAL if current task is not able to go to sleep | ||
5880 | + */ | ||
5881 | +asmlinkage long sys_complete_job(void) | ||
5882 | +{ | ||
5883 | + int retval = -EPERM; | ||
5884 | + if (!is_realtime(current)) { | ||
5885 | + retval = -EINVAL; | ||
5886 | + goto out; | ||
5887 | + } | ||
5888 | + /* Task with negative or zero period cannot sleep */ | ||
5889 | + if (get_rt_period(current) <= 0) { | ||
5890 | + retval = -EINVAL; | ||
5891 | + goto out; | ||
5892 | + } | ||
5893 | + /* The plugin has to put the task into an | ||
5894 | + * appropriate queue and call schedule | ||
5895 | + */ | ||
5896 | + retval = litmus->complete_job(); | ||
5897 | + out: | ||
5898 | + return retval; | ||
5899 | +} | ||
5900 | + | ||
5901 | +/* This is an "improved" version of sys_complete_job that | ||
5902 | + * addresses the problem of unintentionally missing a job after | ||
5903 | + * an overrun. | ||
5904 | + * | ||
5905 | + * returns 0 on successful wakeup | ||
5906 | + * returns EPERM if current conditions do not permit such sleep | ||
5907 | + * returns EINVAL if current task is not able to go to sleep | ||
5908 | + */ | ||
5909 | +asmlinkage long sys_wait_for_job_release(unsigned int job) | ||
5910 | +{ | ||
5911 | + int retval = -EPERM; | ||
5912 | + if (!is_realtime(current)) { | ||
5913 | + retval = -EINVAL; | ||
5914 | + goto out; | ||
5915 | + } | ||
5916 | + | ||
5917 | + /* Task with negative or zero period cannot sleep */ | ||
5918 | + if (get_rt_period(current) <= 0) { | ||
5919 | + retval = -EINVAL; | ||
5920 | + goto out; | ||
5921 | + } | ||
5922 | + | ||
5923 | + retval = 0; | ||
5924 | + | ||
5925 | + /* first wait until we have "reached" the desired job | ||
5926 | + * | ||
5927 | + * This implementation has at least two problems: | ||
5928 | + * | ||
5929 | + * 1) It doesn't gracefully handle the wrap around of | ||
5930 | + * job_no. Since LITMUS is a prototype, this is not much | ||
5931 | + * of a problem right now. | ||
5932 | + * | ||
5933 | + * 2) It is theoretically racy if a job release occurs | ||
5934 | + * between checking job_no and calling sleep_next_period(). | ||
5935 | +	 * A proper solution would require adding another callback | ||
5936 | + * in the plugin structure and testing the condition with | ||
5937 | + * interrupts disabled. | ||
5938 | + * | ||
5939 | + * FIXME: At least problem 2 should be taken care of eventually. | ||
5940 | + */ | ||
5941 | + while (!retval && job > current->rt_param.job_params.job_no) | ||
5942 | + /* If the last job overran then job <= job_no and we | ||
5943 | + * don't send the task to sleep. | ||
5944 | + */ | ||
5945 | + retval = litmus->complete_job(); | ||
5946 | + out: | ||
5947 | + return retval; | ||
5948 | +} | ||
5949 | + | ||
5950 | +/* This is a helper syscall to query the current job sequence number. | ||
5951 | + * | ||
5952 | + * returns 0 on successful query | ||
5953 | + * returns EPERM if task is not a real-time task. | ||
5954 | + * returns EFAULT if job is not a valid pointer. | ||
5955 | + */ | ||
5956 | +asmlinkage long sys_query_job_no(unsigned int __user *job) | ||
5957 | +{ | ||
5958 | + int retval = -EPERM; | ||
5959 | + if (is_realtime(current)) | ||
5960 | + retval = put_user(current->rt_param.job_params.job_no, job); | ||
5961 | + | ||
5962 | + return retval; | ||
5963 | +} | ||
5964 | + | ||
5965 | +/* sys_null_call() is only used for determining raw system call | ||
5966 | + * overheads (kernel entry, kernel exit). It has no useful side effects. | ||
5967 | + * If ts is non-NULL, then the current Feather-Trace time is recorded. | ||
5968 | + */ | ||
5969 | +asmlinkage long sys_null_call(cycles_t __user *ts) | ||
5970 | +{ | ||
5971 | + long ret = 0; | ||
5972 | + cycles_t now; | ||
5973 | + | ||
5974 | + if (ts) { | ||
5975 | + now = get_cycles(); | ||
5976 | + ret = put_user(now, ts); | ||
5977 | + } | ||
5978 | + | ||
5979 | + return ret; | ||
5980 | +} | ||
5981 | + | ||
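The intended measurement pattern, as a sketch: null_call() is assumed to be the liblitmus wrapper for this syscall, and read_tsc() a hypothetical userspace cycle-counter read (e.g., rdtsc on x86):

    #include <litmus.h>   /* liblitmus; provides cycles_t and null_call() */

    extern cycles_t read_tsc(void);   /* hypothetical userspace TSC read */

    static void measure_null_call(void)
    {
    	cycles_t before, in_kernel, after;

    	before = read_tsc();
    	null_call(&in_kernel);   /* kernel stores its own timestamp */
    	after = read_tsc();
    	/* in_kernel - before approximates kernel-entry overhead;
    	 * after - in_kernel approximates kernel-exit overhead. */
    }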
5982 | +/* p is a real-time task. Re-init its state as a best-effort task. */ | ||
5983 | +static void reinit_litmus_state(struct task_struct* p, int restore) | ||
5984 | +{ | ||
5985 | + struct rt_task user_config = {}; | ||
5986 | + void* ctrl_page = NULL; | ||
5987 | + | ||
5988 | + if (restore) { | ||
5989 | +		/* Save the user-space provided configuration data | ||
5990 | +		 * and the allocated control page. */ | ||
5991 | + user_config = p->rt_param.task_params; | ||
5992 | + ctrl_page = p->rt_param.ctrl_page; | ||
5993 | + } | ||
5994 | + | ||
5995 | + /* We probably should not be inheriting any task's priority | ||
5996 | + * at this point in time. | ||
5997 | + */ | ||
5998 | + WARN_ON(p->rt_param.inh_task); | ||
5999 | + | ||
6000 | + /* We need to restore the priority of the task. */ | ||
6001 | +// __setscheduler(p, p->rt_param.old_policy, p->rt_param.old_prio); XXX why is this commented? | ||
6002 | + | ||
6003 | + /* Cleanup everything else. */ | ||
6004 | + memset(&p->rt_param, 0, sizeof(p->rt_param)); | ||
6005 | + | ||
6006 | + /* Restore preserved fields. */ | ||
6007 | + if (restore) { | ||
6008 | + p->rt_param.task_params = user_config; | ||
6009 | + p->rt_param.ctrl_page = ctrl_page; | ||
6010 | + } | ||
6011 | +} | ||
6012 | + | ||
6013 | +long litmus_admit_task(struct task_struct* tsk) | ||
6014 | +{ | ||
6015 | + long retval = 0; | ||
6016 | + unsigned long flags; | ||
6017 | + | ||
6018 | + BUG_ON(is_realtime(tsk)); | ||
6019 | + | ||
6020 | + if (get_rt_period(tsk) == 0 || | ||
6021 | + get_exec_cost(tsk) > get_rt_period(tsk)) { | ||
6022 | + TRACE_TASK(tsk, "litmus admit: invalid task parameters " | ||
6023 | + "(%lu, %lu)\n", | ||
6024 | + get_exec_cost(tsk), get_rt_period(tsk)); | ||
6025 | + retval = -EINVAL; | ||
6026 | + goto out; | ||
6027 | + } | ||
6028 | + | ||
6029 | + if (!cpu_online(get_partition(tsk))) { | ||
6030 | + TRACE_TASK(tsk, "litmus admit: cpu %d is not online\n", | ||
6031 | + get_partition(tsk)); | ||
6032 | + retval = -EINVAL; | ||
6033 | + goto out; | ||
6034 | + } | ||
6035 | + | ||
6036 | + INIT_LIST_HEAD(&tsk_rt(tsk)->list); | ||
6037 | + | ||
6038 | + /* avoid scheduler plugin changing underneath us */ | ||
6039 | + raw_spin_lock_irqsave(&task_transition_lock, flags); | ||
6040 | + | ||
6041 | + /* allocate heap node for this task */ | ||
6042 | + tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC); | ||
6043 | + tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC); | ||
6044 | + | ||
6045 | + if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) { | ||
6046 | + printk(KERN_WARNING "litmus: no more heap node memory!?\n"); | ||
6047 | + | ||
6048 | + bheap_node_free(tsk_rt(tsk)->heap_node); | ||
6049 | + release_heap_free(tsk_rt(tsk)->rel_heap); | ||
6050 | + | ||
6051 | + retval = -ENOMEM; | ||
6052 | + goto out_unlock; | ||
6053 | + } else { | ||
6054 | + bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | ||
6055 | + } | ||
6056 | + | ||
6057 | + retval = litmus->admit_task(tsk); | ||
6058 | + | ||
6059 | + if (!retval) { | ||
6060 | + sched_trace_task_name(tsk); | ||
6061 | + sched_trace_task_param(tsk); | ||
6062 | + atomic_inc(&rt_task_count); | ||
6063 | + } | ||
6064 | + | ||
6065 | +out_unlock: | ||
6066 | + raw_spin_unlock_irqrestore(&task_transition_lock, flags); | ||
6067 | +out: | ||
6068 | + return retval; | ||
6069 | +} | ||
6070 | + | ||
6071 | +void litmus_exit_task(struct task_struct* tsk) | ||
6072 | +{ | ||
6073 | + if (is_realtime(tsk)) { | ||
6074 | + sched_trace_task_completion(tsk, 1); | ||
6075 | + | ||
6076 | + litmus->task_exit(tsk); | ||
6077 | + | ||
6078 | + BUG_ON(bheap_node_in_heap(tsk_rt(tsk)->heap_node)); | ||
6079 | + bheap_node_free(tsk_rt(tsk)->heap_node); | ||
6080 | + release_heap_free(tsk_rt(tsk)->rel_heap); | ||
6081 | + | ||
6082 | + atomic_dec(&rt_task_count); | ||
6083 | + reinit_litmus_state(tsk, 1); | ||
6084 | + } | ||
6085 | +} | ||
6086 | + | ||
6087 | +/* IPI callback to synchronize plugin switching */ | ||
6088 | +static void synch_on_plugin_switch(void* info) | ||
6089 | +{ | ||
6090 | + atomic_inc(&cannot_use_plugin); | ||
6091 | + while (atomic_read(&cannot_use_plugin) > 0) | ||
6092 | + cpu_relax(); | ||
6093 | +} | ||
6094 | + | ||
6095 | +/* Switching a plugin in use is tricky. | ||
6096 | + * We must watch out that no real-time tasks exist | ||
6097 | + * (and that none are created in parallel) and that the plugin is not | ||
6098 | + * currently in use on any processor (in theory). | ||
6099 | + */ | ||
6100 | +int switch_sched_plugin(struct sched_plugin* plugin) | ||
6101 | +{ | ||
6102 | + unsigned long flags; | ||
6103 | + int ret = 0; | ||
6104 | + | ||
6105 | + BUG_ON(!plugin); | ||
6106 | + | ||
6107 | + /* forbid other cpus to use the plugin */ | ||
6108 | + atomic_set(&cannot_use_plugin, 1); | ||
6109 | + /* send IPI to force other CPUs to synch with us */ | ||
6110 | + smp_call_function(synch_on_plugin_switch, NULL, 0); | ||
6111 | + | ||
6112 | + /* wait until all other CPUs have started synch */ | ||
6113 | + while (atomic_read(&cannot_use_plugin) < num_online_cpus()) | ||
6114 | + cpu_relax(); | ||
6115 | + | ||
6116 | + /* stop task transitions */ | ||
6117 | + raw_spin_lock_irqsave(&task_transition_lock, flags); | ||
6118 | + | ||
6119 | + /* don't switch if there are active real-time tasks */ | ||
6120 | + if (atomic_read(&rt_task_count) == 0) { | ||
6121 | + ret = litmus->deactivate_plugin(); | ||
6122 | + if (0 != ret) | ||
6123 | + goto out; | ||
6124 | + ret = plugin->activate_plugin(); | ||
6125 | + if (0 != ret) { | ||
6126 | + printk(KERN_INFO "Can't activate %s (%d).\n", | ||
6127 | + plugin->plugin_name, ret); | ||
6128 | + plugin = &linux_sched_plugin; | ||
6129 | + } | ||
6130 | + printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", plugin->plugin_name); | ||
6131 | + litmus = plugin; | ||
6132 | + } else | ||
6133 | + ret = -EBUSY; | ||
6134 | +out: | ||
6135 | + raw_spin_unlock_irqrestore(&task_transition_lock, flags); | ||
6136 | + atomic_set(&cannot_use_plugin, 0); | ||
6137 | + return ret; | ||
6138 | +} | ||
6139 | + | ||
6140 | +/* Called upon fork. | ||
6141 | + * p is the newly forked task. | ||
6142 | + */ | ||
6143 | +void litmus_fork(struct task_struct* p) | ||
6144 | +{ | ||
6145 | + if (is_realtime(p)) | ||
6146 | + /* clean out any litmus related state, don't preserve anything */ | ||
6147 | + reinit_litmus_state(p, 0); | ||
6148 | + else | ||
6149 | + /* non-rt tasks might have ctrl_page set */ | ||
6150 | + tsk_rt(p)->ctrl_page = NULL; | ||
6151 | + | ||
6152 | + /* od tables are never inherited across a fork */ | ||
6153 | + p->od_table = NULL; | ||
6154 | +} | ||
6155 | + | ||
6156 | +/* Called upon execve(). | ||
6157 | + * current is doing the exec. | ||
6158 | + * Don't let address space specific stuff leak. | ||
6159 | + */ | ||
6160 | +void litmus_exec(void) | ||
6161 | +{ | ||
6162 | + struct task_struct* p = current; | ||
6163 | + | ||
6164 | + if (is_realtime(p)) { | ||
6165 | + WARN_ON(p->rt_param.inh_task); | ||
6166 | + if (tsk_rt(p)->ctrl_page) { | ||
6167 | + free_page((unsigned long) tsk_rt(p)->ctrl_page); | ||
6168 | + tsk_rt(p)->ctrl_page = NULL; | ||
6169 | + } | ||
6170 | + } | ||
6171 | +} | ||
6172 | + | ||
6173 | +void exit_litmus(struct task_struct *dead_tsk) | ||
6174 | +{ | ||
6175 | +	/* Non-RT tasks, too, may allocate | ||
6176 | +	 * control pages, to permit overhead | ||
6177 | +	 * measurements with non-RT tasks. | ||
6178 | +	 * So we check in any case whether | ||
6179 | +	 * the page needs to be freed. | ||
6180 | +	 */ | ||
6181 | + if (tsk_rt(dead_tsk)->ctrl_page) { | ||
6182 | + TRACE_TASK(dead_tsk, | ||
6183 | + "freeing ctrl_page %p\n", | ||
6184 | + tsk_rt(dead_tsk)->ctrl_page); | ||
6185 | + free_page((unsigned long) tsk_rt(dead_tsk)->ctrl_page); | ||
6186 | + } | ||
6187 | + | ||
6188 | + /* main cleanup only for RT tasks */ | ||
6189 | + if (is_realtime(dead_tsk)) | ||
6190 | + litmus_exit_task(dead_tsk); | ||
6191 | +} | ||
6192 | + | ||
6193 | + | ||
6194 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
6195 | +int sys_kill(int pid, int sig); | ||
6196 | + | ||
6197 | +static void sysrq_handle_kill_rt_tasks(int key) | ||
6198 | +{ | ||
6199 | + struct task_struct *t; | ||
6200 | + read_lock(&tasklist_lock); | ||
6201 | + for_each_process(t) { | ||
6202 | + if (is_realtime(t)) { | ||
6203 | + sys_kill(t->pid, SIGKILL); | ||
6204 | + } | ||
6205 | + } | ||
6206 | + read_unlock(&tasklist_lock); | ||
6207 | +} | ||
6208 | + | ||
6209 | +static struct sysrq_key_op sysrq_kill_rt_tasks_op = { | ||
6210 | + .handler = sysrq_handle_kill_rt_tasks, | ||
6211 | + .help_msg = "quit-rt-tasks(X)", | ||
6212 | + .action_msg = "sent SIGKILL to all LITMUS^RT real-time tasks", | ||
6213 | +}; | ||
6214 | +#endif | ||
6215 | + | ||
6216 | +extern struct sched_plugin linux_sched_plugin; | ||
6217 | + | ||
6218 | +static int __init _init_litmus(void) | ||
6219 | +{ | ||
6220 | +	/* Common initializers. | ||
6221 | +	 * The mode change lock is used to ensure that only a single | ||
6222 | +	 * mode change operation is in progress at any time. | ||
6223 | + */ | ||
6224 | + printk("Starting LITMUS^RT kernel\n"); | ||
6225 | + | ||
6226 | + register_sched_plugin(&linux_sched_plugin); | ||
6227 | + | ||
6228 | + bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); | ||
6229 | + release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC); | ||
6230 | + | ||
6231 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
6232 | + /* offer some debugging help */ | ||
6233 | + if (!register_sysrq_key('x', &sysrq_kill_rt_tasks_op)) | ||
6234 | + printk("Registered kill rt tasks magic sysrq.\n"); | ||
6235 | + else | ||
6236 | + printk("Could not register kill rt tasks magic sysrq.\n"); | ||
6237 | +#endif | ||
6238 | + | ||
6239 | + init_litmus_proc(); | ||
6240 | + | ||
6241 | + return 0; | ||
6242 | +} | ||
6243 | + | ||
6244 | +static void _exit_litmus(void) | ||
6245 | +{ | ||
6246 | + exit_litmus_proc(); | ||
6247 | + kmem_cache_destroy(bheap_node_cache); | ||
6248 | + kmem_cache_destroy(release_heap_cache); | ||
6249 | +} | ||
6250 | + | ||
6251 | +module_init(_init_litmus); | ||
6252 | +module_exit(_exit_litmus); | ||
6253 | diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c | ||
6254 | new file mode 100644 | ||
6255 | index 0000000..81ea5c3 | ||
6256 | --- /dev/null | ||
6257 | +++ b/litmus/litmus_proc.c | ||
6258 | @@ -0,0 +1,259 @@ | ||
6259 | +/* | ||
6260 | + * litmus_proc.c -- Implementation of the /proc/litmus directory tree. | ||
6261 | + */ | ||
6262 | + | ||
6263 | +#include <linux/sched.h> | ||
6264 | +#include <linux/uaccess.h> | ||
6265 | + | ||
6266 | +#include <litmus/litmus.h> | ||
6267 | +#include <litmus/litmus_proc.h> | ||
6268 | + | ||
6269 | +/* in litmus/litmus.c */ | ||
6270 | +extern atomic_t rt_task_count; | ||
6271 | + | ||
6272 | +static struct proc_dir_entry *litmus_dir = NULL, | ||
6273 | + *curr_file = NULL, | ||
6274 | + *stat_file = NULL, | ||
6275 | + *plugs_dir = NULL, | ||
6276 | +#ifdef CONFIG_RELEASE_MASTER | ||
6277 | + *release_master_file = NULL, | ||
6278 | +#endif | ||
6279 | + *plugs_file = NULL; | ||
6280 | + | ||
6281 | +/* in litmus/sync.c */ | ||
6282 | +int count_tasks_waiting_for_release(void); | ||
6283 | + | ||
6284 | +static int proc_read_stats(char *page, char **start, | ||
6285 | + off_t off, int count, | ||
6286 | + int *eof, void *data) | ||
6287 | +{ | ||
6288 | + int len; | ||
6289 | + | ||
6290 | + len = snprintf(page, PAGE_SIZE, | ||
6291 | + "real-time tasks = %d\n" | ||
6292 | + "ready for release = %d\n", | ||
6293 | + atomic_read(&rt_task_count), | ||
6294 | + count_tasks_waiting_for_release()); | ||
6295 | + return len; | ||
6296 | +} | ||
6297 | + | ||
6298 | +static int proc_read_plugins(char *page, char **start, | ||
6299 | + off_t off, int count, | ||
6300 | + int *eof, void *data) | ||
6301 | +{ | ||
6302 | + int len; | ||
6303 | + | ||
6304 | + len = print_sched_plugins(page, PAGE_SIZE); | ||
6305 | + return len; | ||
6306 | +} | ||
6307 | + | ||
6308 | +static int proc_read_curr(char *page, char **start, | ||
6309 | + off_t off, int count, | ||
6310 | + int *eof, void *data) | ||
6311 | +{ | ||
6312 | + int len; | ||
6313 | + | ||
6314 | + len = snprintf(page, PAGE_SIZE, "%s\n", litmus->plugin_name); | ||
6315 | + return len; | ||
6316 | +} | ||
6317 | + | ||
6318 | +/* in litmus/litmus.c */ | ||
6319 | +int switch_sched_plugin(struct sched_plugin*); | ||
6320 | + | ||
6321 | +static int proc_write_curr(struct file *file, | ||
6322 | + const char *buffer, | ||
6323 | + unsigned long count, | ||
6324 | + void *data) | ||
6325 | +{ | ||
6326 | + int len, ret; | ||
6327 | + char name[65]; | ||
6328 | + struct sched_plugin* found; | ||
6329 | + | ||
6330 | +	if (count > 64) | ||
6331 | + len = 64; | ||
6332 | + else | ||
6333 | + len = count; | ||
6334 | + | ||
6335 | +	if (copy_from_user(name, buffer, len)) | ||
6336 | + return -EFAULT; | ||
6337 | + | ||
6338 | + name[len] = '\0'; | ||
6339 | + /* chomp name */ | ||
6340 | + if (len > 1 && name[len - 1] == '\n') | ||
6341 | + name[len - 1] = '\0'; | ||
6342 | + | ||
6343 | + found = find_sched_plugin(name); | ||
6344 | + | ||
6345 | + if (found) { | ||
6346 | + ret = switch_sched_plugin(found); | ||
6347 | + if (ret != 0) | ||
6348 | + printk(KERN_INFO "Could not switch plugin: %d\n", ret); | ||
6349 | + } else | ||
6350 | + printk(KERN_INFO "Plugin '%s' is unknown.\n", name); | ||
6351 | + | ||
6352 | + return len; | ||
6353 | +} | ||
6354 | + | ||
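Since this write handler backs /proc/litmus/active_plugin (created further below), a plugin switch can be requested entirely from user space. A minimal sketch; the plugin name "GSN-EDF" is only an example, and the switch fails (-EBUSY in switch_sched_plugin()) while real-time tasks exist:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Sketch: ask the kernel to switch the active scheduler plugin. */
    static int request_plugin(const char *name)
    {
    	ssize_t w;
    	int fd = open("/proc/litmus/active_plugin", O_WRONLY);

    	if (fd < 0)
    		return -1;
    	w = write(fd, name, strlen(name));  /* e.g., "GSN-EDF" */
    	close(fd);
    	return w < 0 ? -1 : 0;
    }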
6355 | +#ifdef CONFIG_RELEASE_MASTER | ||
6356 | +static int proc_read_release_master(char *page, char **start, | ||
6357 | + off_t off, int count, | ||
6358 | + int *eof, void *data) | ||
6359 | +{ | ||
6360 | + int len, master; | ||
6361 | + master = atomic_read(&release_master_cpu); | ||
6362 | + if (master == NO_CPU) | ||
6363 | + len = snprintf(page, PAGE_SIZE, "NO_CPU\n"); | ||
6364 | + else | ||
6365 | + len = snprintf(page, PAGE_SIZE, "%d\n", master); | ||
6366 | + return len; | ||
6367 | +} | ||
6368 | + | ||
6369 | +static int proc_write_release_master(struct file *file, | ||
6370 | + const char *buffer, | ||
6371 | + unsigned long count, | ||
6372 | + void *data) | ||
6373 | +{ | ||
6374 | + int cpu, err, online = 0; | ||
6375 | + char msg[64]; | ||
6376 | + | ||
6377 | + if (count > 63) | ||
6378 | + return -EINVAL; | ||
6379 | + | ||
6380 | + if (copy_from_user(msg, buffer, count)) | ||
6381 | + return -EFAULT; | ||
6382 | + | ||
6383 | + /* terminate */ | ||
6384 | + msg[count] = '\0'; | ||
6385 | + /* chomp */ | ||
6386 | + if (count > 1 && msg[count - 1] == '\n') | ||
6387 | + msg[count - 1] = '\0'; | ||
6388 | + | ||
6389 | + if (strcmp(msg, "NO_CPU") == 0) { | ||
6390 | + atomic_set(&release_master_cpu, NO_CPU); | ||
6391 | + return count; | ||
6392 | + } else { | ||
6393 | + err = sscanf(msg, "%d", &cpu); | ||
6394 | + if (err == 1 && cpu >= 0 && (online = cpu_online(cpu))) { | ||
6395 | + atomic_set(&release_master_cpu, cpu); | ||
6396 | + return count; | ||
6397 | + } else { | ||
6398 | + TRACE("invalid release master: '%s' " | ||
6399 | + "(err:%d cpu:%d online:%d)\n", | ||
6400 | + msg, err, cpu, online); | ||
6401 | + return -EINVAL; | ||
6402 | + } | ||
6403 | + } | ||
6404 | +} | ||
6405 | +#endif | ||
6406 | + | ||
6407 | +int __init init_litmus_proc(void) | ||
6408 | +{ | ||
6409 | + litmus_dir = proc_mkdir("litmus", NULL); | ||
6410 | + if (!litmus_dir) { | ||
6411 | + printk(KERN_ERR "Could not allocate LITMUS^RT procfs entry.\n"); | ||
6412 | + return -ENOMEM; | ||
6413 | + } | ||
6414 | + | ||
6415 | + curr_file = create_proc_entry("active_plugin", | ||
6416 | + 0644, litmus_dir); | ||
6417 | + if (!curr_file) { | ||
6418 | + printk(KERN_ERR "Could not allocate active_plugin " | ||
6419 | + "procfs entry.\n"); | ||
6420 | + return -ENOMEM; | ||
6421 | + } | ||
6422 | + curr_file->read_proc = proc_read_curr; | ||
6423 | + curr_file->write_proc = proc_write_curr; | ||
6424 | + | ||
6425 | +#ifdef CONFIG_RELEASE_MASTER | ||
6426 | + release_master_file = create_proc_entry("release_master", | ||
6427 | + 0644, litmus_dir); | ||
6428 | + if (!release_master_file) { | ||
6429 | + printk(KERN_ERR "Could not allocate release_master " | ||
6430 | + "procfs entry.\n"); | ||
6431 | + return -ENOMEM; | ||
6432 | + } | ||
6433 | + release_master_file->read_proc = proc_read_release_master; | ||
6434 | + release_master_file->write_proc = proc_write_release_master; | ||
6435 | +#endif | ||
6436 | + | ||
6437 | + stat_file = create_proc_read_entry("stats", 0444, litmus_dir, | ||
6438 | + proc_read_stats, NULL); | ||
6439 | + | ||
6440 | + plugs_dir = proc_mkdir("plugins", litmus_dir); | ||
6441 | +	if (!plugs_dir) { | ||
6442 | + printk(KERN_ERR "Could not allocate plugins directory " | ||
6443 | + "procfs entry.\n"); | ||
6444 | + return -ENOMEM; | ||
6445 | + } | ||
6446 | + | ||
6447 | + plugs_file = create_proc_read_entry("loaded", 0444, plugs_dir, | ||
6448 | + proc_read_plugins, NULL); | ||
6449 | + | ||
6450 | + return 0; | ||
6451 | +} | ||
6452 | + | ||
6453 | +void exit_litmus_proc(void) | ||
6454 | +{ | ||
6455 | + if (plugs_file) | ||
6456 | + remove_proc_entry("loaded", plugs_dir); | ||
6457 | + if (plugs_dir) | ||
6458 | + remove_proc_entry("plugins", litmus_dir); | ||
6459 | + if (stat_file) | ||
6460 | + remove_proc_entry("stats", litmus_dir); | ||
6461 | + if (curr_file) | ||
6462 | + remove_proc_entry("active_plugin", litmus_dir); | ||
6463 | +#ifdef CONFIG_RELEASE_MASTER | ||
6464 | + if (release_master_file) | ||
6465 | + remove_proc_entry("release_master", litmus_dir); | ||
6466 | +#endif | ||
6467 | + if (litmus_dir) | ||
6468 | + remove_proc_entry("litmus", NULL); | ||
6469 | +} | ||
6470 | + | ||
6471 | +long make_plugin_proc_dir(struct sched_plugin* plugin, | ||
6472 | + struct proc_dir_entry** pde_in) | ||
6473 | +{ | ||
6474 | + struct proc_dir_entry *pde_new = NULL; | ||
6475 | + long rv; | ||
6476 | + | ||
6477 | +	if (!plugin || !plugin->plugin_name) { | ||
6478 | + printk(KERN_ERR "Invalid plugin struct passed to %s.\n", | ||
6479 | + __func__); | ||
6480 | + rv = -EINVAL; | ||
6481 | + goto out_no_pde; | ||
6482 | + } | ||
6483 | + | ||
6484 | +	if (!plugs_dir) { | ||
6485 | + printk(KERN_ERR "Could not make plugin sub-directory, because " | ||
6486 | + "/proc/litmus/plugins does not exist.\n"); | ||
6487 | + rv = -ENOENT; | ||
6488 | + goto out_no_pde; | ||
6489 | + } | ||
6490 | + | ||
6491 | + pde_new = proc_mkdir(plugin->plugin_name, plugs_dir); | ||
6492 | +	if (!pde_new) { | ||
6493 | +		printk(KERN_ERR "Could not make plugin sub-directory: " | ||
6494 | +			"out of memory?\n"); | ||
6495 | + rv = -ENOMEM; | ||
6496 | + goto out_no_pde; | ||
6497 | + } | ||
6498 | + | ||
6499 | + rv = 0; | ||
6500 | + *pde_in = pde_new; | ||
6501 | + goto out_ok; | ||
6502 | + | ||
6503 | +out_no_pde: | ||
6504 | + *pde_in = NULL; | ||
6505 | +out_ok: | ||
6506 | + return rv; | ||
6507 | +} | ||
6508 | + | ||
6509 | +void remove_plugin_proc_dir(struct sched_plugin* plugin) | ||
6510 | +{ | ||
6511 | +	if (!plugin || !plugin->plugin_name) { | ||
6512 | + printk(KERN_ERR "Invalid plugin struct passed to %s.\n", | ||
6513 | + __func__); | ||
6514 | + return; | ||
6515 | + } | ||
6516 | + remove_proc_entry(plugin->plugin_name, plugs_dir); | ||
6517 | +} | ||
6518 | diff --git a/litmus/preempt.c b/litmus/preempt.c | ||
6519 | new file mode 100644 | ||
6520 | index 0000000..ebe2e34 | ||
6521 | --- /dev/null | ||
6522 | +++ b/litmus/preempt.c | ||
6523 | @@ -0,0 +1,131 @@ | ||
6524 | +#include <linux/sched.h> | ||
6525 | + | ||
6526 | +#include <litmus/litmus.h> | ||
6527 | +#include <litmus/preempt.h> | ||
6528 | + | ||
6529 | +/* The rescheduling state of each processor. | ||
6530 | + */ | ||
6531 | +DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, resched_state); | ||
6532 | + | ||
6533 | +void sched_state_will_schedule(struct task_struct* tsk) | ||
6534 | +{ | ||
6535 | + /* Litmus hack: we only care about processor-local invocations of | ||
6536 | + * set_tsk_need_resched(). We can't reliably set the flag remotely | ||
6537 | + * since it might race with other updates to the scheduling state. We | ||
6538 | + * can't rely on the runqueue lock protecting updates to the sched | ||
6539 | + * state since processors do not acquire the runqueue locks for all | ||
6540 | + * updates to the sched state (to avoid acquiring two runqueue locks at | ||
6541 | + * the same time). Further, if tsk is residing on a remote processor, | ||
6542 | + * then that processor doesn't actually know yet that it is going to | ||
6543 | + * reschedule; it still must receive an IPI (unless a local invocation | ||
6544 | + * races). | ||
6545 | + */ | ||
6546 | + if (likely(task_cpu(tsk) == smp_processor_id())) { | ||
6547 | + VERIFY_SCHED_STATE(TASK_SCHEDULED | SHOULD_SCHEDULE | TASK_PICKED | WILL_SCHEDULE); | ||
6548 | + if (is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) | ||
6549 | + set_sched_state(PICKED_WRONG_TASK); | ||
6550 | + else | ||
6551 | + set_sched_state(WILL_SCHEDULE); | ||
6552 | + } else | ||
6553 | + /* Litmus tasks should never be subject to a remote | ||
6554 | + * set_tsk_need_resched(). */ | ||
6555 | + BUG_ON(is_realtime(tsk)); | ||
6556 | + TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", | ||
6557 | + __builtin_return_address(0)); | ||
6558 | +} | ||
6559 | + | ||
6560 | +/* Called by the IPI handler after another CPU called smp_send_resched(). */ | ||
6561 | +void sched_state_ipi(void) | ||
6562 | +{ | ||
6563 | + /* If the IPI was slow, we might be in any state right now. The IPI is | ||
6564 | + * only meaningful if we are in SHOULD_SCHEDULE. */ | ||
6565 | + if (is_in_sched_state(SHOULD_SCHEDULE)) { | ||
6566 | + /* Cause scheduler to be invoked. | ||
6567 | + * This will cause a transition to WILL_SCHEDULE. */ | ||
6568 | + set_tsk_need_resched(current); | ||
6569 | + TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", | ||
6570 | + current->comm, current->pid); | ||
6571 | + } else { | ||
6572 | + /* ignore */ | ||
6573 | + TRACE_STATE("ignoring IPI in state %x (%s)\n", | ||
6574 | + get_sched_state(), | ||
6575 | + sched_state_name(get_sched_state())); | ||
6576 | + } | ||
6577 | +} | ||
6578 | + | ||
6579 | +/* Called by plugins to cause a CPU to reschedule. IMPORTANT: the caller must | ||
6580 | + * hold the lock that is used to serialize scheduling decisions. */ | ||
6581 | +void litmus_reschedule(int cpu) | ||
6582 | +{ | ||
6583 | + int picked_transition_ok = 0; | ||
6584 | + int scheduled_transition_ok = 0; | ||
6585 | + | ||
6586 | + /* The (remote) CPU could be in any state. */ | ||
6587 | + | ||
6588 | + /* The critical states are TASK_PICKED and TASK_SCHEDULED, as the CPU | ||
6589 | + * is not aware of the need to reschedule at this point. */ | ||
6590 | + | ||
6591 | + /* is a context switch in progress? */ | ||
6592 | + if (cpu_is_in_sched_state(cpu, TASK_PICKED)) | ||
6593 | + picked_transition_ok = sched_state_transition_on( | ||
6594 | + cpu, TASK_PICKED, PICKED_WRONG_TASK); | ||
6595 | + | ||
6596 | + if (!picked_transition_ok && | ||
6597 | + cpu_is_in_sched_state(cpu, TASK_SCHEDULED)) { | ||
6598 | + /* We either raced with the end of the context switch, or the | ||
6599 | + * CPU was in TASK_SCHEDULED anyway. */ | ||
6600 | + scheduled_transition_ok = sched_state_transition_on( | ||
6601 | + cpu, TASK_SCHEDULED, SHOULD_SCHEDULE); | ||
6602 | + } | ||
6603 | + | ||
6604 | + /* If the CPU was in state TASK_SCHEDULED, then we need to cause the | ||
6605 | + * scheduler to be invoked. */ | ||
6606 | + if (scheduled_transition_ok) { | ||
6607 | + if (smp_processor_id() == cpu) | ||
6608 | + set_tsk_need_resched(current); | ||
6609 | + else | ||
6610 | + smp_send_reschedule(cpu); | ||
6611 | + } | ||
6612 | + | ||
6613 | + TRACE_STATE("%s picked-ok:%d sched-ok:%d\n", | ||
6614 | + __FUNCTION__, | ||
6615 | + picked_transition_ok, | ||
6616 | + scheduled_transition_ok); | ||
6617 | +} | ||
6618 | + | ||
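A hedged sketch of the calling convention stated above; the names dom, entry, and edf_preemption_needed() follow the conventions of the plugins later in this patch:

    /* Sketch: how a plugin might trigger a remote preemption. The
     * ready_lock serializes this plugin's scheduling decisions. */
    static void example_check_remote(rt_domain_t *dom, cpu_entry_t *entry)
    {
    	raw_spin_lock(&dom->ready_lock);
    	if (edf_preemption_needed(dom, entry->linked)) {
    		/* ... relink the higher-priority job to entry's CPU ... */
    		litmus_reschedule(entry->cpu);  /* IPI only if required */
    	}
    	raw_spin_unlock(&dom->ready_lock);
    }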
6619 | +void litmus_reschedule_local(void) | ||
6620 | +{ | ||
6621 | + if (is_in_sched_state(TASK_PICKED)) | ||
6622 | + set_sched_state(PICKED_WRONG_TASK); | ||
6623 | + else if (is_in_sched_state(TASK_SCHEDULED | SHOULD_SCHEDULE)) { | ||
6624 | + set_sched_state(WILL_SCHEDULE); | ||
6625 | + set_tsk_need_resched(current); | ||
6626 | + } | ||
6627 | +} | ||
6628 | + | ||
6629 | +#ifdef CONFIG_DEBUG_KERNEL | ||
6630 | + | ||
6631 | +void sched_state_plugin_check(void) | ||
6632 | +{ | ||
6633 | + if (!is_in_sched_state(TASK_PICKED | PICKED_WRONG_TASK)) { | ||
6634 | +		TRACE("!!!! plugin did not call sched_state_task_picked()! " | ||
6635 | + "Calling sched_state_task_picked() is mandatory---fix this.\n"); | ||
6636 | + set_sched_state(TASK_PICKED); | ||
6637 | + } | ||
6638 | +} | ||
6639 | + | ||
6640 | +#define NAME_CHECK(x) case x: return #x | ||
6641 | +const char* sched_state_name(int s) | ||
6642 | +{ | ||
6643 | + switch (s) { | ||
6644 | + NAME_CHECK(TASK_SCHEDULED); | ||
6645 | + NAME_CHECK(SHOULD_SCHEDULE); | ||
6646 | + NAME_CHECK(WILL_SCHEDULE); | ||
6647 | + NAME_CHECK(TASK_PICKED); | ||
6648 | + NAME_CHECK(PICKED_WRONG_TASK); | ||
6649 | + default: | ||
6650 | + return "UNKNOWN"; | ||
6651 | +	} | ||
6652 | +} | ||
6653 | + | ||
6654 | +#endif | ||
6655 | diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c | ||
6656 | new file mode 100644 | ||
6657 | index 0000000..81a5ac1 | ||
6658 | --- /dev/null | ||
6659 | +++ b/litmus/rt_domain.c | ||
6660 | @@ -0,0 +1,355 @@ | ||
6661 | +/* | ||
6662 | + * litmus/rt_domain.c | ||
6663 | + * | ||
6664 | + * LITMUS real-time infrastructure. This file contains the | ||
6665 | + * functions that manipulate RT domains. RT domains are an abstraction | ||
6666 | + * of a ready queue and a release queue. | ||
6667 | + */ | ||
6668 | + | ||
6669 | +#include <linux/percpu.h> | ||
6670 | +#include <linux/sched.h> | ||
6671 | +#include <linux/list.h> | ||
6672 | +#include <linux/slab.h> | ||
6673 | + | ||
6674 | +#include <litmus/litmus.h> | ||
6675 | +#include <litmus/sched_plugin.h> | ||
6676 | +#include <litmus/sched_trace.h> | ||
6677 | + | ||
6678 | +#include <litmus/rt_domain.h> | ||
6679 | + | ||
6680 | +#include <litmus/trace.h> | ||
6681 | + | ||
6682 | +#include <litmus/bheap.h> | ||
6683 | + | ||
6684 | +/* Uncomment when debugging timer races... */ | ||
6685 | +#if 0 | ||
6686 | +#define VTRACE_TASK TRACE_TASK | ||
6687 | +#define VTRACE TRACE | ||
6688 | +#else | ||
6689 | +#define VTRACE_TASK(t, fmt, args...) /* shut up */ | ||
6690 | +#define VTRACE(fmt, args...) /* be quiet already */ | ||
6691 | +#endif | ||
6692 | + | ||
6693 | +static int dummy_resched(rt_domain_t *rt) | ||
6694 | +{ | ||
6695 | + return 0; | ||
6696 | +} | ||
6697 | + | ||
6698 | +static int dummy_order(struct bheap_node* a, struct bheap_node* b) | ||
6699 | +{ | ||
6700 | + return 0; | ||
6701 | +} | ||
6702 | + | ||
6703 | +/* default implementation: merge released jobs via the default ready lock */ | ||
6704 | +static void default_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
6705 | +{ | ||
6706 | + merge_ready(rt, tasks); | ||
6707 | +} | ||
6708 | + | ||
6709 | +static unsigned int time2slot(lt_t time) | ||
6710 | +{ | ||
6711 | + return (unsigned int) time2quanta(time, FLOOR) % RELEASE_QUEUE_SLOTS; | ||
6712 | +} | ||
6713 | + | ||
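A worked example of the slot hashing, assuming a 1 ms quantum and RELEASE_QUEUE_SLOTS == 127 (the value we believe this version uses):

    /* time2slot() example:
     *   release at t = 250 ms -> quantum 250 -> 250 % 127 = slot 123
     *   release at t = 377 ms -> quantum 377 -> 377 % 127 = slot 123
     * Both releases hash to slot 123; get_release_heap() below then
     * tells them apart by exact release_time within the slot's list. */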
6714 | +static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | ||
6715 | +{ | ||
6716 | + unsigned long flags; | ||
6717 | + struct release_heap* rh; | ||
6718 | + | ||
6719 | + VTRACE("on_release_timer(0x%p) starts.\n", timer); | ||
6720 | + | ||
6721 | + TS_RELEASE_START; | ||
6722 | + | ||
6723 | + rh = container_of(timer, struct release_heap, timer); | ||
6724 | + | ||
6725 | + raw_spin_lock_irqsave(&rh->dom->release_lock, flags); | ||
6726 | + VTRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); | ||
6727 | + /* remove from release queue */ | ||
6728 | + list_del(&rh->list); | ||
6729 | + raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags); | ||
6730 | + VTRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); | ||
6731 | + | ||
6732 | + /* call release callback */ | ||
6733 | + rh->dom->release_jobs(rh->dom, &rh->heap); | ||
6734 | + /* WARNING: rh can be referenced from other CPUs from now on. */ | ||
6735 | + | ||
6736 | + TS_RELEASE_END; | ||
6737 | + | ||
6738 | + VTRACE("on_release_timer(0x%p) ends.\n", timer); | ||
6739 | + | ||
6740 | + return HRTIMER_NORESTART; | ||
6741 | +} | ||
6742 | + | ||
6743 | +/* allocated in litmus.c */ | ||
6744 | +struct kmem_cache *release_heap_cache; | ||
6745 | + | ||
6746 | +struct release_heap* release_heap_alloc(int gfp_flags) | ||
6747 | +{ | ||
6748 | + struct release_heap* rh; | ||
6749 | +	rh = kmem_cache_alloc(release_heap_cache, gfp_flags); | ||
6750 | + if (rh) { | ||
6751 | + /* initialize timer */ | ||
6752 | + hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
6753 | + rh->timer.function = on_release_timer; | ||
6754 | + } | ||
6755 | + return rh; | ||
6756 | +} | ||
6757 | + | ||
6758 | +void release_heap_free(struct release_heap* rh) | ||
6759 | +{ | ||
6760 | + /* make sure timer is no longer in use */ | ||
6761 | + hrtimer_cancel(&rh->timer); | ||
6762 | + kmem_cache_free(release_heap_cache, rh); | ||
6763 | +} | ||
6764 | + | ||
6765 | +/* Caller must hold release lock. | ||
6766 | + * Will return the heap for the given release time. If no such heap | ||
6767 | + * exists prior to the invocation, it is created (if use_task_heap is set). | ||
6768 | + */ | ||
6769 | +static struct release_heap* get_release_heap(rt_domain_t *rt, | ||
6770 | + struct task_struct* t, | ||
6771 | + int use_task_heap) | ||
6772 | +{ | ||
6773 | + struct list_head* pos; | ||
6774 | + struct release_heap* heap = NULL; | ||
6775 | + struct release_heap* rh; | ||
6776 | + lt_t release_time = get_release(t); | ||
6777 | + unsigned int slot = time2slot(release_time); | ||
6778 | + | ||
6779 | + /* initialize pos for the case that the list is empty */ | ||
6780 | + pos = rt->release_queue.slot[slot].next; | ||
6781 | + list_for_each(pos, &rt->release_queue.slot[slot]) { | ||
6782 | + rh = list_entry(pos, struct release_heap, list); | ||
6783 | + if (release_time == rh->release_time) { | ||
6784 | + /* perfect match -- this happens on hyperperiod | ||
6785 | + * boundaries | ||
6786 | + */ | ||
6787 | + heap = rh; | ||
6788 | + break; | ||
6789 | + } else if (lt_before(release_time, rh->release_time)) { | ||
6790 | + /* we need to insert a new node since rh is | ||
6791 | + * already in the future | ||
6792 | + */ | ||
6793 | + break; | ||
6794 | + } | ||
6795 | + } | ||
6796 | + if (!heap && use_task_heap) { | ||
6797 | + /* use pre-allocated release heap */ | ||
6798 | + rh = tsk_rt(t)->rel_heap; | ||
6799 | + | ||
6800 | + rh->dom = rt; | ||
6801 | + rh->release_time = release_time; | ||
6802 | + | ||
6803 | + /* add to release queue */ | ||
6804 | + list_add(&rh->list, pos->prev); | ||
6805 | + heap = rh; | ||
6806 | + } | ||
6807 | + return heap; | ||
6808 | +} | ||
6809 | + | ||
6810 | +static void reinit_release_heap(struct task_struct* t) | ||
6811 | +{ | ||
6812 | + struct release_heap* rh; | ||
6813 | + | ||
6814 | + /* use pre-allocated release heap */ | ||
6815 | + rh = tsk_rt(t)->rel_heap; | ||
6816 | + | ||
6817 | + /* Make sure it is safe to use. The timer callback could still | ||
6818 | + * be executing on another CPU; hrtimer_cancel() will wait | ||
6819 | + * until the timer callback has completed. However, under no | ||
6820 | + * circumstances should the timer be active (= yet to be | ||
6821 | + * triggered). | ||
6822 | + * | ||
6823 | + * WARNING: If the CPU still holds the release_lock at this point, | ||
6824 | + * deadlock may occur! | ||
6825 | + */ | ||
6826 | + BUG_ON(hrtimer_cancel(&rh->timer)); | ||
6827 | + | ||
6828 | + /* initialize */ | ||
6829 | + bheap_init(&rh->heap); | ||
6830 | +#ifdef CONFIG_RELEASE_MASTER | ||
6831 | + atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE); | ||
6832 | +#endif | ||
6833 | +} | ||
6834 | +/* arm_release_timer() - start local release timer or trigger | ||
6835 | + * remote timer (pull timer) | ||
6836 | + * | ||
6837 | + * Called by add_release() with: | ||
6838 | + * - tobe_lock taken | ||
6839 | + * - IRQ disabled | ||
6840 | + */ | ||
6841 | +#ifdef CONFIG_RELEASE_MASTER | ||
6842 | +#define arm_release_timer(t) arm_release_timer_on((t), NO_CPU) | ||
6843 | +static void arm_release_timer_on(rt_domain_t *_rt, int target_cpu) | ||
6844 | +#else | ||
6845 | +static void arm_release_timer(rt_domain_t *_rt) | ||
6846 | +#endif | ||
6847 | +{ | ||
6848 | + rt_domain_t *rt = _rt; | ||
6849 | + struct list_head list; | ||
6850 | + struct list_head *pos, *safe; | ||
6851 | + struct task_struct* t; | ||
6852 | + struct release_heap* rh; | ||
6853 | + | ||
6854 | + VTRACE("arm_release_timer() at %llu\n", litmus_clock()); | ||
6855 | + list_replace_init(&rt->tobe_released, &list); | ||
6856 | + | ||
6857 | + list_for_each_safe(pos, safe, &list) { | ||
6858 | +		/* pick task off the work list */ | ||
6859 | + t = list_entry(pos, struct task_struct, rt_param.list); | ||
6860 | + sched_trace_task_release(t); | ||
6861 | + list_del(pos); | ||
6862 | + | ||
6863 | + /* put into release heap while holding release_lock */ | ||
6864 | + raw_spin_lock(&rt->release_lock); | ||
6865 | + VTRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock); | ||
6866 | + | ||
6867 | + rh = get_release_heap(rt, t, 0); | ||
6868 | + if (!rh) { | ||
6869 | + /* need to use our own, but drop lock first */ | ||
6870 | + raw_spin_unlock(&rt->release_lock); | ||
6871 | + VTRACE_TASK(t, "Dropped release_lock 0x%p\n", | ||
6872 | + &rt->release_lock); | ||
6873 | + | ||
6874 | + reinit_release_heap(t); | ||
6875 | + VTRACE_TASK(t, "release_heap ready\n"); | ||
6876 | + | ||
6877 | + raw_spin_lock(&rt->release_lock); | ||
6878 | + VTRACE_TASK(t, "Re-acquired release_lock 0x%p\n", | ||
6879 | + &rt->release_lock); | ||
6880 | + | ||
6881 | + rh = get_release_heap(rt, t, 1); | ||
6882 | + } | ||
6883 | + bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); | ||
6884 | + VTRACE_TASK(t, "arm_release_timer(): added to release heap\n"); | ||
6885 | + | ||
6886 | + raw_spin_unlock(&rt->release_lock); | ||
6887 | + VTRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); | ||
6888 | + | ||
6889 | + /* To avoid arming the timer multiple times, we only let the | ||
6890 | + * owner do the arming (which is the "first" task to reference | ||
6891 | + * this release_heap anyway). | ||
6892 | + */ | ||
6893 | + if (rh == tsk_rt(t)->rel_heap) { | ||
6894 | + VTRACE_TASK(t, "arming timer 0x%p\n", &rh->timer); | ||
6895 | + /* we cannot arm the timer using hrtimer_start() | ||
6896 | + * as it may deadlock on rq->lock | ||
6897 | + * | ||
6898 | + * PINNED mode is ok on both local and remote CPU | ||
6899 | + */ | ||
6900 | +#ifdef CONFIG_RELEASE_MASTER | ||
6901 | + if (rt->release_master == NO_CPU && | ||
6902 | + target_cpu == NO_CPU) | ||
6903 | +#endif | ||
6904 | + __hrtimer_start_range_ns(&rh->timer, | ||
6905 | + ns_to_ktime(rh->release_time), | ||
6906 | + 0, HRTIMER_MODE_ABS_PINNED, 0); | ||
6907 | +#ifdef CONFIG_RELEASE_MASTER | ||
6908 | + else | ||
6909 | + hrtimer_start_on( | ||
6910 | + /* target_cpu overrides release master */ | ||
6911 | + (target_cpu != NO_CPU ? | ||
6912 | + target_cpu : rt->release_master), | ||
6913 | + &rh->info, &rh->timer, | ||
6914 | + ns_to_ktime(rh->release_time), | ||
6915 | + HRTIMER_MODE_ABS_PINNED); | ||
6916 | +#endif | ||
6917 | + } else | ||
6918 | + VTRACE_TASK(t, "0x%p is not my timer\n", &rh->timer); | ||
6919 | + } | ||
6920 | +} | ||
6921 | + | ||
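To make the locking dance in arm_release_timer() easier to follow, a sketch of the two-pass protocol for a task t:

    /* 1. Take release_lock and look for an existing heap that matches
     *    t's release time (get_release_heap(..., 0)).
     * 2. On a miss, drop the lock first: reinit_release_heap() calls
     *    hrtimer_cancel(), which may wait for the timer callback, and
     *    that callback itself takes release_lock; holding the lock
     *    here could deadlock.
     * 3. Retake the lock and retry with use_task_heap = 1. Another CPU
     *    may have inserted a matching heap in the meantime; then t's
     *    own heap stays unused and, because rh != tsk_rt(t)->rel_heap,
     *    t does not arm the timer: only the heap's owner does. */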
6922 | +void rt_domain_init(rt_domain_t *rt, | ||
6923 | + bheap_prio_t order, | ||
6924 | + check_resched_needed_t check, | ||
6925 | + release_jobs_t release | ||
6926 | + ) | ||
6927 | +{ | ||
6928 | + int i; | ||
6929 | + | ||
6930 | + BUG_ON(!rt); | ||
6931 | + if (!check) | ||
6932 | + check = dummy_resched; | ||
6933 | + if (!release) | ||
6934 | + release = default_release_jobs; | ||
6935 | + if (!order) | ||
6936 | + order = dummy_order; | ||
6937 | + | ||
6938 | +#ifdef CONFIG_RELEASE_MASTER | ||
6939 | + rt->release_master = NO_CPU; | ||
6940 | +#endif | ||
6941 | + | ||
6942 | + bheap_init(&rt->ready_queue); | ||
6943 | + INIT_LIST_HEAD(&rt->tobe_released); | ||
6944 | + for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) | ||
6945 | + INIT_LIST_HEAD(&rt->release_queue.slot[i]); | ||
6946 | + | ||
6947 | + raw_spin_lock_init(&rt->ready_lock); | ||
6948 | + raw_spin_lock_init(&rt->release_lock); | ||
6949 | + raw_spin_lock_init(&rt->tobe_lock); | ||
6950 | + | ||
6951 | + rt->check_resched = check; | ||
6952 | + rt->release_jobs = release; | ||
6953 | + rt->order = order; | ||
6954 | +} | ||
6955 | + | ||
6956 | +/* add_ready - add a real-time task to the rt ready queue. It must be runnable. | ||
6957 | + * @new: the newly released task | ||
6958 | + */ | ||
6959 | +void __add_ready(rt_domain_t* rt, struct task_struct *new) | ||
6960 | +{ | ||
6961 | + TRACE("rt: adding %s/%d (%llu, %llu) rel=%llu to ready queue at %llu\n", | ||
6962 | + new->comm, new->pid, get_exec_cost(new), get_rt_period(new), | ||
6963 | + get_release(new), litmus_clock()); | ||
6964 | + | ||
6965 | + BUG_ON(bheap_node_in_heap(tsk_rt(new)->heap_node)); | ||
6966 | + | ||
6967 | + bheap_insert(rt->order, &rt->ready_queue, tsk_rt(new)->heap_node); | ||
6968 | + rt->check_resched(rt); | ||
6969 | +} | ||
6970 | + | ||
6971 | +/* merge_ready - Add a sorted set of tasks to the rt ready queue. They must be runnable. | ||
6972 | + * @tasks - the newly released tasks | ||
6973 | + */ | ||
6974 | +void __merge_ready(rt_domain_t* rt, struct bheap* tasks) | ||
6975 | +{ | ||
6976 | + bheap_union(rt->order, &rt->ready_queue, tasks); | ||
6977 | + rt->check_resched(rt); | ||
6978 | +} | ||
6979 | + | ||
6980 | + | ||
6981 | +#ifdef CONFIG_RELEASE_MASTER | ||
6982 | +void __add_release_on(rt_domain_t* rt, struct task_struct *task, | ||
6983 | + int target_cpu) | ||
6984 | +{ | ||
6985 | + TRACE_TASK(task, "add_release_on(), rel=%llu, target=%d\n", | ||
6986 | + get_release(task), target_cpu); | ||
6987 | + list_add(&tsk_rt(task)->list, &rt->tobe_released); | ||
6988 | + task->rt_param.domain = rt; | ||
6989 | + | ||
6990 | + /* start release timer */ | ||
6991 | + TS_SCHED2_START(task); | ||
6992 | + | ||
6993 | + arm_release_timer_on(rt, target_cpu); | ||
6994 | + | ||
6995 | + TS_SCHED2_END(task); | ||
6996 | +} | ||
6997 | +#endif | ||
6998 | + | ||
6999 | +/* add_release - add a real-time task to the rt release queue. | ||
7000 | + * @task: the sleeping task | ||
7001 | + */ | ||
7002 | +void __add_release(rt_domain_t* rt, struct task_struct *task) | ||
7003 | +{ | ||
7004 | + TRACE_TASK(task, "add_release(), rel=%llu\n", get_release(task)); | ||
7005 | + list_add(&tsk_rt(task)->list, &rt->tobe_released); | ||
7006 | + task->rt_param.domain = rt; | ||
7007 | + | ||
7008 | + /* start release timer */ | ||
7009 | + TS_SCHED2_START(task); | ||
7010 | + | ||
7011 | + arm_release_timer(rt); | ||
7012 | + | ||
7013 | + TS_SCHED2_END(task); | ||
7014 | +} | ||
7015 | + | ||
7016 | diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c | ||
7017 | new file mode 100644 | ||
7018 | index 0000000..098a449 | ||
7019 | --- /dev/null | ||
7020 | +++ b/litmus/sched_cedf.c | ||
7021 | @@ -0,0 +1,873 @@ | ||
7022 | +/* | ||
7023 | + * litmus/sched_cedf.c | ||
7024 | + * | ||
7025 | + * Implementation of the C-EDF scheduling algorithm. | ||
7026 | + * | ||
7027 | + * This implementation is based on G-EDF: | ||
7028 | + * - CPUs are clustered around L2 or L3 caches. | ||
7029 | + * - Cluster topology is automatically detected (this is arch dependent | ||
7030 | + *   and works only on x86 at the moment --- and only with modern | ||
7031 | + *   cpus that export cpuid4 information) | ||
7032 | + * - The plugin _does not_ attempt to put tasks in the right cluster, i.e., | ||
7033 | + *   the programmer needs to be aware of the topology to place tasks | ||
7034 | + *   in the desired cluster | ||
7035 | + * - default clustering is around the L2 cache (cache index = 2); | ||
7036 | + *   supported clusters are L1 (private cache: pedf), L2, L3, and ALL (all | ||
7037 | + *   online_cpus placed in a single cluster). | ||
7038 | + * | ||
7039 | + * For details on functions, take a look at sched_gsn_edf.c | ||
7040 | + * | ||
7041 | + * Currently, we do not support changes in the number of online cpus. | ||
7042 | + * If num_online_cpus() changes dynamically, the plugin is broken. | ||
7043 | + * | ||
7044 | + * This version uses the simple approach and serializes all scheduling | ||
7045 | + * decisions by the use of a queue lock. This is probably not the | ||
7046 | + * best way to do it, but it should suffice for now. | ||
7047 | + */ | ||
7048 | + | ||
7049 | +#include <linux/spinlock.h> | ||
7050 | +#include <linux/percpu.h> | ||
7051 | +#include <linux/sched.h> | ||
7052 | +#include <linux/slab.h> | ||
7053 | + | ||
7054 | +#include <linux/module.h> | ||
7055 | + | ||
7056 | +#include <litmus/litmus.h> | ||
7057 | +#include <litmus/jobs.h> | ||
7058 | +#include <litmus/preempt.h> | ||
7059 | +#include <litmus/sched_plugin.h> | ||
7060 | +#include <litmus/edf_common.h> | ||
7061 | +#include <litmus/sched_trace.h> | ||
7062 | + | ||
7063 | +#include <litmus/bheap.h> | ||
7064 | + | ||
7065 | +/* to configure the cluster size */ | ||
7066 | +#include <litmus/litmus_proc.h> | ||
7067 | +#include <linux/uaccess.h> | ||
7068 | + | ||
7069 | +/* Reference configuration variable. Determines which cache level is used to | ||
7070 | + * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that | ||
7071 | + * all CPUs form a single cluster (just like GSN-EDF). | ||
7072 | + */ | ||
7073 | +static enum { | ||
7074 | + GLOBAL_CLUSTER = 0, | ||
7075 | + L1_CLUSTER = 1, | ||
7076 | + L2_CLUSTER = 2, | ||
7077 | + L3_CLUSTER = 3 | ||
7078 | +} cluster_config = GLOBAL_CLUSTER; | ||
7079 | + | ||
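The cluster size is chosen before the plugin is activated. In this version it is exposed through procfs, so it can be set from user space; in the sketch below, the path /proc/litmus/plugins/C-EDF/cluster and the tokens L1/L2/L3/ALL are assumptions based on this plugin's use of litmus_proc.h:

    #include <fcntl.h>
    #include <unistd.h>

    /* Sketch: request L3-sized clusters before switching to C-EDF.
     * Path and tokens are assumed, not confirmed by this hunk. */
    static void select_l3_clusters(void)
    {
    	int fd = open("/proc/litmus/plugins/C-EDF/cluster", O_WRONLY);

    	if (fd >= 0) {
    		write(fd, "L3", 2);
    		close(fd);
    	}
    }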
7080 | +struct clusterdomain; | ||
7081 | + | ||
7082 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
7083 | + * | ||
7084 | + * A cpu also contains a pointer to the cedf_domain_t cluster | ||
7085 | + * that owns it (struct clusterdomain*) | ||
7086 | + */ | ||
7087 | +typedef struct { | ||
7088 | + int cpu; | ||
7089 | + struct clusterdomain* cluster; /* owning cluster */ | ||
7090 | + struct task_struct* linked; /* only RT tasks */ | ||
7091 | + struct task_struct* scheduled; /* only RT tasks */ | ||
7092 | + atomic_t will_schedule; /* prevent unneeded IPIs */ | ||
7093 | + struct bheap_node* hn; | ||
7094 | +} cpu_entry_t; | ||
7095 | + | ||
7096 | +/* one cpu_entry_t per CPU */ | ||
7097 | +DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | ||
7098 | + | ||
7099 | +#define set_will_schedule() \ | ||
7100 | + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 1)) | ||
7101 | +#define clear_will_schedule() \ | ||
7102 | + (atomic_set(&__get_cpu_var(cedf_cpu_entries).will_schedule, 0)) | ||
7103 | +#define test_will_schedule(cpu) \ | ||
7104 | + (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | ||
7105 | + | ||
7106 | +/* | ||
7107 | + * In C-EDF there is a cedf domain _per_ cluster. | ||
7108 | + * The number of clusters is determined dynamically according to the | ||
7109 | + * total number of cpus and the cluster size. | ||
7110 | + */ | ||
7111 | +typedef struct clusterdomain { | ||
7112 | + /* rt_domain for this cluster */ | ||
7113 | + rt_domain_t domain; | ||
7114 | + /* cpus in this cluster */ | ||
7115 | +	cpu_entry_t **cpus; | ||
7116 | + /* map of this cluster cpus */ | ||
7117 | + cpumask_var_t cpu_map; | ||
7118 | + /* the cpus queue themselves according to priority in here */ | ||
7119 | + struct bheap_node *heap_node; | ||
7120 | + struct bheap cpu_heap; | ||
7121 | + /* lock for this cluster */ | ||
7122 | +#define lock domain.ready_lock | ||
7123 | +} cedf_domain_t; | ||
7124 | + | ||
7125 | +/* a cedf_domain per cluster; allocation is done at init/activation time */ | ||
7126 | +cedf_domain_t *cedf; | ||
7127 | + | ||
7128 | +#define remote_cluster(cpu) ((cedf_domain_t *) per_cpu(cedf_cpu_entries, cpu).cluster) | ||
7129 | +#define task_cpu_cluster(task) remote_cluster(get_partition(task)) | ||
7130 | + | ||
7131 | +/* Uncomment WANT_ALL_SCHED_EVENTS if you want to see all scheduling | ||
7132 | + * decisions in the TRACE() log; uncomment VERBOSE_INIT for verbose | ||
7133 | + * information during the initialization of the plugin (e.g., topology) | ||
7134 | +#define WANT_ALL_SCHED_EVENTS | ||
7135 | + */ | ||
7136 | +#define VERBOSE_INIT | ||
7137 | + | ||
7138 | +static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
7139 | +{ | ||
7140 | + cpu_entry_t *a, *b; | ||
7141 | + a = _a->value; | ||
7142 | + b = _b->value; | ||
7143 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
7144 | + * the top of the heap. | ||
7145 | + */ | ||
7146 | + return edf_higher_prio(b->linked, a->linked); | ||
7147 | +} | ||
7148 | + | ||
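A short worked example of the inverted ordering:

    /* Example: CPU0 is linked to a job with deadline 10, CPU1 to a job
     * with deadline 20. edf_higher_prio(b->linked, a->linked) ranks
     * CPU1 above CPU0, so bheap_peek() in lowest_prio_cpu() yields
     * CPU1, exactly the CPU a newly arrived job should preempt first.
     * An idle CPU (linked == NULL) ranks above any busy CPU. */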
7149 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
7150 | + * order in the cpu queue. Caller must hold cedf lock. | ||
7151 | + */ | ||
7152 | +static void update_cpu_position(cpu_entry_t *entry) | ||
7153 | +{ | ||
7154 | + cedf_domain_t *cluster = entry->cluster; | ||
7155 | + | ||
7156 | + if (likely(bheap_node_in_heap(entry->hn))) | ||
7157 | + bheap_delete(cpu_lower_prio, | ||
7158 | + &cluster->cpu_heap, | ||
7159 | + entry->hn); | ||
7160 | + | ||
7161 | + bheap_insert(cpu_lower_prio, &cluster->cpu_heap, entry->hn); | ||
7162 | +} | ||
7163 | + | ||
7164 | +/* caller must hold cedf lock */ | ||
7165 | +static cpu_entry_t* lowest_prio_cpu(cedf_domain_t *cluster) | ||
7166 | +{ | ||
7167 | + struct bheap_node* hn; | ||
7168 | + hn = bheap_peek(cpu_lower_prio, &cluster->cpu_heap); | ||
7169 | + return hn->value; | ||
7170 | +} | ||
7171 | + | ||
7172 | + | ||
7173 | +/* link_task_to_cpu - Update the link of a CPU. | ||
7174 | + * Handles the case where the to-be-linked task is already | ||
7175 | + * scheduled on a different CPU. | ||
7176 | + */ | ||
7177 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
7178 | + cpu_entry_t *entry) | ||
7179 | +{ | ||
7180 | + cpu_entry_t *sched; | ||
7181 | + struct task_struct* tmp; | ||
7182 | + int on_cpu; | ||
7183 | + | ||
7184 | + BUG_ON(linked && !is_realtime(linked)); | ||
7185 | + | ||
7186 | + /* Currently linked task is set to be unlinked. */ | ||
7187 | + if (entry->linked) { | ||
7188 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
7189 | + } | ||
7190 | + | ||
7191 | + /* Link new task to CPU. */ | ||
7192 | + if (linked) { | ||
7193 | + set_rt_flags(linked, RT_F_RUNNING); | ||
7194 | +		/* handle the case that the task is already scheduled somewhere! */ | ||
7195 | + on_cpu = linked->rt_param.scheduled_on; | ||
7196 | + if (on_cpu != NO_CPU) { | ||
7197 | + sched = &per_cpu(cedf_cpu_entries, on_cpu); | ||
7198 | + /* this should only happen if not linked already */ | ||
7199 | + BUG_ON(sched->linked == linked); | ||
7200 | + | ||
7201 | + /* If we are already scheduled on the CPU to which we | ||
7202 | + * wanted to link, we don't need to do the swap -- | ||
7203 | + * we just link ourselves to the CPU and depend on | ||
7204 | + * the caller to get things right. | ||
7205 | + */ | ||
7206 | + if (entry != sched) { | ||
7207 | + TRACE_TASK(linked, | ||
7208 | + "already scheduled on %d, updating link.\n", | ||
7209 | + sched->cpu); | ||
7210 | + tmp = sched->linked; | ||
7211 | + linked->rt_param.linked_on = sched->cpu; | ||
7212 | + sched->linked = linked; | ||
7213 | + update_cpu_position(sched); | ||
7214 | + linked = tmp; | ||
7215 | + } | ||
7216 | + } | ||
7217 | + if (linked) /* might be NULL due to swap */ | ||
7218 | + linked->rt_param.linked_on = entry->cpu; | ||
7219 | + } | ||
7220 | + entry->linked = linked; | ||
7221 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
7222 | + if (linked) | ||
7223 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
7224 | + else | ||
7225 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
7226 | +#endif | ||
7227 | + update_cpu_position(entry); | ||
7228 | +} | ||
7229 | + | ||
7230 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
7231 | + * where it was linked before. Must hold cedf_lock. | ||
7232 | + */ | ||
7233 | +static noinline void unlink(struct task_struct* t) | ||
7234 | +{ | ||
7235 | + cpu_entry_t *entry; | ||
7236 | + | ||
7237 | + if (t->rt_param.linked_on != NO_CPU) { | ||
7238 | + /* unlink */ | ||
7239 | + entry = &per_cpu(cedf_cpu_entries, t->rt_param.linked_on); | ||
7240 | + t->rt_param.linked_on = NO_CPU; | ||
7241 | + link_task_to_cpu(NULL, entry); | ||
7242 | + } else if (is_queued(t)) { | ||
7243 | + /* This is an interesting situation: t is scheduled, | ||
7244 | + * but was just recently unlinked. It cannot be | ||
7245 | + * linked anywhere else (because then it would have | ||
7246 | + * been relinked to this CPU), thus it must be in some | ||
7247 | + * queue. We must remove it from the list in this | ||
7248 | + * case. | ||
7249 | + * | ||
7250 | +		 * In the C-EDF case it should be somewhere in the queue of | ||
7251 | +		 * its cluster's domain, which we can obtain via | ||
7252 | +		 * task_cpu_cluster(). | ||
7253 | + */ | ||
7254 | + remove(&(task_cpu_cluster(t))->domain, t); | ||
7255 | + } | ||
7256 | +} | ||
7257 | + | ||
7258 | + | ||
7259 | +/* preempt - force a CPU to reschedule | ||
7260 | + */ | ||
7261 | +static void preempt(cpu_entry_t *entry) | ||
7262 | +{ | ||
7263 | + preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
7264 | +} | ||
7265 | + | ||
7266 | +/* requeue - Put an unlinked task into its cluster's c-edf domain. | ||
7267 | + * Caller must hold cedf_lock. | ||
7268 | + */ | ||
7269 | +static noinline void requeue(struct task_struct* task) | ||
7270 | +{ | ||
7271 | + cedf_domain_t *cluster = task_cpu_cluster(task); | ||
7272 | + BUG_ON(!task); | ||
7273 | + /* sanity check before insertion */ | ||
7274 | + BUG_ON(is_queued(task)); | ||
7275 | + | ||
7276 | + if (is_released(task, litmus_clock())) | ||
7277 | + __add_ready(&cluster->domain, task); | ||
7278 | + else { | ||
7279 | + /* it has got to wait */ | ||
7280 | + add_release(&cluster->domain, task); | ||
7281 | + } | ||
7282 | +} | ||
7283 | + | ||
7284 | +/* check for any necessary preemptions */ | ||
7285 | +static void check_for_preemptions(cedf_domain_t *cluster) | ||
7286 | +{ | ||
7287 | + struct task_struct *task; | ||
7288 | + cpu_entry_t* last; | ||
7289 | + | ||
7290 | +	for (last = lowest_prio_cpu(cluster); | ||
7291 | + edf_preemption_needed(&cluster->domain, last->linked); | ||
7292 | + last = lowest_prio_cpu(cluster)) { | ||
7293 | + /* preemption necessary */ | ||
7294 | + task = __take_ready(&cluster->domain); | ||
7295 | + TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
7296 | + task->pid, last->cpu); | ||
7297 | + if (last->linked) | ||
7298 | + requeue(last->linked); | ||
7299 | + link_task_to_cpu(task, last); | ||
7300 | + preempt(last); | ||
7301 | + } | ||
7302 | +} | ||
7303 | + | ||
7304 | +/* cedf_job_arrival: task is either resumed or released */ | ||
7305 | +static noinline void cedf_job_arrival(struct task_struct* task) | ||
7306 | +{ | ||
7307 | + cedf_domain_t *cluster = task_cpu_cluster(task); | ||
7308 | + BUG_ON(!task); | ||
7309 | + | ||
7310 | + requeue(task); | ||
7311 | + check_for_preemptions(cluster); | ||
7312 | +} | ||
7313 | + | ||
7314 | +static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
7315 | +{ | ||
7316 | + cedf_domain_t* cluster = container_of(rt, cedf_domain_t, domain); | ||
7317 | + unsigned long flags; | ||
7318 | + | ||
7319 | + raw_spin_lock_irqsave(&cluster->lock, flags); | ||
7320 | + | ||
7321 | + __merge_ready(&cluster->domain, tasks); | ||
7322 | + check_for_preemptions(cluster); | ||
7323 | + | ||
7324 | + raw_spin_unlock_irqrestore(&cluster->lock, flags); | ||
7325 | +} | ||
7326 | + | ||
7327 | +/* caller holds cedf_lock */ | ||
7328 | +static noinline void job_completion(struct task_struct *t, int forced) | ||
7329 | +{ | ||
7330 | + BUG_ON(!t); | ||
7331 | + | ||
7332 | + sched_trace_task_completion(t, forced); | ||
7333 | + | ||
7334 | + TRACE_TASK(t, "job_completion().\n"); | ||
7335 | + | ||
7336 | + /* set flags */ | ||
7337 | + set_rt_flags(t, RT_F_SLEEP); | ||
7338 | + /* prepare for next period */ | ||
7339 | + prepare_for_next_period(t); | ||
7340 | + if (is_released(t, litmus_clock())) | ||
7341 | + sched_trace_task_release(t); | ||
7342 | + /* unlink */ | ||
7343 | + unlink(t); | ||
7344 | + /* requeue | ||
7345 | + * But don't requeue a blocking task. */ | ||
7346 | + if (is_running(t)) | ||
7347 | + cedf_job_arrival(t); | ||
7348 | +} | ||
7349 | + | ||
7350 | +/* cedf_tick - this function is called for every local timer | ||
7351 | + * interrupt. | ||
7352 | + * | ||
7353 | + * checks whether the current task has expired and checks | ||
7354 | + * whether we need to preempt it if it has not expired | ||
7355 | + */ | ||
7356 | +static void cedf_tick(struct task_struct* t) | ||
7357 | +{ | ||
7358 | + if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
7359 | + if (!is_np(t)) { | ||
7360 | + /* np tasks will be preempted when they become | ||
7361 | + * preemptable again | ||
7362 | + */ | ||
7363 | + litmus_reschedule_local(); | ||
7364 | + set_will_schedule(); | ||
7365 | + TRACE("cedf_scheduler_tick: " | ||
7366 | + "%d is preemptable " | ||
7367 | + " => FORCE_RESCHED\n", t->pid); | ||
7368 | + } else if (is_user_np(t)) { | ||
7369 | + TRACE("cedf_scheduler_tick: " | ||
7370 | + "%d is non-preemptable, " | ||
7371 | + "preemption delayed.\n", t->pid); | ||
7372 | + request_exit_np(t); | ||
7373 | + } | ||
7374 | + } | ||
7375 | +} | ||
7376 | + | ||
7377 | +/* Getting schedule() right is a bit tricky. schedule() may not make any | ||
7378 | + * assumptions on the state of the current task since it may be called for a | ||
7379 | + * number of reasons. Among them: a scheduler_tick() determined that it | ||
7380 | + * was necessary, sys_exit_np() was called, some Linux | ||
7381 | + * subsystem determined so, or (in the worst case) there is a bug | ||
7382 | + * hidden somewhere. Thus, we must take extreme care to determine what the | ||
7383 | + * current state is. | ||
7384 | + * | ||
7385 | + * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
7386 | + * | ||
7387 | + * The following assertions for the scheduled task could hold: | ||
7388 | + * | ||
7389 | + * - !is_running(scheduled) // the job blocks | ||
7390 | + * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
7391 | + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
7392 | + * - linked != scheduled // we need to reschedule (for any reason) | ||
7393 | + * - is_np(scheduled) // rescheduling must be delayed, | ||
7394 | + * sys_exit_np must be requested | ||
7395 | + * | ||
7396 | + * Any of these can occur together. | ||
7397 | + */ | ||
7398 | +static struct task_struct* cedf_schedule(struct task_struct * prev) | ||
7399 | +{ | ||
7400 | + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
7401 | + cedf_domain_t *cluster = entry->cluster; | ||
7402 | + int out_of_time, sleep, preempt, np, exists, blocks; | ||
7403 | + struct task_struct* next = NULL; | ||
7404 | + | ||
7405 | + raw_spin_lock(&cluster->lock); | ||
7406 | + clear_will_schedule(); | ||
7407 | + | ||
7408 | + /* sanity checking */ | ||
7409 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
7410 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
7411 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
7412 | + | ||
7413 | + /* (0) Determine state */ | ||
7414 | + exists = entry->scheduled != NULL; | ||
7415 | + blocks = exists && !is_running(entry->scheduled); | ||
7416 | + out_of_time = exists && | ||
7417 | + budget_enforced(entry->scheduled) && | ||
7418 | + budget_exhausted(entry->scheduled); | ||
7419 | + np = exists && is_np(entry->scheduled); | ||
7420 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
7421 | + preempt = entry->scheduled != entry->linked; | ||
7422 | + | ||
7423 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
7424 | + TRACE_TASK(prev, "invoked cedf_schedule.\n"); | ||
7425 | +#endif | ||
7426 | + | ||
7427 | + if (exists) | ||
7428 | + TRACE_TASK(prev, | ||
7429 | + "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
7430 | + "state:%d sig:%d\n", | ||
7431 | + blocks, out_of_time, np, sleep, preempt, | ||
7432 | + prev->state, signal_pending(prev)); | ||
7433 | + if (entry->linked && preempt) | ||
7434 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
7435 | + entry->linked->comm, entry->linked->pid); | ||
7436 | + | ||
7437 | + | ||
7438 | + /* If a task blocks we have no choice but to reschedule. | ||
7439 | + */ | ||
7440 | + if (blocks) | ||
7441 | + unlink(entry->scheduled); | ||
7442 | + | ||
7443 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
7444 | + * We need to make sure to update the link structure anyway in case | ||
7445 | + * that we are still linked. Multiple calls to request_exit_np() don't | ||
7446 | + * hurt. | ||
7447 | + */ | ||
7448 | + if (np && (out_of_time || preempt || sleep)) { | ||
7449 | + unlink(entry->scheduled); | ||
7450 | + request_exit_np(entry->scheduled); | ||
7451 | + } | ||
7452 | + | ||
7453 | +	/* A preemptable task that either exhausts its execution budget or | ||
7454 | +	 * wants to sleep completes its current job. We may have to reschedule | ||
7455 | +	 * after this. Don't do a job completion if we block (can't have timers | ||
7456 | +	 * running for blocked jobs). Preemptions go first for the same reason. | ||
7457 | + */ | ||
7458 | + if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
7459 | + job_completion(entry->scheduled, !sleep); | ||
7460 | + | ||
7461 | + /* Link pending task if we became unlinked. | ||
7462 | + */ | ||
7463 | + if (!entry->linked) | ||
7464 | + link_task_to_cpu(__take_ready(&cluster->domain), entry); | ||
7465 | + | ||
7466 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
7467 | + * If linked is different from scheduled, then select linked as next. | ||
7468 | + */ | ||
7469 | + if ((!np || blocks) && | ||
7470 | + entry->linked != entry->scheduled) { | ||
7471 | + /* Schedule a linked job? */ | ||
7472 | + if (entry->linked) { | ||
7473 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
7474 | + next = entry->linked; | ||
7475 | + } | ||
7476 | + if (entry->scheduled) { | ||
7477 | + /* not gonna be scheduled soon */ | ||
7478 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
7479 | + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
7480 | + } | ||
7481 | + } else | ||
7482 | + /* Only override Linux scheduler if we have a real-time task | ||
7483 | + * scheduled that needs to continue. | ||
7484 | + */ | ||
7485 | + if (exists) | ||
7486 | + next = prev; | ||
7487 | + | ||
7488 | + sched_state_task_picked(); | ||
7489 | + raw_spin_unlock(&cluster->lock); | ||
7490 | + | ||
7491 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
7492 | + TRACE("cedf_lock released, next=0x%p\n", next); | ||
7493 | + | ||
7494 | + if (next) | ||
7495 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
7496 | + else if (exists && !next) | ||
7497 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
7498 | +#endif | ||
7499 | + | ||
7500 | + | ||
7501 | + return next; | ||
7502 | +} | ||
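Stripped of locking and tracing, the final decision above is a pure function of the state bits computed in step (0). A minimal sketch under that reading (hypothetical task type; the semantics mirror the branches above, where prev and entry->scheduled coincide whenever a task exists):

    #include <stdio.h>

    struct fake_task { const char *name; };

    /* Sketch of cedf_schedule()'s decision core: NULL means "let Linux
     * pick" (the CPU goes idle from the real-time perspective). */
    static struct fake_task *pick_next_sketch(int np, int blocks, int exists,
                                              struct fake_task *linked,
                                              struct fake_task *scheduled)
    {
            struct fake_task *next = NULL;

            if ((!np || blocks) && linked != scheduled)
                    next = linked;    /* may be NULL => CPU idles  */
            else if (exists)
                    next = scheduled; /* real-time task carries on */

            return next;
    }

    int main(void)
    {
            struct fake_task a = { "linked" }, b = { "scheduled" };
            struct fake_task *n = pick_next_sketch(0, 0, 1, &a, &b);
            printf("next = %s\n", n ? n->name : "(idle)"); /* linked */
            return 0;
    }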
7503 | + | ||
7504 | + | ||
7505 | +/* _finish_switch - we just finished the switch away from prev | ||
7506 | + */ | ||
7507 | +static void cedf_finish_switch(struct task_struct *prev) | ||
7508 | +{ | ||
7509 | + cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); | ||
7510 | + | ||
7511 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
7512 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
7513 | + TRACE_TASK(prev, "switched away from\n"); | ||
7514 | +#endif | ||
7515 | +} | ||
7516 | + | ||
7517 | + | ||
7518 | +/* Prepare a task for running in RT mode | ||
7519 | + */ | ||
7520 | +static void cedf_task_new(struct task_struct * t, int on_rq, int running) | ||
7521 | +{ | ||
7522 | + unsigned long flags; | ||
7523 | + cpu_entry_t* entry; | ||
7524 | + cedf_domain_t* cluster; | ||
7525 | + | ||
7526 | +	TRACE("C-EDF: task new %d\n", t->pid); | ||
7527 | + | ||
7528 | + /* the cluster doesn't change even if t is running */ | ||
7529 | + cluster = task_cpu_cluster(t); | ||
7530 | + | ||
7531 | + raw_spin_lock_irqsave(&cluster->domain.ready_lock, flags); | ||
7532 | + | ||
7533 | + /* setup job params */ | ||
7534 | + release_at(t, litmus_clock()); | ||
7535 | + | ||
7536 | + if (running) { | ||
7537 | + entry = &per_cpu(cedf_cpu_entries, task_cpu(t)); | ||
7538 | + BUG_ON(entry->scheduled); | ||
7539 | + | ||
7540 | + entry->scheduled = t; | ||
7541 | + tsk_rt(t)->scheduled_on = task_cpu(t); | ||
7542 | + } else { | ||
7543 | + t->rt_param.scheduled_on = NO_CPU; | ||
7544 | + } | ||
7545 | + t->rt_param.linked_on = NO_CPU; | ||
7546 | + | ||
7547 | + cedf_job_arrival(t); | ||
7548 | + raw_spin_unlock_irqrestore(&(cluster->domain.ready_lock), flags); | ||
7549 | +} | ||
7550 | + | ||
7551 | +static void cedf_task_wake_up(struct task_struct *task) | ||
7552 | +{ | ||
7553 | + unsigned long flags; | ||
7554 | + lt_t now; | ||
7555 | + cedf_domain_t *cluster; | ||
7556 | + | ||
7557 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
7558 | + | ||
7559 | + cluster = task_cpu_cluster(task); | ||
7560 | + | ||
7561 | + raw_spin_lock_irqsave(&cluster->lock, flags); | ||
7562 | + /* We need to take suspensions because of semaphores into | ||
7563 | + * account! If a job resumes after being suspended due to acquiring | ||
7564 | + * a semaphore, it should never be treated as a new job release. | ||
7565 | + */ | ||
7566 | + if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
7567 | + set_rt_flags(task, RT_F_RUNNING); | ||
7568 | + } else { | ||
7569 | + now = litmus_clock(); | ||
7570 | + if (is_tardy(task, now)) { | ||
7571 | + /* new sporadic release */ | ||
7572 | + release_at(task, now); | ||
7573 | + sched_trace_task_release(task); | ||
7574 | + } | ||
7575 | + else { | ||
7576 | + if (task->rt.time_slice) { | ||
7577 | + /* came back in time before deadline | ||
7578 | + */ | ||
7579 | + set_rt_flags(task, RT_F_RUNNING); | ||
7580 | + } | ||
7581 | + } | ||
7582 | + } | ||
7583 | + cedf_job_arrival(task); | ||
7584 | + raw_spin_unlock_irqrestore(&cluster->lock, flags); | ||
7585 | +} | ||
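The wake-up path distinguishes three cases: a resume after a semaphore suspension (never a new release), a tardy resume (treated as a new sporadic release), and an early resume before the deadline. A compact sketch of that classification, under the simplifying assumption that tardiness means the job's absolute deadline has passed (hypothetical input: exited_sem stands for RT_F_EXIT_SEM):

    #include <stdint.h>

    enum wake_kind { WAKE_SEM_RESUME, WAKE_NEW_RELEASE, WAKE_EARLY_RESUME };

    /* Sketch of the wake-up classification in cedf_task_wake_up(). */
    static enum wake_kind classify_wakeup(int exited_sem, uint64_t now,
                                          uint64_t deadline)
    {
            if (exited_sem)
                    return WAKE_SEM_RESUME;  /* same job, no new release   */
            if (now >= deadline)
                    return WAKE_NEW_RELEASE; /* tardy: sporadic re-release */
            return WAKE_EARLY_RESUME;        /* same job continues         */
    }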
7586 | + | ||
7587 | +static void cedf_task_block(struct task_struct *t) | ||
7588 | +{ | ||
7589 | + unsigned long flags; | ||
7590 | + cedf_domain_t *cluster; | ||
7591 | + | ||
7592 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
7593 | + | ||
7594 | + cluster = task_cpu_cluster(t); | ||
7595 | + | ||
7596 | + /* unlink if necessary */ | ||
7597 | + raw_spin_lock_irqsave(&cluster->lock, flags); | ||
7598 | + unlink(t); | ||
7599 | + raw_spin_unlock_irqrestore(&cluster->lock, flags); | ||
7600 | + | ||
7601 | + BUG_ON(!is_realtime(t)); | ||
7602 | +} | ||
7603 | + | ||
7604 | + | ||
7605 | +static void cedf_task_exit(struct task_struct * t) | ||
7606 | +{ | ||
7607 | + unsigned long flags; | ||
7608 | + cedf_domain_t *cluster = task_cpu_cluster(t); | ||
7609 | + | ||
7610 | + /* unlink if necessary */ | ||
7611 | + raw_spin_lock_irqsave(&cluster->lock, flags); | ||
7612 | + unlink(t); | ||
7613 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
7614 | + cpu_entry_t *cpu; | ||
7615 | + cpu = &per_cpu(cedf_cpu_entries, tsk_rt(t)->scheduled_on); | ||
7616 | + cpu->scheduled = NULL; | ||
7617 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
7618 | + } | ||
7619 | + raw_spin_unlock_irqrestore(&cluster->lock, flags); | ||
7620 | + | ||
7621 | + BUG_ON(!is_realtime(t)); | ||
7622 | + TRACE_TASK(t, "RIP\n"); | ||
7623 | +} | ||
7624 | + | ||
7625 | +static long cedf_admit_task(struct task_struct* tsk) | ||
7626 | +{ | ||
7627 | + return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | ||
7628 | +} | ||
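Admission fails unless the task is already executing on the CPU named in its task parameters, so user space is expected to pin itself before entering real-time mode. A sketch using only standard Linux affinity calls (the LITMUS^RT parameter setup and the mode switch itself are elided, since they are not part of this hunk):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Pin the calling thread to 'cpu' so a later C-EDF admission test
     * (task_cpu(tsk) == task_params.cpu) can succeed. */
    static int pin_to_cpu(int cpu)
    {
            cpu_set_t set;
            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            return sched_setaffinity(0 /* this thread */, sizeof(set), &set);
    }

    int main(void)
    {
            if (pin_to_cpu(0))
                    perror("sched_setaffinity");
            /* ... set task parameters, become a real-time task ... */
            return 0;
    }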
7629 | + | ||
7630 | +/* total number of clusters */ | ||
7631 | +static int num_clusters; | ||
7632 | +/* we do not support clusters of different sizes */ | ||
7633 | +static unsigned int cluster_size; | ||
7634 | + | ||
7635 | +#ifdef VERBOSE_INIT | ||
7636 | +static void print_cluster_topology(cpumask_var_t mask, int cpu) | ||
7637 | +{ | ||
7638 | + int chk; | ||
7639 | + char buf[255]; | ||
7640 | + | ||
7641 | + chk = cpulist_scnprintf(buf, 254, mask); | ||
7642 | + buf[chk] = '\0'; | ||
7643 | + printk(KERN_INFO "CPU = %d, shared cpu(s) = %s\n", cpu, buf); | ||
7644 | + | ||
7645 | +} | ||
7646 | +#endif | ||
7647 | + | ||
7648 | +static int clusters_allocated = 0; | ||
7649 | + | ||
7650 | +static void cleanup_cedf(void) | ||
7651 | +{ | ||
7652 | + int i; | ||
7653 | + | ||
7654 | + if (clusters_allocated) { | ||
7655 | + for (i = 0; i < num_clusters; i++) { | ||
7656 | + kfree(cedf[i].cpus); | ||
7657 | + kfree(cedf[i].heap_node); | ||
7658 | + free_cpumask_var(cedf[i].cpu_map); | ||
7659 | + } | ||
7660 | + | ||
7661 | + kfree(cedf); | ||
7662 | + } | ||
7663 | +} | ||
7664 | + | ||
7665 | +static long cedf_activate_plugin(void) | ||
7666 | +{ | ||
7667 | + int i, j, cpu, ccpu, cpu_count; | ||
7668 | + cpu_entry_t *entry; | ||
7669 | + | ||
7670 | + cpumask_var_t mask; | ||
7671 | + int chk = 0; | ||
7672 | + | ||
7673 | + /* de-allocate old clusters, if any */ | ||
7674 | + cleanup_cedf(); | ||
7675 | + | ||
7676 | + printk(KERN_INFO "C-EDF: Activate Plugin, cluster configuration = %d\n", | ||
7677 | + cluster_config); | ||
7678 | + | ||
7679 | + /* need to get cluster_size first */ | ||
7680 | + if(!zalloc_cpumask_var(&mask, GFP_ATOMIC)) | ||
7681 | + return -ENOMEM; | ||
7682 | + | ||
7683 | + if (unlikely(cluster_config == GLOBAL_CLUSTER)) { | ||
7684 | + cluster_size = num_online_cpus(); | ||
7685 | + } else { | ||
7686 | + chk = get_shared_cpu_map(mask, 0, cluster_config); | ||
7687 | + if (chk) { | ||
7688 | + /* if chk != 0 then it is the max allowed index */ | ||
7689 | + printk(KERN_INFO "C-EDF: Cluster configuration = %d " | ||
7690 | + "is not supported on this hardware.\n", | ||
7691 | + cluster_config); | ||
7692 | + /* User should notice that the configuration failed, so | ||
7693 | + * let's bail out. */ | ||
7694 | + return -EINVAL; | ||
7695 | + } | ||
7696 | + | ||
7697 | + cluster_size = cpumask_weight(mask); | ||
7698 | + } | ||
7699 | + | ||
7700 | + if ((num_online_cpus() % cluster_size) != 0) { | ||
7701 | +		/* this can't be right, some cpus would be left out */ | ||
7702 | +		printk(KERN_ERR "C-EDF: cannot partition %d cpus into clusters of size %d!\n", | ||
7703 | +		       num_online_cpus(), cluster_size); | ||
7704 | +		return -EINVAL; | ||
7705 | + } | ||
7706 | + | ||
7707 | + num_clusters = num_online_cpus() / cluster_size; | ||
7708 | + printk(KERN_INFO "C-EDF: %d cluster(s) of size = %d\n", | ||
7709 | + num_clusters, cluster_size); | ||
7710 | + | ||
7711 | + /* initialize clusters */ | ||
7712 | + cedf = kmalloc(num_clusters * sizeof(cedf_domain_t), GFP_ATOMIC); | ||
7713 | + for (i = 0; i < num_clusters; i++) { | ||
7714 | + | ||
7715 | + cedf[i].cpus = kmalloc(cluster_size * sizeof(cpu_entry_t), | ||
7716 | + GFP_ATOMIC); | ||
7717 | + cedf[i].heap_node = kmalloc( | ||
7718 | + cluster_size * sizeof(struct bheap_node), | ||
7719 | + GFP_ATOMIC); | ||
7720 | + bheap_init(&(cedf[i].cpu_heap)); | ||
7721 | + edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); | ||
7722 | + | ||
7723 | + if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) | ||
7724 | + return -ENOMEM; | ||
7725 | + } | ||
7726 | + | ||
7727 | +	/* cycle through clusters and add cpus to them */ | ||
7728 | + for (i = 0; i < num_clusters; i++) { | ||
7729 | + | ||
7730 | + for_each_online_cpu(cpu) { | ||
7731 | + /* check if the cpu is already in a cluster */ | ||
7732 | + for (j = 0; j < num_clusters; j++) | ||
7733 | + if (cpumask_test_cpu(cpu, cedf[j].cpu_map)) | ||
7734 | + break; | ||
7735 | + /* if it is in a cluster go to next cpu */ | ||
7736 | + if (j < num_clusters && | ||
7737 | + cpumask_test_cpu(cpu, cedf[j].cpu_map)) | ||
7738 | + continue; | ||
7739 | + | ||
7740 | + /* this cpu isn't in any cluster */ | ||
7741 | + /* get the shared cpus */ | ||
7742 | + if (unlikely(cluster_config == GLOBAL_CLUSTER)) | ||
7743 | + cpumask_copy(mask, cpu_online_mask); | ||
7744 | + else | ||
7745 | + get_shared_cpu_map(mask, cpu, cluster_config); | ||
7746 | + | ||
7747 | + cpumask_copy(cedf[i].cpu_map, mask); | ||
7748 | +#ifdef VERBOSE_INIT | ||
7749 | + print_cluster_topology(mask, cpu); | ||
7750 | +#endif | ||
7751 | + /* add cpus to current cluster and init cpu_entry_t */ | ||
7752 | + cpu_count = 0; | ||
7753 | + for_each_cpu(ccpu, cedf[i].cpu_map) { | ||
7754 | + | ||
7755 | + entry = &per_cpu(cedf_cpu_entries, ccpu); | ||
7756 | + cedf[i].cpus[cpu_count] = entry; | ||
7757 | + atomic_set(&entry->will_schedule, 0); | ||
7758 | + entry->cpu = ccpu; | ||
7759 | + entry->cluster = &cedf[i]; | ||
7760 | + entry->hn = &(cedf[i].heap_node[cpu_count]); | ||
7761 | + bheap_node_init(&entry->hn, entry); | ||
7762 | + | ||
7763 | + cpu_count++; | ||
7764 | + | ||
7765 | + entry->linked = NULL; | ||
7766 | + entry->scheduled = NULL; | ||
7767 | + update_cpu_position(entry); | ||
7768 | + } | ||
7769 | + /* done with this cluster */ | ||
7770 | + break; | ||
7771 | + } | ||
7772 | + } | ||
7773 | + | ||
7774 | + free_cpumask_var(mask); | ||
7775 | + clusters_allocated = 1; | ||
7776 | + return 0; | ||
7777 | +} | ||
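The arithmetic of the activation path is simple: with N online CPUs and a topology-derived cluster size c, activation requires that c divide N evenly and then builds N/c clusters. A trivially checkable sketch of the same computation:

    #include <stdio.h>

    /* Returns the number of clusters, or -1 if 'ncpus' cannot be split
     * evenly into clusters of 'csize' CPUs (mirrors the check above). */
    static int num_clusters_sketch(int ncpus, int csize)
    {
            if (csize <= 0 || ncpus % csize != 0)
                    return -1;
            return ncpus / csize;
    }

    int main(void)
    {
            printf("%d\n", num_clusters_sketch(8, 2)); /* 4  */
            printf("%d\n", num_clusters_sketch(6, 4)); /* -1 */
            return 0;
    }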
7778 | + | ||
7779 | +/* Plugin object */ | ||
7780 | +static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | ||
7781 | + .plugin_name = "C-EDF", | ||
7782 | + .finish_switch = cedf_finish_switch, | ||
7783 | + .tick = cedf_tick, | ||
7784 | + .task_new = cedf_task_new, | ||
7785 | + .complete_job = complete_job, | ||
7786 | + .task_exit = cedf_task_exit, | ||
7787 | + .schedule = cedf_schedule, | ||
7788 | + .task_wake_up = cedf_task_wake_up, | ||
7789 | + .task_block = cedf_task_block, | ||
7790 | + .admit_task = cedf_admit_task, | ||
7791 | + .activate_plugin = cedf_activate_plugin, | ||
7792 | +}; | ||
7793 | + | ||
7794 | + | ||
7795 | +/* proc file interface to configure the cluster size */ | ||
7796 | + | ||
7797 | +static int proc_read_cluster_size(char *page, char **start, | ||
7798 | + off_t off, int count, | ||
7799 | + int *eof, void *data) | ||
7800 | +{ | ||
7801 | + int len; | ||
7802 | + switch (cluster_config) { | ||
7803 | + case GLOBAL_CLUSTER: | ||
7804 | + len = snprintf(page, PAGE_SIZE, "ALL\n"); | ||
7805 | + break; | ||
7806 | + case L1_CLUSTER: | ||
7807 | + case L2_CLUSTER: | ||
7808 | + case L3_CLUSTER: | ||
7809 | + len = snprintf(page, PAGE_SIZE, "L%d\n", cluster_config); | ||
7810 | + break; | ||
7811 | + default: | ||
7812 | + /* This should be impossible, but let's be paranoid. */ | ||
7813 | + len = snprintf(page, PAGE_SIZE, "INVALID (%d)\n", | ||
7814 | + cluster_config); | ||
7815 | + break; | ||
7816 | + } | ||
7817 | + return len; | ||
7818 | +} | ||
7819 | + | ||
7820 | +static int proc_write_cluster_size(struct file *file, | ||
7821 | + const char *buffer, | ||
7822 | + unsigned long count, | ||
7823 | + void *data) | ||
7824 | +{ | ||
7825 | + int len; | ||
7826 | + /* L2, L3 */ | ||
7827 | + char cache_name[33]; | ||
7828 | + | ||
7829 | + if(count > 32) | ||
7830 | + len = 32; | ||
7831 | + else | ||
7832 | + len = count; | ||
7833 | + | ||
7834 | + if(copy_from_user(cache_name, buffer, len)) | ||
7835 | + return -EFAULT; | ||
7836 | + | ||
7837 | + cache_name[len] = '\0'; | ||
7838 | + /* chomp name */ | ||
7839 | + if (len > 1 && cache_name[len - 1] == '\n') | ||
7840 | + cache_name[len - 1] = '\0'; | ||
7841 | + | ||
7842 | + /* do a quick and dirty comparison to find the cluster size */ | ||
7843 | + if (!strcmp(cache_name, "L2")) | ||
7844 | + cluster_config = L2_CLUSTER; | ||
7845 | + else if (!strcmp(cache_name, "L3")) | ||
7846 | + cluster_config = L3_CLUSTER; | ||
7847 | + else if (!strcmp(cache_name, "L1")) | ||
7848 | + cluster_config = L1_CLUSTER; | ||
7849 | + else if (!strcmp(cache_name, "ALL")) | ||
7850 | + cluster_config = GLOBAL_CLUSTER; | ||
7851 | + else | ||
7852 | + printk(KERN_INFO "Cluster '%s' is unknown.\n", cache_name); | ||
7853 | + | ||
7854 | + return len; | ||
7855 | +} | ||
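Together with proc_read_cluster_size() above, this gives the plugin a one-word text interface: writing L1, L2, L3, or ALL selects the cache level around which clusters are formed at the next plugin activation. Assuming the directory created by make_plugin_proc_dir() lives under /proc/litmus/plugins (the conventional location; the exact path is not shown in this patch), configuration would look like:

    echo L2 > /proc/litmus/plugins/C-EDF/cluster
    cat /proc/litmus/plugins/C-EDF/cluster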
7856 | + | ||
7857 | + | ||
7858 | +static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; | ||
7859 | + | ||
7860 | + | ||
7861 | +static int __init init_cedf(void) | ||
7862 | +{ | ||
7863 | + int err, fs; | ||
7864 | + | ||
7865 | + err = register_sched_plugin(&cedf_plugin); | ||
7866 | + if (!err) { | ||
7867 | + fs = make_plugin_proc_dir(&cedf_plugin, &cedf_dir); | ||
7868 | + if (!fs) { | ||
7869 | + cluster_file = create_proc_entry("cluster", 0644, cedf_dir); | ||
7870 | + if (!cluster_file) { | ||
7871 | + printk(KERN_ERR "Could not allocate C-EDF/cluster " | ||
7872 | + "procfs entry.\n"); | ||
7873 | + } else { | ||
7874 | + cluster_file->read_proc = proc_read_cluster_size; | ||
7875 | + cluster_file->write_proc = proc_write_cluster_size; | ||
7876 | + } | ||
7877 | + } else { | ||
7878 | + printk(KERN_ERR "Could not allocate C-EDF procfs dir.\n"); | ||
7879 | + } | ||
7880 | + } | ||
7881 | + return err; | ||
7882 | +} | ||
7883 | + | ||
7884 | +static void clean_cedf(void) | ||
7885 | +{ | ||
7886 | + cleanup_cedf(); | ||
7887 | + if (cluster_file) | ||
7888 | + remove_proc_entry("cluster", cedf_dir); | ||
7889 | + if (cedf_dir) | ||
7890 | + remove_plugin_proc_dir(&cedf_plugin); | ||
7891 | +} | ||
7892 | + | ||
7893 | +module_init(init_cedf); | ||
7894 | +module_exit(clean_cedf); | ||
7895 | diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c | ||
7896 | new file mode 100644 | ||
7897 | index 0000000..e9c5e53 | ||
7898 | --- /dev/null | ||
7899 | +++ b/litmus/sched_gsn_edf.c | ||
7900 | @@ -0,0 +1,828 @@ | ||
7901 | +/* | ||
7902 | + * litmus/sched_gsn_edf.c | ||
7903 | + * | ||
7904 | + * Implementation of the GSN-EDF scheduling algorithm. | ||
7905 | + * | ||
7906 | + * This version uses the simple approach and serializes all scheduling | ||
7907 | + * decisions by the use of a queue lock. This is probably not the | ||
7908 | + * best way to do it, but it should suffice for now. | ||
7909 | + */ | ||
7910 | + | ||
7911 | +#include <linux/spinlock.h> | ||
7912 | +#include <linux/percpu.h> | ||
7913 | +#include <linux/sched.h> | ||
7914 | + | ||
7915 | +#include <litmus/litmus.h> | ||
7916 | +#include <litmus/jobs.h> | ||
7917 | +#include <litmus/sched_plugin.h> | ||
7918 | +#include <litmus/edf_common.h> | ||
7919 | +#include <litmus/sched_trace.h> | ||
7920 | + | ||
7921 | +#include <litmus/preempt.h> | ||
7922 | + | ||
7923 | +#include <litmus/bheap.h> | ||
7924 | + | ||
7925 | +#include <linux/module.h> | ||
7926 | + | ||
7927 | +/* Overview of GSN-EDF operations. | ||
7928 | + * | ||
7929 | + * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This | ||
7930 | + * description only covers how the individual operations are implemented in | ||
7931 | + * LITMUS. | ||
7932 | + * | ||
7933 | + * link_task_to_cpu(T, cpu) - Low-level operation to update the linkage | ||
7934 | + * structure (NOT the actually scheduled | ||
7935 | + * task). If there is another linked task To | ||
7936 | + * already it will set To->linked_on = NO_CPU | ||
7937 | + *                                        already, it will set To->linked_on = NO_CPU | ||
7938 | + * CPU). However, it will not requeue the | ||
7939 | + * previously linked task (if any). It will set | ||
7940 | + * T's state to RT_F_RUNNING and check whether | ||
7941 | + * it is already running somewhere else. If T | ||
7942 | + * is scheduled somewhere else it will link | ||
7943 | + * it to that CPU instead (and pull the linked | ||
7944 | + * task to cpu). T may be NULL. | ||
7945 | + * | ||
7946 | + * unlink(T) - Unlink removes T from all scheduler data | ||
7947 | + * structures. If it is linked to some CPU it | ||
7948 | + * will link NULL to that CPU. If it is | ||
7949 | + * currently queued in the gsnedf queue it will | ||
7950 | + * be removed from the rt_domain. It is safe to | ||
7951 | + * call unlink(T) if T is not linked. T may not | ||
7952 | + * be NULL. | ||
7953 | + * | ||
7954 | + * requeue(T) - Requeue will insert T into the appropriate | ||
7955 | + * queue. If the system is in real-time mode and | ||
7956 | + * the T is released already, it will go into the | ||
7957 | + *                                        ready queue. If the system is not yet in | ||
7958 | + *                                        real-time mode, then T will go into the | ||
7959 | + * release queue. If T's release time is in the | ||
7960 | + * future, it will go into the release | ||
7961 | + * queue. That means that T's release time/job | ||
7962 | + *                                        no/etc. has to be updated before requeue(T) is | ||
7963 | + * called. It is not safe to call requeue(T) | ||
7964 | + * when T is already queued. T may not be NULL. | ||
7965 | + * | ||
7966 | + * gsnedf_job_arrival(T) - This is the catch all function when T enters | ||
7967 | + * the system after either a suspension or at a | ||
7968 | + * job release. It will queue T (which means it | ||
7969 | + * is not safe to call gsnedf_job_arrival(T) if | ||
7970 | + * T is already queued) and then check whether a | ||
7971 | + * preemption is necessary. If a preemption is | ||
7972 | + * necessary it will update the linkage | ||
7973 | + *                                        accordingly and cause schedule() to be called | ||
7974 | + * (either with an IPI or need_resched). It is | ||
7975 | + * safe to call gsnedf_job_arrival(T) if T's | ||
7976 | + * next job has not been actually released yet | ||
7977 | + *                                        (release time in the future). T will be put | ||
7978 | + * on the release queue in that case. | ||
7979 | + * | ||
7980 | + * job_completion(T) - Take care of everything that needs to be done | ||
7981 | + * to prepare T for its next release and place | ||
7982 | + * it in the right queue with | ||
7983 | + * gsnedf_job_arrival(). | ||
7984 | + * | ||
7985 | + * | ||
7986 | + * When we know that T is linked to a CPU, then link_task_to_cpu(NULL, CPU) is | ||
7987 | + * equivalent to unlink(T). Note that if you unlink a task from a CPU, none of | ||
7988 | + * these functions will automatically propagate a pending task from the ready | ||
7989 | + * queue to the unlinked CPU. This is the job of the calling function (by | ||
7990 | + * means of __take_ready). | ||
7991 | + */ | ||
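Of these operations, requeue() is the easiest to picture: a job whose release time has passed belongs on the ready queue, anything else is parked on the release queue. A self-contained sketch, with the queues reduced to counters (the kernel uses priority heaps):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of requeue(): route a job by comparing its release time to
     * the current clock. */
    static void requeue_sketch(int *n_ready, int *n_release,
                               uint64_t release_time, uint64_t now)
    {
            if (release_time <= now)
                    (*n_ready)++;    /* already released */
            else
                    (*n_release)++;  /* has got to wait  */
    }

    int main(void)
    {
            int ready = 0, release = 0;
            requeue_sketch(&ready, &release, 100, 150); /* released  */
            requeue_sketch(&ready, &release, 200, 150); /* in future */
            printf("ready=%d release=%d\n", ready, release); /* 1 1 */
            return 0;
    }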
7992 | + | ||
7993 | + | ||
7994 | +/* cpu_entry_t - maintain the linked and scheduled state | ||
7995 | + */ | ||
7996 | +typedef struct { | ||
7997 | + int cpu; | ||
7998 | + struct task_struct* linked; /* only RT tasks */ | ||
7999 | + struct task_struct* scheduled; /* only RT tasks */ | ||
8000 | + struct bheap_node* hn; | ||
8001 | +} cpu_entry_t; | ||
8002 | +DEFINE_PER_CPU(cpu_entry_t, gsnedf_cpu_entries); | ||
8003 | + | ||
8004 | +cpu_entry_t* gsnedf_cpus[NR_CPUS]; | ||
8005 | + | ||
8006 | +/* the cpus queue themselves according to priority in here */ | ||
8007 | +static struct bheap_node gsnedf_heap_node[NR_CPUS]; | ||
8008 | +static struct bheap gsnedf_cpu_heap; | ||
8009 | + | ||
8010 | +static rt_domain_t gsnedf; | ||
8011 | +#define gsnedf_lock (gsnedf.ready_lock) | ||
8012 | + | ||
8013 | + | ||
8014 | +/* Uncomment this if you want to see all scheduling decisions in the | ||
8015 | + * TRACE() log. | ||
8016 | +#define WANT_ALL_SCHED_EVENTS | ||
8017 | + */ | ||
8018 | + | ||
8019 | +static int cpu_lower_prio(struct bheap_node *_a, struct bheap_node *_b) | ||
8020 | +{ | ||
8021 | + cpu_entry_t *a, *b; | ||
8022 | + a = _a->value; | ||
8023 | + b = _b->value; | ||
8024 | + /* Note that a and b are inverted: we want the lowest-priority CPU at | ||
8025 | + * the top of the heap. | ||
8026 | + */ | ||
8027 | + return edf_higher_prio(b->linked, a->linked); | ||
8028 | +} | ||
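The inversion is the key idea: ordering CPUs by the EDF priority of their linked tasks, lowest first, keeps the cheapest CPU to preempt at the top of the heap. The sketch below illustrates this with integer deadlines standing in for edf_higher_prio() (earlier deadline = higher priority; UINT64_MAX models an idle CPU, the lowest-priority of all) and a linear scan standing in for the heap:

    #include <stdint.h>
    #include <stdio.h>

    /* Deadline of the task linked to each CPU; UINT64_MAX means idle. */
    static uint64_t linked_deadline[3] = { 50, UINT64_MAX, 20 };

    /* Mirror of cpu_lower_prio(): CPU a orders before CPU b iff a's
     * linked task has lower EDF priority, i.e. a later deadline. */
    static int cpu_lower_prio_sketch(int a, int b)
    {
            return linked_deadline[a] > linked_deadline[b];
    }

    /* With three CPUs, a scan replaces the heap's peek operation. */
    static int lowest_prio_cpu_sketch(void)
    {
            int best = 0, cpu;
            for (cpu = 1; cpu < 3; cpu++)
                    if (cpu_lower_prio_sketch(cpu, best))
                            best = cpu;
            return best;
    }

    int main(void)
    {
            /* The idle CPU 1 is the first preemption target. */
            printf("preempt CPU %d first\n", lowest_prio_cpu_sketch());
            return 0;
    }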
8029 | + | ||
8030 | +/* update_cpu_position - Move the cpu entry to the correct place to maintain | ||
8031 | + * order in the cpu queue. Caller must hold gsnedf lock. | ||
8032 | + */ | ||
8033 | +static void update_cpu_position(cpu_entry_t *entry) | ||
8034 | +{ | ||
8035 | + if (likely(bheap_node_in_heap(entry->hn))) | ||
8036 | + bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
8037 | + bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, entry->hn); | ||
8038 | +} | ||
8039 | + | ||
8040 | +/* caller must hold gsnedf lock */ | ||
8041 | +static cpu_entry_t* lowest_prio_cpu(void) | ||
8042 | +{ | ||
8043 | + struct bheap_node* hn; | ||
8044 | + hn = bheap_peek(cpu_lower_prio, &gsnedf_cpu_heap); | ||
8045 | + return hn->value; | ||
8046 | +} | ||
8047 | + | ||
8048 | + | ||
8049 | +/* link_task_to_cpu - Update the link of a CPU. | ||
8050 | + * Handles the case where the to-be-linked task is already | ||
8051 | + * scheduled on a different CPU. | ||
8052 | + */ | ||
8053 | +static noinline void link_task_to_cpu(struct task_struct* linked, | ||
8054 | + cpu_entry_t *entry) | ||
8055 | +{ | ||
8056 | + cpu_entry_t *sched; | ||
8057 | + struct task_struct* tmp; | ||
8058 | + int on_cpu; | ||
8059 | + | ||
8060 | + BUG_ON(linked && !is_realtime(linked)); | ||
8061 | + | ||
8062 | + /* Currently linked task is set to be unlinked. */ | ||
8063 | + if (entry->linked) { | ||
8064 | + entry->linked->rt_param.linked_on = NO_CPU; | ||
8065 | + } | ||
8066 | + | ||
8067 | + /* Link new task to CPU. */ | ||
8068 | + if (linked) { | ||
8069 | + set_rt_flags(linked, RT_F_RUNNING); | ||
8070 | +		/* handle the case that the task is already scheduled somewhere! */ | ||
8071 | + on_cpu = linked->rt_param.scheduled_on; | ||
8072 | + if (on_cpu != NO_CPU) { | ||
8073 | + sched = &per_cpu(gsnedf_cpu_entries, on_cpu); | ||
8074 | + /* this should only happen if not linked already */ | ||
8075 | + BUG_ON(sched->linked == linked); | ||
8076 | + | ||
8077 | + /* If we are already scheduled on the CPU to which we | ||
8078 | + * wanted to link, we don't need to do the swap -- | ||
8079 | + * we just link ourselves to the CPU and depend on | ||
8080 | + * the caller to get things right. | ||
8081 | + */ | ||
8082 | + if (entry != sched) { | ||
8083 | + TRACE_TASK(linked, | ||
8084 | + "already scheduled on %d, updating link.\n", | ||
8085 | + sched->cpu); | ||
8086 | + tmp = sched->linked; | ||
8087 | + linked->rt_param.linked_on = sched->cpu; | ||
8088 | + sched->linked = linked; | ||
8089 | + update_cpu_position(sched); | ||
8090 | + linked = tmp; | ||
8091 | + } | ||
8092 | + } | ||
8093 | + if (linked) /* might be NULL due to swap */ | ||
8094 | + linked->rt_param.linked_on = entry->cpu; | ||
8095 | + } | ||
8096 | + entry->linked = linked; | ||
8097 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
8098 | + if (linked) | ||
8099 | + TRACE_TASK(linked, "linked to %d.\n", entry->cpu); | ||
8100 | + else | ||
8101 | + TRACE("NULL linked to %d.\n", entry->cpu); | ||
8102 | +#endif | ||
8103 | + update_cpu_position(entry); | ||
8104 | +} | ||
8105 | + | ||
8106 | +/* unlink - Make sure a task is not linked any longer to an entry | ||
8107 | + * where it was linked before. Must hold gsnedf_lock. | ||
8108 | + */ | ||
8109 | +static noinline void unlink(struct task_struct* t) | ||
8110 | +{ | ||
8111 | + cpu_entry_t *entry; | ||
8112 | + | ||
8113 | + if (t->rt_param.linked_on != NO_CPU) { | ||
8114 | + /* unlink */ | ||
8115 | + entry = &per_cpu(gsnedf_cpu_entries, t->rt_param.linked_on); | ||
8116 | + t->rt_param.linked_on = NO_CPU; | ||
8117 | + link_task_to_cpu(NULL, entry); | ||
8118 | + } else if (is_queued(t)) { | ||
8119 | + /* This is an interesting situation: t is scheduled, | ||
8120 | + * but was just recently unlinked. It cannot be | ||
8121 | + * linked anywhere else (because then it would have | ||
8122 | + * been relinked to this CPU), thus it must be in some | ||
8123 | + * queue. We must remove it from the list in this | ||
8124 | + * case. | ||
8125 | + */ | ||
8126 | + remove(&gsnedf, t); | ||
8127 | + } | ||
8128 | +} | ||
8129 | + | ||
8130 | + | ||
8131 | +/* preempt - force a CPU to reschedule | ||
8132 | + */ | ||
8133 | +static void preempt(cpu_entry_t *entry) | ||
8134 | +{ | ||
8135 | + preempt_if_preemptable(entry->scheduled, entry->cpu); | ||
8136 | +} | ||
8137 | + | ||
8138 | +/* requeue - Put an unlinked task into gsn-edf domain. | ||
8139 | + * Caller must hold gsnedf_lock. | ||
8140 | + */ | ||
8141 | +static noinline void requeue(struct task_struct* task) | ||
8142 | +{ | ||
8143 | + BUG_ON(!task); | ||
8144 | + /* sanity check before insertion */ | ||
8145 | + BUG_ON(is_queued(task)); | ||
8146 | + | ||
8147 | + if (is_released(task, litmus_clock())) | ||
8148 | + __add_ready(&gsnedf, task); | ||
8149 | + else { | ||
8150 | + /* it has got to wait */ | ||
8151 | + add_release(&gsnedf, task); | ||
8152 | + } | ||
8153 | +} | ||
8154 | + | ||
8155 | +/* check for any necessary preemptions */ | ||
8156 | +static void check_for_preemptions(void) | ||
8157 | +{ | ||
8158 | + struct task_struct *task; | ||
8159 | + cpu_entry_t* last; | ||
8160 | + | ||
8161 | + for(last = lowest_prio_cpu(); | ||
8162 | + edf_preemption_needed(&gsnedf, last->linked); | ||
8163 | + last = lowest_prio_cpu()) { | ||
8164 | + /* preemption necessary */ | ||
8165 | + task = __take_ready(&gsnedf); | ||
8166 | + TRACE("check_for_preemptions: attempting to link task %d to %d\n", | ||
8167 | + task->pid, last->cpu); | ||
8168 | + if (last->linked) | ||
8169 | + requeue(last->linked); | ||
8170 | + link_task_to_cpu(task, last); | ||
8171 | + preempt(last); | ||
8172 | + } | ||
8173 | +} | ||
8174 | + | ||
8175 | +/* gsnedf_job_arrival: task is either resumed or released */ | ||
8176 | +static noinline void gsnedf_job_arrival(struct task_struct* task) | ||
8177 | +{ | ||
8178 | + BUG_ON(!task); | ||
8179 | + | ||
8180 | + requeue(task); | ||
8181 | + check_for_preemptions(); | ||
8182 | +} | ||
8183 | + | ||
8184 | +static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks) | ||
8185 | +{ | ||
8186 | + unsigned long flags; | ||
8187 | + | ||
8188 | + raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
8189 | + | ||
8190 | + __merge_ready(rt, tasks); | ||
8191 | + check_for_preemptions(); | ||
8192 | + | ||
8193 | + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
8194 | +} | ||
8195 | + | ||
8196 | +/* caller holds gsnedf_lock */ | ||
8197 | +static noinline void job_completion(struct task_struct *t, int forced) | ||
8198 | +{ | ||
8199 | + BUG_ON(!t); | ||
8200 | + | ||
8201 | + sched_trace_task_completion(t, forced); | ||
8202 | + | ||
8203 | + TRACE_TASK(t, "job_completion().\n"); | ||
8204 | + | ||
8205 | + /* set flags */ | ||
8206 | + set_rt_flags(t, RT_F_SLEEP); | ||
8207 | + /* prepare for next period */ | ||
8208 | + prepare_for_next_period(t); | ||
8209 | + if (is_released(t, litmus_clock())) | ||
8210 | + sched_trace_task_release(t); | ||
8211 | + /* unlink */ | ||
8212 | + unlink(t); | ||
8213 | + /* requeue | ||
8214 | + * But don't requeue a blocking task. */ | ||
8215 | + if (is_running(t)) | ||
8216 | + gsnedf_job_arrival(t); | ||
8217 | +} | ||
8218 | + | ||
8219 | +/* gsnedf_tick - this function is called for every local timer | ||
8220 | + * interrupt. | ||
8221 | + * | ||
8222 | + * checks whether the current task has exhausted its budget; if so, | ||
8223 | + * it is preempted (or, for np tasks, asked to leave its np-section) | ||
8224 | + */ | ||
8225 | +static void gsnedf_tick(struct task_struct* t) | ||
8226 | +{ | ||
8227 | + if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
8228 | + if (!is_np(t)) { | ||
8229 | + /* np tasks will be preempted when they become | ||
8230 | + * preemptable again | ||
8231 | + */ | ||
8232 | + litmus_reschedule_local(); | ||
8233 | + TRACE("gsnedf_scheduler_tick: " | ||
8234 | + "%d is preemptable " | ||
8235 | + " => FORCE_RESCHED\n", t->pid); | ||
8236 | + } else if (is_user_np(t)) { | ||
8237 | + TRACE("gsnedf_scheduler_tick: " | ||
8238 | + "%d is non-preemptable, " | ||
8239 | + "preemption delayed.\n", t->pid); | ||
8240 | + request_exit_np(t); | ||
8241 | + } | ||
8242 | + } | ||
8243 | +} | ||
8244 | + | ||
8245 | +/* Getting schedule() right is a bit tricky. schedule() may not make any | ||
8246 | + * assumptions on the state of the current task since it may be called for a | ||
8247 | + * number of reasons. The reasons include that scheduler_tick() determined it | ||
8248 | + * to be necessary, that sys_exit_np() was called, that some Linux | ||
8249 | + * subsystem determined so, or even (in the worst case) because there is a bug | ||
8250 | + * hidden somewhere. Thus, we must take extreme care to determine what the | ||
8251 | + * current state is. | ||
8252 | + * | ||
8253 | + * The CPU could currently be scheduling a task (or not), be linked (or not). | ||
8254 | + * | ||
8255 | + * The following assertions for the scheduled task could hold: | ||
8256 | + * | ||
8257 | + * - !is_running(scheduled) // the job blocks | ||
8258 | + * - scheduled->timeslice == 0 // the job completed (forcefully) | ||
8259 | + * - get_rt_flag() == RT_F_SLEEP // the job completed (by syscall) | ||
8260 | + * - linked != scheduled // we need to reschedule (for any reason) | ||
8261 | + * - is_np(scheduled) // rescheduling must be delayed, | ||
8262 | + * sys_exit_np must be requested | ||
8263 | + * | ||
8264 | + * Any of these can occur together. | ||
8265 | + */ | ||
8266 | +static struct task_struct* gsnedf_schedule(struct task_struct * prev) | ||
8267 | +{ | ||
8268 | + cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
8269 | + int out_of_time, sleep, preempt, np, exists, blocks; | ||
8270 | + struct task_struct* next = NULL; | ||
8271 | + | ||
8272 | +#ifdef CONFIG_RELEASE_MASTER | ||
8273 | + /* Bail out early if we are the release master. | ||
8274 | + * The release master never schedules any real-time tasks. | ||
8275 | + */ | ||
8276 | + if (gsnedf.release_master == entry->cpu) | ||
8277 | + return NULL; | ||
8278 | +#endif | ||
8279 | + | ||
8280 | + raw_spin_lock(&gsnedf_lock); | ||
8281 | + | ||
8282 | + /* sanity checking */ | ||
8283 | + BUG_ON(entry->scheduled && entry->scheduled != prev); | ||
8284 | + BUG_ON(entry->scheduled && !is_realtime(prev)); | ||
8285 | + BUG_ON(is_realtime(prev) && !entry->scheduled); | ||
8286 | + | ||
8287 | + /* (0) Determine state */ | ||
8288 | + exists = entry->scheduled != NULL; | ||
8289 | + blocks = exists && !is_running(entry->scheduled); | ||
8290 | + out_of_time = exists && | ||
8291 | + budget_enforced(entry->scheduled) && | ||
8292 | + budget_exhausted(entry->scheduled); | ||
8293 | + np = exists && is_np(entry->scheduled); | ||
8294 | + sleep = exists && get_rt_flags(entry->scheduled) == RT_F_SLEEP; | ||
8295 | + preempt = entry->scheduled != entry->linked; | ||
8296 | + | ||
8297 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
8298 | + TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); | ||
8299 | +#endif | ||
8300 | + | ||
8301 | + if (exists) | ||
8302 | + TRACE_TASK(prev, | ||
8303 | + "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " | ||
8304 | + "state:%d sig:%d\n", | ||
8305 | + blocks, out_of_time, np, sleep, preempt, | ||
8306 | + prev->state, signal_pending(prev)); | ||
8307 | + if (entry->linked && preempt) | ||
8308 | + TRACE_TASK(prev, "will be preempted by %s/%d\n", | ||
8309 | + entry->linked->comm, entry->linked->pid); | ||
8310 | + | ||
8311 | + | ||
8312 | + /* If a task blocks we have no choice but to reschedule. | ||
8313 | + */ | ||
8314 | + if (blocks) | ||
8315 | + unlink(entry->scheduled); | ||
8316 | + | ||
8317 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
8318 | + * We need to make sure to update the link structure anyway in case | ||
8319 | + * that we are still linked. Multiple calls to request_exit_np() don't | ||
8320 | + * hurt. | ||
8321 | + */ | ||
8322 | + if (np && (out_of_time || preempt || sleep)) { | ||
8323 | + unlink(entry->scheduled); | ||
8324 | + request_exit_np(entry->scheduled); | ||
8325 | + } | ||
8326 | + | ||
8327 | +	/* A preemptable task that either exhausts its execution budget or | ||
8328 | +	 * wants to sleep completes its current job. We may have to reschedule | ||
8329 | +	 * after this. Don't do a job completion if we block (can't have timers | ||
8330 | +	 * running for blocked jobs). Preemptions go first for the same reason. | ||
8331 | + */ | ||
8332 | + if (!np && (out_of_time || sleep) && !blocks && !preempt) | ||
8333 | + job_completion(entry->scheduled, !sleep); | ||
8334 | + | ||
8335 | + /* Link pending task if we became unlinked. | ||
8336 | + */ | ||
8337 | + if (!entry->linked) | ||
8338 | + link_task_to_cpu(__take_ready(&gsnedf), entry); | ||
8339 | + | ||
8340 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
8341 | + * If linked is different from scheduled, then select linked as next. | ||
8342 | + */ | ||
8343 | + if ((!np || blocks) && | ||
8344 | + entry->linked != entry->scheduled) { | ||
8345 | + /* Schedule a linked job? */ | ||
8346 | + if (entry->linked) { | ||
8347 | + entry->linked->rt_param.scheduled_on = entry->cpu; | ||
8348 | + next = entry->linked; | ||
8349 | + } | ||
8350 | + if (entry->scheduled) { | ||
8351 | + /* not gonna be scheduled soon */ | ||
8352 | + entry->scheduled->rt_param.scheduled_on = NO_CPU; | ||
8353 | + TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); | ||
8354 | + } | ||
8355 | + } else | ||
8356 | + /* Only override Linux scheduler if we have a real-time task | ||
8357 | + * scheduled that needs to continue. | ||
8358 | + */ | ||
8359 | + if (exists) | ||
8360 | + next = prev; | ||
8361 | + | ||
8362 | + sched_state_task_picked(); | ||
8363 | + | ||
8364 | + raw_spin_unlock(&gsnedf_lock); | ||
8365 | + | ||
8366 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
8367 | + TRACE("gsnedf_lock released, next=0x%p\n", next); | ||
8368 | + | ||
8369 | + if (next) | ||
8370 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
8371 | + else if (exists && !next) | ||
8372 | + TRACE("becomes idle at %llu.\n", litmus_clock()); | ||
8373 | +#endif | ||
8374 | + | ||
8375 | + | ||
8376 | + return next; | ||
8377 | +} | ||
8378 | + | ||
8379 | + | ||
8380 | +/* _finish_switch - we just finished the switch away from prev | ||
8381 | + */ | ||
8382 | +static void gsnedf_finish_switch(struct task_struct *prev) | ||
8383 | +{ | ||
8384 | + cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); | ||
8385 | + | ||
8386 | + entry->scheduled = is_realtime(current) ? current : NULL; | ||
8387 | +#ifdef WANT_ALL_SCHED_EVENTS | ||
8388 | + TRACE_TASK(prev, "switched away from\n"); | ||
8389 | +#endif | ||
8390 | +} | ||
8391 | + | ||
8392 | + | ||
8393 | +/* Prepare a task for running in RT mode | ||
8394 | + */ | ||
8395 | +static void gsnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
8396 | +{ | ||
8397 | + unsigned long flags; | ||
8398 | + cpu_entry_t* entry; | ||
8399 | + | ||
8400 | + TRACE("gsn edf: task new %d\n", t->pid); | ||
8401 | + | ||
8402 | + raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
8403 | + | ||
8404 | + /* setup job params */ | ||
8405 | + release_at(t, litmus_clock()); | ||
8406 | + | ||
8407 | + if (running) { | ||
8408 | + entry = &per_cpu(gsnedf_cpu_entries, task_cpu(t)); | ||
8409 | + BUG_ON(entry->scheduled); | ||
8410 | + | ||
8411 | +#ifdef CONFIG_RELEASE_MASTER | ||
8412 | + if (entry->cpu != gsnedf.release_master) { | ||
8413 | +#endif | ||
8414 | + entry->scheduled = t; | ||
8415 | + tsk_rt(t)->scheduled_on = task_cpu(t); | ||
8416 | +#ifdef CONFIG_RELEASE_MASTER | ||
8417 | + } else { | ||
8418 | + /* do not schedule on release master */ | ||
8419 | + preempt(entry); /* force resched */ | ||
8420 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
8421 | + } | ||
8422 | +#endif | ||
8423 | + } else { | ||
8424 | + t->rt_param.scheduled_on = NO_CPU; | ||
8425 | + } | ||
8426 | + t->rt_param.linked_on = NO_CPU; | ||
8427 | + | ||
8428 | + gsnedf_job_arrival(t); | ||
8429 | + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
8430 | +} | ||
8431 | + | ||
8432 | +static void gsnedf_task_wake_up(struct task_struct *task) | ||
8433 | +{ | ||
8434 | + unsigned long flags; | ||
8435 | + lt_t now; | ||
8436 | + | ||
8437 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
8438 | + | ||
8439 | + raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
8440 | + /* We need to take suspensions because of semaphores into | ||
8441 | + * account! If a job resumes after being suspended due to acquiring | ||
8442 | + * a semaphore, it should never be treated as a new job release. | ||
8443 | + */ | ||
8444 | + if (get_rt_flags(task) == RT_F_EXIT_SEM) { | ||
8445 | + set_rt_flags(task, RT_F_RUNNING); | ||
8446 | + } else { | ||
8447 | + now = litmus_clock(); | ||
8448 | + if (is_tardy(task, now)) { | ||
8449 | + /* new sporadic release */ | ||
8450 | + release_at(task, now); | ||
8451 | + sched_trace_task_release(task); | ||
8452 | + } | ||
8453 | + else { | ||
8454 | + if (task->rt.time_slice) { | ||
8455 | + /* came back in time before deadline | ||
8456 | + */ | ||
8457 | + set_rt_flags(task, RT_F_RUNNING); | ||
8458 | + } | ||
8459 | + } | ||
8460 | + } | ||
8461 | + gsnedf_job_arrival(task); | ||
8462 | + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
8463 | +} | ||
8464 | + | ||
8465 | +static void gsnedf_task_block(struct task_struct *t) | ||
8466 | +{ | ||
8467 | + unsigned long flags; | ||
8468 | + | ||
8469 | + TRACE_TASK(t, "block at %llu\n", litmus_clock()); | ||
8470 | + | ||
8471 | + /* unlink if necessary */ | ||
8472 | + raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
8473 | + unlink(t); | ||
8474 | + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
8475 | + | ||
8476 | + BUG_ON(!is_realtime(t)); | ||
8477 | +} | ||
8478 | + | ||
8479 | + | ||
8480 | +static void gsnedf_task_exit(struct task_struct * t) | ||
8481 | +{ | ||
8482 | + unsigned long flags; | ||
8483 | + | ||
8484 | + /* unlink if necessary */ | ||
8485 | + raw_spin_lock_irqsave(&gsnedf_lock, flags); | ||
8486 | + unlink(t); | ||
8487 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
8488 | + gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | ||
8489 | + tsk_rt(t)->scheduled_on = NO_CPU; | ||
8490 | + } | ||
8491 | + raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | ||
8492 | + | ||
8493 | + BUG_ON(!is_realtime(t)); | ||
8494 | + TRACE_TASK(t, "RIP\n"); | ||
8495 | +} | ||
8496 | + | ||
8497 | +#ifdef CONFIG_FMLP | ||
8498 | + | ||
8499 | +/* Update the queue position of a task that got its priority boosted via | ||
8500 | + * priority inheritance. */ | ||
8501 | +static void update_queue_position(struct task_struct *holder) | ||
8502 | +{ | ||
8503 | + /* We don't know whether holder is in the ready queue. It should, but | ||
8504 | + * on a budget overrun it may already be in a release queue. Hence, | ||
8505 | + * calling unlink() is not possible since it assumes that the task is | ||
8506 | + * not in a release queue. However, we can safely check whether | ||
8507 | + * sem->holder is currently in a queue or scheduled after locking both | ||
8508 | + * the release and the ready queue lock. */ | ||
8509 | + | ||
8510 | + /* Assumption: caller holds gsnedf_lock */ | ||
8511 | + | ||
8512 | + int check_preempt = 0; | ||
8513 | + | ||
8514 | + if (tsk_rt(holder)->linked_on != NO_CPU) { | ||
8515 | + TRACE_TASK(holder, "%s: linked on %d\n", | ||
8516 | + __FUNCTION__, tsk_rt(holder)->linked_on); | ||
8517 | + /* Holder is scheduled; need to re-order CPUs. | ||
8518 | + * We can't use heap_decrease() here since | ||
8519 | + * the cpu_heap is ordered in reverse direction, so | ||
8520 | + * it is actually an increase. */ | ||
8521 | + bheap_delete(cpu_lower_prio, &gsnedf_cpu_heap, | ||
8522 | + gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); | ||
8523 | + bheap_insert(cpu_lower_prio, &gsnedf_cpu_heap, | ||
8524 | + gsnedf_cpus[tsk_rt(holder)->linked_on]->hn); | ||
8525 | + } else { | ||
8526 | + /* holder may be queued: first stop queue changes */ | ||
8527 | + raw_spin_lock(&gsnedf.release_lock); | ||
8528 | + if (is_queued(holder)) { | ||
8529 | + TRACE_TASK(holder, "%s: is queued\n", | ||
8530 | + __FUNCTION__); | ||
8531 | + /* We need to update the position | ||
8532 | + * of holder in some heap. Note that this | ||
8533 | + * may be a release heap. */ | ||
8534 | + check_preempt = | ||
8535 | + !bheap_decrease(edf_ready_order, | ||
8536 | + tsk_rt(holder)->heap_node); | ||
8537 | + } else { | ||
8538 | + /* Nothing to do: if it is not queued and not linked | ||
8539 | + * then it is currently being moved by other code | ||
8540 | + * (e.g., a timer interrupt handler) that will use the | ||
8541 | + * correct priority when enqueuing the task. */ | ||
8542 | + TRACE_TASK(holder, "%s: is NOT queued => Done.\n", | ||
8543 | + __FUNCTION__); | ||
8544 | + } | ||
8545 | + raw_spin_unlock(&gsnedf.release_lock); | ||
8546 | + | ||
8547 | + /* If holder was enqueued in a release heap, then the following | ||
8548 | + * preemption check is pointless, but we can't easily detect | ||
8549 | + * that case. If you want to fix this, then consider that | ||
8550 | + * simply adding a state flag requires O(n) time to update when | ||
8551 | + * releasing n tasks, which conflicts with the goal to have | ||
8552 | + * O(log n) merges. */ | ||
8553 | + if (check_preempt) { | ||
8554 | + /* heap_decrease() hit the top level of the heap: make | ||
8555 | + * sure preemption checks get the right task, not the | ||
8556 | + * potentially stale cache. */ | ||
8557 | + bheap_uncache_min(edf_ready_order, | ||
8558 | + &gsnedf.ready_queue); | ||
8559 | + check_for_preemptions(); | ||
8560 | + } | ||
8561 | + } | ||
8562 | +} | ||
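The delete/insert pair above exists because heap repair is directional: in a min-heap, a key that shrinks can be restored with a cheap sift-up, while a key that grows cannot. Since the CPU heap is ordered lowest-priority-first, boosting a linked task's priority grows its CPU's key in heap order. A toy min-heap makes the asymmetry concrete:

    #include <stdio.h>

    /* Toy binary min-heap over ints, just enough to show why a decreased
     * key only needs sift_up while an increased key does not. */
    static int heap[16];
    static int n;

    static void swap_nodes(int i, int j)
    {
            int t = heap[i]; heap[i] = heap[j]; heap[j] = t;
    }

    static void sift_up(int i)
    {
            while (i > 0 && heap[i] < heap[(i - 1) / 2]) {
                    swap_nodes(i, (i - 1) / 2);
                    i = (i - 1) / 2;
            }
    }

    static void insert(int key)
    {
            heap[n] = key;
            sift_up(n++);
    }

    int main(void)
    {
            insert(10); insert(30); insert(20); /* heap = 10 30 20       */
            heap[2] = 5;   /* key DEcreased...                           */
            sift_up(2);    /* ...sift_up repairs the heap: top is 5      */
            printf("top=%d\n", heap[0]);
            heap[0] = 40;  /* key INcreased: sift_up(0) is a no-op,      */
            sift_up(0);    /* heap is now broken (40 above 10 and 30)    */
            printf("top=%d (invariant violated)\n", heap[0]);
            return 0;
    }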
8563 | + | ||
8564 | +static long gsnedf_pi_block(struct pi_semaphore *sem, | ||
8565 | + struct task_struct *new_waiter) | ||
8566 | +{ | ||
8567 | + /* This callback has to handle the situation where a new waiter is | ||
8568 | + * added to the wait queue of the semaphore. | ||
8569 | + * | ||
8570 | +	 * We must check whether the new waiter has a higher priority than the | ||
8571 | +	 * currently highest-priority task, and then potentially reschedule. | ||
8572 | + */ | ||
8573 | + | ||
8574 | + BUG_ON(!new_waiter); | ||
8575 | + | ||
8576 | + if (edf_higher_prio(new_waiter, sem->hp.task)) { | ||
8577 | + TRACE_TASK(new_waiter, " boosts priority via %p\n", sem); | ||
8578 | + /* called with IRQs disabled */ | ||
8579 | + raw_spin_lock(&gsnedf_lock); | ||
8580 | + /* store new highest-priority task */ | ||
8581 | + sem->hp.task = new_waiter; | ||
8582 | + if (sem->holder) { | ||
8583 | + TRACE_TASK(sem->holder, | ||
8584 | + " holds %p and will inherit from %s/%d\n", | ||
8585 | + sem, | ||
8586 | + new_waiter->comm, new_waiter->pid); | ||
8587 | + /* let holder inherit */ | ||
8588 | + sem->holder->rt_param.inh_task = new_waiter; | ||
8589 | + update_queue_position(sem->holder); | ||
8590 | + } | ||
8591 | + raw_spin_unlock(&gsnedf_lock); | ||
8592 | + } | ||
8593 | + | ||
8594 | + return 0; | ||
8595 | +} | ||
8596 | + | ||
8597 | +static long gsnedf_inherit_priority(struct pi_semaphore *sem, | ||
8598 | + struct task_struct *new_owner) | ||
8599 | +{ | ||
8600 | + /* We don't need to acquire the gsnedf_lock since at the time of this | ||
8601 | + * call new_owner isn't actually scheduled yet (it's still sleeping) | ||
8602 | + * and since the calling function already holds sem->wait.lock, which | ||
8603 | + * prevents concurrent sem->hp.task changes. | ||
8604 | + */ | ||
8605 | + | ||
8606 | + if (sem->hp.task && sem->hp.task != new_owner) { | ||
8607 | + new_owner->rt_param.inh_task = sem->hp.task; | ||
8608 | + TRACE_TASK(new_owner, "inherited priority from %s/%d\n", | ||
8609 | + sem->hp.task->comm, sem->hp.task->pid); | ||
8610 | + } else | ||
8611 | + TRACE_TASK(new_owner, | ||
8612 | + "cannot inherit priority, " | ||
8613 | + "no higher priority job waits.\n"); | ||
8614 | + return 0; | ||
8615 | +} | ||
8616 | + | ||
8617 | +/* This function is called on a semaphore release, and assumes that | ||
8618 | + * the current task is also the semaphore holder. | ||
8619 | + */ | ||
8620 | +static long gsnedf_return_priority(struct pi_semaphore *sem) | ||
8621 | +{ | ||
8622 | + struct task_struct* t = current; | ||
8623 | + int ret = 0; | ||
8624 | + | ||
8625 | + /* Find new highest-priority semaphore task | ||
8626 | + * if holder task is the current hp.task. | ||
8627 | + * | ||
8628 | + * Calling function holds sem->wait.lock. | ||
8629 | + */ | ||
8630 | + if (t == sem->hp.task) | ||
8631 | + edf_set_hp_task(sem); | ||
8632 | + | ||
8633 | + TRACE_CUR("gsnedf_return_priority for lock %p\n", sem); | ||
8634 | + | ||
8635 | + if (t->rt_param.inh_task) { | ||
8636 | + /* interrupts already disabled by PI code */ | ||
8637 | + raw_spin_lock(&gsnedf_lock); | ||
8638 | + | ||
8639 | + /* Reset inh_task to NULL. */ | ||
8640 | + t->rt_param.inh_task = NULL; | ||
8641 | + | ||
8642 | + /* Check if rescheduling is necessary */ | ||
8643 | + unlink(t); | ||
8644 | + gsnedf_job_arrival(t); | ||
8645 | + raw_spin_unlock(&gsnedf_lock); | ||
8646 | + } | ||
8647 | + | ||
8648 | + return ret; | ||
8649 | +} | ||
8650 | + | ||
8651 | +#endif | ||
8652 | + | ||
8653 | +static long gsnedf_admit_task(struct task_struct* tsk) | ||
8654 | +{ | ||
8655 | + return 0; | ||
8656 | +} | ||
8657 | + | ||
8658 | +static long gsnedf_activate_plugin(void) | ||
8659 | +{ | ||
8660 | + int cpu; | ||
8661 | + cpu_entry_t *entry; | ||
8662 | + | ||
8663 | + bheap_init(&gsnedf_cpu_heap); | ||
8664 | +#ifdef CONFIG_RELEASE_MASTER | ||
8665 | + gsnedf.release_master = atomic_read(&release_master_cpu); | ||
8666 | +#endif | ||
8667 | + | ||
8668 | + for_each_online_cpu(cpu) { | ||
8669 | + entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
8670 | + bheap_node_init(&entry->hn, entry); | ||
8671 | + entry->linked = NULL; | ||
8672 | + entry->scheduled = NULL; | ||
8673 | +#ifdef CONFIG_RELEASE_MASTER | ||
8674 | + if (cpu != gsnedf.release_master) { | ||
8675 | +#endif | ||
8676 | + TRACE("GSN-EDF: Initializing CPU #%d.\n", cpu); | ||
8677 | + update_cpu_position(entry); | ||
8678 | +#ifdef CONFIG_RELEASE_MASTER | ||
8679 | + } else { | ||
8680 | + TRACE("GSN-EDF: CPU %d is release master.\n", cpu); | ||
8681 | + } | ||
8682 | +#endif | ||
8683 | + } | ||
8684 | + return 0; | ||
8685 | +} | ||
8686 | + | ||
8687 | +/* Plugin object */ | ||
8688 | +static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | ||
8689 | + .plugin_name = "GSN-EDF", | ||
8690 | + .finish_switch = gsnedf_finish_switch, | ||
8691 | + .tick = gsnedf_tick, | ||
8692 | + .task_new = gsnedf_task_new, | ||
8693 | + .complete_job = complete_job, | ||
8694 | + .task_exit = gsnedf_task_exit, | ||
8695 | + .schedule = gsnedf_schedule, | ||
8696 | + .task_wake_up = gsnedf_task_wake_up, | ||
8697 | + .task_block = gsnedf_task_block, | ||
8698 | +#ifdef CONFIG_FMLP | ||
8699 | + .fmlp_active = 1, | ||
8700 | + .pi_block = gsnedf_pi_block, | ||
8701 | + .inherit_priority = gsnedf_inherit_priority, | ||
8702 | + .return_priority = gsnedf_return_priority, | ||
8703 | +#endif | ||
8704 | + .admit_task = gsnedf_admit_task, | ||
8705 | + .activate_plugin = gsnedf_activate_plugin, | ||
8706 | +}; | ||
8707 | + | ||
8708 | + | ||
8709 | +static int __init init_gsn_edf(void) | ||
8710 | +{ | ||
8711 | + int cpu; | ||
8712 | + cpu_entry_t *entry; | ||
8713 | + | ||
8714 | + bheap_init(&gsnedf_cpu_heap); | ||
8715 | + /* initialize CPU state */ | ||
8716 | + for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
8717 | + entry = &per_cpu(gsnedf_cpu_entries, cpu); | ||
8718 | + gsnedf_cpus[cpu] = entry; | ||
8719 | + entry->cpu = cpu; | ||
8720 | + entry->hn = &gsnedf_heap_node[cpu]; | ||
8721 | + bheap_node_init(&entry->hn, entry); | ||
8722 | + } | ||
8723 | + edf_domain_init(&gsnedf, NULL, gsnedf_release_jobs); | ||
8724 | + return register_sched_plugin(&gsn_edf_plugin); | ||
8725 | +} | ||
8726 | + | ||
8727 | + | ||
8728 | +module_init(init_gsn_edf); | ||
8729 | diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c | ||
8730 | new file mode 100644 | ||
8731 | index 0000000..e695289 | ||
8732 | --- /dev/null | ||
8733 | +++ b/litmus/sched_litmus.c | ||
8734 | @@ -0,0 +1,320 @@ | ||
8735 | +/* This file is included from kernel/sched.c */ | ||
8736 | + | ||
8737 | +#include <litmus/litmus.h> | ||
8738 | +#include <litmus/budget.h> | ||
8739 | +#include <litmus/sched_plugin.h> | ||
8740 | +#include <litmus/preempt.h> | ||
8741 | + | ||
8742 | +static void update_time_litmus(struct rq *rq, struct task_struct *p) | ||
8743 | +{ | ||
8744 | + u64 delta = rq->clock - p->se.exec_start; | ||
8745 | + if (unlikely((s64)delta < 0)) | ||
8746 | + delta = 0; | ||
8747 | + /* per job counter */ | ||
8748 | + p->rt_param.job_params.exec_time += delta; | ||
8749 | + /* task counter */ | ||
8750 | + p->se.sum_exec_runtime += delta; | ||
8751 | + /* sched_clock() */ | ||
8752 | + p->se.exec_start = rq->clock; | ||
8753 | + cpuacct_charge(p, delta); | ||
8754 | +} | ||
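update_time_litmus() guards against an apparently backwards clock by clamping negative deltas to zero before charging both the per-job and per-task counters. The same pattern in isolation (illustrative struct, not the kernel's):

    #include <stdint.h>

    struct exec_stats {
            uint64_t job_exec_time; /* per-job counter   */
            uint64_t total_runtime; /* per-task counter  */
            uint64_t exec_start;    /* last charge point */
    };

    /* Charge the time elapsed since exec_start, treating an apparently
     * negative delta as zero, then restart the measurement. */
    static void charge_exec_time(struct exec_stats *s, uint64_t now)
    {
            int64_t delta = (int64_t)(now - s->exec_start);
            if (delta < 0)
                    delta = 0;
            s->job_exec_time += (uint64_t)delta;
            s->total_runtime += (uint64_t)delta;
            s->exec_start = now;
    }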
8755 | + | ||
8756 | +static void double_rq_lock(struct rq *rq1, struct rq *rq2); | ||
8757 | +static void double_rq_unlock(struct rq *rq1, struct rq *rq2); | ||
8758 | + | ||
8759 | +/* | ||
8760 | + * litmus_tick gets called by scheduler_tick() with HZ freq | ||
8761 | + * Interrupts are disabled | ||
8762 | + */ | ||
8763 | +static void litmus_tick(struct rq *rq, struct task_struct *p) | ||
8764 | +{ | ||
8765 | + TS_PLUGIN_TICK_START; | ||
8766 | + | ||
8767 | + if (is_realtime(p)) | ||
8768 | + update_time_litmus(rq, p); | ||
8769 | + | ||
8770 | + /* plugin tick */ | ||
8771 | + litmus->tick(p); | ||
8772 | + | ||
8773 | + TS_PLUGIN_TICK_END; | ||
8774 | + | ||
8775 | + return; | ||
8776 | +} | ||
8777 | + | ||
8778 | +static struct task_struct * | ||
8779 | +litmus_schedule(struct rq *rq, struct task_struct *prev) | ||
8780 | +{ | ||
8781 | + struct rq* other_rq; | ||
8782 | + struct task_struct *next; | ||
8783 | + | ||
8784 | + long was_running; | ||
8785 | + lt_t _maybe_deadlock = 0; | ||
8786 | + | ||
8787 | + /* let the plugin schedule */ | ||
8788 | + next = litmus->schedule(prev); | ||
8789 | + | ||
8790 | + sched_state_plugin_check(); | ||
8791 | + | ||
8792 | + /* check if a global plugin pulled a task from a different RQ */ | ||
8793 | + if (next && task_rq(next) != rq) { | ||
8794 | + /* we need to migrate the task */ | ||
8795 | + other_rq = task_rq(next); | ||
8796 | + TRACE_TASK(next, "migrate from %d\n", other_rq->cpu); | ||
8797 | + | ||
8798 | + /* while we drop the lock, the prev task could change its | ||
8799 | + * state | ||
8800 | + */ | ||
8801 | + was_running = is_running(prev); | ||
8802 | + mb(); | ||
8803 | + raw_spin_unlock(&rq->lock); | ||
8804 | + | ||
8805 | + /* Don't race with a concurrent switch. This could deadlock in | ||
8806 | + * the case of cross or circular migrations. It's the job of | ||
8807 | + * the plugin to make sure that doesn't happen. | ||
8808 | + */ | ||
8809 | + TRACE_TASK(next, "stack_in_use=%d\n", | ||
8810 | + next->rt_param.stack_in_use); | ||
8811 | + if (next->rt_param.stack_in_use != NO_CPU) { | ||
8812 | + TRACE_TASK(next, "waiting to deschedule\n"); | ||
8813 | + _maybe_deadlock = litmus_clock(); | ||
8814 | + } | ||
8815 | + while (next->rt_param.stack_in_use != NO_CPU) { | ||
8816 | + cpu_relax(); | ||
8817 | + mb(); | ||
8818 | + if (next->rt_param.stack_in_use == NO_CPU) | ||
8819 | + TRACE_TASK(next,"descheduled. Proceeding.\n"); | ||
8820 | + | ||
8821 | + if (lt_before(_maybe_deadlock + 10000000, | ||
8822 | + litmus_clock())) { | ||
8823 | + /* We've been spinning for 10ms. | ||
8824 | + * Something can't be right! | ||
8825 | + * Let's abandon the task and bail out; at least | ||
8826 | + * we will have debug info instead of a hard | ||
8827 | + * deadlock. | ||
8828 | + */ | ||
8829 | + TRACE_TASK(next,"stack too long in use. " | ||
8830 | + "Deadlock?\n"); | ||
8831 | + next = NULL; | ||
8832 | + | ||
8833 | + /* bail out */ | ||
8834 | + raw_spin_lock(&rq->lock); | ||
8835 | + return next; | ||
8836 | + } | ||
8837 | + } | ||
8838 | +#ifdef __ARCH_WANT_UNLOCKED_CTXSW | ||
8839 | + if (next->oncpu) | ||
8840 | + TRACE_TASK(next, "waiting for !oncpu"); | ||
8841 | + while (next->oncpu) { | ||
8842 | + cpu_relax(); | ||
8843 | + mb(); | ||
8844 | + } | ||
8845 | +#endif | ||
8846 | + double_rq_lock(rq, other_rq); | ||
8847 | + mb(); | ||
8848 | + if (is_realtime(prev) && is_running(prev) != was_running) { | ||
8849 | + TRACE_TASK(prev, | ||
8850 | + "state changed while we dropped" | ||
8851 | + " the lock: is_running=%d, was_running=%d\n", | ||
8852 | + is_running(prev), was_running); | ||
8853 | + if (is_running(prev) && !was_running) { | ||
8854 | +			/* prev task became unblocked | ||
8855 | +			 * we need to simulate the normal sequence of | ||
8856 | +			 * events for the scheduler plugins. | ||
8857 | + */ | ||
8858 | + litmus->task_block(prev); | ||
8859 | + litmus->task_wake_up(prev); | ||
8860 | + } | ||
8861 | + } | ||
8862 | + | ||
8863 | + set_task_cpu(next, smp_processor_id()); | ||
8864 | + | ||
8865 | + /* DEBUG: now that we have the lock we need to make sure a | ||
8866 | + * couple of things still hold: | ||
8867 | + * - it is still a real-time task | ||
8868 | + * - it is still runnable (could have been stopped) | ||
8869 | + * If either is violated, then the active plugin is | ||
8870 | + * doing something wrong. | ||
8871 | + */ | ||
8872 | + if (!is_realtime(next) || !is_running(next)) { | ||
8873 | + /* BAD BAD BAD */ | ||
8874 | + TRACE_TASK(next,"BAD: migration invariant FAILED: " | ||
8875 | + "rt=%d running=%d\n", | ||
8876 | + is_realtime(next), | ||
8877 | + is_running(next)); | ||
8878 | + /* drop the task */ | ||
8879 | + next = NULL; | ||
8880 | + } | ||
8881 | + /* release the other CPU's runqueue, but keep ours */ | ||
8882 | + raw_spin_unlock(&other_rq->lock); | ||
8883 | + } | ||
8884 | + if (next) { | ||
8885 | + next->rt_param.stack_in_use = rq->cpu; | ||
8886 | + next->se.exec_start = rq->clock; | ||
8887 | + } | ||
8888 | + | ||
8889 | + update_enforcement_timer(next); | ||
8890 | + return next; | ||
8891 | +} | ||
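The migration path in litmus_schedule() spins until the incoming task's stack is free, but bounds the wait with a 10 ms watchdog so that a plugin bug yields trace output instead of a silent hard deadlock. Below is a minimal userspace sketch of this bounded-spin pattern; clock_gettime(CLOCK_MONOTONIC) is a hypothetical stand-in for litmus_clock(), and a plain volatile flag stands in for rt_param.stack_in_use.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for litmus_clock(): monotonic time in nanoseconds. */
static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* Spin until *in_use drops to zero, but give up after 10 ms so a
 * bug produces debug output rather than a hard deadlock; this
 * mirrors the stack_in_use wait loop above. Returns 0 on success,
 * -1 on timeout. */
static int spin_with_watchdog(volatile int *in_use)
{
	uint64_t start = now_ns();

	while (*in_use) {
		if (now_ns() - start > 10000000ull) { /* 10 ms */
			fprintf(stderr, "stack too long in use. Deadlock?\n");
			return -1;
		}
	}
	return 0;
}

The kernel version additionally issues cpu_relax() and a memory barrier on every iteration, since it is watching a field written by another CPU.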
8892 | + | ||
8893 | +static void enqueue_task_litmus(struct rq *rq, struct task_struct *p, | ||
8894 | + int flags) | ||
8895 | +{ | ||
8896 | + if (flags & ENQUEUE_WAKEUP) { | ||
8897 | + sched_trace_task_resume(p); | ||
8898 | + tsk_rt(p)->present = 1; | ||
8899 | + /* LITMUS^RT plugins need to update the state | ||
8900 | + * _before_ making it available in global structures. | ||
8901 | + * Linux gets away with being lazy about the task state | ||
8902 | + * update. We can't do that, hence we update the task | ||
8903 | + * state already here. | ||
8904 | + * | ||
8905 | + * WARNING: this needs to be re-evaluated when porting | ||
8906 | + * to newer kernel versions. | ||
8907 | + */ | ||
8908 | + p->state = TASK_RUNNING; | ||
8909 | + litmus->task_wake_up(p); | ||
8910 | + | ||
8911 | + rq->litmus.nr_running++; | ||
8912 | + } else | ||
8913 | + TRACE_TASK(p, "ignoring an enqueue, not a wake up.\n"); | ||
8914 | +} | ||
8915 | + | ||
8916 | +static void dequeue_task_litmus(struct rq *rq, struct task_struct *p, | ||
8917 | + int flags) | ||
8918 | +{ | ||
8919 | + if (flags & DEQUEUE_SLEEP) { | ||
8920 | + litmus->task_block(p); | ||
8921 | + tsk_rt(p)->present = 0; | ||
8922 | + sched_trace_task_block(p); | ||
8923 | + | ||
8924 | + rq->litmus.nr_running--; | ||
8925 | + } else | ||
8926 | + TRACE_TASK(p, "ignoring a dequeue, not going to sleep.\n"); | ||
8927 | +} | ||
8928 | + | ||
8929 | +static void yield_task_litmus(struct rq *rq) | ||
8930 | +{ | ||
8931 | + BUG_ON(rq->curr != current); | ||
8932 | + /* sched_yield() is called to trigger delayed preemptions. | ||
8933 | + * Thus, mark the current task as needing to be rescheduled. | ||
8934 | + * This will cause the scheduler plugin to be invoked, which can | ||
8935 | + * then determine if a preemption is still required. | ||
8936 | + */ | ||
8937 | + clear_exit_np(current); | ||
8938 | + litmus_reschedule_local(); | ||
8939 | +} | ||
8940 | + | ||
8941 | +/* Plugins are responsible for this. | ||
8942 | + */ | ||
8943 | +static void check_preempt_curr_litmus(struct rq *rq, struct task_struct *p, int flags) | ||
8944 | +{ | ||
8945 | +} | ||
8946 | + | ||
8947 | +static void put_prev_task_litmus(struct rq *rq, struct task_struct *p) | ||
8948 | +{ | ||
8949 | +} | ||
8950 | + | ||
8951 | +static void pre_schedule_litmus(struct rq *rq, struct task_struct *prev) | ||
8952 | +{ | ||
8953 | + update_time_litmus(rq, prev); | ||
8954 | + if (!is_running(prev)) | ||
8955 | + tsk_rt(prev)->present = 0; | ||
8956 | +} | ||
8957 | + | ||
8958 | +/* pick_next_task_litmus() - wrapper that invokes litmus_schedule() | ||
8959 | + * | ||
8960 | + * Returns the next task to be scheduled. | ||
8961 | + */ | ||
8962 | +static struct task_struct *pick_next_task_litmus(struct rq *rq) | ||
8963 | +{ | ||
8964 | + /* get the to-be-switched-out task (prev) */ | ||
8965 | + struct task_struct *prev = rq->litmus.prev; | ||
8966 | + struct task_struct *next; | ||
8967 | + | ||
8968 | + /* if not called from schedule() but from somewhere | ||
8969 | + * else (e.g., migration), return now! | ||
8970 | + */ | ||
8971 | + if(!rq->litmus.prev) | ||
8972 | + return NULL; | ||
8973 | + | ||
8974 | + rq->litmus.prev = NULL; | ||
8975 | + | ||
8976 | + TS_PLUGIN_SCHED_START; | ||
8977 | + next = litmus_schedule(rq, prev); | ||
8978 | + TS_PLUGIN_SCHED_END; | ||
8979 | + | ||
8980 | + return next; | ||
8981 | +} | ||
8982 | + | ||
8983 | +static void task_tick_litmus(struct rq *rq, struct task_struct *p, int queued) | ||
8984 | +{ | ||
8985 | + /* nothing to do; tick related tasks are done by litmus_tick() */ | ||
8986 | + return; | ||
8987 | +} | ||
8988 | + | ||
8989 | +static void switched_to_litmus(struct rq *rq, struct task_struct *p, int running) | ||
8990 | +{ | ||
8991 | +} | ||
8992 | + | ||
8993 | +static void prio_changed_litmus(struct rq *rq, struct task_struct *p, | ||
8994 | + int oldprio, int running) | ||
8995 | +{ | ||
8996 | +} | ||
8997 | + | ||
8998 | +static unsigned int get_rr_interval_litmus(struct rq *rq, struct task_struct *p) | ||
8999 | +{ | ||
9000 | + /* return infinity */ | ||
9001 | + return 0; | ||
9002 | +} | ||
9003 | + | ||
9004 | +/* This is called when a task becomes a real-time task, either due to a SCHED_* | ||
9005 | + * class transition or due to PI mutex inheritance. We don't handle Linux PI | ||
9006 | + * mutex inheritance yet (and probably never will). Use LITMUS provided | ||
9007 | + * synchronization primitives instead. | ||
9008 | + */ | ||
9009 | +static void set_curr_task_litmus(struct rq *rq) | ||
9010 | +{ | ||
9011 | + rq->curr->se.exec_start = rq->clock; | ||
9012 | +} | ||
9013 | + | ||
9014 | + | ||
9015 | +#ifdef CONFIG_SMP | ||
9016 | +/* execve tries to rebalance the task in this scheduling domain. | ||
9017 | + * We don't care about the scheduling domain; this can get called | ||
9018 | + * from exec, fork, and wakeup. | ||
9019 | + */ | ||
9020 | +static int select_task_rq_litmus(struct rq *rq, struct task_struct *p, | ||
9021 | + int sd_flag, int flags) | ||
9022 | +{ | ||
9023 | + /* preemption is already disabled. | ||
9024 | + * We don't want to change cpu here | ||
9025 | + */ | ||
9026 | + return task_cpu(p); | ||
9027 | +} | ||
9028 | +#endif | ||
9029 | + | ||
9030 | +static const struct sched_class litmus_sched_class = { | ||
9031 | + .next = &rt_sched_class, | ||
9032 | + .enqueue_task = enqueue_task_litmus, | ||
9033 | + .dequeue_task = dequeue_task_litmus, | ||
9034 | + .yield_task = yield_task_litmus, | ||
9035 | + | ||
9036 | + .check_preempt_curr = check_preempt_curr_litmus, | ||
9037 | + | ||
9038 | + .pick_next_task = pick_next_task_litmus, | ||
9039 | + .put_prev_task = put_prev_task_litmus, | ||
9040 | + | ||
9041 | +#ifdef CONFIG_SMP | ||
9042 | + .select_task_rq = select_task_rq_litmus, | ||
9043 | + | ||
9044 | + .pre_schedule = pre_schedule_litmus, | ||
9045 | +#endif | ||
9046 | + | ||
9047 | + .set_curr_task = set_curr_task_litmus, | ||
9048 | + .task_tick = task_tick_litmus, | ||
9049 | + | ||
9050 | + .get_rr_interval = get_rr_interval_litmus, | ||
9051 | + | ||
9052 | + .prio_changed = prio_changed_litmus, | ||
9053 | + .switched_to = switched_to_litmus, | ||
9054 | +}; | ||
9055 | diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c | ||
9056 | new file mode 100644 | ||
9057 | index 0000000..c7d5cf7 | ||
9058 | --- /dev/null | ||
9059 | +++ b/litmus/sched_pfair.c | ||
9060 | @@ -0,0 +1,894 @@ | ||
9061 | +/* | ||
9062 | + * litmus/sched_pfair.c | ||
9063 | + * | ||
9064 | + * Implementation of the (global) Pfair scheduling algorithm. | ||
9065 | + * | ||
9066 | + */ | ||
9067 | + | ||
9068 | +#include <asm/div64.h> | ||
9069 | +#include <linux/delay.h> | ||
9070 | +#include <linux/module.h> | ||
9071 | +#include <linux/spinlock.h> | ||
9072 | +#include <linux/percpu.h> | ||
9073 | +#include <linux/sched.h> | ||
9074 | +#include <linux/list.h> | ||
9075 | +#include <linux/slab.h> | ||
9076 | + | ||
9077 | +#include <litmus/litmus.h> | ||
9078 | +#include <litmus/jobs.h> | ||
9079 | +#include <litmus/preempt.h> | ||
9080 | +#include <litmus/rt_domain.h> | ||
9081 | +#include <litmus/sched_plugin.h> | ||
9082 | +#include <litmus/sched_trace.h> | ||
9083 | + | ||
9084 | +#include <litmus/bheap.h> | ||
9085 | + | ||
9086 | +struct subtask { | ||
9087 | + /* measured in quanta relative to job release */ | ||
9088 | + quanta_t release; | ||
9089 | + quanta_t deadline; | ||
9090 | + quanta_t overlap; /* called "b bit" by PD^2 */ | ||
9091 | + quanta_t group_deadline; | ||
9092 | +}; | ||
9093 | + | ||
9094 | +struct pfair_param { | ||
9095 | + quanta_t quanta; /* number of subtasks */ | ||
9096 | + quanta_t cur; /* index of current subtask */ | ||
9097 | + | ||
9098 | + quanta_t release; /* in quanta */ | ||
9099 | + quanta_t period; /* in quanta */ | ||
9100 | + | ||
9101 | + quanta_t last_quantum; /* when scheduled last */ | ||
9102 | + int last_cpu; /* where scheduled last */ | ||
9103 | + | ||
9104 | + unsigned int sporadic_release; /* On wakeup, new sporadic release? */ | ||
9105 | + | ||
9106 | + struct subtask subtasks[0]; /* allocate together with pfair_param */ | ||
9107 | +}; | ||
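The subtasks[0] member at the end of struct pfair_param is the old GNU zero-length-array idiom (today one would write subtasks[] as a C99 flexible array member): the fixed-size header and the variable-length subtask table come from a single allocation. Below is a sketch of the allocation arithmetic, with hypothetical userspace stand-ins for the kernel types; pfair_admit_task() later in this file performs the same computation with kmalloc().

#include <stdlib.h>

typedef unsigned long quanta_t;

struct subtask_sketch {
	quanta_t release, deadline, overlap, group_deadline;
};

struct pfair_param_sketch {
	quanta_t quanta;                  /* number of subtasks */
	struct subtask_sketch subtasks[]; /* flexible array member */
};

/* One allocation covers the header plus n trailing subtasks,
 * mirroring kmalloc(sizeof(*param) + quanta * sizeof(struct subtask)). */
static struct pfair_param_sketch *alloc_param(quanta_t n)
{
	struct pfair_param_sketch *p =
		malloc(sizeof(*p) + n * sizeof(struct subtask_sketch));
	if (p)
		p->quanta = n;
	return p;
}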
9108 | + | ||
9109 | +#define tsk_pfair(tsk) ((tsk)->rt_param.pfair) | ||
9110 | + | ||
9111 | +struct pfair_state { | ||
9112 | + int cpu; | ||
9113 | + volatile quanta_t cur_tick; /* updated by the CPU that is advancing | ||
9114 | + * the time */ | ||
9115 | + volatile quanta_t local_tick; /* What tick is the local CPU currently | ||
9116 | + * executing? Updated only by the local | ||
9117 | + * CPU. In QEMU, this may lag behind the | ||
9118 | + * current tick. In a real system, with | ||
9119 | + * proper timers and aligned quanta, | ||
9120 | + * that should only be the | ||
9121 | + * case for a very short time after the | ||
9122 | + * time advanced. With staggered quanta, | ||
9123 | + * it will lag for the duration of the | ||
9124 | + * offset. | ||
9125 | + */ | ||
9126 | + | ||
9127 | + struct task_struct* linked; /* the task that should be executing */ | ||
9128 | + struct task_struct* local; /* the local copy of linked */ | ||
9129 | + struct task_struct* scheduled; /* what is actually scheduled */ | ||
9130 | + | ||
9131 | + unsigned long missed_quanta; | ||
9132 | + lt_t offset; /* stagger offset */ | ||
9133 | +}; | ||
9134 | + | ||
9135 | +/* Currently, we limit the maximum period of any task to 2000 quanta. | ||
9136 | + * The reason is that it makes the implementation easier since we do not | ||
9137 | + * need to reallocate the release wheel on task arrivals. | ||
9138 | + * In the future, the wheel could be sized dynamically to lift this limit. | ||
9139 | + */ | ||
9140 | +#define PFAIR_MAX_PERIOD 2000 | ||
9141 | + | ||
9142 | +/* This is the release queue wheel. It is indexed by pfair_time % | ||
9143 | + * PFAIR_MAX_PERIOD. Each heap is ordered by PFAIR priority, so that it can be | ||
9144 | + * merged with the ready queue. | ||
9145 | + */ | ||
9146 | +static struct bheap release_queue[PFAIR_MAX_PERIOD]; | ||
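As a quick arithmetic check of the wheel indexing: with PFAIR_MAX_PERIOD = 2000, a release due at quantum 4503 lands in slot 4503 % 2000 = 503. Because no admitted task has a period of 2000 quanta or more, any earlier release that mapped to slot 503 (e.g., quantum 2503) must already have been drained by poll_releases() a full wheel rotation earlier, so pending releases never collide in a slot.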
9147 | + | ||
9148 | +DEFINE_PER_CPU(struct pfair_state, pfair_state); | ||
9149 | +struct pfair_state** pstate; /* shortcut */ | ||
9150 | + | ||
9151 | +static quanta_t pfair_time = 0; /* the "official" PFAIR clock */ | ||
9152 | +static quanta_t merge_time = 0; /* Updated after the release queue has been | ||
9153 | + * merged. Used by drop_all_references(). | ||
9154 | + */ | ||
9155 | + | ||
9156 | +static rt_domain_t pfair; | ||
9157 | + | ||
9158 | +/* The pfair_lock is used to serialize all scheduling events. | ||
9159 | + */ | ||
9160 | +#define pfair_lock pfair.ready_lock | ||
9161 | + | ||
9162 | +/* Enable for lots of trace info. | ||
9163 | + * #define PFAIR_DEBUG | ||
9164 | + */ | ||
9165 | + | ||
9166 | +#ifdef PFAIR_DEBUG | ||
9167 | +#define PTRACE_TASK(t, f, args...) TRACE_TASK(t, f, ## args) | ||
9168 | +#define PTRACE(f, args...) TRACE(f, ## args) | ||
9169 | +#else | ||
9170 | +#define PTRACE_TASK(t, f, args...) | ||
9171 | +#define PTRACE(f, args...) | ||
9172 | +#endif | ||
9173 | + | ||
9174 | +/* gcc will inline all of these accessor functions... */ | ||
9175 | +static struct subtask* cur_subtask(struct task_struct* t) | ||
9176 | +{ | ||
9177 | + return tsk_pfair(t)->subtasks + tsk_pfair(t)->cur; | ||
9178 | +} | ||
9179 | + | ||
9180 | +static quanta_t cur_deadline(struct task_struct* t) | ||
9181 | +{ | ||
9182 | + return cur_subtask(t)->deadline + tsk_pfair(t)->release; | ||
9183 | +} | ||
9184 | + | ||
9185 | + | ||
9186 | +static quanta_t cur_sub_release(struct task_struct* t) | ||
9187 | +{ | ||
9188 | + return cur_subtask(t)->release + tsk_pfair(t)->release; | ||
9189 | +} | ||
9190 | + | ||
9191 | +static quanta_t cur_release(struct task_struct* t) | ||
9192 | +{ | ||
9193 | +#ifdef EARLY_RELEASE | ||
9194 | + /* only the release of the first subtask counts when we early | ||
9195 | + * release */ | ||
9196 | + return tsk_pfair(t)->release; | ||
9197 | +#else | ||
9198 | + return cur_sub_release(t); | ||
9199 | +#endif | ||
9200 | +} | ||
9201 | + | ||
9202 | +static quanta_t cur_overlap(struct task_struct* t) | ||
9203 | +{ | ||
9204 | + return cur_subtask(t)->overlap; | ||
9205 | +} | ||
9206 | + | ||
9207 | +static quanta_t cur_group_deadline(struct task_struct* t) | ||
9208 | +{ | ||
9209 | + quanta_t gdl = cur_subtask(t)->group_deadline; | ||
9210 | + if (gdl) | ||
9211 | + return gdl + tsk_pfair(t)->release; | ||
9212 | + else | ||
9213 | + return gdl; | ||
9214 | +} | ||
9215 | + | ||
9216 | + | ||
9217 | +static int pfair_higher_prio(struct task_struct* first, | ||
9218 | + struct task_struct* second) | ||
9219 | +{ | ||
9220 | + return /* first task must exist */ | ||
9221 | + first && ( | ||
9222 | + /* Does the second task exist and is it a real-time task? If | ||
9223 | + * not, the first task (which is a RT task) has higher | ||
9224 | + * priority. | ||
9225 | + */ | ||
9226 | + !second || !is_realtime(second) || | ||
9227 | + | ||
9228 | + /* Is the (subtask) deadline of the first task earlier? | ||
9229 | + * Then it has higher priority. | ||
9230 | + */ | ||
9231 | + time_before(cur_deadline(first), cur_deadline(second)) || | ||
9232 | + | ||
9233 | + /* Do we have a deadline tie? | ||
9234 | + * Then break by B-bit. | ||
9235 | + */ | ||
9236 | + (cur_deadline(first) == cur_deadline(second) && | ||
9237 | + (cur_overlap(first) > cur_overlap(second) || | ||
9238 | + | ||
9239 | + /* Do we have a B-bit tie? | ||
9240 | + * Then break by group deadline. | ||
9241 | + */ | ||
9242 | + (cur_overlap(first) == cur_overlap(second) && | ||
9243 | + (time_after(cur_group_deadline(first), | ||
9244 | + cur_group_deadline(second)) || | ||
9245 | + | ||
9246 | + /* Do we have a group deadline tie? | ||
9247 | + * Then break by PID, which are unique. | ||
9248 | + */ | ||
9249 | + (cur_group_deadline(first) == | ||
9250 | + cur_group_deadline(second) && | ||
9251 | + first->pid < second->pid)))))); | ||
9252 | +} | ||
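A worked tie-break, for concreteness: suppose subtasks A and B both have deadline 4. If A's b-bit is 1 and B's is 0, A wins. If both b-bits are 1, the later group deadline wins (note the time_after(): under PD^2, a later group deadline means a longer chain of constrained successor windows and hence greater urgency). If the group deadlines are equal too, the smaller PID wins, which makes the priority order total.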
9253 | + | ||
9254 | +int pfair_ready_order(struct bheap_node* a, struct bheap_node* b) | ||
9255 | +{ | ||
9256 | + return pfair_higher_prio(bheap2task(a), bheap2task(b)); | ||
9257 | +} | ||
9258 | + | ||
9259 | +/* return the proper release queue for time t */ | ||
9260 | +static struct bheap* relq(quanta_t t) | ||
9261 | +{ | ||
9262 | + struct bheap* rq = &release_queue[t % PFAIR_MAX_PERIOD]; | ||
9263 | + return rq; | ||
9264 | +} | ||
9265 | + | ||
9266 | +static void prepare_release(struct task_struct* t, quanta_t at) | ||
9267 | +{ | ||
9268 | + tsk_pfair(t)->release = at; | ||
9269 | + tsk_pfair(t)->cur = 0; | ||
9270 | +} | ||
9271 | + | ||
9272 | +static void __pfair_add_release(struct task_struct* t, struct bheap* queue) | ||
9273 | +{ | ||
9274 | + bheap_insert(pfair_ready_order, queue, | ||
9275 | + tsk_rt(t)->heap_node); | ||
9276 | +} | ||
9277 | + | ||
9278 | +static void pfair_add_release(struct task_struct* t) | ||
9279 | +{ | ||
9280 | + BUG_ON(bheap_node_in_heap(tsk_rt(t)->heap_node)); | ||
9281 | + __pfair_add_release(t, relq(cur_release(t))); | ||
9282 | +} | ||
9283 | + | ||
9284 | +/* pull released tasks from the release queue */ | ||
9285 | +static void poll_releases(quanta_t time) | ||
9286 | +{ | ||
9287 | + __merge_ready(&pfair, relq(time)); | ||
9288 | + merge_time = time; | ||
9289 | +} | ||
9290 | + | ||
9291 | +static void check_preempt(struct task_struct* t) | ||
9292 | +{ | ||
9293 | + int cpu = NO_CPU; | ||
9294 | + if (tsk_rt(t)->linked_on != tsk_rt(t)->scheduled_on && | ||
9295 | + tsk_rt(t)->present) { | ||
9296 | + /* the task can be scheduled and | ||
9297 | + * is not scheduled where it ought to be scheduled | ||
9298 | + */ | ||
9299 | + cpu = tsk_rt(t)->linked_on != NO_CPU ? | ||
9300 | + tsk_rt(t)->linked_on : | ||
9301 | + tsk_rt(t)->scheduled_on; | ||
9302 | + PTRACE_TASK(t, "linked_on:%d, scheduled_on:%d\n", | ||
9303 | + tsk_rt(t)->linked_on, tsk_rt(t)->scheduled_on); | ||
9304 | + /* preempt */ | ||
9305 | + litmus_reschedule(cpu); | ||
9306 | + } | ||
9307 | +} | ||
9308 | + | ||
9309 | +/* caller must hold pfair_lock */ | ||
9310 | +static void drop_all_references(struct task_struct *t) | ||
9311 | +{ | ||
9312 | + int cpu; | ||
9313 | + struct pfair_state* s; | ||
9314 | + struct bheap* q; | ||
9315 | + if (bheap_node_in_heap(tsk_rt(t)->heap_node)) { | ||
9316 | + /* figure out what queue the node is in */ | ||
9317 | + if (time_before_eq(cur_release(t), merge_time)) | ||
9318 | + q = &pfair.ready_queue; | ||
9319 | + else | ||
9320 | + q = relq(cur_release(t)); | ||
9321 | + bheap_delete(pfair_ready_order, q, | ||
9322 | + tsk_rt(t)->heap_node); | ||
9323 | + } | ||
9324 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
9325 | + s = &per_cpu(pfair_state, cpu); | ||
9326 | + if (s->linked == t) | ||
9327 | + s->linked = NULL; | ||
9328 | + if (s->local == t) | ||
9329 | + s->local = NULL; | ||
9330 | + if (s->scheduled == t) | ||
9331 | + s->scheduled = NULL; | ||
9332 | + } | ||
9333 | +} | ||
9334 | + | ||
9335 | +/* returns 1 if the task needs to go to the release queue */ | ||
9336 | +static int advance_subtask(quanta_t time, struct task_struct* t, int cpu) | ||
9337 | +{ | ||
9338 | + struct pfair_param* p = tsk_pfair(t); | ||
9339 | + int to_relq; | ||
9340 | + p->cur = (p->cur + 1) % p->quanta; | ||
9341 | + if (!p->cur) { | ||
9342 | + sched_trace_task_completion(t, 1); | ||
9343 | + if (tsk_rt(t)->present) { | ||
9344 | + /* we start a new job */ | ||
9345 | + prepare_for_next_period(t); | ||
9346 | + sched_trace_task_release(t); | ||
9347 | + get_rt_flags(t) = RT_F_RUNNING; | ||
9348 | + p->release += p->period; | ||
9349 | + } else { | ||
9350 | + /* remove task from system until it wakes */ | ||
9351 | + drop_all_references(t); | ||
9352 | + tsk_pfair(t)->sporadic_release = 1; | ||
9353 | + TRACE_TASK(t, "on %d advanced to subtask %lu (not present)\n", | ||
9354 | + cpu, p->cur); | ||
9355 | + return 0; | ||
9356 | + } | ||
9357 | + } | ||
9358 | + to_relq = time_after(cur_release(t), time); | ||
9359 | + TRACE_TASK(t, "on %d advanced to subtask %lu -> to_relq=%d\n", | ||
9360 | + cpu, p->cur, to_relq); | ||
9361 | + return to_relq; | ||
9362 | +} | ||
9363 | + | ||
9364 | +static void advance_subtasks(quanta_t time) | ||
9365 | +{ | ||
9366 | + int cpu, missed; | ||
9367 | + struct task_struct* l; | ||
9368 | + struct pfair_param* p; | ||
9369 | + | ||
9370 | + for_each_online_cpu(cpu) { | ||
9371 | + l = pstate[cpu]->linked; | ||
9372 | + missed = pstate[cpu]->linked != pstate[cpu]->local; | ||
9373 | + if (l) { | ||
9374 | + p = tsk_pfair(l); | ||
9375 | + p->last_quantum = time; | ||
9376 | + p->last_cpu = cpu; | ||
9377 | + if (advance_subtask(time, l, cpu)) { | ||
9378 | + pstate[cpu]->linked = NULL; | ||
9379 | + pfair_add_release(l); | ||
9380 | + } | ||
9381 | + } | ||
9382 | + } | ||
9383 | +} | ||
9384 | + | ||
9385 | +static int target_cpu(quanta_t time, struct task_struct* t, int default_cpu) | ||
9386 | +{ | ||
9387 | + int cpu; | ||
9388 | + if (tsk_rt(t)->scheduled_on != NO_CPU) { | ||
9389 | + /* always observe scheduled_on linkage */ | ||
9390 | + default_cpu = tsk_rt(t)->scheduled_on; | ||
9391 | + } else if (tsk_pfair(t)->last_quantum == time - 1) { | ||
9392 | + /* back2back quanta */ | ||
9393 | + /* Only observe last_quantum if no scheduled_on is in the way. | ||
9394 | + * This should only kick in if a CPU missed quanta, and that | ||
9395 | + * *should* only happen in QEMU. | ||
9396 | + */ | ||
9397 | + cpu = tsk_pfair(t)->last_cpu; | ||
9398 | + if (!pstate[cpu]->linked || | ||
9399 | + tsk_rt(pstate[cpu]->linked)->scheduled_on != cpu) { | ||
9400 | + default_cpu = cpu; | ||
9401 | + } | ||
9402 | + } | ||
9403 | + return default_cpu; | ||
9404 | +} | ||
9405 | + | ||
9406 | +/* returns one if linking was redirected */ | ||
9407 | +static int pfair_link(quanta_t time, int cpu, | ||
9408 | + struct task_struct* t) | ||
9409 | +{ | ||
9410 | + int target = target_cpu(time, t, cpu); | ||
9411 | + struct task_struct* prev = pstate[cpu]->linked; | ||
9412 | + struct task_struct* other; | ||
9413 | + | ||
9414 | + if (target != cpu) { | ||
9415 | + other = pstate[target]->linked; | ||
9416 | + pstate[target]->linked = t; | ||
9417 | + tsk_rt(t)->linked_on = target; | ||
9418 | + if (!other) | ||
9419 | + /* linked ok, but reschedule this CPU */ | ||
9420 | + return 1; | ||
9421 | + if (target < cpu) { | ||
9422 | + /* link other to cpu instead */ | ||
9423 | + tsk_rt(other)->linked_on = cpu; | ||
9424 | + pstate[cpu]->linked = other; | ||
9425 | + if (prev) { | ||
9426 | + /* prev got pushed back into the ready queue */ | ||
9427 | + tsk_rt(prev)->linked_on = NO_CPU; | ||
9428 | + __add_ready(&pfair, prev); | ||
9429 | + } | ||
9430 | + /* we are done with this cpu */ | ||
9431 | + return 0; | ||
9432 | + } else { | ||
9433 | +			/* re-add other; its original CPU was not considered yet */ | ||
9434 | + tsk_rt(other)->linked_on = NO_CPU; | ||
9435 | + __add_ready(&pfair, other); | ||
9436 | + /* reschedule this CPU */ | ||
9437 | + return 1; | ||
9438 | + } | ||
9439 | + } else { | ||
9440 | + pstate[cpu]->linked = t; | ||
9441 | + tsk_rt(t)->linked_on = cpu; | ||
9442 | + if (prev) { | ||
9443 | + /* prev got pushed back into the ready queue */ | ||
9444 | + tsk_rt(prev)->linked_on = NO_CPU; | ||
9445 | + __add_ready(&pfair, prev); | ||
9446 | + } | ||
9447 | + /* we are done with this CPU */ | ||
9448 | + return 0; | ||
9449 | + } | ||
9450 | +} | ||
9451 | + | ||
9452 | +static void schedule_subtasks(quanta_t time) | ||
9453 | +{ | ||
9454 | + int cpu, retry; | ||
9455 | + | ||
9456 | + for_each_online_cpu(cpu) { | ||
9457 | + retry = 1; | ||
9458 | + while (retry) { | ||
9459 | + if (pfair_higher_prio(__peek_ready(&pfair), | ||
9460 | + pstate[cpu]->linked)) | ||
9461 | + retry = pfair_link(time, cpu, | ||
9462 | + __take_ready(&pfair)); | ||
9463 | + else | ||
9464 | + retry = 0; | ||
9465 | + } | ||
9466 | + } | ||
9467 | +} | ||
9468 | + | ||
9469 | +static void schedule_next_quantum(quanta_t time) | ||
9470 | +{ | ||
9471 | + int cpu; | ||
9472 | + | ||
9473 | + /* called with interrupts disabled */ | ||
9474 | + PTRACE("--- Q %lu at %llu PRE-SPIN\n", | ||
9475 | + time, litmus_clock()); | ||
9476 | + raw_spin_lock(&pfair_lock); | ||
9477 | + PTRACE("<<< Q %lu at %llu\n", | ||
9478 | + time, litmus_clock()); | ||
9479 | + | ||
9480 | + sched_trace_quantum_boundary(); | ||
9481 | + | ||
9482 | + advance_subtasks(time); | ||
9483 | + poll_releases(time); | ||
9484 | + schedule_subtasks(time); | ||
9485 | + | ||
9486 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) | ||
9487 | + if (pstate[cpu]->linked) | ||
9488 | + PTRACE_TASK(pstate[cpu]->linked, | ||
9489 | + " linked on %d.\n", cpu); | ||
9490 | + else | ||
9491 | + PTRACE("(null) linked on %d.\n", cpu); | ||
9492 | + | ||
9493 | + /* We are done. Advance time. */ | ||
9494 | + mb(); | ||
9495 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
9496 | + if (pstate[cpu]->local_tick != pstate[cpu]->cur_tick) { | ||
9497 | + TRACE("BAD Quantum not acked on %d " | ||
9498 | + "(l:%lu c:%lu p:%lu)\n", | ||
9499 | + cpu, | ||
9500 | + pstate[cpu]->local_tick, | ||
9501 | + pstate[cpu]->cur_tick, | ||
9502 | + pfair_time); | ||
9503 | + pstate[cpu]->missed_quanta++; | ||
9504 | + } | ||
9505 | + pstate[cpu]->cur_tick = time; | ||
9506 | + } | ||
9507 | + PTRACE(">>> Q %lu at %llu\n", | ||
9508 | + time, litmus_clock()); | ||
9509 | + raw_spin_unlock(&pfair_lock); | ||
9510 | +} | ||
9511 | + | ||
9512 | +static noinline void wait_for_quantum(quanta_t q, struct pfair_state* state) | ||
9513 | +{ | ||
9514 | + quanta_t loc; | ||
9515 | + | ||
9516 | + goto first; /* skip mb() on first iteration */ | ||
9517 | + do { | ||
9518 | + cpu_relax(); | ||
9519 | + mb(); | ||
9520 | + first: loc = state->cur_tick; | ||
9521 | + /* FIXME: what if loc > cur? */ | ||
9522 | + } while (time_before(loc, q)); | ||
9523 | + PTRACE("observed cur_tick:%lu >= q:%lu\n", | ||
9524 | + loc, q); | ||
9525 | +} | ||
9526 | + | ||
9527 | +static quanta_t current_quantum(struct pfair_state* state) | ||
9528 | +{ | ||
9529 | + lt_t t = litmus_clock() - state->offset; | ||
9530 | + return time2quanta(t, FLOOR); | ||
9531 | +} | ||
9532 | + | ||
9533 | +static void catchup_quanta(quanta_t from, quanta_t target, | ||
9534 | + struct pfair_state* state) | ||
9535 | +{ | ||
9536 | + quanta_t cur = from, time; | ||
9537 | + TRACE("+++< BAD catching up quanta from %lu to %lu\n", | ||
9538 | + from, target); | ||
9539 | + while (time_before(cur, target)) { | ||
9540 | + wait_for_quantum(cur, state); | ||
9541 | + cur++; | ||
9542 | + time = cmpxchg(&pfair_time, | ||
9543 | + cur - 1, /* expected */ | ||
9544 | + cur /* next */ | ||
9545 | + ); | ||
9546 | + if (time == cur - 1) | ||
9547 | + schedule_next_quantum(cur); | ||
9548 | + } | ||
9549 | + TRACE("+++> catching up done\n"); | ||
9550 | +} | ||
9551 | + | ||
9552 | +/* pfair_tick - this function is called for every local timer | ||
9553 | + * interrupt. | ||
9554 | + */ | ||
9555 | +static void pfair_tick(struct task_struct* t) | ||
9556 | +{ | ||
9557 | + struct pfair_state* state = &__get_cpu_var(pfair_state); | ||
9558 | + quanta_t time, cur; | ||
9559 | + int retry = 10; | ||
9560 | + | ||
9561 | + do { | ||
9562 | + cur = current_quantum(state); | ||
9563 | + PTRACE("q %lu at %llu\n", cur, litmus_clock()); | ||
9564 | + | ||
9565 | + /* Attempt to advance time. First CPU to get here | ||
9566 | + * will prepare the next quantum. | ||
9567 | + */ | ||
9568 | + time = cmpxchg(&pfair_time, | ||
9569 | + cur - 1, /* expected */ | ||
9570 | + cur /* next */ | ||
9571 | + ); | ||
9572 | + if (time == cur - 1) { | ||
9573 | + /* exchange succeeded */ | ||
9574 | + wait_for_quantum(cur - 1, state); | ||
9575 | + schedule_next_quantum(cur); | ||
9576 | + retry = 0; | ||
9577 | + } else if (time_before(time, cur - 1)) { | ||
9578 | + /* the whole system missed a tick !? */ | ||
9579 | + catchup_quanta(time, cur, state); | ||
9580 | + retry--; | ||
9581 | + } else if (time_after(time, cur)) { | ||
9582 | + /* our timer lagging behind!? */ | ||
9583 | + TRACE("BAD pfair_time:%lu > cur:%lu\n", time, cur); | ||
9584 | + retry--; | ||
9585 | + } else { | ||
9586 | + /* Some other CPU already started scheduling | ||
9587 | + * this quantum. Let it do its job and then update. | ||
9588 | + */ | ||
9589 | + retry = 0; | ||
9590 | + } | ||
9591 | + } while (retry); | ||
9592 | + | ||
9593 | + /* Spin locally until time advances. */ | ||
9594 | + wait_for_quantum(cur, state); | ||
9595 | + | ||
9596 | + /* copy assignment */ | ||
9597 | + /* FIXME: what if we race with a future update? Corrupted state? */ | ||
9598 | + state->local = state->linked; | ||
9599 | + /* signal that we are done */ | ||
9600 | + mb(); | ||
9601 | + state->local_tick = state->cur_tick; | ||
9602 | + | ||
9603 | + if (state->local != current | ||
9604 | + && (is_realtime(current) || is_present(state->local))) | ||
9605 | + litmus_reschedule_local(); | ||
9606 | +} | ||
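The cmpxchg() in pfair_tick() is effectively a per-quantum leader election: exactly one CPU succeeds in advancing pfair_time from cur - 1 to cur and becomes responsible for schedule_next_quantum(cur), while the others wait in wait_for_quantum(). A userspace sketch of the same idea, assuming GCC's __sync_val_compare_and_swap() builtin as a stand-in for the kernel's cmpxchg():

typedef unsigned long quanta_t;

static quanta_t shared_time; /* stand-in for pfair_time */

/* Returns 1 if this CPU won the race to advance the clock from
 * cur - 1 to cur and must now prepare the quantum's schedule. */
static int try_advance(quanta_t cur)
{
	quanta_t seen = __sync_val_compare_and_swap(&shared_time,
						    cur - 1, cur);
	return seen == cur - 1; /* exchange succeeded */
}

A losing CPU learns from the returned value whether the system is behind (seen before cur - 1, triggering catchup_quanta()) or whether another CPU already prepared this quantum.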
9607 | + | ||
9608 | +static int safe_to_schedule(struct task_struct* t, int cpu) | ||
9609 | +{ | ||
9610 | + int where = tsk_rt(t)->scheduled_on; | ||
9611 | + if (where != NO_CPU && where != cpu) { | ||
9612 | + TRACE_TASK(t, "BAD: can't be scheduled on %d, " | ||
9613 | + "scheduled already on %d.\n", cpu, where); | ||
9614 | + return 0; | ||
9615 | + } else | ||
9616 | + return tsk_rt(t)->present && get_rt_flags(t) == RT_F_RUNNING; | ||
9617 | +} | ||
9618 | + | ||
9619 | +static struct task_struct* pfair_schedule(struct task_struct * prev) | ||
9620 | +{ | ||
9621 | + struct pfair_state* state = &__get_cpu_var(pfair_state); | ||
9622 | + int blocks; | ||
9623 | + struct task_struct* next = NULL; | ||
9624 | + | ||
9625 | + raw_spin_lock(&pfair_lock); | ||
9626 | + | ||
9627 | + blocks = is_realtime(prev) && !is_running(prev); | ||
9628 | + | ||
9629 | + if (state->local && safe_to_schedule(state->local, state->cpu)) | ||
9630 | + next = state->local; | ||
9631 | + | ||
9632 | + if (prev != next) { | ||
9633 | + tsk_rt(prev)->scheduled_on = NO_CPU; | ||
9634 | + if (next) | ||
9635 | + tsk_rt(next)->scheduled_on = state->cpu; | ||
9636 | + } | ||
9637 | + sched_state_task_picked(); | ||
9638 | + raw_spin_unlock(&pfair_lock); | ||
9639 | + | ||
9640 | + if (next) | ||
9641 | + TRACE_TASK(next, "scheduled rel=%lu at %lu (%llu)\n", | ||
9642 | + tsk_pfair(next)->release, pfair_time, litmus_clock()); | ||
9643 | + else if (is_realtime(prev)) | ||
9644 | + TRACE("Becomes idle at %lu (%llu)\n", pfair_time, litmus_clock()); | ||
9645 | + | ||
9646 | + return next; | ||
9647 | +} | ||
9648 | + | ||
9649 | +static void pfair_task_new(struct task_struct * t, int on_rq, int running) | ||
9650 | +{ | ||
9651 | + unsigned long flags; | ||
9652 | + | ||
9653 | + TRACE("pfair: task new %d state:%d\n", t->pid, t->state); | ||
9654 | + | ||
9655 | + raw_spin_lock_irqsave(&pfair_lock, flags); | ||
9656 | + if (running) | ||
9657 | + t->rt_param.scheduled_on = task_cpu(t); | ||
9658 | + else | ||
9659 | + t->rt_param.scheduled_on = NO_CPU; | ||
9660 | + | ||
9661 | + prepare_release(t, pfair_time + 1); | ||
9662 | + tsk_pfair(t)->sporadic_release = 0; | ||
9663 | + pfair_add_release(t); | ||
9664 | + check_preempt(t); | ||
9665 | + | ||
9666 | + raw_spin_unlock_irqrestore(&pfair_lock, flags); | ||
9667 | +} | ||
9668 | + | ||
9669 | +static void pfair_task_wake_up(struct task_struct *t) | ||
9670 | +{ | ||
9671 | + unsigned long flags; | ||
9672 | + lt_t now; | ||
9673 | + | ||
9674 | + TRACE_TASK(t, "wakes at %llu, release=%lu, pfair_time:%lu\n", | ||
9675 | + litmus_clock(), cur_release(t), pfair_time); | ||
9676 | + | ||
9677 | + raw_spin_lock_irqsave(&pfair_lock, flags); | ||
9678 | + | ||
9679 | + /* It is a little unclear how to deal with Pfair | ||
9680 | + * tasks that block for a while and then wake. For now, | ||
9681 | + * if a task blocks and wakes before its next job release, | ||
9682 | + * then it may resume if it is currently linked somewhere | ||
9683 | + * (as if it never blocked at all). Otherwise, we have a | ||
9684 | + * new sporadic job release. | ||
9685 | + */ | ||
9686 | + if (tsk_pfair(t)->sporadic_release) { | ||
9687 | + now = litmus_clock(); | ||
9688 | + release_at(t, now); | ||
9689 | + prepare_release(t, time2quanta(now, CEIL)); | ||
9690 | + sched_trace_task_release(t); | ||
9691 | + /* FIXME: race with pfair_time advancing */ | ||
9692 | + pfair_add_release(t); | ||
9693 | + tsk_pfair(t)->sporadic_release = 0; | ||
9694 | + } | ||
9695 | + | ||
9696 | + check_preempt(t); | ||
9697 | + | ||
9698 | + raw_spin_unlock_irqrestore(&pfair_lock, flags); | ||
9699 | + TRACE_TASK(t, "wake up done at %llu\n", litmus_clock()); | ||
9700 | +} | ||
9701 | + | ||
9702 | +static void pfair_task_block(struct task_struct *t) | ||
9703 | +{ | ||
9704 | + BUG_ON(!is_realtime(t)); | ||
9705 | + TRACE_TASK(t, "blocks at %llu, state:%d\n", | ||
9706 | + litmus_clock(), t->state); | ||
9707 | +} | ||
9708 | + | ||
9709 | +static void pfair_task_exit(struct task_struct * t) | ||
9710 | +{ | ||
9711 | + unsigned long flags; | ||
9712 | + | ||
9713 | + BUG_ON(!is_realtime(t)); | ||
9714 | + | ||
9715 | +	/* Remove task from release or ready queue, and ensure | ||
9716 | +	 * that it is not the scheduled task for ANY CPU. We | ||
9717 | +	 * do this blanket check because occasionally, when | ||
9718 | + * tasks exit while blocked, the task_cpu of the task | ||
9719 | + * might not be the same as the CPU that the PFAIR scheduler | ||
9720 | + * has chosen for it. | ||
9721 | + */ | ||
9722 | + raw_spin_lock_irqsave(&pfair_lock, flags); | ||
9723 | + | ||
9724 | + TRACE_TASK(t, "RIP, state:%d\n", t->state); | ||
9725 | + drop_all_references(t); | ||
9726 | + | ||
9727 | + raw_spin_unlock_irqrestore(&pfair_lock, flags); | ||
9728 | + | ||
9729 | + kfree(t->rt_param.pfair); | ||
9730 | + t->rt_param.pfair = NULL; | ||
9731 | +} | ||
9732 | + | ||
9733 | + | ||
9734 | +static void pfair_release_at(struct task_struct* task, lt_t start) | ||
9735 | +{ | ||
9736 | + unsigned long flags; | ||
9737 | + quanta_t release; | ||
9738 | + | ||
9739 | + BUG_ON(!is_realtime(task)); | ||
9740 | + | ||
9741 | + raw_spin_lock_irqsave(&pfair_lock, flags); | ||
9742 | + release_at(task, start); | ||
9743 | + release = time2quanta(start, CEIL); | ||
9744 | + | ||
9745 | + if (release - pfair_time >= PFAIR_MAX_PERIOD) | ||
9746 | + release = pfair_time + PFAIR_MAX_PERIOD; | ||
9747 | + | ||
9748 | + TRACE_TASK(task, "sys release at %lu\n", release); | ||
9749 | + | ||
9750 | + drop_all_references(task); | ||
9751 | + prepare_release(task, release); | ||
9752 | + pfair_add_release(task); | ||
9753 | + | ||
9754 | + /* Clear sporadic release flag, since this release subsumes any | ||
9755 | + * sporadic release on wake. | ||
9756 | + */ | ||
9757 | + tsk_pfair(task)->sporadic_release = 0; | ||
9758 | + | ||
9759 | + raw_spin_unlock_irqrestore(&pfair_lock, flags); | ||
9760 | +} | ||
9761 | + | ||
9762 | +static void init_subtask(struct subtask* sub, unsigned long i, | ||
9763 | + lt_t quanta, lt_t period) | ||
9764 | +{ | ||
9765 | + /* since i is zero-based, the formulas are shifted by one */ | ||
9766 | + lt_t tmp; | ||
9767 | + | ||
9768 | + /* release */ | ||
9769 | + tmp = period * i; | ||
9770 | + do_div(tmp, quanta); /* floor */ | ||
9771 | + sub->release = (quanta_t) tmp; | ||
9772 | + | ||
9773 | + /* deadline */ | ||
9774 | + tmp = period * (i + 1); | ||
9775 | + if (do_div(tmp, quanta)) /* ceil */ | ||
9776 | + tmp++; | ||
9777 | + sub->deadline = (quanta_t) tmp; | ||
9778 | + | ||
9779 | + /* next release */ | ||
9780 | + tmp = period * (i + 1); | ||
9781 | + do_div(tmp, quanta); /* floor */ | ||
9782 | + sub->overlap = sub->deadline - (quanta_t) tmp; | ||
9783 | + | ||
9784 | + /* Group deadline. | ||
9785 | + * Based on the formula given in Uma's thesis. | ||
9786 | + */ | ||
9787 | + if (2 * quanta >= period) { | ||
9788 | + /* heavy */ | ||
9789 | + tmp = (sub->deadline - (i + 1)) * period; | ||
9790 | + if (period > quanta && | ||
9791 | + do_div(tmp, (period - quanta))) /* ceil */ | ||
9792 | + tmp++; | ||
9793 | + sub->group_deadline = (quanta_t) tmp; | ||
9794 | + } else | ||
9795 | + sub->group_deadline = 0; | ||
9796 | +} | ||
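To make the formulas concrete, consider a task with quanta = 3 and period = 5 (weight 3/5; "heavy", since 2 * 3 >= 5). init_subtask() then yields:

  i | release = floor(5i/3) | deadline = ceil(5(i+1)/3) | b-bit | group deadline
  0 | 0                     | 2                         | 1     | 3
  1 | 1                     | 4                         | 1     | 5
  2 | 3                     | 5                         | 0     | 5

For i = 0, for example, the b-bit is 2 - floor(5/3) = 1 and the group deadline is ceil((2 - 1) * 5 / (5 - 3)) = ceil(2.5) = 3.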
9797 | + | ||
9798 | +static void dump_subtasks(struct task_struct* t) | ||
9799 | +{ | ||
9800 | + unsigned long i; | ||
9801 | + for (i = 0; i < t->rt_param.pfair->quanta; i++) | ||
9802 | + TRACE_TASK(t, "SUBTASK %lu: rel=%lu dl=%lu bbit:%lu gdl:%lu\n", | ||
9803 | + i + 1, | ||
9804 | + t->rt_param.pfair->subtasks[i].release, | ||
9805 | + t->rt_param.pfair->subtasks[i].deadline, | ||
9806 | + t->rt_param.pfair->subtasks[i].overlap, | ||
9807 | + t->rt_param.pfair->subtasks[i].group_deadline); | ||
9808 | +} | ||
9809 | + | ||
9810 | +static long pfair_admit_task(struct task_struct* t) | ||
9811 | +{ | ||
9812 | + lt_t quanta; | ||
9813 | + lt_t period; | ||
9814 | + s64 quantum_length = ktime_to_ns(tick_period); | ||
9815 | + struct pfair_param* param; | ||
9816 | + unsigned long i; | ||
9817 | + | ||
9818 | + /* Pfair is a tick-based method, so the time | ||
9819 | + * of interest is jiffies. Calculate tick-based | ||
9820 | + * times for everything. | ||
9821 | + * (Ceiling of exec cost, floor of period.) | ||
9822 | + */ | ||
9823 | + | ||
9825 | +	period = get_rt_period(t); | ||
9826 | + | ||
9827 | +	quanta = time2quanta(get_exec_cost(t), CEIL); | ||
9828 | + | ||
9829 | + if (do_div(period, quantum_length)) | ||
9830 | + printk(KERN_WARNING | ||
9831 | + "The period of %s/%d is not a multiple of %llu.\n", | ||
9832 | + t->comm, t->pid, (unsigned long long) quantum_length); | ||
9833 | + | ||
9834 | + if (period >= PFAIR_MAX_PERIOD) { | ||
9835 | + printk(KERN_WARNING | ||
9836 | + "PFAIR: Rejecting task %s/%d; its period is too long.\n", | ||
9837 | + t->comm, t->pid); | ||
9838 | + return -EINVAL; | ||
9839 | + } | ||
9840 | + | ||
9841 | + if (quanta == period) { | ||
9842 | + /* special case: task has weight 1.0 */ | ||
9843 | + printk(KERN_INFO | ||
9844 | + "Admitting weight 1.0 task. (%s/%d, %llu, %llu).\n", | ||
9845 | + t->comm, t->pid, quanta, period); | ||
9846 | + quanta = 1; | ||
9847 | + period = 1; | ||
9848 | + } | ||
9849 | + | ||
9850 | + param = kmalloc(sizeof(*param) + | ||
9851 | + quanta * sizeof(struct subtask), GFP_ATOMIC); | ||
9852 | + | ||
9853 | + if (!param) | ||
9854 | + return -ENOMEM; | ||
9855 | + | ||
9856 | + param->quanta = quanta; | ||
9857 | + param->cur = 0; | ||
9858 | + param->release = 0; | ||
9859 | + param->period = period; | ||
9860 | + | ||
9861 | + for (i = 0; i < quanta; i++) | ||
9862 | + init_subtask(param->subtasks + i, i, quanta, period); | ||
9863 | + | ||
9864 | + if (t->rt_param.pfair) | ||
9865 | + /* get rid of stale allocation */ | ||
9866 | + kfree(t->rt_param.pfair); | ||
9867 | + | ||
9868 | + t->rt_param.pfair = param; | ||
9869 | + | ||
9870 | + /* spew out some debug info */ | ||
9871 | + dump_subtasks(t); | ||
9872 | + | ||
9873 | + return 0; | ||
9874 | +} | ||
9875 | + | ||
9876 | +static long pfair_activate_plugin(void) | ||
9877 | +{ | ||
9878 | + int cpu; | ||
9879 | + struct pfair_state* state; | ||
9880 | + | ||
9881 | + state = &__get_cpu_var(pfair_state); | ||
9882 | + pfair_time = current_quantum(state); | ||
9883 | + | ||
9884 | + TRACE("Activating PFAIR at q=%lu\n", pfair_time); | ||
9885 | + | ||
9886 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
9887 | + state = &per_cpu(pfair_state, cpu); | ||
9888 | + state->cur_tick = pfair_time; | ||
9889 | + state->local_tick = pfair_time; | ||
9890 | + state->missed_quanta = 0; | ||
9891 | + state->offset = cpu_stagger_offset(cpu); | ||
9892 | + } | ||
9893 | + | ||
9894 | + return 0; | ||
9895 | +} | ||
9896 | + | ||
9897 | +/* Plugin object */ | ||
9898 | +static struct sched_plugin pfair_plugin __cacheline_aligned_in_smp = { | ||
9899 | + .plugin_name = "PFAIR", | ||
9900 | + .tick = pfair_tick, | ||
9901 | + .task_new = pfair_task_new, | ||
9902 | + .task_exit = pfair_task_exit, | ||
9903 | + .schedule = pfair_schedule, | ||
9904 | + .task_wake_up = pfair_task_wake_up, | ||
9905 | + .task_block = pfair_task_block, | ||
9906 | + .admit_task = pfair_admit_task, | ||
9907 | + .release_at = pfair_release_at, | ||
9908 | + .complete_job = complete_job, | ||
9909 | + .activate_plugin = pfair_activate_plugin, | ||
9910 | +}; | ||
9911 | + | ||
9912 | +static int __init init_pfair(void) | ||
9913 | +{ | ||
9914 | + int cpu, i; | ||
9915 | + struct pfair_state *state; | ||
9916 | + | ||
9917 | + | ||
9918 | + /* | ||
9919 | +	 * initialize the shortcut to the per-cpu pfair state; | ||
9920 | + * there may be a problem here if someone removes a cpu | ||
9921 | + * while we are doing this initialization... and if cpus | ||
9922 | + * are added / removed later... is it a _real_ problem? | ||
9923 | + */ | ||
9924 | +	pstate = kmalloc(sizeof(struct pfair_state*) * num_online_cpus(), GFP_KERNEL); | ||
9924 | +	if (!pstate) | ||
9924 | +		return -ENOMEM; | ||
9925 | + | ||
9926 | + /* initialize release queue */ | ||
9927 | + for (i = 0; i < PFAIR_MAX_PERIOD; i++) | ||
9928 | + bheap_init(&release_queue[i]); | ||
9929 | + | ||
9930 | + /* initialize CPU state */ | ||
9931 | + for (cpu = 0; cpu < num_online_cpus(); cpu++) { | ||
9932 | + state = &per_cpu(pfair_state, cpu); | ||
9933 | + state->cpu = cpu; | ||
9934 | + state->cur_tick = 0; | ||
9935 | + state->local_tick = 0; | ||
9936 | + state->linked = NULL; | ||
9937 | + state->local = NULL; | ||
9938 | + state->scheduled = NULL; | ||
9939 | + state->missed_quanta = 0; | ||
9940 | + state->offset = cpu_stagger_offset(cpu); | ||
9941 | + pstate[cpu] = state; | ||
9942 | + } | ||
9943 | + | ||
9944 | + rt_domain_init(&pfair, pfair_ready_order, NULL, NULL); | ||
9945 | + return register_sched_plugin(&pfair_plugin); | ||
9946 | +} | ||
9947 | + | ||
9948 | +static void __exit clean_pfair(void) | ||
9949 | +{ | ||
9950 | + kfree(pstate); | ||
9951 | +} | ||
9952 | + | ||
9953 | +module_init(init_pfair); | ||
9954 | +module_exit(clean_pfair); | ||
9955 | diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c | ||
9956 | new file mode 100644 | ||
9957 | index 0000000..d912a64 | ||
9958 | --- /dev/null | ||
9959 | +++ b/litmus/sched_plugin.c | ||
9960 | @@ -0,0 +1,253 @@ | ||
9961 | +/* sched_plugin.c -- core infrastructure for the scheduler plugin system | ||
9962 | + * | ||
9963 | + * This file includes the initialization of the plugin system, the no-op Linux | ||
9964 | + * scheduler plugin, some dummy functions, and some helper functions. | ||
9965 | + */ | ||
9966 | + | ||
9967 | +#include <linux/list.h> | ||
9968 | +#include <linux/spinlock.h> | ||
9969 | +#include <linux/sched.h> | ||
9970 | + | ||
9971 | +#include <litmus/litmus.h> | ||
9972 | +#include <litmus/sched_plugin.h> | ||
9973 | +#include <litmus/preempt.h> | ||
9974 | +#include <litmus/jobs.h> | ||
9975 | + | ||
9976 | +/* | ||
9977 | + * Generic function to trigger preemption on either local or remote cpu | ||
9978 | + * from scheduler plugins. The key feature is that this function is | ||
9979 | + * non-preemptive section aware and does not invoke the scheduler / send | ||
9980 | + * IPIs if the to-be-preempted task is actually non-preemptive. | ||
9981 | + */ | ||
9982 | +void preempt_if_preemptable(struct task_struct* t, int cpu) | ||
9983 | +{ | ||
9984 | +	/* t is the real-time task executing on CPU cpu. If t is NULL, then | ||
9985 | +	 * CPU cpu is currently scheduling background work. | ||
9986 | + */ | ||
9987 | + | ||
9988 | + int reschedule = 0; | ||
9989 | + | ||
9990 | + if (!t) | ||
9991 | + /* move non-real-time task out of the way */ | ||
9992 | + reschedule = 1; | ||
9993 | + else { | ||
9994 | + if (smp_processor_id() == cpu) { | ||
9995 | + /* local CPU case */ | ||
9996 | + /* check if we need to poke userspace */ | ||
9997 | + if (is_user_np(t)) | ||
9998 | + /* yes, poke it */ | ||
9999 | + request_exit_np(t); | ||
10000 | + else if (!is_kernel_np(t)) | ||
10001 | + /* only if we are allowed to preempt the | ||
10002 | + * currently-executing task */ | ||
10003 | + reschedule = 1; | ||
10004 | + } else { | ||
10005 | + /* remote CPU case */ | ||
10006 | + if (is_user_np(t)) { | ||
10007 | + /* need to notify user space of delayed | ||
10008 | + * preemption */ | ||
10009 | + | ||
10010 | + /* to avoid a race, set the flag, then test | ||
10011 | + * again */ | ||
10012 | + request_exit_np(t); | ||
10013 | + /* make sure it got written */ | ||
10014 | + mb(); | ||
10015 | + } | ||
10016 | +			/* Only send an IPI if the remote task might have raced | ||
10017 | +			 * with our request, i.e., to make sure the preemption | ||
10018 | +			 * happens in case the task just exited its critical section. | ||
10019 | + */ | ||
10020 | + reschedule = !is_np(t) && !is_kernel_np(t); | ||
10021 | + } | ||
10022 | + } | ||
10023 | + if (likely(reschedule)) | ||
10024 | + litmus_reschedule(cpu); | ||
10025 | +} | ||
10026 | + | ||
10027 | + | ||
10028 | +/************************************************************* | ||
10029 | + * Dummy plugin functions * | ||
10030 | + *************************************************************/ | ||
10031 | + | ||
10032 | +static void litmus_dummy_finish_switch(struct task_struct * prev) | ||
10033 | +{ | ||
10034 | +} | ||
10035 | + | ||
10036 | +static struct task_struct* litmus_dummy_schedule(struct task_struct * prev) | ||
10037 | +{ | ||
10038 | + sched_state_task_picked(); | ||
10039 | + return NULL; | ||
10040 | +} | ||
10041 | + | ||
10042 | +static void litmus_dummy_tick(struct task_struct* tsk) | ||
10043 | +{ | ||
10044 | +} | ||
10045 | + | ||
10046 | +static long litmus_dummy_admit_task(struct task_struct* tsk) | ||
10047 | +{ | ||
10048 | + printk(KERN_CRIT "LITMUS^RT: Linux plugin rejects %s/%d.\n", | ||
10049 | + tsk->comm, tsk->pid); | ||
10050 | + return -EINVAL; | ||
10051 | +} | ||
10052 | + | ||
10053 | +static void litmus_dummy_task_new(struct task_struct *t, int on_rq, int running) | ||
10054 | +{ | ||
10055 | +} | ||
10056 | + | ||
10057 | +static void litmus_dummy_task_wake_up(struct task_struct *task) | ||
10058 | +{ | ||
10059 | +} | ||
10060 | + | ||
10061 | +static void litmus_dummy_task_block(struct task_struct *task) | ||
10062 | +{ | ||
10063 | +} | ||
10064 | + | ||
10065 | +static void litmus_dummy_task_exit(struct task_struct *task) | ||
10066 | +{ | ||
10067 | +} | ||
10068 | + | ||
10069 | +static long litmus_dummy_complete_job(void) | ||
10070 | +{ | ||
10071 | + return -ENOSYS; | ||
10072 | +} | ||
10073 | + | ||
10074 | +static long litmus_dummy_activate_plugin(void) | ||
10075 | +{ | ||
10076 | + return 0; | ||
10077 | +} | ||
10078 | + | ||
10079 | +static long litmus_dummy_deactivate_plugin(void) | ||
10080 | +{ | ||
10081 | + return 0; | ||
10082 | +} | ||
10083 | + | ||
10084 | +#ifdef CONFIG_FMLP | ||
10085 | + | ||
10086 | +static long litmus_dummy_inherit_priority(struct pi_semaphore *sem, | ||
10087 | + struct task_struct *new_owner) | ||
10088 | +{ | ||
10089 | + return -ENOSYS; | ||
10090 | +} | ||
10091 | + | ||
10092 | +static long litmus_dummy_return_priority(struct pi_semaphore *sem) | ||
10093 | +{ | ||
10094 | + return -ENOSYS; | ||
10095 | +} | ||
10096 | + | ||
10097 | +static long litmus_dummy_pi_block(struct pi_semaphore *sem, | ||
10098 | + struct task_struct *new_waiter) | ||
10099 | +{ | ||
10100 | + return -ENOSYS; | ||
10101 | +} | ||
10102 | + | ||
10103 | +#endif | ||
10104 | + | ||
10105 | + | ||
10106 | +/* The default scheduler plugin. It doesn't do anything and lets Linux do its | ||
10107 | + * job. | ||
10108 | + */ | ||
10109 | +struct sched_plugin linux_sched_plugin = { | ||
10110 | + .plugin_name = "Linux", | ||
10111 | + .tick = litmus_dummy_tick, | ||
10112 | + .task_new = litmus_dummy_task_new, | ||
10113 | + .task_exit = litmus_dummy_task_exit, | ||
10114 | + .task_wake_up = litmus_dummy_task_wake_up, | ||
10115 | + .task_block = litmus_dummy_task_block, | ||
10116 | + .complete_job = litmus_dummy_complete_job, | ||
10117 | + .schedule = litmus_dummy_schedule, | ||
10118 | + .finish_switch = litmus_dummy_finish_switch, | ||
10119 | + .activate_plugin = litmus_dummy_activate_plugin, | ||
10120 | + .deactivate_plugin = litmus_dummy_deactivate_plugin, | ||
10121 | +#ifdef CONFIG_FMLP | ||
10122 | + .inherit_priority = litmus_dummy_inherit_priority, | ||
10123 | + .return_priority = litmus_dummy_return_priority, | ||
10124 | + .pi_block = litmus_dummy_pi_block, | ||
10125 | +#endif | ||
10126 | + .admit_task = litmus_dummy_admit_task | ||
10127 | +}; | ||
10128 | + | ||
10129 | +/* | ||
10130 | + * The reference to the current plugin that is used to schedule tasks | ||
10131 | + * within the system. It stores references to the actual function | ||
10132 | + * implementations. Should be initialized by calling "init_***_plugin()". | ||
10133 | + */ | ||
10134 | +struct sched_plugin *litmus = &linux_sched_plugin; | ||
10135 | + | ||
10136 | +/* the list of registered scheduling plugins */ | ||
10137 | +static LIST_HEAD(sched_plugins); | ||
10138 | +static DEFINE_RAW_SPINLOCK(sched_plugins_lock); | ||
10139 | + | ||
10140 | +#define CHECK(func) {\ | ||
10141 | + if (!plugin->func) \ | ||
10142 | + plugin->func = litmus_dummy_ ## func;} | ||
10143 | + | ||
10144 | +/* FIXME: get reference to module */ | ||
10145 | +int register_sched_plugin(struct sched_plugin* plugin) | ||
10146 | +{ | ||
10147 | + printk(KERN_INFO "Registering LITMUS^RT plugin %s.\n", | ||
10148 | + plugin->plugin_name); | ||
10149 | + | ||
10150 | + /* make sure we don't trip over null pointers later */ | ||
10151 | + CHECK(finish_switch); | ||
10152 | + CHECK(schedule); | ||
10153 | + CHECK(tick); | ||
10154 | + CHECK(task_wake_up); | ||
10155 | + CHECK(task_exit); | ||
10156 | + CHECK(task_block); | ||
10157 | + CHECK(task_new); | ||
10158 | + CHECK(complete_job); | ||
10159 | + CHECK(activate_plugin); | ||
10160 | + CHECK(deactivate_plugin); | ||
10161 | +#ifdef CONFIG_FMLP | ||
10162 | + CHECK(inherit_priority); | ||
10163 | + CHECK(return_priority); | ||
10164 | + CHECK(pi_block); | ||
10165 | +#endif | ||
10166 | + CHECK(admit_task); | ||
10167 | + | ||
10168 | + if (!plugin->release_at) | ||
10169 | + plugin->release_at = release_at; | ||
10170 | + | ||
10171 | + raw_spin_lock(&sched_plugins_lock); | ||
10172 | + list_add(&plugin->list, &sched_plugins); | ||
10173 | + raw_spin_unlock(&sched_plugins_lock); | ||
10174 | + | ||
10175 | + return 0; | ||
10176 | +} | ||
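Because CHECK() substitutes the litmus_dummy_* implementation for every callback left unset, a new plugin only needs to supply the hooks it actually uses. A hypothetical minimal registration, where demo_schedule() and demo_admit_task() are assumed to be defined elsewhere in the same file:

static struct sched_plugin demo_plugin __cacheline_aligned_in_smp = {
	.plugin_name = "DEMO",
	/* every hook left NULL here is defaulted by CHECK() */
	.schedule    = demo_schedule,
	.admit_task  = demo_admit_task,
};

static int __init init_demo(void)
{
	return register_sched_plugin(&demo_plugin);
}
module_init(init_demo);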
10177 | + | ||
10178 | + | ||
10179 | +/* FIXME: reference counting, etc. */ | ||
10180 | +struct sched_plugin* find_sched_plugin(const char* name) | ||
10181 | +{ | ||
10182 | + struct list_head *pos; | ||
10183 | + struct sched_plugin *plugin; | ||
10184 | + | ||
10185 | + raw_spin_lock(&sched_plugins_lock); | ||
10186 | + list_for_each(pos, &sched_plugins) { | ||
10187 | + plugin = list_entry(pos, struct sched_plugin, list); | ||
10188 | + if (!strcmp(plugin->plugin_name, name)) | ||
10189 | + goto out_unlock; | ||
10190 | + } | ||
10191 | + plugin = NULL; | ||
10192 | + | ||
10193 | +out_unlock: | ||
10194 | + raw_spin_unlock(&sched_plugins_lock); | ||
10195 | + return plugin; | ||
10196 | +} | ||
10197 | + | ||
10198 | +int print_sched_plugins(char* buf, int max) | ||
10199 | +{ | ||
10200 | + int count = 0; | ||
10201 | + struct list_head *pos; | ||
10202 | + struct sched_plugin *plugin; | ||
10203 | + | ||
10204 | + raw_spin_lock(&sched_plugins_lock); | ||
10205 | + list_for_each(pos, &sched_plugins) { | ||
10206 | + plugin = list_entry(pos, struct sched_plugin, list); | ||
10207 | + count += snprintf(buf + count, max - count, "%s\n", plugin->plugin_name); | ||
10208 | + if (max - count <= 0) | ||
10209 | + break; | ||
10210 | + } | ||
10211 | + raw_spin_unlock(&sched_plugins_lock); | ||
10212 | + return count; | ||
10213 | +} | ||
10214 | diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c | ||
10215 | new file mode 100644 | ||
10216 | index 0000000..b89823d | ||
10217 | --- /dev/null | ||
10218 | +++ b/litmus/sched_psn_edf.c | ||
10219 | @@ -0,0 +1,483 @@ | ||
10220 | +/* | ||
10221 | + * litmus/sched_psn_edf.c | ||
10222 | + * | ||
10223 | + * Implementation of the PSN-EDF scheduler plugin. | ||
10224 | + * Based on kern/sched_part_edf.c and kern/sched_gsn_edf.c. | ||
10225 | + * | ||
10226 | + * Suspensions and non-preemptable sections are supported. | ||
10227 | + * Priority inheritance is not supported. | ||
10228 | + */ | ||
10229 | + | ||
10230 | +#include <linux/percpu.h> | ||
10231 | +#include <linux/sched.h> | ||
10232 | +#include <linux/list.h> | ||
10233 | +#include <linux/spinlock.h> | ||
10234 | +#include <linux/module.h> | ||
10235 | + | ||
10236 | +#include <litmus/litmus.h> | ||
10237 | +#include <litmus/jobs.h> | ||
10238 | +#include <litmus/preempt.h> | ||
10239 | +#include <litmus/sched_plugin.h> | ||
10240 | +#include <litmus/edf_common.h> | ||
10241 | +#include <litmus/sched_trace.h> | ||
10242 | + | ||
10243 | +typedef struct { | ||
10244 | + rt_domain_t domain; | ||
10245 | + int cpu; | ||
10246 | + struct task_struct* scheduled; /* only RT tasks */ | ||
10247 | +/* | ||
10248 | + * scheduling lock slock | ||
10249 | + * protects the domain and serializes scheduling decisions | ||
10250 | + */ | ||
10251 | +#define slock domain.ready_lock | ||
10252 | + | ||
10253 | +} psnedf_domain_t; | ||
10254 | + | ||
10255 | +DEFINE_PER_CPU(psnedf_domain_t, psnedf_domains); | ||
10256 | + | ||
10257 | +#define local_edf (&__get_cpu_var(psnedf_domains).domain) | ||
10258 | +#define local_pedf (&__get_cpu_var(psnedf_domains)) | ||
10259 | +#define remote_edf(cpu) (&per_cpu(psnedf_domains, cpu).domain) | ||
10260 | +#define remote_pedf(cpu) (&per_cpu(psnedf_domains, cpu)) | ||
10261 | +#define task_edf(task) remote_edf(get_partition(task)) | ||
10262 | +#define task_pedf(task) remote_pedf(get_partition(task)) | ||
10263 | + | ||
10264 | + | ||
10265 | +static void psnedf_domain_init(psnedf_domain_t* pedf, | ||
10266 | + check_resched_needed_t check, | ||
10267 | + release_jobs_t release, | ||
10268 | + int cpu) | ||
10269 | +{ | ||
10270 | + edf_domain_init(&pedf->domain, check, release); | ||
10271 | + pedf->cpu = cpu; | ||
10272 | + pedf->scheduled = NULL; | ||
10273 | +} | ||
10274 | + | ||
10275 | +static void requeue(struct task_struct* t, rt_domain_t *edf) | ||
10276 | +{ | ||
10277 | + if (t->state != TASK_RUNNING) | ||
10278 | + TRACE_TASK(t, "requeue: !TASK_RUNNING\n"); | ||
10279 | + | ||
10280 | + set_rt_flags(t, RT_F_RUNNING); | ||
10281 | + if (is_released(t, litmus_clock())) | ||
10282 | + __add_ready(edf, t); | ||
10283 | + else | ||
10284 | + add_release(edf, t); /* it has got to wait */ | ||
10285 | +} | ||
10286 | + | ||
10287 | +/* we assume the lock is being held */ | ||
10288 | +static void preempt(psnedf_domain_t *pedf) | ||
10289 | +{ | ||
10290 | + preempt_if_preemptable(pedf->scheduled, pedf->cpu); | ||
10291 | +} | ||
10292 | + | ||
10293 | +/* This check is trivial in partitioned systems as we only have to consider | ||
10294 | + * the CPU of the partition. | ||
10295 | + */ | ||
10296 | +static int psnedf_check_resched(rt_domain_t *edf) | ||
10297 | +{ | ||
10298 | + psnedf_domain_t *pedf = container_of(edf, psnedf_domain_t, domain); | ||
10299 | + | ||
10300 | + /* because this is a callback from rt_domain_t we already hold | ||
10301 | + * the necessary lock for the ready queue | ||
10302 | + */ | ||
10303 | + if (edf_preemption_needed(edf, pedf->scheduled)) { | ||
10304 | + preempt(pedf); | ||
10305 | + return 1; | ||
10306 | + } else | ||
10307 | + return 0; | ||
10308 | +} | ||
10309 | + | ||
10310 | +static void job_completion(struct task_struct* t, int forced) | ||
10311 | +{ | ||
10312 | + sched_trace_task_completion(t,forced); | ||
10313 | + TRACE_TASK(t, "job_completion().\n"); | ||
10314 | + | ||
10315 | + set_rt_flags(t, RT_F_SLEEP); | ||
10316 | + prepare_for_next_period(t); | ||
10317 | +} | ||
10318 | + | ||
10319 | +static void psnedf_tick(struct task_struct *t) | ||
10320 | +{ | ||
10321 | + psnedf_domain_t *pedf = local_pedf; | ||
10322 | + | ||
10323 | + /* Check for inconsistency. We don't need the lock for this since | ||
10324 | + * ->scheduled is only changed in schedule, which obviously is not | ||
10325 | + * executing in parallel on this CPU | ||
10326 | + */ | ||
10327 | + BUG_ON(is_realtime(t) && t != pedf->scheduled); | ||
10328 | + | ||
10329 | + if (is_realtime(t) && budget_enforced(t) && budget_exhausted(t)) { | ||
10330 | + if (!is_np(t)) { | ||
10331 | + litmus_reschedule_local(); | ||
10332 | + TRACE("psnedf_scheduler_tick: " | ||
10333 | + "%d is preemptable " | ||
10334 | + " => FORCE_RESCHED\n", t->pid); | ||
10335 | + } else if (is_user_np(t)) { | ||
10336 | + TRACE("psnedf_scheduler_tick: " | ||
10337 | + "%d is non-preemptable, " | ||
10338 | + "preemption delayed.\n", t->pid); | ||
10339 | + request_exit_np(t); | ||
10340 | + } | ||
10341 | + } | ||
10342 | +} | ||
10343 | + | ||
10344 | +static struct task_struct* psnedf_schedule(struct task_struct * prev) | ||
10345 | +{ | ||
10346 | + psnedf_domain_t* pedf = local_pedf; | ||
10347 | + rt_domain_t* edf = &pedf->domain; | ||
10348 | + struct task_struct* next; | ||
10349 | + | ||
10350 | + int out_of_time, sleep, preempt, | ||
10351 | + np, exists, blocks, resched; | ||
10352 | + | ||
10353 | + raw_spin_lock(&pedf->slock); | ||
10354 | + | ||
10355 | + /* sanity checking | ||
10356 | +	 * unlike in GSN-EDF, when a task exits (dies), | ||
10357 | +	 * pedf->scheduled may be NULL while prev _is_ real-time | ||
10358 | + */ | ||
10359 | + BUG_ON(pedf->scheduled && pedf->scheduled != prev); | ||
10360 | + BUG_ON(pedf->scheduled && !is_realtime(prev)); | ||
10361 | + | ||
10362 | + /* (0) Determine state */ | ||
10363 | + exists = pedf->scheduled != NULL; | ||
10364 | + blocks = exists && !is_running(pedf->scheduled); | ||
10365 | + out_of_time = exists && | ||
10366 | + budget_enforced(pedf->scheduled) && | ||
10367 | + budget_exhausted(pedf->scheduled); | ||
10368 | + np = exists && is_np(pedf->scheduled); | ||
10369 | + sleep = exists && get_rt_flags(pedf->scheduled) == RT_F_SLEEP; | ||
10370 | + preempt = edf_preemption_needed(edf, prev); | ||
10371 | + | ||
10372 | + /* If we need to preempt do so. | ||
10373 | + * The following checks set resched to 1 in case of special | ||
10374 | + * circumstances. | ||
10375 | + */ | ||
10376 | + resched = preempt; | ||
10377 | + | ||
10378 | + /* If a task blocks we have no choice but to reschedule. | ||
10379 | + */ | ||
10380 | + if (blocks) | ||
10381 | + resched = 1; | ||
10382 | + | ||
10383 | + /* Request a sys_exit_np() call if we would like to preempt but cannot. | ||
10384 | + * Multiple calls to request_exit_np() don't hurt. | ||
10385 | + */ | ||
10386 | + if (np && (out_of_time || preempt || sleep)) | ||
10387 | + request_exit_np(pedf->scheduled); | ||
10388 | + | ||
10389 | + /* Any task that is preemptable and either exhausts its execution | ||
10390 | + * budget or wants to sleep completes. We may have to reschedule after | ||
10391 | + * this. | ||
10392 | + */ | ||
10393 | + if (!np && (out_of_time || sleep) && !blocks) { | ||
10394 | + job_completion(pedf->scheduled, !sleep); | ||
10395 | + resched = 1; | ||
10396 | + } | ||
10397 | + | ||
10398 | + /* The final scheduling decision. Do we need to switch for some reason? | ||
10399 | + * Switch if we are in RT mode and have no task or if we need to | ||
10400 | + * resched. | ||
10401 | + */ | ||
10402 | + next = NULL; | ||
10403 | + if ((!np || blocks) && (resched || !exists)) { | ||
10404 | + /* When preempting a task that does not block, then | ||
10405 | + * re-insert it into either the ready queue or the | ||
10406 | + * release queue (if it completed). requeue() picks | ||
10407 | + * the appropriate queue. | ||
10408 | + */ | ||
10409 | + if (pedf->scheduled && !blocks) | ||
10410 | + requeue(pedf->scheduled, edf); | ||
10411 | + next = __take_ready(edf); | ||
10412 | + } else | ||
10413 | + /* Only override Linux scheduler if we have a real-time task | ||
10414 | + * scheduled that needs to continue. | ||
10415 | + */ | ||
10416 | + if (exists) | ||
10417 | + next = prev; | ||
10418 | + | ||
10419 | + if (next) { | ||
10420 | + TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); | ||
10421 | + set_rt_flags(next, RT_F_RUNNING); | ||
10422 | + } else { | ||
10423 | + TRACE("becoming idle at %llu\n", litmus_clock()); | ||
10424 | + } | ||
10425 | + | ||
10426 | + pedf->scheduled = next; | ||
10427 | + sched_state_task_picked(); | ||
10428 | + raw_spin_unlock(&pedf->slock); | ||
10429 | + | ||
10430 | + return next; | ||
10431 | +} | ||
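The scheduling decision above condenses to a single predicate. As a reading aid, here is a minimal, self-contained restatement; the helper name and the tabulated cases are editorial, not part of the patch:

    #include <stdio.h>

    /* Editorial sketch of the core pick-new-task condition from
     * psnedf_schedule() above. A new task is taken from the ready queue
     * iff the scheduled task may be preempted (or has blocked anyway)
     * and we either must resched or the CPU is idle. */
    static int picks_new_task(int np, int blocks, int resched, int exists)
    {
            return (!np || blocks) && (resched || !exists);
    }

    int main(void)
    {
            printf("%d\n", picks_new_task(1, 0, 1, 1)); /* 0: np task runs on */
            printf("%d\n", picks_new_task(1, 1, 1, 1)); /* 1: blocking forces a pick */
            printf("%d\n", picks_new_task(0, 0, 0, 0)); /* 1: idle CPU takes what's ready */
            return 0;
    }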
10432 | + | ||
10433 | + | ||
10434 | +/* Prepare a task for running in RT mode | ||
10435 | + */ | ||
10436 | +static void psnedf_task_new(struct task_struct * t, int on_rq, int running) | ||
10437 | +{ | ||
10438 | + rt_domain_t* edf = task_edf(t); | ||
10439 | + psnedf_domain_t* pedf = task_pedf(t); | ||
10440 | + unsigned long flags; | ||
10441 | + | ||
10442 | + TRACE_TASK(t, "psn edf: task new, cpu = %d\n", | ||
10443 | + t->rt_param.task_params.cpu); | ||
10444 | + | ||
10445 | + /* setup job parameters */ | ||
10446 | + release_at(t, litmus_clock()); | ||
10447 | + | ||
10448 | + /* The task should be marked as running while queued; otherwise the | ||
10449 | + * signal code will try to wake it up, with fatal consequences. | ||
10450 | + */ | ||
10451 | + raw_spin_lock_irqsave(&pedf->slock, flags); | ||
10452 | + if (running) { | ||
10453 | + /* there shouldn't be anything else running at the time */ | ||
10454 | + BUG_ON(pedf->scheduled); | ||
10455 | + pedf->scheduled = t; | ||
10456 | + } else { | ||
10457 | + requeue(t, edf); | ||
10458 | + /* maybe we have to reschedule */ | ||
10459 | + preempt(pedf); | ||
10460 | + } | ||
10461 | + raw_spin_unlock_irqrestore(&pedf->slock, flags); | ||
10462 | +} | ||
10463 | + | ||
10464 | +static void psnedf_task_wake_up(struct task_struct *task) | ||
10465 | +{ | ||
10466 | + unsigned long flags; | ||
10467 | + psnedf_domain_t* pedf = task_pedf(task); | ||
10468 | + rt_domain_t* edf = task_edf(task); | ||
10469 | + lt_t now; | ||
10470 | + | ||
10471 | + TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); | ||
10472 | + raw_spin_lock_irqsave(&pedf->slock, flags); | ||
10473 | + BUG_ON(is_queued(task)); | ||
10474 | + /* We need to take suspensions caused by semaphores into | ||
10475 | + * account! If a job resumes after being suspended due to acquiring | ||
10476 | + * a semaphore, it should never be treated as a new job release. | ||
10477 | + * | ||
10478 | + * FIXME: This should be done in some more predictable and userspace-controlled way. | ||
10479 | + */ | ||
10480 | + now = litmus_clock(); | ||
10481 | + if (is_tardy(task, now) && | ||
10482 | + get_rt_flags(task) != RT_F_EXIT_SEM) { | ||
10483 | + /* new sporadic release */ | ||
10484 | + release_at(task, now); | ||
10485 | + sched_trace_task_release(task); | ||
10486 | + } | ||
10487 | + | ||
10488 | + /* Only add to ready queue if it is not the currently-scheduled | ||
10489 | + * task. This could be the case if a task was woken up concurrently | ||
10490 | + * on a remote CPU before the executing CPU got around to actually | ||
10491 | + * de-scheduling the task, i.e., wake_up() raced with schedule() | ||
10492 | + * and won. | ||
10493 | + */ | ||
10494 | + if (pedf->scheduled != task) | ||
10495 | + requeue(task, edf); | ||
10496 | + | ||
10497 | + raw_spin_unlock_irqrestore(&pedf->slock, flags); | ||
10498 | + TRACE_TASK(task, "wake up done\n"); | ||
10499 | +} | ||
10500 | + | ||
10501 | +static void psnedf_task_block(struct task_struct *t) | ||
10502 | +{ | ||
10503 | + /* only running tasks can block, thus t is in no queue */ | ||
10504 | + TRACE_TASK(t, "block at %llu, state=%d\n", litmus_clock(), t->state); | ||
10505 | + | ||
10506 | + BUG_ON(!is_realtime(t)); | ||
10507 | + BUG_ON(is_queued(t)); | ||
10508 | +} | ||
10509 | + | ||
10510 | +static void psnedf_task_exit(struct task_struct * t) | ||
10511 | +{ | ||
10512 | + unsigned long flags; | ||
10513 | + psnedf_domain_t* pedf = task_pedf(t); | ||
10514 | + rt_domain_t* edf; | ||
10515 | + | ||
10516 | + raw_spin_lock_irqsave(&pedf->slock, flags); | ||
10517 | + if (is_queued(t)) { | ||
10518 | + /* dequeue */ | ||
10519 | + edf = task_edf(t); | ||
10520 | + remove(edf, t); | ||
10521 | + } | ||
10522 | + if (pedf->scheduled == t) | ||
10523 | + pedf->scheduled = NULL; | ||
10524 | + | ||
10525 | + TRACE_TASK(t, "RIP, now reschedule\n"); | ||
10526 | + | ||
10527 | + preempt(pedf); | ||
10528 | + raw_spin_unlock_irqrestore(&pedf->slock, flags); | ||
10529 | +} | ||
10530 | + | ||
10531 | +#ifdef CONFIG_FMLP | ||
10532 | +static long psnedf_pi_block(struct pi_semaphore *sem, | ||
10533 | + struct task_struct *new_waiter) | ||
10534 | +{ | ||
10535 | + psnedf_domain_t* pedf; | ||
10536 | + rt_domain_t* edf; | ||
10537 | + struct task_struct* t; | ||
10538 | + int cpu = get_partition(new_waiter); | ||
10539 | + | ||
10540 | + BUG_ON(!new_waiter); | ||
10541 | + | ||
10542 | + if (edf_higher_prio(new_waiter, sem->hp.cpu_task[cpu])) { | ||
10543 | + TRACE_TASK(new_waiter, " boosts priority\n"); | ||
10544 | + pedf = task_pedf(new_waiter); | ||
10545 | + edf = task_edf(new_waiter); | ||
10546 | + | ||
10547 | + /* interrupts already disabled */ | ||
10548 | + raw_spin_lock(&pedf->slock); | ||
10549 | + | ||
10550 | + /* store new highest-priority task */ | ||
10551 | + sem->hp.cpu_task[cpu] = new_waiter; | ||
10552 | + if (sem->holder && | ||
10553 | + get_partition(sem->holder) == get_partition(new_waiter)) { | ||
10554 | + /* let holder inherit */ | ||
10555 | + sem->holder->rt_param.inh_task = new_waiter; | ||
10556 | + t = sem->holder; | ||
10557 | + if (is_queued(t)) { | ||
10558 | + /* queued in domain*/ | ||
10559 | + remove(edf, t); | ||
10560 | + /* readd to make priority change take place */ | ||
10561 | + /* FIXME: this looks outdated */ | ||
10562 | + if (is_released(t, litmus_clock())) | ||
10563 | + __add_ready(edf, t); | ||
10564 | + else | ||
10565 | + add_release(edf, t); | ||
10566 | + } | ||
10567 | + } | ||
10568 | + | ||
10569 | + /* check if we need to reschedule */ | ||
10570 | + if (edf_preemption_needed(edf, current)) | ||
10571 | + preempt(pedf); | ||
10572 | + | ||
10573 | + raw_spin_unlock(&pedf->slock); | ||
10574 | + } | ||
10575 | + | ||
10576 | + return 0; | ||
10577 | +} | ||
10578 | + | ||
10579 | +static long psnedf_inherit_priority(struct pi_semaphore *sem, | ||
10580 | + struct task_struct *new_owner) | ||
10581 | +{ | ||
10582 | + int cpu = get_partition(new_owner); | ||
10583 | + | ||
10584 | + new_owner->rt_param.inh_task = sem->hp.cpu_task[cpu]; | ||
10585 | + if (sem->hp.cpu_task[cpu] && new_owner != sem->hp.cpu_task[cpu]) { | ||
10586 | + TRACE_TASK(new_owner, | ||
10587 | + "inherited priority from %s/%d\n", | ||
10588 | + sem->hp.cpu_task[cpu]->comm, | ||
10589 | + sem->hp.cpu_task[cpu]->pid); | ||
10590 | + } else | ||
10591 | + TRACE_TASK(new_owner, | ||
10592 | + "cannot inherit priority: " | ||
10593 | + "no higher priority job waits on this CPU!\n"); | ||
10594 | + /* make new owner non-preemptable as required by FMLP under | ||
10595 | + * PSN-EDF. | ||
10596 | + */ | ||
10597 | + make_np(new_owner); | ||
10598 | + return 0; | ||
10599 | +} | ||
10600 | + | ||
10601 | + | ||
10602 | +/* This function is called on a semaphore release, and assumes that | ||
10603 | + * the current task is also the semaphore holder. | ||
10604 | + */ | ||
10605 | +static long psnedf_return_priority(struct pi_semaphore *sem) | ||
10606 | +{ | ||
10607 | + struct task_struct* t = current; | ||
10608 | + psnedf_domain_t* pedf = task_pedf(t); | ||
10609 | + rt_domain_t* edf = task_edf(t); | ||
10610 | + int ret = 0; | ||
10611 | + int cpu = get_partition(current); | ||
10612 | + int still_np; | ||
10613 | + | ||
10614 | + | ||
10615 | + /* Find new highest-priority semaphore task | ||
10616 | + * if holder task is the current hp.cpu_task[cpu]. | ||
10617 | + * | ||
10618 | + * Calling function holds sem->wait.lock. | ||
10619 | + */ | ||
10620 | + if (t == sem->hp.cpu_task[cpu]) | ||
10621 | + edf_set_hp_cpu_task(sem, cpu); | ||
10622 | + | ||
10623 | + still_np = take_np(current); | ||
10624 | + | ||
10625 | + /* Since we don't nest resources, this | ||
10626 | + * should always be zero */ | ||
10627 | + BUG_ON(still_np); | ||
10628 | + | ||
10629 | + if (current->rt_param.inh_task) { | ||
10630 | + TRACE_CUR("return priority of %s/%d\n", | ||
10631 | + current->rt_param.inh_task->comm, | ||
10632 | + current->rt_param.inh_task->pid); | ||
10633 | + } else | ||
10634 | + TRACE_CUR(" no priority to return %p\n", sem); | ||
10635 | + | ||
10636 | + | ||
10637 | + /* Always check for delayed preemptions that might have become | ||
10638 | + * necessary due to non-preemptive execution. | ||
10639 | + */ | ||
10640 | + raw_spin_lock(&pedf->slock); | ||
10641 | + | ||
10642 | + /* Reset inh_task to NULL. */ | ||
10643 | + current->rt_param.inh_task = NULL; | ||
10644 | + | ||
10645 | + /* check if we need to reschedule */ | ||
10646 | + if (edf_preemption_needed(edf, current)) | ||
10647 | + preempt(pedf); | ||
10648 | + | ||
10649 | + raw_spin_unlock(&pedf->slock); | ||
10650 | + | ||
10651 | + | ||
10652 | + return ret; | ||
10653 | +} | ||
10654 | + | ||
10655 | +#endif | ||
10656 | + | ||
10657 | +static long psnedf_admit_task(struct task_struct* tsk) | ||
10658 | +{ | ||
10659 | + return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; | ||
10660 | +} | ||
10661 | + | ||
10662 | +/* Plugin object */ | ||
10663 | +static struct sched_plugin psn_edf_plugin __cacheline_aligned_in_smp = { | ||
10664 | + .plugin_name = "PSN-EDF", | ||
10665 | +#ifdef CONFIG_SRP | ||
10666 | + .srp_active = 1, | ||
10667 | +#endif | ||
10668 | + .tick = psnedf_tick, | ||
10669 | + .task_new = psnedf_task_new, | ||
10670 | + .complete_job = complete_job, | ||
10671 | + .task_exit = psnedf_task_exit, | ||
10672 | + .schedule = psnedf_schedule, | ||
10673 | + .task_wake_up = psnedf_task_wake_up, | ||
10674 | + .task_block = psnedf_task_block, | ||
10675 | +#ifdef CONFIG_FMLP | ||
10676 | + .fmlp_active = 1, | ||
10677 | + .pi_block = psnedf_pi_block, | ||
10678 | + .inherit_priority = psnedf_inherit_priority, | ||
10679 | + .return_priority = psnedf_return_priority, | ||
10680 | +#endif | ||
10681 | + .admit_task = psnedf_admit_task | ||
10682 | +}; | ||
10683 | + | ||
10684 | + | ||
10685 | +static int __init init_psn_edf(void) | ||
10686 | +{ | ||
10687 | + int i; | ||
10688 | + | ||
10689 | + /* We do not really want to support cpu hotplug, do we? ;) | ||
10690 | + * However, if we are ever crazy enough to do so, | ||
10691 | + * we cannot use num_online_cpus() here. | ||
10692 | + */ | ||
10693 | + for (i = 0; i < num_online_cpus(); i++) { | ||
10694 | + psnedf_domain_init(remote_pedf(i), | ||
10695 | + psnedf_check_resched, | ||
10696 | + NULL, i); | ||
10697 | + } | ||
10698 | + return register_sched_plugin(&psn_edf_plugin); | ||
10699 | +} | ||
10700 | + | ||
10701 | +module_init(init_psn_edf); | ||
10702 | + | ||
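For context, this is roughly how a task would be admitted to this plugin from userspace. The sketch assumes the liblitmus API distributed with this release (init_litmus(), be_migrate_to(), set_rt_task_param(), task_mode(), sleep_next_period()); these names and the rt_task field layout are assumptions based on contemporary liblitmus, not anything defined by this patch:

    /* Hypothetical liblitmus usage sketch; API names are assumptions. */
    #include <unistd.h>
    #include <litmus.h>    /* from the liblitmus accompanying this release */

    int main(void)
    {
            struct rt_task param = {0};
            int cpu = 0;

            param.exec_cost = 10000000ULL;   /* 10 ms WCET, in ns */
            param.period    = 100000000ULL;  /* 100 ms period, in ns */
            param.cpu       = cpu;           /* checked by psnedf_admit_task() */

            init_litmus();

            /* psnedf_admit_task() rejects tasks that are not already on
             * their partition, so migrate before entering real-time mode. */
            be_migrate_to(cpu);
            set_rt_task_param(getpid(), &param);
            task_mode(LITMUS_RT_TASK);

            /* ... real-time job body ... */
            sleep_next_period();

            task_mode(BACKGROUND_TASK);
            return 0;
    }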
10703 | diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c | ||
10704 | new file mode 100644 | ||
10705 | index 0000000..a15b25d | ||
10706 | --- /dev/null | ||
10707 | +++ b/litmus/sched_task_trace.c | ||
10708 | @@ -0,0 +1,226 @@ | ||
10709 | +/* | ||
10710 | + * sched_task_trace.c -- record scheduling events to a byte stream | ||
10711 | + */ | ||
10712 | + | ||
10713 | +#define NO_TASK_TRACE_DECLS | ||
10714 | + | ||
10715 | +#include <linux/module.h> | ||
10716 | +#include <linux/sched.h> | ||
10717 | +#include <linux/percpu.h> | ||
10718 | + | ||
10719 | +#include <litmus/ftdev.h> | ||
10720 | +#include <litmus/litmus.h> | ||
10721 | + | ||
10722 | +#include <litmus/sched_trace.h> | ||
10723 | +#include <litmus/feather_trace.h> | ||
10724 | +#include <litmus/ftdev.h> | ||
10725 | + | ||
10726 | + | ||
10727 | +#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT) | ||
10728 | + | ||
10729 | +#define now() litmus_clock() | ||
10730 | + | ||
10731 | +struct local_buffer { | ||
10732 | + struct st_event_record record[NO_EVENTS]; | ||
10733 | + char flag[NO_EVENTS]; | ||
10734 | + struct ft_buffer ftbuf; | ||
10735 | +}; | ||
10736 | + | ||
10737 | +DEFINE_PER_CPU(struct local_buffer, st_event_buffer); | ||
10738 | + | ||
10739 | +static struct ftdev st_dev; | ||
10740 | + | ||
10741 | +static int st_dev_can_open(struct ftdev *dev, unsigned int cpu) | ||
10742 | +{ | ||
10743 | + return cpu_online(cpu) ? 0 : -ENODEV; | ||
10744 | +} | ||
10745 | + | ||
10746 | +static int __init init_sched_task_trace(void) | ||
10747 | +{ | ||
10748 | + struct local_buffer* buf; | ||
10749 | + int i, ok = 0, err; | ||
10750 | + printk("Allocated %u sched_trace_xxx() events per CPU " | ||
10751 | + "(buffer size: %d bytes)\n", | ||
10752 | + NO_EVENTS, (int) sizeof(struct local_buffer)); | ||
10753 | + | ||
10754 | + err = ftdev_init(&st_dev, THIS_MODULE, | ||
10755 | + num_online_cpus(), "sched_trace"); | ||
10756 | + if (err) | ||
10757 | + goto err_out; | ||
10758 | + | ||
10759 | + for (i = 0; i < st_dev.minor_cnt; i++) { | ||
10760 | + buf = &per_cpu(st_event_buffer, i); | ||
10761 | + ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, | ||
10762 | + sizeof(struct st_event_record), | ||
10763 | + buf->flag, | ||
10764 | + buf->record); | ||
10765 | + st_dev.minor[i].buf = &buf->ftbuf; | ||
10766 | + } | ||
10767 | + if (ok == st_dev.minor_cnt) { | ||
10768 | + st_dev.can_open = st_dev_can_open; | ||
10769 | + err = register_ftdev(&st_dev); | ||
10770 | + if (err) | ||
10771 | + goto err_dealloc; | ||
10772 | + } else { | ||
10773 | + err = -EINVAL; | ||
10774 | + goto err_dealloc; | ||
10775 | + } | ||
10776 | + | ||
10777 | + return 0; | ||
10778 | + | ||
10779 | +err_dealloc: | ||
10780 | + ftdev_exit(&st_dev); | ||
10781 | +err_out: | ||
10782 | + printk(KERN_WARNING "Could not register sched_trace module\n"); | ||
10783 | + return err; | ||
10784 | +} | ||
10785 | + | ||
10786 | +static void __exit exit_sched_task_trace(void) | ||
10787 | +{ | ||
10788 | + ftdev_exit(&st_dev); | ||
10789 | +} | ||
10790 | + | ||
10791 | +module_init(init_sched_task_trace); | ||
10792 | +module_exit(exit_sched_task_trace); | ||
10793 | + | ||
10794 | + | ||
10795 | +static inline struct st_event_record* get_record(u8 type, struct task_struct* t) | ||
10796 | +{ | ||
10797 | + struct st_event_record* rec = NULL; | ||
10798 | + struct local_buffer* buf; | ||
10799 | + | ||
10800 | + buf = &get_cpu_var(st_event_buffer); | ||
10801 | + if (ft_buffer_start_write(&buf->ftbuf, (void**) &rec)) { | ||
10802 | + rec->hdr.type = type; | ||
10803 | + rec->hdr.cpu = smp_processor_id(); | ||
10804 | + rec->hdr.pid = t ? t->pid : 0; | ||
10805 | + rec->hdr.job = t ? t->rt_param.job_params.job_no : 0; | ||
10806 | + } else { | ||
10807 | + put_cpu_var(st_event_buffer); | ||
10808 | + } | ||
10809 | + /* rec will be NULL if it failed */ | ||
10810 | + return rec; | ||
10811 | +} | ||
10812 | + | ||
10813 | +static inline void put_record(struct st_event_record* rec) | ||
10814 | +{ | ||
10815 | + struct local_buffer* buf; | ||
10816 | + buf = &__get_cpu_var(st_event_buffer); | ||
10817 | + ft_buffer_finish_write(&buf->ftbuf, rec); | ||
10818 | + put_cpu_var(st_event_buffer); | ||
10819 | +} | ||
10820 | + | ||
10821 | +feather_callback void do_sched_trace_task_name(unsigned long id, unsigned long _task) | ||
10822 | +{ | ||
10823 | + struct task_struct *t = (struct task_struct*) _task; | ||
10824 | + struct st_event_record* rec = get_record(ST_NAME, t); | ||
10825 | + int i; | ||
10826 | + if (rec) { | ||
10827 | + for (i = 0; i < min(TASK_COMM_LEN, ST_NAME_LEN); i++) | ||
10828 | + rec->data.name.cmd[i] = t->comm[i]; | ||
10829 | + put_record(rec); | ||
10830 | + } | ||
10831 | +} | ||
10832 | + | ||
10833 | +feather_callback void do_sched_trace_task_param(unsigned long id, unsigned long _task) | ||
10834 | +{ | ||
10835 | + struct task_struct *t = (struct task_struct*) _task; | ||
10836 | + struct st_event_record* rec = get_record(ST_PARAM, t); | ||
10837 | + if (rec) { | ||
10838 | + rec->data.param.wcet = get_exec_cost(t); | ||
10839 | + rec->data.param.period = get_rt_period(t); | ||
10840 | + rec->data.param.phase = get_rt_phase(t); | ||
10841 | + rec->data.param.partition = get_partition(t); | ||
10842 | + put_record(rec); | ||
10843 | + } | ||
10844 | +} | ||
10845 | + | ||
10846 | +feather_callback void do_sched_trace_task_release(unsigned long id, unsigned long _task) | ||
10847 | +{ | ||
10848 | + struct task_struct *t = (struct task_struct*) _task; | ||
10849 | + struct st_event_record* rec = get_record(ST_RELEASE, t); | ||
10850 | + if (rec) { | ||
10851 | + rec->data.release.release = get_release(t); | ||
10852 | + rec->data.release.deadline = get_deadline(t); | ||
10853 | + put_record(rec); | ||
10854 | + } | ||
10855 | +} | ||
10856 | + | ||
10857 | +/* skipped: st_assigned_data, we don't use it atm */ | ||
10858 | + | ||
10859 | +feather_callback void do_sched_trace_task_switch_to(unsigned long id, | ||
10860 | + unsigned long _task) | ||
10861 | +{ | ||
10862 | + struct task_struct *t = (struct task_struct*) _task; | ||
10863 | + struct st_event_record* rec; | ||
10864 | + if (is_realtime(t)) { | ||
10865 | + rec = get_record(ST_SWITCH_TO, t); | ||
10866 | + if (rec) { | ||
10867 | + rec->data.switch_to.when = now(); | ||
10868 | + rec->data.switch_to.exec_time = get_exec_time(t); | ||
10869 | + put_record(rec); | ||
10870 | + } | ||
10871 | + } | ||
10872 | +} | ||
10873 | + | ||
10874 | +feather_callback void do_sched_trace_task_switch_away(unsigned long id, | ||
10875 | + unsigned long _task) | ||
10876 | +{ | ||
10877 | + struct task_struct *t = (struct task_struct*) _task; | ||
10878 | + struct st_event_record* rec; | ||
10879 | + if (is_realtime(t)) { | ||
10880 | + rec = get_record(ST_SWITCH_AWAY, t); | ||
10881 | + if (rec) { | ||
10882 | + rec->data.switch_away.when = now(); | ||
10883 | + rec->data.switch_away.exec_time = get_exec_time(t); | ||
10884 | + put_record(rec); | ||
10885 | + } | ||
10886 | + } | ||
10887 | +} | ||
10888 | + | ||
10889 | +feather_callback void do_sched_trace_task_completion(unsigned long id, | ||
10890 | + unsigned long _task, | ||
10891 | + unsigned long forced) | ||
10892 | +{ | ||
10893 | + struct task_struct *t = (struct task_struct*) _task; | ||
10894 | + struct st_event_record* rec = get_record(ST_COMPLETION, t); | ||
10895 | + if (rec) { | ||
10896 | + rec->data.completion.when = now(); | ||
10897 | + rec->data.completion.forced = forced; | ||
10898 | + put_record(rec); | ||
10899 | + } | ||
10900 | +} | ||
10901 | + | ||
10902 | +feather_callback void do_sched_trace_task_block(unsigned long id, | ||
10903 | + unsigned long _task) | ||
10904 | +{ | ||
10905 | + struct task_struct *t = (struct task_struct*) _task; | ||
10906 | + struct st_event_record* rec = get_record(ST_BLOCK, t); | ||
10907 | + if (rec) { | ||
10908 | + rec->data.block.when = now(); | ||
10909 | + put_record(rec); | ||
10910 | + } | ||
10911 | +} | ||
10912 | + | ||
10913 | +feather_callback void do_sched_trace_task_resume(unsigned long id, | ||
10914 | + unsigned long _task) | ||
10915 | +{ | ||
10916 | + struct task_struct *t = (struct task_struct*) _task; | ||
10917 | + struct st_event_record* rec = get_record(ST_RESUME, t); | ||
10918 | + if (rec) { | ||
10919 | + rec->data.resume.when = now(); | ||
10920 | + put_record(rec); | ||
10921 | + } | ||
10922 | +} | ||
10923 | + | ||
10924 | +feather_callback void do_sched_trace_sys_release(unsigned long id, | ||
10925 | + unsigned long _start) | ||
10926 | +{ | ||
10927 | + lt_t *start = (lt_t*) _start; | ||
10928 | + struct st_event_record* rec = get_record(ST_SYS_RELEASE, NULL); | ||
10929 | + if (rec) { | ||
10930 | + rec->data.sys_release.when = now(); | ||
10931 | + rec->data.sys_release.release = *start; | ||
10932 | + put_record(rec); | ||
10933 | + } | ||
10934 | +} | ||
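Userspace drains these records by reading the per-CPU ftdev node. Below is a minimal raw dump; the /dev/litmus/sched_trace0 path is an assumption derived from the "sched_trace" name registered above, and the binary record layout (defined in litmus/sched_trace.h) is deliberately left undecoded:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            /* Path assumed from ftdev_init(..., "sched_trace"); CPU 0 here. */
            int in = open("/dev/litmus/sched_trace0", O_RDONLY);
            int out = open("st-cpu0.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

            if (in < 0 || out < 0) {
                    perror("open");
                    return 1;
            }
            /* Drain raw st_event_record bytes; decode offline using the
             * layout from litmus/sched_trace.h. */
            while ((n = read(in, buf, sizeof(buf))) > 0)
                    write(out, buf, n);
            close(in);
            close(out);
            return 0;
    }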
10935 | diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c | ||
10936 | new file mode 100644 | ||
10937 | index 0000000..f4171fd | ||
10938 | --- /dev/null | ||
10939 | +++ b/litmus/sched_trace.c | ||
10940 | @@ -0,0 +1,252 @@ | ||
10941 | +/* | ||
10942 | + * sched_trace.c -- record scheduling events to a byte stream. | ||
10943 | + */ | ||
10944 | +#include <linux/spinlock.h> | ||
10945 | +#include <linux/mutex.h> | ||
10946 | + | ||
10947 | +#include <linux/fs.h> | ||
10948 | +#include <linux/slab.h> | ||
10949 | +#include <linux/miscdevice.h> | ||
10950 | +#include <asm/uaccess.h> | ||
10951 | +#include <linux/module.h> | ||
10952 | +#include <linux/sysrq.h> | ||
10953 | + | ||
10954 | +#include <linux/kfifo.h> | ||
10955 | + | ||
10956 | +#include <litmus/sched_trace.h> | ||
10957 | +#include <litmus/litmus.h> | ||
10958 | + | ||
10959 | +#define SCHED_TRACE_NAME "litmus/log" | ||
10960 | + | ||
10961 | +/* Compute size of TRACE() buffer */ | ||
10962 | +#define LITMUS_TRACE_BUF_SIZE (1 << CONFIG_SCHED_DEBUG_TRACE_SHIFT) | ||
10963 | + | ||
10964 | +/* Max length of one read from the buffer */ | ||
10965 | +#define MAX_READ_LEN (64 * 1024) | ||
10966 | + | ||
10967 | +/* Max length for one write --- by TRACE() --- to the buffer. This is used to | ||
10968 | + * allocate a per-cpu buffer for printf() formatting. */ | ||
10969 | +#define MSG_SIZE 255 | ||
10970 | + | ||
10971 | + | ||
10972 | +static DEFINE_MUTEX(reader_mutex); | ||
10973 | +static atomic_t reader_cnt = ATOMIC_INIT(0); | ||
10974 | +static DEFINE_KFIFO(debug_buffer, char, LITMUS_TRACE_BUF_SIZE); | ||
10975 | + | ||
10976 | + | ||
10977 | +static DEFINE_RAW_SPINLOCK(log_buffer_lock); | ||
10978 | +static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer); | ||
10979 | + | ||
10980 | +/* | ||
10981 | + * sched_trace_log_message - Write to the trace buffer (log_buffer) | ||
10982 | + * | ||
10983 | + * This is the only function accessing the log_buffer from inside the | ||
10984 | + * kernel for writing. | ||
10985 | + * Concurrent calls to sched_trace_log_message() must be serialized via | ||
10986 | + * log_buffer_lock. | ||
10987 | + * The maximum length of a formatted message is 255 bytes (MSG_SIZE). | ||
10988 | + */ | ||
10989 | +void sched_trace_log_message(const char* fmt, ...) | ||
10990 | +{ | ||
10991 | + unsigned long flags; | ||
10992 | + va_list args; | ||
10993 | + size_t len; | ||
10994 | + char* buf; | ||
10995 | + | ||
10996 | + if (!atomic_read(&reader_cnt)) | ||
10997 | + /* early exit if nobody is listening */ | ||
10998 | + return; | ||
10999 | + | ||
11000 | + va_start(args, fmt); | ||
11001 | + local_irq_save(flags); | ||
11002 | + | ||
11003 | + /* format message */ | ||
11004 | + buf = __get_cpu_var(fmt_buffer); | ||
11005 | + len = vscnprintf(buf, MSG_SIZE, fmt, args); | ||
11006 | + | ||
11007 | + raw_spin_lock(&log_buffer_lock); | ||
11008 | + /* Don't copy the trailing null byte; we don't want null bytes in a | ||
11009 | + * text file. | ||
11010 | + */ | ||
11011 | + kfifo_in(&debug_buffer, buf, len); | ||
11012 | + raw_spin_unlock(&log_buffer_lock); | ||
11013 | + | ||
11014 | + local_irq_restore(flags); | ||
11015 | + va_end(args); | ||
11016 | +} | ||
11017 | + | ||
11018 | + | ||
11019 | +/* | ||
11020 | + * log_read - Read the trace buffer | ||
11021 | + * | ||
11022 | + * This function is called as a file operation from userspace. | ||
11023 | + * Readers can sleep. Access is serialized through reader_mutex | ||
11024 | + */ | ||
11025 | +static ssize_t log_read(struct file *filp, | ||
11026 | + char __user *to, size_t len, | ||
11027 | + loff_t *f_pos) | ||
11028 | +{ | ||
11029 | + /* we ignore f_pos, this is strictly sequential */ | ||
11030 | + | ||
11031 | + ssize_t error = -EINVAL; | ||
11032 | + char* mem; | ||
11033 | + | ||
11034 | + if (mutex_lock_interruptible(&reader_mutex)) { | ||
11035 | + error = -ERESTARTSYS; | ||
11036 | + goto out; | ||
11037 | + } | ||
11038 | + | ||
11039 | + if (len > MAX_READ_LEN) | ||
11040 | + len = MAX_READ_LEN; | ||
11041 | + | ||
11042 | + mem = kmalloc(len, GFP_KERNEL); | ||
11043 | + if (!mem) { | ||
11044 | + error = -ENOMEM; | ||
11045 | + goto out_unlock; | ||
11046 | + } | ||
11047 | + | ||
11048 | + error = kfifo_out(&debug_buffer, mem, len); | ||
11049 | + while (!error) { | ||
11050 | + set_current_state(TASK_INTERRUPTIBLE); | ||
11051 | + schedule_timeout(110); | ||
11052 | + if (signal_pending(current)) | ||
11053 | + error = -ERESTARTSYS; | ||
11054 | + else | ||
11055 | + error = kfifo_out(&debug_buffer, mem, len); | ||
11056 | + } | ||
11057 | + | ||
11058 | + if (error > 0 && copy_to_user(to, mem, error)) | ||
11059 | + error = -EFAULT; | ||
11060 | + | ||
11061 | + kfree(mem); | ||
11062 | + out_unlock: | ||
11063 | + mutex_unlock(&reader_mutex); | ||
11064 | + out: | ||
11065 | + return error; | ||
11066 | +} | ||
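Between empty reads, the loop above sleeps for 110 jiffies, so the polling interval in wall-clock time depends on CONFIG_HZ. A quick back-of-the-envelope conversion (the HZ values are illustrative):

    #include <stdio.h>

    int main(void)
    {
            const int timeout_jiffies = 110; /* from schedule_timeout(110) above */
            int hz_values[] = {100, 250, 1000}; /* common CONFIG_HZ choices */
            int i;

            for (i = 0; i < 3; i++)
                    printf("HZ=%4d -> %4d ms between buffer polls\n",
                           hz_values[i], timeout_jiffies * 1000 / hz_values[i]);
            return 0;
    }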
11067 | + | ||
11068 | +/* | ||
11069 | + * Enable redirection of printk() messages to the trace buffer. | ||
11070 | + * Defined in kernel/printk.c | ||
11071 | + */ | ||
11072 | +extern int trace_override; | ||
11073 | +extern int trace_recurse; | ||
11074 | + | ||
11075 | +/* | ||
11076 | + * log_open - open the global log message ring buffer. | ||
11077 | + */ | ||
11078 | +static int log_open(struct inode *in, struct file *filp) | ||
11079 | +{ | ||
11080 | + int error = -EINVAL; | ||
11081 | + | ||
11082 | + if (mutex_lock_interruptible(&reader_mutex)) { | ||
11083 | + error = -ERESTARTSYS; | ||
11084 | + goto out; | ||
11085 | + } | ||
11086 | + | ||
11087 | + atomic_inc(&reader_cnt); | ||
11088 | + error = 0; | ||
11089 | + | ||
11090 | + printk(KERN_DEBUG | ||
11091 | + "sched_trace kfifo with buffer starting at: 0x%p\n", | ||
11092 | + debug_buffer.buf); | ||
11093 | + | ||
11094 | + /* override printk() */ | ||
11095 | + trace_override++; | ||
11096 | + | ||
11097 | + mutex_unlock(&reader_mutex); | ||
11098 | + out: | ||
11099 | + return error; | ||
11100 | +} | ||
11101 | + | ||
11102 | +static int log_release(struct inode *in, struct file *filp) | ||
11103 | +{ | ||
11104 | + int error = -EINVAL; | ||
11105 | + | ||
11106 | + if (mutex_lock_interruptible(&reader_mutex)) { | ||
11107 | + error = -ERESTARTSYS; | ||
11108 | + goto out; | ||
11109 | + } | ||
11110 | + | ||
11111 | + atomic_dec(&reader_cnt); | ||
11112 | + | ||
11113 | + /* release printk() overriding */ | ||
11114 | + trace_override--; | ||
11115 | + | ||
11116 | + printk(KERN_DEBUG "sched_trace kfifo released\n"); | ||
11117 | + | ||
11118 | + mutex_unlock(&reader_mutex); | ||
11119 | + out: | ||
11120 | + return error; | ||
11121 | +} | ||
11122 | + | ||
11123 | +/* | ||
11124 | + * log_fops - The file operations for accessing the global LITMUS log message | ||
11125 | + * buffer. | ||
11126 | + * | ||
11127 | + * Except for opening the device file, it uses the same operations as trace_fops. | ||
11128 | + */ | ||
11129 | +static struct file_operations log_fops = { | ||
11130 | + .owner = THIS_MODULE, | ||
11131 | + .open = log_open, | ||
11132 | + .release = log_release, | ||
11133 | + .read = log_read, | ||
11134 | +}; | ||
11135 | + | ||
11136 | +static struct miscdevice litmus_log_dev = { | ||
11137 | + .name = SCHED_TRACE_NAME, | ||
11138 | + .minor = MISC_DYNAMIC_MINOR, | ||
11139 | + .fops = &log_fops, | ||
11140 | +}; | ||
11141 | + | ||
11142 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
11143 | +void dump_trace_buffer(int max) | ||
11144 | +{ | ||
11145 | + char line[80]; | ||
11146 | + int len; | ||
11147 | + int count = 0; | ||
11148 | + | ||
11149 | + /* potential, but very unlikely, race... */ | ||
11150 | + trace_recurse = 1; | ||
11151 | + while ((max == 0 || count++ < max) && | ||
11152 | + (len = kfifo_out(&debug_buffer, line, sizeof(line) - 1)) > 0) { | ||
11153 | + line[len] = '\0'; | ||
11154 | + printk("%s", line); | ||
11155 | + } | ||
11156 | + trace_recurse = 0; | ||
11157 | +} | ||
11158 | + | ||
11159 | +static void sysrq_dump_trace_buffer(int key) | ||
11160 | +{ | ||
11161 | + dump_trace_buffer(100); | ||
11162 | +} | ||
11163 | + | ||
11164 | +static struct sysrq_key_op sysrq_dump_trace_buffer_op = { | ||
11165 | + .handler = sysrq_dump_trace_buffer, | ||
11166 | + .help_msg = "dump-trace-buffer(Y)", | ||
11167 | + .action_msg = "writing content of TRACE() buffer", | ||
11168 | +}; | ||
11169 | +#endif | ||
11170 | + | ||
11171 | +static int __init init_sched_trace(void) | ||
11172 | +{ | ||
11173 | + printk("Initializing TRACE() device\n"); | ||
11174 | + | ||
11175 | +#ifdef CONFIG_MAGIC_SYSRQ | ||
11176 | + /* offer some debugging help */ | ||
11177 | + if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op)) | ||
11178 | + printk("Registered dump-trace-buffer(Y) magic sysrq.\n"); | ||
11179 | + else | ||
11180 | + printk("Could not register dump-trace-buffer(Y) magic sysrq.\n"); | ||
11181 | +#endif | ||
11182 | + | ||
11183 | + return misc_register(&litmus_log_dev); | ||
11184 | +} | ||
11185 | + | ||
11186 | +static void __exit exit_sched_trace(void) | ||
11187 | +{ | ||
11188 | + misc_deregister(&litmus_log_dev); | ||
11189 | +} | ||
11190 | + | ||
11191 | +module_init(init_sched_trace); | ||
11192 | +module_exit(exit_sched_trace); | ||
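Because log_read() hands out plain text, following the TRACE() stream amounts to reading the device. A minimal consumer follows; the /dev/litmus/log path is an assumption based on SCHED_TRACE_NAME and standard misc-device naming:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/dev/litmus/log", O_RDONLY); /* path assumed */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* log_read() sleep-polls until text is available, so a simple
             * loop behaves like `cat` on the TRACE() stream. */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    write(STDOUT_FILENO, buf, n);
            close(fd);
            return 0;
    }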
11193 | diff --git a/litmus/srp.c b/litmus/srp.c | ||
11194 | new file mode 100644 | ||
11195 | index 0000000..cb57759 | ||
11196 | --- /dev/null | ||
11197 | +++ b/litmus/srp.c | ||
11198 | @@ -0,0 +1,318 @@ | ||
11199 | +/* ************************************************************************** */ | ||
11200 | +/* STACK RESOURCE POLICY */ | ||
11201 | +/* ************************************************************************** */ | ||
11202 | + | ||
11203 | +#include <asm/atomic.h> | ||
11204 | +#include <linux/sched.h> | ||
11205 | +#include <linux/wait.h> | ||
11206 | + | ||
11207 | +#include <litmus/litmus.h> | ||
11208 | +#include <litmus/sched_plugin.h> | ||
11209 | +#include <litmus/fdso.h> | ||
11210 | +#include <litmus/trace.h> | ||
11211 | + | ||
11212 | + | ||
11213 | +#ifdef CONFIG_SRP | ||
11214 | + | ||
11215 | +struct srp_priority { | ||
11216 | + struct list_head list; | ||
11217 | + unsigned int period; | ||
11218 | + pid_t pid; | ||
11219 | +}; | ||
11220 | + | ||
11221 | +#define list2prio(l) list_entry(l, struct srp_priority, list) | ||
11222 | + | ||
11223 | +/* SRP task priority comparison function. Smaller periods have higher | ||
11224 | + * priority; ties are broken by PID. Special case: period == 0 <=> no priority. | ||
11225 | + */ | ||
11226 | +static int srp_higher_prio(struct srp_priority* first, | ||
11227 | + struct srp_priority* second) | ||
11228 | +{ | ||
11229 | + if (!first->period) | ||
11230 | + return 0; | ||
11231 | + else | ||
11232 | + return !second->period || | ||
11233 | + first->period < second->period || ( | ||
11234 | + first->period == second->period && | ||
11235 | + first->pid < second->pid); | ||
11236 | +} | ||
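The comparison encodes rate-monotonic-style ordering with PID tie-breaking. Here is a self-contained userspace mirror of the predicate with a few spot checks (purely editorial):

    #include <assert.h>
    #include <sys/types.h>

    struct prio { unsigned int period; pid_t pid; };

    /* Editorial mirror of srp_higher_prio(): period 0 means "no priority",
     * smaller periods win, and equal periods fall back to the smaller PID. */
    static int higher_prio(struct prio a, struct prio b)
    {
            if (!a.period)
                    return 0;
            return !b.period ||
                   a.period < b.period ||
                   (a.period == b.period && a.pid < b.pid);
    }

    int main(void)
    {
            struct prio none = {0, 1}, fast = {10, 7}, fast2 = {10, 9}, slow = {20, 3};

            assert(!higher_prio(none, slow)); /* "no priority" never wins */
            assert(higher_prio(fast, slow));  /* shorter period wins */
            assert(higher_prio(fast, fast2)); /* PID breaks the tie */
            return 0;
    }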
11237 | + | ||
11238 | +struct srp { | ||
11239 | + struct list_head ceiling; | ||
11240 | + wait_queue_head_t ceiling_blocked; | ||
11241 | +}; | ||
11242 | + | ||
11243 | + | ||
11244 | +atomic_t srp_objects_in_use = ATOMIC_INIT(0); | ||
11245 | + | ||
11246 | +DEFINE_PER_CPU(struct srp, srp); | ||
11247 | + | ||
11248 | + | ||
11249 | +/* Initialize SRP semaphores at boot time. */ | ||
11250 | +static int __init srp_init(void) | ||
11251 | +{ | ||
11252 | + int i; | ||
11253 | + | ||
11254 | + printk("Initializing SRP per-CPU ceilings..."); | ||
11255 | + for (i = 0; i < NR_CPUS; i++) { | ||
11256 | + init_waitqueue_head(&per_cpu(srp, i).ceiling_blocked); | ||
11257 | + INIT_LIST_HEAD(&per_cpu(srp, i).ceiling); | ||
11258 | + } | ||
11259 | + printk(" done!\n"); | ||
11260 | + | ||
11261 | + return 0; | ||
11262 | +} | ||
11263 | +module_init(srp_init); | ||
11264 | + | ||
11265 | + | ||
11266 | +#define system_ceiling(srp) list2prio(srp->ceiling.next) | ||
11267 | + | ||
11268 | + | ||
11269 | +#define UNDEF_SEM -2 | ||
11270 | + | ||
11271 | + | ||
11272 | +/* struct for uniprocessor SRP "semaphore" */ | ||
11273 | +struct srp_semaphore { | ||
11274 | + struct srp_priority ceiling; | ||
11275 | + struct task_struct* owner; | ||
11276 | + int cpu; /* cpu associated with this "semaphore" and resource */ | ||
11277 | +}; | ||
11278 | + | ||
11279 | +#define ceiling2sem(c) container_of(c, struct srp_semaphore, ceiling) | ||
11280 | + | ||
11281 | +static int srp_exceeds_ceiling(struct task_struct* first, | ||
11282 | + struct srp* srp) | ||
11283 | +{ | ||
11284 | + return list_empty(&srp->ceiling) || | ||
11285 | + get_rt_period(first) < system_ceiling(srp)->period || | ||
11286 | + (get_rt_period(first) == system_ceiling(srp)->period && | ||
11287 | + first->pid < system_ceiling(srp)->pid) || | ||
11288 | + ceiling2sem(system_ceiling(srp))->owner == first; | ||
11289 | +} | ||
11290 | + | ||
11291 | +static void srp_add_prio(struct srp* srp, struct srp_priority* prio) | ||
11292 | +{ | ||
11293 | + struct list_head *pos; | ||
11294 | + if (in_list(&prio->list)) { | ||
11295 | + printk(KERN_CRIT "WARNING: SRP violation detected, prio is already in " | ||
11296 | + "ceiling list! cpu=%d, srp=%p\n", smp_processor_id(), ceiling2sem(prio)); | ||
11297 | + return; | ||
11298 | + } | ||
11299 | + list_for_each(pos, &srp->ceiling) | ||
11300 | + if (unlikely(srp_higher_prio(prio, list2prio(pos)))) { | ||
11301 | + __list_add(&prio->list, pos->prev, pos); | ||
11302 | + return; | ||
11303 | + } | ||
11304 | + | ||
11305 | + list_add_tail(&prio->list, &srp->ceiling); | ||
11306 | +} | ||
11307 | + | ||
11308 | + | ||
11309 | +static void* create_srp_semaphore(void) | ||
11310 | +{ | ||
11311 | + struct srp_semaphore* sem; | ||
11312 | + | ||
11313 | + sem = kmalloc(sizeof(*sem), GFP_KERNEL); | ||
11314 | + if (!sem) | ||
11315 | + return NULL; | ||
11316 | + | ||
11317 | + INIT_LIST_HEAD(&sem->ceiling.list); | ||
11318 | + sem->ceiling.period = 0; | ||
11319 | + sem->cpu = UNDEF_SEM; | ||
11320 | + sem->owner = NULL; | ||
11321 | + atomic_inc(&srp_objects_in_use); | ||
11322 | + return sem; | ||
11323 | +} | ||
11324 | + | ||
11325 | +static noinline int open_srp_semaphore(struct od_table_entry* entry, void* __user arg) | ||
11326 | +{ | ||
11327 | + struct srp_semaphore* sem = (struct srp_semaphore*) entry->obj->obj; | ||
11328 | + int ret = 0; | ||
11329 | + struct task_struct* t = current; | ||
11330 | + struct srp_priority t_prio; | ||
11331 | + | ||
11332 | + TRACE("opening SRP semaphore %p, cpu=%d\n", sem, sem->cpu); | ||
11333 | + if (!srp_active()) | ||
11334 | + return -EBUSY; | ||
11335 | + | ||
11336 | + if (sem->cpu == UNDEF_SEM) | ||
11337 | + sem->cpu = get_partition(t); | ||
11338 | + else if (sem->cpu != get_partition(t)) | ||
11339 | + ret = -EPERM; | ||
11340 | + | ||
11341 | + if (ret == 0) { | ||
11342 | + t_prio.period = get_rt_period(t); | ||
11343 | + t_prio.pid = t->pid; | ||
11344 | + if (srp_higher_prio(&t_prio, &sem->ceiling)) { | ||
11345 | + sem->ceiling.period = t_prio.period; | ||
11346 | + sem->ceiling.pid = t_prio.pid; | ||
11347 | + } | ||
11348 | + } | ||
11349 | + | ||
11350 | + return ret; | ||
11351 | +} | ||
11352 | + | ||
11353 | +static void destroy_srp_semaphore(void* sem) | ||
11354 | +{ | ||
11355 | + /* XXX invariants */ | ||
11356 | + atomic_dec(&srp_objects_in_use); | ||
11357 | + kfree(sem); | ||
11358 | +} | ||
11359 | + | ||
11360 | +struct fdso_ops srp_sem_ops = { | ||
11361 | + .create = create_srp_semaphore, | ||
11362 | + .open = open_srp_semaphore, | ||
11363 | + .destroy = destroy_srp_semaphore | ||
11364 | +}; | ||
11365 | + | ||
11366 | + | ||
11367 | +static void do_srp_down(struct srp_semaphore* sem) | ||
11368 | +{ | ||
11369 | + /* Update ceiling. */ | ||
11370 | + srp_add_prio(&__get_cpu_var(srp), &sem->ceiling); | ||
11371 | + WARN_ON(sem->owner != NULL); | ||
11372 | + sem->owner = current; | ||
11373 | + TRACE_CUR("acquired srp 0x%p\n", sem); | ||
11374 | +} | ||
11375 | + | ||
11376 | +static void do_srp_up(struct srp_semaphore* sem) | ||
11377 | +{ | ||
11378 | + /* Determine new system priority ceiling for this CPU. */ | ||
11379 | + WARN_ON(!in_list(&sem->ceiling.list)); | ||
11380 | + if (in_list(&sem->ceiling.list)) | ||
11381 | + list_del(&sem->ceiling.list); | ||
11382 | + | ||
11383 | + sem->owner = NULL; | ||
11384 | + | ||
11385 | + /* Wake tasks on this CPU, if they exceed current ceiling. */ | ||
11386 | + TRACE_CUR("released srp 0x%p\n", sem); | ||
11387 | + wake_up_all(&__get_cpu_var(srp).ceiling_blocked); | ||
11388 | +} | ||
11389 | + | ||
11390 | +/* Adjust the system-wide priority ceiling if resource is claimed. */ | ||
11391 | +asmlinkage long sys_srp_down(int sem_od) | ||
11392 | +{ | ||
11393 | + int cpu; | ||
11394 | + int ret = -EINVAL; | ||
11395 | + struct srp_semaphore* sem; | ||
11396 | + | ||
11397 | + /* disabling preemptions is sufficient protection since | ||
11398 | + * SRP is strictly per CPU and we don't interfere with any | ||
11399 | + * interrupt handlers | ||
11400 | + */ | ||
11401 | + preempt_disable(); | ||
11402 | + TS_SRP_DOWN_START; | ||
11403 | + | ||
11404 | + cpu = smp_processor_id(); | ||
11405 | + sem = lookup_srp_sem(sem_od); | ||
11406 | + if (sem && sem->cpu == cpu) { | ||
11407 | + do_srp_down(sem); | ||
11408 | + ret = 0; | ||
11409 | + } | ||
11410 | + | ||
11411 | + TS_SRP_DOWN_END; | ||
11412 | + preempt_enable(); | ||
11413 | + return ret; | ||
11414 | +} | ||
11415 | + | ||
11416 | +/* Adjust the system-wide priority ceiling if resource is freed. */ | ||
11417 | +asmlinkage long sys_srp_up(int sem_od) | ||
11418 | +{ | ||
11419 | + int cpu; | ||
11420 | + int ret = -EINVAL; | ||
11421 | + struct srp_semaphore* sem; | ||
11422 | + | ||
11423 | + preempt_disable(); | ||
11424 | + TS_SRP_UP_START; | ||
11425 | + | ||
11426 | + cpu = smp_processor_id(); | ||
11427 | + sem = lookup_srp_sem(sem_od); | ||
11428 | + | ||
11429 | + if (sem && sem->cpu == cpu) { | ||
11430 | + do_srp_up(sem); | ||
11431 | + ret = 0; | ||
11432 | + } | ||
11433 | + | ||
11434 | + TS_SRP_UP_END; | ||
11435 | + preempt_enable(); | ||
11436 | + return ret; | ||
11437 | +} | ||
11438 | + | ||
11439 | +static int srp_wake_up(wait_queue_t *wait, unsigned mode, int sync, | ||
11440 | + void *key) | ||
11441 | +{ | ||
11442 | + int cpu = smp_processor_id(); | ||
11443 | + struct task_struct *tsk = wait->private; | ||
11444 | + if (cpu != get_partition(tsk)) | ||
11445 | + TRACE_TASK(tsk, "srp_wake_up on wrong cpu, partition is %d\n", | ||
11446 | + get_partition(tsk)); | ||
11447 | + else if (srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) | ||
11448 | + return default_wake_function(wait, mode, sync, key); | ||
11449 | + return 0; | ||
11450 | +} | ||
11451 | + | ||
11452 | + | ||
11453 | + | ||
11454 | +static void do_ceiling_block(struct task_struct *tsk) | ||
11455 | +{ | ||
11456 | + wait_queue_t wait = { | ||
11457 | + .private = tsk, | ||
11458 | + .func = srp_wake_up, | ||
11459 | + .task_list = {NULL, NULL} | ||
11460 | + }; | ||
11461 | + | ||
11462 | + tsk->state = TASK_UNINTERRUPTIBLE; | ||
11463 | + add_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait); | ||
11464 | + tsk->rt_param.srp_non_recurse = 1; | ||
11465 | + preempt_enable_no_resched(); | ||
11466 | + schedule(); | ||
11467 | + preempt_disable(); | ||
11468 | + tsk->rt_param.srp_non_recurse = 0; | ||
11469 | + remove_wait_queue(&__get_cpu_var(srp).ceiling_blocked, &wait); | ||
11470 | +} | ||
11471 | + | ||
11472 | +/* Wait for current task priority to exceed system-wide priority ceiling. | ||
11473 | + */ | ||
11474 | +void srp_ceiling_block(void) | ||
11475 | +{ | ||
11476 | + struct task_struct *tsk = current; | ||
11477 | + | ||
11478 | + /* Only applies to real-time tasks, but optimize for RT tasks. */ | ||
11479 | + if (unlikely(!is_realtime(tsk))) | ||
11480 | + return; | ||
11481 | + | ||
11482 | + /* Avoid recursive ceiling blocking. */ | ||
11483 | + if (unlikely(tsk->rt_param.srp_non_recurse)) | ||
11484 | + return; | ||
11485 | + | ||
11486 | + /* Bail out early if there aren't any SRP resources around. */ | ||
11487 | + if (likely(!atomic_read(&srp_objects_in_use))) | ||
11488 | + return; | ||
11489 | + | ||
11490 | + preempt_disable(); | ||
11491 | + if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) { | ||
11492 | + TRACE_CUR("is priority ceiling blocked.\n"); | ||
11493 | + while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) | ||
11494 | + do_ceiling_block(tsk); | ||
11495 | + TRACE_CUR("finally exceeds system ceiling.\n"); | ||
11496 | + } else | ||
11497 | + TRACE_CUR("is not priority ceiling blocked\n"); | ||
11498 | + preempt_enable(); | ||
11499 | +} | ||
11500 | + | ||
11501 | + | ||
11502 | +#else | ||
11503 | + | ||
11504 | +asmlinkage long sys_srp_down(int sem_od) | ||
11505 | +{ | ||
11506 | + return -ENOSYS; | ||
11507 | +} | ||
11508 | + | ||
11509 | +asmlinkage long sys_srp_up(int sem_od) | ||
11510 | +{ | ||
11511 | + return -ENOSYS; | ||
11512 | +} | ||
11513 | + | ||
11514 | +struct fdso_ops srp_sem_ops = {}; | ||
11515 | + | ||
11516 | +#endif | ||
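From userspace these syscalls are normally reached through liblitmus wrappers. The sketch below assumes wrapper names open_srp_sem(), srp_down(), and srp_up() from the contemporary liblitmus; none of them are defined in this patch, so treat all three as assumptions:

    /* Hypothetical usage sketch; liblitmus wrapper names are assumptions.
     * Assumes the caller is already a real-time task on this partition,
     * since open_srp_semaphore() checks srp_active() and the partition. */
    #include <fcntl.h>
    #include <litmus.h>

    void critical_section_under_srp(void)
    {
            /* SRP semaphores attach to an inode via the FDSO layer. */
            int fd = open("/tmp/srp-ns", O_RDONLY | O_CREAT, 0666);
            int od = open_srp_sem(fd, 0 /* resource id */);

            srp_down(od);   /* raises the per-CPU ceiling (sys_srp_down) */
            /* ... access the shared resource ... */
            srp_up(od);     /* lowers the ceiling, wakes blocked tasks */
    }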
11517 | diff --git a/litmus/sync.c b/litmus/sync.c | ||
11518 | new file mode 100644 | ||
11519 | index 0000000..bf75fde | ||
11520 | --- /dev/null | ||
11521 | +++ b/litmus/sync.c | ||
11522 | @@ -0,0 +1,104 @@ | ||
11523 | +/* litmus/sync.c - Support for synchronous and asynchronous task system releases. | ||
11524 | + * | ||
11525 | + * | ||
11526 | + */ | ||
11527 | + | ||
11528 | +#include <asm/atomic.h> | ||
11529 | +#include <asm/uaccess.h> | ||
11530 | +#include <linux/spinlock.h> | ||
11531 | +#include <linux/list.h> | ||
11532 | +#include <linux/sched.h> | ||
11533 | +#include <linux/completion.h> | ||
11534 | + | ||
11535 | +#include <litmus/litmus.h> | ||
11536 | +#include <litmus/sched_plugin.h> | ||
11537 | +#include <litmus/jobs.h> | ||
11538 | + | ||
11539 | +#include <litmus/sched_trace.h> | ||
11540 | + | ||
11541 | +static DECLARE_COMPLETION(ts_release); | ||
11542 | + | ||
11543 | +static long do_wait_for_ts_release(void) | ||
11544 | +{ | ||
11545 | + long ret = 0; | ||
11546 | + | ||
11547 | + /* If the interruption races with a release, the completion object | ||
11548 | + * may have a non-zero counter. To avoid this problem, this should | ||
11549 | + * be replaced by wait_for_completion(). | ||
11550 | + * | ||
11551 | + * For debugging purposes, this is interruptible for now. | ||
11552 | + */ | ||
11553 | + ret = wait_for_completion_interruptible(&ts_release); | ||
11554 | + | ||
11555 | + return ret; | ||
11556 | +} | ||
11557 | + | ||
11558 | +int count_tasks_waiting_for_release(void) | ||
11559 | +{ | ||
11560 | + unsigned long flags; | ||
11561 | + int task_count = 0; | ||
11562 | + struct list_head *pos; | ||
11563 | + | ||
11564 | + spin_lock_irqsave(&ts_release.wait.lock, flags); | ||
11565 | + list_for_each(pos, &ts_release.wait.task_list) { | ||
11566 | + task_count++; | ||
11567 | + } | ||
11568 | + spin_unlock_irqrestore(&ts_release.wait.lock, flags); | ||
11569 | + | ||
11570 | + return task_count; | ||
11571 | +} | ||
11572 | + | ||
11573 | +static long do_release_ts(lt_t start) | ||
11574 | +{ | ||
11575 | + int task_count = 0; | ||
11576 | + unsigned long flags; | ||
11577 | + struct list_head *pos; | ||
11578 | + struct task_struct *t; | ||
11579 | + | ||
11580 | + | ||
11581 | + spin_lock_irqsave(&ts_release.wait.lock, flags); | ||
11582 | + TRACE("<<<<<< synchronous task system release >>>>>>\n"); | ||
11583 | + | ||
11584 | + sched_trace_sys_release(&start); | ||
11585 | + list_for_each(pos, &ts_release.wait.task_list) { | ||
11586 | + t = (struct task_struct*) list_entry(pos, | ||
11587 | + struct __wait_queue, | ||
11588 | + task_list)->private; | ||
11589 | + task_count++; | ||
11590 | + litmus->release_at(t, start + t->rt_param.task_params.phase); | ||
11591 | + sched_trace_task_release(t); | ||
11592 | + } | ||
11593 | + | ||
11594 | + spin_unlock_irqrestore(&ts_release.wait.lock, flags); | ||
11595 | + | ||
11596 | + complete_n(&ts_release, task_count); | ||
11597 | + | ||
11598 | + return task_count; | ||
11599 | +} | ||
11600 | + | ||
11601 | + | ||
11602 | +asmlinkage long sys_wait_for_ts_release(void) | ||
11603 | +{ | ||
11604 | + long ret = -EPERM; | ||
11605 | + struct task_struct *t = current; | ||
11606 | + | ||
11607 | + if (is_realtime(t)) | ||
11608 | + ret = do_wait_for_ts_release(); | ||
11609 | + | ||
11610 | + return ret; | ||
11611 | +} | ||
11612 | + | ||
11613 | + | ||
11614 | +asmlinkage long sys_release_ts(lt_t __user *__delay) | ||
11615 | +{ | ||
11616 | + long ret; | ||
11617 | + lt_t delay; | ||
11618 | + | ||
11619 | + /* FIXME: check capabilities... */ | ||
11620 | + | ||
11621 | + ret = copy_from_user(&delay, __delay, sizeof(delay)); | ||
11622 | + if (ret == 0) | ||
11623 | + ret = do_release_ts(litmus_clock() + delay); | ||
11624 | + | ||
11625 | + return ret; | ||
11626 | +} | ||
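A synchronous release pairs the two syscalls: each real-time task parks in sys_wait_for_ts_release(), and a controller fires sys_release_ts(). A sketch assuming the liblitmus wrappers wait_for_ts_release() and release_ts(), whose names are not confirmed by this patch:

    /* Hypothetical two-sided sketch; wrapper names are assumptions. */
    #include <litmus.h>   /* assumed to provide lt_t and the wrappers */

    void rt_task_side(void)
    {
            /* Called after entering LITMUS_RT_TASK mode: blocks until
             * the controller releases the task system. */
            wait_for_ts_release();
            /* first job starts at start + this task's phase */
    }

    void controller_side(void)
    {
            lt_t delay = 1000000000ULL; /* release 1 s from now, in ns */
            /* do_release_ts() returns the number of waiting tasks. */
            release_ts(&delay);
    }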
11627 | diff --git a/litmus/trace.c b/litmus/trace.c | ||
11628 | new file mode 100644 | ||
11629 | index 0000000..e7ea1c2 | ||
11630 | --- /dev/null | ||
11631 | +++ b/litmus/trace.c | ||
11632 | @@ -0,0 +1,122 @@ | ||
11633 | +#include <linux/sched.h> | ||
11634 | +#include <linux/module.h> | ||
11635 | + | ||
11636 | +#include <litmus/ftdev.h> | ||
11637 | +#include <litmus/litmus.h> | ||
11638 | +#include <litmus/trace.h> | ||
11639 | + | ||
11640 | +/******************************************************************************/ | ||
11641 | +/* Allocation */ | ||
11642 | +/******************************************************************************/ | ||
11643 | + | ||
11644 | +static struct ftdev overhead_dev; | ||
11645 | + | ||
11646 | +#define trace_ts_buf overhead_dev.minor[0].buf | ||
11647 | + | ||
11648 | +static unsigned int ts_seq_no = 0; | ||
11649 | + | ||
11650 | +static inline void __save_timestamp_cpu(unsigned long event, | ||
11651 | + uint8_t type, uint8_t cpu) | ||
11652 | +{ | ||
11653 | + unsigned int seq_no; | ||
11654 | + struct timestamp *ts; | ||
11655 | + seq_no = fetch_and_inc((int *) &ts_seq_no); | ||
11656 | + if (ft_buffer_start_write(trace_ts_buf, (void**) &ts)) { | ||
11657 | + ts->event = event; | ||
11658 | + ts->timestamp = ft_timestamp(); | ||
11659 | + ts->seq_no = seq_no; | ||
11660 | + ts->cpu = cpu; | ||
11661 | + ts->task_type = type; | ||
11662 | + ft_buffer_finish_write(trace_ts_buf, ts); | ||
11663 | + } | ||
11664 | +} | ||
11665 | + | ||
11666 | +static inline void __save_timestamp(unsigned long event, | ||
11667 | + uint8_t type) | ||
11668 | +{ | ||
11669 | + __save_timestamp_cpu(event, type, raw_smp_processor_id()); | ||
11670 | +} | ||
11671 | + | ||
11672 | +feather_callback void save_timestamp(unsigned long event) | ||
11673 | +{ | ||
11674 | + __save_timestamp(event, TSK_UNKNOWN); | ||
11675 | +} | ||
11676 | + | ||
11677 | +feather_callback void save_timestamp_def(unsigned long event, | ||
11678 | + unsigned long type) | ||
11679 | +{ | ||
11680 | + __save_timestamp(event, (uint8_t) type); | ||
11681 | +} | ||
11682 | + | ||
11683 | +feather_callback void save_timestamp_task(unsigned long event, | ||
11684 | + unsigned long t_ptr) | ||
11685 | +{ | ||
11686 | + int rt = is_realtime((struct task_struct *) t_ptr); | ||
11687 | + __save_timestamp(event, rt ? TSK_RT : TSK_BE); | ||
11688 | +} | ||
11689 | + | ||
11690 | +feather_callback void save_timestamp_cpu(unsigned long event, | ||
11691 | + unsigned long cpu) | ||
11692 | +{ | ||
11693 | + __save_timestamp_cpu(event, TSK_UNKNOWN, cpu); | ||
11694 | +} | ||
11695 | + | ||
11696 | +/******************************************************************************/ | ||
11697 | +/* DEVICE FILE DRIVER */ | ||
11698 | +/******************************************************************************/ | ||
11699 | + | ||
11700 | +/* | ||
11701 | + * should be 8M; it is the max we can ask to buddy system allocator (MAX_ORDER) | ||
11702 | + * and we might not get as much | ||
11703 | + */ | ||
11704 | +#define NO_TIMESTAMPS (2 << 11) | ||
11705 | + | ||
11706 | +static int alloc_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | ||
11707 | +{ | ||
11708 | + unsigned int count = NO_TIMESTAMPS; | ||
11709 | + while (count && !trace_ts_buf) { | ||
11710 | + printk("time stamp buffer: trying to allocate %u time stamps.\n", count); | ||
11711 | + ftdev->minor[idx].buf = alloc_ft_buffer(count, sizeof(struct timestamp)); | ||
11712 | + count /= 2; | ||
11713 | + } | ||
11714 | + return ftdev->minor[idx].buf ? 0 : -ENOMEM; | ||
11715 | +} | ||
11716 | + | ||
11717 | +static void free_timestamp_buffer(struct ftdev* ftdev, unsigned int idx) | ||
11718 | +{ | ||
11719 | + free_ft_buffer(ftdev->minor[idx].buf); | ||
11720 | + ftdev->minor[idx].buf = NULL; | ||
11721 | +} | ||
11722 | + | ||
11723 | +static int __init init_ft_overhead_trace(void) | ||
11724 | +{ | ||
11725 | + int err; | ||
11726 | + | ||
11727 | + printk("Initializing Feather-Trace overhead tracing device.\n"); | ||
11728 | + err = ftdev_init(&overhead_dev, THIS_MODULE, 1, "ft_trace"); | ||
11729 | + if (err) | ||
11730 | + goto err_out; | ||
11731 | + | ||
11732 | + overhead_dev.alloc = alloc_timestamp_buffer; | ||
11733 | + overhead_dev.free = free_timestamp_buffer; | ||
11734 | + | ||
11735 | + err = register_ftdev(&overhead_dev); | ||
11736 | + if (err) | ||
11737 | + goto err_dealloc; | ||
11738 | + | ||
11739 | + return 0; | ||
11740 | + | ||
11741 | +err_dealloc: | ||
11742 | + ftdev_exit(&overhead_dev); | ||
11743 | +err_out: | ||
11744 | + printk(KERN_WARNING "Could not register ft_trace module.\n"); | ||
11745 | + return err; | ||
11746 | +} | ||
11747 | + | ||
11748 | +static void __exit exit_ft_overhead_trace(void) | ||
11749 | +{ | ||
11750 | + ftdev_exit(&overhead_dev); | ||
11751 | +} | ||
11752 | + | ||
11753 | +module_init(init_ft_overhead_trace); | ||
11754 | +module_exit(exit_ft_overhead_trace); | ||
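alloc_timestamp_buffer() halves its request until the buddy allocator cooperates. A quick editorial check of the sequence it walks through, assuming (purely for illustration) a 16-byte struct timestamp:

    #include <stdio.h>

    int main(void)
    {
            unsigned int count = 2 << 11;   /* NO_TIMESTAMPS = 4096 */
            unsigned int ts_size = 16;      /* assumed, for illustration only */

            /* Mirrors the halving in alloc_timestamp_buffer(): each failed
             * allocation retries with half as many timestamps. */
            while (count) {
                    printf("try %u timestamps (%u bytes)\n",
                           count, count * ts_size);
                    count /= 2;
            }
            return 0;
    }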
diff --git a/download/2011.1/x86_64-config b/download/2011.1/x86_64-config new file mode 100644 index 0000000..002bda7 --- /dev/null +++ b/download/2011.1/x86_64-config | |||
@@ -0,0 +1,2030 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.36-litmus2010 | ||
4 | # Fri Oct 22 21:47:58 2010 | ||
5 | # | ||
6 | CONFIG_64BIT=y | ||
7 | # CONFIG_X86_32 is not set | ||
8 | CONFIG_X86_64=y | ||
9 | CONFIG_X86=y | ||
10 | CONFIG_INSTRUCTION_DECODER=y | ||
11 | CONFIG_OUTPUT_FORMAT="elf64-x86-64" | ||
12 | CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" | ||
13 | CONFIG_GENERIC_CMOS_UPDATE=y | ||
14 | CONFIG_CLOCKSOURCE_WATCHDOG=y | ||
15 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
16 | CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y | ||
17 | CONFIG_LOCKDEP_SUPPORT=y | ||
18 | CONFIG_STACKTRACE_SUPPORT=y | ||
19 | CONFIG_HAVE_LATENCYTOP_SUPPORT=y | ||
20 | CONFIG_MMU=y | ||
21 | CONFIG_ZONE_DMA=y | ||
22 | CONFIG_NEED_DMA_MAP_STATE=y | ||
23 | CONFIG_NEED_SG_DMA_LENGTH=y | ||
24 | CONFIG_GENERIC_ISA_DMA=y | ||
25 | CONFIG_GENERIC_IOMAP=y | ||
26 | CONFIG_GENERIC_BUG=y | ||
27 | CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y | ||
28 | CONFIG_GENERIC_HWEIGHT=y | ||
29 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
30 | # CONFIG_RWSEM_GENERIC_SPINLOCK is not set | ||
31 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
32 | CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y | ||
33 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
34 | CONFIG_GENERIC_TIME_VSYSCALL=y | ||
35 | CONFIG_ARCH_HAS_CPU_RELAX=y | ||
36 | CONFIG_ARCH_HAS_DEFAULT_IDLE=y | ||
37 | CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y | ||
38 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | ||
39 | CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y | ||
40 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | ||
41 | CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y | ||
42 | CONFIG_ARCH_HIBERNATION_POSSIBLE=y | ||
43 | CONFIG_ARCH_SUSPEND_POSSIBLE=y | ||
44 | CONFIG_ZONE_DMA32=y | ||
45 | CONFIG_ARCH_POPULATES_NODE_MAP=y | ||
46 | CONFIG_AUDIT_ARCH=y | ||
47 | CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y | ||
48 | CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y | ||
49 | CONFIG_HAVE_EARLY_RES=y | ||
50 | CONFIG_GENERIC_HARDIRQS=y | ||
51 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
52 | CONFIG_GENERIC_IRQ_PROBE=y | ||
53 | CONFIG_GENERIC_PENDING_IRQ=y | ||
54 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
55 | CONFIG_X86_64_SMP=y | ||
56 | CONFIG_X86_HT=y | ||
57 | CONFIG_X86_TRAMPOLINE=y | ||
58 | CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" | ||
59 | # CONFIG_KTIME_SCALAR is not set | ||
60 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
61 | CONFIG_CONSTRUCTORS=y | ||
62 | |||
63 | # | ||
64 | # General setup | ||
65 | # | ||
66 | CONFIG_EXPERIMENTAL=y | ||
67 | CONFIG_LOCK_KERNEL=y | ||
68 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
69 | CONFIG_CROSS_COMPILE="" | ||
70 | CONFIG_LOCALVERSION="" | ||
71 | # CONFIG_LOCALVERSION_AUTO is not set | ||
72 | CONFIG_HAVE_KERNEL_GZIP=y | ||
73 | CONFIG_HAVE_KERNEL_BZIP2=y | ||
74 | CONFIG_HAVE_KERNEL_LZMA=y | ||
75 | CONFIG_HAVE_KERNEL_LZO=y | ||
76 | CONFIG_KERNEL_GZIP=y | ||
77 | # CONFIG_KERNEL_BZIP2 is not set | ||
78 | # CONFIG_KERNEL_LZMA is not set | ||
79 | # CONFIG_KERNEL_LZO is not set | ||
80 | CONFIG_SWAP=y | ||
81 | CONFIG_SYSVIPC=y | ||
82 | CONFIG_SYSVIPC_SYSCTL=y | ||
83 | CONFIG_POSIX_MQUEUE=y | ||
84 | CONFIG_POSIX_MQUEUE_SYSCTL=y | ||
85 | CONFIG_BSD_PROCESS_ACCT=y | ||
86 | CONFIG_BSD_PROCESS_ACCT_V3=y | ||
87 | # CONFIG_TASKSTATS is not set | ||
88 | # CONFIG_AUDIT is not set | ||
89 | |||
90 | # | ||
91 | # RCU Subsystem | ||
92 | # | ||
93 | CONFIG_TREE_RCU=y | ||
94 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
95 | # CONFIG_RCU_TRACE is not set | ||
96 | CONFIG_RCU_FANOUT=32 | ||
97 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
98 | # CONFIG_TREE_RCU_TRACE is not set | ||
99 | CONFIG_IKCONFIG=y | ||
100 | CONFIG_IKCONFIG_PROC=y | ||
101 | CONFIG_LOG_BUF_SHIFT=17 | ||
102 | CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y | ||
103 | # CONFIG_CGROUPS is not set | ||
104 | # CONFIG_SYSFS_DEPRECATED_V2 is not set | ||
105 | # CONFIG_RELAY is not set | ||
106 | CONFIG_NAMESPACES=y | ||
107 | # CONFIG_UTS_NS is not set | ||
108 | # CONFIG_IPC_NS is not set | ||
109 | # CONFIG_USER_NS is not set | ||
110 | # CONFIG_PID_NS is not set | ||
111 | # CONFIG_NET_NS is not set | ||
112 | # CONFIG_BLK_DEV_INITRD is not set | ||
113 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
114 | CONFIG_SYSCTL=y | ||
115 | CONFIG_ANON_INODES=y | ||
116 | # CONFIG_EMBEDDED is not set | ||
117 | CONFIG_SYSCTL_SYSCALL=y | ||
118 | CONFIG_KALLSYMS=y | ||
119 | CONFIG_KALLSYMS_ALL=y | ||
120 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
121 | CONFIG_HOTPLUG=y | ||
122 | CONFIG_PRINTK=y | ||
123 | CONFIG_BUG=y | ||
124 | CONFIG_ELF_CORE=y | ||
125 | CONFIG_PCSPKR_PLATFORM=y | ||
126 | CONFIG_BASE_FULL=y | ||
127 | CONFIG_FUTEX=y | ||
128 | CONFIG_EPOLL=y | ||
129 | CONFIG_SIGNALFD=y | ||
130 | CONFIG_TIMERFD=y | ||
131 | CONFIG_EVENTFD=y | ||
132 | CONFIG_SHMEM=y | ||
133 | CONFIG_AIO=y | ||
134 | CONFIG_HAVE_PERF_EVENTS=y | ||
135 | |||
136 | # | ||
137 | # Kernel Performance Events And Counters | ||
138 | # | ||
139 | CONFIG_PERF_EVENTS=y | ||
140 | CONFIG_PERF_COUNTERS=y | ||
141 | # CONFIG_DEBUG_PERF_USE_VMALLOC is not set | ||
142 | CONFIG_VM_EVENT_COUNTERS=y | ||
143 | CONFIG_PCI_QUIRKS=y | ||
144 | CONFIG_SLUB_DEBUG=y | ||
145 | # CONFIG_COMPAT_BRK is not set | ||
146 | # CONFIG_SLAB is not set | ||
147 | CONFIG_SLUB=y | ||
148 | # CONFIG_PROFILING is not set | ||
149 | CONFIG_HAVE_OPROFILE=y | ||
150 | # CONFIG_KPROBES is not set | ||
151 | CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y | ||
152 | CONFIG_HAVE_IOREMAP_PROT=y | ||
153 | CONFIG_HAVE_KPROBES=y | ||
154 | CONFIG_HAVE_KRETPROBES=y | ||
155 | CONFIG_HAVE_OPTPROBES=y | ||
156 | CONFIG_HAVE_ARCH_TRACEHOOK=y | ||
157 | CONFIG_HAVE_DMA_ATTRS=y | ||
158 | CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y | ||
159 | CONFIG_HAVE_DMA_API_DEBUG=y | ||
160 | CONFIG_HAVE_HW_BREAKPOINT=y | ||
161 | CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y | ||
162 | CONFIG_HAVE_USER_RETURN_NOTIFIER=y | ||
163 | CONFIG_HAVE_PERF_EVENTS_NMI=y | ||
164 | |||
165 | # | ||
166 | # GCOV-based kernel profiling | ||
167 | # | ||
168 | # CONFIG_GCOV_KERNEL is not set | ||
169 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
170 | CONFIG_SLABINFO=y | ||
171 | CONFIG_RT_MUTEXES=y | ||
172 | CONFIG_BASE_SMALL=0 | ||
173 | CONFIG_MODULES=y | ||
174 | CONFIG_MODULE_FORCE_LOAD=y | ||
175 | CONFIG_MODULE_UNLOAD=y | ||
176 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
177 | CONFIG_MODVERSIONS=y | ||
178 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
179 | CONFIG_STOP_MACHINE=y | ||
180 | CONFIG_BLOCK=y | ||
181 | # CONFIG_BLK_DEV_BSG is not set | ||
182 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
183 | |||
184 | # | ||
185 | # IO Schedulers | ||
186 | # | ||
187 | CONFIG_IOSCHED_NOOP=y | ||
188 | CONFIG_IOSCHED_DEADLINE=y | ||
189 | CONFIG_IOSCHED_CFQ=y | ||
190 | # CONFIG_DEFAULT_DEADLINE is not set | ||
191 | CONFIG_DEFAULT_CFQ=y | ||
192 | # CONFIG_DEFAULT_NOOP is not set | ||
193 | CONFIG_DEFAULT_IOSCHED="cfq" | ||
194 | # CONFIG_INLINE_SPIN_TRYLOCK is not set | ||
195 | # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | ||
196 | # CONFIG_INLINE_SPIN_LOCK is not set | ||
197 | # CONFIG_INLINE_SPIN_LOCK_BH is not set | ||
198 | # CONFIG_INLINE_SPIN_LOCK_IRQ is not set | ||
199 | # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | ||
200 | # CONFIG_INLINE_SPIN_UNLOCK is not set | ||
201 | # CONFIG_INLINE_SPIN_UNLOCK_BH is not set | ||
202 | # CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set | ||
203 | # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | ||
204 | # CONFIG_INLINE_READ_TRYLOCK is not set | ||
205 | # CONFIG_INLINE_READ_LOCK is not set | ||
206 | # CONFIG_INLINE_READ_LOCK_BH is not set | ||
207 | # CONFIG_INLINE_READ_LOCK_IRQ is not set | ||
208 | # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | ||
209 | # CONFIG_INLINE_READ_UNLOCK is not set | ||
210 | # CONFIG_INLINE_READ_UNLOCK_BH is not set | ||
211 | # CONFIG_INLINE_READ_UNLOCK_IRQ is not set | ||
212 | # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | ||
213 | # CONFIG_INLINE_WRITE_TRYLOCK is not set | ||
214 | # CONFIG_INLINE_WRITE_LOCK is not set | ||
215 | # CONFIG_INLINE_WRITE_LOCK_BH is not set | ||
216 | # CONFIG_INLINE_WRITE_LOCK_IRQ is not set | ||
217 | # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | ||
218 | # CONFIG_INLINE_WRITE_UNLOCK is not set | ||
219 | # CONFIG_INLINE_WRITE_UNLOCK_BH is not set | ||
220 | # CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set | ||
221 | # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | ||
222 | # CONFIG_MUTEX_SPIN_ON_OWNER is not set | ||
223 | # CONFIG_FREEZER is not set | ||
224 | |||
225 | # | ||
226 | # Processor type and features | ||
227 | # | ||
228 | CONFIG_TICK_ONESHOT=y | ||
229 | # CONFIG_NO_HZ is not set | ||
230 | CONFIG_HIGH_RES_TIMERS=y | ||
231 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
232 | CONFIG_SMP=y | ||
233 | # CONFIG_SPARSE_IRQ is not set | ||
234 | # CONFIG_X86_MPPARSE is not set | ||
235 | # CONFIG_X86_EXTENDED_PLATFORM is not set | ||
236 | CONFIG_SCHED_OMIT_FRAME_POINTER=y | ||
237 | # CONFIG_PARAVIRT_GUEST is not set | ||
238 | CONFIG_NO_BOOTMEM=y | ||
239 | # CONFIG_MEMTEST is not set | ||
240 | # CONFIG_MK8 is not set | ||
241 | # CONFIG_MPSC is not set | ||
242 | CONFIG_MCORE2=y | ||
243 | # CONFIG_MATOM is not set | ||
244 | # CONFIG_GENERIC_CPU is not set | ||
245 | CONFIG_X86_CPU=y | ||
246 | CONFIG_X86_INTERNODE_CACHE_SHIFT=6 | ||
247 | CONFIG_X86_CMPXCHG=y | ||
248 | CONFIG_X86_L1_CACHE_SHIFT=6 | ||
249 | CONFIG_X86_XADD=y | ||
250 | CONFIG_X86_WP_WORKS_OK=y | ||
251 | CONFIG_X86_INTEL_USERCOPY=y | ||
252 | CONFIG_X86_USE_PPRO_CHECKSUM=y | ||
253 | CONFIG_X86_P6_NOP=y | ||
254 | CONFIG_X86_TSC=y | ||
255 | CONFIG_X86_CMPXCHG64=y | ||
256 | CONFIG_X86_CMOV=y | ||
257 | CONFIG_X86_MINIMUM_CPU_FAMILY=64 | ||
258 | CONFIG_X86_DEBUGCTLMSR=y | ||
259 | CONFIG_CPU_SUP_INTEL=y | ||
260 | CONFIG_CPU_SUP_AMD=y | ||
261 | CONFIG_CPU_SUP_CENTAUR=y | ||
262 | CONFIG_HPET_TIMER=y | ||
263 | CONFIG_HPET_EMULATE_RTC=y | ||
264 | CONFIG_DMI=y | ||
265 | CONFIG_GART_IOMMU=y | ||
266 | # CONFIG_CALGARY_IOMMU is not set | ||
267 | # CONFIG_AMD_IOMMU is not set | ||
268 | CONFIG_SWIOTLB=y | ||
269 | CONFIG_IOMMU_HELPER=y | ||
270 | # CONFIG_IOMMU_API is not set | ||
271 | # CONFIG_MAXSMP is not set | ||
272 | CONFIG_NR_CPUS=8 | ||
273 | # CONFIG_SCHED_SMT is not set | ||
274 | CONFIG_SCHED_MC=y | ||
275 | # CONFIG_PREEMPT_NONE is not set | ||
276 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
277 | CONFIG_PREEMPT=y | ||
278 | CONFIG_X86_LOCAL_APIC=y | ||
279 | CONFIG_X86_IO_APIC=y | ||
280 | # CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS is not set | ||
281 | # CONFIG_X86_MCE is not set | ||
282 | # CONFIG_I8K is not set | ||
283 | # CONFIG_MICROCODE is not set | ||
284 | CONFIG_X86_MSR=y | ||
285 | CONFIG_X86_CPUID=y | ||
286 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y | ||
287 | CONFIG_DIRECT_GBPAGES=y | ||
288 | # CONFIG_NUMA is not set | ||
289 | CONFIG_ARCH_PROC_KCORE_TEXT=y | ||
290 | CONFIG_ARCH_SPARSEMEM_DEFAULT=y | ||
291 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | ||
292 | CONFIG_ARCH_SELECT_MEMORY_MODEL=y | ||
293 | CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 | ||
294 | CONFIG_SELECT_MEMORY_MODEL=y | ||
295 | CONFIG_SPARSEMEM_MANUAL=y | ||
296 | CONFIG_SPARSEMEM=y | ||
297 | CONFIG_HAVE_MEMORY_PRESENT=y | ||
298 | CONFIG_SPARSEMEM_EXTREME=y | ||
299 | CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y | ||
300 | CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y | ||
301 | CONFIG_SPARSEMEM_VMEMMAP=y | ||
302 | # CONFIG_MEMORY_HOTPLUG is not set | ||
303 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
304 | CONFIG_SPLIT_PTLOCK_CPUS=999999 | ||
305 | CONFIG_PHYS_ADDR_T_64BIT=y | ||
306 | CONFIG_ZONE_DMA_FLAG=1 | ||
307 | CONFIG_BOUNCE=y | ||
308 | CONFIG_VIRT_TO_BUS=y | ||
309 | # CONFIG_KSM is not set | ||
310 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
311 | # CONFIG_X86_CHECK_BIOS_CORRUPTION is not set | ||
312 | CONFIG_X86_RESERVE_LOW_64K=y | ||
313 | CONFIG_MTRR=y | ||
314 | CONFIG_MTRR_SANITIZER=y | ||
315 | CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 | ||
316 | CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 | ||
317 | CONFIG_X86_PAT=y | ||
318 | CONFIG_ARCH_USES_PG_UNCACHED=y | ||
319 | # CONFIG_EFI is not set | ||
320 | CONFIG_SECCOMP=y | ||
321 | # CONFIG_CC_STACKPROTECTOR is not set | ||
322 | # CONFIG_HZ_100 is not set | ||
323 | # CONFIG_HZ_250 is not set | ||
324 | # CONFIG_HZ_300 is not set | ||
325 | CONFIG_HZ_1000=y | ||
326 | CONFIG_HZ=1000 | ||
327 | CONFIG_SCHED_HRTICK=y | ||
328 | # CONFIG_KEXEC is not set | ||
329 | # CONFIG_CRASH_DUMP is not set | ||
330 | CONFIG_PHYSICAL_START=0x1000000 | ||
331 | # CONFIG_RELOCATABLE is not set | ||
332 | CONFIG_PHYSICAL_ALIGN=0x1000000 | ||
333 | # CONFIG_HOTPLUG_CPU is not set | ||
334 | # CONFIG_CMDLINE_BOOL is not set | ||
335 | CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y | ||
336 | |||
337 | # | ||
338 | # Power management and ACPI options | ||
339 | # | ||
340 | CONFIG_PM=y | ||
341 | # CONFIG_PM_DEBUG is not set | ||
342 | # CONFIG_SUSPEND is not set | ||
343 | # CONFIG_HIBERNATION is not set | ||
344 | # CONFIG_PM_RUNTIME is not set | ||
345 | CONFIG_ACPI=y | ||
346 | # CONFIG_ACPI_PROCFS is not set | ||
347 | # CONFIG_ACPI_PROCFS_POWER is not set | ||
348 | CONFIG_ACPI_SYSFS_POWER=y | ||
349 | # CONFIG_ACPI_EC_DEBUGFS is not set | ||
350 | # CONFIG_ACPI_PROC_EVENT is not set | ||
351 | CONFIG_ACPI_AC=y | ||
352 | # CONFIG_ACPI_BATTERY is not set | ||
353 | CONFIG_ACPI_BUTTON=y | ||
354 | CONFIG_ACPI_FAN=y | ||
355 | CONFIG_ACPI_DOCK=y | ||
356 | CONFIG_ACPI_PROCESSOR=y | ||
357 | # CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set | ||
358 | CONFIG_ACPI_THERMAL=y | ||
359 | # CONFIG_ACPI_CUSTOM_DSDT is not set | ||
360 | CONFIG_ACPI_BLACKLIST_YEAR=0 | ||
361 | # CONFIG_ACPI_DEBUG is not set | ||
362 | # CONFIG_ACPI_PCI_SLOT is not set | ||
363 | CONFIG_X86_PM_TIMER=y | ||
364 | # CONFIG_ACPI_CONTAINER is not set | ||
365 | # CONFIG_ACPI_SBS is not set | ||
366 | # CONFIG_ACPI_HED is not set | ||
367 | # CONFIG_ACPI_APEI is not set | ||
368 | # CONFIG_SFI is not set | ||
369 | |||
370 | # | ||
371 | # CPU Frequency scaling | ||
372 | # | ||
373 | CONFIG_CPU_FREQ=y | ||
374 | CONFIG_CPU_FREQ_TABLE=y | ||
375 | # CONFIG_CPU_FREQ_DEBUG is not set | ||
376 | # CONFIG_CPU_FREQ_STAT is not set | ||
377 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
378 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
379 | # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set | ||
380 | # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set | ||
381 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
382 | # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set | ||
383 | # CONFIG_CPU_FREQ_GOV_USERSPACE is not set | ||
384 | # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set | ||
385 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
386 | |||
387 | # | ||
388 | # CPUFreq processor drivers | ||
389 | # | ||
390 | # CONFIG_X86_PCC_CPUFREQ is not set | ||
391 | CONFIG_X86_ACPI_CPUFREQ=y | ||
392 | # CONFIG_X86_POWERNOW_K8 is not set | ||
393 | # CONFIG_X86_SPEEDSTEP_CENTRINO is not set | ||
394 | # CONFIG_X86_P4_CLOCKMOD is not set | ||
395 | |||
396 | # | ||
397 | # shared options | ||
398 | # | ||
399 | # CONFIG_X86_SPEEDSTEP_LIB is not set | ||
400 | CONFIG_CPU_IDLE=y | ||
401 | CONFIG_CPU_IDLE_GOV_LADDER=y | ||
402 | # CONFIG_INTEL_IDLE is not set | ||
403 | |||
404 | # | ||
405 | # Memory power savings | ||
406 | # | ||
407 | # CONFIG_I7300_IDLE is not set | ||
408 | |||
409 | # | ||
410 | # Bus options (PCI etc.) | ||
411 | # | ||
412 | CONFIG_PCI=y | ||
413 | CONFIG_PCI_DIRECT=y | ||
414 | CONFIG_PCI_MMCONFIG=y | ||
415 | CONFIG_PCI_DOMAINS=y | ||
416 | # CONFIG_PCI_CNB20LE_QUIRK is not set | ||
417 | # CONFIG_PCIEPORTBUS is not set | ||
418 | CONFIG_ARCH_SUPPORTS_MSI=y | ||
419 | # CONFIG_PCI_MSI is not set | ||
420 | # CONFIG_PCI_DEBUG is not set | ||
421 | # CONFIG_PCI_STUB is not set | ||
422 | CONFIG_HT_IRQ=y | ||
423 | # CONFIG_PCI_IOV is not set | ||
424 | CONFIG_PCI_IOAPIC=y | ||
425 | CONFIG_ISA_DMA_API=y | ||
426 | CONFIG_K8_NB=y | ||
427 | # CONFIG_PCCARD is not set | ||
428 | # CONFIG_HOTPLUG_PCI is not set | ||
429 | |||
430 | # | ||
431 | # Executable file formats / Emulations | ||
432 | # | ||
433 | CONFIG_BINFMT_ELF=y | ||
434 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
435 | # CONFIG_HAVE_AOUT is not set | ||
436 | CONFIG_BINFMT_MISC=y | ||
437 | # CONFIG_IA32_EMULATION is not set | ||
438 | # CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set | ||
439 | CONFIG_NET=y | ||
440 | |||
441 | # | ||
442 | # Networking options | ||
443 | # | ||
444 | CONFIG_PACKET=y | ||
445 | CONFIG_UNIX=y | ||
446 | CONFIG_XFRM=y | ||
447 | CONFIG_XFRM_USER=y | ||
448 | # CONFIG_XFRM_SUB_POLICY is not set | ||
449 | # CONFIG_XFRM_MIGRATE is not set | ||
450 | # CONFIG_XFRM_STATISTICS is not set | ||
451 | CONFIG_XFRM_IPCOMP=y | ||
452 | CONFIG_NET_KEY=y | ||
453 | # CONFIG_NET_KEY_MIGRATE is not set | ||
454 | CONFIG_INET=y | ||
455 | CONFIG_IP_MULTICAST=y | ||
456 | CONFIG_IP_ADVANCED_ROUTER=y | ||
457 | CONFIG_ASK_IP_FIB_HASH=y | ||
458 | # CONFIG_IP_FIB_TRIE is not set | ||
459 | CONFIG_IP_FIB_HASH=y | ||
460 | CONFIG_IP_MULTIPLE_TABLES=y | ||
461 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
462 | CONFIG_IP_ROUTE_VERBOSE=y | ||
463 | # CONFIG_IP_PNP is not set | ||
464 | CONFIG_NET_IPIP=y | ||
465 | CONFIG_NET_IPGRE=y | ||
466 | CONFIG_NET_IPGRE_BROADCAST=y | ||
467 | CONFIG_IP_MROUTE=y | ||
468 | # CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set | ||
469 | CONFIG_IP_PIMSM_V1=y | ||
470 | CONFIG_IP_PIMSM_V2=y | ||
471 | # CONFIG_ARPD is not set | ||
472 | CONFIG_SYN_COOKIES=y | ||
473 | CONFIG_INET_AH=y | ||
474 | CONFIG_INET_ESP=y | ||
475 | CONFIG_INET_IPCOMP=y | ||
476 | CONFIG_INET_XFRM_TUNNEL=y | ||
477 | CONFIG_INET_TUNNEL=y | ||
478 | CONFIG_INET_XFRM_MODE_TRANSPORT=y | ||
479 | CONFIG_INET_XFRM_MODE_TUNNEL=y | ||
480 | CONFIG_INET_XFRM_MODE_BEET=y | ||
481 | CONFIG_INET_LRO=y | ||
482 | CONFIG_INET_DIAG=y | ||
483 | CONFIG_INET_TCP_DIAG=y | ||
484 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
485 | CONFIG_TCP_CONG_CUBIC=y | ||
486 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
487 | # CONFIG_TCP_MD5SIG is not set | ||
488 | # CONFIG_IPV6 is not set | ||
489 | CONFIG_NETWORK_SECMARK=y | ||
490 | # CONFIG_NETWORK_PHY_TIMESTAMPING is not set | ||
491 | CONFIG_NETFILTER=y | ||
492 | # CONFIG_NETFILTER_DEBUG is not set | ||
493 | CONFIG_NETFILTER_ADVANCED=y | ||
494 | |||
495 | # | ||
496 | # Core Netfilter Configuration | ||
497 | # | ||
498 | CONFIG_NETFILTER_NETLINK=y | ||
499 | CONFIG_NETFILTER_NETLINK_QUEUE=y | ||
500 | CONFIG_NETFILTER_NETLINK_LOG=y | ||
501 | CONFIG_NF_CONNTRACK=y | ||
502 | CONFIG_NF_CONNTRACK_MARK=y | ||
503 | CONFIG_NF_CONNTRACK_SECMARK=y | ||
504 | CONFIG_NF_CONNTRACK_EVENTS=y | ||
505 | # CONFIG_NF_CT_PROTO_DCCP is not set | ||
506 | # CONFIG_NF_CT_PROTO_SCTP is not set | ||
507 | # CONFIG_NF_CT_PROTO_UDPLITE is not set | ||
508 | CONFIG_NF_CONNTRACK_AMANDA=y | ||
509 | CONFIG_NF_CONNTRACK_FTP=y | ||
510 | # CONFIG_NF_CONNTRACK_H323 is not set | ||
511 | # CONFIG_NF_CONNTRACK_IRC is not set | ||
512 | CONFIG_NF_CONNTRACK_NETBIOS_NS=y | ||
513 | # CONFIG_NF_CONNTRACK_PPTP is not set | ||
514 | # CONFIG_NF_CONNTRACK_SANE is not set | ||
515 | # CONFIG_NF_CONNTRACK_SIP is not set | ||
516 | CONFIG_NF_CONNTRACK_TFTP=y | ||
517 | # CONFIG_NF_CT_NETLINK is not set | ||
518 | CONFIG_NETFILTER_XTABLES=y | ||
519 | |||
520 | # | ||
521 | # Xtables combined modules | ||
522 | # | ||
523 | CONFIG_NETFILTER_XT_MARK=y | ||
524 | CONFIG_NETFILTER_XT_CONNMARK=y | ||
525 | |||
526 | # | ||
527 | # Xtables targets | ||
528 | # | ||
529 | CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y | ||
530 | CONFIG_NETFILTER_XT_TARGET_CONNMARK=y | ||
531 | # CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set | ||
532 | # CONFIG_NETFILTER_XT_TARGET_CT is not set | ||
533 | CONFIG_NETFILTER_XT_TARGET_HL=y | ||
534 | # CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set | ||
535 | CONFIG_NETFILTER_XT_TARGET_MARK=y | ||
536 | CONFIG_NETFILTER_XT_TARGET_NFLOG=y | ||
537 | CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y | ||
538 | CONFIG_NETFILTER_XT_TARGET_NOTRACK=y | ||
539 | CONFIG_NETFILTER_XT_TARGET_RATEEST=y | ||
540 | # CONFIG_NETFILTER_XT_TARGET_TEE is not set | ||
541 | CONFIG_NETFILTER_XT_TARGET_TRACE=y | ||
542 | # CONFIG_NETFILTER_XT_TARGET_SECMARK is not set | ||
543 | CONFIG_NETFILTER_XT_TARGET_TCPMSS=y | ||
544 | |||
545 | # | ||
546 | # Xtables matches | ||
547 | # | ||
548 | # CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set | ||
549 | # CONFIG_NETFILTER_XT_MATCH_COMMENT is not set | ||
550 | CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y | ||
551 | CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y | ||
552 | CONFIG_NETFILTER_XT_MATCH_CONNMARK=y | ||
553 | CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y | ||
554 | # CONFIG_NETFILTER_XT_MATCH_CPU is not set | ||
555 | # CONFIG_NETFILTER_XT_MATCH_DCCP is not set | ||
556 | # CONFIG_NETFILTER_XT_MATCH_DSCP is not set | ||
557 | # CONFIG_NETFILTER_XT_MATCH_ESP is not set | ||
558 | # CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set | ||
559 | # CONFIG_NETFILTER_XT_MATCH_HELPER is not set | ||
560 | CONFIG_NETFILTER_XT_MATCH_HL=y | ||
561 | CONFIG_NETFILTER_XT_MATCH_IPRANGE=y | ||
562 | CONFIG_NETFILTER_XT_MATCH_LENGTH=y | ||
563 | CONFIG_NETFILTER_XT_MATCH_LIMIT=y | ||
564 | CONFIG_NETFILTER_XT_MATCH_MAC=y | ||
565 | CONFIG_NETFILTER_XT_MATCH_MARK=y | ||
566 | CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y | ||
567 | # CONFIG_NETFILTER_XT_MATCH_OSF is not set | ||
568 | CONFIG_NETFILTER_XT_MATCH_OWNER=y | ||
569 | # CONFIG_NETFILTER_XT_MATCH_POLICY is not set | ||
570 | CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y | ||
571 | # CONFIG_NETFILTER_XT_MATCH_QUOTA is not set | ||
572 | # CONFIG_NETFILTER_XT_MATCH_RATEEST is not set | ||
573 | # CONFIG_NETFILTER_XT_MATCH_REALM is not set | ||
574 | # CONFIG_NETFILTER_XT_MATCH_RECENT is not set | ||
575 | # CONFIG_NETFILTER_XT_MATCH_SCTP is not set | ||
576 | # CONFIG_NETFILTER_XT_MATCH_STATE is not set | ||
577 | # CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set | ||
578 | CONFIG_NETFILTER_XT_MATCH_STRING=y | ||
579 | CONFIG_NETFILTER_XT_MATCH_TCPMSS=y | ||
580 | CONFIG_NETFILTER_XT_MATCH_TIME=y | ||
581 | CONFIG_NETFILTER_XT_MATCH_U32=y | ||
582 | # CONFIG_IP_VS is not set | ||
583 | |||
584 | # | ||
585 | # IP: Netfilter Configuration | ||
586 | # | ||
587 | CONFIG_NF_DEFRAG_IPV4=y | ||
588 | CONFIG_NF_CONNTRACK_IPV4=y | ||
589 | CONFIG_NF_CONNTRACK_PROC_COMPAT=y | ||
590 | # CONFIG_IP_NF_QUEUE is not set | ||
591 | CONFIG_IP_NF_IPTABLES=y | ||
592 | CONFIG_IP_NF_MATCH_ADDRTYPE=y | ||
593 | CONFIG_IP_NF_MATCH_AH=y | ||
594 | CONFIG_IP_NF_MATCH_ECN=y | ||
595 | CONFIG_IP_NF_MATCH_TTL=y | ||
596 | CONFIG_IP_NF_FILTER=y | ||
597 | CONFIG_IP_NF_TARGET_REJECT=y | ||
598 | CONFIG_IP_NF_TARGET_LOG=y | ||
599 | CONFIG_IP_NF_TARGET_ULOG=y | ||
600 | # CONFIG_NF_NAT is not set | ||
601 | # CONFIG_IP_NF_MANGLE is not set | ||
602 | CONFIG_IP_NF_TARGET_TTL=y | ||
603 | CONFIG_IP_NF_RAW=y | ||
604 | CONFIG_IP_NF_ARPTABLES=y | ||
605 | CONFIG_IP_NF_ARPFILTER=y | ||
606 | CONFIG_IP_NF_ARP_MANGLE=y | ||
607 | # CONFIG_IP_DCCP is not set | ||
608 | # CONFIG_IP_SCTP is not set | ||
609 | # CONFIG_RDS is not set | ||
610 | # CONFIG_TIPC is not set | ||
611 | # CONFIG_ATM is not set | ||
612 | # CONFIG_L2TP is not set | ||
613 | # CONFIG_BRIDGE is not set | ||
614 | # CONFIG_NET_DSA is not set | ||
615 | # CONFIG_VLAN_8021Q is not set | ||
616 | # CONFIG_DECNET is not set | ||
617 | # CONFIG_LLC2 is not set | ||
618 | # CONFIG_IPX is not set | ||
619 | # CONFIG_ATALK is not set | ||
620 | # CONFIG_X25 is not set | ||
621 | # CONFIG_LAPB is not set | ||
622 | # CONFIG_ECONET is not set | ||
623 | # CONFIG_WAN_ROUTER is not set | ||
624 | # CONFIG_PHONET is not set | ||
625 | # CONFIG_IEEE802154 is not set | ||
626 | # CONFIG_NET_SCHED is not set | ||
627 | # CONFIG_DCB is not set | ||
628 | CONFIG_RPS=y | ||
629 | |||
630 | # | ||
631 | # Network testing | ||
632 | # | ||
633 | # CONFIG_NET_PKTGEN is not set | ||
634 | # CONFIG_HAMRADIO is not set | ||
635 | # CONFIG_CAN is not set | ||
636 | # CONFIG_IRDA is not set | ||
637 | # CONFIG_BT is not set | ||
638 | # CONFIG_AF_RXRPC is not set | ||
639 | CONFIG_FIB_RULES=y | ||
640 | CONFIG_WIRELESS=y | ||
641 | # CONFIG_CFG80211 is not set | ||
642 | # CONFIG_LIB80211 is not set | ||
643 | |||
644 | # | ||
645 | # CFG80211 needs to be enabled for MAC80211 | ||
646 | # | ||
647 | |||
648 | # | ||
649 | # Some wireless drivers require a rate control algorithm | ||
650 | # | ||
651 | # CONFIG_WIMAX is not set | ||
652 | # CONFIG_RFKILL is not set | ||
653 | # CONFIG_NET_9P is not set | ||
654 | # CONFIG_CAIF is not set | ||
655 | |||
656 | # | ||
657 | # Device Drivers | ||
658 | # | ||
659 | |||
660 | # | ||
661 | # Generic Driver Options | ||
662 | # | ||
663 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
664 | # CONFIG_DEVTMPFS is not set | ||
665 | CONFIG_STANDALONE=y | ||
666 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
667 | CONFIG_FW_LOADER=y | ||
668 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
669 | CONFIG_EXTRA_FIRMWARE="" | ||
670 | # CONFIG_DEBUG_DRIVER is not set | ||
671 | # CONFIG_DEBUG_DEVRES is not set | ||
672 | # CONFIG_SYS_HYPERVISOR is not set | ||
673 | # CONFIG_CONNECTOR is not set | ||
674 | # CONFIG_MTD is not set | ||
675 | # CONFIG_PARPORT is not set | ||
676 | CONFIG_PNP=y | ||
677 | # CONFIG_PNP_DEBUG_MESSAGES is not set | ||
678 | |||
679 | # | ||
680 | # Protocols | ||
681 | # | ||
682 | CONFIG_PNPACPI=y | ||
683 | CONFIG_BLK_DEV=y | ||
684 | CONFIG_BLK_DEV_FD=y | ||
685 | # CONFIG_BLK_CPQ_DA is not set | ||
686 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
687 | # CONFIG_BLK_DEV_DAC960 is not set | ||
688 | # CONFIG_BLK_DEV_UMEM is not set | ||
689 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
690 | CONFIG_BLK_DEV_LOOP=y | ||
691 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
692 | |||
693 | # | ||
694 | # DRBD disabled because PROC_FS, INET or CONNECTOR not selected | ||
695 | # | ||
696 | # CONFIG_BLK_DEV_NBD is not set | ||
697 | # CONFIG_BLK_DEV_SX8 is not set | ||
698 | # CONFIG_BLK_DEV_UB is not set | ||
699 | # CONFIG_BLK_DEV_RAM is not set | ||
700 | CONFIG_CDROM_PKTCDVD=y | ||
701 | CONFIG_CDROM_PKTCDVD_BUFFERS=8 | ||
702 | # CONFIG_CDROM_PKTCDVD_WCACHE is not set | ||
703 | # CONFIG_ATA_OVER_ETH is not set | ||
704 | # CONFIG_BLK_DEV_HD is not set | ||
705 | # CONFIG_MISC_DEVICES is not set | ||
706 | CONFIG_HAVE_IDE=y | ||
707 | CONFIG_IDE=y | ||
708 | |||
709 | # | ||
710 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | ||
711 | # | ||
712 | CONFIG_IDE_XFER_MODE=y | ||
713 | CONFIG_IDE_ATAPI=y | ||
714 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
715 | CONFIG_IDE_GD=y | ||
716 | CONFIG_IDE_GD_ATA=y | ||
717 | # CONFIG_IDE_GD_ATAPI is not set | ||
718 | CONFIG_BLK_DEV_IDECD=y | ||
719 | CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y | ||
720 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
721 | CONFIG_BLK_DEV_IDEACPI=y | ||
722 | # CONFIG_IDE_TASK_IOCTL is not set | ||
723 | CONFIG_IDE_PROC_FS=y | ||
724 | |||
725 | # | ||
726 | # IDE chipset support/bugfixes | ||
727 | # | ||
728 | CONFIG_IDE_GENERIC=y | ||
729 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
730 | # CONFIG_BLK_DEV_CMD640 is not set | ||
731 | CONFIG_BLK_DEV_IDEPNP=y | ||
732 | CONFIG_BLK_DEV_IDEDMA_SFF=y | ||
733 | |||
734 | # | ||
735 | # PCI IDE chipsets support | ||
736 | # | ||
737 | CONFIG_BLK_DEV_IDEPCI=y | ||
738 | # CONFIG_IDEPCI_PCIBUS_ORDER is not set | ||
739 | # CONFIG_BLK_DEV_GENERIC is not set | ||
740 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
741 | # CONFIG_BLK_DEV_RZ1000 is not set | ||
742 | CONFIG_BLK_DEV_IDEDMA_PCI=y | ||
743 | # CONFIG_BLK_DEV_AEC62XX is not set | ||
744 | # CONFIG_BLK_DEV_ALI15X3 is not set | ||
745 | # CONFIG_BLK_DEV_AMD74XX is not set | ||
746 | # CONFIG_BLK_DEV_ATIIXP is not set | ||
747 | # CONFIG_BLK_DEV_CMD64X is not set | ||
748 | # CONFIG_BLK_DEV_TRIFLEX is not set | ||
749 | # CONFIG_BLK_DEV_CS5520 is not set | ||
750 | # CONFIG_BLK_DEV_CS5530 is not set | ||
751 | # CONFIG_BLK_DEV_HPT366 is not set | ||
752 | # CONFIG_BLK_DEV_JMICRON is not set | ||
753 | # CONFIG_BLK_DEV_SC1200 is not set | ||
754 | CONFIG_BLK_DEV_PIIX=y | ||
755 | # CONFIG_BLK_DEV_IT8172 is not set | ||
756 | # CONFIG_BLK_DEV_IT8213 is not set | ||
757 | # CONFIG_BLK_DEV_IT821X is not set | ||
758 | # CONFIG_BLK_DEV_NS87415 is not set | ||
759 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | ||
760 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | ||
761 | # CONFIG_BLK_DEV_SVWKS is not set | ||
762 | # CONFIG_BLK_DEV_SIIMAGE is not set | ||
763 | # CONFIG_BLK_DEV_SIS5513 is not set | ||
764 | # CONFIG_BLK_DEV_SLC90E66 is not set | ||
765 | # CONFIG_BLK_DEV_TRM290 is not set | ||
766 | # CONFIG_BLK_DEV_VIA82CXXX is not set | ||
767 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
768 | CONFIG_BLK_DEV_IDEDMA=y | ||
769 | |||
770 | # | ||
771 | # SCSI device support | ||
772 | # | ||
773 | CONFIG_SCSI_MOD=y | ||
774 | # CONFIG_RAID_ATTRS is not set | ||
775 | CONFIG_SCSI=y | ||
776 | CONFIG_SCSI_DMA=y | ||
777 | # CONFIG_SCSI_TGT is not set | ||
778 | # CONFIG_SCSI_NETLINK is not set | ||
779 | # CONFIG_SCSI_PROC_FS is not set | ||
780 | |||
781 | # | ||
782 | # SCSI support type (disk, tape, CD-ROM) | ||
783 | # | ||
784 | CONFIG_BLK_DEV_SD=y | ||
785 | # CONFIG_CHR_DEV_ST is not set | ||
786 | # CONFIG_CHR_DEV_OSST is not set | ||
787 | CONFIG_BLK_DEV_SR=y | ||
788 | # CONFIG_BLK_DEV_SR_VENDOR is not set | ||
789 | CONFIG_CHR_DEV_SG=y | ||
790 | # CONFIG_CHR_DEV_SCH is not set | ||
791 | # CONFIG_SCSI_MULTI_LUN is not set | ||
792 | # CONFIG_SCSI_CONSTANTS is not set | ||
793 | # CONFIG_SCSI_LOGGING is not set | ||
794 | CONFIG_SCSI_SCAN_ASYNC=y | ||
795 | CONFIG_SCSI_WAIT_SCAN=m | ||
796 | |||
797 | # | ||
798 | # SCSI Transports | ||
799 | # | ||
800 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
801 | # CONFIG_SCSI_FC_ATTRS is not set | ||
802 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
803 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
804 | # CONFIG_SCSI_SRP_ATTRS is not set | ||
805 | # CONFIG_SCSI_LOWLEVEL is not set | ||
806 | # CONFIG_SCSI_DH is not set | ||
807 | # CONFIG_SCSI_OSD_INITIATOR is not set | ||
808 | CONFIG_ATA=y | ||
809 | # CONFIG_ATA_NONSTANDARD is not set | ||
810 | CONFIG_ATA_VERBOSE_ERROR=y | ||
811 | CONFIG_ATA_ACPI=y | ||
812 | CONFIG_SATA_PMP=y | ||
813 | |||
814 | # | ||
815 | # Controllers with non-SFF native interface | ||
816 | # | ||
817 | CONFIG_SATA_AHCI=y | ||
818 | # CONFIG_SATA_AHCI_PLATFORM is not set | ||
819 | # CONFIG_SATA_INIC162X is not set | ||
820 | # CONFIG_SATA_SIL24 is not set | ||
821 | CONFIG_ATA_SFF=y | ||
822 | |||
823 | # | ||
824 | # SFF controllers with custom DMA interface | ||
825 | # | ||
826 | # CONFIG_PDC_ADMA is not set | ||
827 | # CONFIG_SATA_QSTOR is not set | ||
828 | # CONFIG_SATA_SX4 is not set | ||
829 | CONFIG_ATA_BMDMA=y | ||
830 | |||
831 | # | ||
832 | # SATA SFF controllers with BMDMA | ||
833 | # | ||
834 | CONFIG_ATA_PIIX=y | ||
835 | # CONFIG_SATA_MV is not set | ||
836 | # CONFIG_SATA_NV is not set | ||
837 | # CONFIG_SATA_PROMISE is not set | ||
838 | # CONFIG_SATA_SIL is not set | ||
839 | # CONFIG_SATA_SIS is not set | ||
840 | # CONFIG_SATA_SVW is not set | ||
841 | # CONFIG_SATA_ULI is not set | ||
842 | # CONFIG_SATA_VIA is not set | ||
843 | # CONFIG_SATA_VITESSE is not set | ||
844 | |||
845 | # | ||
846 | # PATA SFF controllers with BMDMA | ||
847 | # | ||
848 | # CONFIG_PATA_ALI is not set | ||
849 | # CONFIG_PATA_AMD is not set | ||
850 | # CONFIG_PATA_ARTOP is not set | ||
851 | # CONFIG_PATA_ATIIXP is not set | ||
852 | # CONFIG_PATA_ATP867X is not set | ||
853 | # CONFIG_PATA_CMD64X is not set | ||
854 | # CONFIG_PATA_CS5520 is not set | ||
855 | # CONFIG_PATA_CS5530 is not set | ||
856 | # CONFIG_PATA_CYPRESS is not set | ||
857 | # CONFIG_PATA_EFAR is not set | ||
858 | # CONFIG_PATA_HPT366 is not set | ||
859 | # CONFIG_PATA_HPT37X is not set | ||
860 | # CONFIG_PATA_HPT3X2N is not set | ||
861 | # CONFIG_PATA_HPT3X3 is not set | ||
862 | # CONFIG_PATA_IT8213 is not set | ||
863 | # CONFIG_PATA_IT821X is not set | ||
864 | # CONFIG_PATA_JMICRON is not set | ||
865 | # CONFIG_PATA_MARVELL is not set | ||
866 | # CONFIG_PATA_NETCELL is not set | ||
867 | # CONFIG_PATA_NINJA32 is not set | ||
868 | # CONFIG_PATA_NS87415 is not set | ||
869 | # CONFIG_PATA_OLDPIIX is not set | ||
870 | # CONFIG_PATA_OPTIDMA is not set | ||
871 | # CONFIG_PATA_PDC2027X is not set | ||
872 | # CONFIG_PATA_PDC_OLD is not set | ||
873 | # CONFIG_PATA_RADISYS is not set | ||
874 | # CONFIG_PATA_RDC is not set | ||
875 | # CONFIG_PATA_SC1200 is not set | ||
876 | # CONFIG_PATA_SCH is not set | ||
877 | # CONFIG_PATA_SERVERWORKS is not set | ||
878 | # CONFIG_PATA_SIL680 is not set | ||
879 | # CONFIG_PATA_SIS is not set | ||
880 | # CONFIG_PATA_TOSHIBA is not set | ||
881 | # CONFIG_PATA_TRIFLEX is not set | ||
882 | # CONFIG_PATA_VIA is not set | ||
883 | # CONFIG_PATA_WINBOND is not set | ||
884 | |||
885 | # | ||
886 | # PIO-only SFF controllers | ||
887 | # | ||
888 | # CONFIG_PATA_CMD640_PCI is not set | ||
889 | # CONFIG_PATA_MPIIX is not set | ||
890 | # CONFIG_PATA_NS87410 is not set | ||
891 | # CONFIG_PATA_OPTI is not set | ||
892 | # CONFIG_PATA_RZ1000 is not set | ||
893 | |||
894 | # | ||
895 | # Generic fallback / legacy drivers | ||
896 | # | ||
897 | # CONFIG_PATA_ACPI is not set | ||
898 | # CONFIG_ATA_GENERIC is not set | ||
899 | # CONFIG_PATA_LEGACY is not set | ||
900 | CONFIG_MD=y | ||
901 | # CONFIG_BLK_DEV_MD is not set | ||
902 | CONFIG_BLK_DEV_DM=y | ||
903 | # CONFIG_DM_DEBUG is not set | ||
904 | # CONFIG_DM_CRYPT is not set | ||
905 | # CONFIG_DM_SNAPSHOT is not set | ||
906 | # CONFIG_DM_MIRROR is not set | ||
907 | # CONFIG_DM_ZERO is not set | ||
908 | # CONFIG_DM_MULTIPATH is not set | ||
909 | # CONFIG_DM_DELAY is not set | ||
910 | # CONFIG_DM_UEVENT is not set | ||
911 | # CONFIG_FUSION is not set | ||
912 | |||
913 | # | ||
914 | # IEEE 1394 (FireWire) support | ||
915 | # | ||
916 | |||
917 | # | ||
918 | # You can enable one or both FireWire driver stacks. | ||
919 | # | ||
920 | |||
921 | # | ||
922 | # The newer stack is recommended. | ||
923 | # | ||
924 | # CONFIG_FIREWIRE is not set | ||
925 | # CONFIG_IEEE1394 is not set | ||
926 | # CONFIG_FIREWIRE_NOSY is not set | ||
927 | # CONFIG_I2O is not set | ||
928 | # CONFIG_MACINTOSH_DRIVERS is not set | ||
929 | CONFIG_NETDEVICES=y | ||
930 | CONFIG_DUMMY=y | ||
931 | # CONFIG_BONDING is not set | ||
932 | # CONFIG_MACVLAN is not set | ||
933 | # CONFIG_EQUALIZER is not set | ||
934 | CONFIG_TUN=y | ||
935 | # CONFIG_VETH is not set | ||
936 | # CONFIG_NET_SB1000 is not set | ||
937 | # CONFIG_ARCNET is not set | ||
938 | CONFIG_PHYLIB=y | ||
939 | |||
940 | # | ||
941 | # MII PHY device drivers | ||
942 | # | ||
943 | # CONFIG_MARVELL_PHY is not set | ||
944 | # CONFIG_DAVICOM_PHY is not set | ||
945 | # CONFIG_QSEMI_PHY is not set | ||
946 | # CONFIG_LXT_PHY is not set | ||
947 | # CONFIG_CICADA_PHY is not set | ||
948 | # CONFIG_VITESSE_PHY is not set | ||
949 | # CONFIG_SMSC_PHY is not set | ||
950 | # CONFIG_BROADCOM_PHY is not set | ||
951 | # CONFIG_ICPLUS_PHY is not set | ||
952 | # CONFIG_REALTEK_PHY is not set | ||
953 | # CONFIG_NATIONAL_PHY is not set | ||
954 | # CONFIG_STE10XP is not set | ||
955 | # CONFIG_LSI_ET1011C_PHY is not set | ||
956 | # CONFIG_MICREL_PHY is not set | ||
957 | # CONFIG_FIXED_PHY is not set | ||
958 | # CONFIG_MDIO_BITBANG is not set | ||
959 | CONFIG_NET_ETHERNET=y | ||
960 | CONFIG_MII=y | ||
961 | # CONFIG_HAPPYMEAL is not set | ||
962 | # CONFIG_SUNGEM is not set | ||
963 | # CONFIG_CASSINI is not set | ||
964 | # CONFIG_NET_VENDOR_3COM is not set | ||
965 | # CONFIG_ETHOC is not set | ||
966 | # CONFIG_DNET is not set | ||
967 | # CONFIG_NET_TULIP is not set | ||
968 | # CONFIG_HP100 is not set | ||
969 | # CONFIG_IBM_NEW_EMAC_ZMII is not set | ||
970 | # CONFIG_IBM_NEW_EMAC_RGMII is not set | ||
971 | # CONFIG_IBM_NEW_EMAC_TAH is not set | ||
972 | # CONFIG_IBM_NEW_EMAC_EMAC4 is not set | ||
973 | # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set | ||
974 | # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set | ||
975 | # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set | ||
976 | CONFIG_NET_PCI=y | ||
977 | CONFIG_PCNET32=y | ||
978 | CONFIG_AMD8111_ETH=y | ||
979 | # CONFIG_ADAPTEC_STARFIRE is not set | ||
980 | # CONFIG_KSZ884X_PCI is not set | ||
981 | # CONFIG_B44 is not set | ||
982 | # CONFIG_FORCEDETH is not set | ||
983 | CONFIG_E100=y | ||
984 | # CONFIG_FEALNX is not set | ||
985 | # CONFIG_NATSEMI is not set | ||
986 | # CONFIG_NE2K_PCI is not set | ||
987 | CONFIG_8139CP=y | ||
988 | CONFIG_8139TOO=y | ||
989 | CONFIG_8139TOO_PIO=y | ||
990 | # CONFIG_8139TOO_TUNE_TWISTER is not set | ||
991 | CONFIG_8139TOO_8129=y | ||
992 | # CONFIG_8139_OLD_RX_RESET is not set | ||
993 | # CONFIG_R6040 is not set | ||
994 | # CONFIG_SIS900 is not set | ||
995 | # CONFIG_EPIC100 is not set | ||
996 | # CONFIG_SMSC9420 is not set | ||
997 | # CONFIG_SUNDANCE is not set | ||
998 | # CONFIG_TLAN is not set | ||
999 | # CONFIG_KS8851_MLL is not set | ||
1000 | # CONFIG_VIA_RHINE is not set | ||
1001 | # CONFIG_SC92031 is not set | ||
1002 | # CONFIG_ATL2 is not set | ||
1003 | CONFIG_NETDEV_1000=y | ||
1004 | # CONFIG_ACENIC is not set | ||
1005 | # CONFIG_DL2K is not set | ||
1006 | CONFIG_E1000=y | ||
1007 | # CONFIG_E1000E is not set | ||
1008 | # CONFIG_IP1000 is not set | ||
1009 | # CONFIG_IGB is not set | ||
1010 | # CONFIG_IGBVF is not set | ||
1011 | # CONFIG_NS83820 is not set | ||
1012 | # CONFIG_HAMACHI is not set | ||
1013 | # CONFIG_YELLOWFIN is not set | ||
1014 | # CONFIG_R8169 is not set | ||
1015 | # CONFIG_SIS190 is not set | ||
1016 | # CONFIG_SKGE is not set | ||
1017 | # CONFIG_SKY2 is not set | ||
1018 | # CONFIG_VIA_VELOCITY is not set | ||
1019 | # CONFIG_TIGON3 is not set | ||
1020 | # CONFIG_BNX2 is not set | ||
1021 | # CONFIG_CNIC is not set | ||
1022 | # CONFIG_QLA3XXX is not set | ||
1023 | # CONFIG_ATL1 is not set | ||
1024 | # CONFIG_ATL1E is not set | ||
1025 | # CONFIG_ATL1C is not set | ||
1026 | # CONFIG_JME is not set | ||
1027 | # CONFIG_NETDEV_10000 is not set | ||
1028 | # CONFIG_TR is not set | ||
1029 | CONFIG_WLAN=y | ||
1030 | # CONFIG_AIRO is not set | ||
1031 | # CONFIG_ATMEL is not set | ||
1032 | # CONFIG_PRISM54 is not set | ||
1033 | # CONFIG_USB_ZD1201 is not set | ||
1034 | # CONFIG_HOSTAP is not set | ||
1035 | |||
1036 | # | ||
1037 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
1038 | # | ||
1039 | |||
1040 | # | ||
1041 | # USB Network Adapters | ||
1042 | # | ||
1043 | # CONFIG_USB_CATC is not set | ||
1044 | # CONFIG_USB_KAWETH is not set | ||
1045 | # CONFIG_USB_PEGASUS is not set | ||
1046 | # CONFIG_USB_RTL8150 is not set | ||
1047 | # CONFIG_USB_USBNET is not set | ||
1048 | # CONFIG_USB_IPHETH is not set | ||
1049 | # CONFIG_WAN is not set | ||
1050 | |||
1051 | # | ||
1052 | # CAIF transport drivers | ||
1053 | # | ||
1054 | # CONFIG_FDDI is not set | ||
1055 | # CONFIG_HIPPI is not set | ||
1056 | # CONFIG_PPP is not set | ||
1057 | # CONFIG_SLIP is not set | ||
1058 | # CONFIG_NET_FC is not set | ||
1059 | CONFIG_NETCONSOLE=m | ||
1060 | CONFIG_NETCONSOLE_DYNAMIC=y | ||
1061 | CONFIG_NETPOLL=y | ||
1062 | # CONFIG_NETPOLL_TRAP is not set | ||
1063 | CONFIG_NET_POLL_CONTROLLER=y | ||
1064 | # CONFIG_VMXNET3 is not set | ||
1065 | # CONFIG_ISDN is not set | ||
1066 | # CONFIG_PHONE is not set | ||
1067 | |||
1068 | # | ||
1069 | # Input device support | ||
1070 | # | ||
1071 | CONFIG_INPUT=y | ||
1072 | CONFIG_INPUT_FF_MEMLESS=y | ||
1073 | # CONFIG_INPUT_POLLDEV is not set | ||
1074 | # CONFIG_INPUT_SPARSEKMAP is not set | ||
1075 | |||
1076 | # | ||
1077 | # Userland interfaces | ||
1078 | # | ||
1079 | CONFIG_INPUT_MOUSEDEV=y | ||
1080 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | ||
1081 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | ||
1082 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | ||
1083 | # CONFIG_INPUT_JOYDEV is not set | ||
1084 | CONFIG_INPUT_EVDEV=y | ||
1085 | # CONFIG_INPUT_EVBUG is not set | ||
1086 | |||
1087 | # | ||
1088 | # Input Device Drivers | ||
1089 | # | ||
1090 | CONFIG_INPUT_KEYBOARD=y | ||
1091 | # CONFIG_KEYBOARD_ADP5588 is not set | ||
1092 | CONFIG_KEYBOARD_ATKBD=y | ||
1093 | # CONFIG_KEYBOARD_QT2160 is not set | ||
1094 | # CONFIG_KEYBOARD_LKKBD is not set | ||
1095 | # CONFIG_KEYBOARD_TCA6416 is not set | ||
1096 | # CONFIG_KEYBOARD_MAX7359 is not set | ||
1097 | # CONFIG_KEYBOARD_MCS is not set | ||
1098 | # CONFIG_KEYBOARD_NEWTON is not set | ||
1099 | # CONFIG_KEYBOARD_OPENCORES is not set | ||
1100 | # CONFIG_KEYBOARD_STOWAWAY is not set | ||
1101 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
1102 | # CONFIG_KEYBOARD_XTKBD is not set | ||
1103 | CONFIG_INPUT_MOUSE=y | ||
1104 | CONFIG_MOUSE_PS2=y | ||
1105 | CONFIG_MOUSE_PS2_ALPS=y | ||
1106 | CONFIG_MOUSE_PS2_LOGIPS2PP=y | ||
1107 | CONFIG_MOUSE_PS2_SYNAPTICS=y | ||
1108 | CONFIG_MOUSE_PS2_LIFEBOOK=y | ||
1109 | CONFIG_MOUSE_PS2_TRACKPOINT=y | ||
1110 | # CONFIG_MOUSE_PS2_ELANTECH is not set | ||
1111 | # CONFIG_MOUSE_PS2_SENTELIC is not set | ||
1112 | # CONFIG_MOUSE_PS2_TOUCHKIT is not set | ||
1113 | # CONFIG_MOUSE_SERIAL is not set | ||
1114 | # CONFIG_MOUSE_APPLETOUCH is not set | ||
1115 | # CONFIG_MOUSE_BCM5974 is not set | ||
1116 | # CONFIG_MOUSE_VSXXXAA is not set | ||
1117 | # CONFIG_MOUSE_SYNAPTICS_I2C is not set | ||
1118 | # CONFIG_INPUT_JOYSTICK is not set | ||
1119 | # CONFIG_INPUT_TABLET is not set | ||
1120 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
1121 | # CONFIG_INPUT_MISC is not set | ||
1122 | |||
1123 | # | ||
1124 | # Hardware I/O ports | ||
1125 | # | ||
1126 | CONFIG_SERIO=y | ||
1127 | CONFIG_SERIO_I8042=y | ||
1128 | CONFIG_SERIO_SERPORT=y | ||
1129 | # CONFIG_SERIO_CT82C710 is not set | ||
1130 | # CONFIG_SERIO_PCIPS2 is not set | ||
1131 | CONFIG_SERIO_LIBPS2=y | ||
1132 | CONFIG_SERIO_RAW=y | ||
1133 | # CONFIG_SERIO_ALTERA_PS2 is not set | ||
1134 | # CONFIG_GAMEPORT is not set | ||
1135 | |||
1136 | # | ||
1137 | # Character devices | ||
1138 | # | ||
1139 | CONFIG_VT=y | ||
1140 | CONFIG_CONSOLE_TRANSLATIONS=y | ||
1141 | CONFIG_VT_CONSOLE=y | ||
1142 | CONFIG_HW_CONSOLE=y | ||
1143 | # CONFIG_VT_HW_CONSOLE_BINDING is not set | ||
1144 | # CONFIG_DEVKMEM is not set | ||
1145 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
1146 | # CONFIG_N_GSM is not set | ||
1147 | # CONFIG_NOZOMI is not set | ||
1148 | |||
1149 | # | ||
1150 | # Serial drivers | ||
1151 | # | ||
1152 | CONFIG_SERIAL_8250=y | ||
1153 | CONFIG_SERIAL_8250_CONSOLE=y | ||
1154 | CONFIG_FIX_EARLYCON_MEM=y | ||
1155 | CONFIG_SERIAL_8250_PCI=y | ||
1156 | CONFIG_SERIAL_8250_PNP=y | ||
1157 | CONFIG_SERIAL_8250_NR_UARTS=32 | ||
1158 | CONFIG_SERIAL_8250_RUNTIME_UARTS=4 | ||
1159 | CONFIG_SERIAL_8250_EXTENDED=y | ||
1160 | # CONFIG_SERIAL_8250_MANY_PORTS is not set | ||
1161 | # CONFIG_SERIAL_8250_SHARE_IRQ is not set | ||
1162 | # CONFIG_SERIAL_8250_DETECT_IRQ is not set | ||
1163 | # CONFIG_SERIAL_8250_RSA is not set | ||
1164 | |||
1165 | # | ||
1166 | # Non-8250 serial port support | ||
1167 | # | ||
1168 | # CONFIG_SERIAL_MFD_HSU is not set | ||
1169 | CONFIG_SERIAL_CORE=y | ||
1170 | CONFIG_SERIAL_CORE_CONSOLE=y | ||
1171 | # CONFIG_SERIAL_JSM is not set | ||
1172 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
1173 | # CONFIG_SERIAL_ALTERA_JTAGUART is not set | ||
1174 | # CONFIG_SERIAL_ALTERA_UART is not set | ||
1175 | CONFIG_UNIX98_PTYS=y | ||
1176 | CONFIG_DEVPTS_MULTIPLE_INSTANCES=y | ||
1177 | # CONFIG_LEGACY_PTYS is not set | ||
1178 | # CONFIG_IPMI_HANDLER is not set | ||
1179 | CONFIG_HW_RANDOM=y | ||
1180 | # CONFIG_HW_RANDOM_TIMERIOMEM is not set | ||
1181 | CONFIG_HW_RANDOM_INTEL=y | ||
1182 | CONFIG_HW_RANDOM_AMD=y | ||
1183 | # CONFIG_HW_RANDOM_VIA is not set | ||
1184 | # CONFIG_NVRAM is not set | ||
1185 | CONFIG_RTC=y | ||
1186 | # CONFIG_R3964 is not set | ||
1187 | # CONFIG_APPLICOM is not set | ||
1188 | # CONFIG_MWAVE is not set | ||
1189 | # CONFIG_RAW_DRIVER is not set | ||
1190 | CONFIG_HPET=y | ||
1191 | CONFIG_HPET_MMAP=y | ||
1192 | CONFIG_HANGCHECK_TIMER=y | ||
1193 | # CONFIG_TCG_TPM is not set | ||
1194 | # CONFIG_TELCLOCK is not set | ||
1195 | CONFIG_DEVPORT=y | ||
1196 | # CONFIG_RAMOOPS is not set | ||
1197 | CONFIG_I2C=y | ||
1198 | CONFIG_I2C_BOARDINFO=y | ||
1199 | CONFIG_I2C_COMPAT=y | ||
1200 | CONFIG_I2C_CHARDEV=y | ||
1201 | # CONFIG_I2C_MUX is not set | ||
1202 | CONFIG_I2C_HELPER_AUTO=y | ||
1203 | |||
1204 | # | ||
1205 | # I2C Hardware Bus support | ||
1206 | # | ||
1207 | |||
1208 | # | ||
1209 | # PC SMBus host controller drivers | ||
1210 | # | ||
1211 | # CONFIG_I2C_ALI1535 is not set | ||
1212 | # CONFIG_I2C_ALI1563 is not set | ||
1213 | # CONFIG_I2C_ALI15X3 is not set | ||
1214 | # CONFIG_I2C_AMD756 is not set | ||
1215 | # CONFIG_I2C_AMD8111 is not set | ||
1216 | CONFIG_I2C_I801=y | ||
1217 | CONFIG_I2C_ISCH=y | ||
1218 | # CONFIG_I2C_PIIX4 is not set | ||
1219 | # CONFIG_I2C_NFORCE2 is not set | ||
1220 | # CONFIG_I2C_SIS5595 is not set | ||
1221 | # CONFIG_I2C_SIS630 is not set | ||
1222 | # CONFIG_I2C_SIS96X is not set | ||
1223 | # CONFIG_I2C_VIA is not set | ||
1224 | # CONFIG_I2C_VIAPRO is not set | ||
1225 | |||
1226 | # | ||
1227 | # ACPI drivers | ||
1228 | # | ||
1229 | # CONFIG_I2C_SCMI is not set | ||
1230 | |||
1231 | # | ||
1232 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
1233 | # | ||
1234 | # CONFIG_I2C_OCORES is not set | ||
1235 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
1236 | # CONFIG_I2C_SIMTEC is not set | ||
1237 | # CONFIG_I2C_XILINX is not set | ||
1238 | |||
1239 | # | ||
1240 | # External I2C/SMBus adapter drivers | ||
1241 | # | ||
1242 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1243 | # CONFIG_I2C_TAOS_EVM is not set | ||
1244 | # CONFIG_I2C_TINY_USB is not set | ||
1245 | |||
1246 | # | ||
1247 | # Other I2C/SMBus bus drivers | ||
1248 | # | ||
1249 | # CONFIG_I2C_STUB is not set | ||
1250 | # CONFIG_I2C_DEBUG_CORE is not set | ||
1251 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
1252 | # CONFIG_I2C_DEBUG_BUS is not set | ||
1253 | # CONFIG_SPI is not set | ||
1254 | |||
1255 | # | ||
1256 | # PPS support | ||
1257 | # | ||
1258 | # CONFIG_PPS is not set | ||
1259 | CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y | ||
1260 | # CONFIG_GPIOLIB is not set | ||
1261 | # CONFIG_W1 is not set | ||
1262 | CONFIG_POWER_SUPPLY=y | ||
1263 | # CONFIG_POWER_SUPPLY_DEBUG is not set | ||
1264 | # CONFIG_PDA_POWER is not set | ||
1265 | # CONFIG_TEST_POWER is not set | ||
1266 | # CONFIG_BATTERY_DS2760 is not set | ||
1267 | # CONFIG_BATTERY_DS2782 is not set | ||
1268 | # CONFIG_BATTERY_BQ27x00 is not set | ||
1269 | # CONFIG_BATTERY_MAX17040 is not set | ||
1270 | # CONFIG_HWMON is not set | ||
1271 | CONFIG_THERMAL=y | ||
1272 | # CONFIG_WATCHDOG is not set | ||
1273 | CONFIG_SSB_POSSIBLE=y | ||
1274 | |||
1275 | # | ||
1276 | # Sonics Silicon Backplane | ||
1277 | # | ||
1278 | # CONFIG_SSB is not set | ||
1279 | CONFIG_MFD_SUPPORT=y | ||
1280 | CONFIG_MFD_CORE=y | ||
1281 | # CONFIG_MFD_88PM860X is not set | ||
1282 | # CONFIG_MFD_SM501 is not set | ||
1283 | # CONFIG_HTC_PASIC3 is not set | ||
1284 | # CONFIG_TPS6507X is not set | ||
1285 | # CONFIG_TWL4030_CORE is not set | ||
1286 | # CONFIG_MFD_STMPE is not set | ||
1287 | # CONFIG_MFD_TC35892 is not set | ||
1288 | # CONFIG_MFD_TMIO is not set | ||
1289 | # CONFIG_PMIC_DA903X is not set | ||
1290 | # CONFIG_PMIC_ADP5520 is not set | ||
1291 | # CONFIG_MFD_MAX8925 is not set | ||
1292 | # CONFIG_MFD_MAX8998 is not set | ||
1293 | # CONFIG_MFD_WM8400 is not set | ||
1294 | # CONFIG_MFD_WM831X is not set | ||
1295 | # CONFIG_MFD_WM8350_I2C is not set | ||
1296 | # CONFIG_MFD_WM8994 is not set | ||
1297 | # CONFIG_MFD_PCF50633 is not set | ||
1298 | # CONFIG_ABX500_CORE is not set | ||
1299 | CONFIG_LPC_SCH=y | ||
1300 | # CONFIG_MFD_RDC321X is not set | ||
1301 | # CONFIG_MFD_JANZ_CMODIO is not set | ||
1302 | # CONFIG_REGULATOR is not set | ||
1303 | # CONFIG_MEDIA_SUPPORT is not set | ||
1304 | |||
1305 | # | ||
1306 | # Graphics support | ||
1307 | # | ||
1308 | # CONFIG_AGP is not set | ||
1309 | CONFIG_VGA_ARB=y | ||
1310 | CONFIG_VGA_ARB_MAX_GPUS=16 | ||
1311 | # CONFIG_VGA_SWITCHEROO is not set | ||
1312 | # CONFIG_DRM is not set | ||
1313 | CONFIG_VGASTATE=y | ||
1314 | CONFIG_VIDEO_OUTPUT_CONTROL=y | ||
1315 | CONFIG_FB=y | ||
1316 | # CONFIG_FIRMWARE_EDID is not set | ||
1317 | # CONFIG_FB_DDC is not set | ||
1318 | CONFIG_FB_BOOT_VESA_SUPPORT=y | ||
1319 | CONFIG_FB_CFB_FILLRECT=y | ||
1320 | CONFIG_FB_CFB_COPYAREA=y | ||
1321 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
1322 | # CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set | ||
1323 | # CONFIG_FB_SYS_FILLRECT is not set | ||
1324 | # CONFIG_FB_SYS_COPYAREA is not set | ||
1325 | # CONFIG_FB_SYS_IMAGEBLIT is not set | ||
1326 | # CONFIG_FB_FOREIGN_ENDIAN is not set | ||
1327 | # CONFIG_FB_SYS_FOPS is not set | ||
1328 | # CONFIG_FB_SVGALIB is not set | ||
1329 | # CONFIG_FB_MACMODES is not set | ||
1330 | # CONFIG_FB_BACKLIGHT is not set | ||
1331 | # CONFIG_FB_MODE_HELPERS is not set | ||
1332 | # CONFIG_FB_TILEBLITTING is not set | ||
1333 | |||
1334 | # | ||
1335 | # Frame buffer hardware drivers | ||
1336 | # | ||
1337 | # CONFIG_FB_CIRRUS is not set | ||
1338 | # CONFIG_FB_PM2 is not set | ||
1339 | # CONFIG_FB_CYBER2000 is not set | ||
1340 | # CONFIG_FB_ARC is not set | ||
1341 | # CONFIG_FB_ASILIANT is not set | ||
1342 | # CONFIG_FB_IMSTT is not set | ||
1343 | CONFIG_FB_VGA16=y | ||
1344 | CONFIG_FB_VESA=y | ||
1345 | # CONFIG_FB_N411 is not set | ||
1346 | # CONFIG_FB_HGA is not set | ||
1347 | # CONFIG_FB_S1D13XXX is not set | ||
1348 | # CONFIG_FB_NVIDIA is not set | ||
1349 | # CONFIG_FB_RIVA is not set | ||
1350 | # CONFIG_FB_LE80578 is not set | ||
1351 | # CONFIG_FB_MATROX is not set | ||
1352 | # CONFIG_FB_RADEON is not set | ||
1353 | # CONFIG_FB_ATY128 is not set | ||
1354 | # CONFIG_FB_ATY is not set | ||
1355 | # CONFIG_FB_S3 is not set | ||
1356 | # CONFIG_FB_SAVAGE is not set | ||
1357 | # CONFIG_FB_SIS is not set | ||
1358 | # CONFIG_FB_VIA is not set | ||
1359 | # CONFIG_FB_NEOMAGIC is not set | ||
1360 | # CONFIG_FB_KYRO is not set | ||
1361 | # CONFIG_FB_3DFX is not set | ||
1362 | # CONFIG_FB_VOODOO1 is not set | ||
1363 | # CONFIG_FB_VT8623 is not set | ||
1364 | # CONFIG_FB_TRIDENT is not set | ||
1365 | # CONFIG_FB_ARK is not set | ||
1366 | # CONFIG_FB_PM3 is not set | ||
1367 | # CONFIG_FB_CARMINE is not set | ||
1368 | # CONFIG_FB_GEODE is not set | ||
1369 | # CONFIG_FB_TMIO is not set | ||
1370 | # CONFIG_FB_VIRTUAL is not set | ||
1371 | # CONFIG_FB_METRONOME is not set | ||
1372 | # CONFIG_FB_MB862XX is not set | ||
1373 | # CONFIG_FB_BROADSHEET is not set | ||
1374 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
1375 | |||
1376 | # | ||
1377 | # Display device support | ||
1378 | # | ||
1379 | # CONFIG_DISPLAY_SUPPORT is not set | ||
1380 | |||
1381 | # | ||
1382 | # Console display driver support | ||
1383 | # | ||
1384 | CONFIG_VGA_CONSOLE=y | ||
1385 | CONFIG_VGACON_SOFT_SCROLLBACK=y | ||
1386 | CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=256 | ||
1387 | CONFIG_DUMMY_CONSOLE=y | ||
1388 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
1389 | # CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set | ||
1390 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
1391 | # CONFIG_FONTS is not set | ||
1392 | CONFIG_FONT_8x8=y | ||
1393 | CONFIG_FONT_8x16=y | ||
1394 | CONFIG_LOGO=y | ||
1395 | CONFIG_LOGO_LINUX_MONO=y | ||
1396 | # CONFIG_LOGO_LINUX_VGA16 is not set | ||
1397 | # CONFIG_LOGO_LINUX_CLUT224 is not set | ||
1398 | # CONFIG_SOUND is not set | ||
1399 | CONFIG_HID_SUPPORT=y | ||
1400 | CONFIG_HID=y | ||
1401 | CONFIG_HIDRAW=y | ||
1402 | |||
1403 | # | ||
1404 | # USB Input Devices | ||
1405 | # | ||
1406 | CONFIG_USB_HID=y | ||
1407 | CONFIG_HID_PID=y | ||
1408 | CONFIG_USB_HIDDEV=y | ||
1409 | |||
1410 | # | ||
1411 | # Special HID drivers | ||
1412 | # | ||
1413 | # CONFIG_HID_3M_PCT is not set | ||
1414 | CONFIG_HID_A4TECH=y | ||
1415 | # CONFIG_HID_ACRUX_FF is not set | ||
1416 | CONFIG_HID_APPLE=y | ||
1417 | CONFIG_HID_BELKIN=y | ||
1418 | # CONFIG_HID_CANDO is not set | ||
1419 | CONFIG_HID_CHERRY=y | ||
1420 | CONFIG_HID_CHICONY=y | ||
1421 | CONFIG_HID_CYPRESS=y | ||
1422 | CONFIG_HID_DRAGONRISE=y | ||
1423 | # CONFIG_DRAGONRISE_FF is not set | ||
1424 | # CONFIG_HID_EGALAX is not set | ||
1425 | CONFIG_HID_EZKEY=y | ||
1426 | CONFIG_HID_KYE=y | ||
1427 | CONFIG_HID_GYRATION=y | ||
1428 | CONFIG_HID_TWINHAN=y | ||
1429 | CONFIG_HID_KENSINGTON=y | ||
1430 | CONFIG_HID_LOGITECH=y | ||
1431 | # CONFIG_LOGITECH_FF is not set | ||
1432 | CONFIG_LOGIRUMBLEPAD2_FF=y | ||
1433 | # CONFIG_LOGIG940_FF is not set | ||
1434 | CONFIG_HID_MICROSOFT=y | ||
1435 | # CONFIG_HID_MOSART is not set | ||
1436 | CONFIG_HID_MONTEREY=y | ||
1437 | CONFIG_HID_NTRIG=y | ||
1438 | CONFIG_HID_ORTEK=y | ||
1439 | CONFIG_HID_PANTHERLORD=y | ||
1440 | # CONFIG_PANTHERLORD_FF is not set | ||
1441 | CONFIG_HID_PETALYNX=y | ||
1442 | # CONFIG_HID_PICOLCD is not set | ||
1443 | # CONFIG_HID_QUANTA is not set | ||
1444 | # CONFIG_HID_ROCCAT is not set | ||
1445 | # CONFIG_HID_ROCCAT_KONE is not set | ||
1446 | CONFIG_HID_SAMSUNG=y | ||
1447 | CONFIG_HID_SONY=y | ||
1448 | # CONFIG_HID_STANTUM is not set | ||
1449 | CONFIG_HID_SUNPLUS=y | ||
1450 | CONFIG_HID_GREENASIA=y | ||
1451 | # CONFIG_GREENASIA_FF is not set | ||
1452 | CONFIG_HID_SMARTJOYPLUS=y | ||
1453 | # CONFIG_SMARTJOYPLUS_FF is not set | ||
1454 | CONFIG_HID_TOPSEED=y | ||
1455 | CONFIG_HID_THRUSTMASTER=y | ||
1456 | # CONFIG_THRUSTMASTER_FF is not set | ||
1457 | CONFIG_HID_ZEROPLUS=y | ||
1458 | # CONFIG_ZEROPLUS_FF is not set | ||
1459 | # CONFIG_HID_ZYDACRON is not set | ||
1460 | CONFIG_USB_SUPPORT=y | ||
1461 | CONFIG_USB_ARCH_HAS_HCD=y | ||
1462 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
1463 | CONFIG_USB_ARCH_HAS_EHCI=y | ||
1464 | CONFIG_USB=y | ||
1465 | # CONFIG_USB_DEBUG is not set | ||
1466 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | ||
1467 | |||
1468 | # | ||
1469 | # Miscellaneous USB options | ||
1470 | # | ||
1471 | # CONFIG_USB_DEVICEFS is not set | ||
1472 | # CONFIG_USB_DEVICE_CLASS is not set | ||
1473 | # CONFIG_USB_DYNAMIC_MINORS is not set | ||
1474 | CONFIG_USB_MON=y | ||
1475 | # CONFIG_USB_WUSB is not set | ||
1476 | # CONFIG_USB_WUSB_CBAF is not set | ||
1477 | |||
1478 | # | ||
1479 | # USB Host Controller Drivers | ||
1480 | # | ||
1481 | # CONFIG_USB_C67X00_HCD is not set | ||
1482 | # CONFIG_USB_XHCI_HCD is not set | ||
1483 | CONFIG_USB_EHCI_HCD=y | ||
1484 | CONFIG_USB_EHCI_ROOT_HUB_TT=y | ||
1485 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | ||
1486 | # CONFIG_USB_OXU210HP_HCD is not set | ||
1487 | # CONFIG_USB_ISP116X_HCD is not set | ||
1488 | # CONFIG_USB_ISP1760_HCD is not set | ||
1489 | # CONFIG_USB_ISP1362_HCD is not set | ||
1490 | CONFIG_USB_OHCI_HCD=y | ||
1491 | # CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set | ||
1492 | # CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set | ||
1493 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
1494 | CONFIG_USB_UHCI_HCD=y | ||
1495 | # CONFIG_USB_SL811_HCD is not set | ||
1496 | # CONFIG_USB_R8A66597_HCD is not set | ||
1497 | # CONFIG_USB_WHCI_HCD is not set | ||
1498 | # CONFIG_USB_HWA_HCD is not set | ||
1499 | |||
1500 | # | ||
1501 | # USB Device Class drivers | ||
1502 | # | ||
1503 | # CONFIG_USB_ACM is not set | ||
1504 | # CONFIG_USB_PRINTER is not set | ||
1505 | # CONFIG_USB_WDM is not set | ||
1506 | # CONFIG_USB_TMC is not set | ||
1507 | |||
1508 | # | ||
1509 | # NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may | ||
1510 | # | ||
1511 | |||
1512 | # | ||
1513 | # also be needed; see USB_STORAGE Help for more info | ||
1514 | # | ||
1515 | CONFIG_USB_STORAGE=y | ||
1516 | # CONFIG_USB_STORAGE_DEBUG is not set | ||
1517 | CONFIG_USB_STORAGE_DATAFAB=y | ||
1518 | CONFIG_USB_STORAGE_FREECOM=y | ||
1519 | CONFIG_USB_STORAGE_ISD200=y | ||
1520 | CONFIG_USB_STORAGE_USBAT=y | ||
1521 | CONFIG_USB_STORAGE_SDDR09=y | ||
1522 | CONFIG_USB_STORAGE_SDDR55=y | ||
1523 | CONFIG_USB_STORAGE_JUMPSHOT=y | ||
1524 | CONFIG_USB_STORAGE_ALAUDA=y | ||
1525 | CONFIG_USB_STORAGE_ONETOUCH=y | ||
1526 | CONFIG_USB_STORAGE_KARMA=y | ||
1527 | CONFIG_USB_STORAGE_CYPRESS_ATACB=y | ||
1528 | # CONFIG_USB_LIBUSUAL is not set | ||
1529 | |||
1530 | # | ||
1531 | # USB Imaging devices | ||
1532 | # | ||
1533 | # CONFIG_USB_MDC800 is not set | ||
1534 | # CONFIG_USB_MICROTEK is not set | ||
1535 | |||
1536 | # | ||
1537 | # USB port drivers | ||
1538 | # | ||
1539 | # CONFIG_USB_SERIAL is not set | ||
1540 | |||
1541 | # | ||
1542 | # USB Miscellaneous drivers | ||
1543 | # | ||
1544 | # CONFIG_USB_EMI62 is not set | ||
1545 | # CONFIG_USB_EMI26 is not set | ||
1546 | # CONFIG_USB_ADUTUX is not set | ||
1547 | # CONFIG_USB_SEVSEG is not set | ||
1548 | # CONFIG_USB_RIO500 is not set | ||
1549 | # CONFIG_USB_LEGOTOWER is not set | ||
1550 | # CONFIG_USB_LCD is not set | ||
1551 | # CONFIG_USB_LED is not set | ||
1552 | # CONFIG_USB_CYPRESS_CY7C63 is not set | ||
1553 | # CONFIG_USB_CYTHERM is not set | ||
1554 | # CONFIG_USB_IDMOUSE is not set | ||
1555 | # CONFIG_USB_FTDI_ELAN is not set | ||
1556 | # CONFIG_USB_APPLEDISPLAY is not set | ||
1557 | # CONFIG_USB_SISUSBVGA is not set | ||
1558 | # CONFIG_USB_LD is not set | ||
1559 | # CONFIG_USB_TRANCEVIBRATOR is not set | ||
1560 | # CONFIG_USB_IOWARRIOR is not set | ||
1561 | # CONFIG_USB_TEST is not set | ||
1562 | # CONFIG_USB_ISIGHTFW is not set | ||
1563 | # CONFIG_USB_GADGET is not set | ||
1564 | |||
1565 | # | ||
1566 | # OTG and related infrastructure | ||
1567 | # | ||
1568 | # CONFIG_NOP_USB_XCEIV is not set | ||
1569 | # CONFIG_UWB is not set | ||
1570 | # CONFIG_MMC is not set | ||
1571 | # CONFIG_MEMSTICK is not set | ||
1572 | # CONFIG_NEW_LEDS is not set | ||
1573 | # CONFIG_ACCESSIBILITY is not set | ||
1574 | # CONFIG_INFINIBAND is not set | ||
1575 | # CONFIG_EDAC is not set | ||
1576 | # CONFIG_RTC_CLASS is not set | ||
1577 | # CONFIG_DMADEVICES is not set | ||
1578 | # CONFIG_AUXDISPLAY is not set | ||
1579 | # CONFIG_UIO is not set | ||
1580 | # CONFIG_STAGING is not set | ||
1581 | # CONFIG_X86_PLATFORM_DEVICES is not set | ||
1582 | |||
1583 | # | ||
1584 | # Firmware Drivers | ||
1585 | # | ||
1586 | CONFIG_EDD=y | ||
1587 | # CONFIG_EDD_OFF is not set | ||
1588 | CONFIG_FIRMWARE_MEMMAP=y | ||
1589 | # CONFIG_DELL_RBU is not set | ||
1590 | # CONFIG_DCDBAS is not set | ||
1591 | # CONFIG_DMIID is not set | ||
1592 | # CONFIG_ISCSI_IBFT_FIND is not set | ||
1593 | |||
1594 | # | ||
1595 | # File systems | ||
1596 | # | ||
1597 | CONFIG_EXT2_FS=y | ||
1598 | CONFIG_EXT2_FS_XATTR=y | ||
1599 | CONFIG_EXT2_FS_POSIX_ACL=y | ||
1600 | CONFIG_EXT2_FS_SECURITY=y | ||
1601 | # CONFIG_EXT2_FS_XIP is not set | ||
1602 | CONFIG_EXT3_FS=y | ||
1603 | CONFIG_EXT3_DEFAULTS_TO_ORDERED=y | ||
1604 | CONFIG_EXT3_FS_XATTR=y | ||
1605 | CONFIG_EXT3_FS_POSIX_ACL=y | ||
1606 | CONFIG_EXT3_FS_SECURITY=y | ||
1607 | # CONFIG_EXT4_FS is not set | ||
1608 | CONFIG_JBD=y | ||
1609 | # CONFIG_JBD_DEBUG is not set | ||
1610 | CONFIG_FS_MBCACHE=y | ||
1611 | # CONFIG_REISERFS_FS is not set | ||
1612 | # CONFIG_JFS_FS is not set | ||
1613 | CONFIG_FS_POSIX_ACL=y | ||
1614 | # CONFIG_XFS_FS is not set | ||
1615 | # CONFIG_GFS2_FS is not set | ||
1616 | # CONFIG_OCFS2_FS is not set | ||
1617 | # CONFIG_BTRFS_FS is not set | ||
1618 | # CONFIG_NILFS2_FS is not set | ||
1619 | CONFIG_FILE_LOCKING=y | ||
1620 | CONFIG_FSNOTIFY=y | ||
1621 | CONFIG_DNOTIFY=y | ||
1622 | CONFIG_INOTIFY_USER=y | ||
1623 | # CONFIG_QUOTA is not set | ||
1624 | # CONFIG_AUTOFS_FS is not set | ||
1625 | # CONFIG_AUTOFS4_FS is not set | ||
1626 | # CONFIG_FUSE_FS is not set | ||
1627 | |||
1628 | # | ||
1629 | # Caches | ||
1630 | # | ||
1631 | CONFIG_FSCACHE=y | ||
1632 | CONFIG_FSCACHE_STATS=y | ||
1633 | # CONFIG_FSCACHE_HISTOGRAM is not set | ||
1634 | # CONFIG_FSCACHE_DEBUG is not set | ||
1635 | # CONFIG_FSCACHE_OBJECT_LIST is not set | ||
1636 | CONFIG_CACHEFILES=y | ||
1637 | # CONFIG_CACHEFILES_DEBUG is not set | ||
1638 | # CONFIG_CACHEFILES_HISTOGRAM is not set | ||
1639 | |||
1640 | # | ||
1641 | # CD-ROM/DVD Filesystems | ||
1642 | # | ||
1643 | CONFIG_ISO9660_FS=y | ||
1644 | CONFIG_JOLIET=y | ||
1645 | CONFIG_ZISOFS=y | ||
1646 | CONFIG_UDF_FS=y | ||
1647 | CONFIG_UDF_NLS=y | ||
1648 | |||
1649 | # | ||
1650 | # DOS/FAT/NT Filesystems | ||
1651 | # | ||
1652 | CONFIG_FAT_FS=y | ||
1653 | CONFIG_MSDOS_FS=y | ||
1654 | CONFIG_VFAT_FS=y | ||
1655 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
1656 | CONFIG_FAT_DEFAULT_IOCHARSET="utf8" | ||
1657 | CONFIG_NTFS_FS=y | ||
1658 | # CONFIG_NTFS_DEBUG is not set | ||
1659 | CONFIG_NTFS_RW=y | ||
1660 | |||
1661 | # | ||
1662 | # Pseudo filesystems | ||
1663 | # | ||
1664 | CONFIG_PROC_FS=y | ||
1665 | CONFIG_PROC_KCORE=y | ||
1666 | CONFIG_PROC_SYSCTL=y | ||
1667 | CONFIG_PROC_PAGE_MONITOR=y | ||
1668 | CONFIG_SYSFS=y | ||
1669 | CONFIG_TMPFS=y | ||
1670 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1671 | # CONFIG_HUGETLBFS is not set | ||
1672 | # CONFIG_HUGETLB_PAGE is not set | ||
1673 | CONFIG_CONFIGFS_FS=y | ||
1674 | # CONFIG_MISC_FILESYSTEMS is not set | ||
1675 | # CONFIG_NETWORK_FILESYSTEMS is not set | ||
1676 | |||
1677 | # | ||
1678 | # Partition Types | ||
1679 | # | ||
1680 | CONFIG_PARTITION_ADVANCED=y | ||
1681 | # CONFIG_ACORN_PARTITION is not set | ||
1682 | # CONFIG_OSF_PARTITION is not set | ||
1683 | # CONFIG_AMIGA_PARTITION is not set | ||
1684 | # CONFIG_ATARI_PARTITION is not set | ||
1685 | # CONFIG_MAC_PARTITION is not set | ||
1686 | CONFIG_MSDOS_PARTITION=y | ||
1687 | # CONFIG_BSD_DISKLABEL is not set | ||
1688 | # CONFIG_MINIX_SUBPARTITION is not set | ||
1689 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
1690 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
1691 | CONFIG_LDM_PARTITION=y | ||
1692 | CONFIG_LDM_DEBUG=y | ||
1693 | # CONFIG_SGI_PARTITION is not set | ||
1694 | # CONFIG_ULTRIX_PARTITION is not set | ||
1695 | # CONFIG_SUN_PARTITION is not set | ||
1696 | # CONFIG_KARMA_PARTITION is not set | ||
1697 | # CONFIG_EFI_PARTITION is not set | ||
1698 | # CONFIG_SYSV68_PARTITION is not set | ||
1699 | CONFIG_NLS=y | ||
1700 | CONFIG_NLS_DEFAULT="utf8" | ||
1701 | CONFIG_NLS_CODEPAGE_437=y | ||
1702 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1703 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1704 | CONFIG_NLS_CODEPAGE_850=y | ||
1705 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1706 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1707 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1708 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1709 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1710 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1711 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1712 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1713 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1714 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1715 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1716 | CONFIG_NLS_CODEPAGE_936=y | ||
1717 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1718 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1719 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1720 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1721 | # CONFIG_NLS_ISO8859_8 is not set | ||
1722 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1723 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1724 | CONFIG_NLS_ASCII=y | ||
1725 | CONFIG_NLS_ISO8859_1=y | ||
1726 | # CONFIG_NLS_ISO8859_2 is not set | ||
1727 | # CONFIG_NLS_ISO8859_3 is not set | ||
1728 | # CONFIG_NLS_ISO8859_4 is not set | ||
1729 | # CONFIG_NLS_ISO8859_5 is not set | ||
1730 | # CONFIG_NLS_ISO8859_6 is not set | ||
1731 | # CONFIG_NLS_ISO8859_7 is not set | ||
1732 | # CONFIG_NLS_ISO8859_9 is not set | ||
1733 | # CONFIG_NLS_ISO8859_13 is not set | ||
1734 | # CONFIG_NLS_ISO8859_14 is not set | ||
1735 | CONFIG_NLS_ISO8859_15=y | ||
1736 | # CONFIG_NLS_KOI8_R is not set | ||
1737 | # CONFIG_NLS_KOI8_U is not set | ||
1738 | CONFIG_NLS_UTF8=y | ||
1739 | # CONFIG_DLM is not set | ||
1740 | |||
1741 | # | ||
1742 | # Kernel hacking | ||
1743 | # | ||
1744 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
1745 | CONFIG_PRINTK_TIME=y | ||
1746 | # CONFIG_ENABLE_WARN_DEPRECATED is not set | ||
1747 | # CONFIG_ENABLE_MUST_CHECK is not set | ||
1748 | CONFIG_FRAME_WARN=0 | ||
1749 | CONFIG_MAGIC_SYSRQ=y | ||
1750 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1751 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1752 | CONFIG_DEBUG_FS=y | ||
1753 | # CONFIG_HEADERS_CHECK is not set | ||
1754 | CONFIG_DEBUG_KERNEL=y | ||
1755 | # CONFIG_DEBUG_SHIRQ is not set | ||
1756 | # CONFIG_LOCKUP_DETECTOR is not set | ||
1757 | # CONFIG_HARDLOCKUP_DETECTOR is not set | ||
1758 | CONFIG_DETECT_HUNG_TASK=y | ||
1759 | # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | ||
1760 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | ||
1761 | # CONFIG_SCHED_DEBUG is not set | ||
1762 | # CONFIG_SCHEDSTATS is not set | ||
1763 | # CONFIG_TIMER_STATS is not set | ||
1764 | # CONFIG_DEBUG_OBJECTS is not set | ||
1765 | # CONFIG_SLUB_DEBUG_ON is not set | ||
1766 | # CONFIG_SLUB_STATS is not set | ||
1767 | # CONFIG_DEBUG_KMEMLEAK is not set | ||
1768 | CONFIG_DEBUG_PREEMPT=y | ||
1769 | CONFIG_DEBUG_RT_MUTEXES=y | ||
1770 | CONFIG_DEBUG_PI_LIST=y | ||
1771 | # CONFIG_RT_MUTEX_TESTER is not set | ||
1772 | CONFIG_DEBUG_SPINLOCK=y | ||
1773 | CONFIG_DEBUG_MUTEXES=y | ||
1774 | CONFIG_DEBUG_LOCK_ALLOC=y | ||
1775 | CONFIG_PROVE_LOCKING=y | ||
1776 | # CONFIG_PROVE_RCU is not set | ||
1777 | CONFIG_LOCKDEP=y | ||
1778 | # CONFIG_LOCK_STAT is not set | ||
1779 | # CONFIG_DEBUG_LOCKDEP is not set | ||
1780 | CONFIG_TRACE_IRQFLAGS=y | ||
1781 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | ||
1782 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
1783 | CONFIG_STACKTRACE=y | ||
1784 | # CONFIG_DEBUG_KOBJECT is not set | ||
1785 | CONFIG_DEBUG_BUGVERBOSE=y | ||
1786 | CONFIG_DEBUG_INFO=y | ||
1787 | # CONFIG_DEBUG_INFO_REDUCED is not set | ||
1788 | # CONFIG_DEBUG_VM is not set | ||
1789 | # CONFIG_DEBUG_VIRTUAL is not set | ||
1790 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
1791 | CONFIG_DEBUG_MEMORY_INIT=y | ||
1792 | # CONFIG_DEBUG_LIST is not set | ||
1793 | # CONFIG_DEBUG_SG is not set | ||
1794 | # CONFIG_DEBUG_NOTIFIERS is not set | ||
1795 | # CONFIG_DEBUG_CREDENTIALS is not set | ||
1796 | CONFIG_ARCH_WANT_FRAME_POINTERS=y | ||
1797 | CONFIG_FRAME_POINTER=y | ||
1798 | # CONFIG_BOOT_PRINTK_DELAY is not set | ||
1799 | # CONFIG_RCU_TORTURE_TEST is not set | ||
1800 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1801 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
1802 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
1803 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | ||
1804 | # CONFIG_LKDTM is not set | ||
1805 | # CONFIG_FAULT_INJECTION is not set | ||
1806 | # CONFIG_LATENCYTOP is not set | ||
1807 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
1808 | # CONFIG_DEBUG_PAGEALLOC is not set | ||
1809 | CONFIG_USER_STACKTRACE_SUPPORT=y | ||
1810 | CONFIG_HAVE_FUNCTION_TRACER=y | ||
1811 | CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y | ||
1812 | CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y | ||
1813 | CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y | ||
1814 | CONFIG_HAVE_DYNAMIC_FTRACE=y | ||
1815 | CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y | ||
1816 | CONFIG_HAVE_SYSCALL_TRACEPOINTS=y | ||
1817 | CONFIG_TRACING_SUPPORT=y | ||
1818 | # CONFIG_FTRACE is not set | ||
1819 | # CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set | ||
1820 | # CONFIG_DYNAMIC_DEBUG is not set | ||
1821 | # CONFIG_DMA_API_DEBUG is not set | ||
1822 | # CONFIG_ATOMIC64_SELFTEST is not set | ||
1823 | # CONFIG_SAMPLES is not set | ||
1824 | CONFIG_HAVE_ARCH_KGDB=y | ||
1825 | # CONFIG_KGDB is not set | ||
1826 | CONFIG_HAVE_ARCH_KMEMCHECK=y | ||
1827 | # CONFIG_KMEMCHECK is not set | ||
1828 | # CONFIG_STRICT_DEVMEM is not set | ||
1829 | CONFIG_X86_VERBOSE_BOOTUP=y | ||
1830 | CONFIG_EARLY_PRINTK=y | ||
1831 | # CONFIG_EARLY_PRINTK_DBGP is not set | ||
1832 | # CONFIG_DEBUG_STACKOVERFLOW is not set | ||
1833 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
1834 | # CONFIG_DEBUG_PER_CPU_MAPS is not set | ||
1835 | # CONFIG_X86_PTDUMP is not set | ||
1836 | # CONFIG_DEBUG_RODATA is not set | ||
1837 | # CONFIG_DEBUG_NX_TEST is not set | ||
1838 | # CONFIG_IOMMU_DEBUG is not set | ||
1839 | # CONFIG_IOMMU_STRESS is not set | ||
1840 | CONFIG_HAVE_MMIOTRACE_SUPPORT=y | ||
1841 | CONFIG_IO_DELAY_TYPE_0X80=0 | ||
1842 | CONFIG_IO_DELAY_TYPE_0XED=1 | ||
1843 | CONFIG_IO_DELAY_TYPE_UDELAY=2 | ||
1844 | CONFIG_IO_DELAY_TYPE_NONE=3 | ||
1845 | CONFIG_IO_DELAY_0X80=y | ||
1846 | # CONFIG_IO_DELAY_0XED is not set | ||
1847 | # CONFIG_IO_DELAY_UDELAY is not set | ||
1848 | # CONFIG_IO_DELAY_NONE is not set | ||
1849 | CONFIG_DEFAULT_IO_DELAY_TYPE=0 | ||
1850 | # CONFIG_DEBUG_BOOT_PARAMS is not set | ||
1851 | # CONFIG_CPA_DEBUG is not set | ||
1852 | # CONFIG_OPTIMIZE_INLINING is not set | ||
1853 | # CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set | ||
1854 | |||
1855 | # | ||
1856 | # Security options | ||
1857 | # | ||
1858 | # CONFIG_KEYS is not set | ||
1859 | # CONFIG_SECURITY is not set | ||
1860 | # CONFIG_SECURITYFS is not set | ||
1861 | CONFIG_DEFAULT_SECURITY_DAC=y | ||
1862 | CONFIG_DEFAULT_SECURITY="" | ||
1863 | CONFIG_CRYPTO=y | ||
1864 | |||
1865 | # | ||
1866 | # Crypto core or helper | ||
1867 | # | ||
1868 | CONFIG_CRYPTO_FIPS=y | ||
1869 | CONFIG_CRYPTO_ALGAPI=y | ||
1870 | CONFIG_CRYPTO_ALGAPI2=y | ||
1871 | CONFIG_CRYPTO_AEAD=y | ||
1872 | CONFIG_CRYPTO_AEAD2=y | ||
1873 | CONFIG_CRYPTO_BLKCIPHER=y | ||
1874 | CONFIG_CRYPTO_BLKCIPHER2=y | ||
1875 | CONFIG_CRYPTO_HASH=y | ||
1876 | CONFIG_CRYPTO_HASH2=y | ||
1877 | CONFIG_CRYPTO_RNG=y | ||
1878 | CONFIG_CRYPTO_RNG2=y | ||
1879 | CONFIG_CRYPTO_PCOMP=y | ||
1880 | CONFIG_CRYPTO_PCOMP2=y | ||
1881 | CONFIG_CRYPTO_MANAGER=y | ||
1882 | CONFIG_CRYPTO_MANAGER2=y | ||
1883 | CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y | ||
1884 | CONFIG_CRYPTO_GF128MUL=y | ||
1885 | CONFIG_CRYPTO_NULL=y | ||
1886 | # CONFIG_CRYPTO_PCRYPT is not set | ||
1887 | CONFIG_CRYPTO_WORKQUEUE=y | ||
1888 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1889 | CONFIG_CRYPTO_AUTHENC=y | ||
1890 | # CONFIG_CRYPTO_TEST is not set | ||
1891 | |||
1892 | # | ||
1893 | # Authenticated Encryption with Associated Data | ||
1894 | # | ||
1895 | CONFIG_CRYPTO_CCM=y | ||
1896 | CONFIG_CRYPTO_GCM=y | ||
1897 | CONFIG_CRYPTO_SEQIV=y | ||
1898 | |||
1899 | # | ||
1900 | # Block modes | ||
1901 | # | ||
1902 | CONFIG_CRYPTO_CBC=y | ||
1903 | CONFIG_CRYPTO_CTR=y | ||
1904 | CONFIG_CRYPTO_CTS=y | ||
1905 | CONFIG_CRYPTO_ECB=y | ||
1906 | # CONFIG_CRYPTO_LRW is not set | ||
1907 | CONFIG_CRYPTO_PCBC=y | ||
1908 | # CONFIG_CRYPTO_XTS is not set | ||
1909 | |||
1910 | # | ||
1911 | # Hash modes | ||
1912 | # | ||
1913 | CONFIG_CRYPTO_HMAC=y | ||
1914 | CONFIG_CRYPTO_XCBC=y | ||
1915 | # CONFIG_CRYPTO_VMAC is not set | ||
1916 | |||
1917 | # | ||
1918 | # Digest | ||
1919 | # | ||
1920 | CONFIG_CRYPTO_CRC32C=y | ||
1921 | CONFIG_CRYPTO_CRC32C_INTEL=y | ||
1922 | CONFIG_CRYPTO_GHASH=y | ||
1923 | # CONFIG_CRYPTO_MD4 is not set | ||
1924 | CONFIG_CRYPTO_MD5=y | ||
1925 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1926 | CONFIG_CRYPTO_RMD128=y | ||
1927 | CONFIG_CRYPTO_RMD160=y | ||
1928 | CONFIG_CRYPTO_RMD256=y | ||
1929 | CONFIG_CRYPTO_RMD320=y | ||
1930 | CONFIG_CRYPTO_SHA1=y | ||
1931 | CONFIG_CRYPTO_SHA256=y | ||
1932 | CONFIG_CRYPTO_SHA512=y | ||
1933 | CONFIG_CRYPTO_TGR192=y | ||
1934 | CONFIG_CRYPTO_WP512=y | ||
1935 | # CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL is not set | ||
1936 | |||
1937 | # | ||
1938 | # Ciphers | ||
1939 | # | ||
1940 | CONFIG_CRYPTO_AES=y | ||
1941 | # CONFIG_CRYPTO_AES_X86_64 is not set | ||
1942 | # CONFIG_CRYPTO_AES_NI_INTEL is not set | ||
1943 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1944 | CONFIG_CRYPTO_ARC4=y | ||
1945 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1946 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1947 | CONFIG_CRYPTO_CAST5=y | ||
1948 | CONFIG_CRYPTO_CAST6=y | ||
1949 | CONFIG_CRYPTO_DES=y | ||
1950 | CONFIG_CRYPTO_FCRYPT=y | ||
1951 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1952 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1953 | # CONFIG_CRYPTO_SALSA20_X86_64 is not set | ||
1954 | # CONFIG_CRYPTO_SEED is not set | ||
1955 | CONFIG_CRYPTO_SERPENT=y | ||
1956 | CONFIG_CRYPTO_TEA=y | ||
1957 | CONFIG_CRYPTO_TWOFISH=y | ||
1958 | CONFIG_CRYPTO_TWOFISH_COMMON=y | ||
1959 | # CONFIG_CRYPTO_TWOFISH_X86_64 is not set | ||
1960 | |||
1961 | # | ||
1962 | # Compression | ||
1963 | # | ||
1964 | CONFIG_CRYPTO_DEFLATE=y | ||
1965 | CONFIG_CRYPTO_ZLIB=y | ||
1966 | CONFIG_CRYPTO_LZO=y | ||
1967 | |||
1968 | # | ||
1969 | # Random Number Generation | ||
1970 | # | ||
1971 | CONFIG_CRYPTO_ANSI_CPRNG=y | ||
1972 | # CONFIG_CRYPTO_HW is not set | ||
1973 | CONFIG_HAVE_KVM=y | ||
1974 | # CONFIG_VIRTUALIZATION is not set | ||
1975 | # CONFIG_BINARY_PRINTF is not set | ||
1976 | |||
1977 | # | ||
1978 | # Library routines | ||
1979 | # | ||
1980 | CONFIG_BITREVERSE=y | ||
1981 | CONFIG_GENERIC_FIND_FIRST_BIT=y | ||
1982 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
1983 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
1984 | CONFIG_CRC_CCITT=y | ||
1985 | CONFIG_CRC16=y | ||
1986 | # CONFIG_CRC_T10DIF is not set | ||
1987 | CONFIG_CRC_ITU_T=y | ||
1988 | CONFIG_CRC32=y | ||
1989 | # CONFIG_CRC7 is not set | ||
1990 | CONFIG_LIBCRC32C=y | ||
1991 | CONFIG_ZLIB_INFLATE=y | ||
1992 | CONFIG_ZLIB_DEFLATE=y | ||
1993 | CONFIG_LZO_COMPRESS=y | ||
1994 | CONFIG_LZO_DECOMPRESS=y | ||
1995 | CONFIG_TEXTSEARCH=y | ||
1996 | CONFIG_TEXTSEARCH_KMP=y | ||
1997 | CONFIG_TEXTSEARCH_BM=y | ||
1998 | CONFIG_TEXTSEARCH_FSM=y | ||
1999 | CONFIG_HAS_IOMEM=y | ||
2000 | CONFIG_HAS_IOPORT=y | ||
2001 | CONFIG_HAS_DMA=y | ||
2002 | CONFIG_NLATTR=y | ||
2003 | CONFIG_ARCH_HAS_FEATHER_TRACE=y | ||
2004 | CONFIG_ARCH_HAS_SEND_PULL_TIMERS=y | ||
2005 | |||
2006 | # | ||
2007 | # LITMUS^RT | ||
2008 | # | ||
2009 | |||
2010 | # | ||
2011 | # Scheduling | ||
2012 | # | ||
2013 | CONFIG_PLUGIN_CEDF=y | ||
2014 | CONFIG_PLUGIN_PFAIR=y | ||
2015 | CONFIG_RELEASE_MASTER=y | ||
2016 | |||
2017 | # | ||
2018 | # Real-Time Synchronization | ||
2019 | # | ||
2020 | CONFIG_NP_SECTION=y | ||
2021 | CONFIG_SRP=y | ||
2022 | CONFIG_FMLP=y | ||
2023 | |||
2024 | # | ||
2025 | # Tracing | ||
2026 | # | ||
2027 | CONFIG_FEATHER_TRACE=y | ||
2028 | CONFIG_SCHED_TASK_TRACE=y | ||
2029 | CONFIG_SCHED_OVERHEAD_TRACE=y | ||
2030 | CONFIG_SCHED_DEBUG_TRACE=y | ||
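A quick usage sketch for the sample configuration above (illustrative only, not part of the release; $DIR and the litmus-rt directory name are placeholders):

cp x86_64-config $DIR/litmus-rt/.config   # copy the sample config into the patched tree
cd $DIR/litmus-rt
make oldconfig                            # re-answer prompts for any new options
grep -E 'CONFIG_(PLUGIN_CEDF|PLUGIN_PFAIR|FEATHER_TRACE)=' .config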
@@ -1,676 +1,632 @@ | |||
1 | 1 | ||
2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> | 2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> |
3 | <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"> | 3 | <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"> |
4 | <head> | 4 | <head> |
5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> | 5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> |
6 | <meta name="verify-v1" content="pZNmf5XyUUfAPdlSPbFSavMUsLgVsmBYOXzOhbIy2gw=" /> | 6 | <meta name="verify-v1" content="pZNmf5XyUUfAPdlSPbFSavMUsLgVsmBYOXzOhbIy2gw=" /> |
7 | <link rel="stylesheet" type="text/css" href="inc/format.css"/> | 7 | <link rel="stylesheet" type="text/css" href="inc/format.css"/> |
8 | <title>LITMUS RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems</title> | 8 | <title>LITMUS RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems</title> |
9 | </head> | 9 | </head> |
10 | <body> | 10 | <body> |
11 | <div class="logobox"> | 11 | <div class="logobox"> |
12 | <img src="inc/litmusrt.png" alt="LITMUS^RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems" /> | 12 | <img src="inc/litmusrt.png" alt="LITMUS^RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems" /> |
13 | <p class="authors"> | 13 | <p class="authors"> |
14 | <a href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson & | 14 | <a href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson & |
15 | Students</a>, | 15 | Students</a>, |
16 | <a href="http://www.unc.edu">The University of North Carolina at Chapel Hill</a> | 16 | <a href="http://www.unc.edu">The University of North Carolina at Chapel Hill</a> |
17 | </p> | 17 | </p> |
18 | 18 | ||
19 | </div> | 19 | </div> |
20 | 20 | ||
21 | <div class="alertbox"> | 21 | <div class="alertbox"> |
22 | <h3 class="notopmargin">Interested in kernel hacking and real-time systems? We're hiring!</h3> | 22 | <h3 class="notopmargin">Interested in kernel hacking and real-time systems? We're hiring!</h3> |
23 | <p> | 23 | <p> |
24 | <a href="http://www.cs.unc.edu/rt">UNC's real-time group</a> is actively recruiting <strong>PhD-seeking graduate students</strong> with a strong background in Linux kernel development. Applicants must be highly-motivated self-starters and eager to complete a <a href="http://www.cs.unc.edu/Admin/AcademicPrograms/Doctoral/DoctoralReqOfficial.html">rigorous PhD program in computer science</a>. Having contributed to Linux (or other open source systems-level projects) and/or maintained an open source project is definitively a plus. Â (Extra bonus points for submitting a high-quality patch to LITMUS<sup>RT</sup>.) | 24 | <a href="http://www.cs.unc.edu/rt">UNC's real-time group</a> is actively recruiting <strong>PhD-seeking graduate students</strong> with a strong background in Linux kernel development. Applicants must be highly-motivated self-starters and eager to complete a <a href="http://www.cs.unc.edu/Admin/AcademicPrograms/Doctoral/DoctoralReqOfficial.html">rigorous PhD program in computer science</a>. Having contributed to Linux (or other open source systems-level projects) and/or maintained an open source project is definitively a plus. Â (Extra bonus points for submitting a high-quality patch to LITMUS<sup>RT</sup>.) |
25 | </p> | 25 | </p> |
26 | <p class="nobottommargin"> | 26 | <p class="nobottommargin"> |
27 | Have a look at our group's <a href="http://www.cs.unc.edu/%7Eanderson/papers.html">recent publications</a> to get a feel for the kind of work that we are doing. Sounds good? Great! Next step: check out the graduate school's <a href="http://www.cs.unc.edu/Admissions/">application requirements</a> and, if you meet the requirements, contact <a href="http://www.cs.unc.edu/~anderson">Prof. Anderson</a>. | 27 | Have a look at our group's <a href="http://www.cs.unc.edu/%7Eanderson/papers.html">recent publications</a> to get a feel for the kind of work that we are doing. Sounds good? Great! Next step: check out the graduate school's <a href="http://www.cs.unc.edu/Admissions/">application requirements</a> and, if you meet the requirements, contact <a href="http://www.cs.unc.edu/~anderson">Prof. Anderson</a>. |
28 | <br/><strong>Fun, challenging problems are waiting!</strong> | 28 | <br/><strong>Fun, challenging problems are waiting!</strong> |
29 | </p> | 29 | </p> |
30 | </div> | 30 | </div> |
31 | 31 | ||
32 | 32 | ||
33 | 33 | ||
34 | <div class="nav"> | 34 | <div class="nav"> |
35 | <p> | 35 | <p> |
36 | <a href="#about">about</a> - | 36 | <a href="#about">about</a> - |
37 | <a href="#support">support</a> - | 37 | <a href="#support">support</a> - |
38 | <a href="#collaborators">collaborators</a> - | 38 | <a href="#collaborators">collaborators</a> - |
39 | <a href="#publications">publications</a> - | 39 | <a href="#publications">publications</a> - |
40 | <a href="#download">download</a> - | 40 | <a href="#download">download</a> - |
41 | <a href="#install">installation</a> - | 41 | <a href="#install">installation</a> - |
42 | <a href="#doc">documentation</a> - | 42 | <a href="#doc">documentation</a> - |
43 | <a href="#development">development</a> | 43 | <a href="#development">development</a> |
44 | </p> | 44 | </p> |
45 | </div> | 45 | </div> |
46 | 46 | ||
47 | <h2 id="about">About</h2> | 47 | <h2 id="about">About</h2> |
48 | <div class="box"> | 48 | <div class="box"> |
49 | <p class="nomargin"> | 49 | <p class="nomargin"> |
50 | The LITMUS<sup>RT</sup> project is a soft real-time extension of the Linux | 50 | The LITMUS<sup>RT</sup> project is a soft real-time extension of the Linux |
51 | kernel with focus on multiprocessor real-time scheduling and | 51 | kernel with focus on multiprocessor real-time scheduling and |
52 | synchronization. The Linux kernel is modified | 52 | synchronization. The Linux kernel is modified |
53 | to support the sporadic task | 53 | to support the sporadic task |
54 | model and modular scheduler plugins. Both partitioned and global scheduling | 54 | model and modular scheduler plugins. Both partitioned and global scheduling |
55 | is supported. | 55 | is supported. |
56 | </p> | 56 | </p> |
57 | <p>LITMUS<sup>RT</sup> is the subject of <a href="slides/rtns2010-keynote.pptx">Prof. Anderson's RTNS'10 keynote address</a>. Please have a look at the linked slides for an in-depth discussion of our motivation, the project, and recent research. | 57 | <p>Please have a look at <a href="slides/rtns2010-keynote.pptx">Prof. Anderson's RTNS'10 keynote address</a> slides for a recent overview of our motivation, the project, and recent results. |
58 | </p> | 58 | </p> |
59 | <h3>Goals</h3> | 59 | <h3>Goals</h3> |
60 | <p class="notopmargin"> | 60 | <p class="notopmargin"> |
61 | The primary purpose of the LITMUS<sup>RT</sup> project is to <strong>provide a useful experimental platform for applied real-time systems research</strong>. In that regard, LITMUS<sup>RT</sup> provides abstractions and interfaces within the kernel that simplify the prototyping of multiprocessor real-time scheduling and synchronization algorithms (compared to modifying a "vanilla" Linux kernel). As a secondary goal, LITMUS<sup>RT</sup> serves as a <strong>proof of concept</strong>, showing that algorithms such as PFAIR can be implemented on current hardware. Finally, we hope that parts of LITMUS<sup>RT</sup> and the "lessons learned" may find value as blueprints/sources of inspiration for other (both commercial and open source) implementation efforts. | 61 | The primary purpose of the LITMUS<sup>RT</sup> project is to <strong>provide a useful experimental platform for applied real-time systems research</strong>. In that regard, LITMUS<sup>RT</sup> provides abstractions and interfaces within the kernel that simplify the prototyping of multiprocessor real-time scheduling and synchronization algorithms (compared to modifying a "vanilla" Linux kernel). As a secondary goal, LITMUS<sup>RT</sup> serves as a <strong>proof of concept</strong>, showing that algorithms such as PFAIR can be implemented on current hardware. Finally, we hope that parts of LITMUS<sup>RT</sup> and the "lessons learned" may find value as blueprints/sources of inspiration for other (both commercial and open source) implementation efforts. |
62 | </p> | 62 | </p> |
63 | <h3>Non-Goals</h3> | 63 | <h3>Non-Goals</h3> |
64 | <p class="notopmargin"> | 64 | <p class="notopmargin"> |
65 | LITMUS<sup>RT</sup> is not a production-quality system, and we currently have no plans to turn it into one. LITMUS<sup>RT</sup> is not "stable," <em>i.e.</em>, interfaces and implementations may change without warning between releases. POSIX-compliance is not a goal; the LITMUS<sup>RT</sup>-API offers alternate system call interfaces. While we aim to follow Linux coding guidelines, LITMUS<sup>RT</sup> is not targeted at being merged into mainline Linux. Rather, we hope that some of the ideas prototyped in LITMUS<sup>RT</sup> may eventually find adoption in Linux. | 65 | LITMUS<sup>RT</sup> is not a production-quality system, and we currently have no plans to turn it into one. LITMUS<sup>RT</sup> is not "stable," <em>i.e.</em>, interfaces and implementations may change without warning between releases. POSIX-compliance is not a goal; the LITMUS<sup>RT</sup>-API offers alternate system call interfaces. While we aim to follow Linux coding guidelines, LITMUS<sup>RT</sup> is not targeted at being merged into mainline Linux. Rather, we hope that some of the ideas prototyped in LITMUS<sup>RT</sup> may eventually find adoption in Linux.
66 | </p> | 66 | </p> |
67 | <h3>Current Version</h3> | 67 | <h3>Current Version</h3> |
68 | <p class="notopmargin"> | 68 | <p class="notopmargin"> |
69 | The current version of LITMUS<sup>RT</sup> is <strong>2010.2</strong> and is based on Linux 2.6.34. | 69 | The current version of LITMUS<sup>RT</sup> is <strong>2011.1</strong> and is based on Linux 2.6.36. |
70 | It was released on 10/21/2010 and includes plugins for the following | 70 | It was released in January 2011 and includes plugins for the following
71 | scheduling policies: | 71 | scheduling policies: |
72 | </p> | 72 | </p> |
73 | <ul> | 73 | <ul> |
74 | <li> Partitioned EDF with synchronization support (PSN-EDF)</li> | 74 | <li> Partitioned EDF with synchronization support (PSN-EDF)</li> |
75 | <li> Global EDF with synchronization support (GSN-EDF)</li> | 75 | <li> Global EDF with synchronization support (GSN-EDF)</li> |
76 | <li> Clustered EDF (C-EDF) </li> | 76 | <li> Clustered EDF (C-EDF) </li> |
77 | <li> PFAIR (both staggered and aligned quanta are supported)</li> | 77 | <li> PFAIR (both staggered and aligned quanta are supported)</li> |
78 | </ul> | 78 | </ul> |
79 | <p> | 79 | <p> |
80 | Please refer to the <a href="#download">download</a> and <a href="#install">installation</a> sections for details. | 80 | Please refer to the <a href="#download">download</a> and <a href="#install">installation</a> sections for details. |
81 | </p> | 81 | </p> |
82 | <p>Earlier versions, which supported additional scheduling policies, are discussed on separate pages dedicated to the <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008</a> (based on Linux 2.6.24) and <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 </a> (based on Linux 2.6.20) series. | 82 | <p>Earlier versions, which supported additional scheduling policies, are discussed on separate pages dedicated to the <a href="litmus2010.html">LITMUS<sup>RT</sup> 2010</a> (based on Linux 2.6.34), <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008</a> (based on Linux 2.6.24), and <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 </a> (based on Linux 2.6.20) series. |
83 | </p> | 83 | </p> |
84 | <p class="nobottommargin"> | 84 | <p class="nobottommargin"> |
85 | The first version of LITMUS<sup>RT</sup> (implemented in Spring 2006) | 85 | The first version of LITMUS<sup>RT</sup> (implemented in Spring 2006) |
86 | was based on Linux 2.6.9. | 86 | was based on Linux 2.6.9. |
87 | </p> | 87 | </p> |
88 | <h3> Development Plans </h3> | 88 | <h3> Development Plans </h3> |
89 | There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details. | 89 | There are plans to port LITMUS<sup>RT</sup> to the PowerPC platform. Please contact us for details. |
90 | </div> | 90 | </div> |
91 | 91 | ||
92 | 92 | ||
93 | 93 | ||
94 | <h2 id="support">Support</h2> | 94 | <h2 id="support">Support</h2> |
95 | <div class="box"> | 95 | <div class="box"> |
96 | <p class="nomargin"> | 96 | <p class="nomargin"> |
97 | The LITMUS<sup>RT</sup> development effort is being supported by grants from | 97 | The LITMUS<sup>RT</sup> development effort is being supported by grants from |
98 | AT&T, IBM, and Northrop Grumman Corps.; the National Science Foundation (grants CNS 0834270 and CNS 0834132); the U.S. | 98 | AT&T, IBM, and Northrop Grumman Corps.; the National Science Foundation (grants CNS 0834270 and CNS 0834132); the U.S. |
99 | Army Research Office (grant W911NF-09-1-0535); and the Air Force Office of Scientific Research (grant FA 9550-09-1-0549). | 99 | Army Research Office (grant W911NF-09-1-0535); and the Air Force Office of Scientific Research (grant FA 9550-09-1-0549). |
100 | </p> | 100 | </p> |
101 | </div> | 101 | </div> |
102 | 102 | ||
103 | <h2 id="collaborators">Collaborators</h2> | 103 | <h2 id="collaborators">Collaborators</h2> |
104 | <div class="box"> | 104 | <div class="box"> |
105 | <p class="notopmargin"> The LITMUS<sup>RT</sup> project is led by <a | 105 | <p class="notopmargin"> The LITMUS<sup>RT</sup> project is led by <a |
106 | href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson</a>. | 106 | href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson</a>. |
107 | </p> | 107 | </p> |
108 | <p> | 108 | <p> |
109 | The developers of the current version are: | 109 | The developers of the current version are: |
110 | </p> | 110 | </p> |
111 | <ul> | 111 | <ul> |
112 | <li> | 112 | <li> |
113 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (UNC Chapel Hill, maintainer) | 113 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (UNC Chapel Hill, maintainer) |
114 | </li> | 114 | </li> |
115 | <li> <a href="http://www.sprg.uniroma2.it/home/bastoni/">Andrea Bastoni</a> (University of Rome "Tor Vergata") | 115 | <li> <a href="http://www.sprg.uniroma2.it/home/bastoni/">Andrea Bastoni</a> (University of Rome "Tor Vergata") |
116 | </li> | 116 | </li> |
117 | <li> <a href="http://www.cs.unc.edu/~gelliott">Glenn Elliot</a> (UNC Chapel Hill) | 117 | <li> <a href="http://www.cs.unc.edu/~gelliott">Glenn Elliot</a> (UNC Chapel Hill) |
118 | </li> | 118 | </li> |
119 | </ul> | 119 | <li> <a href="http://www.cs.unc.edu/~cjk">Christopher Kenna</a> (UNC Chapel Hill) |
120 | <p > | 120 | </li> |
121 | Additional collaborators contributed to the previous <a href="litmus2008.html#collaborators">LITMUS<sup>RT</sup> 2008</a> and <a href="litmus2007.html#collaborators">LITMUS<sup>RT</sup> 2007</a> versions. | 121 | |
122 | </p> | 122 | </ul> |
123 | <p class="nobottommargin"> | 123 | <p > |
124 | We always welcome new contributors; please see <a href="#development">Development</a> below for information on how to get | 124 | Additional collaborators contributed to the previous <a href="litmus2010.html#collaborators">LITMUS<sup>RT</sup> 2010</a>, |
125 | access to our source code repository. | 125 | <a href="litmus2008.html#collaborators">LITMUS<sup>RT</sup> 2008</a>, and <a href="litmus2007.html#collaborators">LITMUS<sup>RT</sup> 2007</a> versions. |
126 | </p> | 126 | </p> |
127 | </div> | 127 | <p class="nobottommargin"> |
128 | 128 | We always welcome new contributors; please see <a href="#development">Development</a> below for information on how to get | |
129 | 129 | access to our source code repository. | |
130 | <h2 id="publications">Publications</h2> | 130 | </p> |
131 | <div class="box"> | 131 | </div> |
132 | 132 | ||
133 | <ol class="nomargin"> | 133 | |
134 | 134 | <h2 id="publications">Publications</h2> | |
135 | <li><p> | 135 | <div class="box"> |
136 | A. Bastoni, B. Brandenburg, and J. Anderson, | 136 |
137 | “An Empirical Comparison of Global, Partitioned, and Clustered Multiprocessor Real-Time Schedulers”, | 137 | <ol class="nomargin"> |
138 | <cite>Proceedings of the 31st IEEE Real-Time Systems Symposium</cite>, to appear, December 2010. | 138 |
139 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss10c.pdf">PDF</a>. | 139 | <li><p> |
140 | Longer version with all graphs: <a href="http://www.cs.unc.edu/~anderson/papers/rtss10clong.pdf">PDF</a> </p> | 140 | A. Bastoni, B. Brandenburg, and J. Anderson,
141 | </li> | 141 | “An Empirical Comparison of Global, Partitioned, and Clustered Multiprocessor Real-Time Schedulers”, |
142 | 142 | <cite>Proceedings of the 31st IEEE Real-Time Systems Symposium</cite>, pp. 14-24, December 2010.
143 | <li><p>J. Anderson, “Real-Time Multiprocessor Scheduling: Connecting Theory and Practice ”, keynote address, <cite>18th International Conference on Real-Time and Network Systems</cite>, November 4, 2010. | 143 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss10c.pdf">PDF</a>. |
144 | <a href="slides/rtns2010-keynote.pptx">PPTX</a>. | 144 | Longer version with all graphs: <a href="http://www.cs.unc.edu/~anderson/papers/rtss10clong.pdf">PDF</a> </p> |
145 | </p> | 145 | </li> |
146 | </li> | 146 | |
147 | 147 | <li><p>J. Anderson, “Real-Time Multiprocessor Scheduling: Connecting Theory and Practice ”, keynote address, <cite>18th International Conference on Real-Time and Network Systems</cite>, November 4, 2010. | |
148 | 148 | <a href="slides/rtns2010-keynote.pptx">PPTX</a>. | |
149 | <li><p> | 149 | </p> |
150 | G. Elliott and J. Anderson, | 150 | </li> |
151 | “Real-Time Multiprocessor Systems with GPUs”, | 151 | |
152 | <cite>Proceedings of the 18th International Conference on Real-Time and Network Systems</cite>, to appear, November 2010. | 152 | |
153 | <a href="http://www.cs.unc.edu/~anderson/papers/rtns10.pdf">PDF</a>. | 153 | <li><p> |
154 | Longer version with appendices: <a href="http://www.cs.unc.edu/~anderson/papers/rtns10extended.pdf">PDF</a> </p> | 154 | G. Elliott and J. Anderson, |
155 | </li> | 155 | “Real-Time Multiprocessor Systems with GPUs”, |
156 | 156 | <cite>Proceedings of the 18th International Conference on Real-Time and Network Systems</cite>, pp. 197-206, November 2010. | |
157 | <li> | 157 | <a href="http://www.cs.unc.edu/~anderson/papers/rtns10.pdf">PDF</a>. |
158 | <p> | 158 | Longer version with appendices: <a href="http://www.cs.unc.edu/~anderson/papers/rtns10extended.pdf">PDF</a> </p> |
159 | A. Bastoni, B. Brandenburg, and J. Anderson, | 159 | </li>
160 | “Cache-Related Preemption and Migration Delays: Empirical Approximation and Impact on Schedulability”, | 160 | |
161 | <cite>Proceedings of the Sixth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-44, July 2010. | 161 | <li>
162 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert10.pdf">PDF</a>. | 162 | <p> |
163 | </p> | 163 | A. Bastoni, B. Brandenburg, and J. Anderson,
164 | </li> | 164 | “Cache-Related Preemption and Migration Delays: Empirical Approximation and Impact on Schedulability”, |
165 | 165 | <cite>Proceedings of the Sixth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-44, July 2010. | |
166 | <li><p> | 166 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert10.pdf">PDF</a>. |
167 | B. Brandenburg and J. Anderson, | 167 | </p> |
168 | “On the Implementation of Global Real-Time | 168 | </li> |
169 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. | 169 | |
170 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. | 170 | <li><p> |
171 | Longer version with all graphs: | 171 | B. Brandenburg and J. Anderson, |
172 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. | 172 | “On the Implementation of Global Real-Time |
173 | </p> | 173 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. |
174 | <p> For reference, all evaluated plugins are provided as part of the following patch (against version 2008.3). | 174 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. |
175 | </p> | 175 | Longer version with all graphs: |
176 | <ul> | 176 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. |
177 | <li> | 177 | </p> |
178 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> | 178 | <p> For reference, all evaluated plugins are provided as part of the following patch (against version 2008.3). |
179 | </li> | 179 | </p> |
180 | </ul> | 180 | <ul> |
181 | 181 | <li> | |
182 | </li> | 182 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> |
183 | <li> | 183 | </li> |
184 | <p> | 184 | </ul> |
185 | B. Brandenburg and J. Anderson | 185 | |
186 | “Reader-Writer Synchronization for Shared-Memory Multiprocessor Real-Time Systems”, | 186 | </li> |
187 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 184-193, July 2009. | 187 | <li> |
188 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf">PDF</a>. | 188 | <p> |
189 | Long version with blocking terms: | 189 | B. Brandenburg and J. Anderson |
190 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.pdf">PDF</a>. | 190 | “Reader-Writer Synchronization for Shared-Memory Multiprocessor Real-Time Systems”, |
191 | </p> | 191 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 184-193, July 2009. |
192 | </li> | 192 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf">PDF</a>. |
193 | 193 | Long version with blocking terms: | |
194 | <li> | 194 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.pdf">PDF</a>. |
195 | <p> | 195 | </p> |
196 | J. Calandrino and J. Anderson | 196 | </li> |
197 | “On the Design and Implementation of a Cache-Aware Multicore Real-Time Scheduler”, | 197 | |
198 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 194-204, July 2009. | 198 | <li> |
199 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.pdf">PDF</a>. | 199 | <p> |
200 | </p> | 200 | J. Calandrino and J. Anderson |
201 | </li> | 201 | “On the Design and Implementation of a Cache-Aware Multicore Real-Time Scheduler”, |
202 | 202 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 194-204, July 2009. | |
203 | <li> | 203 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.pdf">PDF</a>. |
204 | <p> | 204 | </p> |
205 | M. Mollison, B. Brandenburg, and J. Anderson | 205 | </li> |
206 | “Towards Unit Testing Real-Time Schedulers in LITMUS<sup>RT</sup>”, | 206 | |
207 | <cite>Proceedings of the Fifth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-39, July 2009. | 207 | <li> |
208 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.pdf">PDF</a>. | 208 | <p> |
209 | </p> | 209 | M. Mollison, B. Brandenburg, and J. Anderson |
210 | </li> | 210 | “Towards Unit Testing Real-Time Schedulers in LITMUS<sup>RT</sup>”, |
211 | 211 | <cite>Proceedings of the Fifth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-39, July 2009. | |
212 | <li> | 212 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.pdf">PDF</a>. |
213 | <p> | 213 | </p> |
214 | B. Brandenburg and J. Anderson, | 214 | </li> |
215 | “A Comparison of the M-PCP, D-PCP, and FMLP on LITMUS<sup>RT</sup>”, | 215 | |
216 | <cite>Proceedings of the 12th International Conference on Principles of Distributed Systems</cite>, pp. 105-124, December 2008. | 216 | <li> |
217 | <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.pdf">PDF</a>. | 217 | <p> |
218 | </p> | 218 | B. Brandenburg and J. Anderson, |
219 | </li> | 219 | “A Comparison of the M-PCP, D-PCP, and FMLP on LITMUS<sup>RT</sup>”, |
220 | 220 | <cite>Proceedings of the 12th International Conference on Principles of Distributed Systems</cite>, pp. 105-124, December 2008. | |
221 | <li> | 221 | <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.pdf">PDF</a>. |
222 | <p> | 222 | </p> |
223 | B. Brandenburg, J. Calandrino, and J. Anderson, | 223 | </li> |
224 | “On the Scalability of Real-Time Scheduling Algorithms on Multicore Platforms: A Case Study”, | 224 | |
225 | <cite>Proceedings of the 29th IEEE Real-Time Systems Symposium</cite>, | 225 | <li> |
226 | pp. 157-169, December 2008. | 226 | <p> |
227 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.pdf">PDF</a>. | 227 | B. Brandenburg, J. Calandrino, and J. Anderson, |
228 | </p> | 228 | “On the Scalability of Real-Time Scheduling Algorithms on Multicore Platforms: A Case Study”, |
229 | </li> | 229 | <cite>Proceedings of the 29th IEEE Real-Time Systems Symposium</cite>, |
230 | 230 | pp. 157-169, December 2008. | |
231 | <li> | 231 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.pdf">PDF</a>. |
232 | <p> | 232 | </p> |
233 | B. Brandenburg and J. Anderson, | 233 | </li> |
234 | “An Implementation of the PCP, SRP, D-PCP, M-PCP, | 234 | |
235 | and FMLP Real-Time Synchronization Protocols in LITMUS<sup>RT</sup>”, | 235 | <li> |
236 | <cite>Proceedings of the 14th IEEE International Conference on Embedded and Real-Time Computing Systems and Applications</cite>, pp. 185-194, August 2008. | 236 | <p> |
237 | <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.pdf">PDF</a>. | 237 | B. Brandenburg and J. Anderson, |
238 | </p> | 238 | “An Implementation of the PCP, SRP, D-PCP, M-PCP, |
239 | <p><strong>Note:</strong> The work described in this paper took part in a branch that is currently not part of | 239 | and FMLP Real-Time Synchronization Protocols in LITMUS<sup>RT</sup>”, |
240 | the main distribution. For reference, we provide the branch as a separate download: | 240 | <cite>Proceedings of the 14th IEEE International Conference on Embedded and Real-Time Computing Systems and Applications</cite>, pp. 185-194, August 2008. |
241 | </p> | 241 | <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.pdf">PDF</a>. |
242 | <ul> | 242 | </p> |
243 | <li> | 243 | <p><strong>Note:</strong> The work described in this paper took part in a branch that is currently not part of |
244 | <a href="download/RTCSA08/litmus-rt-RTCSA08.patch">litmus-rt-RTCSA08.patch</a> | 244 | the main distribution. For reference, we provide the branch as a separate download: |
245 | </li> | 245 | </p> |
246 | <li> | 246 | <ul> |
247 | <a href="download/RTCSA08/liblitmus-RTCSA08.tgz">liblitmus-RTCSA08.tgz</a> | 247 | <li> |
248 | </li> | 248 | <a href="download/RTCSA08/litmus-rt-RTCSA08.patch">litmus-rt-RTCSA08.patch</a> |
249 | <li><a href="download/RTCSA08/SHA256SUMS">SHA256 check sums</a> | 249 | </li> |
250 | </li> | 250 | <li> |
251 | </ul> | 251 | <a href="download/RTCSA08/liblitmus-RTCSA08.tgz">liblitmus-RTCSA08.tgz</a> |
252 | <p>Please don't use this version for active development. If you are interested in this work, it would be best | 252 | </li> |
253 | to first port the desired features to a current version of LITMUS<sup>RT</sup> and merge them into the main distribution. | 253 | <li><a href="download/RTCSA08/SHA256SUMS">SHA256 check sums</a>
254 | </p> | 254 | </li> |
255 | 255 | </ul> | |
256 | </li> | 256 | <p>Please don't use this version for active development. If you are interested in this work, it would be best |
257 | 257 | to first port the desired features to a current version of LITMUS<sup>RT</sup> and merge them into the main distribution.
258 | <li> | 258 | </p> |
259 | <p> | 259 | |
260 | A. Block, B. Brandenburg, J. Anderson, | 260 | </li> |
261 | and S. Quint, “An Adaptive Framework for Multiprocessor Real-Time Systems”, | 261 | |
262 | <cite>Proceedings of the 20th Euromicro Conference on Real-Time Systems</cite>, pp. 23-33, July 2008. | 262 | <li> |
263 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.pdf">PDF</a>. | 263 | <p> |
264 | </p> | 264 | A. Block, B. Brandenburg, J. Anderson, |
265 | </li> | 265 | and S. Quint, “An Adaptive Framework for Multiprocessor Real-Time Systems”, |
266 | 266 | <cite>Proceedings of the 20th Euromicro Conference on Real-Time Systems</cite>, pp. 23-33, July 2008. | |
267 | <li> | 267 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.pdf">PDF</a>. |
268 | <p> | 268 | </p> |
269 | B. Brandenburg, J. Calandrino, A. Block, | 269 | </li> |
270 | H. Leontyev, and J. Anderson, “Real-Time Synchronization | 270 | |
271 | on Multiprocessors: To Block or Not to Block, to Suspend or | 271 | <li> |
272 | Spin?”, <cite> Proceedings of the 14th IEEE Real-Time and Embedded | 272 | <p> |
273 | Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | 273 | B. Brandenburg, J. Calandrino, A. Block, |
274 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.pdf">PDF</a>. | 274 | H. Leontyev, and J. Anderson, “Real-Time Synchronization |
275 | </p> | 275 | on Multiprocessors: To Block or Not to Block, to Suspend or |
276 | <p> | 276 | Spin?”, <cite> Proceedings of the 14th IEEE Real-Time and Embedded |
277 | Extended version, including all graphs: | 277 | Technology and Applications Symposium</cite>, pp. 342-353, April 2008. |
278 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.pdf">PDF</a>. | 278 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.pdf">PDF</a>. |
279 | </p> | 279 | </p> |
280 | </li> | 280 | <p> |
281 | 281 | Extended version, including all graphs: | |
282 | <li> | 282 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.pdf">PDF</a>. |
283 | <p> | 283 | </p> |
284 | B. Brandenburg, A. Block, J. Calandrino, U. Devi, H. Leontyev, and J. Anderson, | 284 | </li> |
285 | "LITMUS<sup>RT</sup>: A Status Report", <cite> Proceedings of the 9th | 285 | |
286 | Real-Time Linux Workshop</cite>, pp. 107-123, November 2007. | 286 | <li> |
287 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">PDF</a>. | 287 | <p> |
288 | </p> | 288 | B. Brandenburg, A. Block, J. Calandrino, U. Devi, H. Leontyev, and J. Anderson, |
289 | </li> | 289 | "LITMUS<sup>RT</sup>: A Status Report", <cite> Proceedings of the 9th |
290 | 290 | Real-Time Linux Workshop</cite>, pp. 107-123, November 2007. | |
291 | <li> | 291 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">PDF</a>. |
292 | <p> | 292 | </p> |
293 | B. Brandenburg and J. Anderson, "Integrating Hard/Soft Real-Time Tasks | 293 | </li> |
294 | and Best-Effort Jobs on Multiprocessors", <cite> Proceedings of the 19th Euromicro | 294 | |
295 | Conference on Real-Time Systems</cite>, pp. 61-70, July 2007. | 295 | <li> |
296 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.pdf">PDF</a>. | 296 | <p> |
297 | </p> | 297 | B. Brandenburg and J. Anderson, "Integrating Hard/Soft Real-Time Tasks |
298 | </li> | 298 | and Best-Effort Jobs on Multiprocessors", <cite> Proceedings of the 19th Euromicro |
299 | 299 | Conference on Real-Time Systems</cite>, pp. 61-70, July 2007. | |
300 | 300 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.pdf">PDF</a>. | |
301 | <li> | 301 | </p> |
302 | <p> | 302 | </li> |
303 | J. Calandrino, H. Leontyev, A. Block, U. Devi, and J. Anderson, | 303 | |
304 | "LITMUS<sup>RT</sup>: A Testbed for Empirically Comparing Real-Time | 304 | |
305 | Multiprocessor Schedulers ", <cite>Proceedings of the 27th IEEE Real-Time Systems | 305 | <li> |
306 | Symposium</cite>, pp. 111-123, December 2006. | 306 | <p> |
307 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.pdf">PDF</a>. | 307 | J. Calandrino, H. Leontyev, A. Block, U. Devi, and J. Anderson, |
308 | </p> | 308 | "LITMUS<sup>RT</sup>: A Testbed for Empirically Comparing Real-Time |
309 | </li> | 309 | Multiprocessor Schedulers ", <cite>Proceedings of the 27th IEEE Real-Time Systems |
310 | 310 | Symposium</cite>, pp. 111-123, December 2006. | |
311 | </ol> | 311 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.pdf">PDF</a>. |
312 | </div> | 312 | </p> |
313 | 313 | </li> | |
314 | <h2 id="download">Download</h2> | 314 | |
315 | <div class="box"> | 315 | </ol> |
316 | <p class="notopmargin"> | 316 | </div> |
317 | The source code of LITMUS<sup>RT</sup> is made available as open source | 317 | |
318 | under the terms of the <a href="http://www.gnu.org/licenses/gpl.txt">GNU | 318 | <h2 id="download">Download</h2> |
319 | General Public License (GPL)</a>. LITMUS<sup>RT</sup> is released as a patch against Linux. Additionally, | 319 | <div class="box"> |
320 | it is also available as a <span class="src">git</span> repository (see <a href="#development">Development</a> below). | 320 | <p class="notopmargin"> |
321 | </p> | 321 | The source code of LITMUS<sup>RT</sup> is made available as open source |
322 | <p> | 322 | under the terms of the <a href="http://www.gnu.org/licenses/gpl.txt">GNU |
323 | The current release of LITMUS<sup>RT</sup> is 2010.2. | 323 | General Public License (GPL)</a>. LITMUS<sup>RT</sup> is released as a patch against Linux. Additionally, |
324 | It consists of our Linux kernel modifications in the form of | 324 | it is also available as a <span class="src">git</span> repository (see <a href="#development">Development</a> below). |
325 | a patch against Linux 2.6.34 and | 325 | </p> |
326 | 326 | <p> | |
327 | <span class="src">liblitmus</span>, the user-space API for real-time | 327 | The current release of LITMUS<sup>RT</sup> is 2010.2. |
328 | tasks, as well as <span class="src">ft_tools</span>, a collection of tools | 328 | It consists of our Linux kernel modifications in the form of |
329 | used for tracing with <a href="http://www.cs.unc.edu/~bbb/feathertrace/">Feather-Trace</a> (which is part of the LITMUS<sup>RT</sup> patch). | 329 | a patch against Linux 2.6.36 and
330 | </p> | 330 | |
331 | 331 | <span class="src">liblitmus</span>, the user-space API for real-time | |
332 | 332 | tasks, as well as <span class="src">ft_tools</span>, a collection of tools | |
333 | <h3 class="relname">LITMUS<sup>RT</sup> 2010.2</h3> | 333 | used for tracing with <a href="http://www.cs.unc.edu/~bbb/feathertrace/">Feather-Trace</a> (which is part of the LITMUS<sup>RT</sup> patch). |
334 | <div class="release"> | 334 | </p> |
335 | <p> | 335 | |
336 | Based on Linux 2.6.34. Released in October 2010. | 336 | |
337 | 337 | ||
338 | </p> | 338 | |
339 | <h4>Files:</h4> | 339 | |
340 | <ul class="notopmargin"> | 340 | <h3 class="relname">LITMUS<sup>RT</sup> 2011.1</h3> |
341 | <li> | 341 | <div class="release"> |
342 | <a href="download/2010.2/litmus-rt-2010.2.patch">litmus-rt-2010.2.patch</a> | 342 | <p> |
343 | </li> | 343 | Based on Linux 2.6.36. Released in January 2011. |
344 | <li> | 344 | |
345 | <a href="download/2010.2/liblitmus-2010.2.tgz">liblitmus-2010.2.tgz</a> | 345 | </p> |
346 | </li> | 346 | <h4>Files:</h4> |
347 | <li> | 347 | <ul class="notopmargin"> |
348 | <a href="download/2010.2/ft_tools-2010.2.tgz">ft_tools-2010.2.tgz</a> | 348 | <li> |
349 | </li> | 349 | <a href="download/2011.1/litmus-rt-2011.1.patch">litmus-rt-2011.1.patch</a> |
350 | <li><a href="download/2010.2/SHA256SUMS">SHA256 check sums</a> | 350 | </li> |
351 | </li> | 351 | <li> |
352 | </ul> | 352 | <a href="download/2011.1/liblitmus-2011.1.tgz">liblitmus-2011.1.tgz</a> |
353 | <h4>Major changes since LITMUS<sup>RT</sup> 2010.1:</h4> | 353 | </li> |
354 | <ul class="notopmargin"> | 354 | <li> |
355 | <li> | 355 | <a href="download/2011.1/ft_tools-2011.1.tgz">ft_tools-2011.1.tgz</a> |
356 | Rebased LITMUS<sup>RT</sup> from Linux 2.6.32 to Linux 2.6.34. | 356 | </li> |
357 | </li> | 357 | <li><a href="download/2011.1/SHA256SUMS">SHA256 check sums</a> |
358 | <li> | 358 | </li> |
359 | Added support for configurable budget enforcement (no enforcement, coarse-grained enforcement on timer ticks, and precise enforcement using high-resolution timers). | 359 | </ul> |
360 | </li> | 360 | <h4>Major changes (since LITMUS<sup>RT</sup> 2010.2):</h4> |
361 | <li>Added support for a single cluster (all CPUs) under C-EDF</li> | 361 | <ul class="notopmargin">
362 | <li>Made some features optional (C-EDF, PFair, release-master mode).</li> | 362 | <li> |
363 | <li>Fixed several link and compile errors.</li> | 363 | Rebased LITMUS<sup>RT</sup> from Linux 2.6.34 to Linux 2.6.36. |
364 | </ul> | 364 | </li> |
365 | </div> | 365 | <li> |
366 | 366 | Added support for the ARM architecture (tested on a PB11MPCore baseboard with a four-core ARM11 MPCore CPU). | |
367 | 367 | </li> | |
368 | <h3 class="relname">LITMUS<sup>RT</sup> 2010.1</h3> | 368 | |
369 | <div class="release"> | 369 | <li> |
370 | <p> | 370 | Feather-Trace devices are now allocated dynamically and are properly registered with <span class="src">sysfs</span>. This avoids bugs due to major device number collisions and removes the need for manual device node creation (on a system with standard <span class="src">udev</span> rules). |
371 | Based on Linux 2.6.32. Released in May 2010. | 371 | </li> |
372 | 372 | ||
373 | </p> | 373 | <li> |
374 | <h4>Files:</h4> | 374 | Improved debug tracing output and made trace buffer size configurable. |
375 | <ul class="notopmargin"> | 375 | </li> |
376 | <li> | 376 | |
377 | <a href="download/2010.1/litmus-rt-2010.1.patch">litmus-rt-2010.1.patch</a> | 377 | <li> |
378 | </li> | 378 | Various bug fixes concerning C-EDF cluster size changes. The cluster size |
379 | <li> | 379 | can now be configured with the file <span class="src">/proc/litmus/plugins/C-EDF/cluster</span> (see the sketch below).
380 | <a href="download/2010.1/liblitmus-2010.1.tgz">liblitmus-2010.1.tgz</a> | 380 | </li> |
381 | </li> | 381 | |
382 | <li> | 382 | <li> |
383 | <a href="download/2010.1/ft_tools-2010.1.tgz">ft_tools-2010.1.tgz</a> | 383 | Various <span class="src">KConfig</span> cleanups and improvements. |
384 | </li> | 384 | </li> |
385 | <li><a href="download/2010.1/SHA256SUMS">SHA256 check sums</a> | 385 | |
386 | </li> | 386 | <li> |
387 | </ul> | 387 | Dropped <span class="src">scons</span> as the build system for <span class="src">liblitmus</span> |
388 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.3):</h4> | 388 | and reverted to makefiles. |
389 | <ul class="notopmargin"> | 389 | </li> |
390 | <li> | 390 | |
391 | Rebased LITMUS<sup>RT</sup> from Linux 2.6.24 to Linux 2.6.32. | 391 | <li> |
392 | </li> | 392 | Added cscope and TAG file generation to <span class="src">liblitmus</span>. |
393 | <li> | 393 | </li> |
394 | Added support for Intel x86-64 systems. | 394 | |
395 | </li> | 395 | <li> |
396 | 396 | <span class="src">st_trace</span> can now be controlled with signals (part of <span class="src">ft_tools</span>). | |
397 | <li> | 397 | </li> |
398 | Dropped sparc64 support. | 398 | |
399 | </li> | 399 | </ul> |
400 | 400 | </div> | |
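<p>For illustration, a sketch of adjusting the C-EDF cluster size at runtime; the value "L3" is an assumption, chosen by analogy to the L2/L3 cache-based cluster sizes supported by the C-EDF plugin, and is not a documented interface:</p>
<pre class="shell"># select C-EDF clustering (value assumed), then read the setting back
echo L3 > /proc/litmus/plugins/C-EDF/cluster
cat /proc/litmus/plugins/C-EDF/cluster</pre>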
401 | <li> | 401 | <p> |
402 | Ported Feather-Trace to x86-64. | 402 | Please note that the current implementation is a <em>prototype</em> with |
403 | </li> | 403 | certain limitations. It is not secure in a multiuser context, |
404 | 404 | <em>i.e.</em>, real-time system calls do not require superuser | |
405 | <li> | 405 | privileges. |
406 | Integrated recent changes in Linux's hrtimer infrastructure, which made the "norq" (no runqueue locks held) callbacks unnecessary. | 406 | </p> |
407 | </li> | 407 | |
408 | 408 | <p class="nobottommargin"> | |
409 | <li> | 409 | |
410 | Added the "LITMUS<sup>RT</sup> control device", a portable | 410 | Older releases: <a href="litmus2010.html">LITMUS<sup>RT</sup> 2010 series</a>, <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008 series</a>, <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. |
411 | mechanism for sharing a memory page between the kernel and user space tasks. | 411 | </p> |
412 | </li> | 412 | |
413 | 413 | </div> | |
414 | <li> | 414 | |
415 | Re-implemented support for non-preemptive sections on top of the control page. | 415 | |
416 | </li> | 416 | |
417 | 417 | <h2 id="install">Installation</h2> | |
418 | <li> | 418 | <div class="box"> |
419 | Improved C-EDF plugin. C-EDF now supports different cluster sizes (based on L2 and L3 cache sharing) and supports dynamic changes of cluster size (this requires reloading the plugin). | 419 | <p class="notopmargin"> |
420 | </li> | 420 | The current release of LITMUS<sup>RT</sup> consists of an |
421 | 421 | extension of the Linux kernel that adds support for the sporadic task | |
422 | 422 | model, a scheduler plugin infrastructure, and some scheduler plugins, as | |
423 | <li> | 423 | well as a user-space library that provides the LITMUS<sup>RT</sup> |
424 | Reimplemented debug tracing on top of Linux's "misc device class" and kfifo buffers. | 424 | real-time API. Note that the current implementation works on the |
425 | </li> | 425 | Intel x86 (both x86-32 and x86-64) and ARM (ARMv6) architectures.
426 | |||
427 | <li> | ||
428 | Improved build system of liblitmus. Users no longer have to edit the SConstruct file manually. Instead, they should provide a .config file (see <a href="#install">installation instructions</a>). | ||
429 | </li> | ||
430 | |||
431 | <li> Added some synchronization to plugin switching to avoid sporadic crashes.</li> | ||
432 | <li>Misc. bugfixes.</li> | ||
433 | |||
434 | </ul> | ||
435 | </div> | ||
436 | <p> | ||
437 | Please note that the current implementation is a <em>prototype</em> with | ||
438 | certain limitations. Most notably, it is not secure in a multiuser context, | ||
439 | <em>i.e.</em>, real-time system calls do not require superuser | ||
440 | privileges. | ||
441 | </p> | ||
442 | |||
443 | <p class="nobottommargin"> | ||
444 | |||
445 | Older releases: <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008 series</a>, <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. | ||
446 | </p> | ||
447 | |||
448 | </div> | ||
449 | |||
450 | |||
451 | |||
452 | <h2 id="install">Installation</h2> | ||
453 | <div class="box"> | ||
454 | <p class="notopmargin"> | ||
455 | The current release of LITMUS<sup>RT</sup> consists of an | ||
456 | extension of the Linux kernel that adds support for the sporadic task | ||
457 | model, a scheduler plugin infrastructure, and some scheduler plugins, as | ||
458 | well as a user-space library that provides the LITMUS<sup>RT</sup> | ||
459 | real-time API. Note that the current implementation only works on the | ||
460 | Intel x86-32 and x86-64 architectures. | ||
461 | </p> | ||
462 | <h3>Patching the Kernel</h3> | ||
463 | <p class="notopmargin"> | ||
464 | The extension to the Linux kernel is released as a patch against Linux | ||
465 | 2.6.34. To install the LITMUS<sup>RT</sup> kernel, first <a | ||
466 | href="http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.34.tar.bz2">download the Linux | ||
467 | kernel 2.6.34</a> and untar it in a directory of your choice (hereafter | ||
468 | referred to as <span class="src">$DIR</span>). Second, apply the | ||
469 | LITMUS<sup>RT</sup> patch (see <a href="#download">Section Download</a>) | ||
470 | and configure, compile, and install the kernel as usual. The patch is <span | ||
471 | class="src">-p1</span> applicable. | ||
472 | To summarize, the LITMUS<sup>RT</sup> kernel can be obtained, patched, and | ||
473 | compiled with the following commands: | ||
474 | </p> | ||
475 | <pre class="shell">cd $DIR | ||
476 | # get Linux 2.6.34 | ||
477 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.34.tar.bz2 | ||
478 | tar xjf linux-2.6.34.tar.bz2 | ||
479 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2010.2/litmus-rt-2010.2.patch | ||
480 | mv linux-2.6.34 litmus2010 | ||
481 | # apply the LITMUS RT patch | ||
482 | cd litmus2010 | ||
483 | patch -p1 < ../litmus-rt-2010.2.patch | ||
484 | # create a working kernel configuration | ||
485 | # - select HZ=1000 | ||
486 | # - enable in-kernel preemptions | ||
487 | # - disable NO_HZ | ||
488 | # - don't use power management options like frequency scaling | ||
489 | # - disable support for group scheduling | ||
490 | # - disable "Write protect kernel read-only data structures" (in kernel debug) | ||
491 | make menuconfig | ||
492 | # compile the kernel | ||
493 | make bzImage | ||
494 | make modules | ||
495 | # proceed to install kernel, build initrd, etc. | ||
496 | ... | ||
497 | </pre> | ||
498 | <p> | ||
499 | When configuring the kernel, note that there is a menu (at the very end of the list) | ||
500 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, we provide sample <a href="download/2010.2/32bit-config">32-bit</a> and <a href="download/2010.2/64bit-config">64-bit configurations</a> that are known to work under KVM. | ||
501 | </p> | ||
502 | |||
503 | <h3>Libraries</h3> | ||
504 | <p class="notopmargin"> | ||
505 | The user-space library for real-time tasks, <span class="src">liblitmus</span>, | ||
506 | depends on the LITMUS<sup>RT</sup> kernel and provides its own build system (based on <a href="http://www.scons.org/">scons</a>). | ||
507 | In order to compile <span class="src">liblitmus</span>, you need to adjust the | ||
508 | variable <span class="src">LITMUS_KERNEL</span> in the <span class="src">.config</span> file to point to your | ||
509 | copy of the kernel. The variables required for building <span class="src">liblitmus</span> can be listed with the <span class="src">scons -h</span> command. For reference, we provide a sample <a href="download/2010.2/liblitmus-2010-config">config</a> file. | ||
510 | Sample output of <span class="src">scons -h</span> is shown below. | ||
511 | </p> | ||
512 | <pre class="shell"> | ||
513 | scons -h | ||
514 | scons: Reading SConscript files ... | ||
515 | scons: done reading SConscript files. | ||
516 | |||
517 | ============================================= | ||
518 | liblitmus --- The LITMUS^RT Userspace Library | ||
519 | |||
520 | There are a number of user-configurable build | ||
521 | variables. These can either be set on the | ||
522 | command line (e.g., scons ARCH=x86) or read | ||
523 | from a local configuration file (.config). | ||
524 | |||
525 | Run 'scons --dump-config' to see the final | ||
526 | build configuration. | ||
527 | |||
528 | Build Variables | ||
529 | --------------- | ||
530 | |||
531 | LITMUS_KERNEL: Where to find the LITMUS^RT kernel. ( /path/to/LITMUS_KERNEL ) | ||
532 | default: ../litmus2010 | ||
533 | actual: ../litmus2010 | ||
534 | |||
535 | ARCH: Target architecture. (x86_64|sparc64|x86|i686) | ||
536 | default: x86_64 | ||
537 | actual: x86_64 | ||
538 | |||
539 | Use scons -H for help about command-line options. | ||
540 | |||
541 | </pre> | ||
542 | <p class="notopmargin"> | ||
543 | To summarize, <span class="src">liblitmus</span> can be obtained and | ||
544 | compiled with the following commands: | ||
545 | </p> | ||
546 | <pre class="shell"> | ||
547 | cd $DIR | ||
548 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2010.2/liblitmus-2010.2.tgz | ||
549 | tar xzf liblitmus-2010.2.tgz | ||
550 | cd liblitmus | ||
551 | # change LITMUS_KERNEL in .config to point to the kernel source | ||
552 | scons | ||
553 | </pre> | ||
554 | <p class="nobottommargin"> | ||
555 | Please refer to the <a href="#doc">documentation</a> on how to use the LITMUS<sup>RT</sup> | ||
556 | real-time API as provided by <span class="src">liblitmus</span>. | ||
557 | </p> | ||
558 | |||
559 | </div> | ||
560 | |||
561 | |||
562 | <h2 id="doc">Documentation</h2> | ||
563 | <div class="box"> | ||
564 | |||
565 | <p class="notopmargin"> | ||
566 | Unfortunately, most of the documentation has yet to be written. To get an overview of | ||
567 | the architecture of the kernel extension, we recommend reading the paper | ||
568 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">“LITMUS<sup>RT</sup>: | ||
569 | A Status Report”</a>. | ||
570 | </p> | ||
571 | <h3>Real-Time Scheduling Policies</h3> | ||
572 | <p class="qa"> | ||
573 | The kernel contains the following real-time scheduling policy implementations: | ||
574 | </p> | ||
575 | <ul> | ||
576 | <li> | ||
577 | PFAIR, an implementation of the PD<sup>2</sup> algorithm, | ||
578 | </li> | ||
579 | <li> | ||
580 | PSN-EDF, a partitioned EDF (P-EDF) implementation with support for the real-time synchronization protocol | ||
581 | FMLP, | ||
582 | </li> | ||
583 | <li> | ||
584 | GSN-EDF, a global EDF (G-EDF) implementation with support for the real-time synchronization protocol | ||
585 | FMLP, | ||
586 | </li> | ||
587 | <li> | ||
588 | C-EDF (Clustered EDF), a hybrid of G-EDF and P-EDF, and | ||
589 | </li> | ||
590 | <li> | ||
591 | Linux, a placeholder policy that disables all real-time functionality added by the LITMUS<sup>RT</sup> patch. | ||
592 | </li> | ||
593 | </ul> | ||
594 | <p> | ||
595 | Only one policy can be active at any time. Initially (<em>i.e.,</em> during and after boot), the "Linux" policy is active. | ||
596 | You can use the tool <span class="src">showsched</span> (part of <span class="src">liblitmus</span>) to display | ||
597 | the name of the currently active policy. | ||
598 | </p> | ||
599 | <h3>Changing the Active Policy</h3> | ||
600 | <p class="qa"> | ||
601 | You can use the tool <span class="src">setsched</span> (part of <span class="src">liblitmus</span>) | ||
602 | to select a new plugin at run time. | ||
603 | </p> | ||
604 | <div class="screenshot"> | ||
605 | <img src="gfx/setsched.png" alt="Screen shot of setsched"/> | ||
606 | </div> | ||
607 | <p> | ||
608 | Only root can change the active policy, and only when there are no real-time tasks present. | ||
609 | </p> | ||
610 | <p> | ||
611 | If you do not have the <span class="src">dialog</span> utility installed, then you can still use <span class="src">setsched</span> by passing the desired scheduling policy as a command-line parameter, <em>e.g.</em>, type <span class="src">setsched PFAIR</span> to activate the PFAIR plugin. | ||
612 | </p> | 426 | </p> |
427 | <h3>Patching the Kernel</h3> | ||
428 | <p class="notopmargin"> | ||
429 | The extension to the Linux kernel is released as a patch against Linux | ||
430 | 2.6.36. To install the LITMUS<sup>RT</sup> kernel, first <a | ||
431 | href="http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.36.tar.bz2">download the Linux | ||
432 | kernel 2.6.36</a> and untar it in a directory of your choice (hereafter | ||
433 | referred to as <span class="src">$DIR</span>). Second, apply the | ||
434 | LITMUS<sup>RT</sup> patch (see <a href="#download">Section Download</a>) | ||
435 | and configure, compile, and install the kernel as usual. The patch is <span | ||
436 | class="src">-p1</span> applicable. | ||
437 | To summarize, the LITMUS<sup>RT</sup> kernel can be obtained, patched, and | ||
438 | compiled with the following commands: | ||
439 | </p> | ||
440 | <pre class="shell">cd $DIR | ||
441 | # get Linux 2.6.36 | ||
442 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.36.tar.bz2 | ||
443 | tar xjf linux-2.6.36.tar.bz2 | ||
444 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2011.1/litmus-rt-2011.1.patch | ||
445 | mv linux-2.6.36 litmus-rt | ||
446 | # apply the LITMUS RT patch | ||
447 | cd litmus-rt | ||
448 | patch -p1 < ../litmus-rt-2011.1.patch | ||
449 | # create a working kernel configuration | ||
450 | # - select HZ=1000 | ||
451 | # - enable in-kernel preemptions | ||
452 | # - disable NO_HZ | ||
453 | # - don't use power management options like frequency scaling | ||
454 | # - disable support for group scheduling | ||
455 | # - disable "Write protect kernel read-only data structures" (in kernel debug) | ||
456 | make menuconfig | ||
457 | # compile the kernel | ||
458 | make bzImage | ||
459 | make modules | ||
460 | # proceed to install kernel, build initrd, etc. | ||
461 | ... | ||
462 | </pre> | ||
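<p>
The final installation steps depend on your distribution. As a rough sketch (run as root; the helper used to generate an initrd, such as <span class="src">mkinitramfs</span> or <span class="src">dracut</span>, varies by distribution):
</p>
<pre class="shell"># illustrative only -- consult your distribution's documentation
make modules_install
make install
</pre>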
463 | <p> | ||
464 | When configuring the kernel, note that there is a menu (at the very end of the list) | ||
465 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, we provide a sample <a href="download/2011.1/x86_64-config">64-bit configuration</a> that is known to work under KVM. | ||
466 | </p> | ||
467 | |||
468 | <h3>Libraries</h3> | ||
469 | <p class="notopmargin"> | ||
470 | The user-space library for real-time tasks, <span class="src">liblitmus</span>, | ||
471 | depends on the LITMUS<sup>RT</sup> kernel and provides its own build system. Run <span class="src">make help</span> for instructions. Note that | ||
472 | you will have to create a <span class="src">.config</span> file in the library directory to point the build system to the kernel sources. | ||
473 | For reference, we provide a sample <a href="download/2011.1/liblitmus-config">liblitmus configuration file</a>. | ||
474 | </p> | ||
475 | <p class="notopmargin"> | ||
476 | To summarize, <span class="src">liblitmus</span> can be obtained and | ||
477 | compiled with the following commands: | ||
478 | </p> | ||
479 | <pre class="shell"> | ||
480 | cd $DIR | ||
481 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2011.1/liblitmus-2011.1.tgz | ||
482 | tar xzf liblitmus-2011.1.tgz | ||
483 | cd liblitmus | ||
484 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2011.1/liblitmus-config | ||
485 | mv liblitmus-config .config | ||
486 | make | ||
487 | </pre> | ||
488 | |||
489 | <h3>Tools</h3> | ||
490 | <p class="notopmargin"> | ||
491 | The Feather-Trace tools (<span class="src">ft_tools</span>) depend on the <span class="src">liblitmus</span> build system, so you must successfully compile <span class="src">liblitmus</span> before building the <span class="src">ft_tools</span> project. | ||
492 | For reference, we provide a sample <a href="download/2011.1/ft_tools-config">ft_tools configuration file</a>. | ||
493 | </p> | ||
494 | <p class="notopmargin"> | ||
495 | To summarize, <span class="src">ft_tools</span> can be obtained and | ||
496 | compiled with the following commands: | ||
497 | </p> | ||
498 | <pre class="shell"> | ||
499 | cd $DIR | ||
500 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2011.1/ft_tools-2011.1.tgz | ||
501 | tar xzf ft_tools-2011.1.tgz | ||
502 | cd ft_tools | ||
503 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2011.1/ft_tools-config | ||
504 | mv ft_tools-config .config | ||
505 | make | ||
506 | </pre> | ||
507 | |||
613 | 508 | ||
509 | <p class="nobottommargin"> | ||
510 | Please refer to the <a href="#doc">documentation</a> on how to use the LITMUS<sup>RT</sup> | ||
511 | real-time API as provided by <span class="src">liblitmus</span>. | ||
512 | </p> | ||
513 | </div> | ||
514 | |||
515 | |||
516 | <h2 id="doc">Documentation</h2> | ||
517 | <div class="box"> | ||
518 | |||
519 | <p class="notopmargin"> | ||
520 | Unfortunately, most of the documentation has yet to be written. To get an overview of | ||
521 | the architecture of the kernel extension, we recommend reading the paper | ||
522 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">“LITMUS<sup>RT</sup>: | ||
523 | A Status Report”</a>. | ||
524 | </p> | ||
525 | <h3>Real-Time Scheduling Policies</h3> | ||
526 | <p class="qa"> | ||
527 | The kernel contains the following real-time scheduling policy implementations: | ||
528 | </p> | ||
529 | <ul> | ||
530 | <li> | ||
531 | PFAIR, an implementation of the PD<sup>2</sup> algorithm, | ||
532 | </li> | ||
533 | <li> | ||
534 | PSN-EDF, a partitioned EDF (P-EDF) implementation with support for the real-time synchronization protocol | ||
535 | FMLP, | ||
536 | </li> | ||
537 | <li> | ||
538 | GSN-EDF, a global EDF (G-EDF) implementation with support for the real-time synchronization protocol | ||
539 | FMLP, | ||
540 | </li> | ||
541 | <li> | ||
542 | C-EDF (Clustered EDF), a hybrid of G-EDF and P-EDF, and | ||
543 | </li> | ||
544 | <li> | ||
545 | Linux, a placeholder policy that disables all real-time functionality added by the LITMUS<sup>RT</sup> patch. | ||
546 | </li> | ||
547 | </ul> | ||
548 | <p> | ||
549 | Only one policy can be active at any time. Initially (<em>i.e.,</em> during and after boot), the "Linux" policy is active. | ||
550 | You can use the tool <span class="src">showsched</span> (part of <span class="src">liblitmus</span>) to display | ||
551 | the name of the currently active policy. | ||
552 | </p> | ||
553 | <h3>Changing the Active Policy</h3> | ||
554 | <p class="qa"> | ||
555 | You can use the tool <span class="src">setsched</span> (part of <span class="src">liblitmus</span>) | ||
556 | to select a new plugin at run time. | ||
557 | </p> | ||
558 | <div class="screenshot"> | ||
559 | <img src="gfx/setsched.png" alt="Screen shot of setsched"/> | ||
560 | </div> | ||
561 | <p> | ||
562 | Only root can change the active policy, and only when there are no real-time tasks present. | ||
563 | </p> | ||
564 | <p> | ||
565 | If you do not have the <span class="src">dialog</span> utility installed, then you can still use <span class="src">setsched</span> by passing the desired scheduling policy as a command-line parameter, <em>e.g.</em>, type <span class="src">setsched PFAIR</span> to activate the PFAIR plugin. | ||
566 | </p> | ||
567 | <p>The list of loaded plugins is exported at <span class="src">/proc/litmus/plugins/loaded</span>; the active plugin is reported at <span class="src">/proc/litmus/active_plugin</span>.</p> | ||
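<p>
For example, a quick session for inspecting and switching the active policy might look as follows (a sketch; run as root and assume the <span class="src">liblitmus</span> tools are in your <span class="src">PATH</span>):
</p>
<pre class="shell">cat /proc/litmus/plugins/loaded   # list the loaded plugins
cat /proc/litmus/active_plugin    # initially reports "Linux"
setsched GSN-EDF                  # activate the G-EDF implementation
cat /proc/litmus/active_plugin    # should now report "GSN-EDF"
</pre>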
568 | |||
614 | <h3>Selecting the C-EDF Cluster Size</h3> | 569 | <h3>Selecting the C-EDF Cluster Size</h3> |
615 | <p class="qa"> | 570 | <p class="qa"> |
616 | The C-EDF plugin can create clusters based on the sharing of L2 or L3 caches. When the plugin is activated (see above), it configures clusters based on the value last written to <span class="src">/proc/litmus/cluster_cache</span> (either "L2" or "L3"). Note that C-EDF must be reloaded (for example, by switching to the Linux plugin and back to C-EDF) for a change of the desired cluster size to take effect; changing the cluster size while C-EDF is active is not supported. | 571 | The C-EDF plugin can create clusters based on the cache topology. When the plugin is activated (see above), it configures clusters based on the value ("L1", "L2", "L3", or "ALL") last written to <span class="src">/proc/litmus/plugins/C-EDF/cluster</span>. Note that C-EDF must be reloaded (for example, by switching to the Linux plugin and back to C-EDF) for a change of the desired cluster size to take effect; changing the cluster size while C-EDF is active is not supported. |
617 | </p> | 572 | </p> |
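<p>
For example, to group cores that share an L3 cache into clusters (a sketch based on the description above; run as root):
</p>
<pre class="shell">echo L3 > /proc/litmus/plugins/C-EDF/cluster
# reload the plugin so that the new cluster size takes effect
setsched Linux
setsched C-EDF
</pre>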
573 | |||
618 | 574 | ||
619 | <h3>Writing Real-Time Tasks</h3> | 575 | <h3>Writing Real-Time Tasks</h3> |
620 | <p class="qa"> | 576 | <p class="qa"> |
621 | The user space library that provides the LITMUS<sup>RT</sup> API, | 577 | The user space library that provides the LITMUS<sup>RT</sup> API, |
622 | <span class="src">liblitmus</span>, contains two example real-time tasks | 578 | <span class="src">liblitmus</span>, contains two example real-time tasks |
623 | (<span class="src">base_task.c</span> and | 579 | (<span class="src">base_task.c</span> and |
624 | <span class="src">base_mt_task.c</span>) | 580 | <span class="src">base_mt_task.c</span>) |
625 | that both illustrate how to use the API and provide a skeleton for real-time | 581 | that both illustrate how to use the API and provide a skeleton for real-time |
626 | task development. To get started with development, please take a look at these example | 582 | task development. To get started with development, please take a look at these example |
627 | programs. | 583 | programs. |
628 | </p> | 584 | </p> |
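<p>
For instance, after building <span class="src">liblitmus</span> as described above, a first experiment could look as follows (a sketch; it assumes the example binaries were built in the <span class="src">liblitmus</span> directory, which may differ between releases):
</p>
<pre class="shell">cd $DIR/liblitmus
setsched GSN-EDF   # as root: activate a real-time plugin
./base_task        # run the single-threaded example task
setsched Linux     # restore the default policy afterwards
</pre>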
629 | <h3>Tracing Overheads and Scheduling Decisions</h3> | 585 | <h3>Tracing Overheads and Scheduling Decisions</h3> |
630 | <p class="qa">LITMUS<sup>RT</sup> provides numerous tracing facilities that are discussed in-depth in the tutorial <a href="doc/tracing.html">Tracing with LITMUS<sup>RT</sup></a>. | 586 | <p class="qa">LITMUS<sup>RT</sup> provides numerous tracing facilities that are discussed in-depth in the tutorial <a href="doc/tracing.html">Tracing with LITMUS<sup>RT</sup></a>. |
631 | </p> | 587 | </p> |
632 | <p class="nobottommargin"> | 588 | <p class="nobottommargin"> |
633 | Please contact the <a href="#collaborators">current maintainer</a> if you have any | 589 | Please contact the <a href="#collaborators">current maintainer</a> if you have any |
634 | questions. | 590 | questions. |
635 | </p> | 591 | </p> |
636 | 592 | ||
637 | 593 | ||
638 | </div> | 594 | </div> |
639 | 595 | ||
640 | <h2 id="development">Development</h2> | 596 | <h2 id="development">Development</h2> |
641 | <div class="box"> | 597 | <div class="box"> |
642 | <p class="nomargin"> | 598 | <p class="nomargin"> |
643 | Patches and suggestions are very welcome! | 599 | Patches and suggestions are very welcome! |
644 | Both the LITMUS<sup>RT</sup> kernel and liblitmus are available | 600 | Both the LITMUS<sup>RT</sup> kernel and liblitmus are available |
645 | as a public <a href="http://git-scm.com/">git</a> repository at | 601 | as a public <a href="http://git-scm.com/">git</a> repository at |
646 | <a href="public-repository/index.html">http://www.cs.unc.edu/~anderson/litmus-rt/public-repository</a>. | 602 | <a href="public-repository/index.html">http://www.cs.unc.edu/~anderson/litmus-rt/public-repository</a>. |
647 | </p> | 603 | </p> |
648 | </div> | 604 | </div> |
649 | 605 | ||
650 | 606 | ||
651 | 607 | ||
652 | <h2 id="credits">Credits</h2> | 608 | <h2 id="credits">Credits</h2> |
653 | <div class="box"> | 609 | <div class="box"> |
654 | <div style="float: right;"> | 610 | <div style="float: right;"> |
655 | <a href="http://validator.w3.org/check?uri=referer"><img | 611 | <a href="http://validator.w3.org/check?uri=referer"><img |
656 | src="http://www.w3.org/Icons/valid-xhtml10" | 612 | src="http://www.w3.org/Icons/valid-xhtml10" |
657 | alt="Valid XHTML 1.0 Strict" height="31" width="88"/></a> | 613 | alt="Valid XHTML 1.0 Strict" height="31" width="88"/></a> |
658 | </div> | 614 | </div> |
659 | 615 | ||
660 | <p class="nomargin"> | 616 | <p class="nomargin"> |
661 | Linux is a registered trademark of Linus Torvalds. <br /> The | 617 | Linux is a registered trademark of Linus Torvalds. <br /> The |
662 | LITMUS<sup>RT</sup> logo was designed by Jasper McChesney of <a href="http://www.breakforsense.net/">Break for Sense Design</a>. <br /> | 618 | LITMUS<sup>RT</sup> logo was designed by Jasper McChesney of <a href="http://www.breakforsense.net/">Break for Sense Design</a>. <br /> |
663 | Web design by Björn Brandenburg. | 619 | Web design by Björn Brandenburg. |
664 | </p> | 620 | </p> |
665 | 621 | ||
666 | 622 | ||
667 | </div> | 623 | </div> |
668 | 624 | ||
669 | <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"> | 625 | <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"> |
670 | </script> | 626 | </script> |
671 | <script type="text/javascript"> | 627 | <script type="text/javascript"> |
672 | _uacct = "UA-3184628-1"; | 628 | _uacct = "UA-3184628-1"; |
673 | urchinTracker(); | 629 | urchinTracker(); |
674 | </script> | 630 | </script> |
675 | </body> | 631 | </body> |
676 | </html> | 632 | </html> |
diff --git a/litmus2010.html b/litmus2010.html new file mode 100644 index 0000000..4872502 --- /dev/null +++ b/litmus2010.html | |||
@@ -0,0 +1,674 @@ | |||
1 | |||
2 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> | ||
3 | <html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en"> | ||
4 | <head> | ||
5 | <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> | ||
6 | <meta name="verify-v1" content="pZNmf5XyUUfAPdlSPbFSavMUsLgVsmBYOXzOhbIy2gw=" /> | ||
7 | <link rel="stylesheet" type="text/css" href="inc/format.css"/> | ||
8 | <title>LITMUS RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems</title> | ||
9 | </head> | ||
10 | <body> | ||
11 | <div class="logobox"> | ||
12 | <img src="inc/litmusrt.png" alt="LITMUS^RT: Linux Testbed for Multiprocessor Scheduling in Real-Time Systems" /> | ||
13 | <p class="authors"> | ||
14 | <a href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson & | ||
15 | Students</a>, | ||
16 | <a href="http://www.unc.edu">The University of North Carolina at Chapel Hill</a> | ||
17 | </p> | ||
18 | |||
19 | </div> | ||
20 | |||
21 | <div class="alertbox"> | ||
22 | <p class="nomargin"> | ||
23 | <em><b>NOTE:</b> This web page discusses an older version of | ||
24 | LITMUS<sup>RT</sup>. Please use the | ||
25 | <a href="index.html">current version</a> unless you | ||
26 | have specific interest in the 2010 series. | ||
27 | </em> | ||
28 | </p> | ||
29 | </div> | ||
30 | |||
31 | |||
32 | <div class="nav"> | ||
33 | <p> | ||
34 | <a href="#about">about</a> - | ||
35 | <a href="#support">support</a> - | ||
36 | <a href="#collaborators">collaborators</a> - | ||
37 | <a href="#publications">publications</a> - | ||
38 | <a href="#download">download</a> - | ||
39 | <a href="#install">installation</a> - | ||
40 | <a href="#doc">documentation</a> - | ||
41 | <a href="#development">development</a> | ||
42 | </p> | ||
43 | </div> | ||
44 | |||
45 | <h2 id="about">About</h2> | ||
46 | <div class="box"> | ||
47 | <p class="nomargin"> | ||
48 | The LITMUS<sup>RT</sup> project is a soft real-time extension of the Linux | ||
49 | kernel with focus on multiprocessor real-time scheduling and | ||
50 | synchronization. The Linux kernel is modified | ||
51 | to support the sporadic task | ||
52 | model and modular scheduler plugins. Both partitioned and global scheduling | ||
53 | are supported. | ||
54 | </p> | ||
55 | <p>LITMUS<sup>RT</sup> is the subject of <a href="slides/rtns2010-keynote.pptx">Prof. Anderson's RTNS'10 keynote address</a>. Please have a look at the linked slides for an in-depth discussion of our motivation, the project, and recent research. | ||
56 | </p> | ||
57 | <h3>Goals</h3> | ||
58 | <p class="notopmargin"> | ||
59 | The primary purpose of the LITMUS<sup>RT</sup> project is to <strong>provide a useful experimental platform for applied real-time systems research</strong>. In that regard, LITMUS<sup>RT</sup> provides abstractions and interfaces within the kernel that simplify the prototyping of multiprocessor real-time scheduling and synchronization algorithms (compared to modifying a "vanilla" Linux kernel). As a secondary goal, LITMUS<sup>RT</sup> serves as a <strong>proof of concept</strong>, showing that algorithms such as PFAIR can be implemented on current hardware. Finally, we hope that parts of LITMUS<sup>RT</sup> and the "lessons learned" may find value as blueprints/sources of inspiration for other (both commercial and open source) implementation efforts. | ||
60 | </p> | ||
61 | <h3>Non-Goals</h3> | ||
62 | <p class="notopmargin"> | ||
63 | LITMUS<sup>RT</sup> is not a production-quality system, and we currently have no plans to turn it into one. LITMUS<sup>RT</sup> is not "stable," <em>i.e.</em>, interfaces and implementations may change without warning between releases. POSIX compliance is not a goal; the LITMUS<sup>RT</sup> API offers alternate system call interfaces. While we aim to follow Linux coding guidelines, LITMUS<sup>RT</sup> is not targeted at being merged into mainline Linux. Rather, we hope that some of the ideas prototyped in LITMUS<sup>RT</sup> may eventually find adoption in Linux. | ||
64 | </p> | ||
65 | <h3>Current Version</h3> | ||
66 | <p class="notopmargin"> | ||
67 | The current version of LITMUS<sup>RT</sup> is <strong>2010.2</strong> and is based on Linux 2.6.34. | ||
68 | It was released on 10/21/2010 and includes plugins for the following | ||
69 | scheduling policies: | ||
70 | </p> | ||
71 | <ul> | ||
72 | <li> Partitioned EDF with synchronization support (PSN-EDF)</li> | ||
73 | <li> Global EDF with synchronization support (GSN-EDF)</li> | ||
74 | <li> Clustered EDF (C-EDF) </li> | ||
75 | <li> PFAIR (both staggered and aligned quanta are supported)</li> | ||
76 | </ul> | ||
77 | <p> | ||
78 | Please refer to the <a href="#download">download</a> and <a href="#install">installation</a> sections for details. | ||
79 | </p> | ||
80 | <p>Earlier versions, which supported additional scheduling policies, are discussed on separate pages dedicated to the <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008</a> (based on Linux 2.6.24) and <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 </a> (based on Linux 2.6.20) series. | ||
81 | </p> | ||
82 | <p class="nobottommargin"> | ||
83 | The first version of LITMUS<sup>RT</sup> (implemented in Spring 2006) | ||
84 | was based on Linux 2.6.9. | ||
85 | </p> | ||
86 | <h3> Development Plans </h3> | ||
87 | <p class="notopmargin">There are plans to port LITMUS<sup>RT</sup> to PowerPC and ARM platforms. Please contact us for details.</p> | ||
88 | </div> | ||
89 | |||
90 | |||
91 | |||
92 | <h2 id="support">Support</h2> | ||
93 | <div class="box"> | ||
94 | <p class="nomargin"> | ||
95 | The LITMUS<sup>RT</sup> development effort is being supported by grants from | ||
96 | AT&T, IBM, and Northrop Grumman Corps.; the National Science Foundation (grants CNS 0834270 and CNS 0834132); the U.S. | ||
97 | Army Research Office (grant W911NF-09-1-0535); and the Air Force Office of Scientific Research (grant FA 9550-09-1-0549). | ||
98 | </p> | ||
99 | </div> | ||
100 | |||
101 | <h2 id="collaborators">Collaborators</h2> | ||
102 | <div class="box"> | ||
103 | <p class="notopmargin"> The LITMUS<sup>RT</sup> project is led by <a | ||
104 | href="http://www.cs.unc.edu/~anderson/">Dr. James H. Anderson</a>. | ||
105 | </p> | ||
106 | <p> | ||
107 | The developers of the current version are: | ||
108 | </p> | ||
109 | <ul> | ||
110 | <li> | ||
111 | <a href="http://www.cs.unc.edu/~bbb/">Björn B. Brandenburg</a> (UNC Chapel Hill, maintainer) | ||
112 | </li> | ||
113 | <li> <a href="http://www.sprg.uniroma2.it/home/bastoni/">Andrea Bastoni</a> (University of Rome "Tor Vergata") | ||
114 | </li> | ||
115 | <li> <a href="http://www.cs.unc.edu/~gelliott">Glenn Elliott</a> (UNC Chapel Hill) | ||
116 | </li> | ||
117 | </ul> | ||
118 | <p> | ||
119 | Additional collaborators contributed to the previous <a href="litmus2008.html#collaborators">LITMUS<sup>RT</sup> 2008</a> and <a href="litmus2007.html#collaborators">LITMUS<sup>RT</sup> 2007</a> versions. | ||
120 | </p> | ||
121 | <p class="nobottommargin"> | ||
122 | We always welcome new contributors; please see <a href="#development">Development</a> below for information on how to get | ||
123 | access to our source code repository. | ||
124 | </p> | ||
125 | </div> | ||
126 | |||
127 | |||
128 | <h2 id="publications">Publications</h2> | ||
129 | <div class="box"> | ||
130 | |||
131 | <ol class="nomargin"> | ||
132 | |||
133 | <li><p> | ||
134 | A. Bastoni, B. Brandenburg, and J. Anderson, | ||
135 | “An Empirical Comparison of Global, Partitioned, and Clustered Multiprocessor Real-Time Schedulers”, | ||
136 | <cite>Proceedings of the 31st IEEE Real-Time Systems Symposium</cite>, to appear, December 2010. | ||
137 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss10c.pdf">PDF</a>. | ||
138 | Longer version with all graphs: <a href="http://www.cs.unc.edu/~anderson/papers/rtss10clong.pdf">PDF</a> </p> | ||
139 | </li> | ||
140 | |||
141 | <li><p>J. Anderson, “Real-Time Multiprocessor Scheduling: Connecting Theory and Practice”, keynote address, <cite>18th International Conference on Real-Time and Network Systems</cite>, November 4, 2010. | ||
142 | <a href="slides/rtns2010-keynote.pptx">PPTX</a>. | ||
143 | </p> | ||
144 | </li> | ||
145 | |||
146 | |||
147 | <li><p> | ||
148 | G. Elliott and J. Anderson, | ||
149 | “Real-Time Multiprocessor Systems with GPUs”, | ||
150 | <cite>Proceedings of the 18th International Conference on Real-Time and Network Systems</cite>, to appear, November 2010. | ||
151 | <a href="http://www.cs.unc.edu/~anderson/papers/rtns10.pdf">PDF</a>. | ||
152 | Longer version with appendices: <a href="http://www.cs.unc.edu/~anderson/papers/rtns10extended.pdf">PDF</a> </p> | ||
153 | </li> | ||
154 | |||
155 | <li> | ||
156 | <p> | ||
157 | A. Bastoni, B. Brandenburg, and J. Anderson, | ||
158 | “Cache-Related Preemption and Migration Delays: Empirical Approximation and Impact on Schedulability”, | ||
159 | <cite>Proceedings of the Sixth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-44, July 2010. | ||
160 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert10.pdf">PDF</a>. | ||
161 | </p> | ||
162 | </li> | ||
163 | |||
164 | <li><p> | ||
165 | B. Brandenburg and J. Anderson, | ||
166 | “On the Implementation of Global Real-Time | ||
167 | Schedulers”, <cite>Proceedings of the 30th IEEE Real-Time Systems Symposium</cite>, pp. 214-224, December 2009. | ||
168 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a.pdf">PDF</a>. | ||
169 | Longer version with all graphs: | ||
170 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss09a_long.pdf">PDF</a>. | ||
171 | </p> | ||
172 | <p> For reference, all evaluated plugins are provided as part of the following patch (against version 2008.3). | ||
173 | </p> | ||
174 | <ul> | ||
175 | <li> | ||
176 | <a href="download/RTSS09/litmus-rt-RTSS09.patch">litmus-rt-RTSS09.patch</a> | ||
177 | </li> | ||
178 | </ul> | ||
179 | |||
180 | </li> | ||
181 | <li> | ||
182 | <p> | ||
183 | B. Brandenburg and J. Anderson, | ||
184 | “Reader-Writer Synchronization for Shared-Memory Multiprocessor Real-Time Systems”, | ||
185 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 184-193, July 2009. | ||
186 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf">PDF</a>. | ||
187 | Long version with blocking terms: | ||
188 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09b-long.pdf">PDF</a>. | ||
189 | </p> | ||
190 | </li> | ||
191 | |||
192 | <li> | ||
193 | <p> | ||
194 | J. Calandrino and J. Anderson, | ||
195 | “On the Design and Implementation of a Cache-Aware Multicore Real-Time Scheduler”, | ||
196 | <cite>Proceedings of the 21st Euromicro Conference on Real-Time Systems</cite>, pp. 194-204, July 2009. | ||
197 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts09c.pdf">PDF</a>. | ||
198 | </p> | ||
199 | </li> | ||
200 | |||
201 | <li> | ||
202 | <p> | ||
203 | M. Mollison, B. Brandenburg, and J. Anderson, | ||
204 | “Towards Unit Testing Real-Time Schedulers in LITMUS<sup>RT</sup>”, | ||
205 | <cite>Proceedings of the Fifth International Workshop on Operating Systems Platforms for Embedded Real-Time Applications</cite>, pp. 33-39, July 2009. | ||
206 | <a href="http://www.cs.unc.edu/~anderson/papers/ospert09.pdf">PDF</a>. | ||
207 | </p> | ||
208 | </li> | ||
209 | |||
210 | <li> | ||
211 | <p> | ||
212 | B. Brandenburg and J. Anderson, | ||
213 | “A Comparison of the M-PCP, D-PCP, and FMLP on LITMUS<sup>RT</sup>”, | ||
214 | <cite>Proceedings of the 12th International Conference on Principles of Distributed Systems</cite>, pp. 105-124, December 2008. | ||
215 | <a href="http://www.cs.unc.edu/~anderson/papers/opodis08.pdf">PDF</a>. | ||
216 | </p> | ||
217 | </li> | ||
218 | |||
219 | <li> | ||
220 | <p> | ||
221 | B. Brandenburg, J. Calandrino, and J. Anderson, | ||
222 | “On the Scalability of Real-Time Scheduling Algorithms on Multicore Platforms: A Case Study”, | ||
223 | <cite>Proceedings of the 29th IEEE Real-Time Systems Symposium</cite>, | ||
224 | pp. 157-169, December 2008. | ||
225 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss08b.pdf">PDF</a>. | ||
226 | </p> | ||
227 | </li> | ||
228 | |||
229 | <li> | ||
230 | <p> | ||
231 | B. Brandenburg and J. Anderson, | ||
232 | “An Implementation of the PCP, SRP, D-PCP, M-PCP, | ||
233 | and FMLP Real-Time Synchronization Protocols in LITMUS<sup>RT</sup>”, | ||
234 | <cite>Proceedings of the 14th IEEE International Conference on Embedded and Real-Time Computing Systems and Applications</cite>, pp. 185-194, August 2008. | ||
235 | <a href="http://www.cs.unc.edu/~anderson/papers/rtcsa08.pdf">PDF</a>. | ||
236 | </p> | ||
237 | <p><strong>Note:</strong> The work described in this paper was carried out in a branch that is currently not part of | ||
238 | the main distribution. For reference, we provide the branch as a separate download: | ||
239 | </p> | ||
240 | <ul> | ||
241 | <li> | ||
242 | <a href="download/RTCSA08/litmus-rt-RTCSA08.patch">litmus-rt-RTCSA08.patch</a> | ||
243 | </li> | ||
244 | <li> | ||
245 | <a href="download/RTCSA08/liblitmus-RTCSA08.tgz">liblitmus-RTCSA08.tgz</a> | ||
246 | </li> | ||
247 | <li><a href="download/RTCSA08/SHA256SUMS">SHA256 check sums</a> | ||
248 | </li> | ||
249 | </ul> | ||
250 | <p>Please don't use this version for active development. If you are interested in this work, it would be best | ||
251 | to first port the desired features to a current version of LITMUS<sup>RT</sup> and merge them into the main distribution. | ||
252 | </p> | ||
253 | |||
254 | </li> | ||
255 | |||
256 | <li> | ||
257 | <p> | ||
258 | A. Block, B. Brandenburg, J. Anderson, | ||
259 | and S. Quint, “An Adaptive Framework for Multiprocessor Real-Time Systems”, | ||
260 | <cite>Proceedings of the 20th Euromicro Conference on Real-Time Systems</cite>, pp. 23-33, July 2008. | ||
261 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts08b.pdf">PDF</a>. | ||
262 | </p> | ||
263 | </li> | ||
264 | |||
265 | <li> | ||
266 | <p> | ||
267 | B. Brandenburg, J. Calandrino, A. Block, | ||
268 | H. Leontyev, and J. Anderson, “Real-Time Synchronization | ||
269 | on Multiprocessors: To Block or Not to Block, to Suspend or | ||
270 | Spin?”, <cite> Proceedings of the 14th IEEE Real-Time and Embedded | ||
271 | Technology and Applications Symposium</cite>, pp. 342-353, April 2008. | ||
272 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08.pdf">PDF</a>. | ||
273 | </p> | ||
274 | <p> | ||
275 | Extended version, including all graphs: | ||
276 | <a href="http://www.cs.unc.edu/~anderson/papers/rtas08along.pdf">PDF</a>. | ||
277 | </p> | ||
278 | </li> | ||
279 | |||
280 | <li> | ||
281 | <p> | ||
282 | B. Brandenburg, A. Block, J. Calandrino, U. Devi, H. Leontyev, and J. Anderson, | ||
283 | "LITMUS<sup>RT</sup>: A Status Report", <cite> Proceedings of the 9th | ||
284 | Real-Time Linux Workshop</cite>, pp. 107-123, November 2007. | ||
285 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">PDF</a>. | ||
286 | </p> | ||
287 | </li> | ||
288 | |||
289 | <li> | ||
290 | <p> | ||
291 | B. Brandenburg and J. Anderson, "Integrating Hard/Soft Real-Time Tasks | ||
292 | and Best-Effort Jobs on Multiprocessors", <cite> Proceedings of the 19th Euromicro | ||
293 | Conference on Real-Time Systems</cite>, pp. 61-70, July 2007. | ||
294 | <a href="http://www.cs.unc.edu/~anderson/papers/ecrts07b.pdf">PDF</a>. | ||
295 | </p> | ||
296 | </li> | ||
297 | |||
298 | |||
299 | <li> | ||
300 | <p> | ||
301 | J. Calandrino, H. Leontyev, A. Block, U. Devi, and J. Anderson, | ||
302 | "LITMUS<sup>RT</sup>: A Testbed for Empirically Comparing Real-Time | ||
303 | Multiprocessor Schedulers ", <cite>Proceedings of the 27th IEEE Real-Time Systems | ||
304 | Symposium</cite>, pp. 111-123, December 2006. | ||
305 | <a href="http://www.cs.unc.edu/~anderson/papers/rtss06b.pdf">PDF</a>. | ||
306 | </p> | ||
307 | </li> | ||
308 | |||
309 | </ol> | ||
310 | </div> | ||
311 | |||
312 | <h2 id="download">Download</h2> | ||
313 | <div class="box"> | ||
314 | <p class="notopmargin"> | ||
315 | The source code of LITMUS<sup>RT</sup> is made available as open source | ||
316 | under the terms of the <a href="http://www.gnu.org/licenses/gpl.txt">GNU | ||
317 | General Public License (GPL)</a>. LITMUS<sup>RT</sup> is released as a patch against Linux. Additionally, | ||
318 | it is also available as a <span class="src">git</span> repository (see <a href="#development">Development</a> below). | ||
319 | </p> | ||
320 | <p> | ||
321 | The current release of LITMUS<sup>RT</sup> is 2010.2. | ||
322 | It consists of our Linux kernel modifications in the form of | ||
323 | a patch against Linux 2.6.34 and | ||
324 | |||
325 | <span class="src">liblitmus</span>, the user-space API for real-time | ||
326 | tasks, as well as <span class="src">ft_tools</span>, a collection of tools | ||
327 | used for tracing with <a href="http://www.cs.unc.edu/~bbb/feathertrace/">Feather-Trace</a> (which is part of the LITMUS<sup>RT</sup> patch). | ||
328 | </p> | ||
329 | |||
330 | |||
331 | <h3 class="relname">LITMUS<sup>RT</sup> 2010.2</h3> | ||
332 | <div class="release"> | ||
333 | <p> | ||
334 | Based on Linux 2.6.34. Released in October 2010. | ||
335 | |||
336 | </p> | ||
337 | <h4>Files:</h4> | ||
338 | <ul class="notopmargin"> | ||
339 | <li> | ||
340 | <a href="download/2010.2/litmus-rt-2010.2.patch">litmus-rt-2010.2.patch</a> | ||
341 | </li> | ||
342 | <li> | ||
343 | <a href="download/2010.2/liblitmus-2010.2.tgz">liblitmus-2010.2.tgz</a> | ||
344 | </li> | ||
345 | <li> | ||
346 | <a href="download/2010.2/ft_tools-2010.2.tgz">ft_tools-2010.2.tgz</a> | ||
347 | </li> | ||
348 | <li><a href="download/2010.2/SHA256SUMS">SHA256 check sums</a> | ||
349 | </li> | ||
350 | </ul> | ||
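<p>
The integrity of the downloaded files can be verified against the published check sums, <em>e.g.</em> (assuming all files and <span class="src">SHA256SUMS</span> reside in the same directory):
</p>
<pre class="shell">sha256sum -c SHA256SUMS
</pre>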
351 | <h4>Major changes since LITMUS<sup>RT</sup> 2010.1:</h4> | ||
352 | <ul class="notopmargin"> | ||
353 | <li> | ||
354 | Rebased LITMUS<sup>RT</sup> from Linux 2.6.32 to Linux 2.6.34. | ||
355 | </li> | ||
356 | <li> | ||
357 | Added support for configurable budget enforcement (no enforcement, coarse-grained enforcement on timer ticks, and precise enforcement using high-resolution timers). | ||
358 | </li> | ||
359 | <li>Added support for a single cluster (all CPUs) under C-EDF.</li> | ||
360 | <li>Made some features optional (C-EDF, PFair, release-master mode).</li> | ||
361 | <li>Fixed several link and compile errors.</li> | ||
362 | </ul> | ||
363 | </div> | ||
364 | |||
365 | |||
366 | <h3 class="relname">LITMUS<sup>RT</sup> 2010.1</h3> | ||
367 | <div class="release"> | ||
368 | <p> | ||
369 | Based on Linux 2.6.32. Released in May 2010. | ||
370 | |||
371 | </p> | ||
372 | <h4>Files:</h4> | ||
373 | <ul class="notopmargin"> | ||
374 | <li> | ||
375 | <a href="download/2010.1/litmus-rt-2010.1.patch">litmus-rt-2010.1.patch</a> | ||
376 | </li> | ||
377 | <li> | ||
378 | <a href="download/2010.1/liblitmus-2010.1.tgz">liblitmus-2010.1.tgz</a> | ||
379 | </li> | ||
380 | <li> | ||
381 | <a href="download/2010.1/ft_tools-2010.1.tgz">ft_tools-2010.1.tgz</a> | ||
382 | </li> | ||
383 | <li><a href="download/2010.1/SHA256SUMS">SHA256 check sums</a> | ||
384 | </li> | ||
385 | </ul> | ||
386 | <h4>Major changes (since LITMUS<sup>RT</sup> 2008.3):</h4> | ||
387 | <ul class="notopmargin"> | ||
388 | <li> | ||
389 | Rebased LITMUS<sup>RT</sup> from Linux 2.6.24 to Linux 2.6.32. | ||
390 | </li> | ||
391 | <li> | ||
392 | Added support for Intel x86-64 systems. | ||
393 | </li> | ||
394 | |||
395 | <li> | ||
396 | Dropped sparc64 support. | ||
397 | </li> | ||
398 | |||
399 | <li> | ||
400 | Ported Feather-Trace to x86-64. | ||
401 | </li> | ||
402 | |||
403 | <li> | ||
404 | Integrated recent changes in Linux's hrtimer infrastructure, which made the "norq" (no runqueue locks held) callbacks unnecessary. | ||
405 | </li> | ||
406 | |||
407 | <li> | ||
408 | Added the "LITMUS<sup>RT</sup> control device", a portable | ||
409 | mechanism for sharing a memory page between the kernel and user space tasks. | ||
410 | </li> | ||
411 | |||
412 | <li> | ||
413 | Re-implemented support for non-preemptive sections on top of the control page. | ||
414 | </li> | ||
415 | |||
416 | <li> | ||
417 | Improved C-EDF plugin. C-EDF now supports different cluster sizes (based on L2 and L3 cache sharing) and supports dynamic changes of cluster size (this requires reloading the plugin). | ||
418 | </li> | ||
419 | |||
420 | |||
421 | <li> | ||
422 | Reimplemented debug tracing on top of Linux's "misc device class" and kfifo buffers. | ||
423 | </li> | ||
424 | |||
425 | <li> | ||
426 | Improved build system of liblitmus. Users no longer have to edit the SConstruct file manually. Instead, they should provide a .config file (see <a href="#install">installation instructions</a>). | ||
427 | </li> | ||
428 | |||
429 | <li> Added some synchronization to plugin switching to avoid sporadic crashes.</li> | ||
430 | <li>Misc. bugfixes.</li> | ||
431 | |||
432 | </ul> | ||
433 | </div> | ||
434 | <p> | ||
435 | Please note that the current implementation is a <em>prototype</em> with | ||
436 | certain limitations. Most notably, it is not secure in a multiuser context, | ||
437 | <em>i.e.</em>, real-time system calls do not require superuser | ||
438 | privileges. | ||
439 | </p> | ||
440 | |||
441 | <p class="nobottommargin"> | ||
442 | |||
443 | Older releases: <a href="litmus2008.html">LITMUS<sup>RT</sup> 2008 series</a>, <a href="litmus2007.html">LITMUS<sup>RT</sup> 2007 series</a>. | ||
444 | </p> | ||
445 | |||
446 | </div> | ||
447 | |||
448 | |||
449 | |||
450 | <h2 id="install">Installation</h2> | ||
451 | <div class="box"> | ||
452 | <p class="notopmargin"> | ||
453 | The current release of LITMUS<sup>RT</sup> consists of an | ||
454 | extension of the Linux kernel that adds support for the sporadic task | ||
455 | model, a scheduler plugin infrastructure, and some scheduler plugins, as | ||
456 | well as a user-space library that provides the LITMUS<sup>RT</sup> | ||
457 | real-time API. Note that the current implementation only works on the | ||
458 | Intel x86-32 and x86-64 architectures. | ||
459 | </p> | ||
460 | <h3>Patching the Kernel</h3> | ||
461 | <p class="notopmargin"> | ||
462 | The extension to the Linux kernel is released as a patch against Linux | ||
463 | 2.6.34. To install the LITMUS<sup>RT</sup> kernel, first <a | ||
464 | href="http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.34.tar.bz2">download the Linux | ||
465 | kernel 2.6.34</a> and untar it in a directory of your choice (hereafter | ||
466 | referred to as <span class="src">$DIR</span>). Second, apply the | ||
467 | LITMUS<sup>RT</sup> patch (see <a href="#download">Section Download</a>) | ||
468 | and configure, compile, and install the kernel as usual. The patch is <span | ||
469 | class="src">-p1</span> applicable. | ||
470 | To summarize, the LITMUS<sup>RT</sup> kernel can be obtained, patched, and | ||
471 | compiled with the following commands: | ||
472 | </p> | ||
473 | <pre class="shell">cd $DIR | ||
474 | # get Linux 2.6.34 | ||
475 | wget http://www.kernel.org/pub/linux/kernel/v2.6/linux-2.6.34.tar.bz2 | ||
476 | tar xjf linux-2.6.34.tar.bz2 | ||
477 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2010.2/litmus-rt-2010.2.patch | ||
478 | mv linux-2.6.34 litmus2010 | ||
479 | # apply the LITMUS RT patch | ||
480 | cd litmus2010 | ||
481 | patch -p1 < ../litmus-rt-2010.2.patch | ||
482 | # create a working kernel configuration | ||
483 | # - select HZ=1000 | ||
484 | # - enable in-kernel preemptions | ||
485 | # - disable NO_HZ | ||
486 | # - don't use power management options like frequency scaling | ||
487 | # - disable support for group scheduling | ||
488 | # - disable "Write protect kernel read-only data structures" (in kernel debug) | ||
489 | make menuconfig | ||
490 | # compile the kernel | ||
491 | make bzImage | ||
492 | make modules | ||
493 | # proceed to install kernel, build initrd, etc. | ||
494 | ... | ||
495 | </pre> | ||
496 | <p> | ||
497 | When configuring the kernel, note that there is a menu (at the very end of the list) | ||
498 | with LITMUS<sup>RT</sup>-specific configuration options. For reference, we provide sample <a href="download/2010.2/32bit-config">32-bit</a> and <a href="download/2010.2/64bit-config">64-bit configurations</a> that are known to work under KVM. | ||
499 | </p> | ||
500 | |||
501 | <h3>Libraries</h3> | ||
502 | <p class="notopmargin"> | ||
503 | The user-space library for real-time tasks, <span class="src">liblitmus</span>, | ||
504 | depends on the LITMUS<sup>RT</sup> kernel and provides its own build system (based on <a href="http://www.scons.org/">scons</a>). | ||
505 | In order to compile <span class="src">liblitmus</span>, you need to adjust the | ||
506 | variable <span class="src">LITMUS_KERNEL</span> in the <span class="src">.config</span> file to point to your | ||
507 | copy of the kernel. The variables required for building <span class="src">liblitmus</span> can be listed with the <span class="src">scons -h</span> command. For reference, we provide a sample <a href="download/2010.2/liblitmus-2010-config">config</a> file. | ||
508 | Sample output of <span class="src">scons -h</span> is shown below. | ||
509 | </p> | ||
510 | <pre class="shell"> | ||
511 | scons -h | ||
512 | scons: Reading SConscript files ... | ||
513 | scons: done reading SConscript files. | ||
514 | |||
515 | ============================================= | ||
516 | liblitmus --- The LITMUS^RT Userspace Library | ||
517 | |||
518 | There are a number of user-configurable build | ||
519 | variables. These can either be set on the | ||
520 | command line (e.g., scons ARCH=x86) or read | ||
521 | from a local configuration file (.config). | ||
522 | |||
523 | Run 'scons --dump-config' to see the final | ||
524 | build configuration. | ||
525 | |||
526 | Build Variables | ||
527 | --------------- | ||
528 | |||
529 | LITMUS_KERNEL: Where to find the LITMUS^RT kernel. ( /path/to/LITMUS_KERNEL ) | ||
530 | default: ../litmus2010 | ||
531 | actual: ../litmus2010 | ||
532 | |||
533 | ARCH: Target architecture. (x86_64|sparc64|x86|i686) | ||
534 | default: x86_64 | ||
535 | actual: x86_64 | ||
536 | |||
537 | Use scons -H for help about command-line options. | ||
538 | |||
539 | </pre> | ||
540 | <p class="notopmargin"> | ||
541 | To summarize, <span class="src">liblitmus</span> can be obtained and | ||
542 | compiled with the following commands: | ||
543 | </p> | ||
544 | <pre class="shell"> | ||
545 | cd $DIR | ||
546 | wget http://www.cs.unc.edu/~anderson/litmus-rt/download/2010.2/liblitmus-2010.2.tgz | ||
547 | tar xzf liblitmus-2010.2.tgz | ||
548 | cd liblitmus | ||
549 | # change LITMUS_KERNEL in .config to point to the kernel source | ||
550 | scons | ||
551 | </pre> | ||
552 | <p class="nobottommargin"> | ||
553 | Please refer to the <a href="#doc">documentation</a> on how to use the LITMUS<sup>RT</sup> | ||
554 | real-time API as provided by <span class="src">liblitmus</span>. | ||
555 | </p> | ||
556 | |||
557 | </div> | ||
558 | |||
559 | |||
560 | <h2 id="doc">Documentation</h2> | ||
561 | <div class="box"> | ||
562 | |||
563 | <p class="notopmargin"> | ||
564 | Unfortunately, most of the documentation has yet to be written. To get an overview of | ||
565 | the architecture of the kernel extension, we recommend reading the paper | ||
566 | <a href="http://www.cs.unc.edu/~anderson/papers/rtlws07.pdf">“LITMUS<sup>RT</sup>: | ||
567 | A Status Report”</a>. | ||
568 | </p> | ||
569 | <h3>Real-Time Scheduling Policies</h3> | ||
570 | <p class="qa"> | ||
571 | The kernel contains the following real-time scheduling policy implementations: | ||
572 | </p> | ||
573 | <ul> | ||
574 | <li> | ||
575 | PFAIR, an implementation of the PD<sup>2</sup> algorithm, | ||
576 | </li> | ||
577 | <li> | ||
578 | PSN-EDF, a partitioned EDF (P-EDF) implementation with support for the real-time synchronization protocol | ||
579 | FMLP, | ||
580 | </li> | ||
581 | <li> | ||
582 | GSN-EDF, a global EDF (G-EDF) implementation with support for the real-time synchronization protocol | ||
583 | FMLP, | ||
584 | </li> | ||
585 | <li> | ||
586 | C-EDF (Clustered EDF), a hybrid of G-EDF and P-EDF, and | ||
587 | </li> | ||
588 | <li> | ||
589 | Linux, a placeholder policy that disables all real-time functionality added by the LITMUS<sup>RT</sup> patch. | ||
590 | </li> | ||
591 | </ul> | ||
592 | <p> | ||
593 | Only one policy can be active at any time. Initially (<em>i.e.,</em> during and after boot), the "Linux" policy is active. | ||
594 | You can use the tool <span class="src">showsched</span> (part of <span class="src">liblitmus</span>) to display | ||
595 | the name of the currently active policy. | ||
596 | </p> | ||
597 | <h3>Changing the Active Policy</h3> | ||
598 | <p class="qa"> | ||
599 | You can use the tool <span class="src">setsched</span> (part of <span class="src">liblitmus</span>) | ||
600 | to select a new plugin at run time. | ||
601 | </p> | ||
602 | <div class="screenshot"> | ||
603 | <img src="gfx/setsched.png" alt="Screen shot of setsched"/> | ||
604 | </div> | ||
605 | <p> | ||
606 | Only root can change the active policy, and only when there are no real-time tasks present. | ||
607 | </p> | ||
608 | <p> | ||
609 | If you do not have the <span class="src">dialog</span> utility installed, then you can still use <span class="src">setsched</span> by passing the desired scheduling policy as a command-line parameter, <em>e.g.</em>, type <span class="src">setsched PFAIR</span> to activate the PFAIR plugin. | ||
610 | </p> | ||
611 | |||
612 | <h3>Selecting the C-EDF Cluster Size</h3> | ||
613 | <p class="qa"> | ||
614 | The C-EDF plugin can create clusters based on the sharing of L2 or L3 caches. When the plugin is activated (see above), it configures clusters based on the value last written to <span class="src">/proc/litmus/cluster_cache</span> (either "L2" or "L3"). Note that C-EDF must be reloaded (for example, by switching to the Linux plugin and back to C-EDF) for a change of the desired cluster size to take effect; changing the cluster size while C-EDF is active is not supported. | ||
615 | </p> | ||
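<p>
For example (as root; a sketch based on the description above):
</p>
<pre class="shell">echo L2 > /proc/litmus/cluster_cache
# reload C-EDF so that the change takes effect
setsched Linux
setsched C-EDF
</pre>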
616 | |||
617 | <h3>Writing Real-Time Tasks</h3> | ||
618 | <p class="qa"> | ||
619 | The user space library that provides the LITMUS<sup>RT</sup> API, | ||
620 | <span class="src">liblitmus</span>, contains two example real-time tasks | ||
621 | (<span class="src">base_task.c</span> and | ||
622 | <span class="src">base_mt_task.c</span>) | ||
623 | that both illustrate how to use the API and provide a skeleton for real-time | ||
624 | task development. To get started with development, please take a look at these example | ||
625 | programs. | ||
626 | </p> | ||
627 | <h3>Tracing Overheads and Scheduling Decisions</h3> | ||
628 | <p class="qa">LITMUS<sup>RT</sup> provides numerous tracing facilities that are discussed in-depth in the tutorial <a href="doc/tracing.html">Tracing with LITMUS<sup>RT</sup></a>. | ||
629 | </p> | ||
630 | <p class="nobottommargin"> | ||
631 | Please contact the <a href="#collaborators">current maintainer</a> if you have any | ||
632 | questions. | ||
633 | </p> | ||
634 | |||
635 | |||
636 | </div> | ||
637 | |||
638 | <h2 id="development">Development</h2> | ||
639 | <div class="box"> | ||
640 | <p class="nomargin"> | ||
641 | Patches and suggestions are very welcome! | ||
642 | Both the LITMUS<sup>RT</sup> kernel and liblitmus are available | ||
643 | as a public <a href="http://git-scm.com/">git</a> repository at | ||
644 | <a href="public-repository/index.html">http://www.cs.unc.edu/~anderson/litmus-rt/public-repository</a>. | ||
645 | </p> | ||
646 | </div> | ||
647 | |||
648 | |||
649 | |||
650 | <h2 id="credits">Credits</h2> | ||
651 | <div class="box"> | ||
652 | <div style="float: right;"> | ||
653 | <a href="http://validator.w3.org/check?uri=referer"><img | ||
654 | src="http://www.w3.org/Icons/valid-xhtml10" | ||
655 | alt="Valid XHTML 1.0 Strict" height="31" width="88"/></a> | ||
656 | </div> | ||
657 | |||
658 | <p class="nomargin"> | ||
659 | Linux is a registered trademark of Linus Torvalds. <br /> The | ||
660 | LITMUS<sup>RT</sup> logo was designed by Jasper McChesney of <a href="http://www.breakforsense.net/">Break for Sense Design</a>. <br /> | ||
661 | Web design by Björn Brandenburg. | ||
662 | </p> | ||
663 | |||
664 | |||
665 | </div> | ||
666 | |||
667 | <script src="http://www.google-analytics.com/urchin.js" type="text/javascript"> | ||
668 | </script> | ||
669 | <script type="text/javascript"> | ||
670 | _uacct = "UA-3184628-1"; | ||
671 | urchinTracker(); | ||
672 | </script> | ||
673 | </body> | ||
674 | </html> | ||