menu "LITMUS^RT"

menu "Scheduling"

config PLUGIN_CEDF
        bool "Clustered-EDF"
	depends on X86 && SYSFS
        default y
        help
          Include the Clustered EDF (C-EDF) plugin in the kernel.
          This is appropriate for large platforms with shared caches.
          On smaller platforms (e.g., ARM PB11MPCore), using C-EDF
          makes little sense since there aren't any shared caches.

config PLUGIN_PFAIR
	bool "PFAIR"
	default y
	help
	  Include the PFAIR plugin (i.e., the PD^2 scheduler) in the kernel.
	  The PFAIR plugin requires high resolution timers (for staggered
	  quanta) and also requires HZ_PERIODIC (i.e., periodic timer ticks
	  even if a processor is idle, as quanta could be missed otherwise).
	  Further, the PFAIR plugin uses the system tick and thus requires
	  HZ=1000 to achieve reasonable granularity.

	  If unsure, say Yes.

config RELEASE_MASTER
        bool "Release-master Support"
	depends on ARCH_HAS_SEND_PULL_TIMERS && SMP
	default n
	help
           Allow one processor to act as a dedicated interrupt processor
           that services all timer interrupts, but that does not schedule
           real-time tasks. See RTSS'09 paper for details
	   (http://www.cs.unc.edu/~anderson/papers.html).

config PREFER_LOCAL_LINKING
       bool "Link newly arrived tasks locally if possible"
       depends on SMP
       default y
       help
          In linking-based schedulers such as GSN-EDF, if an idle CPU processes
	  a job arrival (i.e., when a job resumed or was released), it can
	  either link the task to itself and schedule it immediately (to avoid
	  unnecessary scheduling latency) or it can try to link it to the CPU
	  where it executed previously (to maximize cache affinity, at the
	  expense of increased latency due to the need to send an IPI).

	  In lightly loaded systems, this option can significantly reduce
	  scheduling latencies. In heavily loaded systems (where CPUs are
	  rarely idle), it will likely make hardly a difference.

	  If unsure, say yes.
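
# A hedged sketch (with made-up helper names, not the actual plugin code) of
# the decision this option controls when an idle CPU handles a job arrival:
#
#	if (prefer_local_linking && cpu_is_idle(this_cpu))
#		link_task_to_cpu(tsk, this_cpu);      /* run locally right away */
#	else
#		link_task_to_cpu(tsk, prev_cpu(tsk)); /* keep cache affinity;   */
#		                                      /* may require an IPI     */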

config LITMUS_QUANTUM_LENGTH_US
    int "quantum length (in us)"
    default 1000
    range 500 10000
    help 
      Determine the desired quantum length, in microseconds, which 
      is used to determine the granularity of scheduling in
      quantum-driven plugins (primarily PFAIR). This parameter does not
      affect event-driven plugins (such as the EDF-based plugins and P-FP).
      Default: 1000us = 1ms.
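
# Worked example: the default of 1000 us is 1 ms, i.e., exactly one quantum
# per timer tick at HZ=1000 (the tick rate the PFAIR help text above asks
# for); a setting of 2000 us would correspondingly span two such ticks.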

config BUG_ON_MIGRATION_DEADLOCK
       bool "Panic on suspected migration deadlock"
       default y
       help
          This is a debugging option. The LITMUS^RT migration support code for
	  global scheduling contains a simple heuristic to detect when the
	  system deadlocks due to circular stack dependencies.

	  For example, such a deadlock exists if CPU 0 waits for task A's stack
	  to become available while using task B's stack, and CPU 1 waits for
	  task B's stack to become available while using task A's stack. Such
	  a situation can arise in (buggy) global scheduling plugins.

	  With this option enabled, such a scenario will result in a BUG().
	  You can turn off this option when debugging on real hardware (e.g.,
	  to rescue traces, etc. that would be hard to get after a panic).

	  Only turn this off if you really know what you are doing. If this
	  BUG() triggers, the scheduler is broken and turning off this option
	  won't fix it.


endmenu

menu "Real-Time Synchronization"

config NP_SECTION
        bool "Non-preemptive section support"
	default y
	help
	  Allow tasks to become non-preemptable.
          Note that plugins still need to explicitly support non-preemptivity.
          Currently, only the GSN-EDF, PSN-EDF, and P-FP plugins have such support.

	  This is required to support locking protocols such as the FMLP.
	  If disabled, all tasks will be considered preemptable at all times.

config LITMUS_LOCKING
        bool "Support for real-time locking protocols"
	depends on NP_SECTION
	default y
	help
	  Enable LITMUS^RT's multiprocessor real-time locking protocols with
	  predicable maximum blocking times.

	  Say Yes if you want to include locking protocols such as the FMLP and
	  Baker's SRP.

endmenu

menu "Performance Enhancements"

config SCHED_CPU_AFFINITY
	bool "Local Migration Affinity"
	depends on X86 && SYSFS
	default y
	help
	  Rescheduled tasks prefer CPUs near their previously used CPU.
	  This may improve cache performance through possible preservation of
	  cache affinity, at the expense of (slightly) more involved scheduling
	  logic.

	  Warning: May make bugs harder to find since tasks may migrate less often.

	  NOTES:
		* Feature is not utilized by PFair/PD^2.

	  Say Yes if unsure.

config ALLOW_EARLY_RELEASE
	bool "Allow Early Releasing"
	default y
	help
	  Allow tasks to release jobs early (while still maintaining job
	  precedence constraints). Only supported by EDF schedulers. Early
	  releasing must be explicitly requested by real-time tasks via
	  the task_params passed to sys_set_task_rt_param().

	  Early releasing can improve job response times while maintaining
	  real-time correctness. However, it can easily peg your CPUs
	  since tasks never suspend to wait for their next job. As such, early
	  releasing is really only useful in the context of implementing
	  bandwidth servers, interrupt handling threads, or short-lived
	  computations.

	  Beware that early releasing may affect real-time analysis
	  if using locking protocols or I/O.

	  Say Yes if unsure.

choice
	prompt "EDF Tie-Break Behavior"
	default EDF_TIE_BREAK_LATENESS_NORM
	help
	  Allows the configuration of tie-breaking behavior when the deadlines
	  of two EDF-scheduled tasks are equal.

	config EDF_TIE_BREAK_LATENESS
	bool "Lateness-based Tie Break"
	help
	  Break ties between two jobs, A and B, based upon the lateness of their
	  prior jobs. The job with the greatest lateness has priority. Note that
	  lateness has a negative value if the prior job finished before its
	  deadline.
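
# For reference, a standard definition consistent with the help text above
# (not quoted from the scheduler code):
#
#	lateness(J) = completion_time(J) - absolute_deadline(J)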

	config EDF_TIE_BREAK_LATENESS_NORM
	bool "Normalized Lateness-based Tie Break"
	help
	  Break ties between two jobs, A and B, based upon the lateness, normalized
	  by relative deadline, of their prior jobs. The job with the greatest
	  normalized lateness has priority. Note that lateness has a negative value
	  if the prior job finished before its deadline.

	  Normalized lateness tie-breaks are likely preferable to non-normalized
	  tie-breaks if the execution times and/or relative deadlines of tasks in a
	  task set vary greatly.
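
# Likewise, a hedged formulation of the normalized variant, dividing by the
# job's relative deadline:
#
#	normalized_lateness(J) = lateness(J) / relative_deadline(J)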

	config EDF_TIE_BREAK_HASH
	bool "Hash-based Tie Breaks"
	help
	  Break ties between two jobs, A and B, with equal deadlines by using a
	  uniform hash; i.e., hash(A.pid, A.job_num) < hash(B.pid, B.job_num). Job
	  A has a ~50% chance of winning a given tie-break.
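
# A minimal C sketch of this comparison with a made-up hash; the actual
# in-kernel hash function may differ:
#
#	static inline unsigned int tb_hash(unsigned int pid, unsigned int job_no)
#	{
#		/* any reasonably uniform mix of pid and job number will do */
#		return (pid * 2654435761u) ^ (job_no * 40503u);
#	}
#
#	/* on a deadline tie, the job with the smaller hash value has priority */
#	int a_wins_tie = tb_hash(a_pid, a_job_no) < tb_hash(b_pid, b_job_no);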

	config EDF_PID_TIE_BREAK
	bool "PID-based Tie Breaks"
	help
	  Break ties based upon OS-assigned thread IDs. Use this option if
	  required by an algorithm's real-time analysis or if per-task
	  response-time jitter must be minimized.

	  NOTES:
	    * This tie-breaking method was the default in Litmus 2012.2 and before.

endchoice

endmenu

menu "Tracing"

config FEATHER_TRACE
	bool "Feather-Trace Infrastructure"
	depends on !RELOCATABLE
	default y
	help
	  Feather-Trace basic tracing infrastructure. Includes device file
	  driver and instrumentation point support.

	  There are actually two implementations of Feather-Trace.
	  1) A slower, but portable, default implementation.
	  2) Architecture-specific implementations that rewrite kernel .text at runtime.

	  If enabled, Feather-Trace will be based on 2) if available (currently only for x86).
	  However, if DEBUG_RODATA=y, then Feather-Trace will choose option 1) in any case
	  to avoid problems with write-protected .text pages.

	  Bottom line: to avoid increased overheads, choose DEBUG_RODATA=n.

	  Note that this option only enables the basic Feather-Trace infrastructure;
	  you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
	  actually enable any events.

config SCHED_TASK_TRACE
	bool "Trace real-time tasks"
	depends on FEATHER_TRACE
	default y
	help
	  Include support for the sched_trace_XXX() tracing functions. This
	  allows the collection of real-time task events such as job
	  completions, job releases, early completions, etc. This results in a
	  small overhead in the scheduling code. Disable if the overhead is not
	  acceptable (e.g., benchmarking).

	  Say Yes for debugging.
	  Say No for overhead tracing.

config SCHED_TASK_TRACE_SHIFT
       int "Buffer size for sched_trace_xxx() events"
       depends on SCHED_TASK_TRACE
       range 8 13
       default 9
       help

         Select the buffer size of sched_trace_xxx() events as a power of two.
	 These buffers are statically allocated as per-CPU data. Each event
	 requires 24 bytes storage plus one additional flag byte. Too large
	 buffers can cause issues with the per-cpu allocator (and waste
	 memory). Too small buffers can cause scheduling events to be lost. The
	 "right" size is workload dependent and depends on the number of tasks,
	 each task's period, each task's number of suspensions, and how often
	 the buffer is flushed.

	 Examples: 12 =>   4k events
		   10 =>   1k events
		    8 =>  512 events
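
# Rough per-CPU memory math, based on the 24 + 1 = 25 bytes per event stated
# above: the default shift of 9 buffers 2^9 = 512 events, i.e., roughly
# 512 * 25 = 12800 bytes (12.5 KB) per CPU; a shift of 12 buffers 4096 events,
# i.e., roughly 100 KB per CPU.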

config SCHED_LITMUS_TRACEPOINT
	bool "Enable Event/Tracepoint Tracing for real-time task tracing"
	depends on TRACEPOINTS
	default n
	help
	  Enable kernel-style events (tracepoints) for Litmus. Litmus events
	  trace the same functions as the above sched_trace_XXX(), but can
	  be enabled independently.
	  Litmus tracepoints can be recorded and analyzed together (single
	  time reference) with all other kernel tracing events (e.g.,
	  sched:sched_switch, etc.).

	  This also enables a quick way to visualize schedule traces using
	  the trace-cmd utility and the kernelshark visualizer.

	  Say Yes for debugging and visualization purposes.
	  Say No for overhead tracing.

config SCHED_OVERHEAD_TRACE
	bool "Record timestamps for overhead measurements"
	depends on FEATHER_TRACE
	default y
	help
	  Export event stream for overhead tracing.
	  Say Yes for overhead tracing.

config SCHED_OVERHEAD_TRACE_SHIFT
       int "Buffer size for Feather-Trace overhead data"
       depends on SCHED_OVERHEAD_TRACE
       range 15 32
       default 22
       help

         Select the buffer size for the Feather-Trace overhead tracing
         infrastructure (/dev/litmus/ft_trace0 & ftcat) as a power of two.  The
         larger the buffer, the less likely the chance of buffer overflows if
         the ftcat process is starved by real-time activity. In machines with
         large memories, large buffer sizes are recommended.

	 Examples: 16 =>   2 MB
		   24 => 512 MB
		   26 =>  2G MB
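
# Note: the sizes above imply roughly 32 bytes per recorded timestamp (an
# inference from the examples, e.g., 2^24 * 32 B = 512 MB), so the default
# shift of 22 corresponds to roughly 128 MB of buffer space.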

config SCHED_DEBUG_TRACE
	bool "TRACE() debugging"
	default n
	help
	  Include support for sched_trace_log_message(), which is used to
	  implement TRACE(). If disabled, no TRACE() messages will be included
	  in the kernel, and no overheads due to debugging statements will be
	  incurred by the scheduler. Disable if the overhead is not acceptable
	  (e.g., benchmarking).

	  Say Yes for debugging.
	  Say No for overhead tracing.

config SCHED_DEBUG_TRACE_SHIFT
       int "Buffer size for TRACE() buffer"
       depends on SCHED_DEBUG_TRACE
       range 14 22
       default 18
       help

	Select the amount of memory needed for the TRACE() buffer, as a
	power of two. The TRACE() buffer is global and statically allocated. If
	the buffer is too small, there will be holes in the TRACE() log if the
	buffer-flushing task is starved.

	The default should be sufficient for most systems. Increase the buffer
	size if the log contains holes. Reduce the buffer size when running on
	a memory-constrained system.

	Examples: 14 =>  16KB
		  18 => 256KB
		  20 =>   1MB

	This buffer is exported to userspace using a misc device as
	'litmus/log'. On a system with default udev rules, a corresponding
	character device node should be created at /dev/litmus/log. The buffer
	can be flushed using cat, e.g., 'cat /dev/litmus/log > my_log_file.txt'.

config SCHED_DEBUG_TRACE_CALLER
       bool "Include [function@file:line] tag in TRACE() log"
       depends on SCHED_DEBUG_TRACE
       default n
       help
         With this option enabled, TRACE() prepends

	      "[<function name>@<filename>:<line number>]"

	 to each message in the debug log. Enable this to aid in figuring out
         what was called in which order. The downside is that it adds a lot of
         clutter.

	 If unsure, say No.

config PREEMPT_STATE_TRACE
       bool "Trace preemption state machine transitions"
       depends on SCHED_DEBUG_TRACE && DEBUG_KERNEL
       default n
       help
         With this option enabled, each CPU will log when it transitions
	 states in the preemption state machine. This state machine is
	 used to determine how to react to IPIs (avoid races with in-flight IPIs).

	 Warning: this creates a lot of information in the debug trace. Only
	 recommended when you are debugging preemption-related races.

	 If unsure, say No.

endmenu

endmenu