aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/Kconfig
blob: a1a6cc69934840f1831f959670c5269371f43ea6 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
menu "LITMUS^RT"

menu "Scheduling"

config PLUGIN_CEDF
	bool "Clustered-EDF"
	depends on X86 && SYSFS
	default y
	help
	  Include the Clustered EDF (C-EDF) plugin in the kernel.
	  This is appropriate for large platforms with shared caches.
	  On smaller platforms (e.g., ARM PB11MPCore), using C-EDF
	  makes little sense since there aren't any shared caches.

config RECURSIVE_READYQ_LOCK
	bool "Recursive Ready Queue Lock"
	default n
	help
	  Protects ready queues with a raw recursive spinlock instead
	  of a normal raw spinlock.

	  If unsure, say No.

config PLUGIN_PFAIR
	bool "PFAIR"
	depends on HIGH_RES_TIMERS && !NO_HZ
	default y
	help
	  Include the PFAIR plugin (i.e., the PD^2 scheduler) in the kernel.
	  The PFAIR plugin requires high resolution timers (for staggered quanta)
	  and does not support NO_HZ (quanta could be missed when the system is idle).

	  If unsure, say Yes.

config RELEASE_MASTER
	bool "Release-master Support"
	depends on ARCH_HAS_SEND_PULL_TIMERS
	default n
	help
	  Allow one processor to act as a dedicated interrupt processor
	  that services all timer interrupts, but that does not schedule
	  real-time tasks. See RTSS'09 paper for details
	  (http://www.cs.unc.edu/~anderson/papers.html).
	  Currently only supported by GSN-EDF.

config REALTIME_AUX_TASKS
	bool "Real-Time Auxiliary Tasks"
	depends on LITMUS_LOCKING
	default n
	help
	  Adds a system call that forces all non-real-time threads in a process
	  to become auxiliary real-time tasks. These tasks inherit the priority of
	  the highest-prio *BLOCKED* (but NOT blocked on a Litmus lock) real-time
	  task (non-auxiliary) in the process. This allows the integration of COTS
	  code that has background helper threads used primarily for message passing
	  and synchronization. If these background threads are NOT real-time scheduled,
	  then unbounded priority inversions may occur if a real-time task blocks on
	  a non-real-time thread.

	  Beware of the following pitfalls:
	    1) Auxiliary threads should not be CPU intensive. They should mostly
	       block on mutexes and condition variables. Violating this will
	       likely prevent meaningful analysis.
	    2) Since there may be more than one auxiliary thread per process,
	       priority inversions may occur with respect to single-threaded
	       task models if/when one of the threads is scheduled simultaneously
	       with another of the same identity.

choice
	prompt "Scheduling prioritization of AUX tasks."
	depends on REALTIME_AUX_TASKS
	default REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	help
	  Select the prioritization method for auxiliary tasks.

config REALTIME_AUX_TASK_PRIORITY_BOOSTED
	bool "Boosted"
	help
	  Run all auxiliary task threads at a maximum priority. Useful for
	  temporarily working around bugs during development.

	  BEWARE: Run-away auxiliary tasks will clobber CPUs.

config REALTIME_AUX_TASK_PRIORITY_INHERITANCE
	bool "Inheritance"
	help
	  Auxiliary tasks inherit the maximum priority from blocked real-time
	  threads within the same process.

	  Additional pitfall:
	    3) Busy-wait deadlock is likely between normal real-time tasks and
	       auxiliary tasks that synchronize using _preemptive_ spinlocks
	       that do not use priority inheritance.

	  These pitfalls are mitigated by the fact that auxiliary tasks only
	  inherit priorities from blocked tasks (blocking signifies that the
	  blocked task _may_ be waiting on an auxiliary task to perform some
	  work). Further, auxiliary tasks without an inherited priority are
	  _always_ scheduled with a priority less than any normal real-time task!!

	  NOTE: Aux tasks do not _directly_ inherit a priority from rt tasks that
	  are blocked on Litmus locks. Aux tasks should be COTS code that knows
	  nothing of Litmus, so they won't hold Litmus locks. Nothing the aux
	  task can do can _directly_ unblock the rt task blocked on a Litmus
	  lock. However, the lock holder that blocks the rt task CAN block on
	  I/O and contribute its priority to the aux tasks. Aux tasks may still
	  _indirectly_ inherit the priority of the blocked rt task via the lock
	  holder.
endchoice

endmenu


menu "Real-Time Synchronization"

config NP_SECTION
	bool "Non-preemptive section support"
	default n
	help
	  Allow tasks to become non-preemptable.
	  Note that plugins still need to explicitly support non-preemptivity.
	  Currently, only GSN-EDF and PSN-EDF have such support.

	  This is required to support locking protocols such as the FMLP.
	  If disabled, all tasks will be considered preemptable at all times.

config LITMUS_LOCKING
	bool "Support for real-time locking protocols"
	depends on NP_SECTION
	default n
	help
	  Enable LITMUS^RT's deterministic multiprocessor real-time
	  locking protocols.

	  Say Yes if you want to include locking protocols such as the FMLP and
	  Baker's SRP.

config LITMUS_AFFINITY_LOCKING
	bool "Enable affinity infrastructure in k-exclusion locking protocols."
	depends on LITMUS_LOCKING
	default n
	help
	  Enable affinity tracking infrastructure in k-exclusion locking protocols.
	  This only enables the *infrastructure*, not actual affinity algorithms.

	  If unsure, say No.

config LITMUS_NESTED_LOCKING
	bool "Support for nested inheritance in locking protocols"
	depends on LITMUS_LOCKING
	default n
	help
	  Enable nested priority inheritance.

config LITMUS_DGL_SUPPORT
	bool "Support for dynamic group locks"
	depends on LITMUS_NESTED_LOCKING
	default n
	help
	  Enable dynamic group lock support.

config LITMUS_MAX_DGL_SIZE
	int "Maximum size of a dynamic group lock."
	depends on LITMUS_DGL_SUPPORT
	range 1 128
	default "10"
	help
		Dynamic group lock data structures are allocated on the process
		stack when a group is requested. We set a maximum size of
		locks in a dynamic group lock to avoid dynamic allocation.

		TODO: Batch DGL requests exceeding LITMUS_MAX_DGL_SIZE.

endmenu

menu "Performance Enhancements"

config SCHED_CPU_AFFINITY
	bool "Local Migration Affinity"
	depends on X86
	default y
	help
	  Rescheduled tasks prefer CPUs near to their previously used CPU.  This
	  may improve performance through possible preservation of cache affinity.

	  Warning: May make bugs harder to find since tasks may migrate less often.

	  NOTES:
	  	* Feature is not utilized by PFair/PD^2.

	  Say Yes if unsure.

config ALLOW_EARLY_RELEASE
	bool "Allow Early Releasing"
	default y
	help
	  Allow tasks to release jobs early (while still maintaining job
	  precedence constraints). Only supported by EDF schedulers. Early
	  releasing must be explicitly requested by real-time tasks via
	  the task_params passed to sys_set_task_rt_param().

	  Early releasing can improve job response times while maintaining
	  real-time correctness. However, it can easily peg your CPUs
	  since tasks never suspend to wait for their next job. As such, early
	  releasing is really only useful in the context of implementing
	  bandwidth servers, interrupt handling threads, or short-lived
	  computations.

	  Beware that early releasing may affect real-time analysis
	  if using locking protocols or I/O.

	  Say Yes if unsure.

choice
	prompt "EDF Tie-Break Behavior"
	default EDF_TIE_BREAK_LATENESS_NORM
	help
	  Allows the configuration of tie-breaking behavior when the deadlines
	  of two EDF-scheduled tasks are equal.
	
	config EDF_TIE_BREAK_LATENESS
	bool "Lateness-based Tie Break"
	help
	  Break ties between two jobs, A and B, based upon the lateness of their
	  prior jobs. The job with the greatest lateness has priority. Note that
	  lateness has a negative value if the prior job finished before its
	  deadline.
	
	config EDF_TIE_BREAK_LATENESS_NORM
	bool "Normalized Lateness-based Tie Break"
	help
	  Break ties between two jobs, A and B, based upon the lateness, normalized
	  by relative deadline, of their prior jobs. The job with the greatest
	  normalized lateness has priority. Note that lateness has a negative value
	  if the prior job finished before its deadline.
	  
	  Normalized lateness tie-breaks are likely desirable over non-normalized
	  tie-breaks if the execution times and/or relative deadlines of tasks in a
	  task set vary greatly.
	
	config EDF_TIE_BREAK_HASH
	bool "Hash-based Tie Breaks"
	help
	  Break ties between two jobs, A and B, with equal deadlines by using a
	  uniform hash; i.e.: hash(A.pid, A.job_num) < hash(B.pid, B.job_num). Job
	  A has ~50% of winning a given tie-break.

	  NOTES:
	    * This method doesn't work very well if a tied job has a low-valued
		  hash while the jobs it ties with do not make progress (that is,
		  they don't increment to new job numbers). The job with a low-valued
		  hash job will lose most tie-breaks. This is usually not a problem
		  unless you are doing something funky in Litmus (ex. worker threads
		  that do not increment job numbers).
	
	config EDF_PID_TIE_BREAK
	bool "PID-based Tie Breaks"
	help
	  Break ties based upon OS-assigned thread IDs. Use this option if
	  required by algorithm's real-time analysis or per-task response-time
	  jitter must be minimized.
	
	  NOTES:
	    * This tie-breaking method was default in Litmus 2012.2 and before.
		
endchoice

endmenu

menu "Tracing"

config FEATHER_TRACE
	bool "Feather-Trace Infrastructure"
	default y
	help
	  Feather-Trace basic tracing infrastructure. Includes device file
	  driver and instrumentation point support.

	  There are actually two implementations of Feather-Trace.
	  1) A slower, but portable, default implementation.
	  2) Architecture-specific implementations that rewrite kernel .text at runtime.

	  If enabled, Feather-Trace will be based on 2) if available (currently only for x86).
	  However, if DEBUG_RODATA=y, then Feather-Trace will choose option 1) in any case
	  to avoid problems with write-protected .text pages.

	  Bottom line: to avoid increased overheads, choose DEBUG_RODATA=n.

	  Note that this option only enables the basic Feather-Trace infrastructure;
	  you still need to enable SCHED_TASK_TRACE and/or SCHED_OVERHEAD_TRACE to
	  actually enable any events.

config SCHED_TASK_TRACE
	bool "Trace real-time tasks"
	depends on FEATHER_TRACE
	default y
	help
	  Include support for the sched_trace_XXX() tracing functions. This
	  allows the collection of real-time task events such as job
	  completions, job releases, early completions, etc. This results in a
	  small overhead in the scheduling code. Disable if the overhead is not
	  acceptable (e.g., benchmarking).

	  Say Yes for debugging.
	  Say No for overhead tracing.

config SCHED_TASK_TRACE_SHIFT
       int "Buffer size for sched_trace_xxx() events"
       depends on SCHED_TASK_TRACE
       range 8 15
       default 9
       help

         Select the buffer size of sched_trace_xxx() events as a power of two.
	 These buffers are statically allocated as per-CPU data. Each event
	 requires 24 bytes storage plus one additional flag byte. Too large
	 buffers can cause issues with the per-cpu allocator (and waste
	 memory). Too small buffers can cause scheduling events to be lost. The
	 "right" size is workload dependent and depends on the number of tasks,
	 each task's period, each task's number of suspensions, and how often
	 the buffer is flushed.

	 Examples: 12 =>   4k events
		   10 =>   1k events
		    8 =>  512 events

config SCHED_LITMUS_TRACEPOINT
	bool "Enable Event/Tracepoint Tracing for real-time task tracing"
	depends on TRACEPOINTS
	default n
	help
	  Enable kernel-style events (tracepoint) for Litmus. Litmus events
	  trace the same functions as the above sched_trace_XXX(), but can
	  be enabled independently.
	  Litmus tracepoints can be recorded and analyzed together (single
	  time reference) with all other kernel tracing events (e.g.,
	  sched:sched_switch, etc.).

	  This also enables a quick way to visualize schedule traces using
	  trace-cmd utility and kernelshark visualizer.

	  Say Yes for debugging and visualization purposes.
	  Say No for overhead tracing.

config SCHED_OVERHEAD_TRACE
	bool "Record timestamps for overhead measurements"
	depends on FEATHER_TRACE
	default n
	help
	  Export event stream for overhead tracing.
	  Say Yes for overhead tracing.

config SCHED_DEBUG_TRACE
	bool "TRACE() debugging"
	default y
	help
	  Include support for sched_trace_log_message(), which is used to
	  implement TRACE(). If disabled, no TRACE() messages will be included
	  in the kernel, and no overheads due to debugging statements will be
	  incurred by the scheduler. Disable if the overhead is not acceptable
	  (e.g. benchmarking).

	  Say Yes for debugging.
	  Say No for overhead tracing.

config SCHED_DEBUG_TRACE_SHIFT
       int "Buffer size for TRACE() buffer"
       depends on SCHED_DEBUG_TRACE
       range 14 22
       default 18
       help

	Select the amount of memory needed per for the TRACE() buffer, as a
	power of two. The TRACE() buffer is global and statically allocated. If
	the buffer is too small, there will be holes in the TRACE() log if the
	buffer-flushing task is starved.

	The default should be sufficient for most systems. Increase the buffer
	size if the log contains holes. Reduce the buffer size when running on
	a memory-constrained system.

	Examples: 14 =>  16KB
		  18 => 256KB
		  20 =>   1MB

        This buffer is exported to userspace using a misc device as
        'litmus/log'. On a system with default udev rules, a corresponding
        character device node should be created at /dev/litmus/log. The buffer
        can be flushed using cat, e.g., 'cat /dev/litmus/log > my_log_file.txt'.

config SCHED_DEBUG_TRACE_CALLER
       bool "Include [function@file:line] tag in TRACE() log"
       depends on SCHED_DEBUG_TRACE
       default n
       help
         With this option enabled, TRACE() prepends

	      "[<function name>@<filename>:<line number>]"

	 to each message in the debug log. Enable this to aid in figuring out
         what was called in which order. The downside is that it adds a lot of
         clutter.

	 If unsure, say No.

config PREEMPT_STATE_TRACE
       bool "Trace preemption state machine transitions"
       depends on SCHED_DEBUG_TRACE && DEBUG_KERNEL
       default n
       help
         With this option enabled, each CPU will log when it transitions
	 states in the preemption state machine. This state machine is
	 used to determine how to react to IPIs (avoid races with in-flight IPIs).

	 Warning: this creates a lot of information in the debug trace. Only
	 recommended when you are debugging preemption-related races.

	 If unsure, say No.

endmenu

menu "Interrupt Handling"

choice 
	prompt "Scheduling of interrupt bottom-halves in Litmus."
	default LITMUS_SOFTIRQD_NONE
	depends on LITMUS_LOCKING
	help
		Schedule tasklets with known priorities in Litmus.

config LITMUS_SOFTIRQD_NONE
	bool "No tasklet scheduling in Litmus."
	help
	  Don't schedule tasklets in Litmus.  Default.

config LITMUS_SOFTIRQD
	bool "Enable klmirqd interrupt (and workqueue) handling threads."
	help
	  Create klmirqd interrupt handling threads.  Work must be
	  specifically dispatched to these workers.  (Softirqs for
	  Litmus tasks are not magically redirected to klmirqd.)

	  G-EDF, C-EDF ONLY for now!


#config LITMUS_PAI_SOFTIRQD
#	bool "Defer tasklets to context switch points."
#	help
#	  Only execute scheduled tasklet bottom halves at
#	  scheduling points.  Trades context switch overhead
#	  at the cost of non-preemptive durations of bottom half
#	  processing.
#		 
#	  G-EDF, C-EDF ONLY for now!	 
		 
endchoice	   
	   

config LITMUS_NVIDIA
	  bool "Litmus handling of NVIDIA driver."
	  default n
	  help
	    Enable Litmus control of NVIDIA driver tasklet/workqueues.

		If unsure, say No.

config LITMUS_NVIDIA_NONSPLIT_INTERRUPTS
	  bool "Execute NVIDIA interrupts with top-halves."
	  depends on LITMUS_NVIDIA
	  default n
	  help
	    Tasklets originating from the NVIDIA driver are executed
		immediately in interrupt-space. This implements non-split
		interrupt handling for GPUs. Feature intended mainly for
		debugging, as it allows one to avoid having to rely
		upon PAI or klmirqd interrupt handling.

		If unsure, say No.

choice
	  prompt "Litmus handling of NVIDIA workqueues."
	  depends on LITMUS_NVIDIA
	  default LITMUS_NVIDIA_WORKQ_OFF
	  help
	  	Select method for handling NVIDIA workqueues.

config LITMUS_NVIDIA_WORKQ_OFF
	  bool "Use Linux's default work queues."
	  help
	  	Let Linux process all NVIDIA work queue items.

config LITMUS_NVIDIA_WORKQ_ON
	  bool "Schedule work with interrupt thread."
	  depends on LITMUS_SOFTIRQD
	  help
	  	Direct work queue items from NVIDIA devices to Litmus's
		klmirqd handling routines. Use the same thread
		as interrupt handling.

config LITMUS_NVIDIA_WORKQ_ON_DEDICATED
	  bool "Schedule work in dedicated threads."
	  depends on LITMUS_SOFTIRQD
	  help
	  	Direct work queue items from NVIDIA devices to Litmus's
		klmirqd handling routines. Use a dedicated thread for
		work (separate thread from interrupt handling).

endchoice

# NOTE(review): the symbol name misspells "ASSIGNMENT". Renaming it would
# break any code elsewhere in the tree that references this symbol, so it is
# left as-is — audit all users before fixing the spelling.
config LITMUS_AFFINITY_AWARE_GPU_ASSINGMENT
	  bool "Enable affinity-aware heuristics to improve GPU assignment."
	  depends on LITMUS_NVIDIA && LITMUS_AFFINITY_LOCKING
	  default n
	  help
	    Enable several heuristics to improve the assignment
		of GPUs to real-time tasks to reduce the overheads
		of memory migrations.

		If unsure, say No.

config NV_DEVICE_NUM
	   int "Number of NVIDIA GPUs."
	   depends on LITMUS_NVIDIA
	   range 1 16
	   default "1"
	   help
	     Should be (<= to the number of CPUs) and
		 (<= to the number of GPUs) in your system.

choice
	  prompt "CUDA/Driver Version Support"
	  default CUDA_5_0
	  depends on LITMUS_NVIDIA
	  help
	  	Select the version of CUDA/driver to support.

config CUDA_5_X
	  bool "CUDA 5.0+"
	  depends on LITMUS_NVIDIA && REALTIME_AUX_TASKS
	  help
	    Support CUDA 5.0+ (dev. driver version: x86_64-310.x)

config CUDA_5_0
	  bool "CUDA 5.0"
	  depends on LITMUS_NVIDIA && REALTIME_AUX_TASKS
	  help
	    Support CUDA 5.0 (dev. driver version: x86_64-304.54)

config CUDA_4_2
	  bool "CUDA 4.2"
	  depends on LITMUS_NVIDIA
	  help
		Support CUDA 4.2 (dev driver version: x86_64-295.40)

config CUDA_4_0
	  bool "CUDA 4.0"
	  depends on LITMUS_NVIDIA
	  help
		Support CUDA 4.0 (dev. driver version: x86_64-270.41)

config CUDA_3_2
	  bool "CUDA 3.2"
	  depends on LITMUS_NVIDIA
	  help
	  	Support CUDA 3.2 (dev. driver version: x86_64-260.24)

endchoice

config LITMUS_NV_KLMIRQD_DEBUG
	  bool "Raise fake sporadic tasklets to test nv klmirqd threads."
	  depends on LITMUS_NVIDIA && LITMUS_SOFTIRQD
	  default n
	  help
		Causes tasklets to be sporadically dispatched to waiting klmirqd
		threads.  WARNING! Kernel panic may occur if you switch between
		LITMUS plugins!

endmenu

endmenu