aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/edf_common.c
blob: 441fbfddf0c2facaddf526759e5cd77d6d7d047a (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
/*
 * kernel/edf_common.c
 *
 * Common functions for EDF based scheduler.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>

#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>

#ifdef CONFIG_LITMUS_NESTED_LOCKING
#include <litmus/locking.h>
#endif

#include <litmus/edf_common.h>

#ifdef CONFIG_EDF_TIE_BREAK_LATENESS_NORM
#include <litmus/fpmath.h>
#endif

#ifdef CONFIG_EDF_TIE_BREAK_HASH
#include <linux/hash.h>
static inline long edf_hash(struct task_struct *t)
{
	/* pid is 32 bits, so normally we would shove that into the
	 * upper 32-bits and and put the job number in the bottom
	 * and hash the 64-bit number with hash_64(). Sadly,
	 * in testing, hash_64() doesn't distribute keys were the
	 * upper bits are close together (as would be the case with
	 * pids) and job numbers are equal (as would be the case with
	 * synchronous task sets with all relative deadlines equal).
	 *
	 * A 2006 Linux patch proposed the following solution
	 * (but for some reason it wasn't accepted...).
	 *
	 * At least this workaround works for 32-bit systems as well.
	 */
	return hash_32(hash_32((u32)tsk_rt(t)->job_params.job_no, 32) ^ t->pid, 32);
}
#endif


/* edf_higher_prio -  returns true if first has a higher EDF priority
 *                    than second. Deadline ties are broken by PID.
 *
 * both first and second may be NULL
 *
 * Under CONFIG_LITMUS_NESTED_LOCKING, each task carries a comparison
 * mode that selects whether its inherited (EFFECTIVE) or base (BASE)
 * priority is used; otherwise inherited priorities are always applied
 * when present (see the CONFIG_LITMUS_LOCKING section below).
 */
#ifdef CONFIG_LITMUS_NESTED_LOCKING
int __edf_higher_prio(
	struct task_struct* first, comparison_mode_t first_mode,
	struct task_struct* second, comparison_mode_t second_mode)
#else
int edf_higher_prio(struct task_struct* first, struct task_struct* second)
#endif
{
	struct task_struct *first_task = first;
	struct task_struct *second_task = second;

	/* There is no point in comparing a task to itself. */
	if (first && first == second) {
		TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid);
		WARN_ON(1);
		return 0;
	}


	/* check for NULL tasks: any non-NULL task beats a NULL one */
	if (!first || !second) {
		return first && !second;
	}

	/* There is some goofy stuff in this code here.  There are three subclasses
	 * within the SCHED_LITMUS scheduling class:
	 * 1) Auxiliary tasks: COTS helper threads from the application level that
	 *      are forced to be real-time.
	 * 2) klmirqd interrupt threads: Litmus threaded interrupt handlers.
	 * 3) Normal Litmus tasks.
	 *
	 * At their base priorities, #3 > #2 > #1.  However, #1 and #2 threads might
	 * inherit a priority from a task of #3.
	 *
	 * The code proceeds in the following manner:
	 * 1) Make aux and klmirqd threads with base-priorities have low priorities.
	 * 2) Determine effective priorities.
	 * 3) Perform priority comparison.  Favor #3 over #1 and #2 in case of tie.
	 */


#if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_BOOSTED)
	/* run aux tasks at max priority */
	/* TODO: Actually use prio-boosting. */
	if (first->rt_param.is_aux_task != second->rt_param.is_aux_task)
	{
		return (first->rt_param.is_aux_task > second->rt_param.is_aux_task);
	}
	else if(first->rt_param.is_aux_task && second->rt_param.is_aux_task)
	{
		if(first->group_leader == second->group_leader) {
			TRACE_CUR("aux tie break!\n");  // tie-break by BASE priority of the aux tasks
			goto aux_tie_break;
		}
		/* compare the group leaders' priorities instead */
		first = first->group_leader;
		second = second->group_leader;
	}
#elif defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE)
	{
		int first_lo_aux = first->rt_param.is_aux_task && !first->rt_param.inh_task;
		int second_lo_aux = second->rt_param.is_aux_task && !second->rt_param.inh_task;

		/* prioritize aux tasks without inheritance below real-time tasks */
		if (first_lo_aux || second_lo_aux) {
			// one of these is an aux task without inheritance.
			if(first_lo_aux && second_lo_aux) {
				TRACE_CUR("aux tie break!\n");  // tie-break by BASE priority of the aux tasks
				goto aux_tie_break;
			}
			else {

				// make the aux thread lowest priority real-time task
				int temp = 0;
				if (first_lo_aux && is_realtime(second)) {
//					temp = 0;
				}
				else if(second_lo_aux && is_realtime(first)) {
					temp = 1;
				}
				TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp);
				return temp;
			}
		}

		if (first->rt_param.is_aux_task && second->rt_param.is_aux_task &&
			first->rt_param.inh_task == second->rt_param.inh_task) {
			// inh_task is !NULL for both tasks since neither was a lo_aux task.
			// Both aux tasks inherit from the same task, so tie-break
			// by base priority of the aux tasks.
			TRACE_CUR("aux tie break!\n");
			goto aux_tie_break;
		}
	}
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
	{
		int first_lo_klmirqd = first->rt_param.is_interrupt_thread && !first->rt_param.inh_task;
		int second_lo_klmirqd = second->rt_param.is_interrupt_thread && !second->rt_param.inh_task;

		/* prioritize klmirqd threads without inheritance below real-time tasks */
		if (first_lo_klmirqd || second_lo_klmirqd) {
			// one of these is a klmirqd thread without inheritance.
			if(first_lo_klmirqd && second_lo_klmirqd) {
				TRACE_CUR("klmirqd tie break!\n");  // tie-break by BASE priority of the klmirqd threads
				goto klmirqd_tie_break;
			}
			else {
				// make the klmirqd thread the lowest-priority real-time task
				// but (above low-prio aux tasks and Linux tasks)
				int temp = 0;
				if (first_lo_klmirqd && is_realtime(second)) {
//					temp = 0;
				}
				else if(second_lo_klmirqd && is_realtime(first)) {
					temp = 1;
				}
				TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp);
				return temp;
			}
		}

		if (first->rt_param.is_interrupt_thread && second->rt_param.is_interrupt_thread &&
			first->rt_param.inh_task == second->rt_param.inh_task) {
			// inh_task is !NULL for both tasks since neither was a lo_klmirqd task.
			// Both klmirqd tasks inherit from the same task, so tie-break
			// by base priority of the klmirqd tasks.
			TRACE_CUR("klmirqd tie break!\n");
			goto klmirqd_tie_break;
		}
	}
#endif


#ifdef CONFIG_LITMUS_LOCKING
	/* Check for EFFECTIVE priorities. Change task
	 * used for comparison in such a case.
	 */
	if (unlikely(first->rt_param.inh_task)
#ifdef CONFIG_LITMUS_NESTED_LOCKING
		&& (first_mode == EFFECTIVE)
#endif
		) {
		first_task = first->rt_param.inh_task;
	}
	if (unlikely(second->rt_param.inh_task)
#ifdef CONFIG_LITMUS_NESTED_LOCKING
		&& (second_mode == EFFECTIVE)
#endif
		) {
		second_task = second->rt_param.inh_task;
	}

	/* Check for priority boosting. Tie-break by start of boosting.
	 */
	if (unlikely(is_priority_boosted(first_task))) {
		/* first_task is boosted, how about second_task? */
		if (!is_priority_boosted(second_task) ||
		    lt_before(get_boost_start(first_task),
					  get_boost_start(second_task))) {
			return 1;
		}
		else {
			return 0;
		}
	}
	else if (unlikely(is_priority_boosted(second_task))) {
		/* second_task is boosted, first is not*/
		return 0;
	}

#endif

/* NOTE(review): these labels are guarded by the umbrella config options,
 * while the matching gotos above are guarded by the sub-options
 * CONFIG_REALTIME_AUX_TASK_PRIORITY_{BOOSTED,INHERITANCE}. With
 * CONFIG_REALTIME_AUX_TASKS set but neither sub-option enabled, the
 * aux_tie_break label would be unused -- confirm against the Kconfig
 * dependencies.
 */
#ifdef CONFIG_REALTIME_AUX_TASKS
aux_tie_break:
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
klmirqd_tie_break:
#endif

	/* A real-time task always beats a non-real-time one. */
	if (!is_realtime(second_task)) {
		return 1;
	}
	else if (earlier_deadline(first_task, second_task)) {
		return 1;
	}
	else if (get_deadline(first_task) == get_deadline(second_task)) {
		/* Need to tie break. All methods must set pid_break to 0/1 if
		 * first_task does not have priority over second_task.
		 */
		int pid_break;

#if defined(CONFIG_EDF_TIE_BREAK_LATENESS)
		/* Tie break by lateness. Jobs with greater lateness get
		 * priority. This should spread tardiness across all tasks,
		 * especially in task sets where all tasks have the same
		 * period and relative deadlines.
		 */
		if (get_lateness(first_task) > get_lateness(second_task)) {
			return 1;
		}
		pid_break = (get_lateness(first_task) == get_lateness(second_task));


#elif defined(CONFIG_EDF_TIE_BREAK_LATENESS_NORM)
		/* Tie break by lateness, normalized by relative deadline. Jobs with
		 * greater normalized lateness get priority.
		 *
		 * Note: Considered using the algebraically equivalent
		 *	lateness(first)*relative_deadline(second) >
					lateness(second)*relative_deadline(first)
		 * to avoid fixed-point math, but values are prone to overflow if inputs
		 * are on the order of several seconds, even in 64-bit.
		 */
		fp_t fnorm = _frac(get_lateness(first_task),
						   get_rt_relative_deadline(first_task));
		fp_t snorm = _frac(get_lateness(second_task),
						   get_rt_relative_deadline(second_task));
		if (_gt(fnorm, snorm)) {
			return 1;
		}
		pid_break = _eq(fnorm, snorm);


#elif defined(CONFIG_EDF_TIE_BREAK_HASH)
		/* Tie break by comparing hashes of (pid, job#) tuple.  There should be
		 * a 50% chance that first_task has a higher priority than second_task.
		 */
		long fhash = edf_hash(first_task);
		long shash = edf_hash(second_task);
		if (fhash < shash) {
			return 1;
		}
		pid_break = (fhash == shash);
#else


		/* CONFIG_EDF_PID_TIE_BREAK */
		pid_break = 1; // fall through to tie-break by pid;
#endif

		/* Tie break by pid: lower pid wins. On a pid tie (same thread,
		 * differing only by inheritance), favor interrupt threads,
		 * then aux tasks, then the task withOUT inherited priority.
		 */
		if(pid_break) {
			if (first_task->pid < second_task->pid) {
				return 1;
			}
			else if (first_task->pid == second_task->pid) {
#ifdef CONFIG_LITMUS_SOFTIRQD
				if (first_task->rt_param.is_interrupt_thread < second_task->rt_param.is_interrupt_thread) {
					return 1;
				}
				else if (first_task->rt_param.is_interrupt_thread == second_task->rt_param.is_interrupt_thread) {
#endif

#if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE)
				if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) {
					return 1;
				}
				else if (tsk_rt(first)->is_aux_task == tsk_rt(second)->is_aux_task) {
#endif

				/* Something could be wrong if you get this far. */
				if (unlikely(first->rt_param.inh_task == second->rt_param.inh_task)) {
					/* Both tasks have the same inherited priority.
					 * Likely in a bug-condition.
					 */
					if (first->pid < second->pid) {
						return 1;
					}
					else if (first->pid == second->pid) {
						//WARN_ON(1);
					}
				}
				else {
					/* At least one task must inherit */
					BUG_ON(!first->rt_param.inh_task &&
						   !second->rt_param.inh_task);

					/* The task withOUT the inherited priority wins. */
					if (second->rt_param.inh_task) {
						/*
						 * common with aux tasks.
						TRACE_CUR("unusual comparison: "
							"first = %s/%d  first_task = %s/%d  "
							"second = %s/%d  second_task = %s/%d\n",
							first->comm, first->pid,
							(first->rt_param.inh_task) ? first->rt_param.inh_task->comm : "(nil)",
							(first->rt_param.inh_task) ? first->rt_param.inh_task->pid : 0,
							second->comm, second->pid,
							(second->rt_param.inh_task) ? second->rt_param.inh_task->comm : "(nil)",
							(second->rt_param.inh_task) ? second->rt_param.inh_task->pid : 0);
						 */
						return 1;
					}
				}
#if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE)
				}
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
				}
#endif

			}
		}
	}

	return 0; /* fall-through. prio(second_task) > prio(first_task) */
}


#ifdef CONFIG_LITMUS_NESTED_LOCKING
/* Public comparison entry point: compare both tasks by their EFFECTIVE
 * (i.e. possibly inherited) priorities.
 */
int edf_higher_prio(struct task_struct* first, struct task_struct* second)
{
	return __edf_higher_prio(first, EFFECTIVE, second, EFFECTIVE);
}

/* Max-heap comparator: orders nested_info nodes by the EFFECTIVE
 * priority of their highest-priority waiters.
 */
int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b)
{
	struct nested_info *info_a =
		binheap_entry(a, struct nested_info, hp_binheap_node);
	struct nested_info *info_b =
		binheap_entry(b, struct nested_info, hp_binheap_node);

	return __edf_higher_prio(info_a->hp_waiter_eff_prio, EFFECTIVE,
				 info_b->hp_waiter_eff_prio, EFFECTIVE);
}

int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b)
{
	return edf_max_heap_order(b, a);  // swap comparison
}

/* Max-heap comparator using BASE (non-inherited) priorities of the
 * highest-priority waiters.
 */
int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
{
	struct nested_info *info_a =
		binheap_entry(a, struct nested_info, hp_binheap_node);
	struct nested_info *info_b =
		binheap_entry(b, struct nested_info, hp_binheap_node);

	return __edf_higher_prio(info_a->hp_waiter_eff_prio, BASE,
				 info_b->hp_waiter_eff_prio, BASE);
}

int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b)
{
	return edf_max_heap_base_priority_order(b, a);  // swap comparison
}
#endif


/* Ready-queue comparator: order bheap nodes by the EDF priority of the
 * tasks they carry.
 */
int edf_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	struct task_struct *task_a = bheap2task(a);
	struct task_struct *task_b = bheap2task(b);

	return edf_higher_prio(task_a, task_b);
}

/* edf_domain_init - initialize an rt_domain whose ready queue is
 * ordered by EDF priority.
 * @rt:      the domain to initialize
 * @resched: callback invoked when a reschedule check is needed
 * @release: callback invoked when jobs are released
 */
void edf_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
		      release_jobs_t release)
{
	rt_domain_init(rt,  edf_ready_order, resched, release);
}

/* edf_preemption_needed - check whether the task t needs to be preempted
 *                   call only with irqs disabled and with  ready_lock acquired
 *                   THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
 *
 * Returns nonzero if the highest-priority pending job in @rt should
 * preempt @t (or if @t is NULL / not a real-time task).
 */
int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t)
{
	/* we need the read lock for edf_ready_queue */
	/* no need to preempt if there is nothing pending */
	if (!__jobs_pending(rt))
		return 0;
	/* we need to reschedule if t doesn't exist */
	if (!t)
		return 1;

	/* NOTE: We cannot check for non-preemptibility since we
	 *       don't know what address space we're currently in.
	 */

	/* make sure to get non-rt stuff out of the way */
	return !is_realtime(t) || edf_higher_prio(__next_ready(rt), t);
}