/*
 * kernel/edzl_common.c
 *
 * Common functions for EDZL-based schedulers.
 */
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <litmus/litmus.h>
#include <litmus/sched_plugin.h>
#include <litmus/sched_trace.h>
#include <litmus/edf_common.h>
#include <litmus/edzl_common.h>
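
/*
 * EDZL priority ordering: a job whose laxity (time until its absolute
 * deadline minus its remaining execution time) has dropped to zero, as
 * indicated by its zero-laxity flag, has higher priority than any job
 * with positive laxity; jobs in the same laxity state are ordered by
 * plain EDF.  The helpers below implement this comparison and plug it
 * into the generic rt_domain ready queue.
 */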
int edzl_higher_prio(struct task_struct* first,
		     struct task_struct* second)
{
	struct task_struct *first_task = first;
	struct task_struct *second_task = second;

	/* There is no point in comparing a task to itself. */
	if (first && first == second) {
		TRACE_TASK(first,
			   "WARNING: pointless EDZL priority comparison.\n");
		return 0;
	}

	/* Check for inherited priorities. Change task
	 * used for comparison in such a case.
	 */
	if (first && first->rt_param.inh_task)
		first_task = first->rt_param.inh_task;
	if (second && second->rt_param.inh_task)
		second_task = second->rt_param.inh_task;

	/* null checks & rt checks */
	if (!first_task)
		return 0;
	else if (!second_task || !is_realtime(second_task))
		return 1;

	if (likely(get_zerolaxity(first_task) == get_zerolaxity(second_task))) {
		/* EDF order if both tasks are in the same laxity state */
		return edf_higher_prio(first_task, second_task);
	} else {
		/* a zero-laxity job outranks a job with positive laxity */
		return get_zerolaxity(first_task);
	}
}
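
/* bheap ordering callback for the EDZL ready queue: returns nonzero if a's
 * task should come before b's task (i.e., a has higher EDZL priority). */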
int edzl_ready_order(struct bheap_node* a, struct bheap_node* b)
{
	return edzl_higher_prio(bheap2task(a), bheap2task(b));
}
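
/* Initialize an rt_domain whose ready queue is ordered by EDZL priority. */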
void edzl_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
		      release_jobs_t release)
{
	rt_domain_init(rt, edzl_ready_order, resched, release);
}

/* edzl_preemption_needed - check whether the task t needs to be preempted
 *                          call only with irqs disabled and with ready_lock acquired
 *                          THIS DOES NOT TAKE NON-PREEMPTIVE SECTIONS INTO ACCOUNT!
*/
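/* Returns nonzero if the currently scheduled task t should be preempted in
 * favor of the job at the head of the ready queue. */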
int edzl_preemption_needed(rt_domain_t* rt, struct task_struct *t)
{
	/* we need the read lock for edzl_ready_queue */
	/* no need to preempt if there is nothing pending */
	if (!__jobs_pending(rt))
		return 0;
	/* we need to reschedule if t doesn't exist */
	if (!t)
		return 1;

	/* make sure to get non-rt stuff out of the way */
	if (!is_realtime(t))
		return 1;

	/* NOTE: We cannot check for non-preemptibility since we
	 *       don't know what address space we're currently in.
	 */

	/* Detect zero-laxity as needed.  Easier to do it here than in tick.
	 * (No timer is used to detect zero-laxity while a job is running.) */
	if (unlikely(!get_zerolaxity(t) && laxity_remaining(t) == 0))
		set_zerolaxity(t, 1);

	return edzl_higher_prio(__next_ready(rt), t);
}
#ifdef CONFIG_PLUGIN_AEDZL
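/* AEDZL variant of edzl_preemption_needed(): identical logic, except that
 * zero laxity is detected from the estimated remaining execution time
 * (laxity_remaining_est()) instead of laxity_remaining(). */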
int aedzl_preemption_needed(rt_domain_t* rt, struct task_struct *t)
{
	/* we need the read lock for edzl_ready_queue */
	/* no need to preempt if there is nothing pending */
	if (!__jobs_pending(rt))
		return 0;
	/* we need to reschedule if t doesn't exist */
	if (!t)
		return 1;

	/* make sure to get non-rt stuff out of the way */
	if (!is_realtime(t))
		return 1;

	/* Detect zero-laxity as needed.  Easier to do it here than in tick.
	 * (No timer is used to detect zero-laxity while a job is running.) */
	if (unlikely(!get_zerolaxity(t) && laxity_remaining_est(t) == 0))
		set_zerolaxity(t, 1);

	return edzl_higher_prio(__next_ready(rt), t);
}
#endif