path: root/kernel/pfair_common.c
/*
 * Common functions for PFAIR-based schedulers.
 */

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/list.h>

#include <linux/litmus.h>
#include <linux/sched_plugin.h>
#include <linux/sched_trace.h>

#include <linux/pfair_common.h>
#include <linux/pfair_math.h>
/* Compare two tasks: returns 1 if the lhs has higher
 * priority than the rhs, 0 otherwise.
 */
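/* This appears to implement the PD^2 priority order from the Pfair
 * literature: earlier (pseudo-)deadline first; on a tie, a set b-bit
 * wins (the subtask's window overlaps its successor's); if both
 * b-bits are set, the later group deadline wins.
 */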
int is_pfair_hp(struct task_struct *lhs, struct task_struct *rhs)
{
	/*	Favor subtasks with earlier deadlines */
	if(time_before(get_deadline(lhs), get_deadline(rhs)))
		return 1;
	if(get_deadline(lhs) == get_deadline(rhs)) {
		/* If deadlines are equal,
		 * favor a set b-bit (the windows overlap) */
		if(lhs->rt_param.times.b_bit > rhs->rt_param.times.b_bit)
			return 1;
		
		if(lhs->rt_param.times.b_bit == rhs->rt_param.times.b_bit &&
			       	lhs->rt_param.times.b_bit == 1)
			/* If b-bit is 1, favor tasks with later 
			 * group deadline */
			return time_after(lhs->rt_param.times.group_deadline,
				rhs->rt_param.times.group_deadline);
		
	}
	return 0;
}

void pfair_domain_init(pfair_domain_t *pfair) 
{
	BUG_ON(!pfair);
	INIT_LIST_HEAD(&pfair->ready_queue);
	INIT_LIST_HEAD(&pfair->release_queue);
	queue_lock_init(&pfair->pfair_lock);
	cpus_setall(pfair->domain_cpus);
	/* Use CPU 0 to keep the system alive.
	 * TODO: remove later or make this configurable.
	 */
	cpu_clear(0, pfair->domain_cpus);
}


/* pfair_add_ready - add a real-time task to the PFAIR ready queue.
 * The task must be runnable. The global domain lock must be held
 * before calling this function.
 *
 * @new:      the newly released task
 */
void pfair_add_ready(pfair_domain_t* pfair, struct task_struct *new) 
{
	struct list_head *pos;
	struct task_struct *queued;

	BUG_ON(!new);
	/* find the first queued task with lower priority than the new one */
	list_for_each(pos, &pfair->ready_queue) {		
		queued = list_entry(pos, struct task_struct, rt_list);
		if (unlikely(is_pfair_hp(new, queued))) {
			/* the task at pos has lower priority;
			 * insert the new task in front of it */
			__list_add(&new->rt_list, pos->prev, pos);
			return;
		}
	}
	/* if we get to this point either the list is empty or new has the
	 * lowest priority. Let's add it to the end. */
	list_add_tail(&new->rt_list, &pfair->ready_queue);
}
/**
 *	Take the highest-priority task off the ready queue;
 *	returns NULL if the queue is empty.
 */
struct task_struct* __pfair_take_ready(pfair_domain_t* pfair) 
{
	struct task_struct *t = NULL;
	/* stays NULL if nothing is ready, i.e., all tasks are either
	 * not yet released, preempted, or non-rt */
	if (!list_empty(&pfair->ready_queue)) {

		/* take next rt task */
		t = list_entry(pfair->ready_queue.next, struct task_struct, 
			       rt_list);

		/* kick it out of the ready list */
		list_del(&t->rt_list);
	}
	return t;
}


/* pfair_add_release - add a real-time task to the PFAIR release queue.
 * The domain lock must be acquired before this function is called.
 *
 * @task:        the sleeping task
 */
void pfair_add_release(pfair_domain_t* pfair, struct task_struct *task) 
{
	struct list_head *pos;
	struct task_struct *queued;

	BUG_ON(!task);
	/* find the last queued task that is released before this one */
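	/* (the list is scanned from the tail, presumably because a task
	 *  being re-queued tends to have one of the latest releases) */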
	list_for_each_prev(pos, &pfair->release_queue) {		
		queued = list_entry(pos, struct task_struct, rt_list);
		if ((unlikely(time_before(queued->rt_param.times.release,
					 task->rt_param.times.release)))) {
			/* the task at pos has an earlier release;
			 * insert the new task behind it */
			__list_add(&task->rt_list, pos, pos->next);
			return;	
		}
	}
	/* if we get to this point either the list is empty or task has the
	 * earliest release. Let's add it to the front. */
	list_add(&task->rt_list, &pfair->release_queue);
}
/**
 *	Called from the tick handler; it acquires the lock itself.
 *	Only one processor at a time effectively merges the queues.
 */
void pfair_try_release_pending(pfair_domain_t* pfair) 
{
	unsigned long flags;
	struct list_head *pos, *save;
	struct task_struct *queued;
	queue_lock_irqsave(&pfair->pfair_lock, flags);

	list_for_each_safe(pos, save, &pfair->release_queue) {
		queued = list_entry(pos, struct task_struct, rt_list);
		if (likely(time_before_eq(
				   queued->rt_param.times.release, jiffies))) {
			/* this one is ready to go */
			list_del(pos);
			set_rt_flags(queued, RT_F_RUNNING);

			sched_trace_job_release(queued);
			/* make sure the updates above are visible
			 * before the task can be picked up */
			barrier();
			pfair_add_ready(pfair, queued);
		} else
			/* the release queue is ordered */
			break;
	}
	queue_unlock_irqrestore(&pfair->pfair_lock, flags);
}
/*
 *	Subtask preparation. Assumes that last_release
 *	denotes the time at which the current job was released.
 */
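/* For reference, the standard Pfair definitions (which the helpers in
 * pfair_math.h presumably implement): a task with weight w = e/p has,
 * for its i-th subtask (i >= 1), relative to the job release:
 *
 *	release(i)  = floor((i - 1) / w)
 *	deadline(i) = ceil(i / w)
 *	b_bit(i)    = ceil(i / w) - floor(i / w)
 *
 * E.g., for e = 2, p = 5 (w = 2/5) the subtask windows are [0, 3)
 * and [2, 5); the first subtask has b-bit 1 (its window overlaps the
 * next one), the last has b-bit 0. The group deadline is only
 * relevant for heavy tasks (w >= 1/2) and is 0 for light tasks.
 */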
void pfair_prepare_next_subtask(struct task_struct *t)
{
	BUG_ON(!t);	
	/* assign subtask release time, deadline, b-bit,
	 * and group deadline
	 */
	t->rt_param.times.release        = t->rt_param.times.last_release
						+ release_time(t);
	t->rt_param.times.deadline       = t->rt_param.times.last_release
						+ pfair_deadline(t);
	t->rt_param.times.b_bit          = b_bit(t);
	t->rt_param.times.group_deadline = t->rt_param.times.last_release
						+ group_deadline(t);
}

void pfair_prepare_next_job(struct task_struct *t)
{
	BUG_ON(!t);	

	/* prepare the next job release */
	/* reset the count of consumed quanta so that new subtask
	 * release times and deadlines are computed correctly
	 */
	t->rt_param.times.exec_time = 0;
	/* advance the job-wide release time; it is the reference point
	 * for computing subtask releases, deadlines, and group deadlines
	 */
	t->rt_param.times.last_release = t->rt_param.times.last_release
						+ get_rt_period(t);
	/* release the first subtask */
	pfair_prepare_next_subtask(t);
	t->first_time_slice = 0;
	/* Increase job sequence number */
	t->rt_param.times.job_no++;
}

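/* Reset a task so that its first job is released at time 'start'. */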
void __pfair_prepare_new_release(struct task_struct *t, jiffie_t start) 
{
	t->rt_param.times.release = start;
	t->rt_param.times.last_release = start;
	t->rt_param.times.exec_time = 0;
	t->first_time_slice = 0;
	pfair_prepare_next_subtask(t);
	set_rt_flags(t, RT_F_RUNNING);	
}

void pfair_prepare_new_releases(pfair_domain_t *pfair, jiffie_t start) 
{
	unsigned long flags;
	struct list_head tmp_list;
	struct list_head *pos, *n;
	struct task_struct *t;
	
	INIT_LIST_HEAD(&tmp_list);
	
	queue_lock_irqsave(&pfair->pfair_lock, flags);


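	/* move everything off the release and ready queues onto a
	 * private list so that all tasks can be re-released at 'start' */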
	while (!list_empty(&pfair->release_queue)) {
		pos = pfair->release_queue.next;
		list_del(pos);
		list_add(pos, &tmp_list);	       
	}
	while (!list_empty(&pfair->ready_queue)) {
		pos = pfair->ready_queue.next;
		list_del(pos);
		list_add(pos, &tmp_list);
	}

	list_for_each_safe(pos, n, &tmp_list) {
		t = list_entry(pos, struct task_struct, rt_list);
		list_del(pos);
		__pfair_prepare_new_release(t, start);
		pfair_add_release(pfair, t);
	}
	queue_unlock_irqrestore(&pfair->pfair_lock, flags);
}