author	Jonathan Herman <hermanjl@cs.unc.edu>	2011-04-03 04:43:36 -0400
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2011-04-03 04:43:36 -0400
commit	c0574e57efc2efcf40d8e3dba034deb04dad11c1 (patch)
tree	ea039786410fad6cf43582d1977b2d23cf8b245b
parent	83c82067a1e75567003da012159c92708ae26014 (diff)
Better config release master support and simpler servers.
-rw-r--r--	include/litmus/servers.h	99
-rw-r--r--	litmus/litmus.c	3
-rw-r--r--	litmus/sched_edf_hsb.c	942
-rw-r--r--	litmus/servers.c	475
4 files changed, 856 insertions(+), 663 deletions(-)
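Before the diff itself, a sketch of the simplified API this commit settles on. The calls below are the ones declared in the reworked include/litmus/servers.h; the wrapper function, the server id, and the wcet/period values are hypothetical, and the assumption that server_release_at() starts a period at the given time is inferred from how catchup_server() uses it in sched_edf_hsb.c.

/* Hypothetical plugin snippet: a sketch of the new server API,
 * not code from this patch. */
#include <litmus/servers.h>

static server_t demo_server;

static void demo_setup(server_domain_t *domain, lt_t now)
{
        /* wcet and period are user-specified; grouped = 1 opts the
         * server into grouped releases via its release_heap. */
        server_init(&demo_server, 42, 5000000ULL, 20000000ULL, 1);

        /* Assumed: begin the first period at `now`. */
        server_release_at(&demo_server, now);

        /* add_server_release() now returns an int; the plugin code
         * below treats 0 as a missed release and recovers by hand. */
        if (!add_server_release(&demo_server, domain))
                server_release(&demo_server);
}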
diff --git a/include/litmus/servers.h b/include/litmus/servers.h
index d04e08ca35ca..106f5ece53a8 100644
--- a/include/litmus/servers.h
+++ b/include/litmus/servers.h
@@ -6,6 +6,7 @@ struct periodic_server;
 struct proc_read_args;
 struct proc_dir_entry;
 struct server_domain;
+struct server_release_heap;
 
 #define SERVER_RELEASE_QUEUE_SLOTS 127
 
@@ -37,23 +38,30 @@ typedef void (*stop_servers_t)(void);
 typedef void (*servers_released_t)(struct list_head *servers);
 
 typedef struct server {
-        int id;
+        /* Specified by the user */
+        int id;
+        lt_t wcet;
+        lt_t period;
 
         /* Managed internally */
+        lt_t deadline;
+        lt_t release;
         lt_t budget;     /* The remaining budget for current period */
         lt_t start_time; /* The time the server started executing or 0 */
         int running;     /* True if server is currently executing */
         int job_no;      /* Current job of server */
 
-        int type; /* Optional for differentiating b/w released servers */
+        int type; /* Optional */
 
-        struct task_struct* scheduled;
+        /* Used for grouped releases */
+        struct server_release_heap *release_heap;
+        struct list_head release_list;
 
-        /* Optional lock for timer fires */
-        raw_spinlock_t* timer_lock;
+        /* Useful in general */
+        struct bheap_node *hn;
+        struct list_head list;
 
-        /* TODO REMOVE ME NOT NECESSARY */
-        struct server_domain* domain;
+        struct task_struct* scheduled;
 } server_t;
 
 /*
@@ -61,6 +69,7 @@ typedef struct server {
  */
 typedef struct completion_timer {
         int armed;
+        int cpu;
         struct hrtimer timer;
         struct hrtimer_start_on_info info;
         struct server_domain *domain;
@@ -87,17 +96,6 @@ typedef struct server_domain {
         struct list_head tobe_released;
 } server_domain_t;
 
-
-/*
- * Everything needed to release some servers.
- */
-typedef struct server_release_info {
-        struct hrtimer timer;
-        struct hrtimer_start_on_info info;
-        int cpu;
-        servers_released_t servers_released;
-} server_release_info_t;
-
 /*
  * A group of servers releasing simultaneously.
  */
@@ -106,49 +104,23 @@ typedef struct server_release_heap {
         lt_t release_time;
 
         struct list_head servers;
-        server_domain_t *domain;
-        server_release_info_t info;
-} server_release_heap_t;
-
-
-typedef struct periodic_server {
-        server_t server;
 
-        /* Specified by the user */
-        lt_t wcet;
-        lt_t period;
-        int release_cpu;
-
-        /* Managed internally */
-        lt_t deadline;
-        lt_t release;
-
-        /* Used to maintain server state */
-        struct hrtimer release_timer;
-        post_release_t post_release;
-} pserver_t;
-
-#define get_server_budget(s) (((server_t*)s)->budget)
-#define get_server_deadline(s) (((pserver_t*)s)->deadline)
-#define get_server_id(s) (((server_t*)s)->id)
-#define get_server_period(s) (((pserver_t*)s)->period)
-#define get_server_release(s) (((pserver_t*)s)->release)
-#define get_server_running(s) (((server_t*)s)->running)
-#define get_server_scheduled(s) (((server_t*)s)->scheduled)
-#define get_server_wcet(s) (((pserver_t*)s)->wcet)
+        struct hrtimer timer;
+        struct hrtimer_start_on_info info;
 
+        server_domain_t *domain;
+} server_release_heap_t;
 
-void server_init(server_t *server, int id, raw_spinlock_t *timer_lock);
-void pserver_init(pserver_t *server, int id, raw_spinlock_t *timer_lock,
-                  lt_t wcet, lt_t period);
+void server_init(server_t *server, int id,
+                 lt_t wcet, lt_t period, int grouped);
 
 void server_domain_init(server_domain_t *domain,
                         servers_released_t servers_released,
                         server_completed_t server_completed,
                         int release_master, raw_spinlock_t* timer_lock);
 void server_domain_exit(server_domain_t *domain);
-void add_server_release(server_t *server, server_domain_t *server_domain);
-void stop_server_releasing(server_domain_t *server_domain);
+
+int add_server_release(server_t *server, server_domain_t *server_domain);
 
 /*
  * Runs a task on the server. If the server runs out of budget
@@ -167,34 +139,21 @@ void server_run(server_t *server, struct task_struct *task,
 void server_stop(server_t *server, server_domain_t *domain);
 
 /*
- * Starts a timer which will call server_release every period. This
- * can either be called with a global timer or a per-cpu timer.
- */
-void pserver_start_releasing(pserver_t *server, lt_t time);
-void pserver_start_cpu_releasing(pserver_t *server, lt_t time, int cpu);
-
-/*
- * Stops periodically releasing the server.
+ * Begins a server's next period.
  */
-void pserver_stop_releasing(pserver_t *server);
+void server_release(server_t *server);
 
 /*
- * Begins a server's next period.
+ * Set the next period to begin at the given time.
  */
-void server_release(server_t *server, lt_t budget);
-static inline void pserver_release(pserver_t *server)
-{
-        server->release = server->deadline;
-        server->deadline += server->period;
-        server_release(&server->server, server->wcet);
-}
+void server_release_at(server_t *server, lt_t time);
 
 /*
  * Call once for every server which should be printed
  * out on a proc dir read. Should be called inside a list_servers_t
  * method.
  */
-void server_proc_read_single(pserver_t *server, int cpu,
+void server_proc_read_single(server_t *server, int cpu,
                              struct proc_read_args *args);
 
 /*
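The grouped-release fields above (release_heap, release_list) replace the old per-server release timers: servers sharing a release time sit on one server_release_heap and reach the plugin through a single servers_released_t callback. A sketch of such a callback, shaped after the servers_released() added to sched_edf_hsb.c later in this patch; the TRACE format string is illustrative.

/* Sketch of a servers_released_t callback walking the grouped-release
 * list; mirrors servers_released() in litmus/sched_edf_hsb.c below. */
static void demo_servers_released(struct list_head *servers)
{
        struct list_head *pos;
        server_t *server;

        /* every server on this list shares one release time */
        list_for_each(pos, servers) {
                server = list_entry(pos, server_t, release_list);
                TRACE("server %d released\n", server->id);
        }
}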
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 11ccaafd50de..8a643dffc02d 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -33,6 +33,7 @@ atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 
 static struct kmem_cache * bheap_node_cache;
 extern struct kmem_cache * release_heap_cache;
+extern struct kmem_cache * server_release_cache;
 
 struct bheap_node* bheap_node_alloc(int gfp_flags)
 {
@@ -521,6 +522,7 @@ static int __init _init_litmus(void)
 
         bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);
         release_heap_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
+        server_release_cache = KMEM_CACHE(release_heap, SLAB_PANIC);
 
 #ifdef CONFIG_MAGIC_SYSRQ
         /* offer some debugging help */
@@ -540,6 +542,7 @@ static void _exit_litmus(void)
         exit_litmus_proc();
         kmem_cache_destroy(bheap_node_cache);
         kmem_cache_destroy(release_heap_cache);
+        kmem_cache_destroy(server_release_cache);
 }
 
 module_init(_init_litmus);
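The new server_release_cache follows the standard slab-cache lifecycle: KMEM_CACHE() at module init, a matching kmem_cache_destroy() at exit. The allocation sites live in litmus/servers.c, outside this diff; the fragment below is a generic usage sketch, with the GFP flag and surrounding logic assumed.

/* Generic slab-cache usage sketch (not from this patch). */
struct server_release_heap *rh;

rh = kmem_cache_alloc(server_release_cache, GFP_ATOMIC);
if (!rh)
        return -ENOMEM;
/* ... fill in release_time, servers list, timer ... */
kmem_cache_free(server_release_cache, rh);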
diff --git a/litmus/sched_edf_hsb.c b/litmus/sched_edf_hsb.c
index 37ea8da5f84e..9f588b089f17 100644
--- a/litmus/sched_edf_hsb.c
+++ b/litmus/sched_edf_hsb.c
@@ -14,6 +14,9 @@
  * and should never be run when the plugin is already running:
  *        stop_[hrt|be]_servers
  *        admit_[hrt|be]_server
+ *
+ * TODO: BE server task preemption
+ *
  */
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -33,6 +36,10 @@
 #include <litmus/servers.h>
 
 #define DEBUG_EDF_HSB
+
+/* DOES NOT WORK */
+//#define SLACK_ON_MASTER
+
 #define BE_PROC_NAME        "be_servers"
 #define HRT_PROC_NAME        "hrt_servers"
 #define BE_SERVER_BASE        100
@@ -66,64 +73,42 @@ atomic_t servers_running = ATOMIC_INIT(0);
 #endif
 
 typedef struct {
-        pserver_t pserver;
-        struct list_head list; /* For list of all BE servers */
-        struct bheap_node* hn; /* For list of ready BE servers */
-} be_server_t;
-
-typedef struct {
-        pserver_t pserver;
+        server_t server;
         rt_domain_t hrt_domain; /* EDF for HRT tasks assigned here */
+        int ready;
         int no_slack;
         struct hrtimer slack_timer; /* Server has no slack when:
                                      * (deadline - budget) <= current_time.
                                      */
+        struct hrtimer_start_on_info slack_timer_info;
 } hrt_server_t;
 
 typedef struct {
         int cpu;
         struct task_struct* scheduled; /* Task that should be running */
         struct task_struct* linked; /* Task that actually is running */
-        struct bheap_node* hn;
-        lt_t global_deadline;
+        server_t *linked_server; /* The server running on this cpu.
+                                  * Note that what it is 'running' is
+                                  * linked, not scheduled.
+                                  */
         hrt_server_t hrt_server; /* One HRT server per CPU */
-        server_t srt_server; /* SRT tasks share single server but save
-                              * their server state in the task_struct.
-                              */
-        server_t *linked_server; /* The server running on this cpu.
-                                  * Note that what it is 'running' is
-                                  * linked, not scheduled.
-                                  */
+        struct bheap_node* hn; /* For the cpu_heap */
 } cpu_entry_t;
 
-/* Used to save SRT server state per task */
-typedef struct {
-        lt_t budget;
-        int job_no;
-        lt_t release;
-        lt_t deadline;
-} srt_state_t;
-
-/*
- * Per-CPU variables
- */
+/* CPU state */
 DEFINE_PER_CPU_SHARED_ALIGNED(cpu_entry_t, cpu_entries);
-DEFINE_PER_CPU(struct hrtimer_start_on_info, slack_timer_infos);
+static struct bheap cpu_heap;
+static struct bheap_node cpu_heap_node[NR_CPUS];
 
-/*
- * Global variables
- */
-static rt_domain_t srt_domain; /* SRT tasks */
+static rt_domain_t srt_domain; /* SRT tasks (and servers) */
 static rt_domain_t be_domain; /* BE tasks */
 
-static server_domain_t server_domain;
-static int release_master;
+static server_domain_t server_domain; /* Useful tools for server scheduling */
 
 static struct list_head be_servers; /* All BE servers */
 static struct bheap be_ready_servers; /* Runnable BE servers */
 
-static struct bheap cpu_heap;
-static struct bheap_node cpu_heap_node[NR_CPUS];
+static int release_master; /* CPU which will release tasks and global servers */
 
 static struct proc_dir_entry *edf_hsb_proc_dir = NULL;
 static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
@@ -131,9 +116,13 @@ static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp;
 #define local_cpu_entry        (&__get_cpu_var(cpu_entries))
 #define task_sched_entry(task) (&per_cpu(cpu_entries, task_cpu(task)))
 #define task_linked_entry(task) (&per_cpu(cpu_entries, task->rt_param.linked_on))
+#define task_job_no(task)        (tsk_rt(task)->job_params.job_no)
 #define global_lock        (&srt_domain.ready_lock)
 #define is_active_plugin        (litmus == &edf_hsb_plugin)
 
+/*
+ * FIFO for BE tasks.
+ */
 static inline int be_higher_prio(struct task_struct *first, struct task_struct *second)
 {
         return
@@ -146,7 +135,7 @@ static inline int be_higher_prio(struct task_struct *first, struct task_struct *second)
 }
 
 /*
- * BE jobs are FIFO.
+ * FIFO for BE tasks.
  */
 static int be_ready_order(struct bheap_node *a, struct bheap_node *b)
 {
@@ -161,39 +150,36 @@ static int be_ready_order(struct bheap_node *a, struct bheap_node *b)
 }
 
 /*
+ * Orders all servers by EDF.
+ */
+static inline int server_higher_prio(server_t *first, server_t *second)
+{
+        return /* EDF for servers */
+                lt_before(first->deadline, second->deadline) ||
+                /* Break by id */
+                (first->deadline == second->deadline &&
+                 first->id < second->id);
+}
+
+/*
  * Orders BE servers by EDF.
  */
-static int be_server_order(struct bheap_node *a, struct bheap_node *b)
+static int server_order(struct bheap_node *a, struct bheap_node *b)
 {
-        be_server_t *first, *second;
+        server_t *first, *second;
         first = a->value;
         second = b->value;
 
-        if (lt_before(get_server_deadline(first),
-                      get_server_deadline(second)) ||
-            (get_server_deadline(first) == get_server_deadline(second) &&
-             get_server_id(first) < get_server_id(second)))
+        if (lt_before(first->deadline, second->deadline) ||
+            (first->deadline == second->deadline &&
+             first->id < second->id))
                 TRACE_SUB("server %d has higher prio than %d",
-                          get_server_id(first), get_server_id(second));
+                          first->id, second->id);
         else
                 TRACE_SUB("server %d does not have higher prio than %d",
-                          get_server_id(first), get_server_id(second));
+                          first->id, second->id);
 
-        return lt_before(get_server_deadline(first),
-                         get_server_deadline(second)) ||
-                (get_server_deadline(first) == get_server_deadline(second) &&
-                 get_server_id(first) < get_server_id(second));
-}
-
-/*
- * Will (rightfully) crash if nothing is linked.
- */
-static inline lt_t effective_deadline(cpu_entry_t *entry)
-{
-        if (is_be(entry->linked))
-                return get_server_deadline(entry->linked_server);
-        else
-                return get_deadline(entry->linked);
+        return server_higher_prio(first, second);
 }
 
 /*
@@ -203,63 +189,35 @@ static inline lt_t effective_deadline(cpu_entry_t *entry)
 static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 {
         cpu_entry_t *first, *second;
-        int first_class = 4, second_class = 4;
-        int rv = 0;
         first = a->value;
         second = b->value;
-
-        if (first->linked)
-                first_class = get_class(first->linked);
-        if (second->linked)
-                second_class = get_class(second->linked);
-
+        int rv = 0;
 
         TRACE_SUB("comparing P%d and P%d", first->cpu, second->cpu);
 
         if (first->linked && second->linked) {
-
-                /* Treat BE servers as EDF SRT tasks */
-                if ((first_class | second_class) ==
-                    (RT_CLASS_SOFT | RT_CLASS_BEST_EFFORT) ||
-                    (first_class & second_class) == RT_CLASS_BEST_EFFORT ||
-                    (first_class & second_class) == RT_CLASS_SOFT) {
-                        TRACE_SUB("%d %d %d %d %d %d", first_class, second_class,
-                                  first_class | second_class,
-                                  RT_CLASS_SOFT | RT_CLASS_BEST_EFFORT,
-                                  RT_CLASS_BEST_EFFORT,
-                                  RT_CLASS_SOFT);
-                        rv = lt_before(effective_deadline(second),
-                                       effective_deadline(first));
-                        goto out;
-                }
-                /* Otherwise put HRT at the end of the list */
-                if (first_class != second_class){
-                        TRACE_SUB("1");
-                        rv = second_class < first_class;
-                        goto out;
-                }
-                /* Reverse FIFO for BE */
-                if (first_class == RT_CLASS_BEST_EFFORT) {
-                        TRACE_SUB("1");
-                        rv = lt_before(get_release(second->linked),
-                                       get_release(first->linked));
-                        goto out;
+                TRACE_SUB("1");
+                /* HRT always wins */
+                if (is_hrt(first->linked) && !is_hrt(second->linked)) {
+                        TRACE_SUB("2");
+                        return 0;
+                } else if (is_hrt(second->linked) && !is_hrt(first->linked)) {
+                        TRACE_SUB("3");
+                        return 1;
                 }
-                /* Sort HRT by CPU (just for a consistent scheme) */
-                if (first_class == RT_CLASS_HARD) {
-                        TRACE_SUB("1");
-                        rv = first->cpu < second->cpu;
-                        goto out;
+                else if (is_hrt(first->linked) && is_hrt(second->linked)) {
+                        TRACE_SUB("4");
+                        return first->cpu > second->cpu;
                 }
-                TRACE_SUB("1");
-                /* Reverse EDF for SRT */
-                rv = edf_higher_prio(second->linked,
-                                     first->linked);
-                goto out;
-        }
 
+                rv = !server_higher_prio(first->linked_server,
+                                         second->linked_server);
+                TRACE_SUB("rv: %d", rv);
+                return rv;
+        }
+
         rv = second->linked && !first->linked;
-out:
+        TRACE_SUB("rv: %d", rv);
         return rv;
 }
 
@@ -269,7 +227,7 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 static inline void update_cpu_position(cpu_entry_t *entry)
 {
         if (likely(bheap_node_in_heap(entry->hn)))
-                bheap_delete(cpu_lower_prio, &cpu_heap, entry->hn);
+                bheap_delete(server_order, &cpu_heap, entry->hn);
         TRACE_SUB("Updating position of cpu P%d", entry->cpu);
         bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn);
 }
@@ -284,76 +242,103 @@ static inline cpu_entry_t* lowest_prio_cpu(void)
 }
 
 /*
+ * Does the hrt_server have a non-zero WCET and period?
+ */
+static inline int check_hrt_server_initialized(hrt_server_t *hrt_server)
+{
+        return hrt_server->server.wcet && hrt_server->server.period;
+}
+
+/*
  * Assumes called with local IRQs off
  *
  * This method arms the slack timer if the following conditions are true:
  * - There is server budget remaining.
  * - The no_slack flag is not already set.
  */
-static void slack_timer_arm(hrt_server_t *server)
+static void slack_timer_arm(hrt_server_t *hrt_server)
 {
-        cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server);
-        struct hrtimer *timer = &server->slack_timer;
-        lt_t when_to_fire = get_server_deadline(server) -
-                            get_server_budget(server);
+        int cpu;
+        cpu_entry_t *entry;
+        struct hrtimer *timer;
+        lt_t when_to_fire;
+
+        if (!check_hrt_server_initialized(hrt_server))
+                return;
+
+        timer = &hrt_server->slack_timer;
+        entry = container_of(hrt_server, cpu_entry_t, hrt_server);
+
+#ifdef SLACK_ON_MASTER
+        if (release_master != NO_CPU)
+                cpu = release_master;
+        else
+#endif
+                cpu = entry->cpu;
+
+        TRACE_SUB("attempting to arm timer %p on cpu P%d",
+                  timer, cpu);
+
+        when_to_fire = hrt_server->server.deadline - hrt_server->server.budget;
 
         /* Ensure the timer is needed */
         if (hrtimer_active(timer)) {
                 TRACE_SUB("not arming slack timer, already armed");
                 return;
-        } else if (get_server_deadline(server) == 0) {
+        } else if (hrt_server->server.deadline == 0) {
                 TRACE_SUB("not arming slack timer, server not releasing");
                 return;
-        } else if (server->no_slack || get_server_budget(server) <= 0) {
-                TRACE_SUB("not arming slack timer, no_slack: %d, budget: %llu",
-                          server->no_slack, get_server_budget(server));
+        } else if (hrt_server->no_slack || hrt_server->server.budget <= 0 ||
+                   !hrt_server->ready) {
+                TRACE_SUB("not arming slack timer, no_slack: %d, budget: %llu,"
+                          "ready: %d", hrt_server->no_slack,
+                          hrt_server->server.budget, hrt_server->ready);
                 return;
         }
 
-        BUG_ON(when_to_fire >= get_server_deadline(server));
-        BUG_ON(entry->cpu != smp_processor_id());
+        BUG_ON(when_to_fire >= hrt_server->server.deadline);
 
+        /* Set timer */
         if (lt_after_eq(litmus_clock(), when_to_fire)) {
                 TRACE_SUB("slack timer fired immediately, time was %llu,",
                           TIME(when_to_fire));
-                server->no_slack = 1;
+                hrt_server->no_slack = 1;
+        } else if (cpu != smp_processor_id()) {
+                atomic_set(&hrt_server->slack_timer_info.state,
+                           HRTIMER_START_ON_INACTIVE);
+                hrtimer_start_on(cpu,
+                                 &hrt_server->slack_timer_info,
+                                 &hrt_server->slack_timer,
+                                 ns_to_ktime(when_to_fire),
+                                 HRTIMER_MODE_ABS_PINNED);
         } else {
-                TRACE_SUB("slack timer armed to fire at %llu on %d",
-                          TIME(when_to_fire), entry->cpu);
-                __hrtimer_start_range_ns(timer, ns_to_ktime(when_to_fire),
+                __hrtimer_start_range_ns(&hrt_server->slack_timer,
+                                         ns_to_ktime(when_to_fire),
                                          0 /* delta */,
                                          HRTIMER_MODE_ABS_PINNED,
                                          0 /* no wakeup */);
         }
+
+        TRACE_SUB("slack timer armed to fire at %llu on %d",
+                  TIME(when_to_fire), entry->cpu);
 }
 
 /*
  * Will do nothing if the timer is not armed.
  */
-static inline void slack_timer_cancel(hrt_server_t *server)
+static inline void slack_timer_cancel(hrt_server_t *hrt_server)
 {
         int ret;
-        cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server);
-        TRACE_SUB("cancelling slack timer on P%d", entry->cpu);
-        if (hrtimer_active(&server->slack_timer)) {
-                ret = hrtimer_try_to_cancel(&server->slack_timer);
+        cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server);
+        TRACE_SUB("cancelling slack timer %p on P%d",
+                  hrt_server->slack_timer, entry->cpu);
+        if (hrtimer_active(&hrt_server->slack_timer)) {
+                ret = hrtimer_try_to_cancel(&hrt_server->slack_timer);
                 if (ret == -1)
                         TRACE_SUB("slack timer was running concurrently");
         }
 }
 
-static inline void release_srt_server(struct task_struct *task,
-                                      srt_state_t *server_state)
-{
-        TRACE_TASK_SUB(task, "releasing SRT server %d",
-                       server_state->job_no);
-        server_state->release = get_release(task);
-        server_state->deadline = server_state->release +
-                                 get_rt_period(task);
-        server_state->budget = get_exec_cost(task);
-        server_state->job_no++;
-}
-
 /*
  * Begins server execution and arms any timers necessary.
  * This will also load any SRT server state into the entry's
@@ -362,54 +347,35 @@
  * tasks as this method cannot determine which BE server to use.
  */
 static noinline void link_server(cpu_entry_t *entry,
-                                 be_server_t *be_server)
+                                 server_t *be_server)
 {
-        srt_state_t *server_state;
+        server_t *srt_server;
 
         if (is_srt(entry->linked)) {
-                server_state = tsk_rt(entry->linked)->plugin_data;
-
-                /* Re-release the server if the job passed a release time.
-                 * Note that the job could have overrun its deadline. In
-                 * this case, the server will re-release but the job will
-                 * not until it can exhaust its exec time.
-                 */
-                if (lt_before(server_state->release,
-                              get_release(entry->linked))) {
-                        release_srt_server(entry->linked, server_state);
-                }
-
+                srt_server = tsk_rt(entry->linked)->plugin_data;
                 TRACE_TASK_SUB(entry->linked, "resuming SRT server,"
                                "budget*: %llu, exec_time: %llu, deadline: %llu,"
                                "job_no: %d",
-                               server_state->budget, get_exec_time(entry->linked),
-                               get_deadline(entry->linked), server_state->job_no);
-                BUG_ON(lt_after(server_state->budget,
+                               srt_server->budget, get_exec_time(entry->linked),
+                               get_deadline(entry->linked), srt_server->job_no);
+                BUG_ON(lt_after(srt_server->budget,
                                 get_exec_cost(entry->linked)));
-                BUG_ON(server_state->job_no <
-                       tsk_rt(entry->linked)->job_params.job_no);
-                BUG_ON(lt_after(get_deadline(entry->linked),
-                                server_state->deadline));
 
-                entry->srt_server.budget = server_state->budget;
-                entry->srt_server.job_no = server_state->job_no;
-                entry->srt_server.id = entry->linked->pid;
+                BUG_ON(srt_server->job_no < task_job_no(entry->linked));
+                BUG_ON(lt_after(get_deadline(entry->linked),
+                                srt_server->deadline));
 
-                entry->linked_server = (server_t*)&entry->srt_server;
+                entry->linked_server = srt_server;
         } else if (is_hrt(entry->linked)) {
-                BUG_ON(is_be(entry->linked) || is_srt(entry->linked));
                 /* HRT servers should never, ever migrate */
                 BUG_ON(entry->cpu != task_cpu(entry->linked));
-
-                entry->linked_server = (server_t*)&entry->hrt_server;
+                slack_timer_cancel(&entry->hrt_server);
+                entry->linked_server = &entry->hrt_server.server;
         } else { /* BE */
                 BUG_ON(!be_server);
                 BUG_ON(bheap_node_in_heap(be_server->hn));
-                entry->linked_server = (server_t*)be_server;
-                sched_trace_action(entry->linked,
-                                   200 + get_server_id(be_server));
-
-
+                entry->linked_server = be_server;
+                sched_trace_action(entry->linked, 200 + be_server->id);
         }
 
         server_run(entry->linked_server, entry->linked, &server_domain);
@@ -421,49 +387,53 @@
  * This must be called BEFORE a task is unlinked.
  */
 static noinline void unlink_server(cpu_entry_t *entry,
-                                   be_server_t *next_server)
+                                   server_t *be_server)
 {
-        srt_state_t *server_state;
-
-        if (entry->linked_server) {
-                server_stop(entry->linked_server, &server_domain);
-
-                /* Save the state of the SRT server inside the task.
-                 * This allows for sharing of a single SRT server per cpu.
-                 */
-                if (is_srt(entry->linked)) {
-                        server_state = tsk_rt(entry->linked)->plugin_data;
-
-                        TRACE_TASK_SUB(entry->linked, "saving SRT server, job: %d,"
-                                       "budget*: %llu, exec_time: %llu, deadline: %llu",
-                                       entry->srt_server.job_no,
-                                       entry->srt_server.budget,
-                                       get_exec_time(entry->linked),
-                                       get_deadline(entry->linked));
-                        BUG_ON(lt_after(entry->srt_server.budget,
-                                        get_exec_cost(entry->linked)));
-                        BUG_ON(entry->srt_server.job_no + 1 <
-                               tsk_rt(entry->linked)->job_params.job_no);
-                        BUG_ON(lt_after(get_deadline(entry->linked),
-                                        server_state->deadline));
-
-                        server_state->budget = entry->srt_server.budget;
-                        server_state->job_no = entry->srt_server.job_no;
-                        entry->srt_server.id = -1;
-                        entry->srt_server.budget = 0;
-                }
-
-                /* Requeue eligible BE servers if they are not
-                 * about to run again.
-                 */
-                if (is_be(entry->linked) &&
-                    (be_server_t*)entry->linked_server != next_server &&
-                    get_server_budget(entry->linked_server)) {
-                        BUG_ON(bheap_node_in_heap(((be_server_t*)entry->linked_server)->hn));
-                        TRACE_SUB("inserting be server %d",
-                                  get_server_id(entry->linked_server));
-                        bheap_insert(be_server_order, &be_ready_servers,
-                                     ((be_server_t*)entry->linked_server)->hn);
+        lt_t now;
+        int added;
+        hrt_server_t *hrt_server = &entry->hrt_server;
+
+        if (!entry->linked_server)
+                return;
+
+        server_stop(entry->linked_server, &server_domain);
+        now = litmus_clock();
+
+        if (is_hrt(entry->linked) && !hrt_server->ready) {
+                added = 0;
+                if (lt_before(now, entry->linked_server->release)) {
+                        /* Try to add to release */
+                        added = add_server_release(entry->linked_server,
+                                                   &server_domain);
+                }
+
+                /* We missed the release! */
+                if (!added) {
+                        TRACE_SUB("%d missed and ready", entry->cpu);
+                        hrt_server->ready = 1;
+                }
+
+        }
+
+        /* Requeue eligible BE servers if they are not about to run again */
+        if (is_be(entry->linked) && entry->linked_server != be_server) {
+                if (lt_before(entry->linked_server->release, now)) {
+                        TRACE_SUB("inserting %d", entry->linked_server->id);
+                        bheap_insert(server_order, &be_ready_servers,
+                                     entry->linked_server->hn);
+                } else {
+                        /* Try to add to release */
+                        added = add_server_release(entry->linked_server,
+                                                   &server_domain);
+
+                        /* We missed the release! */
+                        if (!added) {
+                                TRACE_SUB("%d missed release",
+                                          entry->linked_server->id);
+                                bheap_insert(server_order,
+                                             &be_ready_servers,
+                                             entry->linked_server->hn);
+                        }
                 }
         }
 }
@@ -475,10 +445,10 @@
  */
 static noinline void link_task_to_cpu(cpu_entry_t *entry,
                                       struct task_struct* linked,
-                                      be_server_t* be_server)
+                                      server_t* be_server)
 {
         cpu_entry_t *sched;
-        be_server_t *tmp_server;
+        server_t *tmp_server;
         struct task_struct *tmp_task;
         int on_cpu;
 
@@ -516,7 +486,8 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
                           sched->cpu);
 
                 tmp_task = sched->linked;
-                tmp_server = (be_server_t*)sched->linked_server;
+                tmp_server = sched->linked_server;
+
                 unlink_server(sched, tmp_server);
 
                 linked->rt_param.linked_on = sched->cpu;
@@ -524,6 +495,7 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
                 link_server(sched, be_server);
 
                 update_cpu_position(sched);
+                BUG_ON(!entry->linked && entry->linked_server);
 
                 linked = tmp_task;
                 be_server = tmp_server;
@@ -537,6 +509,12 @@ static noinline void link_task_to_cpu(cpu_entry_t *entry,
                 link_server(entry, be_server);
         else
                 entry->linked_server = NULL;
+        BUG_ON(!entry->linked && entry->linked_server);
+        /* Slack timer needs to be recalculated */
+        if (!entry->linked || !is_hrt(entry->linked)) {
+                slack_timer_arm(&entry->hrt_server);
+        }
+
 
         update_cpu_position(entry);
 
@@ -569,7 +547,7 @@ static noinline void unlink(struct task_struct* t)
         cpu_entry_t *entry;
 
         if (t->rt_param.linked_on != NO_CPU) {
-                /* unlink */
+                /* Unlink */
                 entry = task_linked_entry(t);
                 link_task_to_cpu(entry, NULL, NULL);
         } else if (is_queued(t)) {
@@ -580,7 +558,6 @@ static noinline void unlink(struct task_struct* t)
                  * been preempted but completes before it is
                  * descheduled.
                  */
-                TRACE_SUB("this");
                 entry = task_sched_entry(t);
                 remove(get_rt_domain(entry, t), t);
         }
@@ -591,12 +568,31 @@
  * job's deadline is earlier than the server's next deadline, or the server
  * has zero slack time in its current period.
  */
-static inline int is_eligible(struct task_struct *task, hrt_server_t *server)
+static inline int is_eligible(struct task_struct *task,
+                              hrt_server_t *hrt_server)
 {
-        TRACE_SUB("slack: %d, svd: %llu, tdu: %llu", server->no_slack,
-                  get_server_deadline(server), get_deadline(task));
-        return server->no_slack ||
-                lt_after_eq(get_server_deadline(server), get_deadline(task));
+        TRACE_SUB("slack: %d, svd: %llu, tdu: %llu, ready: %d",
+                  hrt_server->no_slack, hrt_server->server.deadline,
+                  get_deadline(task), hrt_server->ready);
+        return hrt_server->ready &&
+                (hrt_server->no_slack ||
+                 lt_after_eq(hrt_server->server.deadline, get_deadline(task)));
+}
+
+/*
+ * Set the server to release at the closest preceding deadline to
+ * the time given.
+ */
+static inline void catchup_server(server_t *server, lt_t time)
+{
+        lt_t diff = time - server->deadline;
+        lt_t sub  = diff % server->period;
+
+        BUG_ON(server->running);
+
+        server_release_at(server, time - sub);
+        TRACE_SUB("catching up server %d to %llu",
+                  server->id, server->deadline);
 }
 
 /*
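To make the modular arithmetic in catchup_server() above concrete: the helper rolls a late server forward to the last period boundary at or before the given time. The standalone restatement below, with made-up numbers, is purely illustrative (lt_t values are nanoseconds in practice).

/* Illustration only: with deadline = 100, period = 20, time = 173,
 * diff = 73 and sub = 73 % 20 = 13, so the server is released at
 * 173 - 13 = 160, the latest boundary of the original period grid
 * that is not in the future. */
static inline lt_t catchup_release_time(lt_t deadline, lt_t period, lt_t time)
{
        lt_t diff = time - deadline;
        lt_t sub  = diff % period;
        return time - sub;
}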
@@ -604,27 +600,35 @@ static inline int is_eligible(struct task_struct *task, hrt_server_t *server)
  * the server is ineligible or there are no eligible jobs,
  * returns NULL.
  *
- * No locks here, because this method accesses only CPU-local state, and is
- * called from schedule(), which disables interrupts.
- *
  * Assumes called with local IRQs disabled.
  */
-static struct task_struct* next_eligible_hrt(hrt_server_t *server)
+static struct task_struct* next_eligible_hrt(hrt_server_t *hrt_server)
 {
+        lt_t now = litmus_clock();
         struct task_struct *task = NULL;
 
+        /* Catch up server if it is late */
+        if (hrt_server->server.deadline && !hrt_server->server.running &&
+            lt_before_eq(hrt_server->server.deadline, now)) {
+                catchup_server(&hrt_server->server, now);
+                slack_timer_arm(hrt_server);
+                TRACE_SUB("P%d now ready", hrt_server->server.id);
+                hrt_server->ready = 1;
+                sched_trace_action(NULL, 2);
+        }
+
         TRACE_SUB("searching for eligible hrt");
 
-        if (get_server_budget(server) > 0) {
-                task = __peek_ready(&server->hrt_domain);
-                if (task && !is_eligible(task, server)) {
+        if (hrt_server->server.budget > 0) {
+                task = __peek_ready(&hrt_server->hrt_domain);
+                if (task && !is_eligible(task, hrt_server)) {
                         TRACE_TASK_SUB(task, "wasn't eligible");
                         task = NULL;
                 } else if (task) {
                         TRACE_TASK_SUB(task, "was eligible, slack: %d, td: %llu"
-                                       "sd: %llu", server->no_slack,
+                                       "sd: %llu", hrt_server->no_slack,
                                        TIME(get_deadline(task)),
-                                       TIME(get_server_deadline(server)));
+                                       TIME(hrt_server->server.deadline));
                 }
         }
 
632} 636}
633 637
634/* 638/*
639 * The highest priority BE server.
640 */
641static inline server_t* next_be_server(void)
642{
643 struct bheap_node *hn = bheap_peek(server_order, &be_ready_servers);
644 return (hn) ? hn->value : NULL;
645}
646
647/*
648 * Either an srt task or a be server is next. The deadline of the chosen
649 * task / server is put into deadline.
650 * TODO: remove this class business.
651 */
652static task_class_t next_global_task(struct task_struct **task_srt,
653 server_t **server_be,
654 lt_t *deadline)
655{
656 task_class_t rv = RT_CLASS_HARD; /* Represents invalid here */
657 struct task_struct *next_srt;
658 server_t *be_server, *srt_server;
659 lt_t now = litmus_clock();
660
661 *task_srt = NULL;
662 *server_be = NULL;
663 *deadline = 0;
664
665 be_server = next_be_server();
666 next_srt = __peek_ready(&srt_domain);
667
668 /* Catch up any late be servers. This happens when the servers
669 * could not find tasks to schedule or if the system is
670 * overutilized.
671 */
672 while (be_server && lt_before_eq(be_server->deadline, now)) {
673 bheap_delete(server_order, &be_ready_servers,
674 be_server->hn);
675 catchup_server(be_server, now);
676 sched_trace_action(NULL, 100 + be_server->id);
677 bheap_insert(server_order, &be_ready_servers,
678 be_server->hn);
679 be_server = next_be_server();
680 }
681
682 /* Catch up srt server. This happens when the job is tardy due
683 * to overutilization of the system.
684 */
685 if (next_srt) {
686 srt_server = tsk_rt(next_srt)->plugin_data;
687 if (srt_server->deadline < get_deadline(next_srt)) {
688 TRACE_SUB("catching up SRT to %llu",
689 get_release(next_srt));
690 server_release_at(srt_server, get_release(next_srt));
691 srt_server->job_no = task_job_no(next_srt);
692 }
693 }
694
695 TRACE_SUB("be_server: %d, next_srt: %d",
696 (be_server) ? be_server->id : -1,
697 (next_srt) ? next_srt->pid : -1);
698 BUG_ON(next_srt && !is_srt(next_srt));
699
700 if (next_srt && (!be_server ||
701 lt_before(get_deadline(next_srt),
702 be_server->deadline))) {
703 /* SRT is next task */
704 *server_be = NULL;
705 *task_srt = next_srt;
706 *deadline = get_deadline(next_srt);
707 rv = RT_CLASS_SOFT;
708 } else if (be_server) {
709 /* BE is next task */
710 *server_be = be_server;
711 *task_srt = NULL;
712 *deadline = be_server->deadline;
713 rv = RT_CLASS_BEST_EFFORT;
714 }
715
716 return rv;
717}
718
719/*
635 * Adds a task to the appropriate queue (ready / release) in a domain. 720 * Adds a task to the appropriate queue (ready / release) in a domain.
636 */ 721 */
637static noinline void requeue(struct task_struct *task, rt_domain_t *domain) 722static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
@@ -658,7 +743,7 @@ static noinline void requeue(struct task_struct *task, rt_domain_t *domain)
  * be tasks. For other tasks, the server can be calculated later.
  */
 static void preempt(cpu_entry_t *entry, struct task_struct *next,
-                    be_server_t *be_server)
+                    server_t *be_server)
 {
         rt_domain_t *domain;
 
@@ -671,55 +756,11 @@ static void preempt(cpu_entry_t *entry, struct task_struct *next,
         preempt_if_preemptable(entry->scheduled, entry->cpu);
 }
 
-
-static inline be_server_t* next_be_server(void)
-{
-        struct bheap_node *hn = bheap_peek(be_server_order, &be_ready_servers);
-        return (hn) ? hn->value : NULL;
-}
-
-static task_class_t next_global_task(struct task_struct **task_srt,
-                                     be_server_t **be_server,
-                                     lt_t *deadline)
-{
-        task_class_t rv = RT_CLASS_HARD; /* Represents invalid here */
-        struct task_struct *next_srt;
-        be_server_t *next_server;
-
-        *task_srt = NULL;
-        *be_server = NULL;
-        *deadline = 0;
-
-        next_server = next_be_server();
-        next_srt = __peek_ready(&srt_domain);
-
-        TRACE_SUB("be_server: %d, next_srt: %d",
-                  (next_server) ? get_server_id(next_server) : -1,
-                  (next_srt) ? next_srt->pid : -1);
-        BUG_ON(next_srt && !is_srt(next_srt));
-
-        if (next_srt && (!next_server ||
-                         lt_before(get_deadline(next_srt),
-                                   get_server_deadline(next_server)))) {
-                /* SRT is next task */
-                *task_srt = next_srt;
-                *deadline = get_deadline(next_srt);
-                rv = RT_CLASS_SOFT;
-        } else if (next_server) {
-                /* BE is next task */
-                *be_server = next_server;
-                *deadline = get_server_deadline(next_server);
-                rv = RT_CLASS_BEST_EFFORT;
-        }
-
-        return rv;
-}
-
 static void check_for_global_preempt(void)
 {
         task_class_t class;
         cpu_entry_t *entry;
-        be_server_t *be_server = NULL;
+        server_t *be_server = NULL;
         struct task_struct *next_srt, *next_be;
         struct task_struct *next_task = (struct task_struct*)1; /* Not null */
         lt_t deadline;
@@ -728,7 +769,7 @@ static void check_for_global_preempt(void)
              entry = lowest_prio_cpu()) {
                 next_task = NULL;
 
-                TRACE_SUB("1");
+                TRACE_SUB("CPU P%d", entry->cpu);
 
                 /* HRT always wins */
                 if (entry->linked && is_hrt(entry->linked))
@@ -770,35 +811,24 @@ static void check_for_global_preempt(void)
                 TRACE_SUB("7");
                 /* Something is scheduled, compare effective deadlines */
                 if ((next_srt || be_server) &&
-                    lt_before(deadline, effective_deadline(entry))) {
+                    lt_before(deadline, entry->linked_server->deadline)){
                         TRACE_SUB("8");
                         /* Swap two BE servers here */
                         if ((class & get_class(entry->linked)) ==
                             RT_CLASS_BEST_EFFORT) {
                                 TRACE_SUB("9");
-                                bheap_delete(be_server_order,
+                                bheap_delete(server_order,
                                              &be_ready_servers,
                                              be_server->hn);
                                 unlink_server(entry, be_server);
                                 link_server(entry, be_server);
+                                update_cpu_position(entry);
                                 next_task = entry->linked;
                                 goto loop_end;
                         }
                         TRACE_SUB("10");
                         goto loop_end_preempt;
                 }
-                TRACE_SUB("11");
-                /* A BE server is scheduled, check for FIFO preemption
-                 * within the server itself.
-                 */
-                if (is_be(entry->linked) && next_be &&
-                    lt_before(get_release(next_be),
-                              get_release(entry->linked))) {
-                        TRACE_SUB("12");
-                        be_server = (be_server_t*)entry->linked_server;
-                        class = RT_CLASS_BEST_EFFORT;
-                        goto loop_end_preempt;
-                }
                 goto loop_end;
 
         loop_end_preempt:
@@ -809,8 +839,8 @@ static void check_for_global_preempt(void)
                         next_task = next_be;
                         if (bheap_node_in_heap(be_server->hn)) {
                                 TRACE_SUB("deleting be server %d",
-                                          get_server_id(be_server));
-                                bheap_delete(be_server_order, &be_ready_servers,
+                                          be_server->id);
+                                bheap_delete(server_order, &be_ready_servers,
                                              be_server->hn);
                         }
                 }
@@ -891,7 +921,7 @@ static void edf_hsb_pick_next(cpu_entry_t *entry)
         task_class_t class;
         lt_t deadline;
         struct task_struct *next_hrt, *next_srt, *next_be;
-        be_server_t *be_server;
+        server_t *be_server;
 
         BUG_ON(entry->linked);
 
@@ -905,16 +935,16 @@ static void edf_hsb_pick_next(cpu_entry_t *entry)
                 class = next_global_task(&next_srt,
                                          &be_server,
                                          &deadline);
-                if (class == RT_CLASS_SOFT) {
+                if (next_srt) {
                         remove(&srt_domain, next_srt);
                         link_task_to_cpu(entry, next_srt, NULL);
-                } else if (class == RT_CLASS_BEST_EFFORT) {
+                } else if (be_server) {
                         next_be = __take_ready(&be_domain);
 
                         if (next_be) {
                                 TRACE_SUB("deleting be server %d",
-                                          get_server_id(be_server));
-                                bheap_delete(be_server_order, &be_ready_servers,
+                                          be_server->id);
+                                bheap_delete(server_order, &be_ready_servers,
                                              be_server->hn);
                                 link_task_to_cpu(entry, next_be, be_server);
                         } else {
@@ -1009,14 +1039,16 @@ static void release_be_jobs(rt_domain_t *domain, struct bheap *tasks)
 static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer)
 {
         unsigned long flags;
-        cpu_entry_t *entry = local_cpu_entry;
-        hrt_server_t *server = &entry->hrt_server;
+        hrt_server_t *server = container_of(timer, hrt_server_t, slack_timer);
+        cpu_entry_t *entry = container_of(server, cpu_entry_t, hrt_server);
 
         if (!is_active_plugin) {
                 TRACE("Not active plugin, timer not doing anything\n");
                 goto out;
         }
 
+        BUG_ON(!server->ready);
+
         raw_spin_lock_irqsave(global_lock, flags);
 
         TRACE_TIMER("slack timer fired");
@@ -1033,20 +1065,27 @@ static enum hrtimer_restart slack_timer_fire(struct hrtimer *timer)
 
 static void job_completion(cpu_entry_t *entry, struct task_struct* task)
 {
-        srt_state_t *server_state = tsk_rt(task)->plugin_data;
+        server_t *srt_server;
         set_rt_flags(task, RT_F_SLEEP);
 
         TRACE_TASK_SUB(task, "completed");
 
         unlink(task);
 
-        if (is_srt(task) &&
-            (server_state->job_no > tsk_rt(task)->job_params.job_no)) {
+        if (is_srt(task)) {
+                srt_server = tsk_rt(task)->plugin_data;
+
                 /* If the task is behind the server it must release immediately,
                  * leaving its release time and deadline unchanged.
                  */
-                TRACE_TASK_SUB(task, "catching up");
-                tsk_rt(task)->job_params.job_no++;
+                if (srt_server->job_no > tsk_rt(task)->job_params.job_no) {
+                        TRACE_TASK_SUB(task, "catching up");
+                        tsk_rt(task)->job_params.job_no++;
+                } else {
+                        /* Otherwise release them both */
+                        prepare_for_next_period(task);
+                        server_release(srt_server);
+                }
         } else {
                 prepare_for_next_period(task);
         }
@@ -1066,21 +1105,16 @@ static void job_completion(cpu_entry_t *entry, struct task_struct* task)
  */
 static void server_completed(server_t *server, struct task_struct *task)
 {
+        hrt_server_t *hrt_server;
         cpu_entry_t *entry = task_linked_entry(task);
-        srt_state_t *server_state;
 
         TRACE_TASK_TIMER(task, "server %d completed, task exec: %llu",
                          server->id, get_exec_time(task));
-
         BUG_ON(entry->linked != task);
         sched_trace_action(entry->linked, 1);
-        unlink(task);
 
         if (is_srt(task)) {
                 TRACE_TASK_SUB(task, "must wait on server");
-                server_state = tsk_rt(task)->plugin_data;
-
-                BUG_ON(server_state->budget);
 
                 /* The job must now take the priority and release time
                  * of the next server. We do this so that we can still
@@ -1090,86 +1124,104 @@ static void server_completed(server_t *server, struct task_struct *task)
1090 * easily detect the job catching up later. It will not 1124 * easily detect the job catching up later. It will not
1091 * wake up until its next release. 1125 * wake up until its next release.
1092 */ 1126 */
1093 tsk_rt(task)->job_params.release = 1127 tsk_rt(task)->job_params.release = server->deadline;
1094 server_state->deadline; 1128 tsk_rt(task)->job_params.deadline = server->deadline +
1095 tsk_rt(task)->job_params.deadline = 1129 get_rt_period(task);
1096 server_state->deadline + get_rt_period(task);
1097 } 1130 }
1098 1131
1099 /* Need to pick the next task to run */ 1132 /* Someone else may have brought this server forward.
1133 * In reality, this only happens to HRT servers. In this case,
1134 * we do not want to prepare
1135 */
1136 server_release(server);
1137
1138 if (is_hrt(task)) {
1139 hrt_server = container_of(server, hrt_server_t, server);
1140 TRACE_SUB("P%d no longer ready", entry->cpu);
1141 hrt_server->ready = 0;
1142 }
1143
1144 unlink(task);
1100 requeue(task, get_rt_domain(entry, task)); 1145 requeue(task, get_rt_domain(entry, task));
1146
1147 /* Need to pick the next task to run */
1101 edf_hsb_pick_next(entry); 1148 edf_hsb_pick_next(entry);
1102 preempt_if_preemptable(entry->scheduled, entry->cpu); 1149 preempt_if_preemptable(entry->scheduled, entry->cpu);
1103} 1150}
1104 1151
1105/* 1152static void hrt_server_released(server_t *server)
1106 * Assumes called with local irqs disabled.
1107 */
-static void hrt_server_released(pserver_t *_server)
+static void hrt_server_released(server_t *server)
 {
-	cpu_entry_t *entry = local_cpu_entry;
-	hrt_server_t *server = &entry->hrt_server;
+	hrt_server_t *hrt_server = container_of(server, hrt_server_t, server);
+	cpu_entry_t *entry = container_of(hrt_server, cpu_entry_t, hrt_server);
 
-	BUG_ON(hrtimer_active(&server->slack_timer));
+	BUG_ON(hrtimer_active(&hrt_server->slack_timer));
 	sched_trace_action(entry->scheduled, 2);
 	TRACE_TIMER("hrt server released on P%d", entry->cpu);
 
-	server->no_slack = 0;
-	check_for_hrt_preempt(entry);
-	slack_timer_arm(server);
+	hrt_server->no_slack = 0;
+	hrt_server->ready = 1;
+
+	/* Boot off an HRT task which has become ineligible */
+	if (entry->linked && is_hrt(entry->linked) &&
+	    !is_eligible(entry->linked, hrt_server)) {
+
+		requeue(entry->linked, &hrt_server->hrt_domain);
+		unlink(entry->linked);
+
+		server_release(server);
+		edf_hsb_pick_next(entry);
+		preempt_if_preemptable(entry->scheduled, entry->cpu);
+	} else {
+		/* Otherwise check to see if a different task should
+		 * be running.
+		 */
+		check_for_hrt_preempt(entry);
+
+		/* Ensure slack timer is only running if the current
+		 * job is not HRT.
+		 */
+		if (entry->linked && is_hrt(entry->linked))
+			slack_timer_cancel(hrt_server);
+		else
+			slack_timer_arm(hrt_server);
+	}
 }
 
-/*
- * Assumes called with local irqs disabled.
- */
-static void be_server_released(pserver_t *server)
-{
-	be_server_t *be_server = container_of((pserver_t*)server,
-					      be_server_t, pserver);
-
-	sched_trace_action(get_server_scheduled(server), 100 + server->server.id);
-	BUG_ON(bheap_node_in_heap(be_server->hn) &&
-	       get_server_running(be_server));
-
-	if (bheap_node_in_heap(be_server->hn)) {
-		TRACE_SUB("deleting be server %d",
-			  get_server_id(server));
-		bheap_delete(be_server_order, &be_ready_servers,
-			     be_server->hn);
-	}
-
-	/* It is very possible for a server that is already running to
-	 * release. Do not add if this is the case.
-	 */
-	if (!get_server_running(be_server)) {
-		TRACE_SUB("inserting be server %d",
-			  get_server_id(server));
-		bheap_insert(be_server_order, &be_ready_servers, be_server->hn);
-	}
-
-	check_for_global_preempt();
-}
-
 static void servers_released(struct list_head *servers)
 {
-
+	int was_be = 0;
+	unsigned long flags;
+	struct list_head *pos;
+	server_t *server;
+
+	raw_spin_lock_irqsave(global_lock, flags);
+
+	sched_trace_action(NULL, 2);
+
+	list_for_each(pos, servers) {
+		server = list_entry(pos, server_t, release_list);
+
+		if (server->type == RT_CLASS_BEST_EFFORT) {
+			was_be = 1;
+			BUG_ON(bheap_node_in_heap(server->hn));
+			TRACE_SUB("inserting be server %d", server->id);
+			bheap_insert(server_order, &be_ready_servers,
+				     server->hn);
+		} else { /* HRT server */
+			hrt_server_released(server);
+		}
+	}
+
+	if (was_be)
+		check_for_global_preempt();
+
+	raw_spin_unlock_irqrestore(global_lock, flags);
 }
 
-
 /******************************************************************************
  * Server management methods
  ******************************************************************************/
 
-/*
- * Do all hrt_servers have a non-zero WCET and period? Return 0 if so.
- */
-static int check_hrt_server_initialized(cpu_entry_t* entry)
-{
-	hrt_server_t *hrt_server = &entry->hrt_server;
-	return get_server_wcet(hrt_server) && get_server_period(hrt_server);
-}
-
-
 static int curr_be = 0;
 
 /*
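The grouped-release contract above is worth spelling out: servers_released() is invoked from the release timer in hrtimer context with the heap's servers chained through their release_list fields, and the callback owns its own locking. A minimal skeleton for a hypothetical plugin (sketch only; my_plugin_lock and the requeue policy are illustrative, not part of this patch):

	/* Sketch: the shape every servers_released_t callback follows. */
	static void my_servers_released(struct list_head *servers)
	{
		unsigned long flags;
		struct list_head *pos;
		server_t *server;

		raw_spin_lock_irqsave(&my_plugin_lock, flags);
		list_for_each(pos, servers) {
			server = list_entry(pos, server_t, release_list);
			/* requeue 'server' according to plugin policy */
		}
		raw_spin_unlock_irqrestore(&my_plugin_lock, flags);
	}
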
@@ -1180,7 +1232,7 @@ static int admit_be_server(unsigned long long wcet,
 			   int cpu)
 {
 	int rv = 0;
-	be_server_t *be_server;
+	server_t *be_server;
 
 	/* if (is_active_plugin) { */
 	/* 	rv = -EPERM; */
@@ -1191,16 +1243,12 @@ static int admit_be_server(unsigned long long wcet,
 		goto out;
 	}
 
-	be_server = kmalloc(sizeof(be_server_t), GFP_ATOMIC);
-	be_server->hn = kmalloc(sizeof(struct bheap_node), GFP_ATOMIC);
-
-	pserver_init(&be_server->pserver, BE_SERVER_BASE + ++curr_be,
-		     global_lock, wcet, period);
-	be_server->pserver.post_release = be_server_released;
-
-	bheap_node_init(&be_server->hn, be_server);
-
-	TRACE("created BE server %d (%llu, %llu)\n", get_server_id(be_server),
+	be_server = kmalloc(sizeof(server_t), GFP_ATOMIC);
+	server_init(be_server, BE_SERVER_BASE + ++curr_be,
+		    wcet, period, 1);
+	be_server->type = RT_CLASS_BEST_EFFORT;
+
+	TRACE("created BE server %d (%llu, %llu)\n", be_server->id,
 	      wcet, period);
 
 	list_add(&be_server->list, &be_servers);
@@ -1214,12 +1262,11 @@ static int admit_be_server(unsigned long long wcet,
 static void list_be_servers(struct proc_read_args *args)
 {
 	struct list_head *pos;
-	be_server_t *be_server;
+	server_t *be_server;
 
 	list_for_each(pos, &be_servers) {
-		be_server = list_entry(pos, be_server_t, list);
-		server_proc_read_single(&be_server->pserver, NO_CPU,
-					args);
+		be_server = list_entry(pos, server_t, list);
+		server_proc_read_single(be_server, NO_CPU, args);
 	}
 }
 
@@ -1228,19 +1275,15 @@ static void list_be_servers(struct proc_read_args *args)
  */
 static void stop_be_servers(void)
 {
-	be_server_t *be_server;
+	server_t *be_server;
 	struct list_head *pos, *safe;
 
-	/* if (is_active_plugin) */
-	/* 	return; */
-
 	list_for_each_safe(pos, safe, &be_servers) {
-		be_server = list_entry(pos, be_server_t, list);
-		pserver_stop_releasing(&be_server->pserver);
+		be_server = list_entry(pos, server_t, list);
 
 		list_del(pos);
 		if (bheap_node_in_heap(be_server->hn))
-			bheap_delete(be_server_order, &be_ready_servers,
+			bheap_delete(server_order, &be_ready_servers,
 				     be_server->hn);
 
 		kfree(be_server->hn);
@@ -1260,18 +1303,13 @@ static int admit_hrt_server(unsigned long long wcet,
 	hrt_server_t *hrt_server = &entry->hrt_server;
 	struct hrtimer *slack_timer = &hrt_server->slack_timer;
 
-	/* if (is_active_plugin) { */
-	/* 	rv = -EPERM; */
-	/* 	goto out; */
-	/* } */
-
 	TRACE("created HRT server %d (%llu, %llu)\n", cpu,
 	      wcet, period);
 
 	hrt_server->no_slack = 0;
 
-	pserver_init(&hrt_server->pserver, cpu, global_lock, wcet, period);
-	hrt_server->pserver.post_release = hrt_server_released;
+	server_init(&hrt_server->server, cpu, wcet, period, 1);
+	hrt_server->server.type = RT_CLASS_HARD;
 
 	edf_domain_init(&hrt_server->hrt_domain, NULL,
 			release_hrt_jobs);
@@ -1296,7 +1334,7 @@ static void list_hrt_servers(struct proc_read_args *args)
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpu_entries, cpu);
 		hrt_server = &entry->hrt_server;
-		server_proc_read_single(&hrt_server->pserver, cpu, args);
+		server_proc_read_single(&hrt_server->server, cpu, args);
 	}
 }
 
@@ -1309,19 +1347,15 @@ static void stop_hrt_servers(void)
 	cpu_entry_t *entry;
 	hrt_server_t *hrt_server;
 
-	/* if (is_active_plugin) */
-	/* 	return; */
-
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpu_entries, cpu);
 		hrt_server = &entry->hrt_server;
 
-		pserver_stop_releasing(&hrt_server->pserver);
 		slack_timer_cancel(hrt_server);
 
 		hrt_server->no_slack = 0;
-		hrt_server->pserver.period = 0;
-		hrt_server->pserver.wcet = 0;
+		hrt_server->server.period = 0;
+		hrt_server->server.wcet = 0;
 	}
 }
 
@@ -1330,36 +1364,47 @@ static void stop_hrt_servers(void)
  */
 static void start_servers(lt_t time)
 {
-	int cpu;
+	int cpu, slack_cpu;
 	lt_t slack_fire;
 	cpu_entry_t *entry;
-	pserver_t *server;
-	be_server_t *be_server;
+	server_t *server;
+	server_t *be_server;
 	struct list_head *pos;
 
 	if (atomic_read(&servers_running))
 		return;
 	atomic_set(&servers_running, 1);
 
+	TRACE_SUB("starting servers at %llu", time);
+
 	/* Start HRT servers */
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpu_entries, cpu);
-		server = &entry->hrt_server.pserver;
+		server = &entry->hrt_server.server;
 
-		if (!check_hrt_server_initialized(entry))
+		if (!check_hrt_server_initialized(&entry->hrt_server))
 			goto loop_end;
 
 		TRACE("Setting up cpu %d to have timer deadline %llu\n",
-		      cpu, TIME(get_server_deadline(server)));
+		      cpu, TIME(server->deadline));
+
+		server_release_at(server, time);
 
-		pserver_start_cpu_releasing(server, time, cpu);
+		TRACE_SUB("P%d now ready", cpu);
+		entry->hrt_server.ready = 1;
 
-		slack_fire = get_server_deadline(server) -
-			     get_server_budget(server);
+		slack_fire = server->deadline - server->budget;
 		TRACE_SUB("slack timer %d armed to fire at %llu, %p",
 			  cpu, TIME(slack_fire), &entry->hrt_server.slack_timer);
 
-		hrtimer_start_on(cpu, &per_cpu(slack_timer_infos, cpu),
+#ifdef SLACK_ON_MASTER
+		if (release_master != NO_CPU)
+			slack_cpu = release_master;
+		else
+#endif
+			slack_cpu = cpu;
+
+		hrtimer_start_on(slack_cpu, &entry->hrt_server.slack_timer_info,
 				 &entry->hrt_server.slack_timer,
 				 ns_to_ktime(slack_fire),
 				 HRTIMER_MODE_ABS_PINNED);
@@ -1369,13 +1414,13 @@ static void start_servers(lt_t time)
 
 	/* Start BE servers */
 	list_for_each(pos, &be_servers) {
-		be_server = list_entry(pos, be_server_t, list);
-		TRACE("Releasing BE server %d\n", get_server_id(be_server));
+		be_server = list_entry(pos, server_t, list);
+		TRACE("Releasing BE server %d\n", be_server->id);
 		BUG_ON(bheap_node_in_heap(be_server->hn));
-		bheap_insert(be_server_order, &be_ready_servers, be_server->hn);
-		TRACE_SUB("inserting be server %d",
-			  get_server_id(server));
-		pserver_start_releasing(&be_server->pserver, time);
+		bheap_insert(server_order, &be_ready_servers, be_server->hn);
+		TRACE_SUB("inserting be server %d", be_server->id);
+
+		server_release_at(be_server, time);
 	}
 }
 
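The slack computation above is easy to sanity-check: immediately after server_release_at(server, time), budget equals wcet and deadline equals time + period, so the slack timer fires exactly when only the full budget remains before the deadline. A standalone sketch in plain C (values in nanoseconds; names are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t lt_t;

	/* slack_fire = deadline - budget, as computed in start_servers(). */
	static lt_t slack_fire_time(lt_t release, lt_t wcet, lt_t period)
	{
		return (release + period) - wcet;
	}

	int main(void)
	{
		/* A (10ms, 100ms) server released at t = 0 fires at 90ms. */
		printf("%llu\n", (unsigned long long)
		       slack_fire_time(0, 10000000ULL, 100000000ULL));
		return 0;
	}
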
@@ -1398,6 +1443,13 @@ static long edf_hsb_activate_plugin(void)
 	raw_spin_unlock_irqrestore(global_lock, flags);
 #endif
 
+#ifdef CONFIG_RELEASE_MASTER
+	release_master = atomic_read(&release_master_cpu);
+#else
+	release_master = NO_CPU;
+#endif
+	server_domain.release_master = release_master;
+
 	TRACE("activating EDF-HSB plugin.\n");
 	return 0;
 }
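The same pattern as the SLACK_ON_MASTER block in start_servers() recurs for the completion and release timers in servers.c: route the timer to the configured release master if there is one, otherwise keep it on the local CPU. The patch open-codes the choice at each call site; factored out it is just (helper name illustrative):

	/* Sketch: CPU selection used by the slack/completion/release timers. */
	static inline int timer_target_cpu(int local_cpu, int release_master)
	{
		return (release_master != NO_CPU) ? release_master : local_cpu;
	}
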
@@ -1410,7 +1462,7 @@ static long edf_hsb_admit_task(struct task_struct *task)
 	cpu_entry_t *entry = task_sched_entry(task);
 
 	if (is_hrt(task)) {
-		return check_hrt_server_initialized(entry) &&
+		return check_hrt_server_initialized(&entry->hrt_server) &&
 			((task_cpu(task) == task->rt_param.task_params.cpu) &&
 			 (task_cpu(task) == entry->cpu)) ? 0 : -EINVAL;
 	} else {
@@ -1503,10 +1555,11 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 	BUG_ON(curr && curr != prev);
 	BUG_ON(curr && !is_realtime(prev));
 	TRACE("server_budget: %llu, server_deadline: %llu, "
-	      "curr_time: %llu, no_slack: %d\n",
-	      TIME(get_server_budget(&entry->hrt_server)),
-	      TIME(get_server_deadline(&entry->hrt_server)),
-	      TIME(litmus_clock()), entry->hrt_server.no_slack);
+	      "curr_time: %llu, no_slack: %d, ready: %d\n",
+	      TIME(entry->hrt_server.server.budget),
+	      TIME(entry->hrt_server.server.deadline),
+	      TIME(litmus_clock()), entry->hrt_server.no_slack,
+	      entry->hrt_server.ready);
 
 	/* Determine state */
 	blocks = curr && !is_running(curr);
@@ -1561,23 +1614,26 @@ static struct task_struct* edf_hsb_schedule(struct task_struct *prev)
 static void edf_hsb_task_new(struct task_struct *task, int on_rq, int running)
 {
 	unsigned long flags;
-	srt_state_t *server_state = NULL;
+	server_t *srt_server = NULL;
 	cpu_entry_t *entry = task_sched_entry(task);
 
 	TRACE_TASK(task, "edf_hsb: task new at %llu\n", TIME(litmus_clock()));
 
 	raw_spin_lock_irqsave(global_lock, flags);
 
+	/* Setup job parameters */
+	release_at(task, litmus_clock());
+
 	/* Create struct to store SRT server state on suspension */
 	if (is_srt(task)) {
-		server_state = kmalloc(sizeof(srt_state_t), GFP_ATOMIC);
-		server_state->job_no = tsk_rt(task)->job_params.job_no;
-		release_srt_server(task, server_state);
+		srt_server = kmalloc(sizeof(server_t), GFP_ATOMIC);
+		server_init(srt_server, task->pid, get_exec_cost(task),
+			    get_rt_period(task), 0);
+		srt_server->type = RT_CLASS_SOFT;
+		srt_server->job_no = tsk_rt(task)->job_params.job_no;
+		server_release_at(srt_server, get_release(task));
 	}
-	tsk_rt(task)->plugin_data = server_state;
-
-	/* Setup job parameters */
-	release_at(task, litmus_clock());
+	tsk_rt(task)->plugin_data = srt_server;
 
 	/* Already running, update the cpu entry.
 	 * This tends to happen when the first tasks enter the system.
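With this change every SRT task carries a full server_t in plugin_data instead of the old srt_state_t, so suspension and resume paths can treat SRT tasks uniformly with servers. Retrieving it is a cast (sketch; the helper name is illustrative, not part of the patch):

	/* Sketch: fetch the per-task SRT server stored by edf_hsb_task_new(). */
	static inline server_t *task_srt_server(struct task_struct *task)
	{
		return (server_t *)tsk_rt(task)->plugin_data;
	}
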
@@ -1658,7 +1714,6 @@ static struct sched_plugin edf_hsb_plugin __cacheline_aligned_in_smp = {
 static int __init init_edf_hsb(void)
 {
 	cpu_entry_t *entry;
-	server_t *srt_server;
 	hrt_server_t *hrt_server;
 	int rv, cpu;
 
@@ -1688,23 +1743,16 @@ static int __init init_edf_hsb(void)
 	edf_domain_init(&srt_domain, NULL, release_srt_jobs);
 	rt_domain_init(&be_domain, be_ready_order,
 		       NULL, release_be_jobs);
+	server_domain_init(&server_domain, servers_released,
+			   server_completed, NO_CPU, global_lock);
 
 	/* Global collections */
 	bheap_init(&cpu_heap);
 	bheap_init(&be_ready_servers);
 	INIT_LIST_HEAD(&be_servers);
 
-#ifdef CONFIG_RELEASE_MASTER
-	release_master = atomic_read(&release_master_cpu);
-#else
-	release_master = NO_CPU;
-#endif
-	server_domain_init(&server_domain, servers_released, server_completed,
-			   release_master, global_lock);
-
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpu_entries, cpu);
-		srt_server = &entry->srt_server;
 		hrt_server = &entry->hrt_server;
 
 		entry->cpu = cpu;
@@ -1713,19 +1761,17 @@ static int __init init_edf_hsb(void)
 		entry->linked_server = NULL;
 
 		/* HRT server */
-		hrt_server->pserver.deadline = 0;
-		hrt_server->pserver.period = 0;
-		hrt_server->pserver.wcet = 0;
+		hrt_server->server.deadline = 0;
+		hrt_server->server.period = 0;
+		hrt_server->server.wcet = 0;
+		hrt_server->ready = 0;
 
-		hrtimer_start_on_info_init(&per_cpu(slack_timer_infos, cpu));
+		hrtimer_start_on_info_init(&hrt_server->slack_timer_info);
 
 		/* CPU entry bheap nodes */
 		entry->hn = &cpu_heap_node[cpu];
 		bheap_node_init(&entry->hn, entry);
 		bheap_insert(cpu_lower_prio, &cpu_heap, entry->hn);
-
-		/* SRT server */
-		server_init(srt_server, -1, global_lock);
 	}
 
  out:
diff --git a/litmus/servers.c b/litmus/servers.c
index a43aad336367..9ee58198b0d6 100644
--- a/litmus/servers.c
+++ b/litmus/servers.c
@@ -1,9 +1,13 @@
+/*
+ * TODO: memory leaks for stopping
+ */
 #include <linux/hrtimer.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 
+#include <litmus/bheap.h>
 #include <litmus/litmus.h>
 #include <litmus/litmus_proc.h>
 #include <litmus/sched_trace.h>
@@ -11,6 +15,9 @@
 
 #define DEBUG_SERVERS
 
+/* Not working */
+/* #define COMPLETION_ON_MASTER */
+
 #define TIME(x) \
 	({lt_t y = x; \
 	  do_div(y, NSEC_PER_MSEC); \
@@ -81,6 +88,7 @@ static inline int timer_cancel(struct hrtimer *timer)
 
 static int completion_timer_arm(server_domain_t* domain, int cpu)
 {
+	int err = 0, on_cpu;
 	lt_t now = litmus_clock();
 	server_t *server = domain->running[cpu];
 	lt_t budget_exhausted = now + server->budget;
@@ -95,13 +103,40 @@ static int completion_timer_arm(server_domain_t* domain, int cpu)
 		return 0;
 	}
 
+	if (domain->completion_timers[cpu].armed) {
+		TRACE_SUB(server, "cannot arm completion, waiting for arm");
+		return 0;
+	}
+
+	/* This happens when a server is run late enough that it would complete
+	 * after it is released. This will almost certainly cause a reschedule,
+	 * so we just force the server to complete here and trust the callback
+	 * to get things right.
+	 */
+	if (lt_before_eq(server->deadline, budget_exhausted)) {
+		budget_exhausted = server->deadline;
+	}
+
 	TRACE_SUB(server, "start time: %llu", server->start_time);
 
+#ifdef COMPLETION_ON_MASTER
+	if (domain->release_master != NO_CPU)
+		on_cpu = domain->release_master;
+	else
+#endif
+		on_cpu = cpu;
+
 	if (cpu != smp_processor_id()) {
-		hrtimer_start_on(cpu, &domain->completion_timers[cpu].info,
-				 &domain->completion_timers[cpu].timer,
-				 ns_to_ktime(budget_exhausted),
-				 HRTIMER_MODE_ABS_PINNED);
+		err = hrtimer_start_on(on_cpu,
+				       &domain->completion_timers[cpu].info,
+				       &domain->completion_timers[cpu].timer,
+				       ns_to_ktime(budget_exhausted),
+				       HRTIMER_MODE_ABS_PINNED);
+		if (err) {
+			TRACE_SUB(server, "failed to arm completion");
+		} else {
+			TRACE_SUB(server, "success on P%d!", on_cpu);
+		}
 	} else {
 		__hrtimer_start_range_ns(&domain->completion_timers[cpu].timer,
 					 ns_to_ktime(budget_exhausted),
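The clamp above keeps a late-running server's completion from being scheduled past its own next release: if now + budget would land at or beyond the deadline, the timer fires at the deadline instead. In isolation (standalone sketch, names illustrative):

	#include <stdint.h>

	typedef uint64_t lt_t;

	/* Mirrors completion_timer_arm(): pick the completion fire time. */
	static lt_t completion_fire_time(lt_t now, lt_t budget, lt_t deadline)
	{
		lt_t budget_exhausted = now + budget;

		if (deadline <= budget_exhausted)	/* lt_before_eq() */
			budget_exhausted = deadline;
		return budget_exhausted;
	}
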
@@ -132,9 +167,6 @@ void server_run(server_t *server, struct task_struct *task,
 	server->running = 1;
 	server->start_time = litmus_clock();
 
-	/* TODO REMOVE ME */
-	server->domain = domain;
-
 	domain->running[cpu] = server;
 	domain->completion_timers[cpu].armed = completion_timer_arm(domain, cpu);
 
@@ -147,28 +179,26 @@ static enum hrtimer_restart completion_timer_fire(struct hrtimer *timer)
 	unsigned long flags;
 	enum hrtimer_restart rv;
 	struct task_struct *was_running;
-	completion_timer_t *container;
+	completion_timer_t *completion_timer;
 	server_domain_t *domain;
 	server_t *server;
 	lt_t budget_exhausted;
 
 	rv = HRTIMER_NORESTART;
-	cpu = smp_processor_id();
 
-
-	/* Use fancy pointer arithmetic to get the domain */
-	container = container_of(timer, completion_timer_t, timer);
-	domain = container->domain;
+	completion_timer = container_of(timer, completion_timer_t, timer);
+	domain = completion_timer->domain;
+	cpu = completion_timer->cpu;
 
 	raw_spin_lock_irqsave(domain->timer_lock, flags);
 
-	server = domain->running[cpu];
-	TRACE_TIMER(server, "completion timer firing on P%d, remaining budget: %llu",
-		    cpu, server->budget);
+	_TRACE_TIMER("completion timer firing on P%d", cpu);
 
 	/* We got the lock before someone tried to re-arm. Proceed. */
-	if (domain->completion_timers[cpu].armed) {
+	if (completion_timer->armed) {
+		server = domain->running[cpu];
 		TRACE_SUB(server, "completed");
+
 		was_running = server->scheduled;
 
 		server->budget = 0;
@@ -183,13 +213,17 @@ static enum hrtimer_restart completion_timer_fire(struct hrtimer *timer)
 	/* Someone either beat us to the lock or hooked up a new server
 	 * when we called server_completed. Rearm the timer.
 	 */
-	if (domain->running[cpu] && !domain->completion_timers[cpu].armed) {
-		TRACE_SUB(server, "rearming on P%d", cpu);
+	if (domain->running[cpu] && !completion_timer->armed) {
 		server = domain->running[cpu];
+		TRACE_SUB(server, "rearming on P%d", cpu);
 		budget_exhausted = server->start_time + server->budget;
 		hrtimer_set_expires(timer, ns_to_ktime(budget_exhausted));
-		domain->completion_timers[cpu].armed = 1;
+		completion_timer->armed = 1;
 		rv = HRTIMER_RESTART;
+	} else {
+		atomic_set(&completion_timer->info.state,
+			   HRTIMER_START_ON_INACTIVE);
 	}
 
 	raw_spin_unlock_irqrestore(domain->timer_lock, flags);
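The armed flag carries the synchronization here: server_stop() clears it under timer_lock, so a completion that fires concurrently but loses the lock race sees armed == 0, skips the stale completion, and instead re-arms if a new server was hooked up in the meantime. A compressed sketch of the interleaving this appears to handle (comments only, for orientation; not a definitive trace):

	/*
	 *  timer fire (CPU A)            server_stop()/server_run() (CPU B)
	 *  ------------------------      ----------------------------------
	 *  fire; spin on timer_lock      lock(timer_lock)
	 *                                armed = 0; running[cpu] = NULL
	 *                                server_run(): running[cpu] = s2,
	 *                                arm refused (timer pending); unlock
	 *  lock(timer_lock)
	 *  armed == 0  -> skip completion
	 *  running[cpu] && !armed
	 *              -> set expiry for s2, HRTIMER_RESTART
	 */
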
@@ -197,83 +231,48 @@ static enum hrtimer_restart completion_timer_fire(struct hrtimer *timer)
 	return rv;
 }
 
-static enum hrtimer_restart release_timer_fire(struct hrtimer *timer)
-{
-	unsigned long flags;
-	int was_running;
-	pserver_t *server = container_of(timer, pserver_t, release_timer);
-	struct task_struct *was_scheduled;
-
-	if (server->server.timer_lock)
-		raw_spin_lock_irqsave(server->server.timer_lock, flags);
-	else
-		local_irq_save(flags);
-
-	was_scheduled = server->server.scheduled;
-
-	TRACE_TIMER(&server->server,
-		    "release timer firing, remaining budget: %llu",
-		    get_server_budget(server));
-
-	was_running = server->server.running;
-
-	/* This would have been armed if the budget would exhaust
-	 * after the server release.
-	 */
-	if (was_running)
-		timer_cancel(&server->server.domain->completion_timers[
-			     server->server.scheduled->rt_param.linked_on].timer);
-
-
-	pserver_release(server);
-	server->post_release(server);
-
-	/* Need to arm the budget timer if the server continues running
-	 * the same task.
-	 */
-	if (was_running && was_scheduled == get_server_scheduled(server))
-		completion_timer_arm(server->server.domain, was_scheduled->rt_param.linked_on);
-
-	hrtimer_set_expires(timer, ns_to_ktime(server->deadline));
-
-	if (server->server.timer_lock)
-		raw_spin_unlock_irqrestore(server->server.timer_lock, flags);
-	else
-		local_irq_restore(flags);
-
-	return HRTIMER_RESTART;
-}
-
-void server_init(server_t *server, int id, raw_spinlock_t *timer_lock)
-{
-	server->id = id;
-
-	server->running = 0;
-	server->budget = 0;
-	server->job_no = 1; /* NOT SURE */
-	server->start_time = 0;
-	server->scheduled = NULL;
-	server->timer_lock = timer_lock;
-}
-
-void pserver_init(pserver_t *server, int id, raw_spinlock_t *timer_lock,
-		  lt_t wcet, lt_t period)
-{
-	server_init(&server->server, id, timer_lock);
-
-	server->wcet = wcet;
-	server->period = period;
-
-	server->deadline = 0;
-	server->post_release = NULL;
-
-	hrtimer_init(&server->release_timer,
-		     CLOCK_MONOTONIC,
-		     HRTIMER_MODE_ABS);
-	server->release_timer.function = release_timer_fire;
-}
+struct kmem_cache *server_release_cache; /* In litmus.c */
+static enum hrtimer_restart release_servers_fire(struct hrtimer *timer);
+
+/*
+ * Initialize heap.
+ */
+static server_release_heap_t* release_heap_alloc(int gfp_flags)
+{
+	server_release_heap_t *rh;
+	rh = kmem_cache_alloc(server_release_cache, gfp_flags);
+	if (rh) {
+		hrtimer_init(&rh->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+		rh->timer.function = release_servers_fire;
+	}
+	return rh;
+}
+
+void server_init(server_t *server, int id, lt_t wcet, lt_t period, int grouped)
+{
+	server->id = id;
+	server->wcet = wcet;
+	server->period = period;
+
+	server->deadline = 0;
+	server->release = 0;
+	server->budget = 0;
+	server->start_time = 0;
+
+	server->running = 0;
+	server->job_no = 0;
+	server->scheduled = NULL;
+
+	server->hn = kmalloc(sizeof(struct bheap_node), GFP_ATOMIC);
+	bheap_node_init(&server->hn, server);
+	INIT_LIST_HEAD(&server->list);
+
+	if (grouped) {
+		server->release_heap = release_heap_alloc(GFP_ATOMIC);
+		INIT_LIST_HEAD(&server->release_list);
+	}
+}
 
-
 /*
  * Handles subtraction of lt_t without underflows.
  */
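server_init() now owns the bheap-node allocation and, for grouped servers, the pre-allocated release heap, so a caller shrinks to a kmalloc plus two calls. A usage sketch mirroring admit_be_server() and start_servers() above (error handling added here; the patch itself does not check the allocations):

	/* Sketch: create a grouped BE server and schedule its first release. */
	static server_t *make_be_server(int id, lt_t wcet, lt_t period, lt_t start)
	{
		server_t *s = kmalloc(sizeof(server_t), GFP_ATOMIC);

		if (!s)
			return NULL;
		server_init(s, id, wcet, period, 1);	/* 1 => grouped releases */
		s->type = RT_CLASS_BEST_EFFORT;
		server_release_at(s, start);	/* budget = wcet, deadline = start + period */
		return s;
	}
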
@@ -297,75 +296,62 @@ void server_stop(server_t *server, server_domain_t *domain)
 	}
 
 	BUG_ON(!server->running);
-	cpu = server->scheduled->rt_param.linked_on;
-
 	TRACE_SUB(server, "stopping server, start: %llu, end: %llu",
 		  server->start_time, now);
 
+
+	/* Calculate remaining budget */
 	elapsed_time = lt_subtract(now, server->start_time);
 	server->budget -= elapsed_time;
 
+	TRACE_SUB(server, "new budget: %llu", TIME(server->budget));
+
+	/* Set domain state */
+	cpu = server->scheduled->rt_param.linked_on;
 	domain->completion_timers[cpu].armed = 0;
 	domain->running[cpu] = NULL;
 	timer_cancel(&domain->completion_timers[cpu].timer);
 
-	TRACE_SUB(server, "new budget: %llu", TIME(server->budget));
-
+	/* Make server inactive */
 	server->running = 0;
 	server->scheduled = NULL;
 	server->start_time = 0;
 }
 
-void pserver_start_releasing(pserver_t *server, lt_t time)
+void server_release(server_t *server)
 {
-	if (hrtimer_active(&server->release_timer))
-		timer_cancel(&server->release_timer);
+	BUG_ON(!server->deadline);
 
-	server->deadline = time;
-	pserver_release(server);
-
-	__hrtimer_start_range_ns(&server->release_timer,
-				 ns_to_ktime(server->deadline),
-				 0 /* delta */,
-				 HRTIMER_MODE_ABS_PINNED,
-				 0 /* no wakeup */);
-}
-
-void pserver_start_cpu_releasing(pserver_t *server, lt_t time, int cpu)
-{
-	if (hrtimer_active(&server->release_timer))
-		timer_cancel(&server->release_timer);
-
-	server->deadline = time;
-	pserver_release(server);
+	server->budget = server->wcet;
+	server->release = server->deadline;
+	server->deadline += server->period;
+	++server->job_no;
 
-	hrtimer_start_on_info_init(&per_cpu(server_cpu_infos, cpu));
+	TRACE_SUB(server, "budget: %llu, release: %llu, "
+		  "deadline: %llu, period: %llu, job: %d",
+		  server->budget, server->release, server->deadline,
+		  server->period, server->job_no);
+	TRACE_SUB(server, "budget: %llu, release: %llu, "
+		  "deadline: %llu, period: %llu, job: %d",
+		  TIME(server->budget), TIME(server->release),
+		  TIME(server->deadline), TIME(server->period), server->job_no);
 
-	hrtimer_start_on(cpu, &per_cpu(server_cpu_infos, cpu),
-			 &server->release_timer,
-			 ns_to_ktime(server->deadline),
-			 HRTIMER_MODE_ABS_PINNED);
-}
-
-void pserver_stop_releasing(pserver_t *server)
-{
-	if (hrtimer_active(&server->release_timer))
-		timer_cancel(&server->release_timer);
-
-	BUG_ON(get_server_running(server));
-
-	server->deadline = 0;
-}
-
-void server_release(server_t *server, lt_t budget)
-{
-	server->budget = budget;
-	++server->job_no;
-
 	/* Need to reset for budget calculations */
 	if (server->running)
 		server->start_time = litmus_clock();
 }
 
+void server_release_at(server_t *server, lt_t time)
+{
+	server->deadline = time;
+	server_release(server);
+
+	TRACE_SUB(server, "releasing at %llu", time);
+}
+
+/******************************************************************************
+ * Proc methods
+ ******************************************************************************/
+
 static int server_proc_read(char* page, char **start, off_t off,
 			    int count, int *eof, void *data)
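One subtlety of server_release_at(): the time passed in becomes the job's release, and the deadline lands one period later, so repeated calls to server_release() roll the window forward without drift. Worked through in a standalone sketch (unit-free values; names illustrative):

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t lt_t;

	struct srv { lt_t wcet, period, budget, release, deadline; int job_no; };

	/* Mirrors server_release(): replenish, slide the window one period. */
	static void release(struct srv *s)
	{
		s->budget = s->wcet;
		s->release = s->deadline;
		s->deadline += s->period;
		++s->job_no;
	}

	int main(void)
	{
		struct srv s = { .wcet = 10, .period = 100 };

		s.deadline = 1000;	/* as in server_release_at(s, 1000) */
		release(&s);	/* job 1: release 1000, deadline 1100 */
		release(&s);	/* job 2: release 1100, deadline 1200 */
		printf("job %d: release %llu, deadline %llu\n", s.job_no,
		       (unsigned long long)s.release,
		       (unsigned long long)s.deadline);
		return 0;
	}
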
@@ -386,25 +372,21 @@ static int server_proc_read(char* page, char **start, off_t off,
 	return length;
 }
 
-void server_proc_read_single(pserver_t *server, int cpu,
+void server_proc_read_single(server_t *server, int cpu,
 			     struct proc_read_args *args)
 {
-	lt_t wcet, period;
-	wcet = get_server_wcet(server);
-	period = get_server_period(server);
-
 	if (cpu == NO_CPU) {
 		args->length +=
 			snprintf(args->page + args->length,
 				 PAGE_SIZE - args->length,
 				 "%8llu %8llu\n",
-				 TIME(wcet), TIME(period));
+				 server->wcet, server->period);
 	} else {
 		args->length +=
 			snprintf(args->page + args->length,
 				 PAGE_SIZE - args->length,
 				 "%8llu %8llu %3d\n",
-				 TIME(wcet), TIME(period), cpu);
+				 server->wcet, server->period, cpu);
 	}
 }
 
@@ -509,8 +491,7 @@ static int server_proc_write(struct file *file, const char __user *input,
 		ret = server_param_check(wcet, period, cpu);
 		if (ret) goto loop_end;
 
-		ret = methods->admit_server(wcet * NSEC_PER_MSEC,
-					    period * NSEC_PER_MSEC, cpu);
+		ret = methods->admit_server(wcet, period, cpu);
 		if (ret) {
 			printk(KERN_WARNING "Litmus plugin rejects server with "
 			       "period: %llu, wcet: %llu, cpu: %d\n",
@@ -559,6 +540,10 @@ void server_proc_exit(struct proc_dir_entry *proc_dir, char *file)
 	remove_proc_entry(file, proc_dir);
 }
 
+/******************************************************************************
+ * Domain methods
+ ******************************************************************************/
+
 void server_domain_init(server_domain_t *domain,
 			servers_released_t servers_released,
 			server_completed_t server_completed,
@@ -575,7 +560,7 @@ void server_domain_init(server_domain_t *domain,
 	raw_spin_lock_init(&domain->tobe_lock);
 
 
-	domain->release_master = NO_CPU;
+	domain->release_master = release_master;
 	domain->timer_lock = timer_lock;
 	domain->server_completed = server_completed;
 	domain->servers_released = servers_released;
@@ -587,6 +572,7 @@ void server_domain_init(server_domain_t *domain,
 	for_each_online_cpu(i) {
 		domain->running[i] = NULL;
 		domain->completion_timers[i].armed = 0;
+		domain->completion_timers[i].cpu = i;
 		hrtimer_init(&domain->completion_timers[i].timer,
 			     CLOCK_MONOTONIC,
 			     HRTIMER_MODE_ABS);
@@ -603,13 +589,212 @@ void server_domain_exit(server_domain_t *domain)
 	kfree(domain->running);
 }
 
-void add_server_release(server_t *server, server_domain_t *server_domain)
-{
-	TRACE_SUB(server, "adding to release");
-}
-
-void stop_server_releasing(server_domain_t *server_domain)
-{
-
-}
+static unsigned int time2slot(lt_t time)
+{
+	return (unsigned int) time2quanta(time, FLOOR) %
+		SERVER_RELEASE_QUEUE_SLOTS;
+}
+
+/*
+ * Send a list of servers to a client callback.
+ */
+static enum hrtimer_restart release_servers_fire(struct hrtimer *timer)
+{
+	unsigned long flags;
+	server_release_heap_t *rh;
+
+	_TRACE_SUB("on_release_timer(0x%p) starts.", timer);
+
+	rh = container_of(timer, server_release_heap_t, timer);
+
+	raw_spin_lock_irqsave(&rh->domain->release_lock, flags);
+	_TRACE_SUB("CB has the release_lock");
+
+	/* Remove from release queue */
+	list_del(&rh->list);
+
+	raw_spin_unlock_irqrestore(&rh->domain->release_lock, flags);
+	_TRACE_SUB("CB returned release_lock");
+
+	/* Call release callback */
+	rh->domain->servers_released(&rh->servers);
+	/* WARNING: rh can be referenced from other CPUs from now on. */
+
+	_TRACE_SUB("on_release_timer(0x%p) ends.", timer);
+
+	return HRTIMER_NORESTART;
+}
+
+/*
+ * Caller must hold release lock.
+ * Will return heap for given time. If no such heap exists prior to
+ * the invocation it will be created.
+ */
+static server_release_heap_t* get_release_heap(server_domain_t *rt,
+					       server_t *server,
+					       int use_server_heap)
+{
+	struct list_head *pos;
+	server_release_heap_t *heap = NULL;
+	server_release_heap_t *rh;
+	lt_t release_time = server->release;
+	unsigned int slot = time2slot(release_time);
+
+	_TRACE_SUB("searching for release time %llu", release_time);
+
+	/* Initialize pos for the case that the list is empty */
+	pos = rt->release_queue[slot].next;
+	list_for_each(pos, &rt->release_queue[slot]) {
+		rh = list_entry(pos, server_release_heap_t, list);
+		if (release_time == rh->release_time) {
+			/* Perfect match -- this happens on hyperperiod
+			 * boundaries
+			 */
+			heap = rh;
+			break;
+		} else if (lt_before(release_time, rh->release_time)) {
+			/* We need to insert a new node since rh is
+			 * already in the future
+			 */
+			break;
+		}
+	}
+	if (!heap && use_server_heap) {
+		/* Use pre-allocated release heap */
+		rh = server->release_heap;
+		rh->domain = rt;
+		rh->release_time = release_time;
+
+		/* Add to release queue */
+		list_add(&rh->list, pos->prev);
+		heap = rh;
+	}
+	return heap;
+}
+
+/*
+ * Prepare a server's release_heap for use.
+ */
+static int reinit_release_heap(server_t *server)
+{
+	int rv = 0;
+	server_release_heap_t* rh;
+
+	/* Use pre-allocated release heap */
+	rh = server->release_heap;
+
+	/* WARNING: If the CPU still holds the release_lock at this point,
+	 * deadlock may occur!
+	 */
+	rv = hrtimer_try_to_cancel(&rh->timer);
+
+	/* The timer callback is running, it is useless to add
+	 * to the release heap now.
+	 */
+	if (rv == -1) {
+		rv = 0;
+		goto out;
+	}
+
+	/* Under no circumstances should the timer have been active
+	 * but not running.
+	 */
+	/* TODO: stop living dangerously */
+	//BUG_ON(rv == 1);
+	rv = 1;
+
+	/* initialize */
+	INIT_LIST_HEAD(&rh->servers);
+	atomic_set(&rh->info.state, HRTIMER_START_ON_INACTIVE);
+ out:
+	return rv;
+}
+
+/*
+ * Arm the release timer for the next set of servers.
+ */
+static int arm_release_timer(server_domain_t *domain)
+{
+	int rv = 1;
+	struct list_head list;
+	struct list_head *pos, *safe;
+	server_t *server;
+	server_release_heap_t *rh;
+
+	_TRACE_SUB("arm_release_timer() at %llu", litmus_clock());
+	list_replace_init(&domain->tobe_released, &list);
+
+	list_for_each_safe(pos, safe, &list) {
+		/* Pick server from work list */
+		server = list_entry(pos, server_t, release_list);
+		list_del(pos);
+
+		/* Put into release heap while holding release_lock */
+		raw_spin_lock(&domain->release_lock);
+		TRACE_SUB(server, "I have the release_lock");
+
+		rh = get_release_heap(domain, server, 0);
+		if (!rh) {
+			/* Need to use our own, but drop lock first */
+			raw_spin_unlock(&domain->release_lock);
+			TRACE_SUB(server, "Dropped release_lock");
+
+			rv = reinit_release_heap(server);
+
+			/* Bail! We missed the release time */
+			if (!rv) {
+				TRACE_SUB(server, "missed release");
+				rv = 0;
+				goto out;
+			}
+
+			TRACE_SUB(server, "release_heap ready");
+
+			raw_spin_lock(&domain->release_lock);
+			TRACE_SUB(server, "Re-acquired release_lock");
+
+			rh = get_release_heap(domain, server, 1);
+		}
+
+		list_add(&server->release_list, &rh->servers);
+		TRACE_SUB(server, "arm_release_timer(): added to release heap");
+
+		raw_spin_unlock(&domain->release_lock);
+		TRACE_SUB(server, "Returned the release_lock");
+
+		/* To avoid arming the timer multiple times, we only let the
+		 * owner do the arming (which is the "first" task to reference
+		 * this release_heap anyway).
+		 */
+		if (rh == server->release_heap) {
+			TRACE_SUB(server, "arming timer 0x%p at %llu on P%d",
+				  &rh->timer, rh->release_time,
+				  domain->release_master);
+			/* We cannot arm the timer using hrtimer_start()
+			 * as it may deadlock on rq->lock
+			 *
+			 * PINNED mode is ok on both local and remote CPU
+			 */
+			if (domain->release_master == NO_CPU) {
+				__hrtimer_start_range_ns(&rh->timer,
+					ns_to_ktime(rh->release_time),
+					0, HRTIMER_MODE_ABS_PINNED, 0);
+			} else {
+				hrtimer_start_on(domain->release_master,
+					&rh->info, &rh->timer,
+					ns_to_ktime(rh->release_time),
+					HRTIMER_MODE_ABS_PINNED);
+			}
+		} else
+			TRACE_SUB(server, "0x%p is not my timer", &rh->timer);
+	}
+ out:
+	return rv;
+}
+
+int add_server_release(server_t *server, server_domain_t *domain)
+{
+	TRACE_SUB(server, "adding to release at %llu", server->release);
+	list_add(&server->release_list, &domain->tobe_released);
+	return arm_release_timer(domain);
+}
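time2slot() is a plain hash of quantized release times into the 127-slot release queue, so any number of servers released at the same instant collapse into one heap, one timer, and one servers_released() callback. A unit-free sketch of the bucketing (time2quanta() is the kernel's; a fixed quantum stands in for it here):

	#include <stdio.h>
	#include <stdint.h>

	#define SERVER_RELEASE_QUEUE_SLOTS 127
	#define QUANTUM 1000000ULL	/* illustrative quantum */

	typedef uint64_t lt_t;

	static unsigned int time2slot_sketch(lt_t time)
	{
		return (unsigned int)((time / QUANTUM) % SERVER_RELEASE_QUEUE_SLOTS);
	}

	int main(void)
	{
		/* Equal release times share a slot; distinct times that hash
		 * together are disambiguated by release_time inside the slot. */
		printf("%u %u\n", time2slot_sketch(5 * QUANTUM),
		       time2slot_sketch((5 + SERVER_RELEASE_QUEUE_SLOTS) * QUANTUM));
		return 0;
	}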