Diffstat (limited to 'litmus')
-rw-r--r--	litmus/Kconfig     |   8
-rw-r--r--	litmus/budget.c    |   2
-rw-r--r--	litmus/dgl.c       |  49
-rw-r--r--	litmus/domain.c    |   6
-rw-r--r--	litmus/rt_domain.c |   4
-rw-r--r--	litmus/sched_mc.c  | 287
6 files changed, 233 insertions(+), 123 deletions(-)
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 91bf81ea9fae..5e80197c44db 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -81,6 +81,14 @@ config PLUGIN_MC_LEVEL_A_MAX_TASKS
 	help
 	  The maximum number of level A tasks allowed (per-cpu) in level A.
 
+config PLUGIN_MC_LINUX_SLACK_STEALING
+	bool "Run background tasks in slack"
+	depends on PLUGIN_MC
+	default n
+	help
+	  Allow non-SCHED_LITMUS tasks to run in slack time generated by tasks
+	  of the lowest criticality level.
+
 config PLUGIN_MC_RELEASE_MASTER
 	bool "Release-master support for MC"
 	depends on PLUGIN_MC && RELEASE_MASTER
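The new option is consumed purely as a compile-time gate. A minimal sketch of that
pattern, mirroring the #ifndef CONFIG_PLUGIN_MC_LINUX_SLACK_STEALING block added to
job_completion() in litmus/sched_mc.c later in this diff; the helper name
mc_release_server() is hypothetical, while tsk_mc_crit(), NUM_CRIT_LEVELS and
budget_exhausted() are existing LITMUS^RT helpers:

/* Sketch: how CONFIG_PLUGIN_MC_LINUX_SLACK_STEALING is consumed.
 * With the option disabled, a lowest-criticality server is always released
 * together with its job, so background (non-SCHED_LITMUS) tasks never see slack.
 */
static int mc_release_server(struct task_struct *task)
{
	int release_server = budget_exhausted(task);
#ifndef CONFIG_PLUGIN_MC_LINUX_SLACK_STEALING
	if (tsk_mc_crit(task) == NUM_CRIT_LEVELS - 1)
		release_server = 1;
#endif
	return release_server;
}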
diff --git a/litmus/budget.c b/litmus/budget.c
index b1c0a4b84c02..dd5a2b1e45d9 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -9,6 +9,8 @@
 
 DEFINE_PER_CPU(struct enforcement_timer, budget_timer);
 
+#define TRACE(fmt, args...) STRACE(fmt, ## args)
+
 enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
 {
 	struct enforcement_timer* et = container_of(timer,
diff --git a/litmus/dgl.c b/litmus/dgl.c
index e2286b3e9239..cced7c259735 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -28,6 +28,11 @@
 #define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \
 				       struct dgl_group_req, requests))
 
+#define TRACE(fmt, args...) STRACE(fmt, ## args)
+#define TRACE_GREQ(greq, fmt, args...) \
+	TRACE("(greq-%s/%d) " fmt, (greq->task ? greq->task->comm : "greq"), \
+	      (greq->task ? greq->task->pid : (int)greq), ## args)
+
 /* Resource id -> word, bit */
 static inline void mask_idx(int resource, int *word, int *bit)
 {
@@ -35,7 +40,6 @@ static inline void mask_idx(int resource, int *word, int *bit)
 	*bit = resource % MASK_SIZE;
 }
 
-
 static void print_waiting(struct dgl *dgl, struct dgl_resource *resource)
 {
 	struct dgl_req *pos;
@@ -45,12 +49,14 @@ static void print_waiting(struct dgl *dgl, struct dgl_resource *resource)
 	TRACE("List for rid %d\n", resource_id(dgl, resource));
 	list_for_each_entry(pos, &resource->waiting, list) {
 		greq = pos->greq;
-		TRACE(" 0x%p with timestamp %llu\n", greq, greq->ts);
+		TRACE_GREQ(greq, "with timestamp %llu\n", greq->ts);
 		BUG_ON(greq->ts < last);
 		last = greq->ts;
 	}
 }
 
+static void dummy_acquired(int cpu){}
+
 void dgl_init(struct dgl *dgl, unsigned long num_resources,
 	      unsigned long num_replicas)
 {
@@ -78,6 +84,7 @@ void dgl_init(struct dgl *dgl, unsigned long num_resources,
 	dgl->requests = 0;
 	dgl->running = 0;
 	dgl->ts = 0;
+	dgl->cpu_acquired = dummy_acquired;
 }
 
 void dgl_free(struct dgl *dgl)
@@ -130,7 +137,7 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq,
 	mask_idx(resource, &word, &bit);
 	__set_bit(bit, &greq->requested[word]);
 
-	TRACE("0x%p requesting %d of %d\n", greq, replicas, resource);
+	TRACE_GREQ(greq, "requesting %d of %d\n", replicas, resource);
 
 	req = &greq->requests[resource];
 	req->greq = greq;
@@ -153,27 +160,27 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource,
 	rid = resource_id(dgl, resource);
 	greq = req->greq;
 
-	TRACE("0x%p greq\n", greq);
+	TRACE_GREQ(greq, "try acquire\n");
 
 	head = resource->waiting.next == &req->list;
 	empty = list_empty(&resource->waiting);
 	room = resource->free_replicas >= req->replicas;
 
 	if (! (room && (head || empty)) ) {
-		TRACE("0x%p cannot acquire %d replicas, %d free\n",
-		      greq, req->replicas, resource->free_replicas,
+		TRACE_GREQ(greq, "cannot acquire %d replicas, %d free\n",
+			   req->replicas, resource->free_replicas,
 		      room, head, empty);
 		return 0;
 	}
 
 	resource->free_replicas -= req->replicas;
 
-	TRACE("0x%p acquired %d replicas of rid %d\n",
-	      greq, req->replicas, rid);
+	TRACE_GREQ(greq, "acquired %d replicas of rid %d\n",
+		   req->replicas, rid);
 
 	mask_idx(rid, &word, &bit);
 
-	TRACE("0x%p, %lu, 0x%p\n", greq->waiting, greq->waiting[word],
+	TRACE_GREQ(greq, "0x%p, %lu, 0x%p\n", greq->waiting, greq->waiting[word],
 	      &greq->waiting[word]);
 
 	clear_bit(bit, &greq->waiting[word]);
@@ -186,11 +193,19 @@ static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource,
 	}
 
 	if (!waiting) {
-		TRACE("0x%p acquired all resources\n", greq);
+		TRACE_GREQ(greq, "acquired all resources on cpu %d\n", greq->cpu);
 		BUG_ON(dgl->acquired[greq->cpu]);
 		dgl->acquired[greq->cpu] = greq;
 		litmus_reschedule(greq->cpu);
 		dgl->running++;
+
+		if (greq->task) {
+			BUG_ON(tsk_rt(greq->task)->linked_on == NO_CPU);
+			set_rt_flags(greq->task, RT_F_RUNNING);
+			sched_trace_task_resume(greq->task);
+		}
+
+		dgl->cpu_acquired(greq->cpu);
 	}
 
 	return 1;
@@ -208,7 +223,7 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu)
 	greq->cpu = cpu;
 	greq->ts = dgl->ts++;
 
-	TRACE("0x%p group request added for CPU %d\n", greq, cpu);
+	TRACE_GREQ(greq, "group request added for CPU %d\n", cpu);
 	BUG_ON(dgl->acquired[cpu] == greq);
 
 	++dgl->requests;
@@ -225,14 +240,14 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu)
 		all_succ &= succ;
 
 		if (!succ) {
-			TRACE("0x%p waiting on rid %d\n", greq, i);
+			TRACE_GREQ(greq, "waiting on rid %d\n", i);
 			list_add_tail(&req->list, &resource->waiting);
 		}
 	}
 
 	/* Grant empty requests */
 	if (all_succ && !dgl->acquired[cpu]) {
-		TRACE("0x%p empty group request acquired cpu %d\n", greq, cpu);
+		TRACE_GREQ(greq, "empty group request acquired cpu %d\n", cpu);
 		dgl->acquired[cpu] = greq;
 		++dgl->running;
 	}
@@ -251,12 +266,12 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq)
 	struct dgl_req *req, *next;
 	struct dgl_resource *resource;
 
-	TRACE("0x%p removing group request for CPU %d\n", greq, greq->cpu);
+	TRACE_GREQ(greq, "removing group request for CPU %d\n", greq->cpu);
 
 	--dgl->requests;
 
 	if (dgl->acquired[greq->cpu] == greq) {
-		TRACE("0x%p no longer acquired on CPU %d\n", greq, greq->cpu);
+		TRACE_GREQ(greq, "no longer acquired on CPU %d\n", greq->cpu);
 		dgl->acquired[greq->cpu] = NULL;
 		--dgl->running;
 	}
@@ -275,8 +290,8 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq)
 		/* Have resource */
 		resource->free_replicas += req->replicas;
 		BUG_ON(resource->free_replicas > dgl->num_replicas);
-		TRACE("0x%p releasing %d of %d replicas, rid %d\n",
-		      greq, req->replicas, resource->free_replicas, i);
+		TRACE_GREQ(greq, "releasing %d of %d replicas, rid %d\n",
+			   req->replicas, resource->free_replicas, i);
 
 		if (!list_empty(&resource->waiting)) {
 			/* Give it to the next guy */
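For orientation, the request lifecycle that these hunks trace-instrument and extend
looks roughly as follows. This is a sketch assembled only from calls visible in this
diff (dgl_init(), dgl_group_req_init(), set_req(), add_group_req(), remove_group_req(),
the cpu_acquired hook and the acquired[] array); the example_* names, the resource
numbers and the external spinlock are illustrative assumptions, mirroring how
litmus/sched_mc.c drives the interface later in this patch.

#include <litmus/dgl.h>

static struct dgl dgl;
static raw_spinlock_t dgl_example_lock;	/* callers serialize the DGL themselves */

static void example_cpu_acquired(int cpu)
{
	/* Invoked from try_acquire() once the group request owned by @cpu
	 * holds every replica it asked for (see dgl->cpu_acquired above). */
}

static void example_setup(void)
{
	dgl_init(&dgl, 16, 4);			/* 16 resources, 4 replicas each */
	dgl.cpu_acquired = example_cpu_acquired;
	raw_spin_lock_init(&dgl_example_lock);
}

static void example_request(struct dgl_group_req *greq, int cpu)
{
	dgl_group_req_init(&dgl, greq);
	set_req(&dgl, greq, 2, 1);		/* 1 replica of resource 2 */
	set_req(&dgl, greq, 5, 3);		/* 3 replicas of resource 5 */

	raw_spin_lock(&dgl_example_lock);
	add_group_req(&dgl, greq, cpu);		/* unsatisfied requests are queued */
	raw_spin_unlock(&dgl_example_lock);

	/* ... dgl.acquired[cpu] == greq once the CPU holds all replicas ... */

	raw_spin_lock(&dgl_example_lock);
	remove_group_req(&dgl, greq);		/* frees replicas, hands them to waiters */
	raw_spin_unlock(&dgl_example_lock);
}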
diff --git a/litmus/domain.c b/litmus/domain.c
index 54c060d4c8af..0852f30b428e 100644
--- a/litmus/domain.c
+++ b/litmus/domain.c
@@ -3,6 +3,9 @@
 
 #include <litmus/domain.h>
 
+void dummy_acquire(struct task_struct *t){};
+void dummy_release(struct task_struct *t){};
+
 void domain_init(domain_t *dom,
 		 raw_spinlock_t *lock,
 		 requeue_t requeue,
@@ -16,4 +19,7 @@ void domain_init(domain_t *dom,
 	dom->peek_ready = peek_ready;
 	dom->take_ready = take_ready;
 	dom->higher_prio = priority;
+
+	dom->acquire_resources = dummy_acquire;
+	dom->release_resources = dummy_release;
 }
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c
index b70c94695a58..687dc129bc2a 100644
--- a/litmus/rt_domain.c
+++ b/litmus/rt_domain.c
@@ -22,8 +22,10 @@
 #include <litmus/event_group.h>
 #endif
 
+#define TRACE(fmt, args...) STRACE(fmt, ## args)
+
 /* Uncomment when debugging timer races... */
-#if 1
+#if 0
 #define VTRACE_TASK TRACE_TASK
 #define VTRACE TRACE
 #else
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 8c2238c6bf43..d0a56fc664c0 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -1,52 +1,8 @@
 /*
  * litmus/sched_mc.c
- *
  * Implementation of the Mixed Criticality scheduling algorithm.
  *
  * (Per Mollison, Erickson, Anderson, Baruah, Scoredos 2010)
- *
- * Absolute first: relative time spent doing different parts of release
- * and scheduling overhead needs to be measured and graphed.
- *
- * Domain locks should be more fine-grained. There is no reason to hold the
- * ready-queue lock when adding a task to the release-queue.
- *
- * The levels should be converted to linked-lists so that they are more
- * adaptable and need not be identical on all processors.
- *
- * The interaction between remove_from_all and other concurrent operations
- * should be re-examined. If a job_completion and a preemption happen
- * simultaneously, a task could be requeued, removed, then requeued again.
- *
- * Level-C tasks should be able to swap CPUs a-la GSN-EDF. They should also
- * try and swap with the last CPU they were on. This could be complicated for
- * ghost tasks.
- *
- * Locking for timer-merging could be infinitely more fine-grained. A second
- * hash could select a lock to use based on queue slot. This approach might
- * also help with add_release in rt_domains.
- *
- * It should be possible to reserve a CPU for ftdumping.
- *
- * The real_deadline business seems sloppy.
- *
- * The amount of data in the header file should be cut down. The use of the
- * header file in general needs to be re-examined.
- *
- * The plugin needs to be modified so that it doesn't freeze when it is
- * deactivated in a VM.
- *
- * The locking in check_for_preempt is not fine-grained enough.
- *
- * The size of the structures could be smaller. Debugging info might be
- * excessive as things currently stand.
- *
- * The macro can_requeue has been expanded too much. Anything beyond
- * scheduled_on is a hack!
- *
- * Domain names (rt_domain) are still clumsy.
- *
- * Should BE be moved into the kernel? This will require benchmarking.
  */
 #include <linux/spinlock.h>
 #include <linux/percpu.h>
@@ -67,9 +23,9 @@
 #include <litmus/bheap.h>
 #include <litmus/event_group.h>
 #include <litmus/budget.h>
-
 #include <litmus/sched_mc.h>
 #include <litmus/ce_domain.h>
+#include <litmus/dgl.h>
 
 /**
  * struct cpu_entry - State of a CPU for the entire MC system
@@ -84,6 +40,7 @@
  */
 struct cpu_entry {
 	int cpu;
+	int lock_acquired;
 	struct task_struct* scheduled;
 	struct task_struct* will_schedule;
 	struct task_struct* linked;
@@ -98,11 +55,20 @@ struct cpu_entry {
 #endif
 };
 
+static struct dgl group_lock;
+static raw_spinlock_t dgl_lock;
+
 DEFINE_PER_CPU(struct cpu_entry, cpus);
 #ifdef CONFIG_RELEASE_MASTER
 static int interrupt_cpu;
 #endif
 
+#ifdef CONFIG_NP_SECTION
+#define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c])
+#else
+#define has_resources(t, c) (1)
+#endif
+
 #define domain_data(dom) (container_of(dom, struct domain_data, domain))
 #define is_global(dom) (domain_data(dom)->heap)
 #define is_global_task(t) (is_global(get_task_domain(t)))
@@ -117,16 +83,50 @@ static int interrupt_cpu;
 #define crit_cpu(ce) \
 	(container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries))
 #define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level])
-#define TRACE_ENTRY(e, fmt, args...) \
-	STRACE("P%d, linked=" TS " " fmt, e->cpu, TA(e->linked), ##args)
-#define TRACE_CRIT_ENTRY(ce, fmt, args...) \
-	STRACE("%s P%d, linked=" TS " " fmt, \
-	       (ce)->domain->name, crit_cpu(ce)->cpu, TA((ce)->linked), ##args)
 
-static int sid(struct crit_entry *ce)
-{
-	int level = ce->level * num_online_cpus() + crit_cpu(ce)->cpu + 1;
-	return -level;
+/*
+ * Put in requests for resources needed by @t. If @t is a server, this will
+ * set @t's np flag to reflect resources held by @t's children.
+ */
+static void acquire_resources(struct task_struct *t)
+{
+	int cpu;
+	struct task_struct *sched;
+
+	/* Can't contend for resources if not logically running */
+	BUG_ON(tsk_rt(t)->linked_on == NO_CPU);
+
+	raw_spin_lock(&dgl_lock);
+	if (is_kernel_np(t)) {
+		TRACE_MC_TASK(t, "Already contending for resources\n");
+		raw_spin_unlock(&dgl_lock);
+		return;
+	}
+	cpu = tsk_rt(t)->linked_on;
+
+	if (!has_resources(t, cpu)) {
+		set_rt_flags(t, RT_F_BLOCKED);
+		sched_trace_task_block(t);
+		TRACE_MC_TASK(t, "Blocked at %llu\n", litmus_clock());
+		add_group_req(&group_lock, tsk_rt(t)->req, cpu);
+		make_np(t);
+	}
+
+	raw_spin_unlock(&dgl_lock);
+}
+
+static void release_resources(struct task_struct *t)
+{
+	if (is_kernel_np(t)) {
+		TRACE_MC_TASK(t, "Releasing resources\n");
+
+		raw_spin_lock(&dgl_lock);
+		remove_group_req(&group_lock, tsk_rt(t)->req);
+		raw_spin_unlock(&dgl_lock);
+		take_np(t);
+	} else {
+		TRACE_MC_TASK(t, "No resources to release!\n");
+	}
 }
 
 /*
@@ -145,13 +145,13 @@ static int cpu_lower_prio(struct bheap_node *a, struct bheap_node *b)
 
 	if (first->state == CS_REMOVED || second->state == CS_REMOVED) {
 		/* Removed entries go at the back of the heap */
-		return first->state != CS_REMOVED &&
-		       second->state != CS_REMOVED;
+		return second->state == CS_REMOVED &&
+		       first->state != CS_REMOVED;
 	} else if (!first_link || !second_link) {
-		/* Entry with nothing scheduled is lowest priority */
+		/* Entry with nothing scheduled is lowest priority (front) */
 		return second_link && !first_link;
 	} else {
-		/* Sort by deadlines of tasks */
+		/* Sort by deadlines of tasks (later deadlines first) */
 		domain = get_task_domain(first_link);
 		return domain->higher_prio(second_link, first_link);
 	}
@@ -168,7 +168,8 @@ static int mc_preempt_needed(struct domain *dom, struct task_struct* curr)
 		return next && !curr;
 	} else {
 		BUG_ON(tsk_mc_crit(next) != tsk_mc_crit(curr));
-		return get_task_domain(next)->higher_prio(next, curr);
+		return !is_np(curr) &&
+			get_task_domain(next)->higher_prio(next, curr);
 	}
 }
 
@@ -268,14 +269,11 @@ static void update_ghost_time(struct task_struct *p)
 	BUG_ON(!is_ghost(p));
 	if (unlikely ((s64)delta < 0)) {
 		delta = 0;
-		TRACE_MC_TASK(p, "WARNING: negative time delta\n");
 	}
 	if (budget_remaining(p) <= delta) {
-		TRACE_MC_TASK(p, "Ghost job could have ended\n");
 		tsk_rt(p)->job_params.exec_time = get_exec_cost(p);
 		p->se.exec_start = clock;
 	} else {
-		TRACE_MC_TASK(p, "Ghost job updated, but didn't finish\n");
 		tsk_rt(p)->job_params.exec_time += delta;
 		p->se.exec_start = clock;
 	}
@@ -298,6 +296,8 @@ static void link_task_to_crit(struct crit_entry *ce,
 
 	/* Unlink last task */
 	if (ce->linked) {
+		ce->domain->release_resources(ce->linked);
+
 		TRACE_MC_TASK(ce->linked, "Unlinking\n");
 		ce->linked->rt_param.linked_on = NO_CPU;
 		if (is_ghost(ce->linked)) {
@@ -307,7 +307,7 @@ static void link_task_to_crit(struct crit_entry *ce,
 				update_ghost_time(ce->linked);
 			}
 		}
-		sched_trace_server_switch_away(sid(ce), 0, -ce->linked->pid,
+		sched_trace_server_switch_away(ce_sid(ce), 0, -ce->linked->pid,
 					       get_rt_job(ce->linked));
 	}
 
@@ -318,14 +318,17 @@ static void link_task_to_crit(struct crit_entry *ce,
 		if (is_ghost(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
 			/* There is a level-A timer that will force a
 			 * preemption, so we don't set this for level-A
-			 * tasks. Otherwise reset the budget timer.
+			 * tasks. Otherwise reset the budget timer
 			 */
 			task->se.exec_start = litmus_clock();
 			when_to_fire = task->se.exec_start + budget_remaining(task);
 			arm_ghost(ce, when_to_fire);
 		}
-		sched_trace_server_switch_to(sid(ce), 0, -task->pid,
+		sched_trace_server_switch_to(ce_sid(ce), 0, -task->pid,
 					     get_rt_job(ce->linked));
+
+		if (!is_ghost(task))
+			ce->domain->acquire_resources(task);
 	}
 }
 
@@ -448,12 +451,17 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
 	/* Higher criticality crit entries are now usable */
 	for (; i < entry_level(entry) + 1; i++) {
 		ce = &entry->crit_entries[i];
-		if (!can_use(ce)) {
+		if (!can_use(ce))
 			ce->state = CS_ACTIVATE;
-		}
 	}
 }
 
+static void preempt_cpu(struct cpu_entry *entry, struct task_struct *t)
+{
+	link_task_to_cpu(entry, t);
+	litmus_reschedule(entry->cpu);
+}
+
 /**
  * preempt() - Preempt a logically running task with a higher priority one.
  * @dom Domain from which to draw higher priority task
@@ -461,7 +469,7 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task)
  *
  * Caller must hold the lock for @dom and @ce's CPU lock.
  */
-static void preempt(struct domain *dom, struct crit_entry *ce)
+static void preempt_crit(struct domain *dom, struct crit_entry *ce)
 {
 	struct task_struct *task = dom->take_ready(dom);
 	struct cpu_entry *entry = crit_cpu(ce);
@@ -483,14 +491,12 @@ static void preempt(struct domain *dom, struct crit_entry *ce)
 	 * been disabled and a preemption could not have occurred
 	 */
 	if (!is_ghost(task)) {
-		link_task_to_cpu(entry, task);
-		preempt_if_preemptable(entry->scheduled, entry->cpu);
+		preempt_cpu(entry, task);
 	} else if (old && old == entry->linked) {
 		/* Preempted a running task with a ghost job. Null needs to be
 		 * running.
 		 */
-		link_task_to_cpu(entry, NULL);
-		preempt_if_preemptable(entry->scheduled, entry->cpu);
+		preempt_cpu(entry, NULL);
 	}
 }
 
@@ -525,6 +531,7 @@ static void update_crit_levels(struct cpu_entry *entry)
 		readmit[i] = (!global_preempted) ? ce->linked : NULL;
 
 		ce->state = CS_REMOVE;
+		TRACE_CRIT_ENTRY(ce, "(CS_REMOVE)\n");
 		if (ce->linked)
 			link_task_to_crit(ce, NULL);
 	}
@@ -548,13 +555,14 @@ static void update_crit_levels(struct cpu_entry *entry)
  */
 static void check_for_preempt(struct domain *dom)
 {
-	int recheck = 1;
+	int recheck = 1, higher_prio, was_ghost;
 	struct cpu_entry *entry;
 	struct crit_entry *ce;
 
+
 	if (is_global(dom)) {
 		/* Loop until we find a non-preemptable CPU */
-		while ((ce = lowest_prio_cpu(dom)) && recheck) {
+		while (recheck && (ce = lowest_prio_cpu(dom))) {
 			entry = crit_cpu(ce);
 			recheck = 1;
 
@@ -567,12 +575,10 @@ static void check_for_preempt(struct domain *dom)
 				fix_crit_position(ce);
 			else if (mc_preempt_needed(dom, ce->linked))
 				/* Success! Check for more preemptions */
-				preempt(dom, ce);
-			else {
+				preempt_crit(dom, ce);
+			else
 				/* Failure! */
 				recheck = 0;
-				TRACE_CRIT_ENTRY(ce, "Stopped global check\n");
-			}
 			raw_spin_unlock(&entry->lock);
 		}
 	} else /* Partitioned */ {
@@ -583,12 +589,23 @@ static void check_for_preempt(struct domain *dom)
 		dom->peek_ready(dom);
 
 		raw_spin_lock(&entry->lock);
-		if (can_use(ce) && mc_preempt_needed(dom, ce->linked)) {
-			preempt(dom, ce);
-			update_crit_levels(entry);
-		} else {
-			raw_spin_unlock(&entry->lock);
+
+		if (can_use(ce)) {
+			was_ghost = ce->linked && !is_ghost(ce->linked) &&
+				    ce->linked != entry->linked;
+			higher_prio = mc_preempt_needed(dom, ce->linked);
+
+			if (was_ghost)
+				preempt_cpu(entry, ce->linked);
+			else if (higher_prio)
+				preempt_crit(dom, ce);
+
+			if (was_ghost || higher_prio) {
+				update_crit_levels(entry);
+				return;
+			}
 		}
+		raw_spin_unlock(&entry->lock);
 	}
 }
 
@@ -648,6 +665,8 @@ static void remove_from_all(struct task_struct* task)
  */
 static void job_completion(struct task_struct *task, int forced)
 {
+	lt_t now;
+	int release_server;
 	TRACE_MC_TASK(task, "Completed\n");
 
 	/* Logically stop the task execution */
@@ -655,20 +674,34 @@ static void job_completion(struct task_struct *task, int forced)
 	remove_from_all(task);
 
 	if (!forced) {
-		/* Userspace releases */
+		/* Userspace signaled job completion */
 		sched_trace_task_completion(current, 0);
 		setup_user_release(current, get_user_deadline(current));
+
 	}
 
+	release_server = budget_exhausted(task);
+#ifndef CONFIG_PLUGIN_MC_LINUX_SLACK_STEALING
+	/* Release lowest-criticality task's servers with their userspace tasks,
+	 * preventing them from getting behind userspace and forcing idleness
+	 */
+	if (tsk_mc_crit(task) == NUM_CRIT_LEVELS - 1)
+		release_server = 1;
+#endif
 
 	/* If server has run out of budget, wait until next release
 	 * TODO: Level A does this independently and should not.
 	 */
-	if (budget_exhausted(task) && CRIT_LEVEL_A != tsk_mc_crit(task)) {
+	if (release_server && CRIT_LEVEL_A != tsk_mc_crit(task)) {
 		sched_trace_server_completion(-task->pid, get_rt_job(task));
 		prepare_for_next_period(task);
 	}
 
+	now = litmus_clock();
+	if (lt_before(get_user_release(task), now) || forced) {
+		set_rt_flags(task, RT_F_RUNNING);
+	}
+
 	/* Requeue non-blocking tasks */
 	if (is_running(task))
 		job_arrival(task);
@@ -691,7 +724,6 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer)
 	struct task_struct *tmp = NULL;
 
 	local_irq_save(flags);
-	TRACE("Ghost exhausted\n");
 	TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock());
 
 	/* Due to race conditions, we cannot just set the linked
@@ -796,7 +828,6 @@ static enum hrtimer_restart ce_timer_function(struct hrtimer *timer)
 }
 #endif /* CONFIG_MERGE_TIMERS */
 
-
 /**
  * mc_release_jobs() - Add heap of tasks to the system, check for preemptions.
  */
@@ -820,8 +851,12 @@ static void mc_release_jobs(rt_domain_t* rt, struct bheap* tasks)
 static void mc_task_new(struct task_struct *t, int on_rq, int running)
 {
 	unsigned long flags;
+	int i;
 	struct cpu_entry* entry;
 	enum crit_level level = tsk_mc_crit(t);
+	struct dgl_group_req *req;
+	struct control_page *cp = tsk_rt(t)->ctrl_page;
+	struct color_ctrl_page *ccp = tsk_rt(t)->color_ctrl_page;
 
 	local_irq_save(flags);
 	TRACE("New mixed criticality task %d\n", t->pid);
@@ -836,6 +871,18 @@ static void mc_task_new(struct task_struct *t, int on_rq, int running)
 	entry = &per_cpu(cpus, task_cpu(t));
 	t->rt_param._domain = entry->crit_entries[level].domain;
 
+	/* Setup color request */
+	req = kmalloc(sizeof(*req), GFP_ATOMIC);
+	req->task = t;
+	tsk_rt(t)->req = req;
+	if (cp && ccp) {
+		TRACE_MC_TASK(t, "Initializing group request\n");
+		cp->colors_updated = 0;
+		dgl_group_req_init(&group_lock, req);
+		for (i = 0; ccp->pages[i]; ++i)
+			set_req(&group_lock, req, ccp->colors[i], ccp->pages[i]);
+	}
+
 	/* Userspace and kernelspace view of task state may differ.
 	 * Model kernel state as an additional container
 	 */
@@ -1019,6 +1066,16 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 		job_arrival(entry->scheduled);
 	}
 
+	/* Signal sent by lock acquisition */
+	if (entry->lock_acquired < NUM_CRIT_LEVELS) {
+		STRACE("Lock acquired for %d\n", entry->lock_acquired);
+		dom = entry->crit_entries[entry->lock_acquired].domain;
+		raw_spin_lock(dom->lock);
+		check_for_preempt(dom);
+		raw_spin_unlock(dom->lock);
+		entry->lock_acquired = NUM_CRIT_LEVELS;
+	}
+
 	/* Pick next task if none is linked */
 	raw_spin_lock(&entry->lock);
 	for (i = 0; i < NUM_CRIT_LEVELS && !entry->linked; i++) {
@@ -1037,23 +1094,26 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 
 		raw_spin_lock(&entry->lock);
 
-		if (!entry->linked && !ce->linked && dtask && can_use(ce)) {
-			/* Pop dtask */
-			dom->take_ready(dom);
-
-			link_task_to_crit(ce, dtask);
-			update_crit_position(ce);
-
-			/* Actual running task found */
-			ready_task = (is_ghost(dtask)) ? NULL : dtask;
-			if (ready_task) {
-				link_task_to_cpu(entry, ready_task);
-				raw_spin_unlock(dom->lock);
-				update_crit_levels(entry);
-				raw_spin_lock(&entry->lock);
-				continue;
+		ready_task = NULL;
+		if (!entry->linked && can_use(ce)) {
+			if (ce->linked) {
+				ready_task = ce->linked;
+			} else if (dtask) {
+				/* Need a new task */
+				dom->take_ready(dom);
+				ready_task = dtask;
+
+				link_task_to_crit(ce, dtask);
+				update_crit_position(ce);
 			}
 		}
+		if (ready_task && !is_ghost(ready_task)) {
+			link_task_to_cpu(entry, ready_task);
+			raw_spin_unlock(dom->lock);
+			update_crit_levels(entry);
+			raw_spin_lock(&entry->lock);
+			continue;
+		}
 		raw_spin_unlock(dom->lock);
 	}
 
@@ -1070,7 +1130,8 @@ static struct task_struct* mc_schedule(struct task_struct* prev)
 		BUG_ON(!get_rt_job(next));
 		TRACE_MC_TASK(next, "Picked this task\n");
 	} else if (exists && !next)
-		TRACE_ENTRY(entry, "Becomes idle at %llu\n", litmus_clock());
+		STRACE("CPU %d becomes idle at %llu\n",
+		       entry->cpu, litmus_clock());
 	return next;
 }
 
@@ -1173,7 +1234,7 @@ static void mc_release_ts(lt_t time)
 		entry = &per_cpu(cpus, cpu);
 		sched_trace_container_param(++cont_id, (const char*)&name);
 		ce = &entry->crit_entries[level];
-		sched_trace_server_param(sid(ce), cont_id, 0, 0);
+		sched_trace_server_param(ce_sid(ce), cont_id, 0, 0);
 	}
 
 	level = CRIT_LEVEL_B;
@@ -1182,7 +1243,7 @@ static void mc_release_ts(lt_t time)
 		entry = &per_cpu(cpus, cpu);
 		sched_trace_container_param(++cont_id, (const char*)&name);
 		ce = &entry->crit_entries[level];
-		sched_trace_server_param(sid(ce), cont_id, 0, 0);
+		sched_trace_server_param(ce_sid(ce), cont_id, 0, 0);
 	}
 
 	level = CRIT_LEVEL_C;
@@ -1191,7 +1252,7 @@ static void mc_release_ts(lt_t time)
 	for_each_online_cpu(cpu) {
 		entry = &per_cpu(cpus, cpu);
 		ce = &entry->crit_entries[level];
-		sched_trace_server_param(sid(ce), cont_id, 0, 0);
+		sched_trace_server_param(ce_sid(ce), cont_id, 0, 0);
 	}
 
 	mc_ce_release_at_common(NULL, time);
@@ -1291,6 +1352,14 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt,
 #endif
 }
 
+static void cpu_acquired(int cpu)
+{
+	struct cpu_entry *entry = &per_cpu(cpus, cpu);
+	STRACE("Lock acquired for cpu %d\n", cpu);
+	entry->lock_acquired = CRIT_LEVEL_B;
+	litmus_reschedule(entry->cpu);
+}
+
 struct domain_data *ce_domain_for(int);
 static int __init init_mc(void)
 {
@@ -1308,6 +1377,7 @@ static int __init init_mc(void)
 		entry->cpu = cpu;
 		entry->scheduled = NULL;
 		entry->linked = NULL;
+		entry->lock_acquired = NUM_CRIT_LEVELS;
 
 		raw_spin_lock_init(&entry->lock);
 
@@ -1333,6 +1403,8 @@ static int __init init_mc(void)
 		rt = &per_cpu(_mc_crit_b_rt, cpu);
 		init_local_domain(entry, dom_data, CRIT_LEVEL_B);
 		init_edf_domain(&dom_data->domain, rt, CRIT_LEVEL_B, 1, cpu);
+		dom_data->domain.acquire_resources = acquire_resources;
+		dom_data->domain.release_resources = release_resources;
 		b_dom_lock = dom_data->domain.lock;
 		raw_spin_lock_init(b_dom_lock);
 		dom_data->domain.name = "LVL-B";
@@ -1347,6 +1419,11 @@ static int __init init_mc(void)
 	raw_spin_lock_init(c_dom_lock);
 	_mc_crit_c.domain.name = "LVL-C";
 
+	dgl_init(&group_lock, color_cache_info.nr_colors,
+		 color_cache_info.ways);
+	group_lock.cpu_acquired = cpu_acquired;
+	raw_spin_lock_init(&dgl_lock);
+
 	return register_sched_plugin(&mc_plugin);
 }
 