author     Jonathan Herman <hermanjl@cs.unc.edu>   2012-10-12 01:59:42 -0400
committer  Jonathan Herman <hermanjl@cs.unc.edu>   2012-10-12 01:59:42 -0400
commit     12826089e20a61cc5afe2bf1108f561952ec7f9a (patch)
tree       f014e6a01733a188bb6a9831717d1e8f8423d92e /litmus
parent     bc4e46db66fc4470806f570d02982eedae140c4c (diff)
Added preemptive locking.
Currently it always preempts; this needs to be made configurable via proc.
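
For reference, the preemption decision added to litmus/dgl.c boils down to: a request may bump lower-priority holders of a resource when their replicas, together with the free ones, cover its demand. Below is a minimal, self-contained sketch of that test in plain C; struct greq, struct req, and can_bump() are simplified stand-ins for the kernel types in litmus/dgl.c, not the actual definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the dgl structures; field names mirror
 * litmus/dgl.c but these are not the kernel definitions. */
struct greq {
	unsigned long long priority;	/* lower value = higher priority */
	int pid;			/* tie-breaker, as in higher_priority() */
};

struct req {
	struct greq *greq;
	int replicas;			/* replicas this request needs/holds */
};

/* Mirrors higher_priority() from the patch: smaller priority value wins,
 * ties broken by pid. */
static bool higher_priority(const struct greq *a, const struct greq *b)
{
	return a->priority < b->priority ||
	       (a->priority == b->priority && a->pid < b->pid);
}

/* Core test of try_bump_lp(): can @bumper be satisfied by preempting
 * lower-priority holders?  @holders is assumed sorted highest-priority
 * first, as add_req() keeps the acquired list in the patch; the scan
 * stops at the first holder that outranks @bumper. */
static bool can_bump(const struct req *bumper, struct req *const *holders,
		     int nholders, int free_replicas)
{
	int lower = 0, i;

	if (free_replicas >= bumper->replicas)
		return true;			/* no preemption needed */

	for (i = 0; i < nholders; i++) {
		if (higher_priority(holders[i]->greq, bumper->greq))
			break;			/* cannot bump a higher-priority holder */
		lower += holders[i]->replicas;
		if (lower + free_replicas >= bumper->replicas)
			return true;		/* bumping these holders suffices */
	}
	return false;
}

int main(void)
{
	struct greq g_lo = { .priority = 20, .pid = 2 };	/* later deadline */
	struct greq g_hi = { .priority = 10, .pid = 1 };	/* earlier deadline */
	struct req holder = { .greq = &g_lo, .replicas = 2 };
	struct req bumper = { .greq = &g_hi, .replicas = 3 };
	struct req *holders[] = { &holder };

	/* 1 free replica plus 2 bumpable replicas cover a demand of 3 */
	printf("can bump: %d\n", can_bump(&bumper, holders, 1, 1));
	return 0;
}

In the patch itself, the priority value comes from dgl->assign_priority: timestamp_prio() (FIFO order) by default, or deadline_prio() once sched_mc.c installs it, in which case a lower-priority holder is simply one with a later deadline.
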
Diffstat (limited to 'litmus')
-rw-r--r--  litmus/color_dev.c      2
-rw-r--r--  litmus/dgl.c          347
-rw-r--r--  litmus/sched_color.c    3
-rw-r--r--  litmus/sched_mc.c     416
4 files changed, 550 insertions, 218 deletions
diff --git a/litmus/color_dev.c b/litmus/color_dev.c
index 6287d14405e8..df43c9f1e37d 100644
--- a/litmus/color_dev.c
+++ b/litmus/color_dev.c
@@ -7,7 +7,7 @@ | |||
7 | #include <linux/highmem.h> | 7 | #include <linux/highmem.h> |
8 | #include <asm/io.h> /* page_to_phys on SPARC */ | 8 | #include <asm/io.h> /* page_to_phys on SPARC */ |
9 | 9 | ||
10 | #include <asm/glue-cache.h> | 10 | /* #include <asm/glue-cache.h> */ |
11 | 11 | ||
12 | #include <litmus/litmus.h> | 12 | #include <litmus/litmus.h> |
13 | #include <litmus/color.h> | 13 | #include <litmus/color.h> |
diff --git a/litmus/dgl.c b/litmus/dgl.c
index 7331855d43f7..065a747f7dce 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -28,10 +28,13 @@ | |||
28 | #define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \ | 28 | #define req_group(r, i) (container_of(((void*)r) - sizeof(*r)*(i), \ |
29 | struct dgl_group_req, requests)) | 29 | struct dgl_group_req, requests)) |
30 | 30 | ||
31 | /* Server ID for trace records */ | ||
32 | #define cpu_sid(dgl, cpu) (dgl->sid_start + cpu) | ||
33 | |||
31 | #define TRACE(fmt, args...) STRACE(fmt, ## args) | 34 | #define TRACE(fmt, args...) STRACE(fmt, ## args) |
32 | #define TRACE_GREQ(greq, fmt, args...) \ | 35 | #define TRACE_GREQ(greq, fmt, args...) \ |
33 | TRACE("(greq-%s/%d) " fmt, (greq->task ? greq->task->comm : "greq"), \ | 36 | TRACE("(greq-%s/%llu) " fmt, (greq->task ? greq->task->comm : "greq"), \ |
34 | (greq->task ? greq->task->pid : (int)greq), ## args) | 37 | (greq->task ? greq->task->pid : (unsigned long long)greq), ## args) |
35 | 38 | ||
36 | /* Resource id -> word, bit */ | 39 | /* Resource id -> word, bit */ |
37 | static inline void mask_idx(int resource, int *word, int *bit) | 40 | static inline void mask_idx(int resource, int *word, int *bit) |
@@ -40,6 +43,13 @@ static inline void mask_idx(int resource, int *word, int *bit) | |||
40 | *bit = resource % MASK_SIZE; | 43 | *bit = resource % MASK_SIZE; |
41 | } | 44 | } |
42 | 45 | ||
46 | static int higher_priority(struct dgl_group_req *a, struct dgl_group_req *b) | ||
47 | { | ||
48 | /* TODO: will break without tasks */ | ||
49 | return (a->priority < b->priority || | ||
50 | (a->priority == b->priority && (a->task->pid < b->task->pid))); | ||
51 | } | ||
52 | |||
43 | static void print_waiting(struct dgl *dgl, struct dgl_resource *resource) | 53 | static void print_waiting(struct dgl *dgl, struct dgl_resource *resource) |
44 | { | 54 | { |
45 | struct dgl_req *pos; | 55 | struct dgl_req *pos; |
@@ -49,13 +59,223 @@ static void print_waiting(struct dgl *dgl, struct dgl_resource *resource) | |||
49 | TRACE("List for rid %d\n", resource_id(dgl, resource)); | 59 | TRACE("List for rid %d\n", resource_id(dgl, resource)); |
50 | list_for_each_entry(pos, &resource->waiting, list) { | 60 | list_for_each_entry(pos, &resource->waiting, list) { |
51 | greq = pos->greq; | 61 | greq = pos->greq; |
52 | TRACE_GREQ(greq, "with timestamp %llu\n", greq->ts); | 62 | TRACE_GREQ(greq, "with priority %llu\n", greq->priority); |
53 | BUG_ON(greq->ts < last); | 63 | last = greq->priority; |
54 | last = greq->ts; | 64 | } |
65 | } | ||
66 | |||
67 | /* | ||
68 | * Add @req to @list in priority order (higher numbers are lower priority). | ||
69 | */ | ||
70 | static void add_req(struct dgl_req *req, struct list_head *list) | ||
71 | { | ||
72 | struct dgl_req *pos; | ||
73 | struct list_head *last = list; | ||
74 | int check = 0; | ||
75 | |||
76 | BUG_ON(in_list(&req->list)); | ||
77 | |||
78 | list_for_each_entry(pos, list, list) { | ||
79 | BUG_ON(pos == req); | ||
80 | if (!higher_priority(pos->greq, req->greq)) { | ||
81 | break; | ||
82 | } | ||
83 | last = &pos->list; | ||
84 | |||
85 | BUG_ON(check++ > 10); | ||
86 | } | ||
87 | list_add(&req->list, last); | ||
88 | } | ||
89 | |||
90 | static void release_cpu(struct dgl *dgl, struct dgl_group_req *greq) | ||
91 | { | ||
92 | int cpu = greq->cpu; | ||
93 | struct task_struct *task = greq->task; | ||
94 | |||
95 | TRACE_GREQ(greq, "no longer acquired on CPU %d\n", cpu); | ||
96 | BUG_ON(dgl->acquired[cpu] != greq); | ||
97 | |||
98 | dgl->acquired[cpu] = NULL; | ||
99 | --dgl->running; | ||
100 | |||
101 | /* if (task) { */ | ||
102 | /* sched_trace_server_switch_away(cpu_sid(dgl, cpu), 0, */ | ||
103 | /* -task->pid, get_rt_job(task)); */ | ||
104 | /* } */ | ||
105 | } | ||
106 | |||
107 | static void take_cpu(struct dgl *dgl, struct dgl_group_req *greq) | ||
108 | { | ||
109 | int cpu = greq->cpu; | ||
110 | struct task_struct *task = greq->task; | ||
111 | |||
112 | TRACE_GREQ(greq, "Taking cpu %d\n", cpu); | ||
113 | BUG_ON(dgl->acquired[cpu]); | ||
114 | |||
115 | dgl->acquired[cpu] = greq; | ||
116 | ++dgl->running; | ||
117 | |||
118 | /* if (task) { */ | ||
119 | /* sched_trace_server_switch_to(cpu_sid(dgl, cpu), 0, */ | ||
120 | /* -task->pid, get_rt_job(task)); */ | ||
121 | /* } */ | ||
122 | } | ||
123 | |||
124 | static int try_bump_lp(struct dgl *dgl, struct dgl_resource *resource, | ||
125 | struct dgl_req *req) | ||
126 | { | ||
127 | int lower = 0, word, bit, rid; | ||
128 | struct dgl_req *acquired, *tmp; | ||
129 | struct dgl_group_req *greq, *bumper = req->greq; | ||
130 | |||
131 | rid = resource_id(dgl, resource); | ||
132 | mask_idx(rid, &word, &bit); | ||
133 | |||
134 | TRACE_GREQ(bumper, "Trying to bump with priority %llu on resource with %d free\n", | ||
135 | bumper->priority, resource->free_replicas); | ||
136 | |||
137 | BUG_ON(resource->free_replicas > req->replicas); | ||
138 | |||
139 | /* Find lower-priority holders */ | ||
140 | list_for_each_entry(acquired, &resource->acquired, list) { | ||
141 | BUG_ON(acquired == req); | ||
142 | greq = acquired->greq; | ||
143 | |||
144 | if (higher_priority(greq, bumper) || | ||
145 | lower + resource->free_replicas >= req->replicas) { | ||
146 | TRACE_GREQ(greq, "Has higher priority %llu or lower good: %d\n", | ||
147 | greq->priority, lower); | ||
148 | break; | ||
149 | } else { | ||
150 | TRACE_GREQ(greq, "Getting bumped with prio %llu\n", greq->priority); | ||
151 | } | ||
152 | lower += acquired->replicas; | ||
153 | } | ||
154 | |||
155 | if (lower + resource->free_replicas < req->replicas) { | ||
156 | TRACE_GREQ(bumper, "Failed to acquire replicas\n"); | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | BUG_ON(!lower); | ||
161 | |||
162 | /* Bump 'em */ | ||
163 | resource->free_replicas += lower; | ||
164 | |||
165 | BUG_ON(resource->free_replicas > dgl->num_replicas); | ||
166 | TRACE_GREQ(bumper, "Freeing %d replicas for total of %d free\n", | ||
167 | lower, resource->free_replicas); | ||
168 | |||
169 | list_for_each_entry_safe(acquired, tmp, &resource->acquired, list) { | ||
170 | greq = acquired->greq; | ||
171 | |||
172 | TRACE_GREQ(greq, "Bumped\n"); | ||
173 | |||
174 | /* Now in waiting list */ | ||
175 | list_del_init(&acquired->list); | ||
176 | add_req(acquired, &resource->waiting); | ||
177 | |||
178 | BUG_ON(!acquired->satisfied); | ||
179 | acquired->satisfied = 0; | ||
180 | |||
181 | lower -= acquired->replicas; | ||
182 | |||
183 | __set_bit(bit, &greq->waiting[word]); | ||
184 | if (dgl->acquired[greq->cpu] == greq) { | ||
185 | release_cpu(dgl, greq); | ||
186 | dgl->cpu_preempted(greq); | ||
187 | } | ||
188 | |||
189 | if (!lower) | ||
190 | break; | ||
55 | } | 191 | } |
192 | |||
193 | return 1; | ||
56 | } | 194 | } |
57 | 195 | ||
58 | static void dummy_acquired(int cpu){} | 196 | /* |
197 | * Attempt to fulfill request @req for @resource. | ||
198 | * Return 1 if successful. If the matching group request has acquired all of | ||
199 | * its needed resources, this will then set that req as dgl->acquired[cpu]. | ||
200 | */ | ||
201 | static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, | ||
202 | struct dgl_req *req) | ||
203 | { | ||
204 | int word, bit, rid, room; | ||
205 | unsigned long waiting; | ||
206 | struct dgl_group_req *greq; | ||
207 | |||
208 | rid = resource_id(dgl, resource); | ||
209 | greq = req->greq; | ||
210 | |||
211 | TRACE_GREQ(greq, "try acquire\n"); | ||
212 | |||
213 | room = resource->free_replicas >= req->replicas; | ||
214 | if (!room) room = try_bump_lp(dgl, resource, req); | ||
215 | |||
216 | if (!room) { | ||
217 | TRACE_GREQ(greq, "cannot acquire %d replicas, %d free\n", | ||
218 | req->replicas, resource->free_replicas); | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | resource->free_replicas -= req->replicas; | ||
223 | |||
224 | BUG_ON(resource->free_replicas < 0 || resource->free_replicas > dgl->num_replicas); | ||
225 | |||
226 | /* Move from waiting to acquired */ | ||
227 | req->satisfied = 1; | ||
228 | list_del_init(&req->list); | ||
229 | add_req(req, &resource->acquired); | ||
230 | |||
231 | TRACE_GREQ(greq, "acquired %d replicas of rid %d\n", | ||
232 | req->replicas, rid); | ||
233 | |||
234 | mask_idx(rid, &word, &bit); | ||
235 | clear_bit(bit, &greq->waiting[word]); | ||
236 | |||
237 | /* Check for other waiting resources */ | ||
238 | waiting = 0; | ||
239 | for (word = 0; word < MASK_WORDS(dgl); word++) { | ||
240 | waiting |= greq->waiting[word]; | ||
241 | if (waiting) | ||
242 | break; | ||
243 | } | ||
244 | |||
245 | if (!waiting) { | ||
246 | TRACE_GREQ(greq, "acquired all resources on cpu %d\n", greq->cpu); | ||
247 | BUG_ON(dgl->acquired[greq->cpu]); | ||
248 | |||
249 | take_cpu(dgl, greq); | ||
250 | dgl->cpu_acquired(greq); | ||
251 | } | ||
252 | |||
253 | return 1; | ||
254 | } | ||
255 | |||
256 | static void try_pull_hp(struct dgl *dgl, struct dgl_resource *resource) | ||
257 | { | ||
258 | int succ; | ||
259 | struct dgl_req *waiting, *tmp; | ||
260 | |||
261 | TRACE("Pulling into resource\n"); | ||
262 | |||
263 | list_for_each_entry_safe(waiting, tmp, &resource->waiting, list) { | ||
264 | if (!resource->free_replicas) { | ||
265 | TRACE("Quitting with no more free replicas\n"); | ||
266 | break; | ||
267 | } | ||
268 | succ = try_acquire(dgl, resource, waiting); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | static void dummy_acquired(struct dgl_group_req *greq){} | ||
273 | static void dummy_preempted(struct dgl_group_req *greq){} | ||
274 | |||
275 | static unsigned long long timestamp_prio (struct dgl *dgl, struct dgl_group_req *greq) | ||
276 | { | ||
277 | return dgl->ts++; | ||
278 | } | ||
59 | 279 | ||
60 | void dgl_init(struct dgl *dgl, unsigned long num_resources, | 280 | void dgl_init(struct dgl *dgl, unsigned long num_resources, |
61 | unsigned long num_replicas) | 281 | unsigned long num_replicas) |
@@ -78,6 +298,7 @@ void dgl_init(struct dgl *dgl, unsigned long num_resources, | |||
78 | resource = &dgl->resources[i]; | 298 | resource = &dgl->resources[i]; |
79 | 299 | ||
80 | INIT_LIST_HEAD(&resource->waiting); | 300 | INIT_LIST_HEAD(&resource->waiting); |
301 | INIT_LIST_HEAD(&resource->acquired); | ||
81 | resource->free_replicas = dgl->num_replicas; | 302 | resource->free_replicas = dgl->num_replicas; |
82 | } | 303 | } |
83 | 304 | ||
@@ -85,6 +306,9 @@ void dgl_init(struct dgl *dgl, unsigned long num_resources, | |||
85 | dgl->running = 0; | 306 | dgl->running = 0; |
86 | dgl->ts = 0; | 307 | dgl->ts = 0; |
87 | dgl->cpu_acquired = dummy_acquired; | 308 | dgl->cpu_acquired = dummy_acquired; |
309 | dgl->cpu_preempted = dummy_preempted; | ||
310 | dgl->assign_priority = timestamp_prio; | ||
311 | dgl->sid_start = 0; | ||
88 | } | 312 | } |
89 | 313 | ||
90 | void dgl_free(struct dgl *dgl) | 314 | void dgl_free(struct dgl *dgl) |
@@ -140,69 +364,13 @@ void set_req(struct dgl *dgl, struct dgl_group_req *greq, | |||
140 | TRACE_GREQ(greq, "requesting %d of %d\n", replicas, resource); | 364 | TRACE_GREQ(greq, "requesting %d of %d\n", replicas, resource); |
141 | 365 | ||
142 | req = &greq->requests[resource]; | 366 | req = &greq->requests[resource]; |
367 | |||
143 | req->greq = greq; | 368 | req->greq = greq; |
144 | INIT_LIST_HEAD(&req->list); | ||
145 | req->replicas = replicas; | 369 | req->replicas = replicas; |
146 | } | 370 | req->satisfied = 0; |
147 | |||
148 | /* | ||
149 | * Attempt to fulfill request @req for @resource. | ||
150 | * Return 1 if successful. If the matching group request has acquired all of | ||
151 | * its needed resources, this will then set that req as dgl->acquired[cpu]. | ||
152 | */ | ||
153 | static unsigned long try_acquire(struct dgl *dgl, struct dgl_resource *resource, | ||
154 | struct dgl_req *req) | ||
155 | { | ||
156 | int word, bit, rid, head, empty, room; | ||
157 | unsigned long waiting; | ||
158 | struct dgl_group_req *greq; | ||
159 | 371 | ||
160 | rid = resource_id(dgl, resource); | 372 | INIT_LIST_HEAD(&req->list); |
161 | greq = req->greq; | 373 | BUG_ON(in_list(&req->list)); |
162 | |||
163 | TRACE_GREQ(greq, "try acquire\n"); | ||
164 | |||
165 | head = resource->waiting.next == &req->list; | ||
166 | empty = list_empty(&resource->waiting); | ||
167 | room = resource->free_replicas >= req->replicas; | ||
168 | |||
169 | if (! (room && (head || empty)) ) { | ||
170 | TRACE_GREQ(greq, "cannot acquire %d replicas, %d free\n", | ||
171 | req->replicas, resource->free_replicas, | ||
172 | room, head, empty); | ||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | resource->free_replicas -= req->replicas; | ||
177 | |||
178 | TRACE_GREQ(greq, "0x%p acquired %d replicas of rid %d\n", | ||
179 | req->replicas, rid); | ||
180 | |||
181 | mask_idx(rid, &word, &bit); | ||
182 | |||
183 | TRACE_GREQ(greq, "0x%p, %lu, 0x%p\n", greq->waiting, greq->waiting[word], | ||
184 | &greq->waiting[word]); | ||
185 | |||
186 | clear_bit(bit, &greq->waiting[word]); | ||
187 | |||
188 | waiting = 0; | ||
189 | for (word = 0; word < MASK_WORDS(dgl); word++) { | ||
190 | waiting |= greq->waiting[word]; | ||
191 | if (waiting) | ||
192 | break; | ||
193 | } | ||
194 | |||
195 | if (!waiting) { | ||
196 | TRACE_GREQ(greq, "acquired all resources on cpu %d\n", greq->cpu); | ||
197 | BUG_ON(dgl->acquired[greq->cpu]); | ||
198 | dgl->acquired[greq->cpu] = greq; | ||
199 | litmus_reschedule(greq->cpu); | ||
200 | dgl->running++; | ||
201 | |||
202 | dgl->cpu_acquired(greq->cpu); | ||
203 | } | ||
204 | |||
205 | return 1; | ||
206 | } | 374 | } |
207 | 375 | ||
208 | /** | 376 | /** |
@@ -215,7 +383,8 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu) | |||
215 | struct dgl_resource *resource; | 383 | struct dgl_resource *resource; |
216 | 384 | ||
217 | greq->cpu = cpu; | 385 | greq->cpu = cpu; |
218 | greq->ts = dgl->ts++; | 386 | |
387 | greq->priority = dgl->assign_priority(dgl, greq); | ||
219 | 388 | ||
220 | TRACE_GREQ(greq, "group request added for CPU %d\n", cpu); | 389 | TRACE_GREQ(greq, "group request added for CPU %d\n", cpu); |
221 | BUG_ON(dgl->acquired[cpu] == greq); | 390 | BUG_ON(dgl->acquired[cpu] == greq); |
@@ -230,20 +399,27 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu) | |||
230 | req = &greq->requests[i]; | 399 | req = &greq->requests[i]; |
231 | resource = &dgl->resources[i]; | 400 | resource = &dgl->resources[i]; |
232 | 401 | ||
402 | TRACE_GREQ(greq, "Request %d has list 0x%p\n", i, &req->list); | ||
403 | |||
404 | if (in_list(&req->list)) { | ||
405 | TRACE_GREQ(greq, "0x%p -> Request %d -> 0x%p\n", | ||
406 | req->list.prev, i, req->list.next); | ||
407 | list_del_init(&req->list); | ||
408 | } | ||
409 | BUG_ON(in_list(&req->list)); | ||
410 | |||
233 | succ = try_acquire(dgl, resource, req); | 411 | succ = try_acquire(dgl, resource, req); |
234 | all_succ &= succ; | 412 | all_succ &= succ; |
235 | 413 | ||
236 | if (!succ) { | 414 | if (!succ) { |
237 | TRACE_GREQ(greq, "waiting on rid %d\n", i); | 415 | TRACE_GREQ(greq, "waiting on rid %d\n", i); |
238 | list_add_tail(&req->list, &resource->waiting); | 416 | add_req(req, &resource->waiting); |
239 | } | 417 | } |
240 | } | 418 | } |
241 | 419 | ||
242 | /* Grant empty requests */ | 420 | /* Grant empty requests */ |
243 | if (all_succ && !dgl->acquired[cpu]) { | 421 | if (all_succ && !dgl->acquired[cpu]) { |
244 | TRACE_GREQ(greq, "empty group request acquired cpu %d\n", cpu); | 422 | take_cpu(dgl, greq); |
245 | dgl->acquired[cpu] = greq; | ||
246 | ++dgl->running; | ||
247 | } | 423 | } |
248 | 424 | ||
249 | BUG_ON(dgl->requests && !dgl->running); | 425 | BUG_ON(dgl->requests && !dgl->running); |
@@ -257,27 +433,27 @@ void add_group_req(struct dgl *dgl, struct dgl_group_req *greq, int cpu) | |||
257 | void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) | 433 | void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) |
258 | { | 434 | { |
259 | int b, w, i; | 435 | int b, w, i; |
260 | struct dgl_req *req, *next; | 436 | struct dgl_req *req; |
261 | struct dgl_resource *resource; | 437 | struct dgl_resource *resource; |
262 | 438 | ||
263 | TRACE_GREQ(greq, "removing group request for CPU %d\n", greq, greq->cpu); | 439 | TRACE_GREQ(greq, "removing group request for CPU %d\n", greq->cpu); |
264 | 440 | ||
265 | --dgl->requests; | 441 | --dgl->requests; |
266 | 442 | ||
267 | if (dgl->acquired[greq->cpu] == greq) { | 443 | if (dgl->acquired[greq->cpu] == greq) { |
268 | TRACE_GREQ(greq, "no longer acquired on CPU %d\n", greq->cpu); | 444 | release_cpu(dgl, greq); |
269 | dgl->acquired[greq->cpu] = NULL; | ||
270 | --dgl->running; | ||
271 | } | 445 | } |
272 | 446 | ||
273 | for_each_resource(greq->requested, dgl, w, b, i) { | 447 | for_each_resource(greq->requested, dgl, w, b, i) { |
274 | req = &greq->requests[i]; | 448 | req = &greq->requests[i]; |
275 | resource = &dgl->resources[i]; | 449 | resource = &dgl->resources[i]; |
276 | 450 | ||
277 | if (!list_empty(&req->list)) { | 451 | /* Remove from waiting or acquired list */ |
452 | list_del_init(&req->list); | ||
453 | |||
454 | if (!req->satisfied) { | ||
278 | /* Waiting on resource */ | 455 | /* Waiting on resource */ |
279 | clear_bit(b, &greq->waiting[w]); | 456 | clear_bit(b, &greq->waiting[w]); |
280 | list_del_init(&req->list); | ||
281 | TRACE("Quitting 0x%p from rid %d\n", | 457 | TRACE("Quitting 0x%p from rid %d\n", |
282 | req, i); | 458 | req, i); |
283 | } else { | 459 | } else { |
@@ -286,19 +462,12 @@ void remove_group_req(struct dgl *dgl, struct dgl_group_req *greq) | |||
286 | BUG_ON(resource->free_replicas > dgl->num_replicas); | 462 | BUG_ON(resource->free_replicas > dgl->num_replicas); |
287 | TRACE_GREQ(greq, "releasing %d of %d replicas, rid %d\n", | 463 | TRACE_GREQ(greq, "releasing %d of %d replicas, rid %d\n", |
288 | req->replicas, resource->free_replicas, i); | 464 | req->replicas, resource->free_replicas, i); |
465 | req->satisfied = 0; | ||
289 | 466 | ||
290 | if (!list_empty(&resource->waiting)) { | 467 | if (!list_empty(&resource->waiting)) { |
291 | /* Give it to the next guy */ | 468 | try_pull_hp(dgl, resource); |
292 | next = list_first_entry(&resource->waiting, | 469 | } else if (list_empty(&resource->acquired)) { |
293 | struct dgl_req, | 470 | BUG_ON(resource->free_replicas != dgl->num_replicas); |
294 | list); | ||
295 | |||
296 | BUG_ON(next->greq->ts < greq->ts); | ||
297 | |||
298 | if (try_acquire(dgl, resource, next)) { | ||
299 | list_del_init(&next->list); | ||
300 | print_waiting(dgl, resource); | ||
301 | } | ||
302 | } | 471 | } |
303 | } | 472 | } |
304 | } | 473 | } |
diff --git a/litmus/sched_color.c b/litmus/sched_color.c
index 28a7cfa401b4..f7e4be700548 100644
--- a/litmus/sched_color.c
+++ b/litmus/sched_color.c
@@ -64,7 +64,8 @@ static raw_spinlock_t dgl_lock; | |||
64 | #define is_fifo_server(s) ((s)->sid > num_online_cpus()) | 64 | #define is_fifo_server(s) ((s)->sid > num_online_cpus()) |
65 | #define lock_if(lock, cond) do { if (cond) raw_spin_lock(lock);} while(0) | 65 | #define lock_if(lock, cond) do { if (cond) raw_spin_lock(lock);} while(0) |
66 | #define unlock_if(lock, cond) do { if (cond) raw_spin_unlock(lock);} while(0) | 66 | #define unlock_if(lock, cond) do { if (cond) raw_spin_unlock(lock);} while(0) |
67 | 67 | #define crit_cpu(ce) \ | |
68 | (container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries)) | ||
68 | #ifdef CONFIG_NP_SECTION | 69 | #ifdef CONFIG_NP_SECTION |
69 | #define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) | 70 | #define has_resources(t, c) (tsk_rt(t)->req == group_lock.acquired[c]) |
70 | #else | 71 | #else |
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 0ee7be46f1ee..14d41701ce68 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -28,6 +28,15 @@ | |||
28 | #include <litmus/ce_domain.h> | 28 | #include <litmus/ce_domain.h> |
29 | #include <litmus/dgl.h> | 29 | #include <litmus/dgl.h> |
30 | 30 | ||
31 | |||
32 | /** | ||
33 | * Signal triggerred via lock event. | ||
34 | */ | ||
35 | struct lock_signal { | ||
36 | enum crit_level level; | ||
37 | enum { NONE=0, ACQUIRED, PREEMPTED} type; | ||
38 | }; | ||
39 | |||
31 | /** | 40 | /** |
32 | * struct cpu_entry - State of a CPU for the entire MC system | 41 | * struct cpu_entry - State of a CPU for the entire MC system |
33 | * @cpu CPU id | 42 | * @cpu CPU id |
@@ -41,11 +50,12 @@ | |||
41 | */ | 50 | */ |
42 | struct cpu_entry { | 51 | struct cpu_entry { |
43 | int cpu; | 52 | int cpu; |
44 | enum crit_level crit_signal; | ||
45 | struct task_struct* scheduled; | 53 | struct task_struct* scheduled; |
46 | struct task_struct* will_schedule; | 54 | struct task_struct* will_schedule; |
47 | struct task_struct* linked; | 55 | struct task_struct* linked; |
56 | struct lock_signal signal; | ||
48 | raw_spinlock_t lock; | 57 | raw_spinlock_t lock; |
58 | raw_spinlock_t signal_lock; | ||
49 | struct crit_entry crit_entries[NUM_CRIT_LEVELS]; | 59 | struct crit_entry crit_entries[NUM_CRIT_LEVELS]; |
50 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 60 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
51 | struct list_head redir; | 61 | struct list_head redir; |
@@ -74,16 +84,78 @@ static int interrupt_cpu; | |||
74 | #define is_global(dom) (domain_data(dom)->heap) | 84 | #define is_global(dom) (domain_data(dom)->heap) |
75 | #define is_global_task(t) (is_global(get_task_domain(t))) | 85 | #define is_global_task(t) (is_global(get_task_domain(t))) |
76 | #define can_use(ce) \ | 86 | #define can_use(ce) \ |
77 | ((ce)->state == CS_ACTIVE || (ce->state == CS_ACTIVATE)) | 87 | ((ce)->state != CS_REMOVE && (ce)->state != CS_REMOVED) |
78 | #define can_requeue(t) \ | 88 | #define can_requeue(t) \ |
79 | ((t)->rt_param.linked_on == NO_CPU && /* Not linked anywhere */ \ | 89 | ((t)->rt_param.linked_on == NO_CPU && /* Not linked anywhere */ \ |
80 | !is_queued(t) && /* Not gonna be linked */ \ | 90 | !is_queued(t) && /* Not gonna be linked */ \ |
81 | (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU)) | 91 | (!is_global_task(t) || (t)->rt_param.scheduled_on == NO_CPU)) |
82 | #define entry_level(e) \ | 92 | #define entry_level(e) \ |
83 | (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1) | 93 | (((e)->linked) ? tsk_mc_crit((e)->linked) : NUM_CRIT_LEVELS - 1) |
94 | #define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level]) | ||
84 | #define crit_cpu(ce) \ | 95 | #define crit_cpu(ce) \ |
85 | (container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries)) | 96 | (container_of((void*)((ce) - (ce)->level), struct cpu_entry, crit_entries)) |
86 | #define get_crit_entry_for(cpu, level) (&per_cpu(cpus, cpu).crit_entries[level]) | 97 | |
98 | |||
99 | #ifdef CONFIG_PREEMPT_STATE_TRACE | ||
100 | #define NCS(x) case x: return #x | ||
101 | #define ce_partition(ce) (crit_cpu(ce)->cpu + 1) | ||
102 | #define ce_st(ce, cpu) sched_trace_server_switch_to(cpu, 0, ce_sid(ce), 0) | ||
103 | #define ce_sa(ce, cpu) sched_trace_server_switch_away(cpu, 0, ce_sid(ce), 0) | ||
104 | |||
105 | static inline const char* ce_state_name(enum crit_state state) | ||
106 | { | ||
107 | switch(state) { | ||
108 | NCS(CS_ACTIVE);NCS(CS_ACTIVATE); | ||
109 | NCS(CS_REMOVE);NCS(CS_REMOVED); | ||
110 | NCS(CS_BLOCKED); default: return "UNKNOWN"; | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static inline void ce_state_change(struct crit_entry *ce, enum crit_state state) | ||
115 | { | ||
116 | switch (ce->state) { | ||
117 | case CS_BLOCKED: | ||
118 | sched_trace_server_resume(ce_sid(ce)); | ||
119 | break; | ||
120 | case CS_ACTIVE: | ||
121 | if (state == CS_ACTIVATE) return; | ||
122 | BUG_ON(state == CS_ACTIVATE); | ||
123 | ce_sa(ce, ce_partition(ce)); | ||
124 | break; | ||
125 | case CS_ACTIVATE: | ||
126 | case CS_REMOVE: | ||
127 | if (state == CS_REMOVE || state == CS_ACTIVATE) | ||
128 | sched_trace_server_completion(ce_sid(ce), 0); | ||
129 | else | ||
130 | ce_sa(ce, 0); | ||
131 | break; | ||
132 | case CS_REMOVED: | ||
133 | if (state == CS_REMOVE) return; | ||
134 | break; | ||
135 | }; | ||
136 | |||
137 | switch (state) { | ||
138 | case CS_BLOCKED: | ||
139 | sched_trace_server_block(ce_sid(ce)); | ||
140 | break; | ||
141 | case CS_ACTIVE: | ||
142 | ce_st(ce, ce_partition(ce)); | ||
143 | break; | ||
144 | case CS_ACTIVATE: | ||
145 | case CS_REMOVE: | ||
146 | if (ce->state != CS_REMOVE && ce->state != CS_ACTIVATE) | ||
147 | ce_st(ce, 0); | ||
148 | break; | ||
149 | } | ||
150 | |||
151 | TRACE_CRIT_ENTRY((ce), "(%s)->(%s)\n", | ||
152 | ce_state_name((ce)->state), | ||
153 | ce_state_name(state)); | ||
154 | (ce)->state = state; | ||
155 | } | ||
156 | #else | ||
157 | #define ce_state_change(ce, next) do{(ce)->state = next}while(0) | ||
158 | #endif | ||
87 | 159 | ||
88 | /* | 160 | /* |
89 | * Put in requests for resources needed by @t. | 161 | * Put in requests for resources needed by @t. |
@@ -96,10 +168,10 @@ static int acquire_resources(struct task_struct *t) | |||
96 | BUG_ON(tsk_rt(t)->linked_on == NO_CPU); | 168 | BUG_ON(tsk_rt(t)->linked_on == NO_CPU); |
97 | 169 | ||
98 | raw_spin_lock(&dgl_lock); | 170 | raw_spin_lock(&dgl_lock); |
99 | if (is_kernel_np(t)) { | 171 | /* if (is_kernel_np(t)) { */ |
100 | TRACE_MC_TASK(t, "Already contending for resources\n"); | 172 | /* TRACE_MC_TASK(t, "Already contending for resources\n"); */ |
101 | return; | 173 | /* return; */ |
102 | } | 174 | /* } */ |
103 | 175 | ||
104 | cpu = tsk_rt(t)->linked_on; | 176 | cpu = tsk_rt(t)->linked_on; |
105 | 177 | ||
@@ -108,7 +180,7 @@ static int acquire_resources(struct task_struct *t) | |||
108 | sched_trace_task_block(t); | 180 | sched_trace_task_block(t); |
109 | TRACE_MC_TASK(t, "Blocked at %llu\n", litmus_clock()); | 181 | TRACE_MC_TASK(t, "Blocked at %llu\n", litmus_clock()); |
110 | add_group_req(&group_lock, tsk_rt(t)->req, cpu); | 182 | add_group_req(&group_lock, tsk_rt(t)->req, cpu); |
111 | make_np(t); | 183 | /* make_np(t); */ |
112 | } | 184 | } |
113 | 185 | ||
114 | acquired = has_resources(t, cpu); | 186 | acquired = has_resources(t, cpu); |
@@ -119,20 +191,23 @@ static int acquire_resources(struct task_struct *t) | |||
119 | 191 | ||
120 | static void release_resources(struct task_struct *t) | 192 | static void release_resources(struct task_struct *t) |
121 | { | 193 | { |
122 | if (is_kernel_np(t)) { | 194 | /* if (is_kernel_np(t)) { */ |
123 | TRACE_MC_TASK(t, "Releasing resources\n"); | 195 | TRACE_MC_TASK(t, "Releasing resources\n"); |
124 | 196 | ||
125 | raw_spin_lock(&dgl_lock); | 197 | raw_spin_lock(&dgl_lock); |
126 | if (get_rt_flags(t) & RT_F_BLOCKED) { | 198 | |
127 | TRACE_MC_TASK(t, "Forced off before we got our stuff\n"); | 199 | if (get_rt_flags(t) & RT_F_BLOCKED) { |
128 | clear_rt_flags(t, RT_F_BLOCKED); | 200 | TRACE_MC_TASK(t, "Forced off before we got our stuff\n"); |
129 | } | 201 | clear_rt_flags(t, RT_F_BLOCKED); |
202 | sched_trace_task_resume(t); | ||
203 | } | ||
204 | |||
130 | remove_group_req(&group_lock, tsk_rt(t)->req); | 205 | remove_group_req(&group_lock, tsk_rt(t)->req); |
131 | raw_spin_unlock(&dgl_lock); | 206 | raw_spin_unlock(&dgl_lock); |
132 | take_np(t); | 207 | take_np(t); |
133 | } else { | 208 | /* } else { */ |
134 | TRACE_MC_TASK(t, "No resources to release!\n"); | 209 | /* TRACE_MC_TASK(t, "No resources to release!\n"); */ |
135 | } | 210 | /* } */ |
136 | } | 211 | } |
137 | 212 | ||
138 | /* | 213 | /* |
@@ -203,10 +278,10 @@ static void fix_crit_position(struct crit_entry *ce) | |||
203 | { | 278 | { |
204 | if (is_global(ce->domain)) { | 279 | if (is_global(ce->domain)) { |
205 | if (CS_ACTIVATE == ce->state) { | 280 | if (CS_ACTIVATE == ce->state) { |
206 | ce->state = CS_ACTIVE; | 281 | ce_state_change(ce, CS_ACTIVE); |
207 | update_crit_position(ce); | 282 | update_crit_position(ce); |
208 | } else if (CS_REMOVE == ce->state) { | 283 | } else if (CS_REMOVE == ce->state) { |
209 | ce->state = CS_REMOVED; | 284 | ce_state_change(ce, CS_REMOVED); |
210 | update_crit_position(ce); | 285 | update_crit_position(ce); |
211 | } | 286 | } |
212 | } | 287 | } |
@@ -249,9 +324,28 @@ static inline void cancel_ghost(struct crit_entry *ce) | |||
249 | } | 324 | } |
250 | 325 | ||
251 | /* | 326 | /* |
327 | * Time accounting for ghost tasks. | ||
328 | * Must be called before a decision is made involving the task's budget. | ||
329 | */ | ||
330 | static void update_server_time(struct task_struct *p) | ||
331 | { | ||
332 | u64 clock = litmus_clock(); | ||
333 | u64 delta = clock - p->rt_param.last_exec_time; | ||
334 | if (unlikely ((s64)delta < 0)) { | ||
335 | delta = 0; | ||
336 | } | ||
337 | if (budget_remaining(p) <= delta) { | ||
338 | tsk_rt(p)->job_params.exec_time = get_exec_cost(p); | ||
339 | } else { | ||
340 | tsk_rt(p)->job_params.exec_time += delta; | ||
341 | } | ||
342 | p->rt_param.last_exec_time = clock; | ||
343 | } | ||
344 | |||
345 | /* | ||
252 | * Arm ghost timer. Will merge timers if the option is specified. | 346 | * Arm ghost timer. Will merge timers if the option is specified. |
253 | */ | 347 | */ |
254 | static inline void start_crit(struct crit_entry *ce) | 348 | static void start_crit(struct crit_entry *ce) |
255 | { | 349 | { |
256 | lt_t fire; | 350 | lt_t fire; |
257 | struct task_struct *task = ce->linked; | 351 | struct task_struct *task = ce->linked; |
@@ -281,23 +375,17 @@ static inline void start_crit(struct crit_entry *ce) | |||
281 | get_rt_job(task)); | 375 | get_rt_job(task)); |
282 | } | 376 | } |
283 | 377 | ||
284 | /* | 378 | static void stop_crit(struct crit_entry *ce) |
285 | * Time accounting for ghost tasks. | ||
286 | * Must be called before a decision is made involving the task's budget. | ||
287 | */ | ||
288 | static void update_server_time(struct task_struct *p) | ||
289 | { | 379 | { |
290 | u64 clock = litmus_clock(); | 380 | if (is_ghost(ce->linked)) { |
291 | u64 delta = clock - p->rt_param.last_exec_time; | 381 | cancel_ghost(ce); |
292 | if (unlikely ((s64)delta < 0)) { | 382 | if (!budget_exhausted(ce->linked)) { |
293 | delta = 0; | 383 | /* Job isn't finished, so do accounting */ |
294 | } | 384 | update_server_time(ce->linked); |
295 | if (budget_remaining(p) <= delta) { | 385 | } |
296 | tsk_rt(p)->job_params.exec_time = get_exec_cost(p); | ||
297 | } else { | ||
298 | tsk_rt(p)->job_params.exec_time += delta; | ||
299 | } | 386 | } |
300 | p->rt_param.last_exec_time = clock; | 387 | sched_trace_server_switch_away(ce_sid(ce), 0, -ce->linked->pid, |
388 | get_rt_job(ce->linked)); | ||
301 | } | 389 | } |
302 | 390 | ||
303 | /** | 391 | /** |
@@ -317,19 +405,12 @@ static void link_task_to_crit(struct crit_entry *ce, | |||
317 | if (ce->linked) { | 405 | if (ce->linked) { |
318 | ce->domain->release_resources(ce->linked); | 406 | ce->domain->release_resources(ce->linked); |
319 | if (ce->state == CS_BLOCKED) | 407 | if (ce->state == CS_BLOCKED) |
320 | ce->state == CS_ACTIVE; | 408 | ce_state_change(ce, CS_ACTIVE); |
321 | 409 | ||
322 | TRACE_MC_TASK(ce->linked, "Unlinking\n"); | 410 | TRACE_MC_TASK(ce->linked, "Unlinking\n"); |
411 | |||
323 | ce->linked->rt_param.linked_on = NO_CPU; | 412 | ce->linked->rt_param.linked_on = NO_CPU; |
324 | if (is_ghost(ce->linked)) { | 413 | stop_crit(ce); |
325 | cancel_ghost(ce); | ||
326 | if (!budget_exhausted(ce->linked)) { | ||
327 | /* Job isn't finished, so do accounting */ | ||
328 | update_server_time(ce->linked); | ||
329 | } | ||
330 | } | ||
331 | sched_trace_server_switch_away(ce_sid(ce), 0, -ce->linked->pid, | ||
332 | get_rt_job(ce->linked)); | ||
333 | } | 414 | } |
334 | 415 | ||
335 | /* Actually link task */ | 416 | /* Actually link task */ |
@@ -341,7 +422,7 @@ static void link_task_to_crit(struct crit_entry *ce, | |||
341 | if (ce->domain->acquire_resources(task)) | 422 | if (ce->domain->acquire_resources(task)) |
342 | start_crit(ce); | 423 | start_crit(ce); |
343 | else | 424 | else |
344 | ce->state = CS_BLOCKED; | 425 | ce_state_change(ce, CS_BLOCKED); |
345 | } | 426 | } |
346 | } | 427 | } |
347 | 428 | ||
@@ -445,7 +526,7 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task) | |||
445 | struct crit_entry *ce; | 526 | struct crit_entry *ce; |
446 | TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu); | 527 | TRACE_MC_TASK(task, "Linking to P%d\n", entry->cpu); |
447 | BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu); | 528 | BUG_ON(task && tsk_rt(task)->linked_on != entry->cpu); |
448 | BUG_ON(task && (is_ghost(task) || is_cblocked(task))); | 529 | BUG_ON(task && is_ghost(task)); |
449 | 530 | ||
450 | if (entry->linked) { | 531 | if (entry->linked) { |
451 | sched_trace_server_switch_away(-entry->linked->pid, | 532 | sched_trace_server_switch_away(-entry->linked->pid, |
@@ -467,8 +548,9 @@ static void link_task_to_cpu(struct cpu_entry *entry, struct task_struct *task) | |||
467 | /* Higher criticality crit entries are now usable */ | 548 | /* Higher criticality crit entries are now usable */ |
468 | for (; i < entry_level(entry) + 1; i++) { | 549 | for (; i < entry_level(entry) + 1; i++) { |
469 | ce = &entry->crit_entries[i]; | 550 | ce = &entry->crit_entries[i]; |
470 | if (!can_use(ce)) | 551 | if (!can_use(ce)) { |
471 | ce->state = CS_ACTIVATE; | 552 | ce_state_change(ce, CS_ACTIVATE); |
553 | } | ||
472 | } | 554 | } |
473 | } | 555 | } |
474 | 556 | ||
@@ -544,10 +626,12 @@ static void update_crit_levels(struct cpu_entry *entry) | |||
544 | */ | 626 | */ |
545 | readmit[i] = (!global_preempted) ? ce->linked : NULL; | 627 | readmit[i] = (!global_preempted) ? ce->linked : NULL; |
546 | 628 | ||
547 | ce->state = CS_REMOVE; | 629 | if (ce->state != CS_REMOVE) { |
548 | TRACE_CRIT_ENTRY(ce, "(CS_REMOVE)\n"); | 630 | ce_state_change(ce, CS_REMOVE); |
549 | if (ce->linked) | 631 | TRACE_CRIT_ENTRY(ce, "(CS_REMOVE)\n"); |
550 | link_task_to_crit(ce, NULL); | 632 | if (ce->linked) |
633 | link_task_to_crit(ce, NULL); | ||
634 | } | ||
551 | } | 635 | } |
552 | /* Need to unlock so we can access domains */ | 636 | /* Need to unlock so we can access domains */ |
553 | raw_spin_unlock(&entry->lock); | 637 | raw_spin_unlock(&entry->lock); |
@@ -569,7 +653,7 @@ static void update_crit_levels(struct cpu_entry *entry) | |||
569 | */ | 653 | */ |
570 | static void check_for_preempt(struct domain *dom) | 654 | static void check_for_preempt(struct domain *dom) |
571 | { | 655 | { |
572 | int recheck = 1, higher_prio, was_inelig, update = 0; | 656 | int recheck = 1; |
573 | struct cpu_entry *entry; | 657 | struct cpu_entry *entry; |
574 | struct crit_entry *ce; | 658 | struct crit_entry *ce; |
575 | 659 | ||
@@ -604,25 +688,12 @@ static void check_for_preempt(struct domain *dom) | |||
604 | 688 | ||
605 | raw_spin_lock(&entry->lock); | 689 | raw_spin_lock(&entry->lock); |
606 | 690 | ||
607 | if (can_use(ce)) { | 691 | if (can_use(ce) && mc_preempt_needed(dom, ce->linked)) { |
608 | was_inelig = ce->linked && !is_ghost(ce->linked) && | 692 | preempt_crit(dom, ce); |
609 | ce->linked != entry->linked && !is_cblocked(ce->linked); | ||
610 | higher_prio = mc_preempt_needed(dom, ce->linked); | ||
611 | |||
612 | if (was_inelig) { | ||
613 | preempt_cpu(entry, ce->linked); | ||
614 | start_crit(ce); | ||
615 | } else if (higher_prio) | ||
616 | preempt_crit(dom, ce); | ||
617 | update = was_inelig || higher_prio; | ||
618 | |||
619 | } else { | ||
620 | TRACE_CRIT_ENTRY(ce, "Can't use!\n"); | ||
621 | } | ||
622 | if (update) | ||
623 | update_crit_levels(entry); | 693 | update_crit_levels(entry); |
624 | else | 694 | } else { |
625 | raw_spin_unlock(&entry->lock); | 695 | raw_spin_unlock(&entry->lock); |
696 | } | ||
626 | } | 697 | } |
627 | } | 698 | } |
628 | 699 | ||
@@ -679,7 +750,6 @@ static void remove_from_all(struct task_struct* task) | |||
679 | */ | 750 | */ |
680 | static void job_completion(struct task_struct *task, int forced) | 751 | static void job_completion(struct task_struct *task, int forced) |
681 | { | 752 | { |
682 | lt_t now; | ||
683 | int release_server; | 753 | int release_server; |
684 | struct cpu_entry *entry; | 754 | struct cpu_entry *entry; |
685 | struct crit_entry *ce; | 755 | struct crit_entry *ce; |
@@ -712,8 +782,6 @@ static void job_completion(struct task_struct *task, int forced) | |||
712 | remove_from_all(task); | 782 | remove_from_all(task); |
713 | } | 783 | } |
714 | 784 | ||
715 | BUG_ON(get_rt_flags(task) & RT_F_BLOCKED); | ||
716 | |||
717 | if (lt_before(get_user_release(task), litmus_clock())) { | 785 | if (lt_before(get_user_release(task), litmus_clock())) { |
718 | TRACE_TASK(task, "Executable task going back to running\n"); | 786 | TRACE_TASK(task, "Executable task going back to running\n"); |
719 | clear_rt_flags(task, RT_F_SLEEP); | 787 | clear_rt_flags(task, RT_F_SLEEP); |
@@ -729,7 +797,7 @@ static void job_completion(struct task_struct *task, int forced) | |||
729 | if (is_running(task)) { | 797 | if (is_running(task)) { |
730 | job_arrival(task); | 798 | job_arrival(task); |
731 | } | 799 | } |
732 | } else if (is_ghost(task)) { | 800 | } else if (is_ghost(task) || is_cblocked(task)) { |
733 | entry = &per_cpu(cpus, tsk_rt(task)->linked_on); | 801 | entry = &per_cpu(cpus, tsk_rt(task)->linked_on); |
734 | ce = &entry->crit_entries[tsk_mc_crit(task)]; | 802 | ce = &entry->crit_entries[tsk_mc_crit(task)]; |
735 | 803 | ||
@@ -739,7 +807,8 @@ static void job_completion(struct task_struct *task, int forced) | |||
739 | link_task_to_cpu(entry, NULL); | 807 | link_task_to_cpu(entry, NULL); |
740 | sched_trace_server_switch_away(ce_sid(ce),0,-task->pid, | 808 | sched_trace_server_switch_away(ce_sid(ce),0,-task->pid, |
741 | get_rt_job(task)); | 809 | get_rt_job(task)); |
742 | start_crit(ce); | 810 | if (ce->state == CS_ACTIVE) |
811 | start_crit(ce); | ||
743 | } | 812 | } |
744 | raw_spin_unlock(&entry->lock); | 813 | raw_spin_unlock(&entry->lock); |
745 | } | 814 | } |
@@ -759,7 +828,8 @@ static enum hrtimer_restart mc_ghost_exhausted(struct hrtimer *timer) | |||
759 | struct cpu_entry *entry = crit_cpu(ce); | 828 | struct cpu_entry *entry = crit_cpu(ce); |
760 | struct task_struct *tmp = NULL; | 829 | struct task_struct *tmp = NULL; |
761 | #endif | 830 | #endif |
762 | TRACE_CRIT_ENTRY(ce, "Firing here at %llu\n", litmus_clock()); | 831 | TRACE("Firing here at %llu\n", litmus_clock()); |
832 | TRACE_CRIT_ENTRY(ce, "For this\n"); | ||
763 | 833 | ||
764 | raw_spin_lock(&entry->lock); | 834 | raw_spin_lock(&entry->lock); |
765 | 835 | ||
@@ -1068,7 +1138,7 @@ void pick_next_task(struct cpu_entry *entry) | |||
1068 | raw_spin_lock(&entry->lock); | 1138 | raw_spin_lock(&entry->lock); |
1069 | 1139 | ||
1070 | ready_task = NULL; | 1140 | ready_task = NULL; |
1071 | if (!entry->linked && can_use(ce)) { | 1141 | if (!entry->linked && can_use(ce) && ce->state != CS_BLOCKED) { |
1072 | if (ce->linked) { | 1142 | if (ce->linked) { |
1073 | ready_task = ce->linked; | 1143 | ready_task = ce->linked; |
1074 | } else if (dtask) { | 1144 | } else if (dtask) { |
@@ -1092,6 +1162,58 @@ void pick_next_task(struct cpu_entry *entry) | |||
1092 | } | 1162 | } |
1093 | } | 1163 | } |
1094 | 1164 | ||
1165 | /* | ||
1166 | * Caller must hold no locks, when it returns CPU lock will be held. | ||
1167 | */ | ||
1168 | static void process_signals(struct cpu_entry *entry) | ||
1169 | { | ||
1170 | int type; | ||
1171 | enum crit_level level; | ||
1172 | struct crit_entry *ce; | ||
1173 | |||
1174 | /* Read current signal */ | ||
1175 | raw_spin_lock(&entry->signal_lock); | ||
1176 | level = entry->signal.level; | ||
1177 | type = entry->signal.type; | ||
1178 | entry->signal.type = NONE; | ||
1179 | raw_spin_unlock(&entry->signal_lock); | ||
1180 | |||
1181 | raw_spin_lock(&entry->lock); | ||
1182 | |||
1183 | if (type) { | ||
1184 | /* Don't care what type of signal it was due to race conditions: | ||
1185 | * just use the signal as a hint to check CPU state | ||
1186 | */ | ||
1187 | ce = &entry->crit_entries[level]; | ||
1188 | |||
1189 | if (ce->state == CS_BLOCKED && type == ACQUIRED) { | ||
1190 | TRACE_CRIT_ENTRY(ce, "Processing acquired signal\n"); | ||
1191 | BUG_ON(!ce->linked); | ||
1192 | |||
1193 | ce_state_change(ce, CS_ACTIVE); | ||
1194 | if (!is_ghost(ce->linked)) { | ||
1195 | link_task_to_cpu(entry, ce->linked); | ||
1196 | update_crit_levels(entry); | ||
1197 | raw_spin_lock(&entry->lock); | ||
1198 | } | ||
1199 | start_crit(ce); | ||
1200 | } else if ((ce->state == CS_ACTIVE || ce->state == CS_ACTIVATE) && | ||
1201 | ce->linked && type == PREEMPTED) { | ||
1202 | TRACE_CRIT_ENTRY(ce, "Processing preempted signal\n"); | ||
1203 | |||
1204 | stop_crit(ce); | ||
1205 | ce_state_change(ce, CS_BLOCKED); | ||
1206 | if (entry->linked == ce->linked) { | ||
1207 | link_task_to_cpu(entry, NULL); | ||
1208 | } | ||
1209 | |||
1210 | } else { | ||
1211 | TRACE_CRIT_ENTRY(ce, "Missed a signal, state: (%s)\n", | ||
1212 | ce_state_name(ce->state)); | ||
1213 | } | ||
1214 | } | ||
1215 | } | ||
1216 | |||
1095 | /** | 1217 | /** |
1096 | * mc_schedule() - Return next task which should be scheduled. | 1218 | * mc_schedule() - Return next task which should be scheduled. |
1097 | */ | 1219 | */ |
@@ -1101,7 +1223,6 @@ static struct task_struct* mc_schedule(struct task_struct* prev) | |||
1101 | int out_of_time, sleep, preempt, exists, blocks, global, lower; | 1223 | int out_of_time, sleep, preempt, exists, blocks, global, lower; |
1102 | struct cpu_entry* entry = &__get_cpu_var(cpus); | 1224 | struct cpu_entry* entry = &__get_cpu_var(cpus); |
1103 | struct task_struct *next = NULL; | 1225 | struct task_struct *next = NULL; |
1104 | struct crit_entry *ce; | ||
1105 | 1226 | ||
1106 | local_irq_save(flags); | 1227 | local_irq_save(flags); |
1107 | 1228 | ||
@@ -1164,22 +1285,10 @@ static struct task_struct* mc_schedule(struct task_struct* prev) | |||
1164 | job_arrival(entry->scheduled); | 1285 | job_arrival(entry->scheduled); |
1165 | } | 1286 | } |
1166 | 1287 | ||
1167 | /* Call before processing signals so any subsequent signal will cause | ||
1168 | * a reschedule. | ||
1169 | */ | ||
1170 | sched_state_task_picked(); | 1288 | sched_state_task_picked(); |
1171 | 1289 | ||
1172 | /* A remote processor unblocked one of our crit levels */ | 1290 | /* Method will return with entry locked */ |
1173 | if (entry->crit_signal != NUM_CRIT_LEVELS) { | 1291 | process_signals(entry); |
1174 | ce = &entry->crit_entries[entry->crit_signal]; | ||
1175 | TRACE_CRIT_ENTRY(ce, "Processing signal for %d\n", entry->crit_signal); | ||
1176 | raw_spin_lock(ce->domain->lock); | ||
1177 | check_for_preempt(ce->domain); | ||
1178 | raw_spin_unlock(ce->domain->lock); | ||
1179 | entry->crit_signal = NUM_CRIT_LEVELS; | ||
1180 | } | ||
1181 | |||
1182 | raw_spin_lock(&entry->lock); | ||
1183 | 1292 | ||
1184 | /* Pick next task if none is linked */ | 1293 | /* Pick next task if none is linked */ |
1185 | if (!entry->linked) | 1294 | if (!entry->linked) |
@@ -1191,6 +1300,7 @@ static struct task_struct* mc_schedule(struct task_struct* prev) | |||
1191 | next->rt_param.scheduled_on = entry->cpu; | 1300 | next->rt_param.scheduled_on = entry->cpu; |
1192 | entry->will_schedule = next; | 1301 | entry->will_schedule = next; |
1193 | 1302 | ||
1303 | |||
1194 | raw_spin_unlock_irqrestore(&entry->lock, flags); | 1304 | raw_spin_unlock_irqrestore(&entry->lock, flags); |
1195 | 1305 | ||
1196 | if (next) { | 1306 | if (next) { |
@@ -1225,6 +1335,54 @@ long mc_deactivate_plugin(void) | |||
1225 | return mc_ce_deactivate_plugin_common(); | 1335 | return mc_ce_deactivate_plugin_common(); |
1226 | } | 1336 | } |
1227 | 1337 | ||
1338 | static unsigned long long deadline_prio(struct dgl *dgl, struct dgl_group_req *greq) | ||
1339 | { | ||
1340 | return get_deadline(greq->task); | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * Setup and send signal to CPU for resource acquisition. To avoid touching | ||
1345 | * CPU locks, all CPU state modifications are delayed until the signal is | ||
1346 | * processed. | ||
1347 | */ | ||
1348 | static void cpu_acquired(struct dgl_group_req *greq) | ||
1349 | { | ||
1350 | struct cpu_entry *entry = &per_cpu(cpus, greq->cpu); | ||
1351 | |||
1352 | TRACE_MC_TASK(greq->task, "Acquired CPU %d\n", greq->cpu); | ||
1353 | |||
1354 | sched_trace_task_resume(greq->task); | ||
1355 | clear_rt_flags(greq->task, RT_F_BLOCKED); | ||
1356 | |||
1357 | raw_spin_lock(&entry->signal_lock); | ||
1358 | |||
1359 | entry->signal.type = ACQUIRED; | ||
1360 | entry->signal.level = CRIT_LEVEL_B; | ||
1361 | |||
1362 | litmus_reschedule(greq->cpu); | ||
1363 | |||
1364 | raw_spin_unlock(&entry->signal_lock); | ||
1365 | } | ||
1366 | |||
1367 | static void cpu_preempted(struct dgl_group_req *greq) | ||
1368 | { | ||
1369 | struct cpu_entry *entry = &per_cpu(cpus, greq->cpu); | ||
1370 | |||
1371 | TRACE_MC_TASK(greq->task, "Dropping CPU %d\n", entry->cpu); | ||
1372 | |||
1373 | sched_trace_task_block(greq->task); | ||
1374 | set_rt_flags(greq->task, RT_F_BLOCKED); | ||
1375 | |||
1376 | raw_spin_lock(&entry->signal_lock); | ||
1377 | |||
1378 | entry->signal.type = PREEMPTED; | ||
1379 | entry->signal.level = CRIT_LEVEL_B; | ||
1380 | |||
1381 | litmus_reschedule(greq->cpu); | ||
1382 | |||
1383 | raw_spin_unlock(&entry->signal_lock); | ||
1384 | } | ||
1385 | |||
1228 | /* ************************************************************************** | 1386 | /* ************************************************************************** |
1229 | * Initialization | 1387 | * Initialization |
1230 | * ************************************************************************** */ | 1388 | * ************************************************************************** */ |
@@ -1265,6 +1423,8 @@ static long mc_activate_plugin(void) | |||
1265 | #endif | 1423 | #endif |
1266 | #endif | 1424 | #endif |
1267 | 1425 | ||
1426 | group_lock.assign_priority = deadline_prio; | ||
1427 | |||
1268 | for_each_online_cpu(cpu) { | 1428 | for_each_online_cpu(cpu) { |
1269 | BUG_ON(NR_CPUS <= n); | 1429 | BUG_ON(NR_CPUS <= n); |
1270 | dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain; | 1430 | dom = per_cpu(cpus, cpu).crit_entries[CRIT_LEVEL_A].domain; |
@@ -1286,7 +1446,6 @@ out: | |||
1286 | return ret; | 1446 | return ret; |
1287 | } | 1447 | } |
1288 | 1448 | ||
1289 | |||
1290 | static void mc_release_ts(lt_t time) | 1449 | static void mc_release_ts(lt_t time) |
1291 | { | 1450 | { |
1292 | int cpu, cont_id = -1; | 1451 | int cpu, cont_id = -1; |
@@ -1302,6 +1461,7 @@ static void mc_release_ts(lt_t time) | |||
1302 | sched_trace_container_param(++cont_id, (const char*)&name); | 1461 | sched_trace_container_param(++cont_id, (const char*)&name); |
1303 | ce = &entry->crit_entries[level]; | 1462 | ce = &entry->crit_entries[level]; |
1304 | sched_trace_server_param(ce_sid(ce), cont_id, 0, 0); | 1463 | sched_trace_server_param(ce_sid(ce), cont_id, 0, 0); |
1464 | ce_st(ce, ce_partition(ce)); | ||
1305 | } | 1465 | } |
1306 | 1466 | ||
1307 | level = CRIT_LEVEL_B; | 1467 | level = CRIT_LEVEL_B; |
@@ -1311,6 +1471,7 @@ static void mc_release_ts(lt_t time) | |||
1311 | sched_trace_container_param(++cont_id, (const char*)&name); | 1471 | sched_trace_container_param(++cont_id, (const char*)&name); |
1312 | ce = &entry->crit_entries[level]; | 1472 | ce = &entry->crit_entries[level]; |
1313 | sched_trace_server_param(ce_sid(ce), cont_id, 0, 0); | 1473 | sched_trace_server_param(ce_sid(ce), cont_id, 0, 0); |
1474 | ce_st(ce, ce_partition(ce)); | ||
1314 | } | 1475 | } |
1315 | 1476 | ||
1316 | level = CRIT_LEVEL_C; | 1477 | level = CRIT_LEVEL_C; |
@@ -1320,8 +1481,15 @@ static void mc_release_ts(lt_t time) | |||
1320 | entry = &per_cpu(cpus, cpu); | 1481 | entry = &per_cpu(cpus, cpu); |
1321 | ce = &entry->crit_entries[level]; | 1482 | ce = &entry->crit_entries[level]; |
1322 | sched_trace_server_param(ce_sid(ce), cont_id, 0, 0); | 1483 | sched_trace_server_param(ce_sid(ce), cont_id, 0, 0); |
1484 | ce_st(ce, ce_partition(ce)); | ||
1323 | } | 1485 | } |
1324 | 1486 | ||
1487 | /* strcpy(name, "DGL"); */ | ||
1488 | /* sched_trace_container_param(++cont_id, (const char*)&name); */ | ||
1489 | /* for_each_online_cpu(cpu) { */ | ||
1490 | /* sched_trace_server_param(group_lock.sid_start + cpu, cont_id, 0, 0); */ | ||
1491 | /* } */ | ||
1492 | |||
1325 | mc_ce_release_at_common(NULL, time); | 1493 | mc_ce_release_at_common(NULL, time); |
1326 | } | 1494 | } |
1327 | 1495 | ||
@@ -1419,44 +1587,38 @@ static inline void init_edf_domain(struct domain *dom, rt_domain_t *rt, | |||
1419 | #endif | 1587 | #endif |
1420 | } | 1588 | } |
1421 | 1589 | ||
1422 | /* | 1590 | static void init_lock(raw_spinlock_t *lock, int cpu, char *name) |
1423 | * Setup and send signal to CPU for resource acquisition. To avoid touching | ||
1424 | * CPU locks, all CPU state modifications are delayed until the signal is | ||
1425 | * processed. | ||
1426 | */ | ||
1427 | static void cpu_acquired(int cpu) | ||
1428 | { | 1591 | { |
1429 | struct cpu_entry *entry = &per_cpu(cpus, cpu); | 1592 | int name_size; |
1430 | struct crit_entry *ce = &entry->crit_entries[CRIT_LEVEL_B]; | 1593 | char *lock_name; |
1594 | struct lock_class_key *lock_key; | ||
1431 | 1595 | ||
1432 | TRACE_CRIT_ENTRY(ce, "Acquired lock\n"); | 1596 | /* Create unique name / key so that lockdep is not triggered */ |
1597 | name_size = sizeof(*lock_name) * LITMUS_LOCKDEP_NAME_MAX_LEN; | ||
1598 | lock_name = kmalloc(name_size, GFP_ATOMIC); | ||
1599 | lock_key = kmalloc(sizeof(*lock_key), GFP_ATOMIC); | ||
1433 | 1600 | ||
1434 | BUG_ON(!ce->linked); | 1601 | raw_spin_lock_init(lock); |
1435 | 1602 | ||
1436 | clear_rt_flags(ce->linked, RT_F_BLOCKED); | 1603 | LOCKDEP_DYNAMIC_ALLOC(lock, lock_key, lock_name, "%s%d", name, cpu); |
1437 | sched_trace_task_resume(ce->linked); | 1604 | } |
1438 | 1605 | ||
1439 | if (ce->state == CS_BLOCKED) { | 1606 | static char* domain_name(const char *name, int cpu) |
1440 | entry->crit_signal = CRIT_LEVEL_B; | 1607 | { |
1441 | /* Yes this is ok for race conditions, but only in the system | 1608 | char *buf = kmalloc(LITMUS_LOCKDEP_NAME_MAX_LEN * sizeof(char), GFP_ATOMIC); |
1442 | * for the MC-COLOR paper | 1609 | snprintf(buf, LITMUS_LOCKDEP_NAME_MAX_LEN, "%s%d", name, cpu); |
1443 | */ | 1610 | return buf; |
1444 | ce->state = CS_ACTIVE; | ||
1445 | litmus_reschedule(cpu); | ||
1446 | } | ||
1447 | } | 1611 | } |
1448 | 1612 | ||
1449 | struct domain_data *ce_domain_for(int); | 1613 | struct domain_data *ce_domain_for(int); |
1450 | static int __init init_mc(void) | 1614 | static int __init init_mc(void) |
1451 | { | 1615 | { |
1452 | int cpu, name_size; | 1616 | int cpu; |
1453 | char *lock_name; | ||
1454 | rt_domain_t *rt; | 1617 | rt_domain_t *rt; |
1455 | raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */ | 1618 | raw_spinlock_t *a_dom_lock, *b_dom_lock, *c_dom_lock; /* For lock debugger */ |
1456 | struct cpu_entry *entry; | 1619 | struct cpu_entry *entry; |
1457 | struct domain_data *dom_data; | 1620 | struct domain_data *dom_data; |
1458 | struct ce_dom_data *ce_data; | 1621 | struct ce_dom_data *ce_data; |
1459 | struct lock_class_key *lock_key; | ||
1460 | 1622 | ||
1461 | for_each_online_cpu(cpu) { | 1623 | for_each_online_cpu(cpu) { |
1462 | entry = &per_cpu(cpus, cpu); | 1624 | entry = &per_cpu(cpus, cpu); |
@@ -1465,15 +1627,11 @@ static int __init init_mc(void) | |||
1465 | entry->cpu = cpu; | 1627 | entry->cpu = cpu; |
1466 | entry->scheduled = NULL; | 1628 | entry->scheduled = NULL; |
1467 | entry->linked = NULL; | 1629 | entry->linked = NULL; |
1468 | entry->crit_signal = NUM_CRIT_LEVELS; | 1630 | entry->signal.level = CRIT_LEVEL_A; |
1631 | entry->signal.type = NONE; | ||
1469 | 1632 | ||
1470 | /* Trick lockdep for CPU locks */ | ||
1471 | name_size = sizeof(*lock_name) * LITMUS_LOCKDEP_NAME_MAX_LEN; | ||
1472 | lock_name = kmalloc(name_size, GFP_ATOMIC); | ||
1473 | lock_key = kmalloc(sizeof(*lock_key), GFP_ATOMIC); | ||
1474 | raw_spin_lock_init(&entry->lock); | 1633 | raw_spin_lock_init(&entry->lock); |
1475 | LOCKDEP_DYNAMIC_ALLOC(&entry->lock, lock_key, lock_name, | 1634 | raw_spin_lock_init(&entry->signal_lock); |
1476 | "entry%d", cpu); | ||
1477 | 1635 | ||
1478 | #ifdef CONFIG_PLUGIN_MC_REDIRECT | 1636 | #ifdef CONFIG_PLUGIN_MC_REDIRECT |
1479 | raw_spin_lock_init(&entry->redir_lock); | 1637 | raw_spin_lock_init(&entry->redir_lock); |
@@ -1490,7 +1648,7 @@ static int __init init_mc(void) | |||
1490 | ce_peek_and_take_ready, ce_higher_prio, ce_data, cpu, | 1648 | ce_peek_and_take_ready, ce_higher_prio, ce_data, cpu, |
1491 | ce_timer_function); | 1649 | ce_timer_function); |
1492 | init_local_domain(entry, dom_data, CRIT_LEVEL_A); | 1650 | init_local_domain(entry, dom_data, CRIT_LEVEL_A); |
1493 | dom_data->domain.name = "LVL-A"; | 1651 | dom_data->domain.name = domain_name("LVL-A", cpu); |
1494 | 1652 | ||
1495 | /* CRIT_LEVEL_B */ | 1653 | /* CRIT_LEVEL_B */ |
1496 | dom_data = &per_cpu(_mc_crit_b, cpu); | 1654 | dom_data = &per_cpu(_mc_crit_b, cpu); |
@@ -1501,7 +1659,8 @@ static int __init init_mc(void) | |||
1501 | dom_data->domain.release_resources = release_resources; | 1659 | dom_data->domain.release_resources = release_resources; |
1502 | b_dom_lock = dom_data->domain.lock; | 1660 | b_dom_lock = dom_data->domain.lock; |
1503 | raw_spin_lock_init(b_dom_lock); | 1661 | raw_spin_lock_init(b_dom_lock); |
1504 | dom_data->domain.name = "LVL-B"; | 1662 | |
1663 | dom_data->domain.name = domain_name("LVL-B", cpu); | ||
1505 | } | 1664 | } |
1506 | 1665 | ||
1507 | /* CRIT_LEVEL_C */ | 1666 | /* CRIT_LEVEL_C */ |
@@ -1512,12 +1671,15 @@ static int __init init_mc(void) | |||
1512 | c_dom_lock = _mc_crit_c.domain.lock; | 1671 | c_dom_lock = _mc_crit_c.domain.lock; |
1513 | raw_spin_lock_init(c_dom_lock); | 1672 | raw_spin_lock_init(c_dom_lock); |
1514 | _mc_crit_c.domain.name = "LVL-C"; | 1673 | _mc_crit_c.domain.name = "LVL-C"; |
1515 | 1674 | /* GROUP LOCK */ | |
1516 | dgl_init(&group_lock, color_cache_info.nr_colors, | 1675 | dgl_init(&group_lock, color_cache_info.nr_colors, |
1517 | color_cache_info.ways); | 1676 | color_cache_info.ways); |
1518 | group_lock.cpu_acquired = cpu_acquired; | 1677 | group_lock.cpu_acquired = cpu_acquired; |
1678 | group_lock.cpu_preempted = cpu_preempted; | ||
1519 | raw_spin_lock_init(&dgl_lock); | 1679 | raw_spin_lock_init(&dgl_lock); |
1520 | 1680 | ||
1681 | group_lock.sid_start = num_online_cpus() * NUM_CRIT_LEVELS + 1; | ||
1682 | |||
1521 | return register_sched_plugin(&mc_plugin); | 1683 | return register_sched_plugin(&mc_plugin); |
1522 | } | 1684 | } |
1523 | 1685 | ||
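
One closing note on the sched_mc.c changes above: the DGL callbacks (cpu_acquired, cpu_preempted) never touch the per-CPU scheduling lock; they only record a lock_signal under the new signal_lock and trigger a reschedule, and process_signals() consumes the signal later in the schedule path before taking entry->lock. A rough userspace model of that handshake is sketched below; cpu_model, post_signal, and consume_signal are illustrative names, not kernel code.

#include <pthread.h>
#include <stdio.h>

enum signal_type { NONE = 0, ACQUIRED, PREEMPTED };

struct cpu_model {
	pthread_spinlock_t signal_lock;	/* protects only the signal */
	pthread_spinlock_t lock;	/* stands in for the CPU state lock */
	enum signal_type signal;
};

/* Producer side: what cpu_acquired()/cpu_preempted() do while holding the
 * global dgl_lock -- record the signal and (in the kernel) reschedule. */
static void post_signal(struct cpu_model *cpu, enum signal_type type)
{
	pthread_spin_lock(&cpu->signal_lock);
	cpu->signal = type;		/* later signals overwrite earlier ones */
	/* the kernel version calls litmus_reschedule(cpu) here */
	pthread_spin_unlock(&cpu->signal_lock);
}

/* Consumer side: the schedule path reads and clears the signal first, then
 * takes the CPU lock to act on it, mirroring process_signals(). */
static enum signal_type consume_signal(struct cpu_model *cpu)
{
	enum signal_type type;

	pthread_spin_lock(&cpu->signal_lock);
	type = cpu->signal;
	cpu->signal = NONE;
	pthread_spin_unlock(&cpu->signal_lock);

	pthread_spin_lock(&cpu->lock);
	/* ... use the signal only as a hint to re-check crit_entry state ... */
	pthread_spin_unlock(&cpu->lock);

	return type;
}

int main(void)
{
	struct cpu_model cpu = { .signal = NONE };

	pthread_spin_init(&cpu.signal_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&cpu.lock, PTHREAD_PROCESS_PRIVATE);

	post_signal(&cpu, ACQUIRED);
	printf("consumed signal: %d\n", consume_signal(&cpu));
	return 0;
}

The point mirrored here is that later signals simply overwrite earlier ones, which is why process_signals() treats the signal only as a hint to re-examine the crit_entry state rather than trusting its type.
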