author     Glenn Elliott <gelliott@cs.unc.edu>    2013-04-24 20:32:41 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>    2013-04-24 20:32:41 -0400
commit     5e73c0954ed9e0599051d79e529e92fd87ce99d2 (patch)
tree       560f4bd5be676926edb5b8006115db74a82f454f
parent     1a7abf153a44781cedd7f529a70769c769f58029 (diff)
slew of bug fixes: mostly races and sched-atomic
-rw-r--r--  include/litmus/budget.h    |  18
-rw-r--r--  include/litmus/jobs.h      |   1
-rw-r--r--  include/litmus/litmus.h    |   2
-rw-r--r--  include/litmus/rt_param.h  |   2
-rw-r--r--  kernel/sched.c             |   6
-rw-r--r--  litmus/budget.c            |  41
-rw-r--r--  litmus/edf_common.c        |   2
-rw-r--r--  litmus/ikglp_lock.c        | 182
-rw-r--r--  litmus/jobs.c              |   3
-rw-r--r--  litmus/litmus.c            |  75
-rw-r--r--  litmus/litmus_softirq.c    |  55
-rw-r--r--  litmus/nvidia_info.c       | 101
-rw-r--r--  litmus/sched_cedf.c        | 126
-rw-r--r--  litmus/sched_gsn_edf.c     |  16
-rw-r--r--  litmus/sched_litmus.c      |  54
-rw-r--r--  litmus/sched_pfp.c         |   8
-rw-r--r--  litmus/sched_plugin.c      |   6
-rw-r--r--  litmus/sched_psn_edf.c     |   4
-rw-r--r--  litmus/sync.c              |  10
19 files changed, 594 insertions, 118 deletions
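
The recurring pattern in these race fixes is making budget-timer teardown safe: cancellation is double-checked under the timer's raw spinlock, and a failed hrtimer_try_to_cancel() (callback already running) is reported back so schedule-time exhaustion handling can defer to the timer instead of racing with it (see the litmus/budget.c and litmus/sched_cedf.c hunks below). A minimal sketch of that pattern, using a simplified, hypothetical struct in place of the real struct enforcement_timer:

```c
#include <linux/hrtimer.h>
#include <linux/spinlock.h>

/* Simplified stand-in for struct enforcement_timer (illustration only). */
struct enf_timer {
	raw_spinlock_t lock;
	struct hrtimer timer;
	int armed;
};

/* Returns < 0 if the timer callback is already running; in that case the
 * caller defers to the callback rather than handling exhaustion itself. */
static int cancel_enf_timer(struct enf_timer *et)
{
	unsigned long flags;
	int ret = 0;

	if (!et->armed)
		return 0;			/* unlocked fast path */

	raw_spin_lock_irqsave(&et->lock, flags);
	if (et->armed) {			/* re-check under the lock */
		ret = hrtimer_try_to_cancel(&et->timer);
		if (ret >= 0)
			et->armed = 0;		/* cancelled, or never queued */
		/* ret < 0: handler is running; leave 'armed' for the handler */
	}
	raw_spin_unlock_irqrestore(&et->lock, flags);
	return ret;
}
```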
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index d5d4d61eb06a..0ae9c9f30023 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -11,14 +11,20 @@ struct enforcement_timer
11 | raw_spinlock_t lock; | 11 | raw_spinlock_t lock; |
12 | struct hrtimer timer; | 12 | struct hrtimer timer; |
13 | int armed:1; | 13 | int armed:1; |
14 | unsigned int job_when_armed; | ||
14 | }; | 15 | }; |
15 | 16 | ||
17 | int cancel_enforcement_timer(struct task_struct* t); | ||
18 | |||
16 | typedef void (*scheduled_t)(struct task_struct* t); | 19 | typedef void (*scheduled_t)(struct task_struct* t); |
17 | typedef void (*blocked_t)(struct task_struct* t); | 20 | typedef void (*blocked_t)(struct task_struct* t); |
18 | typedef void (*preempt_t)(struct task_struct* t); | 21 | typedef void (*preempt_t)(struct task_struct* t); |
19 | typedef void (*sleep_t)(struct task_struct* t); | 22 | typedef void (*sleep_t)(struct task_struct* t); |
20 | typedef void (*wakeup_t)(struct task_struct* t); | 23 | typedef void (*wakeup_t)(struct task_struct* t); |
21 | typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t); | 24 | |
25 | #define IN_SCHEDULE 1 | ||
26 | |||
27 | typedef enum hrtimer_restart (*exhausted_t)(struct task_struct* t, int in_schedule); | ||
22 | typedef void (*exit_t)(struct task_struct* t); | 28 | typedef void (*exit_t)(struct task_struct* t); |
23 | typedef void (*inherit_t)(struct task_struct* t, struct task_struct* prio_inh); | 29 | typedef void (*inherit_t)(struct task_struct* t, struct task_struct* prio_inh); |
24 | typedef void (*disinherit_t)(struct task_struct* t, struct task_struct* prio_inh); | 30 | typedef void (*disinherit_t)(struct task_struct* t, struct task_struct* prio_inh); |
@@ -111,7 +117,15 @@ void reevaluate_inheritance(struct task_struct* t); | |||
111 | } \ | 117 | } \ |
112 | }while(0) | 118 | }while(0) |
113 | 119 | ||
114 | #define budget_state_machine2(a, b, evt) \ | 120 | #define budget_state_machine2(t, evt, param) \ |
121 | do { \ | ||
122 | if (get_budget_timer(t).ops && \ | ||
123 | get_budget_timer(t).ops->evt != NULL) { \ | ||
124 | get_budget_timer(t).ops->evt(t, param); \ | ||
125 | } \ | ||
126 | }while(0) | ||
127 | |||
128 | #define budget_state_machine_chgprio(a, b, evt) \ | ||
115 | do { \ | 129 | do { \ |
116 | if (get_budget_timer(a).ops && \ | 130 | if (get_budget_timer(a).ops && \ |
117 | get_budget_timer(b).ops && \ | 131 | get_budget_timer(b).ops && \ |
diff --git a/include/litmus/jobs.h b/include/litmus/jobs.h
index ec73cc6ae7f8..797e1d470a69 100644
--- a/include/litmus/jobs.h
+++ b/include/litmus/jobs.h
@@ -3,6 +3,7 @@
3 | 3 | ||
4 | void prepare_for_next_period(struct task_struct *t); | 4 | void prepare_for_next_period(struct task_struct *t); |
5 | void release_at(struct task_struct *t, lt_t start); | 5 | void release_at(struct task_struct *t, lt_t start); |
6 | void setup_release(struct task_struct *t, lt_t start); | ||
6 | long complete_job(void); | 7 | long complete_job(void); |
7 | 8 | ||
8 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) | 9 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 7efd146c2198..cb87dcb5f5f7 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -142,7 +142,7 @@ clear_bit(flag_nr, &tsk_rt(t)->budget.flags))
142 | #define bt_flags_reset(t) (\ | 142 | #define bt_flags_reset(t) (\ |
143 | tsk_rt(t)->budget.flags = 0) | 143 | tsk_rt(t)->budget.flags = 0) |
144 | 144 | ||
145 | #define requeue_preempted_job(t) \ | 145 | #define should_requeue_preempted_job(t) \ |
146 | (t && !is_completed(t) && (!budget_exhausted(t) || !budget_enforced(t))) | 146 | (t && !is_completed(t) && (!budget_exhausted(t) || !budget_enforced(t))) |
147 | 147 | ||
148 | 148 | ||
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index c480d95690f8..6b6e19752a81 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -335,6 +335,8 @@ struct klmirqd_info
335 | struct list_head worklist; | 335 | struct list_head worklist; |
336 | 336 | ||
337 | struct list_head klmirqd_reg; | 337 | struct list_head klmirqd_reg; |
338 | |||
339 | struct completion* exited; | ||
338 | }; | 340 | }; |
339 | #endif | 341 | #endif |
340 | 342 | ||
diff --git a/kernel/sched.c b/kernel/sched.c
index 51fb72b5af79..7d27a3851a4d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -91,6 +91,7 @@
91 | #endif | 91 | #endif |
92 | 92 | ||
93 | static void litmus_tick(struct rq*, struct task_struct*); | 93 | static void litmus_tick(struct rq*, struct task_struct*); |
94 | static void litmus_handle_budget_exhaustion(struct task_struct*); | ||
94 | 95 | ||
95 | /* | 96 | /* |
96 | * Convert user-nice values [ -20 ... 0 ... 19 ] | 97 | * Convert user-nice values [ -20 ... 0 ... 19 ] |
@@ -4432,6 +4433,11 @@ litmus_need_resched_nonpreemptible: | |||
4432 | 4433 | ||
4433 | post_schedule(rq); | 4434 | post_schedule(rq); |
4434 | 4435 | ||
4436 | if (is_realtime(current) && | ||
4437 | unlikely(budget_enforced(current) && budget_exhausted(current))) { | ||
4438 | litmus_handle_budget_exhaustion(current); | ||
4439 | } | ||
4440 | |||
4435 | if (sched_state_validate_switch()) { | 4441 | if (sched_state_validate_switch()) { |
4436 | TS_SCHED2_END(prev); | 4442 | TS_SCHED2_END(prev); |
4437 | goto litmus_need_resched_nonpreemptible; | 4443 | goto litmus_need_resched_nonpreemptible; |
diff --git a/litmus/budget.c b/litmus/budget.c
index 9ae530ad3b2b..4f38b3d8d45c 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -9,10 +9,10 @@
9 | #include <litmus/budget.h> | 9 | #include <litmus/budget.h> |
10 | #include <litmus/signal.h> | 10 | #include <litmus/signal.h> |
11 | 11 | ||
12 | inline static void cancel_enforcement_timer(struct task_struct* t) | 12 | int cancel_enforcement_timer(struct task_struct* t) |
13 | { | 13 | { |
14 | struct enforcement_timer* et; | 14 | struct enforcement_timer* et; |
15 | int ret; | 15 | int ret = 0; |
16 | unsigned long flags; | 16 | unsigned long flags; |
17 | 17 | ||
18 | BUG_ON(!t); | 18 | BUG_ON(!t); |
@@ -20,22 +20,28 @@ inline static void cancel_enforcement_timer(struct task_struct* t) | |||
20 | 20 | ||
21 | et = &tsk_rt(t)->budget.timer; | 21 | et = &tsk_rt(t)->budget.timer; |
22 | 22 | ||
23 | TRACE("cancelling enforcement timer.\n"); | 23 | TRACE_TASK(t, "canceling enforcement timer.\n"); |
24 | 24 | ||
25 | if (et->armed) { | 25 | if (et->armed) { |
26 | raw_spin_lock_irqsave(&et->lock, flags); | 26 | raw_spin_lock_irqsave(&et->lock, flags); |
27 | if (et->armed) { | 27 | if (et->armed) { |
28 | ret = hrtimer_try_to_cancel(&et->timer); | 28 | ret = hrtimer_try_to_cancel(&et->timer); |
29 | et->armed = 0; | 29 | if (ret < 0) |
30 | } | 30 | TRACE_TASK(t, "timer already running. failed to cancel.\n"); |
31 | else { | 31 | else { |
32 | TRACE("timer was not armed (race).\n"); | 32 | TRACE_TASK(t, "canceled timer with %lld ns remaining.\n", |
33 | ktime_to_ns(hrtimer_expires_remaining(&et->timer))); | ||
34 | et->armed = 0; | ||
35 | } | ||
33 | } | 36 | } |
37 | else | ||
38 | TRACE_TASK(t, "timer was not armed (race).\n"); | ||
34 | raw_spin_unlock_irqrestore(&et->lock, flags); | 39 | raw_spin_unlock_irqrestore(&et->lock, flags); |
35 | } | 40 | } |
36 | else { | 41 | else |
37 | TRACE("timer was not armed.\n"); | 42 | TRACE_TASK(t, "timer was not armed.\n"); |
38 | } | 43 | |
44 | return ret; | ||
39 | } | 45 | } |
40 | 46 | ||
41 | 47 | ||
@@ -283,15 +289,14 @@ void sobliv_on_wakeup(struct task_struct* t) | |||
283 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh) | 289 | void sobliv_on_inherit(struct task_struct* t, struct task_struct* prio_inh) |
284 | { | 290 | { |
285 | /* TODO: Budget credit accounting. */ | 291 | /* TODO: Budget credit accounting. */ |
286 | |||
287 | BUG_ON(!prio_inh); | 292 | BUG_ON(!prio_inh); |
288 | TRACE_TASK(t, "called %s\n", __FUNCTION__); | 293 | // TRACE_TASK(t, "called %s\n", __FUNCTION__); |
289 | } | 294 | } |
290 | 295 | ||
291 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh) | 296 | void sobliv_on_disinherit(struct task_struct* t, struct task_struct* prio_inh) |
292 | { | 297 | { |
293 | /* TODO: Budget credit accounting. */ | 298 | /* TODO: Budget credit accounting. */ |
294 | TRACE_TASK(t, "called %s\n", __FUNCTION__); | 299 | // TRACE_TASK(t, "called %s\n", __FUNCTION__); |
295 | } | 300 | } |
296 | 301 | ||
297 | void sobliv_on_enter_top_m(struct task_struct* t) | 302 | void sobliv_on_enter_top_m(struct task_struct* t) |
@@ -397,7 +402,7 @@ void reevaluate_inheritance(struct task_struct* t) | |||
397 | 402 | ||
398 | static enum hrtimer_restart __on_timeout(struct hrtimer *timer) | 403 | static enum hrtimer_restart __on_timeout(struct hrtimer *timer) |
399 | { | 404 | { |
400 | enum hrtimer_restart restart; | 405 | enum hrtimer_restart restart = HRTIMER_NORESTART; |
401 | unsigned long flags; | 406 | unsigned long flags; |
402 | 407 | ||
403 | struct budget_tracker* bt = | 408 | struct budget_tracker* bt = |
@@ -420,12 +425,18 @@ static enum hrtimer_restart __on_timeout(struct hrtimer *timer) | |||
420 | tsk_rt(t)->budget.timer.armed = 0; | 425 | tsk_rt(t)->budget.timer.armed = 0; |
421 | raw_spin_unlock_irqrestore(&bt->timer.lock, flags); | 426 | raw_spin_unlock_irqrestore(&bt->timer.lock, flags); |
422 | 427 | ||
423 | restart = bt->ops->on_exhausted(t); | 428 | if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { |
429 | TRACE_TASK(t, "spurious exhastion while waiting for release. dropping.\n"); | ||
430 | goto out; | ||
431 | } | ||
432 | |||
433 | restart = bt->ops->on_exhausted(t,!IN_SCHEDULE); | ||
424 | 434 | ||
425 | raw_spin_lock_irqsave(&bt->timer.lock, flags); | 435 | raw_spin_lock_irqsave(&bt->timer.lock, flags); |
426 | tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART); | 436 | tsk_rt(t)->budget.timer.armed = (restart == HRTIMER_RESTART); |
427 | raw_spin_unlock_irqrestore(&bt->timer.lock, flags); | 437 | raw_spin_unlock_irqrestore(&bt->timer.lock, flags); |
428 | 438 | ||
439 | out: | ||
429 | return restart; | 440 | return restart; |
430 | } | 441 | } |
431 | 442 | ||
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 7f0bdaceab5b..5519e05aaf83 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -63,7 +63,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
63 | /* There is no point in comparing a task to itself. */ | 63 | /* There is no point in comparing a task to itself. */ |
64 | if (first && first == second) { | 64 | if (first && first == second) { |
65 | TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid); | 65 | TRACE_CUR("WARNING: pointless edf priority comparison: %s/%d\n", first->comm, first->pid); |
66 | WARN_ON(1); | 66 | // WARN_ON(1); |
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index 4963820eaee2..fb0080352a9f 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -5,6 +5,8 @@
5 | #include <litmus/sched_plugin.h> | 5 | #include <litmus/sched_plugin.h> |
6 | #include <litmus/fdso.h> | 6 | #include <litmus/fdso.h> |
7 | 7 | ||
8 | #include <litmus/litmus_proc.h> | ||
9 | |||
8 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) | 10 | #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) |
9 | #include <litmus/gpu_affinity.h> | 11 | #include <litmus/gpu_affinity.h> |
10 | #include <litmus/nvidia_info.h> | 12 | #include <litmus/nvidia_info.h> |
@@ -326,6 +328,159 @@ static void print_donors(struct binheap_node *n, int depth) | |||
326 | } | 328 | } |
327 | #endif | 329 | #endif |
328 | 330 | ||
331 | struct ikglp_proc_print_heap_args | ||
332 | { | ||
333 | struct ikglp_semaphore *sem; | ||
334 | int *size; | ||
335 | char **next; | ||
336 | }; | ||
337 | |||
338 | static void __ikglp_pq_to_proc(struct binheap_node *n, void *args) | ||
339 | { | ||
340 | struct ikglp_proc_print_heap_args *hargs; | ||
341 | ikglp_heap_node_t *request; | ||
342 | int w; | ||
343 | |||
344 | if (!n) | ||
345 | return; | ||
346 | |||
347 | hargs = (struct ikglp_proc_print_heap_args*) args; | ||
348 | request = binheap_entry(n, ikglp_heap_node_t, node); | ||
349 | |||
350 | w = scnprintf(*(hargs->next), *(hargs->size), "\t%s/%d\n", | ||
351 | request->task->comm, request->task->pid); | ||
352 | *(hargs->size) -= w; | ||
353 | *(hargs->next) += w; | ||
354 | } | ||
355 | |||
356 | static void __ikglp_donor_to_proc(struct binheap_node *n, void *args) | ||
357 | { | ||
358 | struct ikglp_proc_print_heap_args *hargs; | ||
359 | ikglp_wait_state_t *donor_node; | ||
360 | int w; | ||
361 | |||
362 | if (!n) | ||
363 | return; | ||
364 | |||
365 | hargs = (struct ikglp_proc_print_heap_args*) args; | ||
366 | donor_node = binheap_entry(n, ikglp_wait_state_t, node); | ||
367 | |||
368 | w = scnprintf(*(hargs->next), *(hargs->size), "\t%s/%d (donee: %s/%d)\n", | ||
369 | donor_node->task->comm, | ||
370 | donor_node->task->pid, | ||
371 | donor_node->donee_info->task->comm, | ||
372 | donor_node->donee_info->task->pid); | ||
373 | *(hargs->size) -= w; | ||
374 | *(hargs->next) += w; | ||
375 | } | ||
376 | |||
377 | |||
378 | static int ikglp_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data) | ||
379 | { | ||
380 | struct ikglp_semaphore *sem = ikglp_from_lock((struct litmus_lock*)data); | ||
381 | |||
382 | int attempts = 0; | ||
383 | const int max_attempts = 10; | ||
384 | int locked = 0; | ||
385 | unsigned long flags; | ||
386 | |||
387 | int size = count; | ||
388 | char *next = page; | ||
389 | |||
390 | struct ikglp_proc_print_heap_args heap_args = {sem, &size, &next}; | ||
391 | |||
392 | int w; | ||
393 | int i; | ||
394 | |||
395 | while(attempts < max_attempts) | ||
396 | { | ||
397 | locked = raw_spin_trylock_irqsave(&sem->real_lock, flags); | ||
398 | |||
399 | if (unlikely(!locked)) { | ||
400 | ++attempts; | ||
401 | cpu_relax(); | ||
402 | } | ||
403 | else { | ||
404 | break; | ||
405 | } | ||
406 | } | ||
407 | |||
408 | if (!locked) { | ||
409 | w = scnprintf(next, size, "%s is busy.\n", sem->litmus_lock.name); | ||
410 | size -= w; | ||
411 | next += w; | ||
412 | return count - size; | ||
413 | } | ||
414 | |||
415 | w = scnprintf(next, size, "nr_replicas: %u\n", sem->nr_replicas); size -= w; next += w; | ||
416 | w = scnprintf(next, size, "max_fifo_len: %u\n", sem->max_fifo_len); size -= w; next += w; | ||
417 | w = scnprintf(next, size, "max_in_fifos: %u\n", sem->max_in_fifos); size -= w; next += w; | ||
418 | w = scnprintf(next, size, "current nr_in_fifos: %u\n", sem->nr_in_fifos); size -= w; next += w; | ||
419 | w = scnprintf(next, size, "nr in top-m: %u\n\n", sem->top_m_size); size -= w; next += w; | ||
420 | |||
421 | for (i = 0; i < sem->nr_replicas; ++i) | ||
422 | { | ||
423 | struct fifo_queue *fq = &sem->fifo_queues[i]; | ||
424 | w = scnprintf(next, size, "replica %d: owner = %s/%d (Virtually Unlocked = %u), hp waiter = %s/%d, length = %u\n", | ||
425 | i, | ||
426 | (fq->owner) ? fq->owner->comm : "null", | ||
427 | (fq->owner) ? fq->owner->pid : 0, | ||
428 | fq->is_vunlocked, | ||
429 | (fq->hp_waiter) ? fq->hp_waiter->comm : "null", | ||
430 | (fq->hp_waiter) ? fq->hp_waiter->pid : 0, | ||
431 | fq->count); | ||
432 | size -= w; next += w; | ||
433 | |||
434 | |||
435 | if (waitqueue_active(&fq->wait)) { | ||
436 | struct list_head *pos; | ||
437 | list_for_each(pos, &fq->wait.task_list) { | ||
438 | wait_queue_t *q = list_entry(pos, wait_queue_t, task_list); | ||
439 | struct task_struct *blocked_task = (struct task_struct*) q->private; | ||
440 | w = scnprintf(next, size, | ||
441 | "\t%s/%d (inh: %s/%d)\n", | ||
442 | blocked_task->comm, blocked_task->pid, | ||
443 | (tsk_rt(blocked_task)->inh_task) ? | ||
444 | tsk_rt(blocked_task)->inh_task->comm : "null", | ||
445 | (tsk_rt(blocked_task)->inh_task) ? | ||
446 | tsk_rt(blocked_task)->inh_task->pid : 0); | ||
447 | size -= w; | ||
448 | next += w; | ||
449 | } | ||
450 | } | ||
451 | else { | ||
452 | w = scnprintf(next, size, "\t<NONE>\n"); | ||
453 | size -= w; | ||
454 | next += w; | ||
455 | } | ||
456 | } | ||
457 | |||
458 | if (binheap_empty(&sem->priority_queue)) { | ||
459 | w = scnprintf(next, size, "pq:\n\t<NONE>\n"); | ||
460 | size -= w; | ||
461 | next += w; | ||
462 | } | ||
463 | else { | ||
464 | w = scnprintf(next, size, "donors:\n"); size -= w; next += w; | ||
465 | binheap_for_each(&sem->priority_queue, __ikglp_pq_to_proc, &heap_args); | ||
466 | } | ||
467 | |||
468 | if (binheap_empty(&sem->donors)) { | ||
469 | w = scnprintf(next, size, "donors:\n\t<NONE>\n"); | ||
470 | size -= w; | ||
471 | next += w; | ||
472 | } | ||
473 | else { | ||
474 | w = scnprintf(next, size, "donors:\n"); size -= w; next += w; | ||
475 | binheap_for_each(&sem->donors, __ikglp_donor_to_proc, &heap_args); | ||
476 | } | ||
477 | |||
478 | raw_spin_unlock_irqrestore(&sem->real_lock, flags); | ||
479 | |||
480 | return count - size; | ||
481 | } | ||
482 | |||
483 | |||
329 | static void ikglp_add_global_list(struct ikglp_semaphore *sem, | 484 | static void ikglp_add_global_list(struct ikglp_semaphore *sem, |
330 | struct task_struct *t, | 485 | struct task_struct *t, |
331 | ikglp_heap_node_t *node) | 486 | ikglp_heap_node_t *node) |
@@ -2011,6 +2166,32 @@ void ikglp_free(struct litmus_lock* l) | |||
2011 | 2166 | ||
2012 | 2167 | ||
2013 | 2168 | ||
2169 | static void ikglp_proc_add(struct litmus_lock *l) | ||
2170 | { | ||
2171 | if (!l->name) | ||
2172 | l->name = kmalloc(LOCK_NAME_LEN*sizeof(char), GFP_KERNEL); | ||
2173 | snprintf(l->name, LOCK_NAME_LEN, "ikglp-%d", l->ident); | ||
2174 | litmus_add_proc_lock(l, ikglp_proc_print); | ||
2175 | } | ||
2176 | |||
2177 | static void ikglp_proc_remove(struct litmus_lock *l) | ||
2178 | { | ||
2179 | if (l->name) { | ||
2180 | litmus_remove_proc_lock(l); | ||
2181 | |||
2182 | kfree(l->name); | ||
2183 | l->name = NULL; | ||
2184 | } | ||
2185 | } | ||
2186 | |||
2187 | |||
2188 | static struct litmus_lock_proc_ops ikglp_proc_ops = | ||
2189 | { | ||
2190 | .add = ikglp_proc_add, | ||
2191 | .remove = ikglp_proc_remove | ||
2192 | }; | ||
2193 | |||
2194 | |||
2014 | struct litmus_lock* ikglp_new(unsigned int m, | 2195 | struct litmus_lock* ikglp_new(unsigned int m, |
2015 | struct litmus_lock_ops* ops, | 2196 | struct litmus_lock_ops* ops, |
2016 | void* __user uarg) | 2197 | void* __user uarg) |
@@ -2067,6 +2248,7 @@ struct litmus_lock* ikglp_new(unsigned int m, | |||
2067 | } | 2248 | } |
2068 | 2249 | ||
2069 | sem->litmus_lock.ops = ops; | 2250 | sem->litmus_lock.ops = ops; |
2251 | sem->litmus_lock.proc = &ikglp_proc_ops; | ||
2070 | 2252 | ||
2071 | #ifdef CONFIG_DEBUG_SPINLOCK | 2253 | #ifdef CONFIG_DEBUG_SPINLOCK |
2072 | { | 2254 | { |
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 018b4982afb5..ecfaf4cd477a 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -6,7 +6,8 @@
6 | #include <litmus/litmus.h> | 6 | #include <litmus/litmus.h> |
7 | #include <litmus/jobs.h> | 7 | #include <litmus/jobs.h> |
8 | 8 | ||
9 | static inline void setup_release(struct task_struct *t, lt_t release) | 9 | //static inline void setup_release(struct task_struct *t, lt_t release) |
10 | void setup_release(struct task_struct *t, lt_t release) | ||
10 | { | 11 | { |
11 | /* prepare next release */ | 12 | /* prepare next release */ |
12 | t->rt_param.job_params.release = release; | 13 | t->rt_param.job_params.release = release; |
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 8bb05ab0e021..112e269a6348 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -654,6 +654,30 @@ out:
654 | return ret; | 654 | return ret; |
655 | } | 655 | } |
656 | 656 | ||
657 | |||
658 | |||
659 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
660 | struct kill_workers_data | ||
661 | { | ||
662 | struct work_struct work; | ||
663 | }; | ||
664 | |||
665 | static void __kill_workers(struct work_struct *w) | ||
666 | { | ||
667 | struct kill_workers_data *work = | ||
668 | container_of(w, struct kill_workers_data, work); | ||
669 | |||
670 | TRACE_TASK(current, "is processing litmus-worker-shutdown request.\n"); | ||
671 | |||
672 | #ifdef CONFIG_LITMUS_NVIDIA | ||
673 | shutdown_nvidia_info(); | ||
674 | #endif | ||
675 | if (!klmirqd_is_dead()) | ||
676 | kill_klmirqd(); | ||
677 | kfree(work); | ||
678 | } | ||
679 | #endif | ||
680 | |||
657 | /* Switching a plugin in use is tricky. | 681 | /* Switching a plugin in use is tricky. |
658 | * We must watch out that no real-time tasks exists | 682 | * We must watch out that no real-time tasks exists |
659 | * (and that none is created in parallel) and that the plugin is not | 683 | * (and that none is created in parallel) and that the plugin is not |
@@ -661,18 +685,59 @@ out: | |||
661 | */ | 685 | */ |
662 | int switch_sched_plugin(struct sched_plugin* plugin) | 686 | int switch_sched_plugin(struct sched_plugin* plugin) |
663 | { | 687 | { |
688 | int ret; | ||
689 | lt_t maybe_deadlock; | ||
690 | lt_t spin_time = 100000000; /* 100 ms */ | ||
691 | |||
664 | BUG_ON(!plugin); | 692 | BUG_ON(!plugin); |
665 | 693 | ||
666 | #ifdef CONFIG_LITMUS_SOFTIRQD | 694 | #ifdef CONFIG_LITMUS_SOFTIRQD |
667 | if (!klmirqd_is_dead()) { | 695 | if (!klmirqd_is_dead()) { |
668 | kill_klmirqd(); | 696 | struct kill_workers_data *wq_job = |
697 | kmalloc(sizeof(struct kill_workers_data), GFP_ATOMIC); | ||
698 | INIT_WORK(&wq_job->work, __kill_workers); | ||
699 | |||
700 | schedule_work(&wq_job->work); | ||
701 | |||
702 | /* we're atomic, so try to spin until workers are shut down */ | ||
703 | maybe_deadlock = litmus_clock(); | ||
704 | while (!klmirqd_is_dead()) { | ||
705 | cpu_relax(); | ||
706 | mb(); | ||
707 | |||
708 | if (lt_before(maybe_deadlock + spin_time, litmus_clock())) { | ||
709 | printk("Could not kill klmirqd! Try again if running " | ||
710 | "on uniprocessor.\n"); | ||
711 | ret = -EBUSY; | ||
712 | goto out; | ||
713 | } | ||
714 | } | ||
715 | |||
716 | TRACE("klmirqd dead! task count = %d\n", atomic_read(&rt_task_count)); | ||
669 | } | 717 | } |
718 | #else | ||
719 | #ifdef CONFIG_LITMUS_NVIDIA | ||
720 | /* nvidia handling is not threaded */ | ||
721 | shutdown_nvidia_info(); | ||
722 | #endif | ||
670 | #endif | 723 | #endif |
671 | 724 | ||
672 | if (atomic_read(&rt_task_count) == 0) | 725 | maybe_deadlock = litmus_clock(); |
673 | return stop_machine(do_plugin_switch, plugin, NULL); | 726 | while (atomic_read(&rt_task_count) != 0) { |
674 | else | 727 | cpu_relax(); |
675 | return -EBUSY; | 728 | mb(); |
729 | |||
730 | if (lt_before(maybe_deadlock + spin_time, litmus_clock())) { | ||
731 | ret = -EBUSY; | ||
732 | goto out; | ||
733 | } | ||
734 | } | ||
735 | |||
736 | ret = stop_machine(do_plugin_switch, plugin, NULL); | ||
737 | TRACE("stop_machine returned: %d\n", ret); | ||
738 | |||
739 | out: | ||
740 | return ret; | ||
676 | } | 741 | } |
677 | 742 | ||
678 | /* Called upon fork. | 743 | /* Called upon fork. |
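
Because switch_sched_plugin() may be called from an atomic context, the klmirqd shutdown above is handed off to a workqueue and the caller then busy-waits, giving up after roughly 100 ms instead of deadlocking. A hedged sketch of that bounded wait, assuming the LITMUS^RT helpers litmus_clock() and lt_before(); the helper name spin_until() is hypothetical:

```c
#include <litmus/litmus.h>

/* Spin until 'done()' reports true, or fail with -EBUSY after 'timeout_ns'
 * nanoseconds have elapsed (the plugin-switch path uses ~100 ms). */
static int spin_until(int (*done)(void), lt_t timeout_ns)
{
	lt_t start = litmus_clock();

	while (!done()) {
		cpu_relax();
		mb();
		if (lt_before(start + timeout_ns, litmus_clock()))
			return -EBUSY;	/* give up instead of deadlocking */
	}
	return 0;
}
```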
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index 8bee9697dabd..fbc6ab7cc5f5 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -6,6 +6,7 @@
6 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
7 | #include <linux/slab.h> | 7 | #include <linux/slab.h> |
8 | #include <linux/mutex.h> | 8 | #include <linux/mutex.h> |
9 | #include <linux/completion.h> | ||
9 | 10 | ||
10 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
11 | #include <linux/cpuset.h> | 12 | #include <linux/cpuset.h> |
@@ -29,9 +30,10 @@ struct klmirqd_registration | |||
29 | { | 30 | { |
30 | raw_spinlock_t lock; | 31 | raw_spinlock_t lock; |
31 | u32 nr_threads; | 32 | u32 nr_threads; |
33 | struct list_head threads; | ||
34 | |||
32 | unsigned int initialized:1; | 35 | unsigned int initialized:1; |
33 | unsigned int shuttingdown:1; | 36 | unsigned int shuttingdown:1; |
34 | struct list_head threads; | ||
35 | }; | 37 | }; |
36 | 38 | ||
37 | static atomic_t klmirqd_id_gen = ATOMIC_INIT(-1); | 39 | static atomic_t klmirqd_id_gen = ATOMIC_INIT(-1); |
@@ -92,13 +94,18 @@ void kill_klmirqd(void) | |||
92 | 94 | ||
93 | if(info->terminating != 1) | 95 | if(info->terminating != 1) |
94 | { | 96 | { |
97 | struct completion exit; | ||
98 | init_completion(&exit); | ||
99 | |||
95 | info->terminating = 1; | 100 | info->terminating = 1; |
101 | info->exited = &exit; | ||
96 | mb(); /* just to be sure? */ | 102 | mb(); /* just to be sure? */ |
97 | flush_pending(info->klmirqd); | 103 | flush_pending(info->klmirqd); |
98 | 104 | ||
99 | /* signal termination */ | 105 | /* signal termination */ |
100 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | 106 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); |
101 | kthread_stop(info->klmirqd); | 107 | kthread_stop(info->klmirqd); |
108 | wait_for_completion(&exit); /* gets completed when task exits rt-mode */ | ||
102 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | 109 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); |
103 | } | 110 | } |
104 | } | 111 | } |
@@ -126,14 +133,21 @@ void kill_klmirqd_thread(struct task_struct* klmirqd_thread) | |||
126 | info = tsk_rt(klmirqd_thread)->klmirqd_info; | 133 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
127 | 134 | ||
128 | if(info->terminating != 1) { | 135 | if(info->terminating != 1) { |
136 | struct completion exit; | ||
137 | init_completion(&exit); | ||
129 | info->terminating = 1; | 138 | info->terminating = 1; |
139 | info->exited = &exit; | ||
130 | mb(); | 140 | mb(); |
131 | 141 | ||
132 | flush_pending(klmirqd_thread); | 142 | flush_pending(klmirqd_thread); |
143 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
144 | |||
133 | kthread_stop(klmirqd_thread); | 145 | kthread_stop(klmirqd_thread); |
146 | wait_for_completion(&exit); /* gets completed when task exits rt-mode */ | ||
147 | } | ||
148 | else { | ||
149 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
134 | } | 150 | } |
135 | |||
136 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
137 | } | 151 | } |
138 | 152 | ||
139 | struct klmirqd_launch_data | 153 | struct klmirqd_launch_data |
@@ -157,8 +171,6 @@ static void __launch_klmirqd_thread(struct work_struct *work) | |||
157 | 171 | ||
158 | TRACE("Creating klmirqd thread\n"); | 172 | TRACE("Creating klmirqd thread\n"); |
159 | 173 | ||
160 | |||
161 | |||
162 | if (launch_data->cpu_affinity != -1) { | 174 | if (launch_data->cpu_affinity != -1) { |
163 | if (launch_data->name[0] == '\0') { | 175 | if (launch_data->name[0] == '\0') { |
164 | id = atomic_inc_return(&klmirqd_id_gen); | 176 | id = atomic_inc_return(&klmirqd_id_gen); |
@@ -295,8 +307,9 @@ static int become_litmus_daemon(struct task_struct* tsk) | |||
295 | static int become_normal_daemon(struct task_struct* tsk) | 307 | static int become_normal_daemon(struct task_struct* tsk) |
296 | { | 308 | { |
297 | int ret = 0; | 309 | int ret = 0; |
298 | |||
299 | struct sched_param param = { .sched_priority = 0}; | 310 | struct sched_param param = { .sched_priority = 0}; |
311 | |||
312 | TRACE_TASK(tsk, "exiting real-time mode\n"); | ||
300 | sched_setscheduler_nocheck(tsk, SCHED_NORMAL, ¶m); | 313 | sched_setscheduler_nocheck(tsk, SCHED_NORMAL, ¶m); |
301 | 314 | ||
302 | return ret; | 315 | return ret; |
@@ -315,22 +328,25 @@ static int register_klmirqd(struct task_struct* tsk) | |||
315 | goto out; | 328 | goto out; |
316 | } | 329 | } |
317 | 330 | ||
331 | /* allocate and initialize klmirqd data for the thread */ | ||
332 | info = kmalloc(sizeof(struct klmirqd_info), GFP_KERNEL); | ||
333 | if (!info) { | ||
334 | TRACE("Failed to allocate klmirqd_info struct!\n"); | ||
335 | retval = -1; /* todo: pick better code */ | ||
336 | goto out; | ||
337 | } | ||
338 | |||
318 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | 339 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); |
319 | 340 | ||
320 | if (!__klmirqd_is_ready()) { | 341 | if (!__klmirqd_is_ready()) { |
321 | TRACE("klmirqd is not ready! Did you forget to initialize it?\n"); | 342 | TRACE("klmirqd is not ready! Did you forget to initialize it?\n"); |
343 | kfree(info); | ||
322 | WARN_ON(1); | 344 | WARN_ON(1); |
323 | retval = -1; | 345 | retval = -1; |
324 | goto out_unlock; | 346 | goto out_unlock; |
325 | } | 347 | } |
326 | 348 | ||
327 | /* allocate and initialize klmirqd data for the thread */ | 349 | /* allocate and initialize klmirqd data for the thread */ |
328 | info = kmalloc(sizeof(struct klmirqd_info), GFP_KERNEL); | ||
329 | if (!info) { | ||
330 | TRACE("Failed to allocate klmirqd_info struct!\n"); | ||
331 | retval = -1; /* todo: pick better code */ | ||
332 | goto out_unlock; | ||
333 | } | ||
334 | memset(info, 0, sizeof(struct klmirqd_info)); | 350 | memset(info, 0, sizeof(struct klmirqd_info)); |
335 | info->klmirqd = tsk; | 351 | info->klmirqd = tsk; |
336 | info->pending_tasklets_hi.tail = &info->pending_tasklets_hi.head; | 352 | info->pending_tasklets_hi.tail = &info->pending_tasklets_hi.head; |
@@ -338,7 +354,7 @@ static int register_klmirqd(struct task_struct* tsk) | |||
338 | INIT_LIST_HEAD(&info->worklist); | 354 | INIT_LIST_HEAD(&info->worklist); |
339 | INIT_LIST_HEAD(&info->klmirqd_reg); | 355 | INIT_LIST_HEAD(&info->klmirqd_reg); |
340 | raw_spin_lock_init(&info->lock); | 356 | raw_spin_lock_init(&info->lock); |
341 | 357 | info->exited = NULL; | |
342 | 358 | ||
343 | /* now register with klmirqd */ | 359 | /* now register with klmirqd */ |
344 | list_add_tail(&info->klmirqd_reg, &klmirqd_state.threads); | 360 | list_add_tail(&info->klmirqd_reg, &klmirqd_state.threads); |
@@ -360,8 +376,9 @@ static int unregister_klmirqd(struct task_struct* tsk) | |||
360 | unsigned long flags; | 376 | unsigned long flags; |
361 | struct klmirqd_info *info = tsk_rt(tsk)->klmirqd_info; | 377 | struct klmirqd_info *info = tsk_rt(tsk)->klmirqd_info; |
362 | 378 | ||
379 | TRACE_CUR("unregistering.\n"); | ||
380 | |||
363 | if (!tsk_rt(tsk)->is_interrupt_thread || !info) { | 381 | if (!tsk_rt(tsk)->is_interrupt_thread || !info) { |
364 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); | ||
365 | WARN_ON(1); | 382 | WARN_ON(1); |
366 | retval = -1; | 383 | retval = -1; |
367 | goto out; | 384 | goto out; |
@@ -734,6 +751,7 @@ static int run_klmirqd(void* callback) | |||
734 | { | 751 | { |
735 | int retval = 0; | 752 | int retval = 0; |
736 | struct klmirqd_info* info = NULL; | 753 | struct klmirqd_info* info = NULL; |
754 | struct completion* exit = NULL; | ||
737 | klmirqd_callback_t* cb = (klmirqd_callback_t*)(callback); | 755 | klmirqd_callback_t* cb = (klmirqd_callback_t*)(callback); |
738 | 756 | ||
739 | retval = become_litmus_daemon(current); | 757 | retval = become_litmus_daemon(current); |
@@ -828,11 +846,18 @@ static int run_klmirqd(void* callback) | |||
828 | 846 | ||
829 | failed_unregister: | 847 | failed_unregister: |
830 | /* remove our registration from klmirqd */ | 848 | /* remove our registration from klmirqd */ |
849 | exit = info->exited; | ||
850 | mb(); | ||
831 | unregister_klmirqd(current); | 851 | unregister_klmirqd(current); |
832 | 852 | ||
833 | failed_sched_normal: | 853 | failed_sched_normal: |
834 | become_normal_daemon(current); | 854 | become_normal_daemon(current); |
835 | 855 | ||
856 | if (exit) { | ||
857 | TRACE_TASK(current, "signalling exit\n"); | ||
858 | complete(exit); | ||
859 | } | ||
860 | |||
836 | failed: | 861 | failed: |
837 | return retval; | 862 | return retval; |
838 | } | 863 | } |
@@ -1008,7 +1033,7 @@ int __litmus_tasklet_schedule(struct tasklet_struct *t, struct task_struct* klmi | |||
1008 | if (unlikely(!is_realtime(klmirqd_thread) || | 1033 | if (unlikely(!is_realtime(klmirqd_thread) || |
1009 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || | 1034 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1010 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { | 1035 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1011 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); | 1036 | TRACE("can't handle tasklets\n"); |
1012 | return ret; | 1037 | return ret; |
1013 | } | 1038 | } |
1014 | 1039 | ||
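
The litmus_softirq.c changes replace the bare kthread_stop() with a small handshake: the killer publishes an on-stack completion through info->exited, and the klmirqd thread completes it only after reverting to SCHED_NORMAL, so the caller knows the worker has truly left real-time mode. A minimal sketch of the handshake, with a hypothetical struct worker_info standing in for struct klmirqd_info:

```c
#include <linux/completion.h>
#include <linux/kthread.h>

struct worker_info {
	struct task_struct *thread;
	struct completion *exited;	/* set by the killer, fired by the worker */
};

/* Killer side: request termination, then wait for the worker's signal. */
static void kill_worker(struct worker_info *info)
{
	struct completion exit;

	init_completion(&exit);
	info->exited = &exit;
	mb();				/* publish 'exited' before stopping */

	kthread_stop(info->thread);
	wait_for_completion(&exit);	/* fired once the worker left rt-mode */
}

/* Worker side, on its way out (after switching back to SCHED_NORMAL). */
static void worker_exit_path(struct worker_info *info)
{
	struct completion *exit = info->exited;

	if (exit)
		complete(exit);
}
```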
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index 3951fc92ad03..0050ce65e521 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -259,47 +259,67 @@ static struct notifier_block nvidia_going = {
259 | .priority = 1, | 259 | .priority = 1, |
260 | }; | 260 | }; |
261 | 261 | ||
262 | int init_nvidia_info(void) | 262 | |
263 | struct init_nvinfo_wq_data | ||
264 | { | ||
265 | struct work_struct work; | ||
266 | }; | ||
267 | |||
268 | static void __init_nvidia_info(struct work_struct *w) | ||
263 | { | 269 | { |
270 | struct init_nvinfo_wq_data *work = | ||
271 | container_of(w, struct init_nvinfo_wq_data, work); | ||
272 | struct module* mod; | ||
273 | |||
264 | mutex_lock(&module_mutex); | 274 | mutex_lock(&module_mutex); |
265 | nvidia_mod = find_module("nvidia"); | 275 | mod = find_module("nvidia"); |
266 | mutex_unlock(&module_mutex); | 276 | mutex_unlock(&module_mutex); |
267 | if(nvidia_mod != NULL) | 277 | |
268 | { | 278 | if(mod != NULL) { |
269 | TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, | 279 | TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, |
270 | (void*)(nvidia_mod->module_core), | 280 | (void*)(mod->module_core), |
271 | (void*)(nvidia_mod->module_core) + nvidia_mod->core_size); | 281 | (void*)(mod->module_core) + mod->core_size); |
272 | init_nv_device_reg(); | ||
273 | 282 | ||
283 | init_nv_device_reg(); | ||
284 | nvidia_mod = mod; /* make module visible to others */ | ||
274 | register_module_notifier(&nvidia_going); | 285 | register_module_notifier(&nvidia_going); |
275 | |||
276 | return(0); | ||
277 | } | 286 | } |
278 | else | 287 | else { |
279 | { | ||
280 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); | 288 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); |
281 | |||
282 | init_nv_device_reg(); | 289 | init_nv_device_reg(); |
283 | return(0); | ||
284 | } | 290 | } |
291 | |||
292 | kfree(work); | ||
293 | } | ||
294 | |||
295 | int init_nvidia_info(void) | ||
296 | { | ||
297 | struct init_nvinfo_wq_data *wq_job = | ||
298 | kmalloc(sizeof(struct init_nvinfo_wq_data), GFP_ATOMIC); | ||
299 | INIT_WORK(&wq_job->work, __init_nvidia_info); | ||
300 | schedule_work(&wq_job->work); | ||
301 | return 0; | ||
285 | } | 302 | } |
286 | 303 | ||
287 | void shutdown_nvidia_info(void) | 304 | void shutdown_nvidia_info(void) |
288 | { | 305 | { |
289 | nvidia_mod = NULL; | 306 | if (nvidia_mod) { |
290 | mb(); | 307 | nvidia_mod = NULL; |
308 | mb(); | ||
291 | 309 | ||
292 | unregister_module_notifier(&nvidia_going); | 310 | unregister_module_notifier(&nvidia_going); |
293 | shutdown_nv_device_reg(); | 311 | shutdown_nv_device_reg(); |
312 | } | ||
294 | } | 313 | } |
295 | 314 | ||
296 | /* works with pointers to static data inside the module too. */ | 315 | /* works with pointers to static data inside the module too. */ |
297 | int is_nvidia_func(void* func_addr) | 316 | int is_nvidia_func(void* func_addr) |
298 | { | 317 | { |
299 | int ret = 0; | 318 | int ret = 0; |
300 | if(nvidia_mod) | 319 | struct module* mod = nvidia_mod; |
320 | if(mod) | ||
301 | { | 321 | { |
302 | ret = within_module_core((long unsigned int)func_addr, nvidia_mod); | 322 | ret = within_module_core((long unsigned int)func_addr, mod); |
303 | /* | 323 | /* |
304 | if(ret) | 324 | if(ret) |
305 | { | 325 | { |
@@ -485,7 +505,6 @@ static int init_nv_device_reg(void) | |||
485 | memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); | 505 | memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); |
486 | mb(); | 506 | mb(); |
487 | 507 | ||
488 | |||
489 | for(i = 0; i < num_online_gpus(); ++i) { | 508 | for(i = 0; i < num_online_gpus(); ++i) { |
490 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); | 509 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); |
491 | INIT_BINHEAP_HANDLE(&NV_DEVICE_REG[i].owners, gpu_owner_max_priority_order); | 510 | INIT_BINHEAP_HANDLE(&NV_DEVICE_REG[i].owners, gpu_owner_max_priority_order); |
@@ -558,18 +577,20 @@ static int shutdown_nv_device_reg(void) | |||
558 | ) | 577 | ) |
559 | { | 578 | { |
560 | raw_spin_lock_irqsave(®->lock, flags); | 579 | raw_spin_lock_irqsave(®->lock, flags); |
561 | |||
562 | if (reg->interrupt_thread && reg->interrupt_ready) { | 580 | if (reg->interrupt_thread && reg->interrupt_ready) { |
563 | struct task_struct* th = reg->interrupt_thread; | 581 | struct task_struct* th = reg->interrupt_thread; |
564 | reg->interrupt_thread = NULL; | 582 | reg->interrupt_thread = NULL; |
565 | mb(); | 583 | mb(); |
566 | reg->interrupt_ready = 0; | 584 | reg->interrupt_ready = 0; |
567 | mb(); | 585 | mb(); |
568 | 586 | raw_spin_unlock_irqrestore(®->lock, flags); | |
569 | kill_klmirqd_thread(th); | 587 | kill_klmirqd_thread(th); |
570 | } | 588 | } |
589 | else | ||
590 | raw_spin_unlock_irqrestore(®->lock, flags); | ||
571 | 591 | ||
572 | #ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED | 592 | #ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED |
593 | raw_spin_lock_irqsave(®->lock, flags); | ||
573 | if (reg->workq_thread && reg->workq_ready) { | 594 | if (reg->workq_thread && reg->workq_ready) { |
574 | struct task_struct* th = reg->workq_thread; | 595 | struct task_struct* th = reg->workq_thread; |
575 | reg->workq_thread = NULL; | 596 | reg->workq_thread = NULL; |
@@ -577,10 +598,12 @@ static int shutdown_nv_device_reg(void) | |||
577 | reg->workq_ready = 0; | 598 | reg->workq_ready = 0; |
578 | mb(); | 599 | mb(); |
579 | 600 | ||
601 | raw_spin_unlock_irqrestore(®->lock, flags); | ||
580 | kill_klmirqd_thread(th); | 602 | kill_klmirqd_thread(th); |
581 | } | 603 | } |
604 | else | ||
605 | raw_spin_unlock_irqrestore(®->lock, flags); | ||
582 | #endif | 606 | #endif |
583 | raw_spin_unlock_irqrestore(®->lock, flags); | ||
584 | } | 607 | } |
585 | 608 | ||
586 | while (!binheap_empty(®->owners)) { | 609 | while (!binheap_empty(®->owners)) { |
@@ -642,6 +665,8 @@ static struct task_struct* __get_klm_thread(nv_device_registry_t* reg, nvklmtype | |||
642 | klmirqd = reg->workq_thread; | 665 | klmirqd = reg->workq_thread; |
643 | break; | 666 | break; |
644 | #endif | 667 | #endif |
668 | default: | ||
669 | break; | ||
645 | } | 670 | } |
646 | 671 | ||
647 | return klmirqd; | 672 | return klmirqd; |
@@ -671,9 +696,22 @@ static void __unlock_klm_thread(nv_device_registry_t* reg, unsigned long* flags, | |||
671 | struct task_struct* get_and_lock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags) | 696 | struct task_struct* get_and_lock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags) |
672 | { | 697 | { |
673 | nv_device_registry_t *reg; | 698 | nv_device_registry_t *reg; |
699 | struct task_struct *th; | ||
674 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | 700 | BUG_ON(target_device_id >= NV_DEVICE_NUM); |
701 | |||
702 | if (unlikely(nvidia_mod == NULL)) | ||
703 | return NULL; | ||
704 | |||
675 | reg = &NV_DEVICE_REG[target_device_id]; | 705 | reg = &NV_DEVICE_REG[target_device_id]; |
676 | return __get_and_lock_klm_thread(reg, flags, INTERRUPT_TH); | 706 | th = __get_and_lock_klm_thread(reg, flags, INTERRUPT_TH); |
707 | |||
708 | barrier(); | ||
709 | if (unlikely(nvidia_mod == NULL)) { | ||
710 | th = NULL; | ||
711 | __unlock_klm_thread(reg, flags, INTERRUPT_TH); | ||
712 | } | ||
713 | |||
714 | return th; | ||
677 | } | 715 | } |
678 | 716 | ||
679 | void unlock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags) | 717 | void unlock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags) |
@@ -700,9 +738,22 @@ struct task_struct* get_nvklmirqd_thread(u32 target_device_id) | |||
700 | struct task_struct* get_and_lock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags) | 738 | struct task_struct* get_and_lock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags) |
701 | { | 739 | { |
702 | nv_device_registry_t *reg; | 740 | nv_device_registry_t *reg; |
741 | struct task_struct *th; | ||
703 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | 742 | BUG_ON(target_device_id >= NV_DEVICE_NUM); |
743 | |||
744 | if (unlikely(nvidia_mod == NULL)) | ||
745 | return NULL; | ||
746 | |||
704 | reg = &NV_DEVICE_REG[target_device_id]; | 747 | reg = &NV_DEVICE_REG[target_device_id]; |
705 | return __get_and_lock_klm_thread(reg, flags, WORKQ_TH); | 748 | th = __get_and_lock_klm_thread(reg, flags, WORKQ_TH); |
749 | |||
750 | barrier(); | ||
751 | if (unlikely(nvidia_mod == NULL)) { | ||
752 | th = NULL; | ||
753 | __unlock_klm_thread(reg, flags, WORKQ_TH); | ||
754 | } | ||
755 | |||
756 | return th; | ||
706 | } | 757 | } |
707 | 758 | ||
708 | void unlock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags) | 759 | void unlock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags) |
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 50eb4d446303..1a7f0ffdae33 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -527,11 +527,11 @@ static void check_for_preemptions(cedf_domain_t *cluster)
527 | &per_cpu(cedf_cpu_entries, task_cpu(task))); | 527 | &per_cpu(cedf_cpu_entries, task_cpu(task))); |
528 | if(affinity) | 528 | if(affinity) |
529 | last = affinity; | 529 | last = affinity; |
530 | else if(requeue_preempted_job(last->linked)) | 530 | else if(should_requeue_preempted_job(last->linked)) |
531 | requeue(last->linked); | 531 | requeue(last->linked); |
532 | } | 532 | } |
533 | #else | 533 | #else |
534 | if (requeue_preempted_job(last->linked)) | 534 | if (should_requeue_preempted_job(last->linked)) |
535 | requeue(last->linked); | 535 | requeue(last->linked); |
536 | #endif | 536 | #endif |
537 | link_task_to_cpu(task, last); | 537 | link_task_to_cpu(task, last); |
@@ -674,39 +674,56 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
674 | } | 674 | } |
675 | } | 675 | } |
676 | 676 | ||
677 | static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t) | 677 | static enum hrtimer_restart cedf_simple_on_exhausted(struct task_struct *t, int in_schedule) |
678 | { | 678 | { |
679 | /* Assumption: t is scheduled on the CPU executing this callback */ | 679 | /* Assumption: t is scheduled on the CPU executing this callback */ |
680 | 680 | ||
681 | if (in_schedule) { | ||
682 | BUG_ON(tsk_rt(t)->scheduled_on != smp_processor_id()); | ||
683 | if (budget_precisely_tracked(t) && cancel_enforcement_timer(t) < 0) { | ||
684 | TRACE_TASK(t, "raced with timer. deffering to timer.\n"); | ||
685 | goto out; | ||
686 | } | ||
687 | } | ||
688 | |||
681 | if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { | 689 | if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { |
682 | /* signal exhaustion */ | 690 | /* signal exhaustion */ |
683 | send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */ | 691 | send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */ |
684 | } | 692 | } |
685 | 693 | ||
686 | if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) { | 694 | if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) { |
687 | if (!is_np(t)) { | 695 | if (likely(!is_np(t))) { |
688 | /* np tasks will be preempted when they become | 696 | /* np tasks will be preempted when they become |
689 | * preemptable again | 697 | * preemptable again |
690 | */ | 698 | */ |
691 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); | 699 | if (!in_schedule) { |
692 | 700 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); | |
693 | litmus_reschedule_local(); | 701 | litmus_reschedule_local(); |
694 | set_will_schedule(); | 702 | set_will_schedule(); |
703 | } | ||
695 | } else if (is_user_np(t)) { | 704 | } else if (is_user_np(t)) { |
696 | TRACE_TASK(t, "is non-preemptable, preemption delayed.\n"); | 705 | TRACE_TASK(t, "is non-preemptable, preemption delayed.\n"); |
697 | |||
698 | request_exit_np(t); | 706 | request_exit_np(t); |
699 | } | 707 | } |
700 | } | 708 | } |
701 | 709 | ||
710 | out: | ||
702 | return HRTIMER_NORESTART; | 711 | return HRTIMER_NORESTART; |
703 | } | 712 | } |
704 | 713 | ||
705 | 714 | ||
706 | static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t) | 715 | static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t, int in_schedule) |
707 | { | 716 | { |
708 | enum hrtimer_restart restart = HRTIMER_NORESTART; | 717 | enum hrtimer_restart restart = HRTIMER_NORESTART; |
709 | 718 | ||
719 | if (in_schedule) { | ||
720 | BUG_ON(tsk_rt(t)->scheduled_on != smp_processor_id()); | ||
721 | if (budget_precisely_tracked(t) && cancel_enforcement_timer(t) == -1) { | ||
722 | TRACE_TASK(t, "raced with timer. deffering to timer.\n"); | ||
723 | goto out; | ||
724 | } | ||
725 | } | ||
726 | |||
710 | /* t may or may not be scheduled */ | 727 | /* t may or may not be scheduled */ |
711 | 728 | ||
712 | if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { | 729 | if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { |
@@ -720,24 +737,28 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t) | |||
720 | 737 | ||
721 | if (budget_enforced(t) && !bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) { | 738 | if (budget_enforced(t) && !bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) { |
722 | int cpu = (tsk_rt(t)->linked_on != NO_CPU) ? | 739 | int cpu = (tsk_rt(t)->linked_on != NO_CPU) ? |
723 | tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on; | 740 | tsk_rt(t)->linked_on : tsk_rt(t)->scheduled_on; |
724 | 741 | ||
725 | if (is_np(t) && is_user_np(t)) { | 742 | if (is_np(t) && is_user_np(t)) { |
726 | TRACE_TASK(t, "is non-preemptable, preemption delayed.\n"); | ||
727 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); | 743 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); |
744 | TRACE_TASK(t, "is non-preemptable, preemption delayed.\n"); | ||
728 | request_exit_np(t); | 745 | request_exit_np(t); |
729 | } | 746 | } |
730 | /* where do we need to call resched? */ | 747 | /* where do we need to call resched? */ |
731 | else if (cpu == smp_processor_id()) { | 748 | else if (cpu == smp_processor_id()) { |
732 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); | ||
733 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); | 749 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); |
734 | litmus_reschedule_local(); | 750 | if (!in_schedule) { |
735 | set_will_schedule(); | 751 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); |
752 | litmus_reschedule_local(); | ||
753 | set_will_schedule(); | ||
754 | } | ||
736 | } | 755 | } |
737 | else if (cpu != NO_CPU) { | 756 | else if (cpu != NO_CPU) { |
738 | TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu); | ||
739 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); | 757 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); |
740 | litmus_reschedule(cpu); | 758 | if (!in_schedule) { |
759 | TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu); | ||
760 | litmus_reschedule(cpu); | ||
761 | } | ||
741 | } | 762 | } |
742 | else if (unlikely(tsk_rt(t)->blocked_lock)) { | 763 | else if (unlikely(tsk_rt(t)->blocked_lock)) { |
743 | /* we shouldn't be draining while waiting for litmus lock, but we | 764 | /* we shouldn't be draining while waiting for litmus lock, but we |
@@ -749,6 +770,8 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t) | |||
749 | cedf_domain_t *cluster; | 770 | cedf_domain_t *cluster; |
750 | unsigned long flags; | 771 | unsigned long flags; |
751 | 772 | ||
773 | BUG_ON(in_schedule); | ||
774 | |||
752 | cluster = task_cpu_cluster(t); | 775 | cluster = task_cpu_cluster(t); |
753 | 776 | ||
754 | // 1) refresh budget through job completion | 777 | // 1) refresh budget through job completion |
@@ -843,6 +866,7 @@ static enum hrtimer_restart cedf_simple_io_on_exhausted(struct task_struct *t) | |||
843 | } | 866 | } |
844 | } | 867 | } |
845 | 868 | ||
869 | out: | ||
846 | return restart; | 870 | return restart; |
847 | } | 871 | } |
848 | 872 | ||
@@ -875,10 +899,18 @@ static void cedf_trigger_vunlock(struct task_struct *t) | |||
875 | } | 899 | } |
876 | #endif | 900 | #endif |
877 | 901 | ||
878 | static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) | 902 | static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t, int in_schedule) |
879 | { | 903 | { |
880 | enum hrtimer_restart restart = HRTIMER_NORESTART; | 904 | enum hrtimer_restart restart = HRTIMER_NORESTART; |
881 | 905 | ||
906 | if (in_schedule) { | ||
907 | BUG_ON(tsk_rt(t)->scheduled_on != smp_processor_id()); | ||
908 | if (budget_precisely_tracked(t) && cancel_enforcement_timer(t) == -1) { | ||
909 | TRACE_TASK(t, "raced with timer. deffering to timer.\n"); | ||
910 | goto out; | ||
911 | } | ||
912 | } | ||
913 | |||
882 | /* t may or may not be scheduled */ | 914 | /* t may or may not be scheduled */ |
883 | 915 | ||
884 | if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { | 916 | if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) { |
@@ -911,21 +943,27 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) | |||
911 | } | 943 | } |
912 | /* where do we need to call resched? */ | 944 | /* where do we need to call resched? */ |
913 | else if (cpu == smp_processor_id()) { | 945 | else if (cpu == smp_processor_id()) { |
914 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); | ||
915 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); | 946 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); |
916 | litmus_reschedule_local(); | 947 | if (!in_schedule) { |
917 | set_will_schedule(); | 948 | TRACE_TASK(t, "is preemptable => FORCE_RESCHED\n"); |
949 | litmus_reschedule_local(); | ||
950 | set_will_schedule(); | ||
951 | } | ||
918 | } | 952 | } |
919 | else if (cpu != NO_CPU) { | 953 | else if (cpu != NO_CPU) { |
920 | TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu); | ||
921 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); | 954 | bt_flag_set(t, BTF_BUDGET_EXHAUSTED); |
922 | litmus_reschedule(cpu); | 955 | if (!in_schedule) { |
956 | litmus_reschedule(cpu); | ||
957 | TRACE_TASK(t, "is preemptable on remote cpu (%d) => FORCE_RESCHED\n", cpu); | ||
958 | } | ||
923 | } | 959 | } |
924 | else { | 960 | else { |
925 | lt_t remaining; | 961 | lt_t remaining; |
926 | cedf_domain_t *cluster; | 962 | cedf_domain_t *cluster; |
927 | unsigned long flags; | 963 | unsigned long flags; |
928 | 964 | ||
965 | BUG_ON(in_schedule); | ||
966 | |||
929 | cluster = task_cpu_cluster(t); | 967 | cluster = task_cpu_cluster(t); |
930 | 968 | ||
931 | // 1) refresh budget through job completion | 969 | // 1) refresh budget through job completion |
@@ -1025,6 +1063,7 @@ static enum hrtimer_restart cedf_sobliv_on_exhausted(struct task_struct *t) | |||
1025 | } | 1063 | } |
1026 | } | 1064 | } |
1027 | 1065 | ||
1066 | out: | ||
1028 | return restart; | 1067 | return restart; |
1029 | } | 1068 | } |
1030 | 1069 | ||
@@ -1041,7 +1080,7 @@ static void cedf_tick(struct task_struct* t) | |||
1041 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && | 1080 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && |
1042 | budget_exhausted(t)) { | 1081 | budget_exhausted(t)) { |
1043 | TRACE_TASK(t, "budget exhausted\n"); | 1082 | TRACE_TASK(t, "budget exhausted\n"); |
1044 | budget_state_machine(t,on_exhausted); | 1083 | budget_state_machine2(t,on_exhausted,!IN_SCHEDULE); |
1045 | } | 1084 | } |
1046 | } | 1085 | } |
1047 | 1086 | ||
@@ -1388,17 +1427,28 @@ static struct task_struct* cedf_schedule(struct task_struct * prev) | |||
1388 | } | 1427 | } |
1389 | #endif | 1428 | #endif |
1390 | 1429 | ||
1430 | /* Detect and handle budget exhaustion if it hasn't already been done. | ||
1431 | * Do this before acquring any locks. */ | ||
1432 | if (prev && is_realtime(prev) && | ||
1433 | budget_exhausted(prev) && | ||
1434 | !is_completed(prev) && /* don't bother with jobs on their way out */ | ||
1435 | ((budget_enforced(prev) && !bt_flag_is_set(prev, BTF_BUDGET_EXHAUSTED)) || | ||
1436 | (budget_signalled(prev) && !bt_flag_is_set(prev, BTF_SIG_BUDGET_SENT))) ) { | ||
1437 | TRACE_TASK(prev, "handling exhaustion in schedule() at %llu\n", litmus_clock()); | ||
1438 | budget_state_machine2(prev,on_exhausted,IN_SCHEDULE); | ||
1439 | } | ||
1440 | |||
1391 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1441 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1392 | /* prevent updates to inheritance relations while we work with 'prev' */ | 1442 | /* prevent updates to inheritance relations while we work with 'prev' */ |
1393 | /* recheck inheritance if the task holds locks, is running, and will | 1443 | /* recheck inheritance if the task holds locks, is running, and will |
1394 | * have its deadline pushed out by job_completion() */ | 1444 | * have its deadline pushed out by job_completion() */ |
1395 | recheck_inheritance = | 1445 | recheck_inheritance = |
1396 | (prev != NULL) && | 1446 | prev && |
1397 | is_realtime(prev) && | 1447 | is_realtime(prev) && |
1398 | holds_locks(prev) && | 1448 | holds_locks(prev) && |
1399 | !is_np(prev) && | 1449 | !is_np(prev) && |
1400 | !is_completed(prev) && | 1450 | !is_completed(prev) && |
1401 | is_running(prev) && | 1451 | is_running(prev) && |
1402 | budget_enforced(prev) && | 1452 | budget_enforced(prev) && |
1403 | bt_flag_is_set(prev, BTF_BUDGET_EXHAUSTED); | 1453 | bt_flag_is_set(prev, BTF_BUDGET_EXHAUSTED); |
1404 | if (recheck_inheritance) { | 1454 | if (recheck_inheritance) { |
@@ -1726,6 +1776,16 @@ static void cedf_task_exit(struct task_struct * t) | |||
1726 | cedf_change_prio_pai_tasklet(t, NULL); | 1776 | cedf_change_prio_pai_tasklet(t, NULL); |
1727 | #endif | 1777 | #endif |
1728 | 1778 | ||
1779 | /* | ||
1780 | BUG: t is forced to exit by another task. | ||
1781 | meanwhile, the scheduler selects to migrate to be scheduled | ||
1782 | |||
1783 | -- this triggers BAD BAD BAD | ||
1784 | |||
1785 | if (current != t) and t is linked (but not scheduled?), do something. | ||
1786 | |||
1787 | */ | ||
1788 | |||
1729 | /* unlink if necessary */ | 1789 | /* unlink if necessary */ |
1730 | raw_readyq_lock_irqsave(&cluster->cluster_lock, flags); | 1790 | raw_readyq_lock_irqsave(&cluster->cluster_lock, flags); |
1731 | 1791 | ||
@@ -1936,7 +1996,7 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1936 | 1996 | ||
1937 | /* clear out old inheritance relation */ | 1997 | /* clear out old inheritance relation */ |
1938 | if (old_prio_inh) { | 1998 | if (old_prio_inh) { |
1939 | budget_state_machine2(t,old_prio_inh,on_disinherit); | 1999 | budget_state_machine_chgprio(t,old_prio_inh,on_disinherit); |
1940 | clear_inh_task_linkback(t, old_prio_inh); | 2000 | clear_inh_task_linkback(t, old_prio_inh); |
1941 | } | 2001 | } |
1942 | 2002 | ||
@@ -1946,7 +2006,7 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1946 | 2006 | ||
1947 | /* update inheritance relation */ | 2007 | /* update inheritance relation */ |
1948 | if (prio_inh) | 2008 | if (prio_inh) |
1949 | budget_state_machine2(t,prio_inh,on_inherit); | 2009 | budget_state_machine_chgprio(t,prio_inh,on_inherit); |
1950 | 2010 | ||
1951 | linked_on = tsk_rt(t)->linked_on; | 2011 | linked_on = tsk_rt(t)->linked_on; |
1952 | 2012 | ||
@@ -2133,7 +2193,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
2133 | 2193 | ||
2134 | /* clear out old inheritance relation */ | 2194 | /* clear out old inheritance relation */ |
2135 | if (old_prio_inh) { | 2195 | if (old_prio_inh) { |
2136 | budget_state_machine2(t,old_prio_inh,on_disinherit); | 2196 | budget_state_machine_chgprio(t,old_prio_inh,on_disinherit); |
2137 | clear_inh_task_linkback(t, old_prio_inh); | 2197 | clear_inh_task_linkback(t, old_prio_inh); |
2138 | } | 2198 | } |
2139 | 2199 | ||
@@ -2149,7 +2209,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
2149 | tsk_rt(t)->inh_task = prio_inh; | 2209 | tsk_rt(t)->inh_task = prio_inh; |
2150 | 2210 | ||
2151 | if (prio_inh) | 2211 | if (prio_inh) |
2152 | budget_state_machine2(t,prio_inh,on_inherit); | 2212 | budget_state_machine_chgprio(t,prio_inh,on_inherit); |
2153 | 2213 | ||
2154 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | 2214 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
2155 | TRACE_TASK(t, "is scheduled.\n"); | 2215 | TRACE_TASK(t, "is scheduled.\n"); |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index 0f96fb7ddb25..4aeb7a0db3bd 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -362,11 +362,11 @@ static void check_for_preemptions(void) | |||
362 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); | 362 | &per_cpu(gsnedf_cpu_entries, task_cpu(task))); |
363 | if (affinity) | 363 | if (affinity) |
364 | last = affinity; | 364 | last = affinity; |
365 | else if (requeue_preempted_job(last->linked)) | 365 | else if (should_requeue_preempted_job(last->linked)) |
366 | requeue(last->linked); | 366 | requeue(last->linked); |
367 | } | 367 | } |
368 | #else | 368 | #else |
369 | if (requeue_preempted_job(last->linked)) | 369 | if (should_requeue_preempted_job(last->linked)) |
370 | requeue(last->linked); | 370 | requeue(last->linked); |
371 | #endif | 371 | #endif |
372 | 372 | ||
@@ -420,7 +420,7 @@ static noinline void job_completion(struct task_struct *t, int forced) | |||
420 | gsnedf_job_arrival(t); | 420 | gsnedf_job_arrival(t); |
421 | } | 421 | } |
422 | 422 | ||
423 | static enum hrtimer_restart gsnedf_simple_on_exhausted(struct task_struct *t) | 423 | static enum hrtimer_restart gsnedf_simple_on_exhausted(struct task_struct *t, int in_schedule) |
424 | { | 424 | { |
425 | /* Assumption: t is scheduled on the CPU executing this callback */ | 425 | /* Assumption: t is scheduled on the CPU executing this callback */ |
426 | 426 | ||
@@ -461,7 +461,7 @@ static void gsnedf_tick(struct task_struct* t) | |||
461 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && | 461 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && |
462 | budget_exhausted(t)) { | 462 | budget_exhausted(t)) { |
463 | TRACE_TASK(t, "budget exhausted\n"); | 463 | TRACE_TASK(t, "budget exhausted\n"); |
464 | budget_state_machine(t,on_exhausted); | 464 | budget_state_machine2(t,on_exhausted,!IN_SCHEDULE); |
465 | } | 465 | } |
466 | } | 466 | } |
467 | 467 | ||
@@ -1171,14 +1171,14 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1171 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { | 1171 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { |
1172 | #endif | 1172 | #endif |
1173 | if (tsk_rt(t)->inh_task) | 1173 | if (tsk_rt(t)->inh_task) |
1174 | budget_state_machine2(t,tsk_rt(t)->inh_task,on_disinherit); | 1174 | budget_state_machine_chgprio(t,tsk_rt(t)->inh_task,on_disinherit); |
1175 | 1175 | ||
1176 | TRACE_TASK(t, "inherits priority from %s/%d\n", | 1176 | TRACE_TASK(t, "inherits priority from %s/%d\n", |
1177 | prio_inh->comm, prio_inh->pid); | 1177 | prio_inh->comm, prio_inh->pid); |
1178 | tsk_rt(t)->inh_task = prio_inh; | 1178 | tsk_rt(t)->inh_task = prio_inh; |
1179 | 1179 | ||
1180 | if (prio_inh) | 1180 | if (prio_inh) |
1181 | budget_state_machine2(t,prio_inh,on_inherit); | 1181 | budget_state_machine_chgprio(t,prio_inh,on_inherit); |
1182 | 1182 | ||
1183 | linked_on = tsk_rt(t)->linked_on; | 1183 | linked_on = tsk_rt(t)->linked_on; |
1184 | 1184 | ||
@@ -1309,7 +1309,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1309 | #endif | 1309 | #endif |
1310 | 1310 | ||
1311 | if (tsk_rt(t)->inh_task) | 1311 | if (tsk_rt(t)->inh_task) |
1312 | budget_state_machine2(t,tsk_rt(t)->inh_task,on_disinherit); | 1312 | budget_state_machine_chgprio(t,tsk_rt(t)->inh_task,on_disinherit); |
1313 | 1313 | ||
1314 | /* A job only stops inheriting a priority when it releases a | 1314 | /* A job only stops inheriting a priority when it releases a |
1315 | * resource. Thus we can make the following assumption.*/ | 1315 | * resource. Thus we can make the following assumption.*/ |
@@ -1322,7 +1322,7 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1322 | tsk_rt(t)->inh_task = prio_inh; | 1322 | tsk_rt(t)->inh_task = prio_inh; |
1323 | 1323 | ||
1324 | if (prio_inh) | 1324 | if (prio_inh) |
1325 | budget_state_machine2(t,prio_inh,on_inherit); | 1325 | budget_state_machine_chgprio(t,prio_inh,on_inherit); |
1326 | 1326 | ||
1327 | if(tsk_rt(t)->scheduled_on != NO_CPU) { | 1327 | if(tsk_rt(t)->scheduled_on != NO_CPU) { |
1328 | TRACE_TASK(t, "is scheduled.\n"); | 1328 | TRACE_TASK(t, "is scheduled.\n"); |
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c index eadd4fb8e5a4..3a594cd51a44 100644 --- a/litmus/sched_litmus.c +++ b/litmus/sched_litmus.c | |||
@@ -126,7 +126,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
126 | } | 126 | } |
127 | } | 127 | } |
128 | 128 | ||
129 | set_task_cpu(next, smp_processor_id()); | 129 | // set_task_cpu(next, smp_processor_id()); |
130 | 130 | ||
131 | /* DEBUG: now that we have the lock we need to make sure a | 131 | /* DEBUG: now that we have the lock we need to make sure a |
132 | * couple of things still hold: | 132 | * couple of things still hold: |
@@ -135,7 +135,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
135 | * If either is violated, then the active plugin is | 135 | * If either is violated, then the active plugin is |
136 | * doing something wrong. | 136 | * doing something wrong. |
137 | */ | 137 | */ |
138 | if (!is_realtime(next) || !is_running(next)) { | 138 | if (unlikely(!is_realtime(next) || !is_running(next))) { |
139 | /* BAD BAD BAD */ | 139 | /* BAD BAD BAD */ |
140 | TRACE_TASK(next,"BAD: migration invariant FAILED: " | 140 | TRACE_TASK(next,"BAD: migration invariant FAILED: " |
141 | "rt=%d running=%d\n", | 141 | "rt=%d running=%d\n", |
@@ -144,6 +144,10 @@ litmus_schedule(struct rq *rq, struct task_struct *prev) | |||
144 | /* drop the task */ | 144 | /* drop the task */ |
145 | next = NULL; | 145 | next = NULL; |
146 | } | 146 | } |
147 | else { | ||
148 | set_task_cpu(next, smp_processor_id()); | ||
149 | } | ||
150 | |||
147 | /* release the other CPU's runqueue, but keep ours */ | 151 | /* release the other CPU's runqueue, but keep ours */ |
148 | raw_spin_unlock(&other_rq->lock); | 152 | raw_spin_unlock(&other_rq->lock); |
149 | } | 153 | } |
@@ -230,6 +234,52 @@ static void pre_schedule_litmus(struct rq *rq, struct task_struct *prev) | |||
230 | tsk_rt(prev)->present = 0; | 234 | tsk_rt(prev)->present = 0; |
231 | } | 235 | } |
232 | 236 | ||
237 | static void litmus_handle_budget_exhaustion(struct task_struct *t) | ||
238 | { | ||
239 | /* We're unlikely to pick a task that has an exhausted budget, so this | ||
240 | * provides a failsafe. */ | ||
241 | |||
242 | /* BUG: Virtual unlock of OMLP-family locking protocols is not triggered. | ||
243 | * | ||
244 | * TODO-FIX: Add a new virtual-unlock call to budget state machine and do | ||
245 | * the virtual unlock in plugin::schedule(), instead of in budget | ||
246 | * timer handler. This bug should only arise EXTREMELY infrequently. | ||
247 | */ | ||
248 | |||
249 | int handle_exhaustion = 1; | ||
250 | |||
251 | BUG_ON(current != t); | ||
252 | |||
253 | if (is_np(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)) { | ||
254 | /* ignore. will handle exhaustion in the future. */ | ||
255 | TRACE_TASK(t, "task is np and already flagged as exhausted. allow scheduling.\n"); | ||
256 | return; | ||
257 | } | ||
258 | |||
259 | if (unlikely(bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE))) { | ||
260 | TRACE_TASK(t, "waiting for release. skipping exhaustion.\n"); | ||
261 | return; | ||
262 | } | ||
263 | |||
264 | if (budget_precisely_tracked(t)) { | ||
265 | if (cancel_enforcement_timer(t) < 0) { | ||
266 | TRACE_TASK(t, "schedule() raced with timer. deferring to timer.\n"); | ||
267 | handle_exhaustion = 0; | ||
268 | } | ||
269 | } | ||
270 | |||
271 | if (handle_exhaustion) { | ||
272 | if (likely(!is_np(t))) { | ||
273 | TRACE_TASK(t, "picked task without budget => FORCE_RESCHED.\n"); | ||
274 | litmus_reschedule_local(); | ||
275 | } | ||
276 | else if (is_user_np(t)) { | ||
277 | TRACE_TASK(t, "is non-preemptable, preemption delayed.\n"); | ||
278 | request_exit_np(t); | ||
279 | } | ||
280 | } | ||
281 | } | ||
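The "raced with timer" branch above relies on cancel_enforcement_timer(), whose implementation is not shown in this section, reporting whether the enforcement timer's callback is already in flight. Below is a hedged sketch of one plausible shape for that helper, built on hrtimer_try_to_cancel(), which returns a negative value exactly when the callback is executing concurrently; the budget-tracker field layout used here is an assumption, not code taken from this commit.

    /* Sketch only: not the implementation shipped with this commit.
     * Assumes the per-task budget tracker embeds an enforcement timer with an
     * hrtimer, an 'armed' flag, and a lock guarding both. */
    int cancel_enforcement_timer(struct task_struct *t)
    {
        struct enforcement_timer *et = &tsk_rt(t)->budget.timer; /* assumed layout */
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&et->lock, flags);
        if (et->armed) {
            /* hrtimer_try_to_cancel(): 1 = cancelled, 0 = was not queued,
             * -1 = callback currently running on some CPU. */
            ret = hrtimer_try_to_cancel(&et->timer);
            if (ret >= 0)
                et->armed = 0;
        }
        raw_spin_unlock_irqrestore(&et->lock, flags);

        /* A negative return tells schedule() to back off and let the timer's
         * own exhaustion path win the race. */
        return ret;
    }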
282 | |||
233 | /* pick_next_task_litmus() - litmus_schedule() function | 283 | /* pick_next_task_litmus() - litmus_schedule() function |
234 | * | 284 | * |
235 | * return the next task to be scheduled | 285 | * return the next task to be scheduled |
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c index 891469054ab4..d9d293ec98f9 100644 --- a/litmus/sched_pfp.c +++ b/litmus/sched_pfp.c | |||
@@ -132,7 +132,7 @@ static void job_completion(struct task_struct* t, int forced) | |||
132 | sched_trace_task_release(t); | 132 | sched_trace_task_release(t); |
133 | } | 133 | } |
134 | 134 | ||
135 | static enum hrtimer_restart pfp_simple_on_exhausted(struct task_struct *t) | 135 | static enum hrtimer_restart pfp_simple_on_exhausted(struct task_struct *t, int in_schedule) |
136 | { | 136 | { |
137 | /* Assumption: t is scheduled on the CPU executing this callback */ | 137 | /* Assumption: t is scheduled on the CPU executing this callback */ |
138 | 138 | ||
@@ -175,7 +175,7 @@ static void pfp_tick(struct task_struct *t) | |||
175 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && | 175 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && |
176 | budget_exhausted(t)) { | 176 | budget_exhausted(t)) { |
177 | TRACE_TASK(t, "budget exhausted\n"); | 177 | TRACE_TASK(t, "budget exhausted\n"); |
178 | budget_state_machine(t,on_exhausted); | 178 | budget_state_machine2(t,on_exhausted,!IN_SCHEDULE); |
179 | } | 179 | } |
180 | } | 180 | } |
181 | 181 | ||
@@ -485,12 +485,12 @@ static void fp_set_prio_inh(pfp_domain_t* pfp, struct task_struct* t, | |||
485 | fp_dequeue(pfp, t); | 485 | fp_dequeue(pfp, t); |
486 | 486 | ||
487 | if (t->rt_param.inh_task) | 487 | if (t->rt_param.inh_task) |
488 | budget_state_machine2(t,t->rt_param.inh_task,on_disinherit); | 488 | budget_state_machine_chgprio(t,t->rt_param.inh_task,on_disinherit); |
489 | 489 | ||
490 | t->rt_param.inh_task = prio_inh; | 490 | t->rt_param.inh_task = prio_inh; |
491 | 491 | ||
492 | if (prio_inh) | 492 | if (prio_inh) |
493 | budget_state_machine2(t,prio_inh,on_inherit); | 493 | budget_state_machine_chgprio(t,prio_inh,on_inherit); |
494 | 494 | ||
495 | if (requeue) | 495 | if (requeue) |
496 | /* add again to the right queue */ | 496 | /* add again to the right queue */ |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index cfc572584fca..35a98eb806a2 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -122,14 +122,14 @@ static long litmus_dummy_complete_job(void) | |||
122 | 122 | ||
123 | static long litmus_dummy_activate_plugin(void) | 123 | static long litmus_dummy_activate_plugin(void) |
124 | { | 124 | { |
125 | #ifdef CONFIG_LITMUS_NVIDIA | ||
126 | shutdown_nvidia_info(); | ||
127 | #endif | ||
128 | return 0; | 125 | return 0; |
129 | } | 126 | } |
130 | 127 | ||
131 | static long litmus_dummy_deactivate_plugin(void) | 128 | static long litmus_dummy_deactivate_plugin(void) |
132 | { | 129 | { |
130 | //#ifdef CONFIG_LITMUS_NVIDIA | ||
131 | // shutdown_nvidia_info(); | ||
132 | //#endif | ||
133 | return 0; | 133 | return 0; |
134 | } | 134 | } |
135 | 135 | ||
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c index b8246acacaa9..a51b444b30e1 100644 --- a/litmus/sched_psn_edf.c +++ b/litmus/sched_psn_edf.c | |||
@@ -164,7 +164,7 @@ static void job_completion(struct task_struct* t, int forced) | |||
164 | prepare_for_next_period(t); | 164 | prepare_for_next_period(t); |
165 | } | 165 | } |
166 | 166 | ||
167 | static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t) | 167 | static enum hrtimer_restart psnedf_simple_on_exhausted(struct task_struct *t, int in_schedule) |
168 | { | 168 | { |
169 | /* Assumption: t is scheduled on the CPU executing this callback */ | 169 | /* Assumption: t is scheduled on the CPU executing this callback */ |
170 | 170 | ||
@@ -205,7 +205,7 @@ static void psnedf_tick(struct task_struct *t) | |||
205 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && | 205 | tsk_rt(t)->budget.ops && budget_quantum_tracked(t) && |
206 | budget_exhausted(t)) { | 206 | budget_exhausted(t)) { |
207 | TRACE_TASK(t, "budget exhausted\n"); | 207 | TRACE_TASK(t, "budget exhausted\n"); |
208 | budget_state_machine(t,on_exhausted); | 208 | budget_state_machine2(t,on_exhausted,!IN_SCHEDULE); |
209 | } | 209 | } |
210 | } | 210 | } |
211 | 211 | ||
diff --git a/litmus/sync.c b/litmus/sync.c index dfd9e40ab28d..89a28f8a1cc1 100644 --- a/litmus/sync.c +++ b/litmus/sync.c | |||
@@ -60,11 +60,18 @@ static long do_wait_for_ts_release(struct timespec *wake) | |||
60 | #endif | 60 | #endif |
61 | bt_flag_set(t, BTF_WAITING_FOR_RELEASE); | 61 | bt_flag_set(t, BTF_WAITING_FOR_RELEASE); |
62 | mb(); | 62 | mb(); |
63 | budget_state_machine(t,on_exit); // do this here and not in schedule()? | ||
63 | } | 64 | } |
64 | 65 | ||
66 | TRACE_TASK(t, "waiting for ts release.\n"); | ||
67 | if (is_rt) | ||
68 | BUG_ON(!bt_flag_is_set(t, BTF_WAITING_FOR_RELEASE)); | ||
69 | |||
65 | /* We are enqueued, now we wait for someone to wake us up. */ | 70 | /* We are enqueued, now we wait for someone to wake us up. */ |
66 | ret = wait_for_completion_interruptible(&wait.completion); | 71 | ret = wait_for_completion_interruptible(&wait.completion); |
67 | 72 | ||
73 | TRACE_TASK(t, "released by ts release!\n"); | ||
74 | |||
68 | if (is_rt) { | 75 | if (is_rt) { |
69 | bt_flag_clear(t, BTF_WAITING_FOR_RELEASE); | 76 | bt_flag_clear(t, BTF_WAITING_FOR_RELEASE); |
70 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) | 77 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) |
@@ -92,7 +99,8 @@ static long do_wait_for_ts_release(struct timespec *wake) | |||
92 | /* completion succeeded, set up release. subtract off | 99 | /* completion succeeded, set up release. subtract off |
93 | * period because schedule()->job_completion() will | 100 | * period because schedule()->job_completion() will |
94 | * advances us to the correct time */ | 101 | * advances us to the correct time */ |
95 | litmus->release_at(t, phasedRelease - t->rt_param.task_params.period); | 102 | //litmus->release_at(t, phasedRelease - t->rt_param.task_params.period); |
103 | setup_release(t, phasedRelease - t->rt_param.task_params.period); // breaks pfair | ||
96 | schedule(); | 104 | schedule(); |
97 | } | 105 | } |
98 | else { | 106 | else { |
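To make the "subtract off period" comment in do_wait_for_ts_release() concrete, here is a worked example with illustrative numbers; only the cancellation of the period term comes from the code above, the values themselves are made up.

    /* Worked example (illustrative numbers):
     *   desired phased release:           phasedRelease = 120 ms
     *   task period:                      period        =  50 ms
     *   value handed to setup_release():  120 ms - 50 ms =  70 ms
     *   schedule()->job_completion() then advances the job by one period:
     *                                      70 ms + 50 ms = 120 ms
     * so the job's first real release lands exactly at the requested phased instant.
     */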