aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2013-03-17 17:23:36 -0400
committerGlenn Elliott <gelliott@cs.unc.edu>2013-03-17 17:23:36 -0400
commit469aaad39c956446b8a31d351ee36bedd87ac18a (patch)
tree3e2864a01df8a04ab7a406342627d3dc850760af
parent9374a7c30b6906d01c548833fb8a7b65ba4b5ccc (diff)
Per-task budget high-resolution timers (hrtimers).
As a step towards implementing more complex budget tracking methods (e.g., BWI, VXR, etc.), we need per-task budget trackers because we may be required to drain budget from a task, even while it is suspended or blocked. This patch: 1) Replaces the per-CPU hrtimers with per-task hrtimers. 2) Adds a plugin architecture for different budget policies. This patch creates three budget draining policies: SIMPLE, SAWARE (suspension-aware), and SOBLIV (suspension-oblivious). However, only SIMPLE is supported by this patch. SIMPLE (default): Budget drains while the task is scheduled. Budget is preserved across self-suspensions (but not job completions, of course). Only SIMPLE is supported in this patch. (Maintaining current Litmus functionality.) SAWARE: Draining according to suspension-aware analysis. Budget should drain whenever a task is among the top-m tasks in its cluster, where m is the number of processors in said cluster. This draining should happen whether or not the task is actually scheduled. SOBLIV: Draining according to suspension-oblivious analysis. Budget should drain whenever the task is scheduled or suspended (but not due to preemption). Exception: Draining should halt when we can prove that the task is not among the top-m tasks blocked on the same lock (i.e., on the PQ in the OMLP-family locking protocols).
-rw-r--r--include/litmus/budget.h106
-rw-r--r--include/litmus/fdso.h2
-rw-r--r--include/litmus/gpu_affinity.h8
-rw-r--r--include/litmus/litmus.h2
-rw-r--r--include/litmus/rt_param.h26
-rw-r--r--include/litmus/sched_plugin.h1
-rw-r--r--litmus/budget.c217
-rw-r--r--litmus/jobs.c4
-rw-r--r--litmus/litmus.c2
-rw-r--r--litmus/sched_cedf.c129
-rw-r--r--litmus/sched_gsn_edf.c114
-rw-r--r--litmus/sched_litmus.c6
-rw-r--r--litmus/sched_pfair.c10
-rw-r--r--litmus/sched_pfp.c107
-rw-r--r--litmus/sched_psn_edf.c107
15 files changed, 566 insertions, 275 deletions
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 763b31c0e9f6..2a3511245f7a 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -1,26 +1,14 @@
1#ifndef _LITMUS_BUDGET_H_ 1#ifndef _LITMUS_BUDGET_H_
2#define _LITMUS_BUDGET_H_ 2#define _LITMUS_BUDGET_H_
3 3
4/* Update the per-processor enforcement timer (arm/reproram/cancel) for 4#include <linux/hrtimer.h>
5 * the next task. */ 5#include <linux/semaphore.h>
6void update_enforcement_timer(struct task_struct* t);
7 6
8/* Send SIG_BUDGET to a real-time task. */ 7#define budget_exhausted(t) \
9void send_sigbudget(struct task_struct* t); 8 (get_exec_time(t) >= get_exec_cost(t))
10 9
11inline static int budget_exhausted(struct task_struct* t) 10#define budget_remaining(t) \
12{ 11 ((!budget_exhausted(t)) ? (get_exec_cost(t) - get_exec_time(t)) : 0)
13 return get_exec_time(t) >= get_exec_cost(t);
14}
15
16inline static lt_t budget_remaining(struct task_struct* t)
17{
18 if (!budget_exhausted(t))
19 return get_exec_cost(t) - get_exec_time(t);
20 else
21 /* avoid overflow */
22 return 0;
23}
24 12
25#define budget_enforced(t) (\ 13#define budget_enforced(t) (\
26 tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT) 14 tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
@@ -29,21 +17,87 @@ inline static lt_t budget_remaining(struct task_struct* t)
29 tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \ 17 tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \
30 tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS) 18 tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)
31 19
20#define budget_quantum_tracked(t) (\
21 tsk_rt(t)->task_params.budget_policy == QUANTUM_ENFORCEMENT || \
22 tsk_rt(t)->task_params.budget_signal_policy == QUANTUM_SIGNALS)
23
32#define budget_signalled(t) (\ 24#define budget_signalled(t) (\
33 tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS) 25 tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS)
34 26
35#define budget_precisely_signalled(t) (\ 27#define budget_precisely_signalled(t) (\
36 tsk_rt(t)->task_params.budget_policy == PRECISE_SIGNALS) 28 tsk_rt(t)->task_params.budget_policy == PRECISE_SIGNALS)
37 29
38#define sigbudget_sent(t) (\ 30#define bt_flag_is_set(t, flag_nr) (\
39 test_bit(RT_JOB_SIG_BUDGET_SENT, &tsk_rt(t)->job_params.flags)) 31 test_bit(flag_nr, &tsk_rt(t)->budget.flags))
32
33#define bt_flag_test_and_set(t, flag_nr) (\
34 test_and_set_bit(flag_nr, &tsk_rt(t)->budget.flags))
35
36#define bt_flag_set(t, flag_nr) (\
37 set_bit(flag_nr, &tsk_rt(t)->budget.flags))
38
39#define bt_flag_clear(t, flag_nr) (\
40 clear_bit(flag_nr, &tsk_rt(t)->budget.flags))
41
42#define bt_flags_reset(t) (\
43 tsk_rt(t)->budget.flags = 0)
44
45#define requeue_preempted_job(t) \
46 (t && (!budget_exhausted(t) || !budget_enforced(t)))
47
48struct enforcement_timer
49{
50 raw_spinlock_t lock;
51 struct hrtimer timer;
52 int armed:1;
53};
54
55typedef void (*scheduled_t)(struct task_struct* t);
56typedef void (*blocked_t)(struct task_struct* t);
57typedef void (*preempt_or_sleep_t)(struct task_struct* t);
58typedef void (*exhausted_t)(struct task_struct* t);
59typedef void (*exit_t)(struct task_struct* t);
40 60
41static inline int requeue_preempted_job(struct task_struct* t) 61struct budget_tracker_ops
42{ 62{
43 /* Add task to ready queue only if not subject to budget enforcement or 63 scheduled_t on_scheduled; /* called from litmus_schedule(). */
44 * if the job has budget remaining. t may be NULL. 64 blocked_t on_blocked; /* called from plugin::schedule() */
45 */ 65 preempt_or_sleep_t on_preempt_or_sleep; /* called from plugin::schedule() */
46 return t && (!budget_exhausted(t) || !budget_enforced(t)); 66
47} 67 exit_t on_exit; /* task exiting rt mode */
68
69 exhausted_t on_exhausted; /* called by plugin::tick() or timer interrupt */
70};
71
72struct budget_tracker
73{
74 struct enforcement_timer timer;
75 const struct budget_tracker_ops* ops;
76 unsigned long flags;
77};
78
79/* budget tracker flags */
80enum BT_FLAGS
81{
82 BTF_BUDGET_EXHAUSTED = 0,
83 BTF_SIG_BUDGET_SENT = 1,
84};
85
86/* Functions for simple DRAIN_SIMPLE policy common
87 * to every scheduler. Scheduler must provided
88 * implementation for simple_on_exhausted().
89 */
90void simple_on_scheduled(struct task_struct* t);
91void simple_on_blocked(struct task_struct* t);
92void simple_on_preempt_or_sleep(struct task_struct* t);
93void simple_on_exit(struct task_struct* t);
94
95
96void init_budget_tracker(struct budget_tracker* bt,
97 const struct budget_tracker_ops* ops);
98
99
100/* Send SIG_BUDGET to a real-time task. */
101void send_sigbudget(struct task_struct* t);
48 102
49#endif 103#endif
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index f7887288d8f5..2d8e6c43d908 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -36,7 +36,7 @@ typedef enum {
36 KFMLP_GPU_AFF_OBS = 12, 36 KFMLP_GPU_AFF_OBS = 12,
37 37
38 PRIOQ_MUTEX = 13, 38 PRIOQ_MUTEX = 13,
39 39
40 MAX_OBJ_TYPE = 13 40 MAX_OBJ_TYPE = 13
41} obj_type_t; 41} obj_type_t;
42 42
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
index 47da725717b0..f610f58b1f3b 100644
--- a/include/litmus/gpu_affinity.h
+++ b/include/litmus/gpu_affinity.h
@@ -11,6 +11,7 @@ gpu_migration_dist_t gpu_migration_distance(int a, int b);
11static inline void reset_gpu_tracker(struct task_struct* t) 11static inline void reset_gpu_tracker(struct task_struct* t)
12{ 12{
13 t->rt_param.accum_gpu_time = 0; 13 t->rt_param.accum_gpu_time = 0;
14 t->rt_param.gpu_time_stamp = 0;
14} 15}
15 16
16static inline void start_gpu_tracker(struct task_struct* t) 17static inline void start_gpu_tracker(struct task_struct* t)
@@ -22,11 +23,16 @@ static inline void stop_gpu_tracker(struct task_struct* t)
22{ 23{
23 lt_t now = litmus_clock(); 24 lt_t now = litmus_clock();
24 t->rt_param.accum_gpu_time += (now - t->rt_param.gpu_time_stamp); 25 t->rt_param.accum_gpu_time += (now - t->rt_param.gpu_time_stamp);
26 t->rt_param.gpu_time_stamp = 0;
25} 27}
26 28
27static inline lt_t get_gpu_time(struct task_struct* t) 29static inline lt_t get_gpu_time(struct task_struct* t)
28{ 30{
29 return t->rt_param.accum_gpu_time; 31 lt_t accum = t->rt_param.accum_gpu_time;
32 if (t->rt_param.gpu_time_stamp != 0) {
33 accum += (litmus_clock() - t->rt_param.gpu_time_stamp);
34 }
35 return accum;
30} 36}
31 37
32static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist) 38static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist)
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 70b421d59d34..f6ea5f6e80ee 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -62,6 +62,7 @@ void litmus_exit_task(struct task_struct *tsk);
62#define get_priority(t) (tsk_rt(t)->task_params.priority) 62#define get_priority(t) (tsk_rt(t)->task_params.priority)
63#define get_class(t) (tsk_rt(t)->task_params.cls) 63#define get_class(t) (tsk_rt(t)->task_params.cls)
64#define get_release_policy(t) (tsk_rt(t)->task_params.release_policy) 64#define get_release_policy(t) (tsk_rt(t)->task_params.release_policy)
65#define get_drain_policy(t) (tsk_rt(t)->task_params.drain_policy)
65 66
66/* job_param macros */ 67/* job_param macros */
67#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time) 68#define get_exec_time(t) (tsk_rt(t)->job_params.exec_time)
@@ -69,6 +70,7 @@ void litmus_exit_task(struct task_struct *tsk);
69#define get_period(t) (tsk_rt(t)->task_params.period) 70#define get_period(t) (tsk_rt(t)->task_params.period)
70#define get_release(t) (tsk_rt(t)->job_params.release) 71#define get_release(t) (tsk_rt(t)->job_params.release)
71#define get_lateness(t) (tsk_rt(t)->job_params.lateness) 72#define get_lateness(t) (tsk_rt(t)->job_params.lateness)
73#define get_budget_timer(t) (tsk_rt(t)->job_params.budget_timer)
72 74
73#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task) 75#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
74#define base_priority(t) (t) 76#define base_priority(t) (t)
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index bf0ee8dbae6e..887075b908ca 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -36,6 +36,14 @@ typedef enum {
36} budget_policy_t; 36} budget_policy_t;
37 37
38typedef enum { 38typedef enum {
39 /* all drain mechanisms are ignored if budget enforcement or signalling
40 is not in use. */
41 DRAIN_SIMPLE, /* drains while task is linked */
42 DRAIN_SAWARE, /* drains according to suspension-aware analysis */
43 DRAIN_SOBLIV, /* drains according to suspension-obliv analysis */
44} budget_drain_policy_t;
45
46typedef enum {
39 NO_SIGNALS, /* job receives no signals when it exhausts its budget */ 47 NO_SIGNALS, /* job receives no signals when it exhausts its budget */
40 QUANTUM_SIGNALS, /* budget signals are only sent on quantum boundaries */ 48 QUANTUM_SIGNALS, /* budget signals are only sent on quantum boundaries */
41 PRECISE_SIGNALS, /* budget signals are triggered with hrtimers */ 49 PRECISE_SIGNALS, /* budget signals are triggered with hrtimers */
@@ -132,6 +140,7 @@ struct rt_task {
132 unsigned int priority; 140 unsigned int priority;
133 task_class_t cls; 141 task_class_t cls;
134 budget_policy_t budget_policy; /* ignored by pfair */ 142 budget_policy_t budget_policy; /* ignored by pfair */
143 budget_drain_policy_t drain_policy;
135 budget_signal_policy_t budget_signal_policy; /* currently ignored by pfair */ 144 budget_signal_policy_t budget_signal_policy; /* currently ignored by pfair */
136 release_policy_t release_policy; 145 release_policy_t release_policy;
137}; 146};
@@ -213,8 +222,9 @@ struct control_page {
213/* don't export internal data structures to user space (liblitmus) */ 222/* don't export internal data structures to user space (liblitmus) */
214#ifdef __KERNEL__ 223#ifdef __KERNEL__
215 224
216#include <litmus/binheap.h>
217#include <linux/semaphore.h> 225#include <linux/semaphore.h>
226#include <litmus/budget.h>
227#include <litmus/binheap.h>
218 228
219#ifdef CONFIG_LITMUS_SOFTIRQD 229#ifdef CONFIG_LITMUS_SOFTIRQD
220#include <linux/interrupt.h> 230#include <linux/interrupt.h>
@@ -247,15 +257,8 @@ struct rt_job {
247 * Increase this sequence number when a job is released. 257 * Increase this sequence number when a job is released.
248 */ 258 */
249 unsigned int job_no; 259 unsigned int job_no;
250
251 /* bits:
252 * 0th: Set if a budget exhaustion signal has already been sent for
253 * the current job. */
254 unsigned long flags;
255}; 260};
256 261
257#define RT_JOB_SIG_BUDGET_SENT 0
258
259struct pfair_param; 262struct pfair_param;
260 263
261enum klmirqd_sem_status 264enum klmirqd_sem_status
@@ -278,12 +281,12 @@ typedef enum gpu_migration_dist
278 MIG_LAST = MIG_NONE 281 MIG_LAST = MIG_NONE
279} gpu_migration_dist_t; 282} gpu_migration_dist_t;
280 283
281typedef struct feedback_est{ 284typedef struct feedback_est
285{
282 fp_t est; 286 fp_t est;
283 fp_t accum_err; 287 fp_t accum_err;
284} feedback_est_t; 288} feedback_est_t;
285 289
286
287#define AVG_EST_WINDOW_SIZE 20 290#define AVG_EST_WINDOW_SIZE 20
288 291
289typedef int (*notify_rsrc_exit_t)(struct task_struct* tsk); 292typedef int (*notify_rsrc_exit_t)(struct task_struct* tsk);
@@ -417,7 +420,6 @@ struct rt_param {
417 struct binheap_node aux_task_owner_node; 420 struct binheap_node aux_task_owner_node;
418#endif 421#endif
419 422
420
421#ifdef CONFIG_NP_SECTION 423#ifdef CONFIG_NP_SECTION
422 /* For the FMLP under PSN-EDF, it is required to make the task 424 /* For the FMLP under PSN-EDF, it is required to make the task
423 * non-preemptive from kernel space. In order not to interfere with 425 * non-preemptive from kernel space. In order not to interfere with
@@ -427,6 +429,8 @@ struct rt_param {
427 unsigned int kernel_np; 429 unsigned int kernel_np;
428#endif 430#endif
429 431
432 struct budget_tracker budget;
433
430 /* This field can be used by plugins to store where the task 434 /* This field can be used by plugins to store where the task
431 * is currently scheduled. It is the responsibility of the 435 * is currently scheduled. It is the responsibility of the
432 * plugin to avoid race conditions. 436 * plugin to avoid race conditions.
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 78cec30866ac..6e7d6df2fb78 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -130,6 +130,7 @@ struct sched_plugin {
130 schedule_t schedule; 130 schedule_t schedule;
131 finish_switch_t finish_switch; 131 finish_switch_t finish_switch;
132 132
133
133 /* syscall backend */ 134 /* syscall backend */
134 complete_job_t complete_job; 135 complete_job_t complete_job;
135 release_at_t release_at; 136 release_at_t release_at;
diff --git a/litmus/budget.c b/litmus/budget.c
index 2ec9d383c332..559c54709acc 100644
--- a/litmus/budget.c
+++ b/litmus/budget.c
@@ -5,102 +5,100 @@
5 5
6#include <litmus/litmus.h> 6#include <litmus/litmus.h>
7#include <litmus/preempt.h> 7#include <litmus/preempt.h>
8 8#include <litmus/sched_plugin.h>
9#include <litmus/budget.h> 9#include <litmus/budget.h>
10#include <litmus/signal.h> 10#include <litmus/signal.h>
11 11
12struct enforcement_timer { 12inline static void cancel_enforcement_timer(struct task_struct* t)
13 /* The enforcement timer is used to accurately police
14 * slice budgets. */
15 struct hrtimer timer;
16 int armed;
17};
18
19static DEFINE_PER_CPU(struct enforcement_timer, budget_timer);
20
21static enum hrtimer_restart on_enforcement_timeout(struct hrtimer *timer)
22{ 13{
23 struct enforcement_timer* et = container_of(timer, 14 struct enforcement_timer* et;
24 struct enforcement_timer, 15 int ret;
25 timer);
26 unsigned long flags; 16 unsigned long flags;
27 17
28 local_irq_save(flags); 18 BUG_ON(!t);
29 TRACE("enforcement timer fired.\n"); 19 BUG_ON(!is_realtime(t));
30 et->armed = 0;
31 /* activate scheduler */
32 litmus_reschedule_local();
33 local_irq_restore(flags);
34
35 return HRTIMER_NORESTART;
36}
37 20
38/* assumes called with IRQs off */ 21 et = &tsk_rt(t)->budget.timer;
39static void cancel_enforcement_timer(struct enforcement_timer* et)
40{
41 int ret;
42 22
43 TRACE("cancelling enforcement timer.\n"); 23 TRACE("cancelling enforcement timer.\n");
44 24
45 /* Since interrupts are disabled and et->armed is only
46 * modified locally, we do not need any locks.
47 */
48
49 if (et->armed) { 25 if (et->armed) {
50 ret = hrtimer_try_to_cancel(&et->timer); 26 raw_spin_lock_irqsave(&et->lock, flags);
51 /* Should never be inactive. */ 27 if (et->armed) {
52 BUG_ON(ret == 0); 28 ret = hrtimer_try_to_cancel(&et->timer);
53 /* Should never be running concurrently. */ 29 et->armed = 0;
54 BUG_ON(ret == -1); 30 }
55 31 else {
56 et->armed = 0; 32 TRACE("timer was not armed (race).\n");
33 }
34 raw_spin_unlock_irqrestore(&et->lock, flags);
35 }
36 else {
37 TRACE("timer was not armed.\n");
57 } 38 }
58} 39}
59 40
60/* assumes called with IRQs off */ 41inline static void arm_enforcement_timer(struct task_struct* t)
61static void arm_enforcement_timer(struct enforcement_timer* et,
62 struct task_struct* t)
63{ 42{
43 struct enforcement_timer* et;
64 lt_t when_to_fire; 44 lt_t when_to_fire;
65 TRACE_TASK(t, "arming enforcement timer.\n"); 45 unsigned long flags;
46
47 BUG_ON(!t);
48 BUG_ON(!is_realtime(t));
49
50 et = &tsk_rt(t)->budget.timer;
51 if (et->armed) {
52 TRACE_TASK(t, "timer already armed!\n");
53 return;
54 }
66 55
67 /* Calling this when there is no budget left for the task 56 /* Calling this when there is no budget left for the task
68 * makes no sense, unless the task is non-preemptive. */ 57 * makes no sense, unless the task is non-preemptive. */
69 BUG_ON(budget_exhausted(t) && !is_np(t)); 58 if (budget_exhausted(t)) {
59 TRACE_TASK(t, "can't arm timer because no budget remaining\n");
60 return;
61 }
62
63 if ( (!budget_enforced(t) ||
64 (budget_enforced(t) && bt_flag_is_set(t, BTF_BUDGET_EXHAUSTED)))
65 &&
66 (!budget_signalled(t) ||
67 (budget_signalled(t) && bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)))) {
68 TRACE_TASK(t, "trying to arm timer when budget has already been exhausted.\n");
69 return;
70 }
71
72 TRACE_TASK(t, "arming enforcement timer.\n");
70 73
71 /* __hrtimer_start_range_ns() cancels the timer 74 /* __hrtimer_start_range_ns() cancels the timer
72 * anyway, so we don't have to check whether it is still armed */ 75 * anyway, so we don't have to check whether it is still armed */
76 raw_spin_lock_irqsave(&et->lock, flags);
73 77
74 if (likely(!is_np(t))) { 78 if (et->armed) {
75 when_to_fire = litmus_clock() + budget_remaining(t); 79 TRACE_TASK(t, "timer already armed (race)!\n");
76 __hrtimer_start_range_ns(&et->timer, 80 goto out;
77 ns_to_ktime(when_to_fire),
78 0 /* delta */,
79 HRTIMER_MODE_ABS_PINNED,
80 0 /* no wakeup */);
81 et->armed = 1;
82 } 81 }
83}
84 82
83 when_to_fire = litmus_clock() + budget_remaining(t);
85 84
86/* expects to be called with IRQs off */ 85 TRACE_TASK(t, "bremaining: %ld, when_to_fire: %ld\n", budget_remaining(t), when_to_fire);
87void update_enforcement_timer(struct task_struct* t) 86
88{ 87 __hrtimer_start_range_ns(&et->timer,
89 struct enforcement_timer* et = &__get_cpu_var(budget_timer); 88 ns_to_ktime(when_to_fire),
90 89 0 /* delta */,
91 if (t && budget_precisely_tracked(t) && !sigbudget_sent(t)) { 90 HRTIMER_MODE_ABS_PINNED, // TODO: need to use non-pinned?
92 /* Make sure we call into the scheduler when this budget 91 0 /* no wakeup */);
93 * expires. */ 92 et->armed = 1;
94 arm_enforcement_timer(et, t); 93
95 } else if (et->armed) { 94out:
96 /* Make sure we don't cause unnecessary interrupts. */ 95 raw_spin_unlock_irqrestore(&et->lock, flags);
97 cancel_enforcement_timer(et);
98 }
99} 96}
100 97
98
101void send_sigbudget(struct task_struct* t) 99void send_sigbudget(struct task_struct* t)
102{ 100{
103 if (!test_and_set_bit(RT_JOB_SIG_BUDGET_SENT, &tsk_rt(t)->job_params.flags)) { 101 if (!bt_flag_test_and_set(t, BTF_SIG_BUDGET_SENT)) {
104 /* signal has not yet been sent and we are responsible for sending 102 /* signal has not yet been sent and we are responsible for sending
105 * since we just set the sent-bit when it was previously 0. */ 103 * since we just set the sent-bit when it was previously 0. */
106 104
@@ -109,17 +107,86 @@ void send_sigbudget(struct task_struct* t)
109 } 107 }
110} 108}
111 109
112static int __init init_budget_enforcement(void) 110
111void simple_on_scheduled(struct task_struct* t)
113{ 112{
114 int cpu; 113 BUG_ON(!t);
115 struct enforcement_timer* et; 114
115 if (budget_precisely_tracked(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
116 BUG_ON(tsk_rt(t)->budget.timer.armed);
117 arm_enforcement_timer(t);
118 }
119}
120
121static void __simple_on_unscheduled(struct task_struct* t)
122{
123 BUG_ON(!t);
116 124
117 for (cpu = 0; cpu < NR_CPUS; cpu++) { 125 if (budget_precisely_tracked(t)) {
118 et = &per_cpu(budget_timer, cpu); 126 cancel_enforcement_timer(t);
119 hrtimer_init(&et->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
120 et->timer.function = on_enforcement_timeout;
121 } 127 }
122 return 0;
123} 128}
124 129
125module_init(init_budget_enforcement); 130void simple_on_blocked(struct task_struct* t)
131{
132 __simple_on_unscheduled(t);
133}
134
135void simple_on_preempt_or_sleep(struct task_struct* t)
136{
137 __simple_on_unscheduled(t);
138}
139
140void simple_on_exit(struct task_struct* t)
141{
142 __simple_on_unscheduled(t);
143}
144
145
146
147
148static enum hrtimer_restart __on_timeout(struct hrtimer *timer)
149{
150 unsigned long flags;
151 struct budget_tracker* bt =
152 container_of(
153 container_of(timer,
154 struct enforcement_timer,
155 timer),
156 struct budget_tracker,
157 timer);
158
159 struct task_struct* t =
160 container_of(
161 container_of(bt, struct rt_param, budget),
162 struct task_struct,
163 rt_param);
164
165 TRACE_TASK(t, "budget timer interrupt fired at time %lu\n", litmus_clock());
166
167 raw_spin_lock_irqsave(&bt->timer.lock, flags);
168 tsk_rt(t)->budget.timer.armed = 0;
169 raw_spin_unlock_irqrestore(&bt->timer.lock, flags);
170
171 bt->ops->on_exhausted(t);
172
173 return HRTIMER_NORESTART;
174}
175
176
177void init_budget_tracker(struct budget_tracker* bt, const struct budget_tracker_ops* ops)
178{
179 BUG_ON(!bt);
180 BUG_ON(!ops);
181
182 BUG_ON(!ops->on_scheduled);
183 BUG_ON(!ops->on_blocked);
184 BUG_ON(!ops->on_preempt_or_sleep);
185 BUG_ON(!ops->on_exhausted);
186
187 memset(bt, 0, sizeof(*bt));
188 raw_spin_lock_init(&bt->timer.lock);
189 hrtimer_init(&bt->timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
190 bt->timer.timer.function = __on_timeout;
191 bt->ops = ops;
192} \ No newline at end of file
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 991c6e60be74..1479cddad9de 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -13,7 +13,9 @@ static inline void setup_release(struct task_struct *t, lt_t release)
13 t->rt_param.job_params.deadline = release + get_rt_relative_deadline(t); 13 t->rt_param.job_params.deadline = release + get_rt_relative_deadline(t);
14 t->rt_param.job_params.exec_time = 0; 14 t->rt_param.job_params.exec_time = 0;
15 15
16 clear_bit(RT_JOB_SIG_BUDGET_SENT, &t->rt_param.job_params.flags); 16 /* kludge - TODO: Move this to budget.h/.c */
17 if (t->rt_param.budget.ops)
18 bt_flags_reset(t);
17 19
18 /* update job sequence number */ 20 /* update job sequence number */
19 t->rt_param.job_params.job_no++; 21 t->rt_param.job_params.job_no++;
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 97cbe0461a93..e8130e362c84 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -19,6 +19,8 @@
19#include <litmus/litmus_proc.h> 19#include <litmus/litmus_proc.h>
20#include <litmus/sched_trace.h> 20#include <litmus/sched_trace.h>
21 21
22#include <litmus/budget.h>
23
22#ifdef CONFIG_SCHED_CPU_AFFINITY 24#ifdef CONFIG_SCHED_CPU_AFFINITY
23#include <litmus/affinity.h> 25#include <litmus/affinity.h>
24#endif 26#endif
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 78ae5a080138..8fe646f1f0c5 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -444,6 +444,35 @@ static noinline void job_completion(struct task_struct *t, int forced)
444 } 444 }
445} 445}
446 446
447static void cedf_simple_on_exhausted(struct task_struct *t)
448{
449 /* Assumption: t is scheduled on the CPU executing this callback */
450
451 if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
452 /* signal exhaustion */
453 send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
454 }
455
456 if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) {
457 if (!is_np(t)) {
458 /* np tasks will be preempted when they become
459 * preemptable again
460 */
461 litmus_reschedule_local();
462 set_will_schedule();
463 TRACE("cedf_scheduler_tick: "
464 "%d is preemptable "
465 " => FORCE_RESCHED\n", t->pid);
466 } else if (is_user_np(t)) {
467 TRACE("cedf_scheduler_tick: "
468 "%d is non-preemptable, "
469 "preemption delayed.\n", t->pid);
470 request_exit_np(t);
471 }
472 }
473}
474
475
447/* cedf_tick - this function is called for every local timer 476/* cedf_tick - this function is called for every local timer
448 * interrupt. 477 * interrupt.
449 * 478 *
@@ -452,48 +481,18 @@ static noinline void job_completion(struct task_struct *t, int forced)
452 */ 481 */
453static void cedf_tick(struct task_struct* t) 482static void cedf_tick(struct task_struct* t)
454{ 483{
455 if (is_realtime(t) && budget_exhausted(t)) 484 if (is_realtime(t) &&
456 { 485 tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
457 if (budget_signalled(t) && !sigbudget_sent(t)) { 486 budget_exhausted(t)) {
458 /* signal exhaustion */ 487 TRACE_TASK(t, "budget exhausted\n");
459 send_sigbudget(t); 488 tsk_rt(t)->budget.ops->on_exhausted(t);
460 }
461
462 if (budget_enforced(t)) {
463 if (!is_np(t)) {
464 /* np tasks will be preempted when they become
465 * preemptable again
466 */
467 litmus_reschedule_local();
468 set_will_schedule();
469 TRACE("cedf_scheduler_tick: "
470 "%d is preemptable "
471 " => FORCE_RESCHED\n", t->pid);
472 } else if (is_user_np(t)) {
473 TRACE("cedf_scheduler_tick: "
474 "%d is non-preemptable, "
475 "preemption delayed.\n", t->pid);
476 request_exit_np(t);
477 }
478 }
479 } 489 }
480} 490}
481 491
482 492
483 493
484
485
486
487
488
489
490
491
492
493
494#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 494#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
495 495
496
497static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed) 496static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
498{ 497{
499 if (!atomic_read(&tasklet->count)) { 498 if (!atomic_read(&tasklet->count)) {
@@ -787,6 +786,7 @@ static void cedf_change_prio_pai_tasklet(struct task_struct *old_prio,
787 786
788#endif // PAI 787#endif // PAI
789 788
789
790/* Getting schedule() right is a bit tricky. schedule() may not make any 790/* Getting schedule() right is a bit tricky. schedule() may not make any
791 * assumptions on the state of the current task since it may be called for a 791 * assumptions on the state of the current task since it may be called for a
792 * number of reasons. The reasons include a scheduler_tick() determined that it 792 * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -812,7 +812,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
812{ 812{
813 cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries); 813 cpu_entry_t* entry = &__get_cpu_var(cedf_cpu_entries);
814 cedf_domain_t *cluster = entry->cluster; 814 cedf_domain_t *cluster = entry->cluster;
815 int out_of_time, signal_budget, sleep, preempt, np, exists, blocks; 815 int out_of_time, sleep, preempt, np, exists, blocks;
816 struct task_struct* next = NULL; 816 struct task_struct* next = NULL;
817 817
818#ifdef CONFIG_RELEASE_MASTER 818#ifdef CONFIG_RELEASE_MASTER
@@ -838,11 +838,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
838 blocks = exists && !is_running(entry->scheduled); 838 blocks = exists && !is_running(entry->scheduled);
839 out_of_time = exists && 839 out_of_time = exists &&
840 budget_enforced(entry->scheduled) && 840 budget_enforced(entry->scheduled) &&
841 budget_exhausted(entry->scheduled); 841 bt_flag_is_set(entry->scheduled, BTF_BUDGET_EXHAUSTED);
842 signal_budget = exists &&
843 budget_signalled(entry->scheduled) &&
844 budget_exhausted(entry->scheduled) &&
845 !sigbudget_sent(entry->scheduled);
846 np = exists && is_np(entry->scheduled); 842 np = exists && is_np(entry->scheduled);
847 sleep = exists && is_completed(entry->scheduled); 843 sleep = exists && is_completed(entry->scheduled);
848 preempt = entry->scheduled != entry->linked; 844 preempt = entry->scheduled != entry->linked;
@@ -851,12 +847,13 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
851 TRACE_TASK(prev, "invoked cedf_schedule.\n"); 847 TRACE_TASK(prev, "invoked cedf_schedule.\n");
852#endif 848#endif
853 849
854 if (exists) 850 if (exists) {
855 TRACE_TASK(prev, 851 TRACE_TASK(prev,
856 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " 852 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
857 "state:%d sig:%d\n", 853 "state:%d sig:%d\n",
858 blocks, out_of_time, np, sleep, preempt, 854 blocks, out_of_time, np, sleep, preempt,
859 prev->state, signal_pending(prev)); 855 prev->state, signal_pending(prev));
856 }
860 if (entry->linked && preempt) 857 if (entry->linked && preempt)
861 TRACE_TASK(prev, "will be preempted by %s/%d\n", 858 TRACE_TASK(prev, "will be preempted by %s/%d\n",
862 entry->linked->comm, entry->linked->pid); 859 entry->linked->comm, entry->linked->pid);
@@ -872,9 +869,13 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
872 } 869 }
873#endif 870#endif
874 871
875 /* Send the signal that the budget has been exhausted */ 872 /* Do budget stuff */
876 if (signal_budget) 873 if (tsk_rt(prev)->budget.ops) {
877 send_sigbudget(entry->scheduled); 874 if (blocks)
875 tsk_rt(prev)->budget.ops->on_blocked(prev);
876 else if (preempt || sleep)
877 tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
878 }
878 879
879 /* If a task blocks we have no choice but to reschedule. 880 /* If a task blocks we have no choice but to reschedule.
880 */ 881 */
@@ -942,6 +943,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
942 } 943 }
943 } 944 }
944 945
946
945#ifdef CONFIG_REALTIME_AUX_TASKS 947#ifdef CONFIG_REALTIME_AUX_TASKS
946out_set_state: 948out_set_state:
947#endif 949#endif
@@ -1111,6 +1113,10 @@ static void cedf_task_exit(struct task_struct * t)
1111 /* unlink if necessary */ 1113 /* unlink if necessary */
1112 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 1114 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
1113 1115
1116 /* disable budget enforcement */
1117 if (tsk_rt(t)->budget.ops)
1118 tsk_rt(t)->budget.ops->on_exit(t);
1119
1114#ifdef CONFIG_REALTIME_AUX_TASKS 1120#ifdef CONFIG_REALTIME_AUX_TASKS
1115 /* make sure we clean up on our way out */ 1121 /* make sure we clean up on our way out */
1116 if (unlikely(tsk_rt(t)->is_aux_task)) { 1122 if (unlikely(tsk_rt(t)->is_aux_task)) {
@@ -1141,15 +1147,44 @@ static void cedf_task_exit(struct task_struct * t)
1141 TRACE_TASK(t, "RIP\n"); 1147 TRACE_TASK(t, "RIP\n");
1142} 1148}
1143 1149
1150
1151
1152
1153
1154
/* Budget-tracker callbacks for the SIMPLE draining policy under C-EDF:
 * budget drains only while the task is scheduled and is preserved across
 * self-suspensions.  The generic simple_* handlers (litmus/budget.c)
 * manage the per-task hrtimer; only exhaustion handling is
 * plugin-specific. */
static struct budget_tracker_ops cedf_drain_simple_ops =
{
	.on_scheduled = simple_on_scheduled,
	.on_blocked = simple_on_blocked,
	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
	.on_exit = simple_on_exit,

	/* NOTE(review): cedf_simple_on_exhausted is defined elsewhere in
	 * this file (not visible in this hunk) — confirm. */
	.on_exhausted = cedf_simple_on_exhausted,
};
1164
1165
/* Admission test for C-EDF.
 *
 * Returns 0 on success, -EINVAL if the task does not run on a CPU of its
 * assigned cluster or requests an unsupported budget-draining policy.
 * On success, a budget tracker is attached when budget enforcement or
 * budget signalling was requested. */
static long cedf_admit_task(struct task_struct* tsk)
{
	/* The task must already execute on a CPU belonging to its cluster. */
	if (remote_cluster(task_cpu(tsk)) != task_cpu_cluster(tsk))
		return -EINVAL;

	/* Only DRAIN_SIMPLE is supported by this plugin so far. */
	if (budget_enforced(tsk) || budget_signalled(tsk)) {
		switch(get_drain_policy(tsk)) {
		case DRAIN_SIMPLE:
			init_budget_tracker(&tsk_rt(tsk)->budget, &cedf_drain_simple_ops);
			break;
		default:
			TRACE_TASK(tsk, "Unsupported budget draining mode.\n");
			return -EINVAL;
		}
	}

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
			edf_max_heap_base_priority_order);
#endif

	return 0;
}
1154 1189
1155 1190
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 08cdf5c0e492..15ac94038702 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -421,6 +421,33 @@ static noinline void job_completion(struct task_struct *t, int forced)
421 gsnedf_job_arrival(t); 421 gsnedf_job_arrival(t);
422} 422}
423 423
424static void gsnedf_simple_on_exhausted(struct task_struct *t)
425{
426 /* Assumption: t is scheduled on the CPU executing this callback */
427
428 if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
429 /* signal exhaustion */
430 send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
431 }
432
433 if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) {
434 if (!is_np(t)) {
435 /* np tasks will be preempted when they become
436 * preemptable again
437 */
438 litmus_reschedule_local();
439 TRACE("cedf_scheduler_tick: "
440 "%d is preemptable "
441 " => FORCE_RESCHED\n", t->pid);
442 } else if (is_user_np(t)) {
443 TRACE("cedf_scheduler_tick: "
444 "%d is non-preemptable, "
445 "preemption delayed.\n", t->pid);
446 request_exit_np(t);
447 }
448 }
449}
450
424/* gsnedf_tick - this function is called for every local timer 451/* gsnedf_tick - this function is called for every local timer
425 * interrupt. 452 * interrupt.
426 * 453 *
@@ -429,41 +456,16 @@ static noinline void job_completion(struct task_struct *t, int forced)
429 */ 456 */
/* gsnedf_tick - called for every local timer interrupt.
 *
 * With per-task budget trackers, the tick path only needs to react for
 * tasks whose budget is tracked at quantum granularity
 * (budget_quantum_tracked); hrtimer-based policies invoke on_exhausted
 * from their own timer callbacks. */
static void gsnedf_tick(struct task_struct* t)
{
	if (is_realtime(t) &&
		tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
		budget_exhausted(t)) {
		TRACE_TASK(t, "budget exhausted\n");
		tsk_rt(t)->budget.ops->on_exhausted(t);
	}
}
463 466
464 467
465 468
466
467#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 469#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
468 470
469 471
@@ -797,11 +799,9 @@ static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio,
797static struct task_struct* gsnedf_schedule(struct task_struct * prev) 799static struct task_struct* gsnedf_schedule(struct task_struct * prev)
798{ 800{
799 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); 801 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
800 int out_of_time, signal_budget, sleep, preempt, np, exists, blocks; 802 int out_of_time, sleep, preempt, np, exists, blocks;
801 struct task_struct* next = NULL; 803 struct task_struct* next = NULL;
802 804
803 //int completion = 0;
804
805#ifdef CONFIG_RELEASE_MASTER 805#ifdef CONFIG_RELEASE_MASTER
806 /* Bail out early if we are the release master. 806 /* Bail out early if we are the release master.
807 * The release master never schedules any real-time tasks. 807 * The release master never schedules any real-time tasks.
@@ -824,11 +824,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
824 blocks = exists && !is_running(entry->scheduled); 824 blocks = exists && !is_running(entry->scheduled);
825 out_of_time = exists && 825 out_of_time = exists &&
826 budget_enforced(entry->scheduled) && 826 budget_enforced(entry->scheduled) &&
827 budget_exhausted(entry->scheduled); 827 bt_flag_is_set(entry->scheduled, BTF_BUDGET_EXHAUSTED);
828 signal_budget = exists &&
829 budget_signalled(entry->scheduled) &&
830 budget_exhausted(entry->scheduled) &&
831 !sigbudget_sent(entry->scheduled);
832 np = exists && is_np(entry->scheduled); 828 np = exists && is_np(entry->scheduled);
833 sleep = exists && is_completed(entry->scheduled); 829 sleep = exists && is_completed(entry->scheduled);
834 preempt = entry->scheduled != entry->linked; 830 preempt = entry->scheduled != entry->linked;
@@ -839,9 +835,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
839 835
840 if (exists) { 836 if (exists) {
841 TRACE_TASK(prev, 837 TRACE_TASK(prev,
842 "blocks:%d out_of_time:%d signal_budget: %d np:%d sleep:%d preempt:%d " 838 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
843 "state:%d sig:%d\n", 839 "state:%d sig:%d\n",
844 blocks, out_of_time, signal_budget, np, sleep, preempt, 840 blocks, out_of_time, np, sleep, preempt,
845 prev->state, signal_pending(prev)); 841 prev->state, signal_pending(prev));
846 } 842 }
847 843
@@ -849,9 +845,12 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
849 TRACE_TASK(prev, "will be preempted by %s/%d\n", 845 TRACE_TASK(prev, "will be preempted by %s/%d\n",
850 entry->linked->comm, entry->linked->pid); 846 entry->linked->comm, entry->linked->pid);
851 847
852 /* Send the signal that the budget has been exhausted */ 848 /* Do budget stuff */
853 if (signal_budget) { 849 if (tsk_rt(prev)->budget.ops) {
854 send_sigbudget(entry->scheduled); 850 if (blocks)
851 tsk_rt(prev)->budget.ops->on_blocked(prev);
852 else if (preempt || sleep)
853 tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
855 } 854 }
856 855
857 /* If a task blocks we have no choice but to reschedule. 856 /* If a task blocks we have no choice but to reschedule.
@@ -1086,6 +1085,10 @@ static void gsnedf_task_exit(struct task_struct * t)
1086 /* unlink if necessary */ 1085 /* unlink if necessary */
1087 raw_spin_lock_irqsave(&gsnedf_lock, flags); 1086 raw_spin_lock_irqsave(&gsnedf_lock, flags);
1088 1087
1088 /* disable budget enforcement */
1089 if (tsk_rt(t)->budget.ops)
1090 tsk_rt(t)->budget.ops->on_exit(t);
1091
1089#ifdef CONFIG_REALTIME_AUX_TASKS 1092#ifdef CONFIG_REALTIME_AUX_TASKS
1090 /* make sure we clean up on our way out */ 1093 /* make sure we clean up on our way out */
1091 if (unlikely(tsk_rt(t)->is_aux_task)) { 1094 if (unlikely(tsk_rt(t)->is_aux_task)) {
@@ -1115,8 +1118,29 @@ static void gsnedf_task_exit(struct task_struct * t)
1115} 1118}
1116 1119
1117 1120
/* Budget-tracker callbacks for the SIMPLE draining policy under GSN-EDF:
 * budget drains only while the task is scheduled.  Generic simple_*
 * handlers manage the per-task hrtimer; exhaustion handling is
 * plugin-specific. */
static struct budget_tracker_ops gsnedf_drain_simple_ops =
{
	.on_scheduled = simple_on_scheduled,
	.on_blocked = simple_on_blocked,
	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
	.on_exit = simple_on_exit,

	.on_exhausted = gsnedf_simple_on_exhausted,
};
1130
1118static long gsnedf_admit_task(struct task_struct* tsk) 1131static long gsnedf_admit_task(struct task_struct* tsk)
1119{ 1132{
1133 if (budget_enforced(tsk) || budget_signalled(tsk)) {
1134 switch(get_drain_policy(tsk)) {
1135 case DRAIN_SIMPLE:
1136 init_budget_tracker(&tsk_rt(tsk)->budget, &gsnedf_drain_simple_ops);
1137 break;
1138 default:
1139 TRACE_TASK(tsk, "Unsupported budget draining mode.\n");
1140 return -EINVAL;
1141 }
1142 }
1143
1120#ifdef CONFIG_LITMUS_NESTED_LOCKING 1144#ifdef CONFIG_LITMUS_NESTED_LOCKING
1121 INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, 1145 INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks,
1122 edf_max_heap_base_priority_order); 1146 edf_max_heap_base_priority_order);
@@ -1126,10 +1150,6 @@ static long gsnedf_admit_task(struct task_struct* tsk)
1126} 1150}
1127 1151
1128 1152
1129
1130
1131
1132
1133#ifdef CONFIG_LITMUS_LOCKING 1153#ifdef CONFIG_LITMUS_LOCKING
1134 1154
1135#include <litmus/fdso.h> 1155#include <litmus/fdso.h>
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 9de03c95b825..60b58bb29ac4 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -51,7 +51,7 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
51 lt_t _maybe_deadlock = 0; 51 lt_t _maybe_deadlock = 0;
52 52
53 /* let the plugin schedule */ 53 /* let the plugin schedule */
54 next = litmus->schedule(prev); 54 next = litmus->schedule(prev); /* may disable prev's budget timer */
55 55
56 sched_state_plugin_check(); 56 sched_state_plugin_check();
57 57
@@ -150,9 +150,11 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
150 if (next) { 150 if (next) {
151 next->rt_param.stack_in_use = rq->cpu; 151 next->rt_param.stack_in_use = rq->cpu;
152 next->se.exec_start = rq->clock; 152 next->se.exec_start = rq->clock;
153
154 if (is_realtime(next) && tsk_rt(next)->budget.ops)
155 tsk_rt(next)->budget.ops->on_scheduled(next);
153 } 156 }
154 157
155 update_enforcement_timer(next);
156 return next; 158 return next;
157} 159}
158 160
diff --git a/litmus/sched_pfair.c b/litmus/sched_pfair.c
index d5fb3a832adc..c06326faf9ce 100644
--- a/litmus/sched_pfair.c
+++ b/litmus/sched_pfair.c
@@ -837,6 +837,16 @@ static void dump_subtasks(struct task_struct* t)
837 t->rt_param.pfair->subtasks[i].group_deadline); 837 t->rt_param.pfair->subtasks[i].group_deadline);
838} 838}
839 839
/* Budget-tracker callbacks for the SIMPLE draining policy under PFAIR.
 * NOTE(review): pfair_simple_on_exhausted is not added by any hunk
 * visible here — confirm it is defined earlier in sched_pfair.c,
 * otherwise this will not link. */
static struct budget_tracker_ops pfair_drain_simple_ops =
{
	.on_scheduled = simple_on_scheduled,
	.on_blocked = simple_on_blocked,
	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
	.on_exit = simple_on_exit,

	.on_exhausted = pfair_simple_on_exhausted,
};
849
840static long pfair_admit_task(struct task_struct* t) 850static long pfair_admit_task(struct task_struct* t)
841{ 851{
842 lt_t quanta; 852 lt_t quanta;
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index 6edec830f063..4a8b8e084f6e 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -132,6 +132,33 @@ static void job_completion(struct task_struct* t, int forced)
132 sched_trace_task_release(t); 132 sched_trace_task_release(t);
133} 133}
134 134
135static void pfp_simple_on_exhausted(struct task_struct *t)
136{
137 /* Assumption: t is scheduled on the CPU executing this callback */
138
139 if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
140 /* signal exhaustion */
141 send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
142 }
143
144 if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) {
145 if (!is_np(t)) {
146 /* np tasks will be preempted when they become
147 * preemptable again
148 */
149 litmus_reschedule_local();
150 TRACE("cedf_scheduler_tick: "
151 "%d is preemptable "
152 " => FORCE_RESCHED\n", t->pid);
153 } else if (is_user_np(t)) {
154 TRACE("cedf_scheduler_tick: "
155 "%d is non-preemptable, "
156 "preemption delayed.\n", t->pid);
157 request_exit_np(t);
158 }
159 }
160}
161
135static void pfp_tick(struct task_struct *t) 162static void pfp_tick(struct task_struct *t)
136{ 163{
137 pfp_domain_t *pfp = local_pfp; 164 pfp_domain_t *pfp = local_pfp;
@@ -142,26 +169,11 @@ static void pfp_tick(struct task_struct *t)
142 */ 169 */
143 BUG_ON(is_realtime(t) && t != pfp->scheduled); 170 BUG_ON(is_realtime(t) && t != pfp->scheduled);
144 171
145 if (is_realtime(t) && budget_exhausted(t)) 172 if (is_realtime(t) &&
146 { 173 tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
147 if (budget_signalled(t) && !sigbudget_sent(t)) { 174 budget_exhausted(t)) {
148 /* signal exhaustion */ 175 TRACE_TASK(t, "budget exhausted\n");
149 send_sigbudget(t); 176 tsk_rt(t)->budget.ops->on_exhausted(t);
150 }
151
152 if (budget_enforced(t)) {
153 if (!is_np(t)) {
154 litmus_reschedule_local();
155 TRACE("pfp_scheduler_tick: "
156 "%d is preemptable "
157 " => FORCE_RESCHED\n", t->pid);
158 } else if (is_user_np(t)) {
159 TRACE("pfp_scheduler_tick: "
160 "%d is non-preemptable, "
161 "preemption delayed.\n", t->pid);
162 request_exit_np(t);
163 }
164 }
165 } 177 }
166} 178}
167 179
@@ -170,7 +182,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
170 pfp_domain_t* pfp = local_pfp; 182 pfp_domain_t* pfp = local_pfp;
171 struct task_struct* next; 183 struct task_struct* next;
172 184
173 int out_of_time, signal_budget, sleep, preempt, np, exists, blocks, resched, migrate; 185 int out_of_time, sleep, preempt, np, exists, blocks, resched, migrate;
174 186
175 raw_spin_lock(&pfp->slock); 187 raw_spin_lock(&pfp->slock);
176 188
@@ -186,11 +198,7 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
186 blocks = exists && !is_running(pfp->scheduled); 198 blocks = exists && !is_running(pfp->scheduled);
187 out_of_time = exists && 199 out_of_time = exists &&
188 budget_enforced(pfp->scheduled) && 200 budget_enforced(pfp->scheduled) &&
189 budget_exhausted(pfp->scheduled); 201 bt_flag_is_set(pfp->scheduled, BTF_BUDGET_EXHAUSTED);
190 signal_budget = exists &&
191 budget_signalled(pfp->scheduled) &&
192 budget_exhausted(pfp->scheduled) &&
193 !sigbudget_sent(pfp->scheduled);
194 np = exists && is_np(pfp->scheduled); 202 np = exists && is_np(pfp->scheduled);
195 sleep = exists && is_completed(pfp->scheduled); 203 sleep = exists && is_completed(pfp->scheduled);
196 migrate = exists && get_partition(pfp->scheduled) != pfp->cpu; 204 migrate = exists && get_partition(pfp->scheduled) != pfp->cpu;
@@ -202,9 +210,13 @@ static struct task_struct* pfp_schedule(struct task_struct * prev)
202 */ 210 */
203 resched = preempt; 211 resched = preempt;
204 212
205 /* Send the signal that the budget has been exhausted */ 213 /* Do budget stuff */
206 if (signal_budget) 214 if (tsk_rt(prev)->budget.ops) {
207 send_sigbudget(pfp->scheduled); 215 if (blocks)
216 tsk_rt(prev)->budget.ops->on_blocked(prev);
217 else if (preempt || sleep)
218 tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
219 }
208 220
209 /* If a task blocks we have no choice but to reschedule. 221 /* If a task blocks we have no choice but to reschedule.
210 */ 222 */
@@ -418,6 +430,11 @@ static void pfp_task_exit(struct task_struct * t)
418 rt_domain_t* dom; 430 rt_domain_t* dom;
419 431
420 raw_spin_lock_irqsave(&pfp->slock, flags); 432 raw_spin_lock_irqsave(&pfp->slock, flags);
433
434 /* disable budget enforcement */
435 if (tsk_rt(t)->budget.ops)
436 tsk_rt(t)->budget.ops->on_exit(t);
437
421 if (is_queued(t)) { 438 if (is_queued(t)) {
422 BUG(); /* This currently doesn't work. */ 439 BUG(); /* This currently doesn't work. */
423 /* dequeue */ 440 /* dequeue */
@@ -1692,17 +1709,43 @@ static long pfp_allocate_lock(struct litmus_lock **lock, int type,
1692 1709
1693#endif 1710#endif
1694 1711
/* Budget-tracker callbacks for the SIMPLE draining policy under P-FP:
 * budget drains only while the task is scheduled.  Generic simple_*
 * handlers manage the per-task hrtimer; exhaustion handling is
 * plugin-specific. */
static struct budget_tracker_ops pfp_drain_simple_ops =
{
	.on_scheduled = simple_on_scheduled,
	.on_blocked = simple_on_blocked,
	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
	.on_exit = simple_on_exit,

	.on_exhausted = pfp_simple_on_exhausted,
};
1721
/* Admission test for P-FP.
 *
 * The task must execute on its assigned partition CPU (and not on the
 * release master CPU, if configured) and carry a valid fixed priority.
 * On success, a budget tracker is attached when budget enforcement or
 * signalling was requested; only DRAIN_SIMPLE is supported.
 * Returns 0 on success, -EINVAL otherwise. */
static long pfp_admit_task(struct task_struct* tsk)
{
	long ret = 0;

	if (task_cpu(tsk) == tsk->rt_param.task_params.cpu &&
#ifdef CONFIG_RELEASE_MASTER
	    /* don't allow tasks on release master CPU */
	    task_cpu(tsk) != remote_dom(task_cpu(tsk))->release_master &&
#endif
	    litmus_is_valid_fixed_prio(get_priority(tsk))) {

		if (budget_enforced(tsk) || budget_signalled(tsk)) {
			switch(get_drain_policy(tsk)) {
			case DRAIN_SIMPLE:
				init_budget_tracker(&tsk_rt(tsk)->budget, &pfp_drain_simple_ops);
				break;
			default:
				TRACE_TASK(tsk, "Unsupported budget draining mode.\n");
				ret = -EINVAL;
				break;
			}
		}
	}
	else
		ret = -EINVAL;

	return ret;
}
1707 1750
1708static long pfp_activate_plugin(void) 1751static long pfp_activate_plugin(void)
diff --git a/litmus/sched_psn_edf.c b/litmus/sched_psn_edf.c
index d1177cab152d..3b3edfe908ff 100644
--- a/litmus/sched_psn_edf.c
+++ b/litmus/sched_psn_edf.c
@@ -164,6 +164,33 @@ static void job_completion(struct task_struct* t, int forced)
164 prepare_for_next_period(t); 164 prepare_for_next_period(t);
165} 165}
166 166
167static void psnedf_simple_on_exhausted(struct task_struct *t)
168{
169 /* Assumption: t is scheduled on the CPU executing this callback */
170
171 if (budget_signalled(t) && !bt_flag_is_set(t, BTF_SIG_BUDGET_SENT)) {
172 /* signal exhaustion */
173 send_sigbudget(t); /* will set BTF_SIG_BUDGET_SENT */
174 }
175
176 if (budget_enforced(t) && !bt_flag_test_and_set(t, BTF_BUDGET_EXHAUSTED)) {
177 if (!is_np(t)) {
178 /* np tasks will be preempted when they become
179 * preemptable again
180 */
181 litmus_reschedule_local();
182 TRACE("cedf_scheduler_tick: "
183 "%d is preemptable "
184 " => FORCE_RESCHED\n", t->pid);
185 } else if (is_user_np(t)) {
186 TRACE("cedf_scheduler_tick: "
187 "%d is non-preemptable, "
188 "preemption delayed.\n", t->pid);
189 request_exit_np(t);
190 }
191 }
192}
193
167static void psnedf_tick(struct task_struct *t) 194static void psnedf_tick(struct task_struct *t)
168{ 195{
169 psnedf_domain_t *pedf = local_pedf; 196 psnedf_domain_t *pedf = local_pedf;
@@ -174,26 +201,11 @@ static void psnedf_tick(struct task_struct *t)
174 */ 201 */
175 BUG_ON(is_realtime(t) && t != pedf->scheduled); 202 BUG_ON(is_realtime(t) && t != pedf->scheduled);
176 203
177 if (is_realtime(t) && budget_exhausted(t)) 204 if (is_realtime(t) &&
178 { 205 tsk_rt(t)->budget.ops && budget_quantum_tracked(t) &&
179 if (budget_signalled(t) && !sigbudget_sent(t)) { 206 budget_exhausted(t)) {
180 /* signal exhaustion */ 207 TRACE_TASK(t, "budget exhausted\n");
181 send_sigbudget(t); 208 tsk_rt(t)->budget.ops->on_exhausted(t);
182 }
183
184 if (budget_enforced(t)) {
185 if (!is_np(t)) {
186 litmus_reschedule_local();
187 TRACE("psnedf_scheduler_tick: "
188 "%d is preemptable "
189 " => FORCE_RESCHED\n", t->pid);
190 } else if (is_user_np(t)) {
191 TRACE("psnedf_scheduler_tick: "
192 "%d is non-preemptable, "
193 "preemption delayed.\n", t->pid);
194 request_exit_np(t);
195 }
196 }
197 } 209 }
198} 210}
199 211
@@ -203,7 +215,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
203 rt_domain_t* edf = &pedf->domain; 215 rt_domain_t* edf = &pedf->domain;
204 struct task_struct* next; 216 struct task_struct* next;
205 217
206 int out_of_time, signal_budget, sleep, preempt, np, exists, blocks, resched; 218 int out_of_time, sleep, preempt, np, exists, blocks, resched;
207 219
208 raw_spin_lock(&pedf->slock); 220 raw_spin_lock(&pedf->slock);
209 221
@@ -219,11 +231,7 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
219 blocks = exists && !is_running(pedf->scheduled); 231 blocks = exists && !is_running(pedf->scheduled);
220 out_of_time = exists && 232 out_of_time = exists &&
221 budget_enforced(pedf->scheduled) && 233 budget_enforced(pedf->scheduled) &&
222 budget_exhausted(pedf->scheduled); 234 bt_flag_is_set(pedf->scheduled, BTF_BUDGET_EXHAUSTED);
223 signal_budget = exists &&
224 budget_signalled(pedf->scheduled) &&
225 budget_exhausted(pedf->scheduled) &&
226 !sigbudget_sent(pedf->scheduled);
227 np = exists && is_np(pedf->scheduled); 235 np = exists && is_np(pedf->scheduled);
228 sleep = exists && is_completed(pedf->scheduled); 236 sleep = exists && is_completed(pedf->scheduled);
229 preempt = edf_preemption_needed(edf, prev); 237 preempt = edf_preemption_needed(edf, prev);
@@ -234,9 +242,13 @@ static struct task_struct* psnedf_schedule(struct task_struct * prev)
234 */ 242 */
235 resched = preempt; 243 resched = preempt;
236 244
237 /* Send the signal that the budget has been exhausted */ 245 /* Do budget stuff */
238 if (signal_budget) 246 if (tsk_rt(prev)->budget.ops) {
239 send_sigbudget(pedf->scheduled); 247 if (blocks)
248 tsk_rt(prev)->budget.ops->on_blocked(prev);
249 else if (preempt || sleep)
250 tsk_rt(prev)->budget.ops->on_preempt_or_sleep(prev);
251 }
240 252
241 /* If a task blocks we have no choice but to reschedule. 253 /* If a task blocks we have no choice but to reschedule.
242 */ 254 */
@@ -380,6 +392,11 @@ static void psnedf_task_exit(struct task_struct * t)
380 rt_domain_t* edf; 392 rt_domain_t* edf;
381 393
382 raw_spin_lock_irqsave(&pedf->slock, flags); 394 raw_spin_lock_irqsave(&pedf->slock, flags);
395
396 /* disable budget enforcement */
397 if (tsk_rt(t)->budget.ops)
398 tsk_rt(t)->budget.ops->on_exit(t);
399
383 if (is_queued(t)) { 400 if (is_queued(t)) {
384 /* dequeue */ 401 /* dequeue */
385 edf = task_edf(t); 402 edf = task_edf(t);
@@ -626,17 +643,43 @@ static long psnedf_activate_plugin(void)
626 return 0; 643 return 0;
627} 644}
628 645
/* Budget-tracker callbacks for the SIMPLE draining policy under PSN-EDF:
 * budget drains only while the task is scheduled.  Generic simple_*
 * handlers manage the per-task hrtimer; exhaustion handling is
 * plugin-specific. */
static struct budget_tracker_ops psnedf_drain_simple_ops =
{
	.on_scheduled = simple_on_scheduled,
	.on_blocked = simple_on_blocked,
	.on_preempt_or_sleep = simple_on_preempt_or_sleep,
	.on_exit = simple_on_exit,

	.on_exhausted = psnedf_simple_on_exhausted,
};
655
/* Admission test for PSN-EDF.
 *
 * The task must execute on its assigned partition CPU (and not on the
 * release master CPU, if configured).  On success, a budget tracker is
 * attached when budget enforcement or signalling was requested; only
 * DRAIN_SIMPLE is supported.  Returns 0 on success, -EINVAL otherwise. */
static long psnedf_admit_task(struct task_struct* tsk)
{
	long ret = 0;

	if (task_cpu(tsk) == tsk->rt_param.task_params.cpu
#ifdef CONFIG_RELEASE_MASTER
	    /* don't allow tasks on release master CPU */
	    && task_cpu(tsk) != remote_edf(task_cpu(tsk))->release_master
#endif
	    ) {

		if (budget_enforced(tsk) || budget_signalled(tsk)) {
			switch(get_drain_policy(tsk)) {
			case DRAIN_SIMPLE:
				init_budget_tracker(&tsk_rt(tsk)->budget, &psnedf_drain_simple_ops);
				break;
			default:
				TRACE_TASK(tsk, "Unsupported budget draining mode.\n");
				ret = -EINVAL;
				break;
			}
		}
	}
	else
		ret = -EINVAL;

	return ret;
}
641 684
642/* Plugin object */ 685/* Plugin object */