Diffstat (limited to 'include/litmus')
 include/litmus/aux_tasks.h            |  23
 include/litmus/budget.h               |  20
 include/litmus/edf_common.h           |  12
 include/litmus/fdso.h                 |  16
 include/litmus/fpmath.h               |   5
 include/litmus/gpu_affinity.h         |  66
 include/litmus/ikglp_lock.h           | 164
 include/litmus/kexclu_affinity.h      |  35
 include/litmus/kfmlp_lock.h           |  97
 include/litmus/litmus.h               |  17
 include/litmus/litmus_softirq.h       | 166
 include/litmus/locking.h              | 145
 include/litmus/nvidia_info.h          |  51
 include/litmus/preempt.h              |   2
 include/litmus/rsm_lock.h             |  54
 include/litmus/rt_param.h             | 200
 include/litmus/sched_plugin.h         |  80
 include/litmus/sched_trace.h          | 218
 include/litmus/sched_trace_external.h |  78
 include/litmus/signal.h               |  47
 include/litmus/trace.h                |  25
 include/litmus/unistd_32.h            |   8
 include/litmus/unistd_64.h            |  12
 23 files changed, 1499 insertions(+), 42 deletions(-)
diff --git a/include/litmus/aux_tasks.h b/include/litmus/aux_tasks.h
new file mode 100644
index 000000000000..255bbafcc6b7
--- /dev/null
+++ b/include/litmus/aux_tasks.h
@@ -0,0 +1,23 @@
+#ifndef LITMUS_AUX_TASKS
+#define LITMUS_AUX_TASKS
+
+struct task_struct;
+
+int make_aux_task_if_required(struct task_struct *t);
+
+/* call on an aux task when it exits real-time */
+int exit_aux_task(struct task_struct *t);
+
+/* call when an aux_owner becomes real-time */
+long enable_aux_task_owner(struct task_struct *t);
+
+/* call when an aux_owner exits real-time */
+long disable_aux_task_owner(struct task_struct *t);
+
+/* call when an aux_owner increases its priority */
+int aux_task_owner_increase_priority(struct task_struct *t);
+
+/* call when an aux_owner decreases its priority */
+int aux_task_owner_decrease_priority(struct task_struct *t);
+
+#endif
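
The header above only declares hook points; here is a hedged sketch of where a plugin might invoke them (the plugin callback names are illustrative, not part of this patch):

static void demo_plugin_task_new(struct task_struct *t)
{
	/* promote t's helper threads if it owns any aux tasks */
	enable_aux_task_owner(t);
}

static void demo_plugin_task_exit(struct task_struct *t)
{
	if (tsk_rt(t)->is_aux_task)
		exit_aux_task(t);		/* t itself is an aux thread */
	else
		disable_aux_task_owner(t);	/* t may own aux threads */
}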
diff --git a/include/litmus/budget.h b/include/litmus/budget.h
index 33344ee8d5f9..763b31c0e9f6 100644
--- a/include/litmus/budget.h
+++ b/include/litmus/budget.h
@@ -5,6 +5,9 @@
  * the next task. */
 void update_enforcement_timer(struct task_struct* t);
 
+/* Send SIG_BUDGET to a real-time task. */
+void send_sigbudget(struct task_struct* t);
+
 inline static int budget_exhausted(struct task_struct* t)
 {
 	return get_exec_time(t) >= get_exec_cost(t);
@@ -19,10 +22,21 @@ inline static lt_t budget_remaining(struct task_struct* t)
 	return 0;
 }
 
-#define budget_enforced(t) (tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
+#define budget_enforced(t) (\
+	tsk_rt(t)->task_params.budget_policy != NO_ENFORCEMENT)
+
+#define budget_precisely_tracked(t) (\
+	tsk_rt(t)->task_params.budget_policy == PRECISE_ENFORCEMENT || \
+	tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)
+
+#define budget_signalled(t) (\
+	tsk_rt(t)->task_params.budget_signal_policy != NO_SIGNALS)
+
+#define budget_precisely_signalled(t) (\
+	tsk_rt(t)->task_params.budget_signal_policy == PRECISE_SIGNALS)
 
-#define budget_precisely_enforced(t) (tsk_rt(t)->task_params.budget_policy \
-	== PRECISE_ENFORCEMENT)
+#define sigbudget_sent(t) (\
+	test_bit(RT_JOB_SIG_BUDGET_SENT, &tsk_rt(t)->job_params.flags))
 
 static inline int requeue_preempted_job(struct task_struct* t)
 {
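
A hedged sketch of how the new helpers compose (the tick handler and the litmus_reschedule_local() response are illustrative assumptions, not part of this patch):

static void demo_budget_check(struct task_struct* t)
{
	if (budget_exhausted(t)) {
		/* signal first, so the task can react before enforcement */
		if (budget_signalled(t) && !sigbudget_sent(t))
			send_sigbudget(t);	/* sets RT_JOB_SIG_BUDGET_SENT */
		if (budget_enforced(t))
			litmus_reschedule_local();
	}
}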
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index bbaf22ea7f12..63dff7efe8fb 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -20,6 +20,18 @@ int edf_higher_prio(struct task_struct* first,
 
 int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+/* binheap_nodes must be embedded within 'struct litmus_lock' */
+int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b);
+int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b);
+int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b);
+int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b);
+
+int __edf_higher_prio(struct task_struct* first, comparison_mode_t first_mode,
+		      struct task_struct* second, comparison_mode_t second_mode);
+
+#endif
+
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
 #endif
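
A hedged sketch of the two-mode comparison in use (the scenario is illustrative; BASE and EFFECTIVE are defined in sched_plugin.h below): a single test can mix base and inherited priorities.

/* Does waiter 'w' beat holder 'h' on its own merits? Compare w's BASE
 * priority against h's EFFECTIVE (possibly inherited) priority. */
static int demo_waiter_beats_holder(struct task_struct* w, struct task_struct* h)
{
	return __edf_higher_prio(w, BASE, h, EFFECTIVE);
}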
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index f2115b83f1e4..1469c0fd0460 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -24,9 +24,18 @@ typedef enum {
 	MPCP_VS_SEM	= 3,
 	DPCP_SEM	= 4,
 
 	PCP_SEM		= 5,
 
-	MAX_OBJ_TYPE	= 5
+	RSM_MUTEX	= 6,
+	IKGLP_SEM	= 7,
+	KFMLP_SEM	= 8,
+
+	IKGLP_SIMPLE_GPU_AFF_OBS = 9,
+	IKGLP_GPU_AFF_OBS	 = 10,
+	KFMLP_SIMPLE_GPU_AFF_OBS = 11,
+	KFMLP_GPU_AFF_OBS	 = 12,
+
+	MAX_OBJ_TYPE	= 12
 } obj_type_t;
 
 struct inode_obj_id {
@@ -70,8 +79,11 @@ static inline void* od_lookup(int od, obj_type_t type)
 }
 
 #define lookup_fmlp_sem(od) ((struct pi_semaphore*)  od_lookup(od, FMLP_SEM))
+#define lookup_kfmlp_sem(od)((struct pi_semaphore*)  od_lookup(od, KFMLP_SEM))
 #define lookup_srp_sem(od)  ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
 #define lookup_ics(od)      ((struct ics*)           od_lookup(od, ICS_ID))
 
+#define lookup_rsm_mutex(od)((struct litmus_lock*)   od_lookup(od, RSM_MUTEX))
+
 
 #endif
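
A hedged sketch of how the new object types might be dispatched in a plugin's lock allocator (the function and ops-table names are hypothetical; the constructors are those declared in rsm_lock.h and kfmlp_lock.h below):

static long demo_allocate_lock(struct litmus_lock **lock, int type,
			       void* __user args)
{
	switch (type) {
	case RSM_MUTEX:
		*lock = rsm_mutex_new(&demo_rsm_ops);
		break;
	case KFMLP_SEM:
		*lock = kfmlp_new(&demo_kfmlp_ops, args);
		break;
	default:
		return -ENXIO;	/* type not handled by this plugin */
	}
	return *lock ? 0 : -ENOMEM;
}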
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
index 642de98542c8..3d545fd2f5c5 100644
--- a/include/litmus/fpmath.h
+++ b/include/litmus/fpmath.h
@@ -1,11 +1,12 @@
 #ifndef __FP_MATH_H__
 #define __FP_MATH_H__
 
+#ifdef __KERNEL__
 #include <linux/math64.h>
-
-#ifndef __KERNEL__
+#else
 #include <stdint.h>
 #define abs(x) (((x) < 0) ? -(x) : x)
+#define div64_s64(a, b) (a)/(b)
 #endif
 
 // Use 64-bit because we want to track things at the nanosecond scale.
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
new file mode 100644
index 000000000000..47da725717b0
--- /dev/null
+++ b/include/litmus/gpu_affinity.h
@@ -0,0 +1,66 @@
+#ifndef LITMUS_GPU_AFFINITY_H
+#define LITMUS_GPU_AFFINITY_H
+
+#include <litmus/rt_param.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/litmus.h>
+
+void update_gpu_estimate(struct task_struct* t, lt_t observed);
+gpu_migration_dist_t gpu_migration_distance(int a, int b);
+
+static inline void reset_gpu_tracker(struct task_struct* t)
+{
+	t->rt_param.accum_gpu_time = 0;
+}
+
+static inline void start_gpu_tracker(struct task_struct* t)
+{
+	t->rt_param.gpu_time_stamp = litmus_clock();
+}
+
+static inline void stop_gpu_tracker(struct task_struct* t)
+{
+	lt_t now = litmus_clock();
+	t->rt_param.accum_gpu_time += (now - t->rt_param.gpu_time_stamp);
+}
+
+static inline lt_t get_gpu_time(struct task_struct* t)
+{
+	return t->rt_param.accum_gpu_time;
+}
+
+static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist)
+{
+	int i;
+	lt_t val;
+
+	if (dist == MIG_NONE) {
+		dist = MIG_LOCAL;
+	}
+
+	/* lower-bound the estimate for a distant migration by the
+	 * estimates of all shorter migration distances */
+	val = t->rt_param.gpu_migration_est[dist].avg;
+	for (i = dist-1; i >= 0; --i) {
+		if (t->rt_param.gpu_migration_est[i].avg > val) {
+			val = t->rt_param.gpu_migration_est[i].avg;
+		}
+	}
+
+	/* never return a zero (or negative) estimate */
+	return ((val > 0) ? val : dist+1);
+}
+
+#endif
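
A hedged sketch of the intended call pattern around a GPU critical section (the hook placement is an assumption based on the declarations above):

static void demo_gpu_section(struct task_struct* t)
{
	/* on acquiring a GPU replica */
	reset_gpu_tracker(t);
	start_gpu_tracker(t);

	/* GPU work happens here; pause/resume the tracker across
	 * suspensions if suspend_gpu_tracker_on_block is set */

	/* on releasing the replica: feed the observation to the estimator */
	stop_gpu_tracker(t);
	update_gpu_estimate(t, get_gpu_time(t));
}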
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
new file mode 100644
index 000000000000..af155eadbb35
--- /dev/null
+++ b/include/litmus/ikglp_lock.h
@@ -0,0 +1,164 @@
+#ifndef LITMUS_IKGLP_H
+#define LITMUS_IKGLP_H
+
+#include <litmus/litmus.h>
+#include <litmus/binheap.h>
+#include <litmus/locking.h>
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+#include <litmus/kexclu_affinity.h>
+
+struct ikglp_affinity;
+#endif
+
+typedef struct ikglp_heap_node
+{
+	struct task_struct *task;
+	struct binheap_node node;
+} ikglp_heap_node_t;
+
+struct fifo_queue;
+struct ikglp_wait_state;
+
+typedef struct ikglp_donee_heap_node
+{
+	struct task_struct *task;
+	struct fifo_queue *fq;
+	struct ikglp_wait_state *donor_info;	/* cross-linked with donor's ikglp_wait_state_t */
+
+	struct binheap_node node;
+} ikglp_donee_heap_node_t;
+
+/* Maintains the state of a request as it goes through the IKGLP */
+typedef struct ikglp_wait_state {
+	struct task_struct *task;	/* pointer back to the requesting task */
+
+	/* data for while waiting in a FIFO queue */
+	wait_queue_t fq_node;
+	ikglp_heap_node_t global_heap_node;
+	ikglp_donee_heap_node_t donee_heap_node;
+
+	/* data for while waiting in the PQ */
+	ikglp_heap_node_t pq_node;
+
+	/* data for while waiting as a donor */
+	ikglp_donee_heap_node_t *donee_info;	/* cross-linked with donee's ikglp_donee_heap_node_t */
+	struct nested_info prio_donation;
+	struct binheap_node node;
+} ikglp_wait_state_t;
+
+/* struct for semaphore with priority inheritance */
+struct fifo_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+
+	/* used for bookkeeping */
+	ikglp_heap_node_t global_heap_node;
+	ikglp_donee_heap_node_t donee_heap_node;
+
+	struct task_struct* hp_waiter;
+	int count;	/* number of waiters + holder */
+
+	struct nested_info nest;
+};
+
+struct ikglp_semaphore
+{
+	struct litmus_lock litmus_lock;
+
+	raw_spinlock_t lock;
+	raw_spinlock_t real_lock;
+
+	int nr_replicas;	/* AKA k */
+	int m;
+
+	int max_fifo_len;	/* max length of a FIFO queue */
+	int nr_in_fifos;
+
+	struct binheap top_m;	/* min-heap, base prio */
+	int top_m_size;		/* number of nodes in top_m */
+
+	struct binheap not_top_m;	/* max-heap, base prio */
+
+	struct binheap donees;	/* min-heap, base prio */
+	struct fifo_queue *shortest_fifo_queue;	/* pointer to shortest FIFO queue */
+
+	/* data structures for holding requests */
+	struct fifo_queue *fifo_queues;	/* array, nr_replicas in length */
+	struct binheap priority_queue;	/* max-heap, base prio */
+	struct binheap donors;		/* max-heap, base prio */
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	struct ikglp_affinity *aff_obs;
+#endif
+};
+
+static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct ikglp_semaphore, litmus_lock);
+}
+
+int ikglp_lock(struct litmus_lock* l);
+int ikglp_unlock(struct litmus_lock* l);
+int ikglp_close(struct litmus_lock* l);
+void ikglp_free(struct litmus_lock* l);
+struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg);
+
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+
+struct ikglp_queue_info
+{
+	struct fifo_queue* q;
+	lt_t estimated_len;
+	int *nr_cur_users;
+	int64_t *nr_aff_users;
+};
+
+struct ikglp_affinity_ops
+{
+	struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t);	/* select a FIFO */
+	ikglp_wait_state_t* (*advise_steal)(struct ikglp_affinity* aff, struct fifo_queue* dst);	/* select a waiter to steal from a FIFO */
+	ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff, struct task_struct* t);	/* select a donee */
+	ikglp_wait_state_t* (*advise_donor_to_fq)(struct ikglp_affinity* aff, struct fifo_queue* dst);	/* select a donor to move to the FIFO */
+
+	void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	/* FIFO enqueue */
+	void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	/* FIFO dequeue */
+	void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	/* replica acquired */
+	void (*notify_freed)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);	/* replica freed */
+	int (*replica_to_resource)(struct ikglp_affinity* aff, struct fifo_queue* fq);	/* convert a replica # to a GPU (includes offsets and simultaneous-user folding) */
+
+	int (*notify_exit)(struct ikglp_affinity* aff, struct task_struct* t);
+};
+
+struct ikglp_affinity
+{
+	struct affinity_observer obs;
+	struct ikglp_affinity_ops *ops;
+	struct ikglp_queue_info *q_info;
+	int *nr_cur_users_on_rsrc;
+	int64_t *nr_aff_on_rsrc;
+	int offset;
+	int nr_simult;
+	int nr_rsrc;
+	int relax_max_fifo_len;
+};
+
+static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
+{
+	return container_of(aff_obs, struct ikglp_affinity, obs);
+}
+
+int ikglp_aff_obs_close(struct affinity_observer*);
+void ikglp_aff_obs_free(struct affinity_observer*);
+struct affinity_observer* ikglp_gpu_aff_obs_new(struct affinity_observer_ops*,
+						void* __user arg);
+struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
+						void* __user arg);
+#endif
+
+#endif
diff --git a/include/litmus/kexclu_affinity.h b/include/litmus/kexclu_affinity.h
new file mode 100644
index 000000000000..f6355de49074
--- /dev/null
+++ b/include/litmus/kexclu_affinity.h
@@ -0,0 +1,35 @@
+#ifndef LITMUS_AFF_OBS_H
+#define LITMUS_AFF_OBS_H
+
+#include <litmus/locking.h>
+
+struct affinity_observer_ops;
+
+struct affinity_observer
+{
+	struct affinity_observer_ops* ops;
+	int type;
+	int ident;
+
+	struct litmus_lock* lock;	/* the lock under observation */
+};
+
+typedef int (*aff_obs_open_t)(struct affinity_observer* aff_obs,
+			      void* __user arg);
+typedef int (*aff_obs_close_t)(struct affinity_observer* aff_obs);
+typedef void (*aff_obs_free_t)(struct affinity_observer* aff_obs);
+
+struct affinity_observer_ops
+{
+	aff_obs_open_t open;
+	aff_obs_close_t close;
+	aff_obs_free_t deallocate;
+};
+
+struct litmus_lock* get_lock_from_od(int od);
+
+void affinity_observer_new(struct affinity_observer* aff,
+			   struct affinity_observer_ops* ops,
+			   struct affinity_observer_args* args);
+
+#endif
diff --git a/include/litmus/kfmlp_lock.h b/include/litmus/kfmlp_lock.h
new file mode 100644
index 000000000000..5f0aae6e6f42
--- /dev/null
+++ b/include/litmus/kfmlp_lock.h
@@ -0,0 +1,97 @@
+#ifndef LITMUS_KFMLP_H
+#define LITMUS_KFMLP_H
+
+#include <litmus/litmus.h>
+#include <litmus/locking.h>
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+#include <litmus/kexclu_affinity.h>
+
+struct kfmlp_affinity;
+#endif
+
+/* struct for semaphore with priority inheritance */
+struct kfmlp_queue
+{
+	wait_queue_head_t wait;
+	struct task_struct* owner;
+	struct task_struct* hp_waiter;
+	int count;	/* number of waiters + holder */
+};
+
+struct kfmlp_semaphore
+{
+	struct litmus_lock litmus_lock;
+
+	spinlock_t lock;
+
+	int num_resources;	/* aka k */
+
+	struct kfmlp_queue *queues;		/* array */
+	struct kfmlp_queue *shortest_queue;	/* pointer to shortest queue */
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	struct kfmlp_affinity *aff_obs;
+#endif
+};
+
+static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct kfmlp_semaphore, litmus_lock);
+}
+
+int kfmlp_lock(struct litmus_lock* l);
+int kfmlp_unlock(struct litmus_lock* l);
+int kfmlp_close(struct litmus_lock* l);
+void kfmlp_free(struct litmus_lock* l);
+struct litmus_lock* kfmlp_new(struct litmus_lock_ops*, void* __user arg);
+
+#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
+
+struct kfmlp_queue_info
+{
+	struct kfmlp_queue* q;
+	lt_t estimated_len;
+	int *nr_cur_users;
+};
+
+struct kfmlp_affinity_ops
+{
+	struct kfmlp_queue* (*advise_enqueue)(struct kfmlp_affinity* aff, struct task_struct* t);
+	struct task_struct* (*advise_steal)(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from);
+	void (*notify_enqueue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
+	void (*notify_dequeue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
+	void (*notify_acquired)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
+	void (*notify_freed)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
+	int (*replica_to_resource)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq);
+};
+
+struct kfmlp_affinity
+{
+	struct affinity_observer obs;
+	struct kfmlp_affinity_ops *ops;
+	struct kfmlp_queue_info *q_info;
+	int *nr_cur_users_on_rsrc;
+	int offset;
+	int nr_simult;
+	int nr_rsrc;
+};
+
+static inline struct kfmlp_affinity* kfmlp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
+{
+	return container_of(aff_obs, struct kfmlp_affinity, obs);
+}
+
+int kfmlp_aff_obs_close(struct affinity_observer*);
+void kfmlp_aff_obs_free(struct affinity_observer*);
+struct affinity_observer* kfmlp_gpu_aff_obs_new(struct affinity_observer_ops*,
+						void* __user arg);
+struct affinity_observer* kfmlp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
+						void* __user arg);
+
+#endif
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 875783e6a67b..2da61fa58bdc 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,16 +26,20 @@ static inline int in_list(struct list_head* list)
 	);
 }
 
+
 struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
 
 #define NO_CPU			0xffffffff
 
 void litmus_fork(struct task_struct *tsk);
+void litmus_post_fork_thread(struct task_struct *tsk);
 void litmus_exec(void);
 /* clean up real-time state of a task */
 void exit_litmus(struct task_struct *dead_tsk);
 
 long litmus_admit_task(struct task_struct *tsk);
+
+/* called before litmus_exit_task(), but without run-queue locks held */
+void litmus_pre_exit_task(struct task_struct *tsk);
 void litmus_exit_task(struct task_struct *tsk);
 
 #define is_realtime(t) 		((t)->policy == SCHED_LITMUS)
@@ -43,6 +47,7 @@ void litmus_exit_task(struct task_struct *tsk);
 	((t)->rt_param.transition_pending)
 
 #define tsk_rt(t)		(&(t)->rt_param)
+#define tsk_aux(t)		(&(t)->aux_data)
 
 /* Realtime utility macros */
 #define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
@@ -60,9 +65,13 @@ void litmus_exit_task(struct task_struct *tsk);
 /* job_param macros */
 #define get_exec_time(t)	(tsk_rt(t)->job_params.exec_time)
 #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
+#define get_period(t)		(tsk_rt(t)->task_params.period)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_lateness(t)		(tsk_rt(t)->job_params.lateness)
 
+#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
+#define base_priority(t) (t)
+
 #define is_hrt(t)     	\
 	(tsk_rt(t)->task_params.cls == RT_CLASS_HARD)
 #define is_srt(t)     	\
@@ -99,10 +108,12 @@ static inline lt_t litmus_clock(void)
 #define earlier_deadline(a, b) (lt_before(\
 	(a)->rt_param.job_params.deadline,\
 	(b)->rt_param.job_params.deadline))
+#define shorter_period(a, b) (lt_before(\
+	(a)->rt_param.task_params.period,\
+	(b)->rt_param.task_params.period))
 #define earlier_release(a, b)  (lt_before(\
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
-
 void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 
 #ifdef CONFIG_LITMUS_LOCKING
@@ -174,8 +185,10 @@ static inline int request_exit_np_atomic(struct task_struct *t)
 		 * retry loop here since tasks might exploit that to
 		 * keep the kernel busy indefinitely. */
 		}
-	} else
+	}
+	else {
 		return 0;
+	}
 }
 
 #else
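
A hedged note on the new priority macros: effective_priority() resolves to the inheritance donor when one is set, so inheritance-aware comparisons can be written uniformly (the helper below is illustrative, not part of this patch):

static inline int demo_earlier_effective_deadline(struct task_struct* a,
						  struct task_struct* b)
{
	return earlier_deadline(effective_priority(a), effective_priority(b));
}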
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
new file mode 100644
index 000000000000..cfef08187464
--- /dev/null
+++ b/include/litmus/litmus_softirq.h
@@ -0,0 +1,166 @@
+#ifndef __LITMUS_SOFTIRQ_H
+#define __LITMUS_SOFTIRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+/*
+   Threaded tasklet/workqueue handling for Litmus.
+   Items are scheduled in the following order: hi-tasklet,
+   lo-tasklet, workqueue.  Items are scheduled in FIFO order
+   within each of these classes.
+
+   klmirqd assumes the priority of the owner of the
+   tasklet when the tasklet is next to execute.
+
+   The base priority of a klmirqd thread is below all regular
+   real-time tasks, but above all other Linux scheduling
+   classes (klmirqd threads are within the SCHED_LITMUS class).
+   Regular real-time tasks may increase the priority of
+   a klmirqd thread, but klmirqd is unaware of this
+   (this was not the case in prior incarnations of klmirqd).
+ */
+
+
+/* Initialize klmirqd */
+void init_klmirqd(void);
+
+/* Raises a flag to tell klmirqd threads to terminate.
+   Termination is asynchronous, so some threads may still be
+   running after this function returns. */
+void kill_klmirqd(void);
+
+void kill_klmirqd_thread(struct task_struct* klmirqd_thread);
+
+/* Returns 1 if all NR_LITMUS_SOFTIRQD klmirqd threads are ready
+   to handle tasklets; 0 otherwise. */
+int klmirqd_is_ready(void);
+
+/* Returns 1 if no NR_LITMUS_SOFTIRQD klmirqd threads are ready
+   to handle tasklets; 0 otherwise. */
+int klmirqd_is_dead(void);
+
+
+typedef int (*klmirqd_cb_t) (void *arg);
+
+typedef struct
+{
+	klmirqd_cb_t func;
+	void* arg;
+} klmirqd_callback_t;
+
+/* Launches a klmirqd thread with the provided affinity.
+
+   The actual launch of threads is deferred to a kworker
+   workqueue, so daemons will likely not be immediately
+   running when this function returns, though the required
+   data will be initialized.
+
+   cpu == -1 for no affinity
+
+   Provide a name at most 31 characters long (32 bytes,
+   including the null terminator).  name == NULL selects a
+   default name.  (All names are appended with the base-CPU
+   affinity.)
+ */
+#define MAX_KLMIRQD_NAME_LEN 31
+int launch_klmirqd_thread(char* name, int cpu, klmirqd_callback_t* cb);
+
+
+/* Flushes all pending work out to the OS for regular
+ * tasklet/work processing.
+ */
+void flush_pending(struct task_struct* klmirqd_thread);
+
+extern int __litmus_tasklet_schedule(
+	struct tasklet_struct *t,
+	struct task_struct *klmirqd_thread);
+
+/* schedule a tasklet on klmirqd #k_id */
+static inline int litmus_tasklet_schedule(
+	struct tasklet_struct *t,
+	struct task_struct *klmirqd_thread)
+{
+	int ret = 0;
+	/* only schedule if the tasklet is not already pending */
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		ret = __litmus_tasklet_schedule(t, klmirqd_thread);
+	}
+	return ret;
+}
+
+/* for use by __tasklet_schedule() */
+static inline int _litmus_tasklet_schedule(
+	struct tasklet_struct *t,
+	struct task_struct *klmirqd_thread)
+{
+	return __litmus_tasklet_schedule(t, klmirqd_thread);
+}
+
+
+extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+					struct task_struct *klmirqd_thread);
+
+/* schedule a hi tasklet on klmirqd #k_id */
+static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+					     struct task_struct *klmirqd_thread)
+{
+	int ret = 0;
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		ret = __litmus_tasklet_hi_schedule(t, klmirqd_thread);
+	}
+	return ret;
+}
+
+/* for use by __tasklet_hi_schedule() */
+static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+					      struct task_struct *klmirqd_thread)
+{
+	return __litmus_tasklet_hi_schedule(t, klmirqd_thread);
+}
+
+
+extern int __litmus_tasklet_hi_schedule_first(
+	struct tasklet_struct *t,
+	struct task_struct *klmirqd_thread);
+
+/* schedule a hi tasklet on klmirqd #k_id on the next go-around */
+/* PRECONDITION: Interrupts must be disabled. */
+static inline int litmus_tasklet_hi_schedule_first(
+	struct tasklet_struct *t,
+	struct task_struct *klmirqd_thread)
+{
+	int ret = 0;
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+		ret = __litmus_tasklet_hi_schedule_first(t, klmirqd_thread);
+	}
+	return ret;
+}
+
+/* for use by __tasklet_hi_schedule_first() */
+static inline int _litmus_tasklet_hi_schedule_first(
+	struct tasklet_struct *t,
+	struct task_struct *klmirqd_thread)
+{
+	return __litmus_tasklet_hi_schedule_first(t, klmirqd_thread);
+}
+
+
+extern int __litmus_schedule_work(
+	struct work_struct* w,
+	struct task_struct *klmirqd_thread);
+
+static inline int litmus_schedule_work(
+	struct work_struct* w,
+	struct task_struct *klmirqd_thread)
+{
+	return __litmus_schedule_work(w, klmirqd_thread);
+}
+
+#endif
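
A hedged end-to-end sketch of the API above (names are illustrative; error handling elided): launch a daemon with CPU affinity, then hand it tasklets that execute at their owner's priority.

static int demo_klmirqd_setup(void *arg)
{
	return 0;	/* optional per-thread initialization */
}

static klmirqd_callback_t demo_cb = {
	.func = demo_klmirqd_setup,
	.arg  = NULL,
};

void demo(struct tasklet_struct *t, struct task_struct *daemon)
{
	/* asynchronous: the daemon may not be running before this returns */
	launch_klmirqd_thread("demo-klmirqd", 0, &demo_cb);

	/* later: queue work; klmirqd runs t at its owner's priority */
	litmus_tasklet_schedule(t, daemon);
}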
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4d7b870cb443..4a5f198a0407 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -1,28 +1,163 @@
 #ifndef LITMUS_LOCKING_H
 #define LITMUS_LOCKING_H
 
+#include <linux/list.h>
+
 struct litmus_lock_ops;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+struct nested_info
+{
+	struct litmus_lock *lock;
+	struct task_struct *hp_waiter_eff_prio;
+	struct task_struct **hp_waiter_ptr;
+	struct binheap_node hp_binheap_node;
+};
+
+static inline struct task_struct* top_priority(struct binheap* handle) {
+	if (!binheap_empty(handle)) {
+		return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio);
+	}
+	return NULL;
+}
+
+void print_hp_waiters(struct binheap_node* n, int depth);
+#endif
+
+
 /* Generic base struct for LITMUS^RT userspace semaphores.
  * This structure should be embedded in protocol-specific semaphores.
  */
 struct litmus_lock {
 	struct litmus_lock_ops *ops;
 	int type;
+
+	int ident;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	struct nested_info nest;
+//#ifdef CONFIG_DEBUG_SPINLOCK
+	char cheat_lockdep[2];
+	struct lock_class_key key;
+//#endif
+#endif
 };
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+
+#define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE
+
+typedef struct dgl_wait_state {
+	struct task_struct *task;		 /* task waiting on DGL */
+	struct litmus_lock *locks[MAX_DGL_SIZE]; /* requested locks in DGL */
+	int size;				 /* size of the DGL */
+	int nr_remaining;			 /* nr of locks remaining before DGL is complete */
+	int last_primary;			 /* index of the lock in locks[] that has active priority */
+	wait_queue_t wq_nodes[MAX_DGL_SIZE];
+} dgl_wait_state_t;
+
+void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait);
+void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/);
+
+void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait);
+int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
+void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task);
+#endif
+
+typedef int (*lock_op_t)(struct litmus_lock *l);
+typedef lock_op_t lock_close_t;
+typedef lock_op_t lock_lock_t;
+typedef lock_op_t lock_unlock_t;
+
+typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg);
+typedef void (*lock_free_t)(struct litmus_lock *l);
+
 struct litmus_lock_ops {
 	/* Current task tries to obtain / drop a reference to a lock.
 	 * Optional methods, allowed by default. */
-	int (*open)(struct litmus_lock*, void* __user);
-	int (*close)(struct litmus_lock*);
+	lock_open_t open;
+	lock_close_t close;
 
 	/* Current tries to lock/unlock this lock (mandatory methods). */
-	int (*lock)(struct litmus_lock*);
-	int (*unlock)(struct litmus_lock*);
+	lock_lock_t lock;
+	lock_unlock_t unlock;
 
 	/* The lock is no longer being referenced (mandatory method). */
-	void (*deallocate)(struct litmus_lock*);
+	lock_free_t deallocate;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
+	void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
+#endif
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l);
+	int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
+	int (*is_owner)(struct litmus_lock *l, struct task_struct *t);
+	void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
+#endif
 };
 
+
+/*
+   Nested inheritance can be achieved with fine-grain locking when there is
+   no need for DGL support, presuming locks are acquired in a partial order
+   (no cycles!).  However, DGLs allow locks to be acquired in any order.
+   This makes nested inheritance very difficult to realize with fine-grain
+   locks (we don't yet know a solution), so we use a big lock instead.
+
+   The code contains both the fine-grain and coarse-grain methods together,
+   side-by-side.  Each lock operation *IS NOT* surrounded by ifdef/endif,
+   to help keep the code readable.  However, this leads to the odd situation
+   where both code paths appear together in the code as if they were both
+   active at the same time.
+
+   THIS IS NOT REALLY THE CASE!  ONLY ONE CODE PATH IS ACTUALLY ACTIVE!
+
+   Example:
+	lock_global_irqsave(coarseLock, flags);
+	lock_fine_irqsave(fineLock, flags);
+
+   Reality (coarse):
+	lock_global_irqsave(coarseLock, flags);
+	//lock_fine_irqsave(fineLock, flags);
+
+   Reality (fine):
+	//lock_global_irqsave(coarseLock, flags);
+	lock_fine_irqsave(fineLock, flags);
+
+   Be careful when you read code involving nested inheritance.
+ */
+#if defined(CONFIG_LITMUS_DGL_SUPPORT)
+/* DGL requires a big lock to implement nested inheritance */
+#define lock_global_irqsave(lock, flags)	raw_spin_lock_irqsave((lock), (flags))
+#define lock_global(lock)			raw_spin_lock((lock))
+#define unlock_global_irqrestore(lock, flags)	raw_spin_unlock_irqrestore((lock), (flags))
+#define unlock_global(lock)			raw_spin_unlock((lock))
+
+/* fine-grain locks are no-ops with DGL support */
+#define lock_fine_irqsave(lock, flags)
+#define lock_fine(lock)
+#define unlock_fine_irqrestore(lock, flags)
+#define unlock_fine(lock)
+
+#elif defined(CONFIG_LITMUS_NESTED_LOCKING)
+/* Use fine-grain locking when DGLs are disabled. */
+/* global locks are no-ops without DGL support */
+#define lock_global_irqsave(lock, flags)
+#define lock_global(lock)
+#define unlock_global_irqrestore(lock, flags)
+#define unlock_global(lock)
+
+#define lock_fine_irqsave(lock, flags)		raw_spin_lock_irqsave((lock), (flags))
+#define lock_fine(lock)				raw_spin_lock((lock))
+#define unlock_fine_irqrestore(lock, flags)	raw_spin_unlock_irqrestore((lock), (flags))
+#define unlock_fine(lock)			raw_spin_unlock((lock))
+
+#endif
+
+
+void suspend_for_lock(void);
+
+
 #endif
+
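
A hedged sketch of the side-by-side pattern the comment block above describes (the two spinlocks are hypothetical): whichever macro family is inactive under the current configuration expands to nothing, so only one lock is ever taken.

static raw_spinlock_t demo_dgl_lock;	/* coarse: DGL builds only */
static raw_spinlock_t demo_fine_lock;	/* fine: non-DGL builds only */

int demo_protocol_op(struct litmus_lock* l)
{
	unsigned long flags;

	lock_global_irqsave(&demo_dgl_lock, flags);
	lock_fine_irqsave(&demo_fine_lock, flags);

	/* protocol-specific queueing / inheritance updates go here */

	unlock_fine_irqrestore(&demo_fine_lock, flags);
	unlock_global_irqrestore(&demo_dgl_lock, flags);
	return 0;
}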
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
new file mode 100644
index 000000000000..8c2a5524512e
--- /dev/null
+++ b/include/litmus/nvidia_info.h
@@ -0,0 +1,51 @@
+#ifndef __LITMUS_NVIDIA_H
+#define __LITMUS_NVIDIA_H
+
+#include <linux/interrupt.h>
+
+#include <litmus/litmus_softirq.h>
+
+#define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM
+
+/* TODO: Make this a function that checks the PCIe bus, or maybe proc settings */
+#define num_online_gpus() (NV_DEVICE_NUM)
+
+
+/* Functions used for decoding NVIDIA blobs. */
+
+int init_nvidia_info(void);
+void shutdown_nvidia_info(void);
+
+int is_nvidia_func(void* func_addr);
+
+void dump_nvidia_info(const struct tasklet_struct *t);
+
+/* Returns the NVIDIA device # associated with the provided tasklet or work_struct. */
+u32 get_tasklet_nv_device_num(const struct tasklet_struct *t);
+u32 get_work_nv_device_num(const struct work_struct *t);
+
+/* Functions for figuring out the priority of GPU-using tasks */
+
+struct task_struct* get_nv_max_device_owner(u32 target_device_id);
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+struct task_struct* get_nv_klmirqd_thread(u32 target_device_id);
+#endif
+
+/* call when the GPU-holding task, t, blocks */
+long enable_gpu_owner(struct task_struct *t);
+
+/* call when the GPU-holding task, t, resumes */
+long disable_gpu_owner(struct task_struct *t);
+
+/* call when the GPU-holding task, t, increases its priority */
+int gpu_owner_increase_priority(struct task_struct *t);
+
+/* call when the GPU-holding task, t, decreases its priority */
+int gpu_owner_decrease_priority(struct task_struct *t);
+
+
+int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t);
+
+#endif
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 380b886d78ff..8f3a9ca2d4e3 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -26,12 +26,12 @@ const char* sched_state_name(int s);
 		    (x), #x, __FUNCTION__);		\
 	} while (0);
 
+//#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */
 #define TRACE_SCHED_STATE_CHANGE(x, y, cpu)	\
 	TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \
 		    cpu,  (x), sched_state_name(x),	\
 		    (y), sched_state_name(y))
 
-
 typedef enum scheduling_state {
 	TASK_SCHEDULED = (1 << 0),  /* The currently scheduled task is the one that
 				     * should be scheduled, and the processor does not
diff --git a/include/litmus/rsm_lock.h b/include/litmus/rsm_lock.h
new file mode 100644
index 000000000000..a15189683de4
--- /dev/null
+++ b/include/litmus/rsm_lock.h
@@ -0,0 +1,54 @@
+#ifndef LITMUS_RSM_H
+#define LITMUS_RSM_H
+
+#include <litmus/litmus.h>
+#include <litmus/binheap.h>
+#include <litmus/locking.h>
+
+/* struct for semaphore with priority inheritance */
+struct rsm_mutex {
+	struct litmus_lock litmus_lock;
+
+	/* current resource holder */
+	struct task_struct *owner;
+
+	/* highest-priority waiter */
+	struct task_struct *hp_waiter;
+
+	/* FIFO queue of waiting tasks -- for now.  time stamp in the future. */
+	wait_queue_head_t wait;
+
+	/* we do some nesting within spinlocks, so we can't use the normal
+	   sleeplocks found in wait_queue_head_t. */
+	raw_spinlock_t lock;
+};
+
+static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock)
+{
+	return container_of(lock, struct rsm_mutex, litmus_lock);
+}
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
+int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
+void rsm_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
+#endif
+
+void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
+					      struct task_struct* t,
+					      raw_spinlock_t* to_unlock,
+					      unsigned long irqflags);
+
+void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
+					      struct task_struct* t,
+					      raw_spinlock_t* to_unlock,
+					      unsigned long irqflags);
+
+int rsm_mutex_lock(struct litmus_lock* l);
+int rsm_mutex_unlock(struct litmus_lock* l);
+int rsm_mutex_close(struct litmus_lock* l);
+void rsm_mutex_free(struct litmus_lock* l);
+struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops*);
+
+#endif
\ No newline at end of file
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 4cd06dd32906..39685a351cb1 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -1,9 +1,11 @@
+#ifndef _LINUX_RT_PARAM_H_
+#define _LINUX_RT_PARAM_H_
 /*
  * Definition of the scheduler plugin interface.
  *
  */
-#ifndef _LINUX_RT_PARAM_H_
-#define _LINUX_RT_PARAM_H_
+
+#include <litmus/fpmath.h>
 
 /* Litmus time type. */
 typedef unsigned long long lt_t;
@@ -30,9 +32,43 @@ typedef enum {
 typedef enum {
 	NO_ENFORCEMENT,      /* job may overrun unhindered */
 	QUANTUM_ENFORCEMENT, /* budgets are only checked on quantum boundaries */
-	PRECISE_ENFORCEMENT  /* budgets are enforced with hrtimers */
+	PRECISE_ENFORCEMENT, /* budgets are enforced with hrtimers */
 } budget_policy_t;
 
+typedef enum {
+	NO_SIGNALS,      /* job receives no signals when it exhausts its budget */
+	QUANTUM_SIGNALS, /* budget signals are only sent on quantum boundaries */
+	PRECISE_SIGNALS, /* budget signals are triggered with hrtimers */
+} budget_signal_policy_t;
+
+typedef enum {
+	AUX_ENABLE  = 0x1,
+	AUX_CURRENT = (AUX_ENABLE<<1),
+	AUX_FUTURE  = (AUX_CURRENT<<2)
+} aux_flags_t;
+
+/* mirror of st_event_record_type_t
+ * Assume all are UNsupported, unless otherwise stated. */
+typedef enum {
+	ST_INJECT_NAME = 1,	/* supported */
+	ST_INJECT_PARAM,	/* supported */
+	ST_INJECT_RELEASE,	/* supported */
+	ST_INJECT_ASSIGNED,
+	ST_INJECT_SWITCH_TO,
+	ST_INJECT_SWITCH_AWAY,
+	ST_INJECT_COMPLETION,	/* supported */
+	ST_INJECT_BLOCK,
+	ST_INJECT_RESUME,
+	ST_INJECT_ACTION,
+	ST_INJECT_SYS_RELEASE,	/* supported */
+} sched_trace_injection_events_t;
+
+struct st_inject_args {
+	lt_t release;
+	lt_t deadline;
+	unsigned int job_no;
+};
+
 /* We use the common priority interpretation "lower index == higher priority",
  * which is commonly used in fixed-priority schedulability analysis papers.
  * So, a numerically lower priority value implies higher scheduling priority,
@@ -62,6 +98,7 @@ struct rt_task {
 	unsigned int	priority;
 	task_class_t	cls;
 	budget_policy_t  budget_policy;	/* ignored by pfair */
+	budget_signal_policy_t budget_signal_policy; /* currently ignored by pfair */
 };
 
 union np_flag {
@@ -74,6 +111,19 @@ union np_flag {
 	} np;
 };
 
+struct affinity_observer_args
+{
+	int lock_od;
+};
+
+struct gpu_affinity_observer_args
+{
+	struct affinity_observer_args obs;
+	int replica_to_gpu_offset;
+	int nr_simult_users;
+	int relaxed_rules;
+};
+
 /* The definition of the data that is shared between the kernel and real-time
  * tasks via a shared page (see litmus/ctrldev.c).
  *
@@ -115,6 +165,13 @@ struct control_page {
 /* don't export internal data structures to user space (liblitmus) */
 #ifdef __KERNEL__
 
+#include <litmus/binheap.h>
+#include <linux/semaphore.h>
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+#include <linux/interrupt.h>
+#endif
+
 struct _rt_domain;
 struct bheap_node;
 struct release_heap;
@@ -142,10 +199,82 @@ struct rt_job {
 	 * Increase this sequence number when a job is released.
 	 */
 	unsigned int    job_no;
+
+	/* bits:
+	 * 0th: Set if a budget exhaustion signal has already been sent for
+	 *      the current job. */
+	unsigned long	flags;
 };
 
+#define RT_JOB_SIG_BUDGET_SENT	0
+
 struct pfair_param;
 
+enum klmirqd_sem_status
+{
+	NEED_TO_REACQUIRE,
+	REACQUIRING,
+	NOT_HELD,
+	HELD
+};
+
+typedef enum gpu_migration_dist
+{
+	/* TODO: Make this variable against NR_NVIDIA_GPUS */
+	MIG_LOCAL = 0,
+	MIG_NEAR  = 1,
+	MIG_MED   = 2,
+	MIG_FAR   = 3,	/* 8 GPUs in a binary tree hierarchy */
+	MIG_NONE  = 4,
+
+	MIG_LAST  = MIG_NONE
+} gpu_migration_dist_t;
+
+typedef struct feedback_est {
+	fp_t est;
+	fp_t accum_err;
+} feedback_est_t;
+
+
+#define AVG_EST_WINDOW_SIZE 20
+
+typedef int (*notify_rsrc_exit_t)(struct task_struct* tsk);
+
+typedef struct avg_est {
+	lt_t history[AVG_EST_WINDOW_SIZE];
+	uint16_t count;
+	uint16_t idx;
+	lt_t sum;
+	lt_t std;
+	lt_t avg;
+} avg_est_t;
+
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+struct klmirqd_info
+{
+	struct task_struct*	klmirqd;
+	struct task_struct*	current_owner;
+	unsigned int		terminating:1;
+
+	raw_spinlock_t		lock;
+
+	u32			pending;
+	atomic_t		num_hi_pending;
+	atomic_t		num_low_pending;
+	atomic_t		num_work_pending;
+
+	/* in order of priority */
+	struct tasklet_head	pending_tasklets_hi;
+	struct tasklet_head	pending_tasklets;
+	struct list_head	worklist;
+
+	struct list_head	klmirqd_reg;
+};
+#endif
+
+
 /* RT task parameters for scheduling extensions
  * These parameters are inherited during clone and therefore must
  * be explicitly set up before the task set is launched.
@@ -163,6 +292,40 @@ struct rt_param {
 	/* has the task completed? */
 	unsigned int		completed:1;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* proxy threads have minimum priority by default */
+	unsigned int		is_interrupt_thread:1;
+
+	/* pointer to data used by klmirqd thread.
+	 *
+	 * ptr only valid if is_interrupt_thread == 1
+	 */
+	struct klmirqd_info*	klmirqd_info;
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	/* number of top-half interrupts handled on behalf of current job */
+	atomic_t		nv_int_count;
+	long unsigned int	held_gpus;	/* bitmap of held GPUs */
+	struct binheap_node	gpu_owner_node;	/* just one GPU for now... */
+	unsigned int		hide_from_gpu:1;
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	avg_est_t		gpu_migration_est[MIG_LAST+1];
+
+	gpu_migration_dist_t	gpu_migration;
+	int			last_gpu;
+
+	notify_rsrc_exit_t	rsrc_exit_cb;
+	void*			rsrc_exit_cb_args;
+
+	lt_t			accum_gpu_time;
+	lt_t			gpu_time_stamp;
+
+	unsigned int		suspend_gpu_tracker_on_block:1;
+#endif
+#endif
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
 	unsigned int		priority_boosted:1;
@@ -182,7 +345,26 @@ struct rt_param {
 	 * could point to self if PI does not result in
 	 * an increased task priority.
 	 */
 	struct task_struct*	inh_task;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	raw_spinlock_t		hp_blocked_tasks_lock;
+	struct binheap		hp_blocked_tasks;
+
+	/* pointer to the lock upon which this task is currently blocked */
+	struct litmus_lock*	blocked_lock;
+#endif
+
+
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	unsigned int		is_aux_task:1;
+	unsigned int		has_aux_tasks:1;
+	unsigned int		hide_from_aux_tasks:1;
+
+	struct list_head	aux_task_node;
+	struct binheap_node	aux_task_owner_node;
+#endif
+
 
 #ifdef CONFIG_NP_SECTION
 	/* For the FMLP under PSN-EDF, it is required to make the task
@@ -248,6 +430,16 @@ struct rt_param {
 	struct control_page * ctrl_page;
 };
 
+#ifdef CONFIG_REALTIME_AUX_TASKS
+struct aux_data
+{
+	struct list_head	aux_tasks;
+	struct binheap		aux_task_owners;
+	unsigned int		initialized:1;
+	unsigned int		aux_future:1;
+};
 #endif
 
+#endif /* __KERNEL__ */
+
 #endif
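
avg_est_t above is a fixed-size sliding window; a hedged sketch of the update it implies follows (the real logic, presumably implemented alongside update_gpu_estimate(), may differ, e.g. in how std is maintained):

static void demo_avg_est_add(avg_est_t *est, lt_t observed)
{
	/* evict the sample being overwritten, then add the new one */
	est->sum -= est->history[est->idx];
	est->history[est->idx] = observed;
	est->sum += observed;

	est->idx = (est->idx + 1) % AVG_EST_WINDOW_SIZE;
	if (est->count < AVG_EST_WINDOW_SIZE)
		est->count++;
	est->avg = est->sum / est->count;
}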
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 1546ab7f1d66..d0e7d74bb45e 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,12 @@
 #include <litmus/locking.h>
 #endif
 
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+#include <litmus/kexclu_affinity.h>
+#endif
+
+#include <linux/interrupt.h>
+
 /************************ setup/tear down ********************/
 
 typedef long (*activate_plugin_t) (void);
@@ -29,7 +35,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
  */
 typedef void (*finish_switch_t)(struct task_struct *prev);
 
-
 /********************* task state changes ********************/
 
 /* Called to setup a new real-time task.
@@ -60,6 +65,49 @@ typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
 				 void* __user config);
 #endif
 
+struct affinity_observer;
+typedef long (*allocate_affinity_observer_t) (
+				struct affinity_observer **aff_obs, int type,
+				void* __user config);
+
+typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh);
+typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh);
+
+typedef int (*__increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh);
+typedef int (*__decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh);
+
+typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh,
+				       raw_spinlock_t *to_unlock, unsigned long irqflags);
+typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh,
+				       raw_spinlock_t *to_unlock, unsigned long irqflags);
+
+
+typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet);
+typedef void (*change_prio_pai_tasklet_t)(struct task_struct *old_prio,
+					  struct task_struct *new_prio);
+typedef void (*run_tasklets_t)(struct task_struct* next);
+
+typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t);
+
+
+typedef int (*higher_prio_t)(struct task_struct* a, struct task_struct* b);
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+
+typedef enum
+{
+	BASE,
+	EFFECTIVE
+} comparison_mode_t;
+
+typedef int (*__higher_prio_t)(struct task_struct* a, comparison_mode_t a_mod,
+			       struct task_struct* b, comparison_mode_t b_mod);
+#endif
+
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
+typedef int (*default_cpu_for_gpu_t)(int gpu);
+#endif
+
 
 /********************* sys call backends  ********************/
 /* This function causes the caller to sleep until the next release */
@@ -90,14 +138,42 @@ struct sched_plugin {
 	/* task state changes */
 	admit_task_t		admit_task;
 
 	task_new_t 		task_new;
 	task_wake_up_t		task_wake_up;
 	task_block_t		task_block;
 	task_exit_t 		task_exit;
 
+	higher_prio_t		compare;
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* locking protocols */
 	allocate_lock_t		allocate_lock;
+	increase_prio_t		increase_prio;
+	decrease_prio_t		decrease_prio;
+
+	__increase_prio_t	__increase_prio;
+	__decrease_prio_t	__decrease_prio;
+#endif
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	nested_increase_prio_t	nested_increase_prio;
+	nested_decrease_prio_t	nested_decrease_prio;
+	__higher_prio_t		__compare;
+#endif
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	get_dgl_spinlock_t	get_dgl_spinlock;
+#endif
+
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	allocate_affinity_observer_t allocate_aff_obs;
+#endif
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	enqueue_pai_tasklet_t	enqueue_pai_tasklet;
+	change_prio_pai_tasklet_t change_prio_pai_tasklet;
+	run_tasklets_t		run_tasklets;
+#endif
+
+#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
+	default_cpu_for_gpu_t	map_gpu_to_cpu;
 #endif
 } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
 
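
A hedged sketch of how a plugin might wire up the new comparison hooks (the plugin name is hypothetical; only fields added by this patch are shown):

static struct sched_plugin demo_plugin = {
	.plugin_name	= "DEMO-EDF",
	/* ... existing task-state callbacks ... */
	.compare	= edf_higher_prio,	/* single-mode comparison */
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.__compare	= __edf_higher_prio,	/* BASE/EFFECTIVE-aware */
#endif
};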
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 82bde8241298..7af12f49c600 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -10,13 +10,14 @@ struct st_trace_header {
 	u8	type;		/* Of what type is this record?  */
 	u8	cpu;		/* On which CPU was it recorded? */
 	u16	pid;		/* PID of the task.              */
-	u32	job;		/* The job sequence number.      */
-};
+	u32	job:24;		/* The job sequence number.      */
+	u8	extra;
+} __attribute__((packed));
 
 #define ST_NAME_LEN 16
 struct st_name_data {
 	char	cmd[ST_NAME_LEN];/* The name of the executable of this process. */
-};
+} __attribute__((packed));
 
 struct st_param_data {		/* regular params */
 	u32	wcet;
@@ -25,30 +26,29 @@ struct st_param_data { /* regular params */
 	u8	partition;
 	u8	class;
 	u8	__unused[2];
-};
+} __attribute__((packed));
 
 struct st_release_data {	/* A job was/is going to be released. */
 	u64	release;	/* What's the release time?  */
 	u64	deadline;	/* By when must it finish?   */
-};
+} __attribute__((packed));
 
 struct st_assigned_data {	/* A job was assigned to a CPU. */
 	u64	when;
 	u8	target;		/* Where should it execute?  */
 	u8	__unused[7];
-};
+} __attribute__((packed));
 
 struct st_switch_to_data {	/* A process was switched to on a given CPU. */
 	u64	when;		/* When did this occur?      */
 	u32	exec_time;	/* Time the current job has executed. */
 	u8	__unused[4];
-
-};
+} __attribute__((packed));
 
 struct st_switch_away_data {	/* A process was switched away from on a given CPU. */
 	u64	when;
 	u64	exec_time;
-};
+} __attribute__((packed));
 
 struct st_completion_data {	/* A job completed. */
 	u64	when;
@@ -56,35 +56,108 @@ struct st_completion_data { /* A job completed. */
56 * next task automatically; set to 0 otherwise. 56 * next task automatically; set to 0 otherwise.
57 */ 57 */
58 u8 __uflags:7; 58 u8 __uflags:7;
59 u8 __unused[7]; 59 u16 nv_int_count;
60}; 60 u8 __unused[5];
61} __attribute__((packed));
61 62
62struct st_block_data { /* A task blocks. */ 63struct st_block_data { /* A task blocks. */
63 u64 when; 64 u64 when;
64 u64 __unused; 65 u64 __unused;
65}; 66} __attribute__((packed));
66 67
67struct st_resume_data { /* A task resumes. */ 68struct st_resume_data { /* A task resumes. */
68 u64 when; 69 u64 when;
69 u64 __unused; 70 u64 __unused;
70}; 71} __attribute__((packed));
71 72
72struct st_action_data { 73struct st_action_data {
73 u64 when; 74 u64 when;
74 u8 action; 75 u8 action;
75 u8 __unused[7]; 76 u8 __unused[7];
76}; 77} __attribute__((packed));
77 78
78struct st_sys_release_data { 79struct st_sys_release_data {
79 u64 when; 80 u64 when;
80 u64 release; 81 u64 release;
81}; 82} __attribute__((packed));
83
84
85struct st_tasklet_release_data {
86 u64 when;
87 u64 __unused;
88} __attribute__((packed));
89
90struct st_tasklet_begin_data {
91 u64 when;
92 u16 exe_pid;
93 u8 __unused[6];
94} __attribute__((packed));
95
96struct st_tasklet_end_data {
97 u64 when;
98 u16 exe_pid;
99 u8 flushed;
100 u8 __unused[5];
101} __attribute__((packed));
102
103
104struct st_work_release_data {
105 u64 when;
106 u64 __unused;
107} __attribute__((packed));
108
109struct st_work_begin_data {
110 u64 when;
111 u16 exe_pid;
112 u8 __unused[6];
113} __attribute__((packed));
114
115struct st_work_end_data {
116 u64 when;
117 u16 exe_pid;
118 u8 flushed;
119 u8 __unused[5];
120} __attribute__((packed));
121
122struct st_effective_priority_change_data {
123 u64 when;
124 u16 inh_pid;
125 u8 __unused[6];
126} __attribute__((packed));
127
128struct st_nv_interrupt_begin_data {
129 u64 when;
130 u32 device;
131 u32 serialNumber;
132} __attribute__((packed));
133
134struct st_nv_interrupt_end_data {
135 u64 when;
136 u32 device;
137 u32 serialNumber;
138} __attribute__((packed));
139
140struct st_prediction_err_data {
141 u64 distance;
142 u64 rel_err;
143} __attribute__((packed));
144
145struct st_migration_data {
146 u64 observed;
147 u64 estimated;
148} __attribute__((packed));
149
150struct migration_info {
151 u64 observed;
152 u64 estimated;
153 u8 distance;
154} __attribute__((packed));
82 155
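
Every st_*_data payload above is padded to the same 16-byte size and marked
__attribute__((packed)), so that user-space analysis tools can index the trace
stream by fixed-size records and see an identical layout across compilers and
architectures. A compile-time check along these lines would catch an accidental
size change (a sketch, not part of the patch; a kernel tree of this vintage
would spell it as BUILD_BUG_ON() rather than C11 _Static_assert):

    /* Sizes follow from the field layouts above; the one-byte
     * forced/__uflags bitfield pair at the top of st_completion_data
     * is assumed from the elided context lines. */
    _Static_assert(sizeof(struct st_release_data) == 16,
                   "sched_trace payloads must stay 16 bytes");
    _Static_assert(sizeof(struct st_completion_data) == 16,
                   "sched_trace payloads must stay 16 bytes");
    _Static_assert(sizeof(struct st_nv_interrupt_begin_data) == 16,
                   "sched_trace payloads must stay 16 bytes");
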
83#define DATA(x) struct st_ ## x ## _data x; 156#define DATA(x) struct st_ ## x ## _data x;
84 157
85typedef enum { 158typedef enum {
86 ST_NAME = 1, /* Start at one, so that we can spot 159 ST_NAME = 1, /* Start at one, so that we can spot
87 * uninitialized records. */ 160 * uninitialized records. */
88 ST_PARAM, 161 ST_PARAM,
89 ST_RELEASE, 162 ST_RELEASE,
90 ST_ASSIGNED, 163 ST_ASSIGNED,
@@ -94,7 +167,19 @@ typedef enum {
94 ST_BLOCK, 167 ST_BLOCK,
95 ST_RESUME, 168 ST_RESUME,
96 ST_ACTION, 169 ST_ACTION,
97 ST_SYS_RELEASE 170 ST_SYS_RELEASE,
171 ST_TASKLET_RELEASE,
172 ST_TASKLET_BEGIN,
173 ST_TASKLET_END,
174 ST_WORK_RELEASE,
175 ST_WORK_BEGIN,
176 ST_WORK_END,
177 ST_EFF_PRIO_CHANGE,
178 ST_NV_INTERRUPT_BEGIN,
179 ST_NV_INTERRUPT_END,
180
181 ST_PREDICTION_ERR,
182 ST_MIGRATION,
98} st_event_record_type_t; 183} st_event_record_type_t;
99 184
100struct st_event_record { 185struct st_event_record {
@@ -113,8 +198,20 @@ struct st_event_record {
113 DATA(resume); 198 DATA(resume);
114 DATA(action); 199 DATA(action);
115 DATA(sys_release); 200 DATA(sys_release);
201 DATA(tasklet_release);
202 DATA(tasklet_begin);
203 DATA(tasklet_end);
204 DATA(work_release);
205 DATA(work_begin);
206 DATA(work_end);
207 DATA(effective_priority_change);
208 DATA(nv_interrupt_begin);
209 DATA(nv_interrupt_end);
210
211 DATA(prediction_err);
212 DATA(migration);
116 } data; 213 } data;
117}; 214} __attribute__((packed));
118 215
119#undef DATA 216#undef DATA
120 217
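
Because hdr.type selects the active union member, a reader can walk the trace
stream record by record. A minimal user-space decoding sketch (the
st_trace_header field names -- type, cpu, pid, job -- are assumed from the
elided context above):

    #include <stdio.h>

    static void print_record(const struct st_event_record *rec)
    {
            switch (rec->hdr.type) {
            case ST_COMPLETION:
                    printf("completion at %llu, %u NV interrupts during job\n",
                           (unsigned long long) rec->data.completion.when,
                           (unsigned) rec->data.completion.nv_int_count);
                    break;
            case ST_NV_INTERRUPT_BEGIN:
                    printf("NV interrupt on device %u at %llu\n",
                           (unsigned) rec->data.nv_interrupt_begin.device,
                           (unsigned long long) rec->data.nv_interrupt_begin.when);
                    break;
            default:
                    break; /* remaining record types omitted for brevity */
            }
    }
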
@@ -129,6 +226,8 @@ struct st_event_record {
129 ft_event1(id, callback, task) 226 ft_event1(id, callback, task)
130#define SCHED_TRACE2(id, callback, task, xtra) \ 227#define SCHED_TRACE2(id, callback, task, xtra) \
131 ft_event2(id, callback, task, xtra) 228 ft_event2(id, callback, task, xtra)
229#define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \
230 ft_event3(id, callback, task, xtra1, xtra2)
132 231
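
SCHED_TRACE3 extends the existing one- and two-argument wrappers to
Feather-Trace's three-argument event hook. For illustration (not part of the
patch), with the macros defined further below:

    /*
     *     sched_trace_work_end(t, e, flushed)
     * expands to
     *     ft_event3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end,
     *               t, e, flushed)
     * so the do_sched_trace_work_end() callback runs only while the
     * event with ID SCHED_TRACE_BASE_ID + 16 is enabled.
     */
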
133/* provide prototypes; needed on sparc64 */ 232/* provide prototypes; needed on sparc64 */
134#ifndef NO_TASK_TRACE_DECLS 233#ifndef NO_TASK_TRACE_DECLS
@@ -155,12 +254,58 @@ feather_callback void do_sched_trace_action(unsigned long id,
155feather_callback void do_sched_trace_sys_release(unsigned long id, 254feather_callback void do_sched_trace_sys_release(unsigned long id,
156 lt_t* start); 255 lt_t* start);
157 256
257
258feather_callback void do_sched_trace_tasklet_release(unsigned long id,
259 struct task_struct* owner);
260feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
261 struct task_struct* owner);
262feather_callback void do_sched_trace_tasklet_end(unsigned long id,
263 struct task_struct* owner,
264 unsigned long flushed);
265
266feather_callback void do_sched_trace_work_release(unsigned long id,
267 struct task_struct* owner);
268feather_callback void do_sched_trace_work_begin(unsigned long id,
269 struct task_struct* owner,
270 struct task_struct* exe);
271feather_callback void do_sched_trace_work_end(unsigned long id,
272 struct task_struct* owner,
273 struct task_struct* exe,
274 unsigned long flushed);
275
276feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
277 struct task_struct* task,
278 struct task_struct* inh);
279
280feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
281 u32 device);
282feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
283 unsigned long unused);
284
285feather_callback void do_sched_trace_prediction_err(unsigned long id,
286 struct task_struct* task,
287 gpu_migration_dist_t* distance,
288 fp_t* rel_err);
289
290
291
292
293
294feather_callback void do_sched_trace_migration(unsigned long id,
295 struct task_struct* task,
296 struct migration_info* mig_info);
297
298
299/* returns true if we're tracing an interrupt on current CPU */
300/* int is_interrupt_tracing_active(void); */
301
158#endif 302#endif
159 303
160#else 304#else
161 305
162#define SCHED_TRACE(id, callback, task) /* no tracing */ 306#define SCHED_TRACE(id, callback, task) /* no tracing */
163#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ 307#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
308#define SCHED_TRACE3(id, callback, task, xtra1, xtra2)
164 309
165#endif 310#endif
166 311
@@ -252,6 +397,41 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
252 trace_litmus_sys_release(when); \ 397 trace_litmus_sys_release(when); \
253 } while (0) 398 } while (0)
254 399
400#define sched_trace_tasklet_release(t) \
401 SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t)
402
403#define sched_trace_tasklet_begin(t) \
404 SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t)
405
406#define sched_trace_tasklet_end(t, flushed) \
407 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed)
408
409
410#define sched_trace_work_release(t) \
411 SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t)
412
413#define sched_trace_work_begin(t, e) \
414 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e)
415
416#define sched_trace_work_end(t, e, flushed) \
417 SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed)
418
419
420#define sched_trace_eff_prio_change(t, inh) \
421 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh)
422
423
424#define sched_trace_nv_interrupt_begin(d) \
425 SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d)
426#define sched_trace_nv_interrupt_end(d) \
427 SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d)
428
429#define sched_trace_prediction_err(t, dist, rel_err) \
430 SCHED_TRACE3(SCHED_TRACE_BASE_ID + 20, do_sched_trace_prediction_err, t, dist, rel_err)
431
432#define sched_trace_migration(t, mig_info) \
433 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 21, do_sched_trace_migration, t, mig_info)
434
255#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ 435#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
256 436
257#endif /* __KERNEL__ */ 437#endif /* __KERNEL__ */
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h
new file mode 100644
index 000000000000..e70e45e4cf51
--- /dev/null
+++ b/include/litmus/sched_trace_external.h
@@ -0,0 +1,78 @@
1/*
2 * sched_trace_external.h -- record scheduler events to a byte stream for offline analysis.
3 */
4#ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_
5#define _LINUX_SCHED_TRACE_EXTERNAL_H_
6
7
8#ifdef CONFIG_SCHED_TASK_TRACE
9extern void __sched_trace_tasklet_begin_external(struct task_struct* t);
10static inline void sched_trace_tasklet_begin_external(struct task_struct* t)
11{
12 __sched_trace_tasklet_begin_external(t);
13}
14
15extern void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed);
16static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed)
17{
18 __sched_trace_tasklet_end_external(t, flushed);
19}
20
21extern void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e);
22static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e)
23{
24 __sched_trace_work_begin_external(t, e);
25}
26
27extern void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f);
28static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f)
29{
30 __sched_trace_work_end_external(t, e, f);
31}
32
33#ifdef CONFIG_LITMUS_NVIDIA
34extern void __sched_trace_nv_interrupt_begin_external(u32 device);
35static inline void sched_trace_nv_interrupt_begin_external(u32 device)
36{
37 __sched_trace_nv_interrupt_begin_external(device);
38}
39
40extern void __sched_trace_nv_interrupt_end_external(u32 device);
41static inline void sched_trace_nv_interrupt_end_external(u32 device)
42{
43 __sched_trace_nv_interrupt_end_external(device);
44}
45#endif
46
47#else
48
49// no tracing.
50static inline void sched_trace_tasklet_begin_external(struct task_struct* t){}
51static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){}
52static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){}
53static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){}
54
55#ifdef CONFIG_LITMUS_NVIDIA
56static inline void sched_trace_nv_interrupt_begin_external(u32 device){}
57static inline void sched_trace_nv_interrupt_end_external(u32 device){}
58#endif
59
60#endif
61
62
63#ifdef CONFIG_LITMUS_NVIDIA
64
65#define EX_TS(evt) \
66extern void __##evt(void); \
67static inline void EX_##evt(void) { __##evt(); }
68
69EX_TS(TS_NV_TOPISR_START)
70EX_TS(TS_NV_TOPISR_END)
71EX_TS(TS_NV_BOTISR_START)
72EX_TS(TS_NV_BOTISR_END)
73EX_TS(TS_NV_RELEASE_BOTISR_START)
74EX_TS(TS_NV_RELEASE_BOTISR_END)
75
76#endif
77
78#endif
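
The EX_TS() helper exists so that translation units which cannot use
litmus/trace.h's timestamp macros directly (presumably the glue around the
binary NVIDIA driver) can still fire the events through out-of-line wrappers.
The expansion, shown for illustration only:

    /* EX_TS(TS_NV_TOPISR_START) expands to: */
    extern void __TS_NV_TOPISR_START(void);
    static inline void EX_TS_NV_TOPISR_START(void)
    {
            __TS_NV_TOPISR_START();
    }
    /* The out-of-line __TS_* bodies, defined in the kernel proper,
     * invoke the corresponding TIMESTAMP() macros from litmus/trace.h. */
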
diff --git a/include/litmus/signal.h b/include/litmus/signal.h
new file mode 100644
index 000000000000..38c3207951e0
--- /dev/null
+++ b/include/litmus/signal.h
@@ -0,0 +1,47 @@
1#ifndef LITMUS_SIGNAL_H
2#define LITMUS_SIGNAL_H
3
4#ifdef __KERNEL__
5#include <linux/signal.h>
6#else
7#include <signal.h>
8#endif
9
10/* Signals used by Litmus to asynchronously communicate events
11 * to real-time tasks.
12 *
13 * Signal values overlap with [SIGRTMIN, SIGRTMAX], so beware of
14 * application-level conflicts when dealing with COTS user-level
15 * code.
16 */
17
18/* Sent to a Litmus task when all of the following conditions are true:
19 * (1) The task has exhausted its budget.
20 * (2) budget_signal_policy is QUANTUM_SIGNALS or PRECISE_SIGNALS.
21 *
22 * Note: If a task does not have a registered handler for SIG_BUDGET,
23 * the signal will cause the task to terminate (default action).
24 */
25
26/* Assigned values start at SIGRTMAX and decrease, hopefully reducing
27 * the likelihood of user-level conflicts.
28 */
29#define SIG_BUDGET (SIGRTMAX - 0)
30
31/*
32Future signals could include:
33
34#define SIG_DEADLINE_MISS (SIGRTMAX - 1)
35#define SIG_CRIT_LEVEL_CHANGE (SIGRTMAX - 2)
36*/
37
38#define SIGLITMUSMIN SIG_BUDGET
39
40#ifdef __KERNEL__
41#if (SIGLITMUSMIN < SIGRTMIN)
42/* no compile-time check in user-space since SIGRTMIN may be a variable. */
43#error "Too many LITMUS^RT signals!"
44#endif
45#endif
46
47#endif
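
Since the default action would terminate the task, a program that enables
budget signals must install a handler before its budget can first be
exhausted. A minimal user-space sketch (assumed usage, not part of the patch;
the include path is an assumption):

    #include <signal.h>
    #include <string.h>
    #include <litmus/signal.h>   /* SIG_BUDGET */

    static volatile sig_atomic_t budget_exhausted_flag;

    static void on_budget(int sig)
    {
            (void) sig;
            budget_exhausted_flag = 1;   /* async-signal-safe work only */
    }

    int main(void)
    {
            struct sigaction sa;
            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = on_budget;
            sigemptyset(&sa.sa_mask);
            if (sigaction(SIG_BUDGET, &sa, NULL))
                    return 1;
            /* ... enter real-time mode with budget_signal_policy set to
             * QUANTUM_SIGNALS or PRECISE_SIGNALS, run jobs, and poll
             * budget_exhausted_flag at convenient points ... */
            return 0;
    }
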
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 8ad4966c602e..15bd645d2466 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -137,9 +137,34 @@ feather_callback void save_timestamp_hide_irq(unsigned long event);
137#define TS_EXIT_NP_START TIMESTAMP(150) 137#define TS_EXIT_NP_START TIMESTAMP(150)
138#define TS_EXIT_NP_END TIMESTAMP(151) 138#define TS_EXIT_NP_END TIMESTAMP(151)
139 139
140#ifdef CONFIG_LITMUS_DGL_SUPPORT
141#define TS_DGL_LOCK_START TIMESTAMP(175)
142#define TS_DGL_LOCK_SUSPEND TIMESTAMP(176)
143#define TS_DGL_LOCK_RESUME TIMESTAMP(177)
144#define TS_DGL_LOCK_END TIMESTAMP(178)
145#define TS_DGL_UNLOCK_START TIMESTAMP(185)
146#define TS_DGL_UNLOCK_END TIMESTAMP(186)
147#endif
148
140#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) 149#define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c)
141#define TS_SEND_RESCHED_END TIMESTAMP_IN_IRQ(191) 150#define TS_SEND_RESCHED_END TIMESTAMP_IN_IRQ(191)
142 151
152#ifdef CONFIG_LITMUS_NVIDIA
153#define TS_NV_TOPISR_START TIMESTAMP(200)
154#define TS_NV_TOPISR_END TIMESTAMP(201)
155
156#define TS_NV_BOTISR_START TIMESTAMP(202)
157#define TS_NV_BOTISR_END TIMESTAMP(203)
158
159#define TS_NV_RELEASE_BOTISR_START TIMESTAMP(204)
160#define TS_NV_RELEASE_BOTISR_END TIMESTAMP(205)
161#endif
162
163#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
164#define TS_NV_SCHED_BOTISR_START TIMESTAMP(206)
165#define TS_NV_SCHED_BOTISR_END TIMESTAMP(207)
166#endif
167
143#define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) 168#define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when))
144 169
145#endif /* !_SYS_TRACE_H_ */ 170#endif /* !_SYS_TRACE_H_ */
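
The DGL timestamps are intended to bracket the dynamic-group-lock syscall
paths, with the SUSPEND/RESUME pair surrounding any blocking. A hedged sketch;
the syscall signature and helper are assumptions, since neither appears in
this diff:

    asmlinkage long sys_litmus_dgl_lock(void __user *usr_ods, int dgl_size)
    {
            long err;
            TS_DGL_LOCK_START;
            /* hypothetical helper; it would emit TS_DGL_LOCK_SUSPEND and
             * TS_DGL_LOCK_RESUME around any sleep while waiting for the
             * requested group of locks */
            err = do_litmus_dgl_lock(usr_ods, dgl_size);
            TS_DGL_LOCK_END;
            return err;
    }
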
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 94264c27d9ac..d1fe84a5d574 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -17,5 +17,11 @@
17#define __NR_wait_for_ts_release __LSC(9) 17#define __NR_wait_for_ts_release __LSC(9)
18#define __NR_release_ts __LSC(10) 18#define __NR_release_ts __LSC(10)
19#define __NR_null_call __LSC(11) 19#define __NR_null_call __LSC(11)
20#define __NR_litmus_dgl_lock __LSC(12)
21#define __NR_litmus_dgl_unlock __LSC(13)
20 22
21#define NR_litmus_syscalls 12 23#define __NR_set_aux_tasks __LSC(14)
24
25#define __NR_sched_trace_event __LSC(15)
26
27#define NR_litmus_syscalls 16
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index d5ced0d2642c..75f9fcb897f5 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -29,5 +29,15 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
29__SYSCALL(__NR_release_ts, sys_release_ts) 29__SYSCALL(__NR_release_ts, sys_release_ts)
30#define __NR_null_call __LSC(11) 30#define __NR_null_call __LSC(11)
31__SYSCALL(__NR_null_call, sys_null_call) 31__SYSCALL(__NR_null_call, sys_null_call)
32#define __NR_litmus_dgl_lock __LSC(12)
33__SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock)
34#define __NR_litmus_dgl_unlock __LSC(13)
35__SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock)
32 36
33#define NR_litmus_syscalls 12 37#define __NR_set_aux_tasks __LSC(14)
38__SYSCALL(__NR_set_aux_tasks, sys_set_aux_tasks)
39
40#define __NR_sched_trace_event __LSC(15)
41__SYSCALL(__NR_sched_trace_event, sys_sched_trace_event)
42
43#define NR_litmus_syscalls 16
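
liblitmus normally wraps these numbers, but they can also be exercised
directly via syscall(2). A hedged user-space sketch; the argument lists are
defined by the corresponding sys_* implementations, which this diff does not
show:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <litmus/unistd_64.h>   /* __NR_* values; include path assumed */

    int main(void)
    {
            /* null_call is the conventional syscall-overhead probe; NULL
             * stands in for its (assumed) optional timestamp output. */
            return syscall(__NR_null_call, NULL) ? 1 : 0;
    }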