author     Glenn Elliott <gelliott@cs.unc.edu>  2012-05-26 17:29:58 -0400
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-05-26 17:29:58 -0400
commit     a463f9a9e04385f0729f7435a0a6dff7d89b25de (patch)
tree       00ff42c305926c800e18b13df8440a4de1a1a041 /include/litmus
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
GPUSync patch for Litmus 2012.1.
Diffstat (limited to 'include/litmus')
-rw-r--r--  include/litmus/binheap.h               207
-rw-r--r--  include/litmus/edf_common.h             12
-rw-r--r--  include/litmus/fdso.h                   14
-rw-r--r--  include/litmus/fpmath.h                145
-rw-r--r--  include/litmus/gpu_affinity.h           49
-rw-r--r--  include/litmus/ikglp_lock.h            160
-rw-r--r--  include/litmus/kexclu_affinity.h        35
-rw-r--r--  include/litmus/kfmlp_lock.h             97
-rw-r--r--  include/litmus/litmus.h                  9
-rw-r--r--  include/litmus/litmus_softirq.h        199
-rw-r--r--  include/litmus/locking.h               142
-rw-r--r--  include/litmus/nvidia_info.h            46
-rw-r--r--  include/litmus/preempt.h                 2
-rw-r--r--  include/litmus/rsm_lock.h               54
-rw-r--r--  include/litmus/rt_param.h              100
-rw-r--r--  include/litmus/sched_plugin.h           76
-rw-r--r--  include/litmus/sched_trace.h           218
-rw-r--r--  include/litmus/sched_trace_external.h   78
-rw-r--r--  include/litmus/trace.h                  34
-rw-r--r--  include/litmus/unistd_32.h               5
-rw-r--r--  include/litmus/unistd_64.h               9
21 files changed, 1658 insertions, 33 deletions
diff --git a/include/litmus/binheap.h b/include/litmus/binheap.h
new file mode 100644
index 000000000000..9e966e3886cb
--- /dev/null
+++ b/include/litmus/binheap.h
@@ -0,0 +1,207 @@
#ifndef LITMUS_BINARY_HEAP_H
#define LITMUS_BINARY_HEAP_H

#include <linux/kernel.h>

/**
 * Simple binary heap with add, arbitrary delete, delete_root, and top
 * operations.
 *
 * Style meant to conform with list.h.
 *
 * Motivation: Linux's prio_heap.h is of fixed size. Litmus's binomial
 * heap may be overkill (and perhaps not general enough) for some applications.
 *
 * Note: In order to make node swaps fast, a node inserted with a data pointer
 * may not always hold said data pointer. This is similar to the binomial heap
 * implementation. This does make node deletion tricky, since we have to find
 * (1) the node that currently holds the data pointer to delete, and (2) the
 * node that was originally inserted with said data pointer. These have to be
 * coalesced into a single node before removal (see usage of
 * __binheap_safe_swap()). We have to track node references to accomplish this.
 */

struct binheap_node {
        void    *data;
        struct binheap_node *parent;
        struct binheap_node *left;
        struct binheap_node *right;

        /* pointer to binheap_node that holds *data for which this binheap_node
         * was originally inserted. (*data "owns" this node)
         */
        struct binheap_node *ref;
        struct binheap_node **ref_ptr;
};

/**
 * Signature of comparator function. Assumed 'less-than' (min-heap).
 * Pass in 'greater-than' for max-heap.
 *
 * TODO: Consider macro-based implementation that allows comparator to be
 * inlined (similar to Linux red/black tree) for greater efficiency.
 */
typedef int (*binheap_order_t)(struct binheap_node *a,
                               struct binheap_node *b);


struct binheap_handle {
        struct binheap_node *root;

        /* pointer to node to take next inserted child */
        struct binheap_node *next;

        /* pointer to last node in complete binary tree */
        struct binheap_node *last;

        /* comparator function pointer */
        binheap_order_t compare;
};


#define BINHEAP_POISON ((void*)(0xdeadbeef))


/**
 * binheap_entry - get the struct for this heap node.
 * Only valid when called upon heap nodes other than the root handle.
 * @ptr:    the heap node.
 * @type:   the type of struct pointed to by binheap_node::data.
 * @member: unused.
 */
#define binheap_entry(ptr, type, member) \
        ((type *)((ptr)->data))

/**
 * binheap_node_container - get the struct that contains this node.
 * Only valid when called upon heap nodes other than the root handle.
 * @ptr:    the heap node.
 * @type:   the type of struct the node is embedded in.
 * @member: the name of the binheap_node within the (type) struct.
 */
#define binheap_node_container(ptr, type, member) \
        container_of((ptr), type, member)

/**
 * binheap_top_entry - get the struct for the node at the top of the heap.
 * Only valid when called upon the heap handle node.
 * @ptr:    the special heap-handle node.
 * @type:   the type of the struct the head is embedded in.
 * @member: the name of the binheap_node within the (type) struct.
 */
#define binheap_top_entry(ptr, type, member) \
        binheap_entry((ptr)->root, type, member)

/**
 * binheap_delete_root - remove the root element from the heap.
 * @handle: handle to the heap.
 * @type:   the type of the struct the head is embedded in.
 * @member: the name of the binheap_node within the (type) struct.
 */
#define binheap_delete_root(handle, type, member) \
        __binheap_delete_root((handle), &((type *)((handle)->root->data))->member)

/**
 * binheap_delete - remove an arbitrary element from the heap.
 * @to_delete: pointer to node to be removed.
 * @handle:    handle to the heap.
 */
#define binheap_delete(to_delete, handle) \
        __binheap_delete((to_delete), (handle))

/**
 * binheap_add - insert an element into the heap.
 * @new_node: node to add.
 * @handle:   handle to the heap.
 * @type:     the type of the struct the head is embedded in.
 * @member:   the name of the binheap_node within the (type) struct.
 */
#define binheap_add(new_node, handle, type, member) \
        __binheap_add((new_node), (handle), container_of((new_node), type, member))

/**
 * binheap_decrease - re-evaluate the position of a node (based upon its
 * original data pointer).
 * @orig_node: node that was associated with the data pointer
 *             (whose value has changed) when said pointer was
 *             added to the heap.
 * @handle:    handle to the heap.
 */
#define binheap_decrease(orig_node, handle) \
        __binheap_decrease((orig_node), (handle))

#define BINHEAP_NODE_INIT() { NULL, BINHEAP_POISON, NULL, NULL, NULL, NULL }

#define BINHEAP_NODE(name) \
        struct binheap_node name = BINHEAP_NODE_INIT()


static inline void INIT_BINHEAP_NODE(struct binheap_node *n)
{
        n->data = NULL;
        n->parent = BINHEAP_POISON;
        n->left = NULL;
        n->right = NULL;
        n->ref = NULL;
        n->ref_ptr = NULL;
}

static inline void INIT_BINHEAP_HANDLE(
        struct binheap_handle *handle,
        binheap_order_t compare)
{
        handle->root = NULL;
        handle->next = NULL;
        handle->last = NULL;
        handle->compare = compare;
}

/* Returns true (1) if binheap is empty. */
static inline int binheap_empty(struct binheap_handle *handle)
{
        return (handle->root == NULL);
}

/* Returns true (1) if binheap node is in a heap. */
static inline int binheap_is_in_heap(struct binheap_node *node)
{
        return (node->parent != BINHEAP_POISON);
}


int binheap_is_in_this_heap(struct binheap_node *node, struct binheap_handle* heap);



void __binheap_add(struct binheap_node *new_node,
                   struct binheap_handle *handle,
                   void *data);


/**
 * Removes the root node from the heap. The node is removed after coalescing
 * the binheap_node with its original data pointer at the root of the tree.
 *
 * The 'last' node in the tree is then swapped up to the root and bubbled
 * down.
 */
void __binheap_delete_root(struct binheap_handle *handle,
                           struct binheap_node *container);

/**
 * Delete an arbitrary node. Bubble node to delete up to the root,
 * and then delete the root.
 */
void __binheap_delete(
        struct binheap_node *node_to_delete,
        struct binheap_handle *handle);

/**
 * Bubble up a node whose priority value has decreased.
 */
void __binheap_decrease(struct binheap_node *orig_node,
                        struct binheap_handle *handle);


#endif
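
A minimal usage sketch of this API (illustrative only -- 'struct my_task',
'prio', and the comparator below are hypothetical names, not part of this
patch):

        struct my_task {
                int prio;
                struct binheap_node heap_node;  /* embedded, list.h-style */
        };

        /* 'less-than' comparator, so the heap acts as a min-heap on prio;
         * each node's data pointer references a containing struct. */
        static int my_task_order(struct binheap_node *a, struct binheap_node *b)
        {
                struct my_task *ta = binheap_entry(a, struct my_task, heap_node);
                struct my_task *tb = binheap_entry(b, struct my_task, heap_node);
                return ta->prio < tb->prio;
        }

        static struct binheap_handle my_heap;

        static void my_heap_example(struct my_task *t)
        {
                INIT_BINHEAP_HANDLE(&my_heap, my_task_order);
                INIT_BINHEAP_NODE(&t->heap_node);
                binheap_add(&t->heap_node, &my_heap, struct my_task, heap_node);
                if (!binheap_empty(&my_heap)) {
                        /* peek at, then remove, the minimum-prio element */
                        struct my_task *top =
                                binheap_top_entry(&my_heap, struct my_task, heap_node);
                        (void)top; /* 'top' would be dispatched here */
                        binheap_delete_root(&my_heap, struct my_task, heap_node);
                }
        }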
diff --git a/include/litmus/edf_common.h b/include/litmus/edf_common.h
index bbaf22ea7f12..63dff7efe8fb 100644
--- a/include/litmus/edf_common.h
+++ b/include/litmus/edf_common.h
@@ -20,6 +20,18 @@ int edf_higher_prio(struct task_struct* first,
 
 int edf_ready_order(struct bheap_node* a, struct bheap_node* b);
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+/* binheap_nodes must be embedded within 'struct litmus_lock' */
+int edf_max_heap_order(struct binheap_node *a, struct binheap_node *b);
+int edf_min_heap_order(struct binheap_node *a, struct binheap_node *b);
+int edf_max_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b);
+int edf_min_heap_base_priority_order(struct binheap_node *a, struct binheap_node *b);
+
+int __edf_higher_prio(struct task_struct* first, comparison_mode_t first_mode,
+                      struct task_struct* second, comparison_mode_t second_mode);
+
+#endif
+
 int edf_preemption_needed(rt_domain_t* rt, struct task_struct *t);
 
 #endif
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index caf2a1e6918c..1f5d3bd1a1db 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -20,7 +20,16 @@ typedef enum {
 	FMLP_SEM	= 0,
 	SRP_SEM		= 1,
 
-	MAX_OBJ_TYPE	= 1
+	RSM_MUTEX	= 2,
+	IKGLP_SEM	= 3,
+	KFMLP_SEM	= 4,
+
+	IKGLP_SIMPLE_GPU_AFF_OBS = 5,
+	IKGLP_GPU_AFF_OBS = 6,
+	KFMLP_SIMPLE_GPU_AFF_OBS = 7,
+	KFMLP_GPU_AFF_OBS = 8,
+
+	MAX_OBJ_TYPE	= 8
 } obj_type_t;
 
 struct inode_obj_id {
@@ -64,8 +73,11 @@ static inline void* od_lookup(int od, obj_type_t type)
 }
 
 #define lookup_fmlp_sem(od)((struct pi_semaphore*)  od_lookup(od, FMLP_SEM))
+#define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM))
 #define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
 #define lookup_ics(od)     ((struct ics*)           od_lookup(od, ICS_ID))
 
+#define lookup_rsm_mutex(od)((struct litmus_lock*)  od_lookup(od, RSM_MUTEX))
+
 
 #endif
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
new file mode 100644
index 000000000000..04d4bcaeae96
--- /dev/null
+++ b/include/litmus/fpmath.h
@@ -0,0 +1,145 @@
#ifndef __FP_MATH_H__
#define __FP_MATH_H__

#ifndef __KERNEL__
#include <stdint.h>
#define abs(x) (((x) < 0) ? -(x) : (x))
#endif

// Use 64-bit because we want to track things at the nanosecond scale.
// This can lead to very large numbers.
typedef int64_t fpbuf_t;
typedef struct
{
        fpbuf_t val;
} fp_t;

#define FP_SHIFT 10
#define ROUND_BIT (FP_SHIFT - 1)

#define _fp(x) ((fp_t) {x})

#ifdef __KERNEL__
static const fp_t LITMUS_FP_ZERO = {.val = 0};
static const fp_t LITMUS_FP_ONE = {.val = (1 << FP_SHIFT)};
#endif

static inline fp_t FP(fpbuf_t x)
{
        return _fp(((fpbuf_t) x) << FP_SHIFT);
}

/* divide two integers to obtain a fixed point value */
static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
{
        return _fp(FP(a).val / (b));
}

static inline fpbuf_t _point(fp_t x)
{
        return (x.val % (1 << FP_SHIFT));
}

#define fp2str(x) x.val
/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
#define _FP_ "%ld/1024"

static inline fpbuf_t _floor(fp_t x)
{
        return x.val >> FP_SHIFT;
}

/* FIXME: negative rounding */
static inline fpbuf_t _round(fp_t x)
{
        return _floor(x) + ((x.val >> ROUND_BIT) & 1);
}

/* multiply two fixed point values */
static inline fp_t _mul(fp_t a, fp_t b)
{
        return _fp((a.val * b.val) >> FP_SHIFT);
}

static inline fp_t _div(fp_t a, fp_t b)
{
#if !defined(__KERNEL__) && !defined(unlikely)
#define unlikely(x) (x)
#define DO_UNDEF_UNLIKELY
#endif
        /* try not to overflow */
        if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) ))
                return _fp((a.val / b.val) << FP_SHIFT);
        else
                return _fp((a.val << FP_SHIFT) / b.val);
#ifdef DO_UNDEF_UNLIKELY
#undef unlikely
#undef DO_UNDEF_UNLIKELY
#endif
}

static inline fp_t _add(fp_t a, fp_t b)
{
        return _fp(a.val + b.val);
}

static inline fp_t _sub(fp_t a, fp_t b)
{
        return _fp(a.val - b.val);
}

static inline fp_t _neg(fp_t x)
{
        return _fp(-x.val);
}

static inline fp_t _abs(fp_t x)
{
        return _fp(abs(x.val));
}

/* works the same as casting float/double to integer */
static inline fpbuf_t _fp_to_integer(fp_t x)
{
        return _floor(_abs(x)) * ((x.val > 0) ? 1 : -1);
}

static inline fp_t _integer_to_fp(fpbuf_t x)
{
        return _frac(x, 1);
}

static inline int _leq(fp_t a, fp_t b)
{
        return a.val <= b.val;
}

static inline int _geq(fp_t a, fp_t b)
{
        return a.val >= b.val;
}

static inline int _lt(fp_t a, fp_t b)
{
        return a.val < b.val;
}

static inline int _gt(fp_t a, fp_t b)
{
        return a.val > b.val;
}

static inline int _eq(fp_t a, fp_t b)
{
        return a.val == b.val;
}

static inline fp_t _max(fp_t a, fp_t b)
{
        if (a.val < b.val)
                return b;
        else
                return a;
}
#endif
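
With FP_SHIFT = 10, values are plain 64-bit integers scaled by 1024, so all
arithmetic stays in integer operations. A small worked example of the
intended use (illustrative only):

        fp_t third = _frac(1, 3);         /* 1024/3 = 341, i.e. ~0.333        */
        fp_t six   = _integer_to_fp(6);   /* 6144/1024                        */
        fp_t two   = _mul(third, six);    /* (341*6144)>>10 = 2046, ~1.998    */
        fpbuf_t r  = _round(two);         /* rounds to nearest: 2             */
        fpbuf_t f  = _fp_to_integer(two); /* truncates toward zero: 1         */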
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
new file mode 100644
index 000000000000..6b3fb8b28745
--- /dev/null
+++ b/include/litmus/gpu_affinity.h
@@ -0,0 +1,49 @@
#ifndef LITMUS_GPU_AFFINITY_H
#define LITMUS_GPU_AFFINITY_H

#include <litmus/rt_param.h>
#include <litmus/sched_plugin.h>
#include <litmus/litmus.h>

void update_gpu_estimate(struct task_struct* t, lt_t observed);
gpu_migration_dist_t gpu_migration_distance(int a, int b);

static inline void reset_gpu_tracker(struct task_struct* t)
{
        t->rt_param.accum_gpu_time = 0;
}

static inline void start_gpu_tracker(struct task_struct* t)
{
        t->rt_param.gpu_time_stamp = litmus_clock();
}

static inline void stop_gpu_tracker(struct task_struct* t)
{
        lt_t now = litmus_clock();
        t->rt_param.accum_gpu_time += (now - t->rt_param.gpu_time_stamp);
}

static inline lt_t get_gpu_time(struct task_struct* t)
{
        return t->rt_param.accum_gpu_time;
}

static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist)
{
        int i;
        fpbuf_t temp = _fp_to_integer(t->rt_param.gpu_migration_est[dist].est);
        lt_t val = (temp >= 0) ? temp : 0;  // never allow negative estimates...

        WARN_ON(temp < 0);

        // lower-bound a distant migration to be at least equal to the level
        // below it.
        for (i = dist-1; (val == 0) && (i >= MIG_LOCAL); --i) {
                val = _fp_to_integer(t->rt_param.gpu_migration_est[i].est);
        }

        return ((val > 0) ? val : dist+1);
}

#endif
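
The tracker is intended to bracket a task's actual GPU use so that only
accumulated GPU execution time feeds the per-distance estimator. A hedged
sketch of the call pattern (the surrounding control flow is illustrative,
not part of this patch):

        /* around a GPU critical section of task t */
        reset_gpu_tracker(t);
        start_gpu_tracker(t);
        /* ... t uses the GPU; stop/start pairs may bracket suspensions ... */
        stop_gpu_tracker(t);

        /* feed the observed execution time back into the estimator for
         * this access's migration distance */
        update_gpu_estimate(t, get_gpu_time(t));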
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
new file mode 100644
index 000000000000..af6f15178cb1
--- /dev/null
+++ b/include/litmus/ikglp_lock.h
@@ -0,0 +1,160 @@
#ifndef LITMUS_IKGLP_H
#define LITMUS_IKGLP_H

#include <litmus/litmus.h>
#include <litmus/binheap.h>
#include <litmus/locking.h>

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
#include <litmus/kexclu_affinity.h>

struct ikglp_affinity;
#endif

typedef struct ikglp_heap_node
{
        struct task_struct *task;
        struct binheap_node node;
} ikglp_heap_node_t;

struct fifo_queue;
struct ikglp_wait_state;

typedef struct ikglp_donee_heap_node
{
        struct task_struct *task;
        struct fifo_queue *fq;
        struct ikglp_wait_state *donor_info;  // cross-linked with the donor's ikglp_wait_state_t

        struct binheap_node node;
} ikglp_donee_heap_node_t;

// Maintains the state of a request as it goes through the IKGLP
typedef struct ikglp_wait_state {
        struct task_struct *task;  // pointer back to the requesting task

        // Data for while waiting in a FIFO queue
        wait_queue_t fq_node;
        ikglp_heap_node_t global_heap_node;
        ikglp_donee_heap_node_t donee_heap_node;

        // Data for while waiting in the PQ
        ikglp_heap_node_t pq_node;

        // Data for while waiting as a donor
        ikglp_donee_heap_node_t *donee_info;  // cross-linked with the donee's ikglp_donee_heap_node_t
        struct nested_info prio_donation;
        struct binheap_node node;
} ikglp_wait_state_t;

/* struct for semaphore with priority inheritance */
struct fifo_queue
{
        wait_queue_head_t wait;
        struct task_struct* owner;

        // used for bookkeeping
        ikglp_heap_node_t global_heap_node;
        ikglp_donee_heap_node_t donee_heap_node;

        struct task_struct* hp_waiter;
        int count; /* number of waiters + holder */

        struct nested_info nest;
};

struct ikglp_semaphore
{
        struct litmus_lock litmus_lock;

        raw_spinlock_t lock;
        raw_spinlock_t real_lock;

        int nr_replicas;  // AKA k
        int m;

        int max_fifo_len; // max length of a FIFO queue
        int nr_in_fifos;

        struct binheap_handle top_m;  // min-heap, base prio
        int top_m_size;               // number of nodes in top_m

        struct binheap_handle not_top_m;  // max-heap, base prio

        struct binheap_handle donees;            // min-heap, base prio
        struct fifo_queue *shortest_fifo_queue;  // pointer to shortest FIFO queue

        /* data structures for holding requests */
        struct fifo_queue *fifo_queues;        // array, nr_replicas in length
        struct binheap_handle priority_queue;  // max-heap, base prio
        struct binheap_handle donors;          // max-heap, base prio

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
        struct ikglp_affinity *aff_obs;
#endif
};

static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
{
        return container_of(lock, struct ikglp_semaphore, litmus_lock);
}

int ikglp_lock(struct litmus_lock* l);
int ikglp_unlock(struct litmus_lock* l);
int ikglp_close(struct litmus_lock* l);
void ikglp_free(struct litmus_lock* l);
struct litmus_lock* ikglp_new(int m, struct litmus_lock_ops*, void* __user arg);



#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)

struct ikglp_queue_info
{
        struct fifo_queue* q;
        lt_t estimated_len;
        int *nr_cur_users;
};

struct ikglp_affinity_ops
{
        struct fifo_queue* (*advise_enqueue)(struct ikglp_affinity* aff, struct task_struct* t);                // select FIFO
        ikglp_wait_state_t* (*advise_steal)(struct ikglp_affinity* aff, struct fifo_queue* dst);                // select waiter to steal from a FIFO
        ikglp_donee_heap_node_t* (*advise_donee_selection)(struct ikglp_affinity* aff, struct task_struct* t);  // select a donee
        ikglp_wait_state_t* (*advise_donor_to_fq)(struct ikglp_affinity* aff, struct fifo_queue* dst);          // select a donor to move to a FIFO

        void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);   // FIFO enqueue
        void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);   // FIFO dequeue
        void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);  // replica acquired
        void (*notify_freed)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);     // replica freed
        int (*replica_to_resource)(struct ikglp_affinity* aff, struct fifo_queue* fq);  // convert a replica # to a GPU (includes offsets and simult-user folding)
};

struct ikglp_affinity
{
        struct affinity_observer obs;
        struct ikglp_affinity_ops *ops;
        struct ikglp_queue_info *q_info;
        int *nr_cur_users_on_rsrc;
        int offset;
        int nr_simult;
        int nr_rsrc;
        int relax_max_fifo_len;
};

static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
{
        return container_of(aff_obs, struct ikglp_affinity, obs);
}

int ikglp_aff_obs_close(struct affinity_observer*);
void ikglp_aff_obs_free(struct affinity_observer*);
struct affinity_observer* ikglp_gpu_aff_obs_new(struct affinity_observer_ops*,
                                                void* __user arg);
struct affinity_observer* ikglp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
                                                       void* __user arg);
#endif



#endif
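
The affinity interface is split into "advise" hooks, which the IKGLP
consults whenever it must pick a FIFO queue, steal victim, donee, or donor,
and "notify" hooks, which only keep the observer's bookkeeping current. A
hedged sketch of how a GPU-aware observer might fill in the ops table (all
gpu_* function names are hypothetical):

        static struct ikglp_affinity_ops gpu_aff_ops = {
                .advise_enqueue         = gpu_advise_enqueue,
                .advise_steal           = gpu_advise_steal,
                .advise_donee_selection = gpu_advise_donee_selection,
                .advise_donor_to_fq     = gpu_advise_donor_to_fq,

                .notify_enqueue         = gpu_notify_enqueue,
                .notify_dequeue         = gpu_notify_dequeue,
                .notify_acquired        = gpu_notify_acquired,
                .notify_freed           = gpu_notify_freed,
                .replica_to_resource    = gpu_replica_to_resource,
        };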
diff --git a/include/litmus/kexclu_affinity.h b/include/litmus/kexclu_affinity.h
new file mode 100644
index 000000000000..f6355de49074
--- /dev/null
+++ b/include/litmus/kexclu_affinity.h
@@ -0,0 +1,35 @@
#ifndef LITMUS_AFF_OBS_H
#define LITMUS_AFF_OBS_H

#include <litmus/locking.h>

struct affinity_observer_ops;

struct affinity_observer
{
        struct affinity_observer_ops* ops;
        int type;
        int ident;

        struct litmus_lock* lock;  // the lock under observation
};

typedef int (*aff_obs_open_t)(struct affinity_observer* aff_obs,
                              void* __user arg);
typedef int (*aff_obs_close_t)(struct affinity_observer* aff_obs);
typedef void (*aff_obs_free_t)(struct affinity_observer* aff_obs);

struct affinity_observer_ops
{
        aff_obs_open_t open;
        aff_obs_close_t close;
        aff_obs_free_t deallocate;
};

struct litmus_lock* get_lock_from_od(int od);

void affinity_observer_new(struct affinity_observer* aff,
                           struct affinity_observer_ops* ops,
                           struct affinity_observer_args* args);

#endif
diff --git a/include/litmus/kfmlp_lock.h b/include/litmus/kfmlp_lock.h
new file mode 100644
index 000000000000..5f0aae6e6f42
--- /dev/null
+++ b/include/litmus/kfmlp_lock.h
@@ -0,0 +1,97 @@
#ifndef LITMUS_KFMLP_H
#define LITMUS_KFMLP_H

#include <litmus/litmus.h>
#include <litmus/locking.h>

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
#include <litmus/kexclu_affinity.h>

struct kfmlp_affinity;
#endif

/* struct for semaphore with priority inheritance */
struct kfmlp_queue
{
        wait_queue_head_t wait;
        struct task_struct* owner;
        struct task_struct* hp_waiter;
        int count; /* number of waiters + holder */
};

struct kfmlp_semaphore
{
        struct litmus_lock litmus_lock;

        spinlock_t lock;

        int num_resources; /* aka k */

        struct kfmlp_queue *queues;          /* array */
        struct kfmlp_queue *shortest_queue;  /* pointer to shortest queue */

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
        struct kfmlp_affinity *aff_obs;
#endif
};

static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
{
        return container_of(lock, struct kfmlp_semaphore, litmus_lock);
}

int kfmlp_lock(struct litmus_lock* l);
int kfmlp_unlock(struct litmus_lock* l);
int kfmlp_close(struct litmus_lock* l);
void kfmlp_free(struct litmus_lock* l);
struct litmus_lock* kfmlp_new(struct litmus_lock_ops*, void* __user arg);

#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)

struct kfmlp_queue_info
{
        struct kfmlp_queue* q;
        lt_t estimated_len;
        int *nr_cur_users;
};

struct kfmlp_affinity_ops
{
        struct kfmlp_queue* (*advise_enqueue)(struct kfmlp_affinity* aff, struct task_struct* t);
        struct task_struct* (*advise_steal)(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from);
        void (*notify_enqueue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
        void (*notify_dequeue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
        void (*notify_acquired)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
        void (*notify_freed)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
        int (*replica_to_resource)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq);
};

struct kfmlp_affinity
{
        struct affinity_observer obs;
        struct kfmlp_affinity_ops *ops;
        struct kfmlp_queue_info *q_info;
        int *nr_cur_users_on_rsrc;
        int offset;
        int nr_simult;
        int nr_rsrc;
};

static inline struct kfmlp_affinity* kfmlp_aff_obs_from_aff_obs(struct affinity_observer* aff_obs)
{
        return container_of(aff_obs, struct kfmlp_affinity, obs);
}

int kfmlp_aff_obs_close(struct affinity_observer*);
void kfmlp_aff_obs_free(struct affinity_observer*);
struct affinity_observer* kfmlp_gpu_aff_obs_new(struct affinity_observer_ops*,
                                                void* __user arg);
struct affinity_observer* kfmlp_simple_gpu_aff_obs_new(struct affinity_observer_ops*,
                                                       void* __user arg);


#endif

#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 0b071fd359f9..71df378236f5 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,6 +26,7 @@ static inline int in_list(struct list_head* list)
 	);
 }
 
+
 struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
 
 #define NO_CPU			0xffffffff
@@ -53,12 +54,16 @@ void litmus_exit_task(struct task_struct *tsk);
 #define get_rt_phase(t)		(tsk_rt(t)->task_params.phase)
 #define get_partition(t)	(tsk_rt(t)->task_params.cpu)
 #define get_deadline(t)		(tsk_rt(t)->job_params.deadline)
+#define get_period(t)		(tsk_rt(t)->task_params.period)
 #define get_release(t)		(tsk_rt(t)->job_params.release)
 #define get_class(t)		(tsk_rt(t)->task_params.cls)
 
 #define is_priority_boosted(t)	(tsk_rt(t)->priority_boosted)
 #define get_boost_start(t)	(tsk_rt(t)->boost_start_time)
 
+#define effective_priority(t) ((!(tsk_rt(t)->inh_task)) ? t : tsk_rt(t)->inh_task)
+#define base_priority(t) (t)
+
 inline static int budget_exhausted(struct task_struct* t)
 {
 	return get_exec_time(t) >= get_exec_cost(t);
@@ -114,10 +119,12 @@ static inline lt_t litmus_clock(void)
 #define earlier_deadline(a, b) (lt_before(\
 	(a)->rt_param.job_params.deadline,\
 	(b)->rt_param.job_params.deadline))
+#define shorter_period(a, b) (lt_before(\
+	(a)->rt_param.task_params.period,\
+	(b)->rt_param.task_params.period))
 #define earlier_release(a, b)  (lt_before(\
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
-
 void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 
 #ifdef CONFIG_LITMUS_LOCKING
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
new file mode 100644
index 000000000000..1eb5ea1a6c4b
--- /dev/null
+++ b/include/litmus/litmus_softirq.h
@@ -0,0 +1,199 @@
#ifndef __LITMUS_SOFTIRQ_H
#define __LITMUS_SOFTIRQ_H

#include <linux/interrupt.h>
#include <linux/workqueue.h>

/*
   Threaded tasklet handling for Litmus. Tasklets are scheduled with the
   priority of the tasklet's owner---that is, the RT task on whose behalf
   the tasklet runs.

   Tasklets are currently scheduled in FIFO order with NO priority
   inheritance for "blocked" tasklets.

   klitirqd assumes the priority of the owner of the tasklet when the
   tasklet is next to execute.

   Currently, hi-tasklets are scheduled before low-tasklets, regardless of
   the priority of the low-tasklets' owners. Likewise, low-tasklets are
   scheduled before work queue objects. This priority inversion probably
   needs to be fixed, though it is not an issue in our work with GPUs:
   GPUs (and their associated klitirqds) are owned for exclusive time
   periods, so no inversions can occur.
 */



#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD

/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
   The actual launch of the threads is deferred to kworker's workqueue, so
   the daemons will likely not be running immediately when this function
   returns, though the required data will have been initialized.

   @affinity_set: an array expressing the processor affinity for each of
   the NR_LITMUS_SOFTIRQD daemons. May be set to NULL for global
   scheduling.

   - Examples -
   8-CPU system with two CPU clusters:
        affinity[] = {0, 0, 0, 0, 3, 3, 3, 3}
        NOTE: Daemons are not actually bound to the specified CPU, but
        rather to the cluster in which the CPU resides.

   8-CPU system, partitioned:
        affinity[] = {0, 1, 2, 3, 4, 5, 6, 7}

   FIXME: change array to a CPU topology or an array of cpumasks

 */
void spawn_klitirqd(int* affinity);


/* Raises a flag to tell klitirqds to terminate.
   Termination is async, so some threads may still be running
   after this function returns. */
void kill_klitirqd(void);


/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqd daemons are ready
   to handle tasklets; 0 otherwise. */
int klitirqd_is_ready(void);

/* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqd daemons are ready
   to handle tasklets; 0 otherwise. */
int klitirqd_is_dead(void);

/* Flushes all pending work out to the OS for regular
 * tasklet/work processing of the specified 'owner'
 *
 * PRECOND: klitirqd_thread must have a clear entry
 * in the GPU registry, otherwise this call will become
 * a no-op as work will loop back to the klitirqd_thread.
 *
 * Pass NULL for owner to flush ALL pending items.
 */
void flush_pending(struct task_struct* klitirqd_thread,
                   struct task_struct* owner);

struct task_struct* get_klitirqd(unsigned int k_id);


extern int __litmus_tasklet_schedule(
        struct tasklet_struct *t,
        unsigned int k_id);

/* schedule a tasklet on klitirqd #k_id */
static inline int litmus_tasklet_schedule(
        struct tasklet_struct *t,
        unsigned int k_id)
{
        int ret = 0;
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                ret = __litmus_tasklet_schedule(t, k_id);
        return (ret);
}

/* for use by __tasklet_schedule() */
static inline int _litmus_tasklet_schedule(
        struct tasklet_struct *t,
        unsigned int k_id)
{
        return (__litmus_tasklet_schedule(t, k_id));
}




extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
                                        unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id */
static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
                                             unsigned int k_id)
{
        int ret = 0;
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                ret = __litmus_tasklet_hi_schedule(t, k_id);
        return (ret);
}

/* for use by __tasklet_hi_schedule() */
static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t,
                                              unsigned int k_id)
{
        return (__litmus_tasklet_hi_schedule(t, k_id));
}





extern int __litmus_tasklet_hi_schedule_first(
        struct tasklet_struct *t,
        unsigned int k_id);

/* schedule a hi tasklet on klitirqd #k_id on next go-around */
/* PRECONDITION: Interrupts must be disabled. */
static inline int litmus_tasklet_hi_schedule_first(
        struct tasklet_struct *t,
        unsigned int k_id)
{
        int ret = 0;
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                ret = __litmus_tasklet_hi_schedule_first(t, k_id);
        return (ret);
}

/* for use by __tasklet_hi_schedule_first() */
static inline int _litmus_tasklet_hi_schedule_first(
        struct tasklet_struct *t,
        unsigned int k_id)
{
        return (__litmus_tasklet_hi_schedule_first(t, k_id));
}



//////////////

extern int __litmus_schedule_work(
        struct work_struct* w,
        unsigned int k_id);

static inline int litmus_schedule_work(
        struct work_struct* w,
        unsigned int k_id)
{
        return (__litmus_schedule_work(w, k_id));
}



///////////// mutex operations for client threads.

void down_and_set_stat(struct task_struct* t,
                       enum klitirqd_sem_status to_set,
                       struct mutex* sem);

void __down_and_reset_and_set_stat(struct task_struct* t,
                                   enum klitirqd_sem_status to_reset,
                                   enum klitirqd_sem_status to_set,
                                   struct mutex* sem);

void up_and_set_stat(struct task_struct* t,
                     enum klitirqd_sem_status to_set,
                     struct mutex* sem);



void release_klitirqd_lock(struct task_struct* t);

int reacquire_klitirqd_lock(struct task_struct* t);

#endif
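
A hedged sketch of how these hooks compose on an 8-CPU machine (the CPU
counts, 'my_tasklet', and the daemon index 'k' are illustrative, and it is
assumed here that NR_LITMUS_SOFTIRQD == 8):

        /* two 4-CPU clusters: daemons 0-3 serve the cluster containing
         * CPU 0; daemons 4-7 serve the cluster containing CPU 3 */
        int affinity[8] = {0, 0, 0, 0, 3, 3, 3, 3};
        spawn_klitirqd(affinity);

        /* later: defer bottom-half work, on behalf of a GPU-using task,
         * to klitirqd daemon #k */
        if (klitirqd_is_ready())
                litmus_tasklet_schedule(&my_tasklet, k);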
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4d7b870cb443..36647fee03e4 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -1,28 +1,160 @@
 #ifndef LITMUS_LOCKING_H
 #define LITMUS_LOCKING_H
 
+#include <linux/list.h>
+
 struct litmus_lock_ops;
 
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+struct nested_info
+{
+	struct litmus_lock *lock;
+	struct task_struct *hp_waiter_eff_prio;
+	struct task_struct **hp_waiter_ptr;
+	struct binheap_node hp_binheap_node;
+};
+
+static inline struct task_struct* top_priority(struct binheap_handle* handle) {
+	if (!binheap_empty(handle)) {
+		return (struct task_struct*)(binheap_top_entry(handle, struct nested_info, hp_binheap_node)->hp_waiter_eff_prio);
+	}
+	return NULL;
+}
+
+void print_hp_waiters(struct binheap_node* n, int depth);
+#endif
+
+
 /* Generic base struct for LITMUS^RT userspace semaphores.
  * This structure should be embedded in protocol-specific semaphores.
  */
 struct litmus_lock {
 	struct litmus_lock_ops *ops;
 	int type;
+
+	int ident;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	struct nested_info nest;
+//#ifdef CONFIG_DEBUG_SPINLOCK
+	char cheat_lockdep[2];
+	struct lock_class_key key;
+//#endif
+#endif
 };
 
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+
+#define MAX_DGL_SIZE CONFIG_LITMUS_MAX_DGL_SIZE
+
+typedef struct dgl_wait_state {
+	struct task_struct *task;                /* task waiting on DGL */
+	struct litmus_lock *locks[MAX_DGL_SIZE]; /* requested locks in DGL */
+	int size;                                /* size of the DGL */
+	int nr_remaining;  /* nr of locks remaining before DGL is complete */
+	int last_primary;  /* index of the lock in locks[] that has active priority */
+	wait_queue_t wq_nodes[MAX_DGL_SIZE];
+} dgl_wait_state_t;
+
+void wake_or_wait_on_next_lock(dgl_wait_state_t *dgl_wait);
+void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lock*/);
+
+void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait);
+int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
+void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task);
+#endif
+
+typedef int (*lock_op_t)(struct litmus_lock *l);
+typedef lock_op_t lock_close_t;
+typedef lock_op_t lock_lock_t;
+typedef lock_op_t lock_unlock_t;
+
+typedef int (*lock_open_t)(struct litmus_lock *l, void* __user arg);
+typedef void (*lock_free_t)(struct litmus_lock *l);
+
 struct litmus_lock_ops {
 	/* Current task tries to obtain / drop a reference to a lock.
 	 * Optional methods, allowed by default. */
-	int (*open)(struct litmus_lock*, void* __user);
-	int (*close)(struct litmus_lock*);
+	lock_open_t open;
+	lock_close_t close;
 
 	/* Current tries to lock/unlock this lock (mandatory methods). */
-	int (*lock)(struct litmus_lock*);
-	int (*unlock)(struct litmus_lock*);
+	lock_lock_t lock;
+	lock_unlock_t unlock;
 
 	/* The lock is no longer being referenced (mandatory method). */
-	void (*deallocate)(struct litmus_lock*);
+	lock_free_t deallocate;
+
+#ifdef CONFIG_LITMUS_NESTED_LOCKING
+	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
+	void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
+#endif
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l);
+	int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
+	int (*is_owner)(struct litmus_lock *l, struct task_struct *t);
+	void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
+#endif
 };
 
+
+/*
+   Nested inheritance can be achieved with fine-grain locking when there is
+   no need for DGL support, presuming locks are acquired in a partial order
+   (no cycles!). However, DGLs allow locks to be acquired in any order. This
+   makes nested inheritance very difficult to realize with fine-grain locks
+   (we don't yet know a solution), so we use a big lock instead.
+
+   The code contains both the fine-grain and coarse-grain methods together,
+   side-by-side. Each lock operation is *NOT* surrounded by ifdef/endif, to
+   keep the code readable. However, this leads to the odd situation where
+   both code paths appear together in the code as if they were both active
+   at the same time.
+
+   THIS IS NOT REALLY THE CASE! ONLY ONE CODE PATH IS ACTUALLY ACTIVE!
+
+   Example:
+	lock_global_irqsave(coarseLock, flags);
+	lock_fine_irqsave(fineLock, flags);
+
+   Reality (coarse):
+	lock_global_irqsave(coarseLock, flags);
+	//lock_fine_irqsave(fineLock, flags);
+
+   Reality (fine):
+	//lock_global_irqsave(coarseLock, flags);
+	lock_fine_irqsave(fineLock, flags);
+
+   Be careful when you read code involving nested inheritance.
+ */
+#if defined(CONFIG_LITMUS_DGL_SUPPORT)
+/* DGL requires a big lock to implement nested inheritance */
+#define lock_global_irqsave(lock, flags)      raw_spin_lock_irqsave((lock), (flags))
+#define lock_global(lock)                     raw_spin_lock((lock))
+#define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags))
+#define unlock_global(lock)                   raw_spin_unlock((lock))
+
+/* the fine-grain locking macros are no-ops with DGL support */
+#define lock_fine_irqsave(lock, flags)
+#define lock_fine(lock)
+#define unlock_fine_irqrestore(lock, flags)
+#define unlock_fine(lock)
+
+#elif defined(CONFIG_LITMUS_NESTED_LOCKING)
+/* Use fine-grain locking when DGLs are disabled. */
+/* the global locking macros are no-ops without DGL support */
+#define lock_global_irqsave(lock, flags)
+#define lock_global(lock)
+#define unlock_global_irqrestore(lock, flags)
+#define unlock_global(lock)
+
+#define lock_fine_irqsave(lock, flags)      raw_spin_lock_irqsave((lock), (flags))
+#define lock_fine(lock)                     raw_spin_lock((lock))
+#define unlock_fine_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags))
+#define unlock_fine(lock)                   raw_spin_unlock((lock))
+
 #endif
+
+
+#endif
+
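
Under this convention a lock operation is written once, with both macro
families stacked (a sketch; 'dgl_lock' and 'sem' are illustrative names):

	unsigned long flags;

	lock_global_irqsave(&dgl_lock, flags);  /* real lock only on DGL builds        */
	lock_fine_irqsave(&sem->lock, flags);   /* real lock only on fine-grain builds */

	/* ... manipulate wait queues and inheritance state ... */

	unlock_fine_irqrestore(&sem->lock, flags);
	unlock_global_irqrestore(&dgl_lock, flags);

Whichever family is compiled out expands to nothing, so exactly one spinlock
is actually taken on any given configuration.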
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
new file mode 100644
index 000000000000..97c9577141db
--- /dev/null
+++ b/include/litmus/nvidia_info.h
@@ -0,0 +1,46 @@
#ifndef __LITMUS_NVIDIA_H
#define __LITMUS_NVIDIA_H

#include <linux/interrupt.h>


#include <litmus/litmus_softirq.h>


//#define NV_DEVICE_NUM NR_LITMUS_SOFTIRQD
#define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM
#define NV_MAX_SIMULT_USERS CONFIG_NV_MAX_SIMULT_USERS

int init_nvidia_info(void);
void shutdown_nvidia_info(void);

int is_nvidia_func(void* func_addr);

void dump_nvidia_info(const struct tasklet_struct *t);


// Returns the NVIDIA device # associated with the provided tasklet or work_struct.
u32 get_tasklet_nv_device_num(const struct tasklet_struct *t);
u32 get_work_nv_device_num(const struct work_struct *t);


int init_nv_device_reg(void);
//int get_nv_device_id(struct task_struct* owner);


int reg_nv_device(int reg_device_id, int register_device, struct task_struct *t);

struct task_struct* get_nv_max_device_owner(u32 target_device_id);
//int is_nv_device_owner(u32 target_device_id);

void lock_nv_registry(u32 reg_device_id, unsigned long* flags);
void unlock_nv_registry(u32 reg_device_id, unsigned long* flags);

#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
void pai_check_priority_increase(struct task_struct *t, int reg_device_id);
void pai_check_priority_decrease(struct task_struct *t, int reg_device_id);
#endif

//void increment_nv_int_count(u32 device);

#endif
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 380b886d78ff..8f3a9ca2d4e3 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -26,12 +26,12 @@ const char* sched_state_name(int s);
 		(x), #x, __FUNCTION__);				\
 	} while (0);
 
+//#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */
 #define TRACE_SCHED_STATE_CHANGE(x, y, cpu)			\
 	TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n",		\
 		cpu, (x), sched_state_name(x),			\
 		(y), sched_state_name(y))
 
-
 typedef enum scheduling_state {
 	TASK_SCHEDULED = (1 << 0), /* The currently scheduled task is the one that
 				    * should be scheduled, and the processor does not
diff --git a/include/litmus/rsm_lock.h b/include/litmus/rsm_lock.h
new file mode 100644
index 000000000000..a15189683de4
--- /dev/null
+++ b/include/litmus/rsm_lock.h
@@ -0,0 +1,54 @@
#ifndef LITMUS_RSM_H
#define LITMUS_RSM_H

#include <litmus/litmus.h>
#include <litmus/binheap.h>
#include <litmus/locking.h>

/* struct for semaphore with priority inheritance */
struct rsm_mutex {
        struct litmus_lock litmus_lock;

        /* current resource holder */
        struct task_struct *owner;

        /* highest-priority waiter */
        struct task_struct *hp_waiter;

        /* FIFO queue of waiting tasks -- for now. time stamp in the future. */
        wait_queue_head_t wait;

        /* we do some nesting within spinlocks, so we can't use the normal
           sleeplocks found in wait_queue_head_t. */
        raw_spinlock_t lock;
};

static inline struct rsm_mutex* rsm_mutex_from_lock(struct litmus_lock* lock)
{
        return container_of(lock, struct rsm_mutex, litmus_lock);
}

#ifdef CONFIG_LITMUS_DGL_SUPPORT
int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
void rsm_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
#endif

void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
                                              struct task_struct* t,
                                              raw_spinlock_t* to_unlock,
                                              unsigned long irqflags);

void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
                                              struct task_struct* t,
                                              raw_spinlock_t* to_unlock,
                                              unsigned long irqflags);

int rsm_mutex_lock(struct litmus_lock* l);
int rsm_mutex_unlock(struct litmus_lock* l);
int rsm_mutex_close(struct litmus_lock* l);
void rsm_mutex_free(struct litmus_lock* l);
struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops*);


#endif
\ No newline at end of file
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h index d6d799174160..0198884eab86 100644 --- a/include/litmus/rt_param.h +++ b/include/litmus/rt_param.h | |||
@@ -5,6 +5,8 @@ | |||
5 | #ifndef _LINUX_RT_PARAM_H_ | 5 | #ifndef _LINUX_RT_PARAM_H_ |
6 | #define _LINUX_RT_PARAM_H_ | 6 | #define _LINUX_RT_PARAM_H_ |
7 | 7 | ||
8 | #include <litmus/fpmath.h> | ||
9 | |||
8 | /* Litmus time type. */ | 10 | /* Litmus time type. */ |
9 | typedef unsigned long long lt_t; | 11 | typedef unsigned long long lt_t; |
10 | 12 | ||
@@ -24,6 +26,7 @@ static inline int lt_after_eq(lt_t a, lt_t b) | |||
24 | typedef enum { | 26 | typedef enum { |
25 | RT_CLASS_HARD, | 27 | RT_CLASS_HARD, |
26 | RT_CLASS_SOFT, | 28 | RT_CLASS_SOFT, |
29 | RT_CLASS_SOFT_W_SLIP, | ||
27 | RT_CLASS_BEST_EFFORT | 30 | RT_CLASS_BEST_EFFORT |
28 | } task_class_t; | 31 | } task_class_t; |
29 | 32 | ||
@@ -52,6 +55,19 @@ union np_flag { | |||
52 | } np; | 55 | } np; |
53 | }; | 56 | }; |
54 | 57 | ||
58 | struct affinity_observer_args | ||
59 | { | ||
60 | int lock_od; | ||
61 | }; | ||
62 | |||
63 | struct gpu_affinity_observer_args | ||
64 | { | ||
65 | struct affinity_observer_args obs; | ||
66 | int replica_to_gpu_offset; | ||
67 | int nr_simult_users; | ||
68 | int relaxed_rules; | ||
69 | }; | ||
70 | |||
55 | /* The definition of the data that is shared between the kernel and real-time | 71 | /* The definition of the data that is shared between the kernel and real-time |
56 | * tasks via a shared page (see litmus/ctrldev.c). | 72 | * tasks via a shared page (see litmus/ctrldev.c). |
57 | * | 73 | * |
@@ -75,6 +91,9 @@ struct control_page { | |||
75 | /* don't export internal data structures to user space (liblitmus) */ | 91 | /* don't export internal data structures to user space (liblitmus) */ |
76 | #ifdef __KERNEL__ | 92 | #ifdef __KERNEL__ |
77 | 93 | ||
94 | #include <litmus/binheap.h> | ||
95 | #include <linux/semaphore.h> | ||
96 | |||
78 | struct _rt_domain; | 97 | struct _rt_domain; |
79 | struct bheap_node; | 98 | struct bheap_node; |
80 | struct release_heap; | 99 | struct release_heap; |
@@ -100,6 +119,31 @@ struct rt_job { | |||
100 | 119 | ||
101 | struct pfair_param; | 120 | struct pfair_param; |
102 | 121 | ||
122 | enum klitirqd_sem_status | ||
123 | { | ||
124 | NEED_TO_REACQUIRE, | ||
125 | REACQUIRING, | ||
126 | NOT_HELD, | ||
127 | HELD | ||
128 | }; | ||
129 | |||
130 | typedef enum gpu_migration_dist | ||
131 | { | ||
132 | // TODO: Make this variable against NR_NVIDIA_GPUS | ||
133 | MIG_LOCAL = 0, | ||
134 | MIG_NEAR = 1, | ||
135 | MIG_MED = 2, | ||
136 | MIG_FAR = 3, // 8 GPUs in a binary tree hierarchy | ||
137 | MIG_NONE = 4, | ||
138 | |||
139 | MIG_LAST = MIG_NONE | ||
140 | } gpu_migration_dist_t; | ||
141 | |||
142 | typedef struct feedback_est{ | ||
143 | fp_t est; | ||
144 | fp_t accum_err; | ||
145 | } feedback_est_t; | ||
146 | |||
103 | /* RT task parameters for scheduling extensions | 147 | /* RT task parameters for scheduling extensions |
104 | * These parameters are inherited during clone and therefore must | 148 | * These parameters are inherited during clone and therefore must |
105 | * be explicitly set up before the task set is launched. | 149 | * be explicitly set up before the task set is launched. |
@@ -114,6 +158,52 @@ struct rt_param { | |||
114 | /* is the task present? (true if it can be scheduled) */ | 158 | /* is the task present? (true if it can be scheduled) */ |
115 | unsigned int present:1; | 159 | unsigned int present:1; |
116 | 160 | ||
161 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
162 | /* proxy threads have minimum priority by default */ | ||
163 | unsigned int is_proxy_thread:1; | ||
164 | |||
165 | /* pointer to the klitirqd thread currently working on | ||
166 | this task_struct's behalf. Set only by the klitirqd | ||
167 | thread itself. | ||
168 | | ||
169 | Pointer is valid only if is_proxy_thread == 0. | ||
170 | */ | ||
171 | struct task_struct* cur_klitirqd; | ||
172 | |||
173 | /* Used to implement mutual exclusion between job | ||
174 | * and klitirqd execution. A job must always hold | ||
175 | * its klitirqd_sem to execute; a klitirqd instance | ||
176 | * must hold the semaphore before executing on behalf | ||
177 | * of a job. | ||
178 | */ | ||
179 | struct mutex klitirqd_sem; | ||
180 | |||
181 | /* status of the held klitirqd_sem, even if the held semaphore | ||
182 | belongs to another task (only proxy threads do this). | ||
183 | */ | ||
184 | atomic_t klitirqd_sem_stat; | ||
185 | #endif | ||
186 | |||
187 | #ifdef CONFIG_LITMUS_NVIDIA | ||
188 | /* number of top-half interrupts handled on behalf of current job */ | ||
189 | atomic_t nv_int_count; | ||
190 | unsigned long held_gpus; // bitmap of held GPUs. | ||
191 | |||
192 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
193 | fp_t gpu_fb_param_a[MIG_LAST+1]; | ||
194 | fp_t gpu_fb_param_b[MIG_LAST+1]; | ||
195 | |||
196 | gpu_migration_dist_t gpu_migration; | ||
197 | int last_gpu; | ||
198 | feedback_est_t gpu_migration_est[MIG_LAST+1]; // local, near, med, far, none | ||
199 | |||
200 | lt_t accum_gpu_time; | ||
201 | lt_t gpu_time_stamp; | ||
202 | |||
203 | unsigned int suspend_gpu_tracker_on_block:1; | ||
204 | #endif | ||
205 | #endif | ||
206 | |||
117 | #ifdef CONFIG_LITMUS_LOCKING | 207 | #ifdef CONFIG_LITMUS_LOCKING |
118 | /* Is the task being priority-boosted by a locking protocol? */ | 208 | /* Is the task being priority-boosted by a locking protocol? */ |
119 | unsigned int priority_boosted:1; | 209 | unsigned int priority_boosted:1; |
@@ -133,7 +223,15 @@ struct rt_param { | |||
133 | * could point to self if PI does not result in | 223 | * could point to self if PI does not result in |
134 | * an increased task priority. | 224 | * an increased task priority. |
135 | */ | 225 | */ |
136 | struct task_struct* inh_task; | 226 | struct task_struct* inh_task; |
227 | |||
228 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
229 | raw_spinlock_t hp_blocked_tasks_lock; | ||
230 | struct binheap_handle hp_blocked_tasks; | ||
231 | |||
232 | /* pointer to the lock upon which the task is currently blocked */ | ||
233 | struct litmus_lock* blocked_lock; | ||
234 | #endif | ||
137 | 235 | ||
138 | #ifdef CONFIG_NP_SECTION | 236 | #ifdef CONFIG_NP_SECTION |
139 | /* For the FMLP under PSN-EDF, it is required to make the task | 237 | /* For the FMLP under PSN-EDF, it is required to make the task |
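The gpu_migration_dist_t enum added to rt_param.h classifies how far a job's state must migrate between GPUs arranged in a binary-tree hierarchy (the MIG_FAR comment assumes 8 GPUs). As a hedged illustration of what such a classifier could look like, here is a sketch that compares shared id prefixes; the helper name and the 0..7 numbering scheme are assumptions, not code from the patch.

/* Hypothetical sketch: map a (last_gpu, next_gpu) pair onto
 * gpu_migration_dist_t for 8 GPUs numbered 0..7 in a binary tree,
 * where neighboring ids share the deepest interior node. */
static inline gpu_migration_dist_t my_gpu_migration_distance(int last, int next)
{
	if (last < 0 || next < 0)
		return MIG_NONE;          /* no prior GPU to migrate from */
	if (last == next)
		return MIG_LOCAL;         /* same GPU */
	if ((last >> 1) == (next >> 1))
		return MIG_NEAR;          /* siblings: shared parent node */
	if ((last >> 2) == (next >> 2))
		return MIG_MED;           /* same subtree of four */
	return MIG_FAR;                   /* opposite halves of the tree */
}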
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h index 6e7cabdddae8..24a6858b4b0b 100644 --- a/include/litmus/sched_plugin.h +++ b/include/litmus/sched_plugin.h | |||
@@ -11,6 +11,12 @@ | |||
11 | #include <litmus/locking.h> | 11 | #include <litmus/locking.h> |
12 | #endif | 12 | #endif |
13 | 13 | ||
14 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
15 | #include <litmus/kexclu_affinity.h> | ||
16 | #endif | ||
17 | |||
18 | #include <linux/interrupt.h> | ||
19 | |||
14 | /************************ setup/tear down ********************/ | 20 | /************************ setup/tear down ********************/ |
15 | 21 | ||
16 | typedef long (*activate_plugin_t) (void); | 22 | typedef long (*activate_plugin_t) (void); |
@@ -29,7 +35,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev); | |||
29 | */ | 35 | */ |
30 | typedef void (*finish_switch_t)(struct task_struct *prev); | 36 | typedef void (*finish_switch_t)(struct task_struct *prev); |
31 | 37 | ||
32 | |||
33 | /********************* task state changes ********************/ | 38 | /********************* task state changes ********************/ |
34 | 39 | ||
35 | /* Called to setup a new real-time task. | 40 | /* Called to setup a new real-time task. |
@@ -58,6 +63,47 @@ typedef void (*task_exit_t) (struct task_struct *); | |||
58 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, | 63 | typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type, |
59 | void* __user config); | 64 | void* __user config); |
60 | 65 | ||
66 | struct affinity_observer; | ||
67 | typedef long (*allocate_affinity_observer_t) ( | ||
68 | struct affinity_observer **aff_obs, int type, | ||
69 | void* __user config); | ||
70 | |||
71 | typedef void (*increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
72 | typedef void (*decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh); | ||
73 | typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | ||
74 | raw_spinlock_t *to_unlock, unsigned long irqflags); | ||
75 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | ||
76 | raw_spinlock_t *to_unlock, unsigned long irqflags); | ||
77 | |||
78 | typedef void (*increase_prio_klitirqd_t)(struct task_struct* klitirqd, | ||
79 | struct task_struct* old_owner, | ||
80 | struct task_struct* new_owner); | ||
81 | typedef void (*decrease_prio_klitirqd_t)(struct task_struct* klitirqd, | ||
82 | struct task_struct* old_owner); | ||
83 | |||
84 | |||
85 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); | ||
86 | typedef void (*change_prio_pai_tasklet_t)(struct task_struct *old_prio, | ||
87 | struct task_struct *new_prio); | ||
88 | typedef void (*run_tasklets_t)(struct task_struct* next); | ||
89 | |||
90 | typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); | ||
91 | |||
92 | |||
93 | typedef int (*higher_prio_t)(struct task_struct* a, struct task_struct* b); | ||
94 | |||
95 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
96 | |||
97 | typedef enum | ||
98 | { | ||
99 | BASE, | ||
100 | EFFECTIVE | ||
101 | } comparison_mode_t; | ||
102 | |||
103 | typedef int (*__higher_prio_t)(struct task_struct* a, comparison_mode_t a_mod, | ||
104 | struct task_struct* b, comparison_mode_t b_mod); | ||
105 | #endif | ||
106 | |||
61 | 107 | ||
62 | /********************* sys call backends ********************/ | 108 | /********************* sys call backends ********************/ |
63 | /* This function causes the caller to sleep until the next release */ | 109 | /* This function causes the caller to sleep until the next release */ |
@@ -88,14 +134,40 @@ struct sched_plugin { | |||
88 | /* task state changes */ | 134 | /* task state changes */ |
89 | admit_task_t admit_task; | 135 | admit_task_t admit_task; |
90 | 136 | ||
91 | task_new_t task_new; | 137 | task_new_t task_new; |
92 | task_wake_up_t task_wake_up; | 138 | task_wake_up_t task_wake_up; |
93 | task_block_t task_block; | 139 | task_block_t task_block; |
94 | task_exit_t task_exit; | 140 | task_exit_t task_exit; |
95 | 141 | ||
142 | higher_prio_t compare; | ||
143 | |||
96 | #ifdef CONFIG_LITMUS_LOCKING | 144 | #ifdef CONFIG_LITMUS_LOCKING |
97 | /* locking protocols */ | 145 | /* locking protocols */ |
98 | allocate_lock_t allocate_lock; | 146 | allocate_lock_t allocate_lock; |
147 | increase_prio_t increase_prio; | ||
148 | decrease_prio_t decrease_prio; | ||
149 | #endif | ||
150 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | ||
151 | nested_increase_prio_t nested_increase_prio; | ||
152 | nested_decrease_prio_t nested_decrease_prio; | ||
153 | __higher_prio_t __compare; | ||
154 | #endif | ||
155 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
156 | get_dgl_spinlock_t get_dgl_spinlock; | ||
157 | #endif | ||
158 | |||
159 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
160 | allocate_affinity_observer_t allocate_aff_obs; | ||
161 | #endif | ||
162 | |||
163 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
164 | increase_prio_klitirqd_t increase_prio_klitirqd; | ||
165 | decrease_prio_klitirqd_t decrease_prio_klitirqd; | ||
166 | #endif | ||
167 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
168 | enqueue_pai_tasklet_t enqueue_pai_tasklet; | ||
169 | change_prio_pai_tasklet_t change_prio_pai_tasklet; | ||
170 | run_tasklets_t run_tasklets; | ||
99 | #endif | 171 | #endif |
100 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | 172 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); |
101 | 173 | ||
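Taken together, the new struct sched_plugin fields let a plugin expose priority comparison, priority inheritance, DGL, and affinity hooks behind a single ops table. A hedged sketch of how a plugin initializer might populate them follows; every my_* symbol is a hypothetical stand-in.

/* Hypothetical plugin table (not from the patch). */
static struct sched_plugin my_gpu_plugin = {
	.plugin_name = "MY-GSN-EDF",
	.task_new    = my_task_new,
	.task_exit   = my_task_exit,
	.compare     = my_higher_prio,          /* base-priority order */
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock = my_allocate_lock,
	.increase_prio = my_increase_prio,      /* inheritance: raise */
	.decrease_prio = my_decrease_prio,      /* inheritance: restore */
#endif
#ifdef CONFIG_LITMUS_NESTED_LOCKING
	.nested_increase_prio = my_nested_increase_prio,
	.nested_decrease_prio = my_nested_decrease_prio,
	.__compare            = my_prio_compare, /* BASE vs. EFFECTIVE */
#endif
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.get_dgl_spinlock = my_get_dgl_spinlock,
#endif
};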
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h index 7ca34cb13881..b1b71f6c5f0c 100644 --- a/include/litmus/sched_trace.h +++ b/include/litmus/sched_trace.h | |||
@@ -10,13 +10,14 @@ struct st_trace_header { | |||
10 | u8 type; /* Of what type is this record? */ | 10 | u8 type; /* Of what type is this record? */ |
11 | u8 cpu; /* On which CPU was it recorded? */ | 11 | u8 cpu; /* On which CPU was it recorded? */ |
12 | u16 pid; /* PID of the task. */ | 12 | u16 pid; /* PID of the task. */ |
13 | u32 job; /* The job sequence number. */ | 13 | u32 job:24; /* The job sequence number. */ |
14 | }; | 14 | u8 extra; |
15 | } __attribute__((packed)); | ||
15 | 16 | ||
16 | #define ST_NAME_LEN 16 | 17 | #define ST_NAME_LEN 16 |
17 | struct st_name_data { | 18 | struct st_name_data { |
18 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ | 19 | char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ |
19 | }; | 20 | } __attribute__((packed)); |
20 | 21 | ||
21 | struct st_param_data { /* regular params */ | 22 | struct st_param_data { /* regular params */ |
22 | u32 wcet; | 23 | u32 wcet; |
@@ -25,30 +26,29 @@ struct st_param_data { /* regular params */ | |||
25 | u8 partition; | 26 | u8 partition; |
26 | u8 class; | 27 | u8 class; |
27 | u8 __unused[2]; | 28 | u8 __unused[2]; |
28 | }; | 29 | } __attribute__((packed)); |
29 | 30 | ||
30 | struct st_release_data { /* A job was/is going to be released. */ | 31 | struct st_release_data { /* A job was/is going to be released. */ |
31 | u64 release; /* What's the release time? */ | 32 | u64 release; /* What's the release time? */ |
32 | u64 deadline; /* By when must it finish? */ | 33 | u64 deadline; /* By when must it finish? */ |
33 | }; | 34 | } __attribute__((packed)); |
34 | 35 | ||
35 | struct st_assigned_data { /* A job was assigned to a CPU. */ | 36 | struct st_assigned_data { /* A job was assigned to a CPU. */ |
36 | u64 when; | 37 | u64 when; |
37 | u8 target; /* Where should it execute? */ | 38 | u8 target; /* Where should it execute? */ |
38 | u8 __unused[7]; | 39 | u8 __unused[7]; |
39 | }; | 40 | } __attribute__((packed)); |
40 | 41 | ||
41 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ | 42 | struct st_switch_to_data { /* A process was switched to on a given CPU. */ |
42 | u64 when; /* When did this occur? */ | 43 | u64 when; /* When did this occur? */ |
43 | u32 exec_time; /* Time the current job has executed. */ | 44 | u32 exec_time; /* Time the current job has executed. */ |
44 | u8 __unused[4]; | 45 | u8 __unused[4]; |
45 | 46 | } __attribute__((packed)); | |
46 | }; | ||
47 | 47 | ||
48 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ | 48 | struct st_switch_away_data { /* A process was switched away from on a given CPU. */ |
49 | u64 when; | 49 | u64 when; |
50 | u64 exec_time; | 50 | u64 exec_time; |
51 | }; | 51 | } __attribute__((packed)); |
52 | 52 | ||
53 | struct st_completion_data { /* A job completed. */ | 53 | struct st_completion_data { /* A job completed. */ |
54 | u64 when; | 54 | u64 when; |
@@ -56,35 +56,108 @@ struct st_completion_data { /* A job completed. */ | |||
56 | * next task automatically; set to 0 otherwise. | 56 | * next task automatically; set to 0 otherwise. |
57 | */ | 57 | */ |
58 | u8 __uflags:7; | 58 | u8 __uflags:7; |
59 | u8 __unused[7]; | 59 | u16 nv_int_count; |
60 | }; | 60 | u8 __unused[5]; |
61 | } __attribute__((packed)); | ||
61 | 62 | ||
62 | struct st_block_data { /* A task blocks. */ | 63 | struct st_block_data { /* A task blocks. */ |
63 | u64 when; | 64 | u64 when; |
64 | u64 __unused; | 65 | u64 __unused; |
65 | }; | 66 | } __attribute__((packed)); |
66 | 67 | ||
67 | struct st_resume_data { /* A task resumes. */ | 68 | struct st_resume_data { /* A task resumes. */ |
68 | u64 when; | 69 | u64 when; |
69 | u64 __unused; | 70 | u64 __unused; |
70 | }; | 71 | } __attribute__((packed)); |
71 | 72 | ||
72 | struct st_action_data { | 73 | struct st_action_data { |
73 | u64 when; | 74 | u64 when; |
74 | u8 action; | 75 | u8 action; |
75 | u8 __unused[7]; | 76 | u8 __unused[7]; |
76 | }; | 77 | } __attribute__((packed)); |
77 | 78 | ||
78 | struct st_sys_release_data { | 79 | struct st_sys_release_data { |
79 | u64 when; | 80 | u64 when; |
80 | u64 release; | 81 | u64 release; |
81 | }; | 82 | } __attribute__((packed)); |
83 | |||
84 | |||
85 | struct st_tasklet_release_data { | ||
86 | u64 when; | ||
87 | u64 __unused; | ||
88 | } __attribute__((packed)); | ||
89 | |||
90 | struct st_tasklet_begin_data { | ||
91 | u64 when; | ||
92 | u16 exe_pid; | ||
93 | u8 __unused[6]; | ||
94 | } __attribute__((packed)); | ||
95 | |||
96 | struct st_tasklet_end_data { | ||
97 | u64 when; | ||
98 | u16 exe_pid; | ||
99 | u8 flushed; | ||
100 | u8 __unused[5]; | ||
101 | } __attribute__((packed)); | ||
102 | |||
103 | |||
104 | struct st_work_release_data { | ||
105 | u64 when; | ||
106 | u64 __unused; | ||
107 | } __attribute__((packed)); | ||
108 | |||
109 | struct st_work_begin_data { | ||
110 | u64 when; | ||
111 | u16 exe_pid; | ||
112 | u8 __unused[6]; | ||
113 | } __attribute__((packed)); | ||
114 | |||
115 | struct st_work_end_data { | ||
116 | u64 when; | ||
117 | u16 exe_pid; | ||
118 | u8 flushed; | ||
119 | u8 __unused[5]; | ||
120 | } __attribute__((packed)); | ||
121 | |||
122 | struct st_effective_priority_change_data { | ||
123 | u64 when; | ||
124 | u16 inh_pid; | ||
125 | u8 __unused[6]; | ||
126 | } __attribute__((packed)); | ||
127 | |||
128 | struct st_nv_interrupt_begin_data { | ||
129 | u64 when; | ||
130 | u32 device; | ||
131 | u32 serialNumber; | ||
132 | } __attribute__((packed)); | ||
133 | |||
134 | struct st_nv_interrupt_end_data { | ||
135 | u64 when; | ||
136 | u32 device; | ||
137 | u32 serialNumber; | ||
138 | } __attribute__((packed)); | ||
139 | |||
140 | struct st_prediction_err_data { | ||
141 | u64 distance; | ||
142 | u64 rel_err; | ||
143 | } __attribute__((packed)); | ||
144 | |||
145 | struct st_migration_data { | ||
146 | u64 observed; | ||
147 | u64 estimated; | ||
148 | } __attribute__((packed)); | ||
149 | |||
150 | struct migration_info { | ||
151 | u64 observed; | ||
152 | u64 estimated; | ||
153 | u8 distance; | ||
154 | } __attribute__((packed)); | ||
82 | 155 | ||
83 | #define DATA(x) struct st_ ## x ## _data x; | 156 | #define DATA(x) struct st_ ## x ## _data x; |
84 | 157 | ||
85 | typedef enum { | 158 | typedef enum { |
86 | ST_NAME = 1, /* Start at one, so that we can spot | 159 | ST_NAME = 1, /* Start at one, so that we can spot |
87 | * uninitialized records. */ | 160 | * uninitialized records. */ |
88 | ST_PARAM, | 161 | ST_PARAM, |
89 | ST_RELEASE, | 162 | ST_RELEASE, |
90 | ST_ASSIGNED, | 163 | ST_ASSIGNED, |
@@ -94,7 +167,19 @@ typedef enum { | |||
94 | ST_BLOCK, | 167 | ST_BLOCK, |
95 | ST_RESUME, | 168 | ST_RESUME, |
96 | ST_ACTION, | 169 | ST_ACTION, |
97 | ST_SYS_RELEASE | 170 | ST_SYS_RELEASE, |
171 | ST_TASKLET_RELEASE, | ||
172 | ST_TASKLET_BEGIN, | ||
173 | ST_TASKLET_END, | ||
174 | ST_WORK_RELEASE, | ||
175 | ST_WORK_BEGIN, | ||
176 | ST_WORK_END, | ||
177 | ST_EFF_PRIO_CHANGE, | ||
178 | ST_NV_INTERRUPT_BEGIN, | ||
179 | ST_NV_INTERRUPT_END, | ||
180 | |||
181 | ST_PREDICTION_ERR, | ||
182 | ST_MIGRATION, | ||
98 | } st_event_record_type_t; | 183 | } st_event_record_type_t; |
99 | 184 | ||
100 | struct st_event_record { | 185 | struct st_event_record { |
@@ -113,8 +198,20 @@ struct st_event_record { | |||
113 | DATA(resume); | 198 | DATA(resume); |
114 | DATA(action); | 199 | DATA(action); |
115 | DATA(sys_release); | 200 | DATA(sys_release); |
201 | DATA(tasklet_release); | ||
202 | DATA(tasklet_begin); | ||
203 | DATA(tasklet_end); | ||
204 | DATA(work_release); | ||
205 | DATA(work_begin); | ||
206 | DATA(work_end); | ||
207 | DATA(effective_priority_change); | ||
208 | DATA(nv_interrupt_begin); | ||
209 | DATA(nv_interrupt_end); | ||
210 | |||
211 | DATA(prediction_err); | ||
212 | DATA(migration); | ||
116 | } data; | 213 | } data; |
117 | }; | 214 | } __attribute__((packed)); |
118 | 215 | ||
119 | #undef DATA | 216 | #undef DATA |
120 | 217 | ||
@@ -129,6 +226,8 @@ struct st_event_record { | |||
129 | ft_event1(id, callback, task) | 226 | ft_event1(id, callback, task) |
130 | #define SCHED_TRACE2(id, callback, task, xtra) \ | 227 | #define SCHED_TRACE2(id, callback, task, xtra) \ |
131 | ft_event2(id, callback, task, xtra) | 228 | ft_event2(id, callback, task, xtra) |
229 | #define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \ | ||
230 | ft_event3(id, callback, task, xtra1, xtra2) | ||
132 | 231 | ||
133 | /* provide prototypes; needed on sparc64 */ | 232 | /* provide prototypes; needed on sparc64 */ |
134 | #ifndef NO_TASK_TRACE_DECLS | 233 | #ifndef NO_TASK_TRACE_DECLS |
@@ -155,12 +254,58 @@ feather_callback void do_sched_trace_action(unsigned long id, | |||
155 | feather_callback void do_sched_trace_sys_release(unsigned long id, | 254 | feather_callback void do_sched_trace_sys_release(unsigned long id, |
156 | lt_t* start); | 255 | lt_t* start); |
157 | 256 | ||
257 | |||
258 | feather_callback void do_sched_trace_tasklet_release(unsigned long id, | ||
259 | struct task_struct* owner); | ||
260 | feather_callback void do_sched_trace_tasklet_begin(unsigned long id, | ||
261 | struct task_struct* owner); | ||
262 | feather_callback void do_sched_trace_tasklet_end(unsigned long id, | ||
263 | struct task_struct* owner, | ||
264 | unsigned long flushed); | ||
265 | |||
266 | feather_callback void do_sched_trace_work_release(unsigned long id, | ||
267 | struct task_struct* owner); | ||
268 | feather_callback void do_sched_trace_work_begin(unsigned long id, | ||
269 | struct task_struct* owner, | ||
270 | struct task_struct* exe); | ||
271 | feather_callback void do_sched_trace_work_end(unsigned long id, | ||
272 | struct task_struct* owner, | ||
273 | struct task_struct* exe, | ||
274 | unsigned long flushed); | ||
275 | |||
276 | feather_callback void do_sched_trace_eff_prio_change(unsigned long id, | ||
277 | struct task_struct* task, | ||
278 | struct task_struct* inh); | ||
279 | |||
280 | feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id, | ||
281 | u32 device); | ||
282 | feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, | ||
283 | unsigned long unused); | ||
284 | |||
285 | feather_callback void do_sched_trace_prediction_err(unsigned long id, | ||
286 | struct task_struct* task, | ||
287 | gpu_migration_dist_t* distance, | ||
288 | fp_t* rel_err); | ||
289 | |||
290 | |||
291 | |||
292 | |||
293 | |||
294 | feather_callback void do_sched_trace_migration(unsigned long id, | ||
295 | struct task_struct* task, | ||
296 | struct migration_info* mig_info); | ||
297 | |||
298 | |||
299 | /* returns true if we're tracing an interrupt on current CPU */ | ||
300 | /* int is_interrupt_tracing_active(void); */ | ||
301 | |||
158 | #endif | 302 | #endif |
159 | 303 | ||
160 | #else | 304 | #else |
161 | 305 | ||
162 | #define SCHED_TRACE(id, callback, task) /* no tracing */ | 306 | #define SCHED_TRACE(id, callback, task) /* no tracing */ |
163 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ | 307 | #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ |
308 | #define SCHED_TRACE3(id, callback, task, xtra1, xtra2) /* no tracing */ | ||
164 | 309 | ||
165 | #endif | 310 | #endif |
166 | 311 | ||
@@ -193,6 +338,41 @@ feather_callback void do_sched_trace_sys_release(unsigned long id, | |||
193 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) | 338 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) |
194 | 339 | ||
195 | 340 | ||
341 | #define sched_trace_tasklet_release(t) \ | ||
342 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t) | ||
343 | |||
344 | #define sched_trace_tasklet_begin(t) \ | ||
345 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t) | ||
346 | |||
347 | #define sched_trace_tasklet_end(t, flushed) \ | ||
348 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed) | ||
349 | |||
350 | |||
351 | #define sched_trace_work_release(t) \ | ||
352 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t) | ||
353 | |||
354 | #define sched_trace_work_begin(t, e) \ | ||
355 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e) | ||
356 | |||
357 | #define sched_trace_work_end(t, e, flushed) \ | ||
358 | SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed) | ||
359 | |||
360 | |||
361 | #define sched_trace_eff_prio_change(t, inh) \ | ||
362 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh) | ||
363 | |||
364 | |||
365 | #define sched_trace_nv_interrupt_begin(d) \ | ||
366 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d) | ||
367 | #define sched_trace_nv_interrupt_end(d) \ | ||
368 | SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d) | ||
369 | |||
370 | #define sched_trace_prediction_err(t, dist, rel_err) \ | ||
371 | SCHED_TRACE3(SCHED_TRACE_BASE_ID + 20, do_sched_trace_prediction_err, t, dist, rel_err) | ||
372 | |||
373 | #define sched_trace_migration(t, mig_info) \ | ||
374 | SCHED_TRACE2(SCHED_TRACE_BASE_ID + 21, do_sched_trace_migration, t, mig_info) | ||
375 | |||
196 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ | 376 | #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ |
197 | 377 | ||
198 | #endif /* __KERNEL__ */ | 378 | #endif /* __KERNEL__ */ |
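The tasklet and work-item tracepoints added above are designed to bracket deferred work charged to a real-time task. A hedged usage sketch follows; the dispatch function and its owner bookkeeping are illustrative, not taken from the patch.

/* Hypothetical klitirqd-style dispatch; 'owner' is the RT task the
 * deferred work is charged to. */
static void my_exec_tasklet(struct tasklet_struct *t,
			    struct task_struct *owner)
{
	sched_trace_tasklet_begin(owner);     /* emits ST_TASKLET_BEGIN */
	t->func(t->data);                     /* run the deferred work */
	sched_trace_tasklet_end(owner, 0ul);  /* flushed = 0: actually ran */
}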
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h new file mode 100644 index 000000000000..e70e45e4cf51 --- /dev/null +++ b/include/litmus/sched_trace_external.h | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * sched_trace_external.h -- record scheduler events to a byte stream for offline analysis. | ||
3 | */ | ||
4 | #ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_ | ||
5 | #define _LINUX_SCHED_TRACE_EXTERNAL_H_ | ||
6 | |||
7 | |||
8 | #ifdef CONFIG_SCHED_TASK_TRACE | ||
9 | extern void __sched_trace_tasklet_begin_external(struct task_struct* t); | ||
10 | static inline void sched_trace_tasklet_begin_external(struct task_struct* t) | ||
11 | { | ||
12 | __sched_trace_tasklet_begin_external(t); | ||
13 | } | ||
14 | |||
15 | extern void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed); | ||
16 | static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed) | ||
17 | { | ||
18 | __sched_trace_tasklet_end_external(t, flushed); | ||
19 | } | ||
20 | |||
21 | extern void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e); | ||
22 | static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e) | ||
23 | { | ||
24 | __sched_trace_work_begin_external(t, e); | ||
25 | } | ||
26 | |||
27 | extern void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f); | ||
28 | static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f) | ||
29 | { | ||
30 | __sched_trace_work_end_external(t, e, f); | ||
31 | } | ||
32 | |||
33 | #ifdef CONFIG_LITMUS_NVIDIA | ||
34 | extern void __sched_trace_nv_interrupt_begin_external(u32 device); | ||
35 | static inline void sched_trace_nv_interrupt_begin_external(u32 device) | ||
36 | { | ||
37 | __sched_trace_nv_interrupt_begin_external(device); | ||
38 | } | ||
39 | |||
40 | extern void __sched_trace_nv_interrupt_end_external(u32 device); | ||
41 | static inline void sched_trace_nv_interrupt_end_external(u32 device) | ||
42 | { | ||
43 | __sched_trace_nv_interrupt_end_external(device); | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | #else | ||
48 | |||
49 | // no tracing. | ||
50 | static inline void sched_trace_tasklet_begin_external(struct task_struct* t){} | ||
51 | static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){} | ||
52 | static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){} | ||
53 | static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){} | ||
54 | |||
55 | #ifdef CONFIG_LITMUS_NVIDIA | ||
56 | static inline void sched_trace_nv_interrupt_begin_external(u32 device){} | ||
57 | static inline void sched_trace_nv_interrupt_end_external(u32 device){} | ||
58 | #endif | ||
59 | |||
60 | #endif | ||
61 | |||
62 | |||
63 | #ifdef CONFIG_LITMUS_NVIDIA | ||
64 | |||
65 | #define EX_TS(evt) \ | ||
66 | extern void __##evt(void); \ | ||
67 | static inline void EX_##evt(void) { __##evt(); } | ||
68 | |||
69 | EX_TS(TS_NV_TOPISR_START) | ||
70 | EX_TS(TS_NV_TOPISR_END) | ||
71 | EX_TS(TS_NV_BOTISR_START) | ||
72 | EX_TS(TS_NV_BOTISR_END) | ||
73 | EX_TS(TS_NV_RELEASE_BOTISR_START) | ||
74 | EX_TS(TS_NV_RELEASE_BOTISR_END) | ||
75 | |||
76 | #endif | ||
77 | |||
78 | #endif | ||
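Expanding the EX_TS() macro makes the wrapper pattern concrete; for example, EX_TS(TS_NV_TOPISR_START) produces the following pair of declarations, giving external (non-Litmus) code a callable hook around the timestamp macro:

/* Expansion of EX_TS(TS_NV_TOPISR_START): */
extern void __TS_NV_TOPISR_START(void);
static inline void EX_TS_NV_TOPISR_START(void)
{
	__TS_NV_TOPISR_START();
}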
diff --git a/include/litmus/trace.h b/include/litmus/trace.h index e809376d6487..e078aee4234d 100644 --- a/include/litmus/trace.h +++ b/include/litmus/trace.h | |||
@@ -103,14 +103,46 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_ | |||
103 | #define TS_LOCK_START TIMESTAMP(170) | 103 | #define TS_LOCK_START TIMESTAMP(170) |
104 | #define TS_LOCK_SUSPEND TIMESTAMP(171) | 104 | #define TS_LOCK_SUSPEND TIMESTAMP(171) |
105 | #define TS_LOCK_RESUME TIMESTAMP(172) | 105 | #define TS_LOCK_RESUME TIMESTAMP(172) |
106 | #define TS_LOCK_END TIMESTAMP(173) | 106 | #define TS_LOCK_END TIMESTAMP(173) |
107 | |||
108 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
109 | #define TS_DGL_LOCK_START TIMESTAMP(175) | ||
110 | #define TS_DGL_LOCK_SUSPEND TIMESTAMP(176) | ||
111 | #define TS_DGL_LOCK_RESUME TIMESTAMP(177) | ||
112 | #define TS_DGL_LOCK_END TIMESTAMP(178) | ||
113 | #endif | ||
107 | 114 | ||
108 | #define TS_UNLOCK_START TIMESTAMP(180) | 115 | #define TS_UNLOCK_START TIMESTAMP(180) |
109 | #define TS_UNLOCK_END TIMESTAMP(181) | 116 | #define TS_UNLOCK_END TIMESTAMP(181) |
110 | 117 | ||
118 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | ||
119 | #define TS_DGL_UNLOCK_START TIMESTAMP(185) | ||
120 | #define TS_DGL_UNLOCK_END TIMESTAMP(186) | ||
121 | #endif | ||
122 | |||
111 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) | 123 | #define TS_SEND_RESCHED_START(c) CTIMESTAMP(190, c) |
112 | #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) | 124 | #define TS_SEND_RESCHED_END DTIMESTAMP(191, TSK_UNKNOWN) |
113 | 125 | ||
114 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) | 126 | #define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) |
115 | 127 | ||
128 | |||
129 | #ifdef CONFIG_LITMUS_NVIDIA | ||
130 | |||
131 | #define TS_NV_TOPISR_START TIMESTAMP(200) | ||
132 | #define TS_NV_TOPISR_END TIMESTAMP(201) | ||
133 | |||
134 | #define TS_NV_BOTISR_START TIMESTAMP(202) | ||
135 | #define TS_NV_BOTISR_END TIMESTAMP(203) | ||
136 | |||
137 | #define TS_NV_RELEASE_BOTISR_START TIMESTAMP(204) | ||
138 | #define TS_NV_RELEASE_BOTISR_END TIMESTAMP(205) | ||
139 | |||
140 | #endif | ||
141 | |||
142 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
143 | #define TS_NV_SCHED_BOTISR_START TIMESTAMP(206) | ||
144 | #define TS_NV_SCHED_BOTISR_END TIMESTAMP(207) | ||
145 | #endif | ||
146 | |||
147 | |||
116 | #endif /* !_SYS_TRACE_H_ */ | 148 | #endif /* !_SYS_TRACE_H_ */ |
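The DGL timestamps mirror the existing TS_LOCK_* pairs: START/END bracket the group-lock acquisition path, with SUSPEND/RESUME marking any blocking in between. A hedged sketch of their placement; the syscall body and the my_do_dgl_lock() helper are hypothetical.

/* Hypothetical syscall body showing timestamp placement. */
asmlinkage long my_sys_litmus_dgl_lock(void __user *usr_ods, int dgl_size)
{
	long err;

	TS_DGL_LOCK_START;                 /* group acquisition begins */
	err = my_do_dgl_lock(usr_ods, dgl_size);
	/* a blocking path would emit TS_DGL_LOCK_SUSPEND before
	 * sleeping and TS_DGL_LOCK_RESUME upon wake-up */
	TS_DGL_LOCK_END;                   /* group acquisition ends */
	return err;
}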
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h index 94264c27d9ac..4fa514c89605 100644 --- a/include/litmus/unistd_32.h +++ b/include/litmus/unistd_32.h | |||
@@ -17,5 +17,8 @@ | |||
17 | #define __NR_wait_for_ts_release __LSC(9) | 17 | #define __NR_wait_for_ts_release __LSC(9) |
18 | #define __NR_release_ts __LSC(10) | 18 | #define __NR_release_ts __LSC(10) |
19 | #define __NR_null_call __LSC(11) | 19 | #define __NR_null_call __LSC(11) |
20 | #define __NR_litmus_dgl_lock __LSC(12) | ||
21 | #define __NR_litmus_dgl_unlock __LSC(13) | ||
22 | #define __NR_register_nv_device __LSC(14) | ||
20 | 23 | ||
21 | #define NR_litmus_syscalls 12 | 24 | #define NR_litmus_syscalls 15 |
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h index d5ced0d2642c..f80dc45dc185 100644 --- a/include/litmus/unistd_64.h +++ b/include/litmus/unistd_64.h | |||
@@ -29,5 +29,12 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release) | |||
29 | __SYSCALL(__NR_release_ts, sys_release_ts) | 29 | __SYSCALL(__NR_release_ts, sys_release_ts) |
30 | #define __NR_null_call __LSC(11) | 30 | #define __NR_null_call __LSC(11) |
31 | __SYSCALL(__NR_null_call, sys_null_call) | 31 | __SYSCALL(__NR_null_call, sys_null_call) |
32 | #define __NR_litmus_dgl_lock __LSC(12) | ||
33 | __SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock) | ||
34 | #define __NR_litmus_dgl_unlock __LSC(13) | ||
35 | __SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock) | ||
36 | #define __NR_register_nv_device __LSC(14) | ||
37 | __SYSCALL(__NR_register_nv_device, sys_register_nv_device) | ||
32 | 38 | ||
33 | #define NR_litmus_syscalls 12 | 39 | |
40 | #define NR_litmus_syscalls 15 | ||
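From user space, the three new syscall slots are reached through syscall(2) (in practice via liblitmus wrappers). A hedged sketch follows, assuming the DGL calls take an array of lock object descriptors plus its length, and that register_nv_device takes a device id and a register/unregister flag; none of these argument conventions are shown in this patch.

/* Hypothetical user-space wrappers; argument conventions are assumed. */
#include <unistd.h>
#include <sys/syscall.h>

static long my_dgl_lock(int *lock_ods, int count)
{
	return syscall(__NR_litmus_dgl_lock, lock_ods, count);
}

static long my_dgl_unlock(int *lock_ods, int count)
{
	return syscall(__NR_litmus_dgl_unlock, lock_ods, count);
}

static long my_register_nv_device(int device_id, int reg_action)
{
	return syscall(__NR_register_nv_device, device_id, reg_action);
}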