Diffstat (limited to 'include')
 -rw-r--r--  include/litmus/gpu_affinity.h |  7
 -rw-r--r--  include/litmus/nvidia_info.h  |  1
 -rw-r--r--  include/litmus/rt_param.h     |  5
 -rw-r--r--  include/litmus/sched_trace.h  | 44
 4 files changed, 49 insertions, 8 deletions
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
index d4db2003ad86..6b3fb8b28745 100644
--- a/include/litmus/gpu_affinity.h
+++ b/include/litmus/gpu_affinity.h
@@ -43,10 +43,7 @@ static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t
 		val = _fp_to_integer(t->rt_param.gpu_migration_est[i].est);
 	}
 
-	// minimum value is 1 (val is 0 if we haven't run with local affinity yet)
-	// TODO: pick a better default min-value. 1 is too small. perhaps
-	// task execution time?
-	return ((val > 0) ? val : 1);
+	return ((val > 0) ? val : dist+1);
 }
 
-#endif
\ No newline at end of file
+#endif
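
Note: the fallback in get_gpu_estimate() changes from a flat 1 to dist+1, so a task with no recorded sample for a given migration distance still sees nearer GPUs as cheaper than farther ones. A minimal caller-side sketch of why that ordering matters, assuming a hypothetical gpu_migration_distance() helper and GPU count (neither is part of this patch):

	/* Hypothetical affinity-aware GPU pick: with no samples recorded yet,
	 * get_gpu_estimate(t, d) now returns d+1, so closer GPUs still win. */
	int gpu, best_gpu = -1;
	lt_t cost, best_cost = ~0ull;
	for (gpu = 0; gpu < num_gpus; ++gpu) {
		gpu_migration_dist_t d = gpu_migration_distance(tsk_rt(t)->last_gpu, gpu);
		cost = get_gpu_estimate(t, d);
		if (cost < best_cost) {
			best_cost = cost;
			best_gpu = gpu;
		}
	}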
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
index 580728051d4e..97c9577141db 100644
--- a/include/litmus/nvidia_info.h
+++ b/include/litmus/nvidia_info.h
@@ -12,6 +12,7 @@
 #define NV_MAX_SIMULT_USERS CONFIG_NV_MAX_SIMULT_USERS
 
 int init_nvidia_info(void);
+void shutdown_nvidia_info(void);
 
 int is_nvidia_func(void* func_addr);
 
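
Note: shutdown_nvidia_info() is added as the teardown counterpart to init_nvidia_info(). A hedged sketch of the expected pairing (the plugin hook names are placeholders, not taken from this patch):

	/* Hypothetical activate/deactivate hooks showing the init/shutdown pairing. */
	static long example_activate_plugin(void)
	{
		return init_nvidia_info();	/* existing setup entry point */
	}

	static long example_deactivate_plugin(void)
	{
		shutdown_nvidia_info();		/* teardown added by this patch */
		return 0;
	}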
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 4553521146cc..0198884eab86 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -26,6 +26,7 @@ static inline int lt_after_eq(lt_t a, lt_t b)
 typedef enum {
 	RT_CLASS_HARD,
 	RT_CLASS_SOFT,
+	RT_CLASS_SOFT_W_SLIP,
 	RT_CLASS_BEST_EFFORT
 } task_class_t;
 
@@ -189,8 +190,8 @@ struct rt_param {
 	long unsigned int held_gpus;	// bitmap of held GPUs.
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-	fp_t gpu_fb_param_a;
-	fp_t gpu_fb_param_b;
+	fp_t gpu_fb_param_a[MIG_LAST+1];
+	fp_t gpu_fb_param_b[MIG_LAST+1];
 
 	gpu_migration_dist_t gpu_migration;
 	int last_gpu;
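
Note: the feedback parameters gpu_fb_param_a/b become arrays with one slot per migration distance (MIG_LAST+1 entries), so estimator gains can differ for near and far migrations; RT_CLASS_SOFT_W_SLIP is a new task class alongside the existing ones. A minimal sketch of the per-distance indexing, assuming the surrounding update code (only the fields shown in the diff are real):

	/* Hypothetical read of the per-distance feedback gains. */
	gpu_migration_dist_t d = tsk_rt(t)->gpu_migration;	/* distance of the current migration */
	fp_t a = tsk_rt(t)->gpu_fb_param_a[d];
	fp_t b = tsk_rt(t)->gpu_fb_param_b[d];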
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 232c7588d103..b1b71f6c5f0c 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -10,7 +10,8 @@ struct st_trace_header {
 	u8	type;		/* Of what type is this record?  */
 	u8	cpu;		/* On which CPU was it recorded? */
 	u16	pid;		/* PID of the task.              */
-	u32	job;		/* The job sequence number.      */
+	u32	job:24;		/* The job sequence number.      */
+	u8	extra;
 } __attribute__((packed));
 
 #define ST_NAME_LEN 16
@@ -136,6 +137,22 @@ struct st_nv_interrupt_end_data {
 	u32 serialNumber;
 } __attribute__((packed));
 
+struct st_prediction_err_data {
+	u64 distance;
+	u64 rel_err;
+} __attribute__((packed));
+
+struct st_migration_data {
+	u64 observed;
+	u64 estimated;
+} __attribute__((packed));
+
+struct migration_info {
+	u64 observed;
+	u64 estimated;
+	u8 distance;
+} __attribute__((packed));
+
 #define DATA(x) struct st_ ## x ## _data x;
 
 typedef enum {
@@ -160,6 +177,9 @@ typedef enum {
 	ST_EFF_PRIO_CHANGE,
 	ST_NV_INTERRUPT_BEGIN,
 	ST_NV_INTERRUPT_END,
+
+	ST_PREDICTION_ERR,
+	ST_MIGRATION,
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -187,6 +207,9 @@ struct st_event_record {
 		DATA(effective_priority_change);
 		DATA(nv_interrupt_begin);
 		DATA(nv_interrupt_end);
+
+		DATA(prediction_err);
+		DATA(migration);
 	} data;
 } __attribute__((packed));
 
@@ -259,6 +282,19 @@ feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
 feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
 					unsigned long unused);
 
+feather_callback void do_sched_trace_prediction_err(unsigned long id,
+					struct task_struct* task,
+					gpu_migration_dist_t* distance,
+					fp_t* rel_err);
+
+
+
+
+
+feather_callback void do_sched_trace_migration(unsigned long id,
+					struct task_struct* task,
+					struct migration_info* mig_info);
+
 
 /* returns true if we're tracing an interrupt on current CPU */
 /* int is_interrupt_tracing_active(void); */
@@ -331,6 +367,12 @@ feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
 #define sched_trace_nv_interrupt_end(d) \
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d)
 
+#define sched_trace_prediction_err(t, dist, rel_err) \
+	SCHED_TRACE3(SCHED_TRACE_BASE_ID + 20, do_sched_trace_prediction_err, t, dist, rel_err)
+
+#define sched_trace_migration(t, mig_info) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 21, do_sched_trace_migration, t, mig_info)
+
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
 #endif /* __KERNEL__ */
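
Note: two trace records are added here: ST_PREDICTION_ERR carries the migration distance and the relative error of an execution-time estimate, and ST_MIGRATION carries observed vs. estimated cost plus distance, emitted through the SCHED_TRACE_BASE_ID + 20/21 hooks above; st_trace_header also narrows job to a 24-bit field to free one byte for extra. A hedged sketch of a possible call site once a GPU section finishes (observed_exec_time, dist, and rel_err are placeholders, not defined by this patch):

	/* Hypothetical emission of the new trace records. */
	struct migration_info mig_info = {
		.observed  = observed_exec_time,		/* measured cost, in ns */
		.estimated = get_gpu_estimate(t, dist),		/* prediction that was used */
		.distance  = dist,
	};
	sched_trace_migration(t, &mig_info);
	sched_trace_prediction_err(t, &dist, &rel_err);		/* rel_err is an fp_t */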