#ifndef LITMUS_GPU_AFFINITY_H
#define LITMUS_GPU_AFFINITY_H

#include <litmus/rt_param.h>
#include <litmus/sched_plugin.h>
#include <litmus/litmus.h>
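
/* Fold a newly observed GPU execution time ('observed', in litmus time
 * units) into the task's GPU execution-time estimate. */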
void update_gpu_estimate(struct task_struct* t, lt_t observed);
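
/* Migration distance between GPUs 'a' and 'b' in the platform's GPU
 * topology (e.g. MIG_LOCAL vs. farther migration classes). */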
gpu_migration_dist_t gpu_migration_distance(int a, int b);
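
/* Clear the task's accumulated GPU usage time. */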
static inline void reset_gpu_tracker(struct task_struct* t)
{
	t->rt_param.accum_gpu_time = 0;
}
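
/* Record the time at which the task started using the GPU. */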
static inline void start_gpu_tracker(struct task_struct* t)
{
	t->rt_param.gpu_time_stamp = litmus_clock();
}
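
/* Add the time elapsed since the matching start_gpu_tracker() call to
 * the task's accumulated GPU time. */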
static inline void stop_gpu_tracker(struct task_struct* t)
{
	lt_t now = litmus_clock();
	t->rt_param.accum_gpu_time += (now - t->rt_param.gpu_time_stamp);
}
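
/* Total GPU time accumulated by the tracker so far. */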
static inline lt_t get_gpu_time(struct task_struct* t)
{
	return t->rt_param.accum_gpu_time;
}
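
/*
 * Expected usage of the tracker (a sketch; the actual call sites live in
 * the scheduler/locking code, not in this header):
 *
 *	reset_gpu_tracker(t);
 *	start_gpu_tracker(t);
 *	... task executes on the GPU ...
 *	stop_gpu_tracker(t);
 *	update_gpu_estimate(t, get_gpu_time(t));
 */

/* Estimated GPU execution time for a migration of distance 'dist'.
 * MIG_NONE is treated as MIG_LOCAL. The estimate is lower-bounded by the
 * estimates for all closer distances; if no observation has been recorded
 * yet (the estimate is zero), a small positive fallback of dist+1 is
 * returned instead. */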
static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist)
{
	int i;
	lt_t val;

	if (dist == MIG_NONE) {
		dist = MIG_LOCAL;
	}

	val = t->rt_param.gpu_migration_est[dist].avg;

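	/* A farther migration should never be estimated as cheaper than a
	 * closer one, so take the maximum over all closer distances too. */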
	for (i = dist - 1; i >= 0; --i) {
		if (t->rt_param.gpu_migration_est[i].avg > val) {
			val = t->rt_param.gpu_migration_est[i].avg;
		}
	}

#if 0
	// int i;
	// fpbuf_t temp = _fp_to_integer(t->rt_param.gpu_migration_est[dist].est);
	// lt_t val = (temp >= 0) ? temp : 0; // never allow negative estimates...
	lt_t val = t->rt_param.gpu_migration_est[dist].avg;
	// WARN_ON(temp < 0);

	// lower-bound a distant migration to be at least equal to the level
	// below it.
	// for(i = dist-1; (val == 0) && (i >= MIG_LOCAL); --i) {
	//	val = _fp_to_integer(t->rt_param.gpu_migration_est[i].est);
	// }
#endif

	return ((val > 0) ? val : dist + 1);
}

#endif /* LITMUS_GPU_AFFINITY_H */