author    | Glenn Elliott <gelliott@cs.unc.edu> | 2012-04-18 16:24:56 -0400
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-04-18 16:24:56 -0400
commit    | 6ab36ca992441f7353840c70fc91d99a500a940e (patch)
tree      | f7b89f379a04a681a80eee32f86a2405b162616f
parent    | 440aa2083245b81583980e3f4177f3b4cc805556 (diff)
Fixed and tested aff-aware KFMLP. (finally!)
-rw-r--r-- | include/litmus/fpmath.h          |  21
-rw-r--r-- | include/litmus/gpu_affinity.h    |  16
-rw-r--r-- | include/litmus/kexclu_affinity.h |   4
-rw-r--r-- | include/litmus/litmus.h          |   3
-rw-r--r-- | include/litmus/nvidia_info.h     |   2
-rw-r--r-- | include/litmus/rt_param.h        |   8
-rw-r--r-- | litmus/edf_common.c              |  76
-rw-r--r-- | litmus/fdso.c                    |   5
-rw-r--r-- | litmus/gpu_affinity.c            |  87
-rw-r--r-- | litmus/kexclu_affinity.c         |  10
-rw-r--r-- | litmus/kfmlp_lock.c              | 173
-rw-r--r-- | litmus/litmus.c                  |  56
-rw-r--r-- | litmus/locking.c                 |   5
-rw-r--r-- | litmus/nvidia_info.c             |  16
-rw-r--r-- | litmus/sched_gsn_edf.c           |  31
-rw-r--r-- | litmus/sched_plugin.c            |   2

16 files changed, 334 insertions, 181 deletions
diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
index 35f81683d6ab..d062b5ab5dc2 100644
--- a/include/litmus/fpmath.h
+++ b/include/litmus/fpmath.h
@@ -1,6 +1,11 @@ | |||
1 | #ifndef __FP_MATH_H__ | 1 | #ifndef __FP_MATH_H__ |
2 | #define __FP_MATH_H__ | 2 | #define __FP_MATH_H__ |
3 | 3 | ||
4 | #ifndef __KERNEL__ | ||
5 | #include <stdint.h> | ||
6 | #define abs(x) (((x) < 0) ? -(x) : x) | ||
7 | #endif | ||
8 | |||
4 | // Use 64-bit because we want to track things at the nanosecond scale. | 9 | // Use 64-bit because we want to track things at the nanosecond scale. |
5 | // This can lead to very large numbers. | 10 | // This can lead to very large numbers. |
6 | typedef int64_t fpbuf_t; | 11 | typedef int64_t fpbuf_t; |
@@ -11,7 +16,6 @@ typedef struct | |||
11 | 16 | ||
12 | #define FP_SHIFT 10 | 17 | #define FP_SHIFT 10 |
13 | #define ROUND_BIT (FP_SHIFT - 1) | 18 | #define ROUND_BIT (FP_SHIFT - 1) |
14 | #define ONE FP(1) | ||
15 | 19 | ||
16 | #define _fp(x) ((fp_t) {x}) | 20 | #define _fp(x) ((fp_t) {x}) |
17 | 21 | ||
@@ -29,8 +33,6 @@ static inline fp_t _frac(fpbuf_t a, fpbuf_t b) | |||
29 | return _fp(FP(a).val / (b)); | 33 | return _fp(FP(a).val / (b)); |
30 | } | 34 | } |
31 | 35 | ||
32 | #ifdef __KERNEL__ | ||
33 | |||
34 | static inline fpbuf_t _point(fp_t x) | 36 | static inline fpbuf_t _point(fp_t x) |
35 | { | 37 | { |
36 | return (x.val % (1 << FP_SHIFT)); | 38 | return (x.val % (1 << FP_SHIFT)); |
@@ -60,11 +62,19 @@ static inline fp_t _mul(fp_t a, fp_t b) | |||
60 | 62 | ||
61 | static inline fp_t _div(fp_t a, fp_t b) | 63 | static inline fp_t _div(fp_t a, fp_t b) |
62 | { | 64 | { |
63 | /* try not to overflow */ | 65 | #if !defined(__KERNEL__) && !defined(unlikely) |
64 | if (unlikely( a.val > (2l << (BITS_PER_LONG - FP_SHIFT)) )) | 66 | #define unlikely(x) (x) |
67 | #define DO_UNDEF_UNLIKELY | ||
68 | #endif | ||
69 | /* try not to overflow */ | ||
70 | if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) )) | ||
65 | return _fp((a.val / b.val) << FP_SHIFT); | 71 | return _fp((a.val / b.val) << FP_SHIFT); |
66 | else | 72 | else |
67 | return _fp((a.val << FP_SHIFT) / b.val); | 73 | return _fp((a.val << FP_SHIFT) / b.val); |
74 | #ifdef DO_UNDEF_UNLIKELY | ||
75 | #undef unlikely | ||
76 | #undef DO_UNDEF_UNLIKELY | ||
77 | #endif | ||
68 | } | 78 | } |
69 | 79 | ||
70 | static inline fp_t _add(fp_t a, fp_t b) | 80 | static inline fp_t _add(fp_t a, fp_t b) |
@@ -131,4 +141,3 @@ static inline fp_t _max(fp_t a, fp_t b) | |||
131 | return a; | 141 | return a; |
132 | } | 142 | } |
133 | #endif | 143 | #endif |
134 | #endif | ||
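
The fpmath.h hunks above make the header usable from userspace (the `!__KERNEL__` shims for `stdint.h`, `abs`, and `unlikely`) and size the `_div()` overflow guard from `sizeof(fpbuf_t)` rather than `BITS_PER_LONG`. A minimal userspace sketch of the same Q10 fixed-point division, mirroring only what the hunks show (all names below are local to the sketch, not part of the commit):

```c
/* Illustrative userspace sketch of the _div() overflow guard; not the header itself. */
#include <stdint.h>
#include <stdio.h>

#define FP_SHIFT 10
typedef int64_t fpbuf_t;
typedef struct { fpbuf_t val; } fp_t;

static fp_t make_fp(fpbuf_t x) { return (fp_t){ x << FP_SHIFT }; }

static fp_t fp_div(fp_t a, fp_t b)
{
	/* Shift the numerator up only when that cannot overflow; otherwise
	 * divide first and accept the loss of fractional precision. */
	if (a.val > ((fpbuf_t)2 << ((sizeof(fpbuf_t) * 8) - FP_SHIFT)))
		return (fp_t){ (a.val / b.val) << FP_SHIFT };
	else
		return (fp_t){ (a.val << FP_SHIFT) / b.val };
}

int main(void)
{
	fp_t q = fp_div(make_fp(7), make_fp(2));	/* 7 / 2 = 3.5 */
	printf("%lld + %lld/1024\n",
	       (long long)(q.val >> FP_SHIFT),
	       (long long)(q.val & ((1 << FP_SHIFT) - 1)));	/* 3 + 512/1024 */
	return 0;
}
```
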
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
index c29ff3de997c..ca4d10b93203 100644
--- a/include/litmus/gpu_affinity.h
+++ b/include/litmus/gpu_affinity.h
@@ -31,9 +31,21 @@ static inline lt_t get_gpu_time(struct task_struct* t) | |||
31 | 31 | ||
32 | static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist) | 32 | static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t dist) |
33 | { | 33 | { |
34 | lt_t val = _fp_to_integer(t->rt_param.gpu_migration_est[dist].est); | 34 | int i; |
35 | fpbuf_t temp = _fp_to_integer(t->rt_param.gpu_migration_est[dist].est); | ||
36 | lt_t val = (temp >= 0) ? temp : 0; // never allow negative estimates... | ||
35 | 37 | ||
36 | // minimum value is 1. | 38 | WARN_ON(temp < 0); |
39 | |||
40 | // lower-bound a distant migration to be at least equal to the level | ||
41 | // below it. | ||
42 | for(i = dist-1; (val == 0) && (i >= MIG_LOCAL); --i) { | ||
43 | val = _fp_to_integer(t->rt_param.gpu_migration_est[i].est); | ||
44 | } | ||
45 | |||
46 | // minimum value is 1 (val is 0 if we haven't run with local affinity yet) | ||
47 | // TODO: pick a better default min-value. 1 is too small. perhaps | ||
48 | // task execution time? | ||
37 | return ((val > 0) ? val : 1); | 49 | return ((val > 0) ? val : 1); |
38 | } | 50 | } |
39 | 51 | ||
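
The new `get_gpu_estimate()` clamps negative estimates to zero and, when the requested migration distance has no data yet, falls back to the nearest shorter distance before applying the minimum of 1. A standalone sketch of that fallback, using a plain array in place of `tsk_rt(t)->gpu_migration_est[]`:

```c
/* Sketch of the fallback logic only; toy data, not kernel code. */
#include <stdio.h>

enum { MIG_LOCAL, MIG_NEAR, MIG_MED, MIG_FAR, MIG_NONE, NUM_DIST };

static long long gpu_estimate(const long long est[NUM_DIST], int dist)
{
	long long val = (est[dist] >= 0) ? est[dist] : 0;	/* never allow negative estimates */
	int i;

	/* lower-bound a distant migration by the level below it */
	for (i = dist - 1; val == 0 && i >= MIG_LOCAL; --i)
		val = est[i];

	return (val > 0) ? val : 1;	/* minimum value is 1 */
}

int main(void)
{
	long long est[NUM_DIST] = { 5000, 0, 0, 0, 0 };	/* only local affinity observed so far */
	printf("MIG_FAR estimate: %lld\n", gpu_estimate(est, MIG_FAR));	/* prints 5000 */
	return 0;
}
```
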
diff --git a/include/litmus/kexclu_affinity.h b/include/litmus/kexclu_affinity.h
index f5b5e7b1f359..51e097f8ec54 100644
--- a/include/litmus/kexclu_affinity.h
+++ b/include/litmus/kexclu_affinity.h
@@ -28,4 +28,8 @@ struct affinity_observer_ops | |||
28 | 28 | ||
29 | struct litmus_lock* get_lock_from_od(int od); | 29 | struct litmus_lock* get_lock_from_od(int od); |
30 | 30 | ||
31 | void affinity_observer_new(struct affinity_observer* aff, | ||
32 | struct affinity_observer_ops* ops, | ||
33 | struct affinity_observer_args* args); | ||
34 | |||
31 | #endif | 35 | #endif |
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index e2df49b171c5..71df378236f5 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -125,9 +125,6 @@ static inline lt_t litmus_clock(void) | |||
125 | #define earlier_release(a, b) (lt_before(\ | 125 | #define earlier_release(a, b) (lt_before(\ |
126 | (a)->rt_param.job_params.release,\ | 126 | (a)->rt_param.job_params.release,\ |
127 | (b)->rt_param.job_params.release)) | 127 | (b)->rt_param.job_params.release)) |
128 | #define shorter_period(a, b) (lt_before(\ | ||
129 | (a)->rt_param.task_params.period,\ | ||
130 | (b)->rt_param.task_params.period)) | ||
131 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); | 128 | void preempt_if_preemptable(struct task_struct* t, int on_cpu); |
132 | 129 | ||
133 | #ifdef CONFIG_LITMUS_LOCKING | 130 | #ifdef CONFIG_LITMUS_LOCKING |
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
index dd41c4c72b85..856c575374d3 100644
--- a/include/litmus/nvidia_info.h
+++ b/include/litmus/nvidia_info.h
@@ -26,7 +26,7 @@ int init_nv_device_reg(void); | |||
26 | //int get_nv_device_id(struct task_struct* owner); | 26 | //int get_nv_device_id(struct task_struct* owner); |
27 | 27 | ||
28 | 28 | ||
29 | int reg_nv_device(int reg_device_id, int register_device); | 29 | int reg_nv_device(int reg_device_id, int register_device, struct task_struct *t); |
30 | 30 | ||
31 | struct task_struct* get_nv_max_device_owner(u32 target_device_id); | 31 | struct task_struct* get_nv_max_device_owner(u32 target_device_id); |
32 | //int is_nv_device_owner(u32 target_device_id); | 32 | //int is_nv_device_owner(u32 target_device_id); |
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index ad46ab4c64cc..11f081527545 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -126,12 +126,14 @@ enum klitirqd_sem_status | |||
126 | 126 | ||
127 | typedef enum gpu_migration_dist | 127 | typedef enum gpu_migration_dist |
128 | { | 128 | { |
129 | // TODO: Make this variable against NR_NVIDIA_GPUS | ||
129 | MIG_LOCAL = 0, | 130 | MIG_LOCAL = 0, |
130 | MIG_NEAR = 1, | 131 | MIG_NEAR = 1, |
131 | MIG_MED = 2, | 132 | MIG_MED = 2, |
132 | MIG_FAR = 3, | 133 | MIG_FAR = 3, // 8 GPUs in a binary tree hierarchy |
134 | MIG_NONE = 4, | ||
133 | 135 | ||
134 | MIG_LAST = MIG_FAR | 136 | MIG_LAST = MIG_NONE |
135 | } gpu_migration_dist_t; | 137 | } gpu_migration_dist_t; |
136 | 138 | ||
137 | typedef struct feedback_est{ | 139 | typedef struct feedback_est{ |
@@ -190,7 +192,7 @@ struct rt_param { | |||
190 | 192 | ||
191 | gpu_migration_dist_t gpu_migration; | 193 | gpu_migration_dist_t gpu_migration; |
192 | int last_gpu; | 194 | int last_gpu; |
193 | feedback_est_t gpu_migration_est[MIG_LAST]; // local, near, med, far | 195 | feedback_est_t gpu_migration_est[MIG_LAST+1]; // local, near, med, far |
194 | 196 | ||
195 | lt_t accum_gpu_time; | 197 | lt_t accum_gpu_time; |
196 | lt_t gpu_time_stamp; | 198 | lt_t gpu_time_stamp; |
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 989757cdcc5c..250808e934a6 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -45,8 +45,9 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
45 | 45 | ||
46 | 46 | ||
47 | /* check for NULL tasks */ | 47 | /* check for NULL tasks */ |
48 | if (!first || !second) | 48 | if (!first || !second) { |
49 | return first && !second; | 49 | return first && !second; |
50 | } | ||
50 | 51 | ||
51 | #ifdef CONFIG_LITMUS_LOCKING | 52 | #ifdef CONFIG_LITMUS_LOCKING |
52 | /* Check for EFFECTIVE priorities. Change task | 53 | /* Check for EFFECTIVE priorities. Change task |
@@ -73,59 +74,68 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
73 | /* first_task is boosted, how about second_task? */ | 74 | /* first_task is boosted, how about second_task? */ |
74 | if (!is_priority_boosted(second_task) || | 75 | if (!is_priority_boosted(second_task) || |
75 | lt_before(get_boost_start(first_task), | 76 | lt_before(get_boost_start(first_task), |
76 | get_boost_start(second_task))) | 77 | get_boost_start(second_task))) { |
77 | return 1; | 78 | return 1; |
78 | else | 79 | } |
80 | else { | ||
79 | return 0; | 81 | return 0; |
80 | } else if (unlikely(is_priority_boosted(second_task))) | 82 | } |
83 | } | ||
84 | else if (unlikely(is_priority_boosted(second_task))) { | ||
81 | /* second_task is boosted, first is not*/ | 85 | /* second_task is boosted, first is not*/ |
82 | return 0; | 86 | return 0; |
87 | } | ||
83 | 88 | ||
84 | #endif | 89 | #endif |
85 | 90 | ||
86 | // // rate-monotonic for testing | 91 | // // rate-monotonic for testing |
87 | // return !is_realtime(second_task) || | 92 | // if (!is_realtime(second_task)) { |
88 | // | 93 | // return true; |
89 | // /* is the deadline of the first task earlier? | 94 | // } |
90 | // * Then it has higher priority. | 95 | // |
91 | // */ | 96 | // if (shorter_period(first_task, second_task)) { |
92 | // shorter_period(first_task, second_task) || | 97 | // return true; |
93 | // | 98 | // } |
94 | // /* Do we have a deadline tie? | 99 | // |
95 | // * Then break by PID. | 100 | // if (get_period(first_task) == get_period(second_task)) { |
96 | // */ | 101 | // if (first_task->pid < second_task->pid) { |
97 | // (get_period(first_task) == get_period(second_task) && | 102 | // return true; |
98 | // (first_task->pid < second_task->pid || | 103 | // } |
99 | // | 104 | // else if (first_task->pid == second_task->pid) { |
100 | // /* If the PIDs are the same then the task with the EFFECTIVE | 105 | // return !second->rt_param.inh_task; |
101 | // * priority wins. | 106 | // } |
102 | // */ | 107 | // } |
103 | // (first_task->pid == second_task->pid && | 108 | |
104 | // !second->rt_param.inh_task))); | 109 | if (!is_realtime(second_task)) { |
105 | |||
106 | if (!is_realtime(second_task)) | ||
107 | return true; | 110 | return true; |
111 | } | ||
108 | 112 | ||
109 | if (earlier_deadline(first_task, second_task)) | 113 | if (earlier_deadline(first_task, second_task)) { |
110 | return true; | 114 | return true; |
111 | 115 | } | |
112 | if (get_deadline(first_task) == get_deadline(second_task)) { | 116 | if (get_deadline(first_task) == get_deadline(second_task)) { |
117 | |||
113 | if (shorter_period(first_task, second_task)) { | 118 | if (shorter_period(first_task, second_task)) { |
114 | return true; | 119 | return true; |
115 | } | 120 | } |
116 | if (get_rt_period(first_task) == get_rt_period(second_task)) { | 121 | if (get_rt_period(first_task) == get_rt_period(second_task)) { |
117 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
118 | if (first_task->rt_param.is_proxy_thread < | ||
119 | second_task->rt_param.is_proxy_thread) { | ||
120 | return true; | ||
121 | } | ||
122 | #endif | ||
123 | if (first_task->pid < second_task->pid) { | 122 | if (first_task->pid < second_task->pid) { |
124 | return true; | 123 | return true; |
125 | } | 124 | } |
126 | if (first_task->pid == second_task->pid) { | 125 | if (first_task->pid == second_task->pid) { |
126 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
127 | if (first_task->rt_param.is_proxy_thread < | ||
128 | second_task->rt_param.is_proxy_thread) { | ||
129 | return true; | ||
130 | } | ||
131 | if(first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread) { | ||
132 | return !second->rt_param.inh_task; | ||
133 | } | ||
134 | #else | ||
127 | return !second->rt_param.inh_task; | 135 | return !second->rt_param.inh_task; |
128 | } | 136 | #endif |
137 | } | ||
138 | |||
129 | } | 139 | } |
130 | } | 140 | } |
131 | 141 | ||
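
The rewritten comparison reads as a cascade: earlier deadline, then shorter period, then lower PID, then (under CONFIG_LITMUS_SOFTIRQD) the proxy-thread flag, and finally whether the second task's priority is inherited. A toy version of that cascade, ignoring boosting and the proxy-thread case; the struct below is a stand-in, not `struct task_struct`:

```c
/* Illustrative tie-break cascade only; toy types. */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
	long long deadline;
	long long period;
	int pid;
	bool has_inh_task;	/* stands in for rt_param.inh_task != NULL */
};

static bool toy_edf_higher_prio(const struct toy_task *a, const struct toy_task *b)
{
	if (a->deadline != b->deadline)
		return a->deadline < b->deadline;	/* earlier deadline wins */
	if (a->period != b->period)
		return a->period < b->period;		/* then shorter period */
	if (a->pid != b->pid)
		return a->pid < b->pid;			/* then lower PID */
	/* same identity: the task whose priority is NOT inherited wins */
	return !b->has_inh_task;
}

int main(void)
{
	struct toy_task a = { 100, 10, 42, false };
	struct toy_task b = { 100, 10, 43, false };
	printf("a higher prio than b? %d\n", toy_edf_higher_prio(&a, &b));	/* 1: PID tie-break */
	return 0;
}
```
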
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 0bacea179660..5a4f45c3251b 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -28,8 +28,9 @@ static const struct fdso_ops* fdso_ops[] = { | |||
28 | &generic_lock_ops, /* RSM_MUTEX */ | 28 | &generic_lock_ops, /* RSM_MUTEX */ |
29 | &generic_lock_ops, /* IKGLP_SEM */ | 29 | &generic_lock_ops, /* IKGLP_SEM */ |
30 | &generic_lock_ops, /* KFMLP_SEM */ | 30 | &generic_lock_ops, /* KFMLP_SEM */ |
31 | &generic_affinity_ops, /* IKGLP_AFF_OBS */ | 31 | &generic_affinity_ops, /* IKGLP_GPU_AFF_OBS */ |
32 | &generic_affinity_ops, /* KFMLP_AFF_OBS */ | 32 | &generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */ |
33 | &generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */ | ||
33 | }; | 34 | }; |
34 | 35 | ||
35 | static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) | 36 | static int fdso_create(void** obj_ref, obj_type_t type, void* __user config) |
diff --git a/litmus/gpu_affinity.c b/litmus/gpu_affinity.c
index 43171390bed7..87349fe10a9b 100644
--- a/litmus/gpu_affinity.c
+++ b/litmus/gpu_affinity.c
@@ -5,15 +5,16 @@ | |||
5 | #include <litmus/litmus.h> | 5 | #include <litmus/litmus.h> |
6 | #include <litmus/gpu_affinity.h> | 6 | #include <litmus/gpu_affinity.h> |
7 | 7 | ||
8 | static void update_estimate(feedback_est_t* fb, fp_t* a, fp_t* b, lt_t observed) | 8 | #define OBSERVATION_CAP 2*1e9 |
9 | |||
10 | static void update_estimate(feedback_est_t* fb, fp_t a, fp_t b, lt_t observed) | ||
9 | { | 11 | { |
10 | fp_t err, new; | 12 | fp_t err, new; |
11 | fp_t actual = _frac(observed, 1); // observed is in ns, so beware of overflow! | 13 | fp_t actual = _integer_to_fp(observed); |
12 | 14 | ||
13 | err = _sub(actual, fb->est); | 15 | err = _sub(actual, fb->est); |
14 | new = _add(_mul(*a, err), | 16 | new = _add(_mul(a, err), _mul(b, fb->accum_err)); |
15 | _mul(*b, fb->accum_err)); | 17 | |
16 | |||
17 | fb->est = new; | 18 | fb->est = new; |
18 | fb->accum_err = _add(fb->accum_err, err); | 19 | fb->accum_err = _add(fb->accum_err, err); |
19 | } | 20 | } |
@@ -22,47 +23,69 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed) | |||
22 | { | 23 | { |
23 | feedback_est_t *fb = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]); | 24 | feedback_est_t *fb = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]); |
24 | 25 | ||
25 | TRACE_TASK(t, "GPU est update before (dist = %d): %d.%d\n", | 26 | WARN_ON(tsk_rt(t)->gpu_migration > MIG_LAST); |
26 | tsk_rt(t)->gpu_migration, | ||
27 | _fp_to_integer(fb->est), | ||
28 | _point(fb->est)); | ||
29 | |||
30 | update_estimate(fb, | ||
31 | &tsk_rt(t)->gpu_fb_param_a, | ||
32 | &tsk_rt(t)->gpu_fb_param_b, | ||
33 | observed); | ||
34 | 27 | ||
35 | TRACE_TASK(t, "GPU est update after (dist = %d): %d.%d\n", | 28 | if(unlikely(fb->est.val == 0)) { |
29 | // kludge-- cap observed values to prevent whacky estimations. | ||
30 | // whacky stuff happens during the first few jobs. | ||
31 | if(unlikely(observed > OBSERVATION_CAP)) { | ||
32 | TRACE_TASK(t, "Crazy observation was capped: %llu -> %llu\n", | ||
33 | observed, OBSERVATION_CAP); | ||
34 | observed = OBSERVATION_CAP; | ||
35 | } | ||
36 | |||
37 | // take the first observation as our estimate | ||
38 | // (initial value of 0 was bogus anyhow) | ||
39 | fb->est = _integer_to_fp(observed); | ||
40 | fb->accum_err = _div(fb->est, _integer_to_fp(2)); // ...seems to work. | ||
41 | } | ||
42 | else { | ||
43 | update_estimate(fb, | ||
44 | tsk_rt(t)->gpu_fb_param_a, | ||
45 | tsk_rt(t)->gpu_fb_param_b, | ||
46 | observed); | ||
47 | |||
48 | if(_fp_to_integer(fb->est) <= 0) { | ||
49 | // TODO: talk to Jonathan about how well this works. | ||
50 | // Maybe we should average the observed and est instead? | ||
51 | TRACE_TASK(t, "Invalid estimate. Patching.\n"); | ||
52 | fb->est = _integer_to_fp(observed); | ||
53 | fb->accum_err = _div(fb->est, _integer_to_fp(2)); // ...seems to work. | ||
54 | } | ||
55 | } | ||
56 | |||
57 | TRACE_TASK(t, "GPU est update after (dist = %d, obs = %llu): %d.%d\n", | ||
36 | tsk_rt(t)->gpu_migration, | 58 | tsk_rt(t)->gpu_migration, |
59 | observed, | ||
37 | _fp_to_integer(fb->est), | 60 | _fp_to_integer(fb->est), |
38 | _point(fb->est)); | 61 | _point(fb->est)); |
39 | } | 62 | } |
40 | 63 | ||
41 | gpu_migration_dist_t gpu_migration_distance(int a, int b) | 64 | gpu_migration_dist_t gpu_migration_distance(int a, int b) |
42 | { | 65 | { |
43 | // GPUs organized in a binary hierarchy, no more than 2^MIG_LAST GPUs | 66 | // GPUs organized in a binary hierarchy, no more than 2^MIG_FAR GPUs |
44 | int i; | 67 | int i; |
45 | int level; | 68 | int dist; |
46 | int max_level; | ||
47 | 69 | ||
48 | if(unlikely(a < 0 || b < 0)) { | 70 | if(likely(a >= 0 && b >= 0)) { |
49 | return MIG_LAST; | 71 | for(i = 0; i <= MIG_FAR; ++i) { |
72 | if(a>>i == b>>i) { | ||
73 | dist = i; | ||
74 | goto out; | ||
75 | } | ||
76 | } | ||
77 | dist = MIG_NONE; // hopefully never reached. | ||
78 | TRACE_CUR("WARNING: GPU distance too far! %d -> %d\n", a, b); | ||
50 | } | 79 | } |
51 | 80 | else { | |
52 | if(a == b) { | 81 | dist = MIG_NONE; |
53 | return MIG_LOCAL; | ||
54 | } | 82 | } |
55 | 83 | ||
56 | for(i = 1, level = 2, max_level = 1<<MIG_LAST; | 84 | out: |
57 | level <= max_level; | 85 | TRACE_CUR("Distance %d -> %d is %d\n", |
58 | ++i, level <<= 1) { | 86 | a, b, dist); |
59 | if(a/level == b/level) { | ||
60 | return (gpu_migration_dist_t)(i); | ||
61 | } | ||
62 | } | ||
63 | 87 | ||
64 | WARN_ON(1); | 88 | return dist; |
65 | return MIG_LAST; | ||
66 | } | 89 | } |
67 | 90 | ||
68 | 91 | ||
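
The reworked `gpu_migration_distance()` treats GPU indices as leaves of a binary tree: the distance is the number of right-shifts needed before the two indices agree, with MIG_NONE for invalid GPUs. A userspace sketch of the same computation (enum values taken from rt_param.h; everything else is local to the sketch):

```c
/* Illustrative version of the shared-prefix distance computation. */
#include <stdio.h>

enum mig_dist { MIG_LOCAL, MIG_NEAR, MIG_MED, MIG_FAR, MIG_NONE };

static enum mig_dist gpu_migration_distance(int a, int b)
{
	int i;

	if (a < 0 || b < 0)
		return MIG_NONE;

	for (i = 0; i <= MIG_FAR; ++i)
		if ((a >> i) == (b >> i))
			return (enum mig_dist)i;	/* first level where indices share a prefix */

	return MIG_NONE;	/* farther apart than 2^MIG_FAR GPUs; should not happen */
}

int main(void)
{
	/* with 8 GPUs in the tree: 0 and 1 are siblings, 0 and 4 are across the root */
	printf("%d %d %d\n",
	       gpu_migration_distance(3, 3),	/* 0 = MIG_LOCAL */
	       gpu_migration_distance(0, 1),	/* 1 = MIG_NEAR  */
	       gpu_migration_distance(0, 4));	/* 3 = MIG_FAR   */
	return 0;
}
```
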
diff --git a/litmus/kexclu_affinity.c b/litmus/kexclu_affinity.c
index a06df3e1acbd..552179bf797d 100644
--- a/litmus/kexclu_affinity.c
+++ b/litmus/kexclu_affinity.c
@@ -39,7 +39,7 @@ static int create_generic_aff_obs(void** obj_ref, obj_type_t type, void* __user | |||
39 | err = litmus->allocate_aff_obs(&aff_obs, type, arg); | 39 | err = litmus->allocate_aff_obs(&aff_obs, type, arg); |
40 | if (err == 0) { | 40 | if (err == 0) { |
41 | BUG_ON(!aff_obs->lock); | 41 | BUG_ON(!aff_obs->lock); |
42 | aff_obs->ident = atomic_inc_return(&aff_obs_id_gen); | 42 | aff_obs->type = type; |
43 | *obj_ref = aff_obs; | 43 | *obj_ref = aff_obs; |
44 | } | 44 | } |
45 | return err; | 45 | return err; |
@@ -82,3 +82,11 @@ struct litmus_lock* get_lock_from_od(int od) | |||
82 | return NULL; | 82 | return NULL; |
83 | } | 83 | } |
84 | 84 | ||
85 | void affinity_observer_new(struct affinity_observer* aff, | ||
86 | struct affinity_observer_ops* ops, | ||
87 | struct affinity_observer_args* args) | ||
88 | { | ||
89 | aff->ops = ops; | ||
90 | aff->lock = get_lock_from_od(args->lock_od); | ||
91 | aff->ident = atomic_inc_return(&aff_obs_id_gen); | ||
92 | } \ No newline at end of file | ||
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index b30e5b589882..7cdca1b7b50a 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -99,12 +99,19 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem, wa | |||
99 | if(*to_steal_from) | 99 | if(*to_steal_from) |
100 | { | 100 | { |
101 | struct list_head *pos; | 101 | struct list_head *pos; |
102 | struct task_struct *target = (*to_steal_from)->hp_waiter; | ||
103 | |||
104 | TRACE_CUR("want to steal hp_waiter (%s/%d) from queue %d\n", | ||
105 | target->comm, | ||
106 | target->pid, | ||
107 | kfmlp_get_idx(sem, *to_steal_from)); | ||
108 | |||
102 | list_for_each(pos, &(*to_steal_from)->wait.task_list) | 109 | list_for_each(pos, &(*to_steal_from)->wait.task_list) |
103 | { | 110 | { |
104 | wait_queue_t *node = list_entry(pos, wait_queue_t, task_list); | 111 | wait_queue_t *node = list_entry(pos, wait_queue_t, task_list); |
105 | struct task_struct *queued = (struct task_struct*) node->private; | 112 | struct task_struct *queued = (struct task_struct*) node->private; |
106 | /* Compare task prios, find high prio task. */ | 113 | /* Compare task prios, find high prio task. */ |
107 | if (queued == (*to_steal_from)->hp_waiter) | 114 | if (queued == target) |
108 | { | 115 | { |
109 | *to_steal = node; | 116 | *to_steal = node; |
110 | 117 | ||
@@ -115,6 +122,11 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem, wa | |||
115 | return queued; | 122 | return queued; |
116 | } | 123 | } |
117 | } | 124 | } |
125 | |||
126 | TRACE_CUR("Could not find %s/%d in queue %d!!! THIS IS A BUG!\n", | ||
127 | target->comm, | ||
128 | target->pid, | ||
129 | kfmlp_get_idx(sem, *to_steal_from)); | ||
118 | } | 130 | } |
119 | 131 | ||
120 | return NULL; | 132 | return NULL; |
@@ -133,6 +145,11 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem, | |||
133 | if(t == src->hp_waiter) { | 145 | if(t == src->hp_waiter) { |
134 | src->hp_waiter = kfmlp_find_hp_waiter(src, NULL); | 146 | src->hp_waiter = kfmlp_find_hp_waiter(src, NULL); |
135 | 147 | ||
148 | TRACE_CUR("queue %d: %s/%d is new hp_waiter\n", | ||
149 | kfmlp_get_idx(sem, src), | ||
150 | (src->hp_waiter) ? src->hp_waiter->comm : "nil", | ||
151 | (src->hp_waiter) ? src->hp_waiter->pid : -1); | ||
152 | |||
136 | if(src->owner && tsk_rt(src->owner)->inh_task == t) { | 153 | if(src->owner && tsk_rt(src->owner)->inh_task == t) { |
137 | litmus->decrease_prio(src->owner, src->hp_waiter); | 154 | litmus->decrease_prio(src->owner, src->hp_waiter); |
138 | } | 155 | } |
@@ -140,6 +157,7 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem, | |||
140 | 157 | ||
141 | if(sem->shortest_queue->count > src->count) { | 158 | if(sem->shortest_queue->count > src->count) { |
142 | sem->shortest_queue = src; | 159 | sem->shortest_queue = src; |
160 | TRACE_CUR("queue %d is the shortest\n", kfmlp_get_idx(sem, sem->shortest_queue)); | ||
143 | } | 161 | } |
144 | 162 | ||
145 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 163 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
@@ -155,6 +173,10 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem, | |||
155 | if(litmus->compare(t, dst->hp_waiter)) { | 173 | if(litmus->compare(t, dst->hp_waiter)) { |
156 | dst->hp_waiter = t; | 174 | dst->hp_waiter = t; |
157 | 175 | ||
176 | TRACE_CUR("queue %d: %s/%d is new hp_waiter\n", | ||
177 | kfmlp_get_idx(sem, dst), | ||
178 | t->comm, t->pid); | ||
179 | |||
158 | if(dst->owner && litmus->compare(t, dst->owner)) | 180 | if(dst->owner && litmus->compare(t, dst->owner)) |
159 | { | 181 | { |
160 | litmus->increase_prio(dst->owner, t); | 182 | litmus->increase_prio(dst->owner, t); |
@@ -264,8 +286,9 @@ int kfmlp_lock(struct litmus_lock* l) | |||
264 | 286 | ||
265 | if (my_queue->owner) { | 287 | if (my_queue->owner) { |
266 | /* resource is not free => must suspend and wait */ | 288 | /* resource is not free => must suspend and wait */ |
267 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", | 289 | TRACE_CUR("queue %d: Resource is not free => must suspend and wait. (queue size = %d)\n", |
268 | kfmlp_get_idx(sem, my_queue)); | 290 | kfmlp_get_idx(sem, my_queue), |
291 | my_queue->count); | ||
269 | 292 | ||
270 | init_waitqueue_entry(&wait, t); | 293 | init_waitqueue_entry(&wait, t); |
271 | 294 | ||
@@ -274,29 +297,37 @@ int kfmlp_lock(struct litmus_lock* l) | |||
274 | 297 | ||
275 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); | 298 | __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); |
276 | 299 | ||
300 | TRACE_CUR("queue %d: hp_waiter is currently %s/%d\n", | ||
301 | kfmlp_get_idx(sem, my_queue), | ||
302 | (my_queue->hp_waiter) ? my_queue->hp_waiter->comm : "nil", | ||
303 | (my_queue->hp_waiter) ? my_queue->hp_waiter->pid : -1); | ||
304 | |||
277 | /* check if we need to activate priority inheritance */ | 305 | /* check if we need to activate priority inheritance */ |
278 | //if (edf_higher_prio(t, my_queue->hp_waiter)) | 306 | //if (edf_higher_prio(t, my_queue->hp_waiter)) |
279 | if (litmus->compare(t, my_queue->hp_waiter)) | 307 | if (litmus->compare(t, my_queue->hp_waiter)) { |
280 | { | ||
281 | my_queue->hp_waiter = t; | 308 | my_queue->hp_waiter = t; |
309 | TRACE_CUR("queue %d: %s/%d is new hp_waiter\n", | ||
310 | kfmlp_get_idx(sem, my_queue), | ||
311 | t->comm, t->pid); | ||
312 | |||
282 | //if (edf_higher_prio(t, my_queue->owner)) | 313 | //if (edf_higher_prio(t, my_queue->owner)) |
283 | if (litmus->compare(t, my_queue->owner)) | 314 | if (litmus->compare(t, my_queue->owner)) { |
284 | { | ||
285 | litmus->increase_prio(my_queue->owner, my_queue->hp_waiter); | 315 | litmus->increase_prio(my_queue->owner, my_queue->hp_waiter); |
286 | } | 316 | } |
287 | } | 317 | } |
288 | 318 | ||
289 | ++(my_queue->count); | 319 | ++(my_queue->count); |
290 | 320 | ||
291 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
292 | if(my_queue == sem->shortest_queue) { | 321 | if(my_queue == sem->shortest_queue) { |
293 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | 322 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); |
323 | TRACE_CUR("queue %d is the shortest\n", | ||
324 | kfmlp_get_idx(sem, sem->shortest_queue)); | ||
294 | } | 325 | } |
326 | |||
327 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
295 | if(sem->aff_obs) { | 328 | if(sem->aff_obs) { |
296 | sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t); | 329 | sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t); |
297 | } | 330 | } |
298 | #else | ||
299 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
300 | #endif | 331 | #endif |
301 | 332 | ||
302 | /* release lock before sleeping */ | 333 | /* release lock before sleeping */ |
@@ -309,13 +340,11 @@ int kfmlp_lock(struct litmus_lock* l) | |||
309 | schedule(); | 340 | schedule(); |
310 | 341 | ||
311 | 342 | ||
312 | if(my_queue->owner == t) | 343 | if(my_queue->owner == t) { |
313 | { | ||
314 | TRACE_CUR("queue %d: acquired through waiting\n", | 344 | TRACE_CUR("queue %d: acquired through waiting\n", |
315 | kfmlp_get_idx(sem, my_queue)); | 345 | kfmlp_get_idx(sem, my_queue)); |
316 | } | 346 | } |
317 | else | 347 | else { |
318 | { | ||
319 | /* this case may happen if our wait entry was stolen | 348 | /* this case may happen if our wait entry was stolen |
320 | between queues. record where we went. */ | 349 | between queues. record where we went. */ |
321 | my_queue = kfmlp_get_queue(sem, t); | 350 | my_queue = kfmlp_get_queue(sem, t); |
@@ -325,8 +354,7 @@ int kfmlp_lock(struct litmus_lock* l) | |||
325 | kfmlp_get_idx(sem, my_queue)); | 354 | kfmlp_get_idx(sem, my_queue)); |
326 | } | 355 | } |
327 | } | 356 | } |
328 | else | 357 | else { |
329 | { | ||
330 | TRACE_CUR("queue %d: acquired immediately\n", | 358 | TRACE_CUR("queue %d: acquired immediately\n", |
331 | kfmlp_get_idx(sem, my_queue)); | 359 | kfmlp_get_idx(sem, my_queue)); |
332 | 360 | ||
@@ -334,16 +362,17 @@ int kfmlp_lock(struct litmus_lock* l) | |||
334 | 362 | ||
335 | ++(my_queue->count); | 363 | ++(my_queue->count); |
336 | 364 | ||
337 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
338 | if(my_queue == sem->shortest_queue) { | 365 | if(my_queue == sem->shortest_queue) { |
339 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | 366 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); |
367 | TRACE_CUR("queue %d is the shortest\n", | ||
368 | kfmlp_get_idx(sem, sem->shortest_queue)); | ||
340 | } | 369 | } |
370 | |||
371 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | ||
341 | if(sem->aff_obs) { | 372 | if(sem->aff_obs) { |
342 | sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t); | 373 | sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t); |
343 | sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, t); | 374 | sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, t); |
344 | } | 375 | } |
345 | #else | ||
346 | sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); | ||
347 | #endif | 376 | #endif |
348 | 377 | ||
349 | spin_unlock_irqrestore(&sem->lock, flags); | 378 | spin_unlock_irqrestore(&sem->lock, flags); |
@@ -370,12 +399,16 @@ int kfmlp_unlock(struct litmus_lock* l) | |||
370 | goto out; | 399 | goto out; |
371 | } | 400 | } |
372 | 401 | ||
402 | TRACE_CUR("queue %d: unlocking\n", kfmlp_get_idx(sem, my_queue)); | ||
403 | |||
373 | my_queue->owner = NULL; // clear ownership | 404 | my_queue->owner = NULL; // clear ownership |
374 | --(my_queue->count); | 405 | --(my_queue->count); |
375 | 406 | ||
376 | if(my_queue->count < sem->shortest_queue->count) | 407 | if(my_queue->count < sem->shortest_queue->count) |
377 | { | 408 | { |
378 | sem->shortest_queue = my_queue; | 409 | sem->shortest_queue = my_queue; |
410 | TRACE_CUR("queue %d is the shortest\n", | ||
411 | kfmlp_get_idx(sem, sem->shortest_queue)); | ||
379 | } | 412 | } |
380 | 413 | ||
381 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 414 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
@@ -424,13 +457,13 @@ RETRY: | |||
424 | /* wake up next */ | 457 | /* wake up next */ |
425 | wake_up_process(next); | 458 | wake_up_process(next); |
426 | } | 459 | } |
427 | else | 460 | else { |
428 | { | ||
429 | // TODO: put this stealing logic before we attempt to release | 461 | // TODO: put this stealing logic before we attempt to release |
430 | // our resource. (simplifies code and gets rid of ugly goto RETRY. | 462 | // our resource. (simplifies code and gets rid of ugly goto RETRY. |
431 | wait_queue_t *wait; | 463 | wait_queue_t *wait; |
432 | 464 | ||
433 | TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); | 465 | TRACE_CUR("queue %d: looking to steal someone...\n", |
466 | kfmlp_get_idx(sem, my_queue)); | ||
434 | 467 | ||
435 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 468 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
436 | next = (sem->aff_obs) ? | 469 | next = (sem->aff_obs) ? |
@@ -441,16 +474,18 @@ RETRY: | |||
441 | #endif | 474 | #endif |
442 | 475 | ||
443 | if(next) { | 476 | if(next) { |
444 | kfmlp_steal_node(sem, my_queue, wait, to_steal_from); | 477 | TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", |
445 | 478 | kfmlp_get_idx(sem, my_queue), | |
446 | TRACE_CUR("queued %d: stole %s/%d from queue %d\n", | ||
447 | next->comm, next->pid, | 479 | next->comm, next->pid, |
448 | kfmlp_get_idx(sem, to_steal_from)); | 480 | kfmlp_get_idx(sem, to_steal_from)); |
481 | |||
482 | kfmlp_steal_node(sem, my_queue, wait, to_steal_from); | ||
449 | 483 | ||
450 | goto RETRY; // will succeed this time. | 484 | goto RETRY; // will succeed this time. |
451 | } | 485 | } |
452 | else { | 486 | else { |
453 | TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); | 487 | TRACE_CUR("queue %d: no one to steal.\n", |
488 | kfmlp_get_idx(sem, my_queue)); | ||
454 | } | 489 | } |
455 | } | 490 | } |
456 | 491 | ||
@@ -597,7 +632,8 @@ static struct affinity_observer* kfmlp_aff_obs_new(struct affinity_observer_ops* | |||
597 | return(NULL); | 632 | return(NULL); |
598 | } | 633 | } |
599 | 634 | ||
600 | kfmlp_aff->obs.ops = ops; | 635 | affinity_observer_new(&kfmlp_aff->obs, ops, &aff_args.obs); |
636 | |||
601 | kfmlp_aff->ops = kfmlp_ops; | 637 | kfmlp_aff->ops = kfmlp_ops; |
602 | kfmlp_aff->offset = aff_args.replica_to_gpu_offset; | 638 | kfmlp_aff->offset = aff_args.replica_to_gpu_offset; |
603 | 639 | ||
@@ -645,34 +681,50 @@ struct kfmlp_queue* gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct | |||
645 | 681 | ||
646 | // simply pick the shortest queue if, we have no affinity, or we have | 682 | // simply pick the shortest queue if, we have no affinity, or we have |
647 | // affinity with the shortest | 683 | // affinity with the shortest |
648 | if((tsk_rt(t)->last_gpu < 0) || | 684 | if(unlikely(tsk_rt(t)->last_gpu < 0)) { |
649 | ((kfmlp_get_idx(sem, aff->shortest_queue->q) + aff->offset) == tsk_rt(t)->last_gpu)) { | ||
650 | // we have affinity with the shorest queue. pick it. | 685 | // we have affinity with the shorest queue. pick it. |
651 | to_enqueue = aff->shortest_queue->q; | 686 | shortest = aff->shortest_queue; |
652 | 687 | TRACE_CUR("special case: no affinity\n"); | |
653 | TRACE_CUR("special case: no affinity or have affinity with shortest\n"); | ||
654 | |||
655 | goto out; | 688 | goto out; |
656 | } | 689 | } |
657 | 690 | ||
658 | // enqueue where we will have the shortest time to completion | 691 | // all things being equal, let's start with the queue with which we have |
692 | // affinity. this helps us maintain affinity even when we don't have | ||
693 | // an estiamte for local-affinity execution time (i.e., 2nd time on GPU) | ||
694 | shortest = &aff->q_info[tsk_rt(t)->last_gpu - aff->offset]; | ||
695 | |||
696 | if(shortest == aff->shortest_queue) { | ||
697 | TRACE_CUR("special case: have affinity with shortest queue\n"); | ||
698 | goto out; | ||
699 | } | ||
659 | 700 | ||
660 | shortest = &aff->q_info[0]; | 701 | min_len = shortest->estimated_len + get_gpu_estimate(t, MIG_LOCAL); |
661 | min_len = shortest->estimated_len + get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, 0 + aff->offset)); | ||
662 | 702 | ||
663 | for(i = 1; i < sem->num_resources; ++i) { | 703 | TRACE_CUR("cs is %llu on queue %d: est len = %llu\n", |
664 | lt_t est_len = | 704 | get_gpu_estimate(t, MIG_LOCAL), |
665 | aff->q_info[i].estimated_len + | 705 | kfmlp_get_idx(sem, shortest->q), |
666 | get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, i + aff->offset)); | 706 | min_len); |
667 | 707 | ||
668 | if(est_len < min_len) { | 708 | for(i = 0; i < sem->num_resources; ++i) { |
669 | shortest = &aff->q_info[i]; | 709 | if(&aff->q_info[i] != shortest) { |
670 | min_len = est_len; | 710 | |
711 | lt_t est_len = | ||
712 | aff->q_info[i].estimated_len + | ||
713 | get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, i + aff->offset)); | ||
714 | if(est_len < min_len) { | ||
715 | shortest = &aff->q_info[i]; | ||
716 | min_len = est_len; | ||
717 | } | ||
718 | |||
719 | TRACE_CUR("cs is %llu on queue %d: est len = %llu\n", | ||
720 | get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, i + aff->offset)), | ||
721 | kfmlp_get_idx(sem, aff->q_info[i].q), | ||
722 | est_len); | ||
671 | } | 723 | } |
672 | } | 724 | } |
673 | to_enqueue = shortest->q; | ||
674 | 725 | ||
675 | out: | 726 | out: |
727 | to_enqueue = shortest->q; | ||
676 | TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n", | 728 | TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n", |
677 | kfmlp_get_idx(sem, to_enqueue), | 729 | kfmlp_get_idx(sem, to_enqueue), |
678 | kfmlp_get_idx(sem, sem->shortest_queue)); | 730 | kfmlp_get_idx(sem, sem->shortest_queue)); |
@@ -698,17 +750,20 @@ void gpu_kfmlp_notify_enqueue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq | |||
698 | int gpu = aff->offset + replica; | 750 | int gpu = aff->offset + replica; |
699 | struct kfmlp_queue_info *info = &aff->q_info[replica]; | 751 | struct kfmlp_queue_info *info = &aff->q_info[replica]; |
700 | lt_t est_time; | 752 | lt_t est_time; |
753 | lt_t est_len_before; | ||
701 | 754 | ||
702 | if(current == t) { | 755 | if(current == t) { |
703 | tsk_rt(t)->suspend_gpu_tracker_on_block = 1; | 756 | tsk_rt(t)->suspend_gpu_tracker_on_block = 1; |
704 | } | 757 | } |
705 | 758 | ||
759 | est_len_before = info->estimated_len; | ||
706 | est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); | 760 | est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); |
707 | info->estimated_len += est_time; | 761 | info->estimated_len += est_time; |
708 | 762 | ||
709 | TRACE_CUR("fq %d est len is now %llu\n", | 763 | TRACE_CUR("fq %d: q_len (%llu) + est_cs (%llu) = %llu\n", |
710 | kfmlp_get_idx(sem, aff->shortest_queue->q), | 764 | kfmlp_get_idx(sem, info->q), |
711 | aff->shortest_queue->estimated_len); | 765 | est_len_before, est_time, |
766 | info->estimated_len); | ||
712 | 767 | ||
713 | if(aff->shortest_queue == info) { | 768 | if(aff->shortest_queue == info) { |
714 | // we may no longer be the shortest | 769 | // we may no longer be the shortest |
@@ -764,7 +819,7 @@ void gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* f | |||
764 | TRACE_CUR("%s/%d acquired gpu %d. migration type = %d\n", | 819 | TRACE_CUR("%s/%d acquired gpu %d. migration type = %d\n", |
765 | t->comm, t->pid, gpu, tsk_rt(t)->gpu_migration); | 820 | t->comm, t->pid, gpu, tsk_rt(t)->gpu_migration); |
766 | 821 | ||
767 | reg_nv_device(gpu, 1); // register | 822 | reg_nv_device(gpu, 1, t); // register |
768 | 823 | ||
769 | tsk_rt(t)->suspend_gpu_tracker_on_block = 0; | 824 | tsk_rt(t)->suspend_gpu_tracker_on_block = 0; |
770 | reset_gpu_tracker(t); | 825 | reset_gpu_tracker(t); |
@@ -782,7 +837,7 @@ void gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, | |||
782 | est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); | 837 | est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); |
783 | 838 | ||
784 | tsk_rt(t)->last_gpu = gpu; | 839 | tsk_rt(t)->last_gpu = gpu; |
785 | reg_nv_device(gpu, 0); // unregister | 840 | reg_nv_device(gpu, 0, t); // unregister |
786 | 841 | ||
787 | // update estimates | 842 | // update estimates |
788 | update_gpu_estimate(t, get_gpu_time(t)); | 843 | update_gpu_estimate(t, get_gpu_time(t)); |
@@ -822,21 +877,25 @@ struct affinity_observer* kfmlp_gpu_aff_obs_new(struct affinity_observer_ops* op | |||
822 | struct kfmlp_queue* simple_gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct task_struct* t) | 877 | struct kfmlp_queue* simple_gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct task_struct* t) |
823 | { | 878 | { |
824 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); | 879 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); |
880 | // TRACE_CUR("Simple GPU KFMLP advise_enqueue invoked\n"); | ||
825 | return sem->shortest_queue; | 881 | return sem->shortest_queue; |
826 | } | 882 | } |
827 | 883 | ||
828 | struct task_struct* simple_gpu_kfmlp_advise_steal(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from) | 884 | struct task_struct* simple_gpu_kfmlp_advise_steal(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from) |
829 | { | 885 | { |
830 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); | 886 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); |
887 | // TRACE_CUR("Simple GPU KFMLP advise_steal invoked\n"); | ||
831 | return kfmlp_select_hp_steal(sem, to_steal, to_steal_from); | 888 | return kfmlp_select_hp_steal(sem, to_steal, to_steal_from); |
832 | } | 889 | } |
833 | 890 | ||
834 | void simple_gpu_kfmlp_notify_enqueue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) | 891 | void simple_gpu_kfmlp_notify_enqueue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) |
835 | { | 892 | { |
893 | // TRACE_CUR("Simple GPU KFMLP notify_enqueue invoked\n"); | ||
836 | } | 894 | } |
837 | 895 | ||
838 | void simple_gpu_kfmlp_notify_dequeue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) | 896 | void simple_gpu_kfmlp_notify_dequeue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) |
839 | { | 897 | { |
898 | // TRACE_CUR("Simple GPU KFMLP notify_dequeue invoked\n"); | ||
840 | } | 899 | } |
841 | 900 | ||
842 | void simple_gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) | 901 | void simple_gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) |
@@ -844,7 +903,9 @@ void simple_gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_q | |||
844 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); | 903 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); |
845 | int gpu = kfmlp_get_idx(sem, fq) + aff->offset; | 904 | int gpu = kfmlp_get_idx(sem, fq) + aff->offset; |
846 | 905 | ||
847 | reg_nv_device(gpu, 1); // register | 906 | // TRACE_CUR("Simple GPU KFMLP notify_acquired invoked\n"); |
907 | |||
908 | reg_nv_device(gpu, 1, t); // register | ||
848 | } | 909 | } |
849 | 910 | ||
850 | void simple_gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) | 911 | void simple_gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t) |
@@ -852,7 +913,9 @@ void simple_gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queu | |||
852 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); | 913 | struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); |
853 | int gpu = kfmlp_get_idx(sem, fq) + aff->offset; | 914 | int gpu = kfmlp_get_idx(sem, fq) + aff->offset; |
854 | 915 | ||
855 | reg_nv_device(gpu, 0); // unregister | 916 | // TRACE_CUR("Simple GPU KFMLP notify_freed invoked\n"); |
917 | |||
918 | reg_nv_device(gpu, 0, t); // unregister | ||
856 | } | 919 | } |
857 | 920 | ||
858 | struct kfmlp_affinity_ops simple_gpu_kfmlp_affinity = | 921 | struct kfmlp_affinity_ops simple_gpu_kfmlp_affinity = |
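
`gpu_kfmlp_advise_enqueue()` now starts from the queue the task last used and then picks the replica minimizing estimated queue length plus the task's migration-distance-dependent execution estimate. A toy sketch of that argmin, assuming replica index equals GPU id (offset 0) and made-up estimates; the real code walks `aff->q_info[]` and calls `get_gpu_estimate()`:

```c
/* Illustrative queue-selection sketch only. */
#include <stdio.h>

#define NUM_REPLICAS 4

static int dist(int a, int b)		/* binary-tree distance, as in gpu_affinity.c */
{
	int i;
	if (a < 0 || b < 0)
		return 4;		/* MIG_NONE */
	for (i = 0; i < 4; ++i)
		if ((a >> i) == (b >> i))
			return i;
	return 4;
}

static int advise_enqueue(const unsigned long long q_len[NUM_REPLICAS],
			  const unsigned long long est_cs[5], int last_gpu)
{
	int best = (last_gpu >= 0) ? last_gpu : 0;	/* start from prior affinity */
	unsigned long long min_len = q_len[best] + est_cs[dist(last_gpu, best)];
	int i;

	for (i = 0; i < NUM_REPLICAS; ++i) {
		unsigned long long len = q_len[i] + est_cs[dist(last_gpu, i)];
		if (len < min_len) {
			min_len = len;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	unsigned long long q_len[NUM_REPLICAS] = { 900, 100, 400, 400 };
	unsigned long long est_cs[5] = { 100, 200, 400, 800, 800 };	/* local .. none */

	/* last ran on GPU 0: its queue is long, so the NEAR migration to replica 1 wins */
	printf("enqueue on replica %d\n", advise_enqueue(q_len, est_cs, 0));
	return 0;
}
```
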
diff --git a/litmus/litmus.c b/litmus/litmus.c
index dd8b72e1af08..b876e67b7a9b 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -67,7 +67,7 @@ void release_heap_free(struct release_heap* rh); | |||
67 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | 67 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) |
68 | { | 68 | { |
69 | /* register the device to caller (aka 'current') */ | 69 | /* register the device to caller (aka 'current') */ |
70 | return(reg_nv_device(nv_device_id, reg_action)); | 70 | return(reg_nv_device(nv_device_id, reg_action, current)); |
71 | } | 71 | } |
72 | #else | 72 | #else |
73 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | 73 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) |
@@ -161,22 +161,6 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param) | |||
161 | 161 | ||
162 | target->rt_param.task_params = tp; | 162 | target->rt_param.task_params = tp; |
163 | 163 | ||
164 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
165 | /* proxy thread off by default */ | ||
166 | target->rt_param.is_proxy_thread = 0; | ||
167 | target->rt_param.cur_klitirqd = NULL; | ||
168 | //init_MUTEX(&target->rt_param.klitirqd_sem); | ||
169 | mutex_init(&target->rt_param.klitirqd_sem); | ||
170 | //init_completion(&target->rt_param.klitirqd_sem); | ||
171 | //target->rt_param.klitirqd_sem_stat = NOT_HELD; | ||
172 | atomic_set(&target->rt_param.klitirqd_sem_stat, NOT_HELD); | ||
173 | #endif | ||
174 | |||
175 | #ifdef CONFIG_LITMUS_NVIDIA | ||
176 | atomic_set(&target->rt_param.nv_int_count, 0); | ||
177 | #endif | ||
178 | |||
179 | |||
180 | retval = 0; | 164 | retval = 0; |
181 | out_unlock: | 165 | out_unlock: |
182 | read_unlock_irq(&tasklist_lock); | 166 | read_unlock_irq(&tasklist_lock); |
@@ -329,6 +313,22 @@ asmlinkage long sys_null_call(cycles_t __user *ts) | |||
329 | return ret; | 313 | return ret; |
330 | } | 314 | } |
331 | 315 | ||
316 | |||
317 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | ||
318 | void init_gpu_affinity_state(struct task_struct* p) | ||
319 | { | ||
320 | // under-damped | ||
321 | p->rt_param.gpu_fb_param_a = _frac(14008, 10000); | ||
322 | p->rt_param.gpu_fb_param_b = _frac(16024, 10000); | ||
323 | // critically-damped | ||
324 | // p->rt_param.gpu_fb_param_a = _frac(102, 1000); | ||
325 | // p->rt_param.gpu_fb_param_b = _frac(303, 1000); | ||
326 | |||
327 | p->rt_param.gpu_migration = MIG_NONE; | ||
328 | p->rt_param.last_gpu = -1; | ||
329 | } | ||
330 | #endif | ||
331 | |||
332 | /* p is a real-time task. Re-init its state as a best-effort task. */ | 332 | /* p is a real-time task. Re-init its state as a best-effort task. */ |
333 | static void reinit_litmus_state(struct task_struct* p, int restore) | 333 | static void reinit_litmus_state(struct task_struct* p, int restore) |
334 | { | 334 | { |
@@ -388,10 +388,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
388 | } | 388 | } |
389 | 389 | ||
390 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | 390 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) |
391 | p->rt_param.gpu_fb_param_a = _frac(14008, 1000); | 391 | init_gpu_affinity_state(p); |
392 | p->rt_param.gpu_fb_param_b = _frac(16024, 1000); | ||
393 | p->rt_param.gpu_migration = MIG_LAST; | ||
394 | p->rt_param.last_gpu = -1; | ||
395 | #endif | 392 | #endif |
396 | 393 | ||
397 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 394 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
@@ -443,10 +440,25 @@ long litmus_admit_task(struct task_struct* tsk) | |||
443 | } else { | 440 | } else { |
444 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); | 441 | bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); |
445 | } | 442 | } |
446 | 443 | ||
444 | |||
445 | #ifdef CONFIG_LITMUS_NVIDIA | ||
446 | atomic_set(&tsk_rt(tsk)->nv_int_count, 0); | ||
447 | #endif | ||
448 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | ||
449 | init_gpu_affinity_state(tsk); | ||
450 | #endif | ||
447 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 451 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
448 | tsk_rt(tsk)->blocked_lock = NULL; | 452 | tsk_rt(tsk)->blocked_lock = NULL; |
449 | raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); | 453 | raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); |
454 | //INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, prio_order); // done by scheduler | ||
455 | #endif | ||
456 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
457 | /* proxy thread off by default */ | ||
458 | tsk_rt(tsk)->is_proxy_thread = 0; | ||
459 | tsk_rt(tsk)->cur_klitirqd = NULL; | ||
460 | mutex_init(&tsk_rt(tsk)->klitirqd_sem); | ||
461 | atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); | ||
450 | #endif | 462 | #endif |
451 | 463 | ||
452 | retval = litmus->admit_task(tsk); | 464 | retval = litmus->admit_task(tsk); |
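
`init_gpu_affinity_state()` seeds the feedback coefficients a = 1.4008 and b = 1.6024 (the "under-damped" pair) that drive `update_estimate()` in gpu_affinity.c: err = observed − est, est = a·err + b·accum_err, accum_err += err. A small sketch of that update rule with doubles standing in for `fp_t`; the observation values are invented for illustration:

```c
/* Illustrative feedback-estimator update; not kernel code. */
#include <stdio.h>

struct feedback_est { double est, accum_err; };

static void update_estimate(struct feedback_est *fb, double a, double b,
			    double observed)
{
	double err = observed - fb->est;

	fb->est = a * err + b * fb->accum_err;
	fb->accum_err += err;
}

int main(void)
{
	/* first observation seeds est, with accum_err = est / 2, as in update_gpu_estimate() */
	struct feedback_est fb = { 1000.0, 500.0 };
	double a = 1.4008, b = 1.6024;	/* the "under-damped" pair */
	int i;

	for (i = 0; i < 5; ++i) {
		update_estimate(&fb, a, b, 1000.0);	/* steady 1000 ns observations */
		printf("est = %.1f\n", fb.est);
	}
	return 0;
}
```
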
diff --git a/litmus/locking.c b/litmus/locking.c
index ef13062913ce..fd3c7260319f 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -52,7 +52,10 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar | |||
52 | lock->nest.hp_waiter_eff_prio = NULL; | 52 | lock->nest.hp_waiter_eff_prio = NULL; |
53 | 53 | ||
54 | INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node); | 54 | INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node); |
55 | WARN_ON(!(lock->nest.hp_waiter_ptr)); | 55 | if(!lock->nest.hp_waiter_ptr) { |
56 | TRACE_CUR("BEWARE: hp_waiter_ptr should probably not be NULL in " | ||
57 | "most uses. (exception: IKGLP donors)\n"); | ||
58 | } | ||
56 | #endif | 59 | #endif |
57 | lock->type = type; | 60 | lock->type = type; |
58 | lock->ident = atomic_inc_return(&lock_id_gen); | 61 | lock->ident = atomic_inc_return(&lock_id_gen); |
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index 80900035881c..287e4a0662d9 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -418,16 +418,15 @@ void pai_check_priority_decrease(struct task_struct *t, int reg_device_id) | |||
418 | } | 418 | } |
419 | #endif | 419 | #endif |
420 | 420 | ||
421 | static int __reg_nv_device(int reg_device_id) | 421 | static int __reg_nv_device(int reg_device_id, struct task_struct *t) |
422 | { | 422 | { |
423 | int ret = 0; | 423 | int ret = 0; |
424 | int i; | 424 | int i; |
425 | struct task_struct *t = current; | ||
426 | struct task_struct *old_max = NULL; | 425 | struct task_struct *old_max = NULL; |
427 | unsigned long flags; | 426 | unsigned long flags; |
428 | nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | 427 | nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; |
429 | 428 | ||
430 | if(__test_bit(reg_device_id, &tsk_rt(t)->held_gpus)) { | 429 | if(test_bit(reg_device_id, &tsk_rt(t)->held_gpus)) { |
431 | // TODO: check if taks is already registered. | 430 | // TODO: check if taks is already registered. |
432 | return ret; // assume already registered. | 431 | return ret; // assume already registered. |
433 | } | 432 | } |
@@ -471,11 +470,10 @@ static int __reg_nv_device(int reg_device_id) | |||
471 | return(ret); | 470 | return(ret); |
472 | } | 471 | } |
473 | 472 | ||
474 | static int __clear_reg_nv_device(int de_reg_device_id) | 473 | static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t) |
475 | { | 474 | { |
476 | int ret = 0; | 475 | int ret = 0; |
477 | int i; | 476 | int i; |
478 | struct task_struct *t = current; | ||
479 | unsigned long flags; | 477 | unsigned long flags; |
480 | nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; | 478 | nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; |
481 | 479 | ||
@@ -483,7 +481,7 @@ static int __clear_reg_nv_device(int de_reg_device_id) | |||
483 | struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id); | 481 | struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id); |
484 | #endif | 482 | #endif |
485 | 483 | ||
486 | WARN_ON(!__test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)); | 484 | WARN_ON(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)); |
487 | 485 | ||
488 | raw_spin_lock_irqsave(®->lock, flags); | 486 | raw_spin_lock_irqsave(®->lock, flags); |
489 | 487 | ||
@@ -518,16 +516,16 @@ static int __clear_reg_nv_device(int de_reg_device_id) | |||
518 | } | 516 | } |
519 | 517 | ||
520 | 518 | ||
521 | int reg_nv_device(int reg_device_id, int reg_action) | 519 | int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t) |
522 | { | 520 | { |
523 | int ret; | 521 | int ret; |
524 | 522 | ||
525 | if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0)) | 523 | if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0)) |
526 | { | 524 | { |
527 | if(reg_action) | 525 | if(reg_action) |
528 | ret = __reg_nv_device(reg_device_id); | 526 | ret = __reg_nv_device(reg_device_id, t); |
529 | else | 527 | else |
530 | ret = __clear_reg_nv_device(reg_device_id); | 528 | ret = __clear_reg_nv_device(reg_device_id, t); |
531 | } | 529 | } |
532 | else | 530 | else |
533 | { | 531 | { |
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index b4ab2361e37a..09334aea43ac 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -814,11 +814,12 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
814 | 814 | ||
815 | /* If a task blocks we have no choice but to reschedule. | 815 | /* If a task blocks we have no choice but to reschedule. |
816 | */ | 816 | */ |
817 | if (blocks) | 817 | if (blocks) { |
818 | unlink(entry->scheduled); | 818 | unlink(entry->scheduled); |
819 | } | ||
819 | 820 | ||
820 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) | 821 | #if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) |
821 | if(tsk_rt(entry->scheduled)->held_gpus) { | 822 | if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) { |
822 | if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) { | 823 | if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) { |
823 | stop_gpu_tracker(entry->scheduled); | 824 | stop_gpu_tracker(entry->scheduled); |
824 | } | 825 | } |
@@ -1119,7 +1120,9 @@ static void __increase_priority_inheritance(struct task_struct* t, | |||
1119 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", | 1120 | "Occurance is likely okay: probably due to (hopefully safe) concurrent priority updates.\n", |
1120 | t->comm, t->pid, | 1121 | t->comm, t->pid, |
1121 | effective_priority(t)->comm, effective_priority(t)->pid, | 1122 | effective_priority(t)->comm, effective_priority(t)->pid, |
1122 | prio_inh->comm, prio_inh->pid); | 1123 | (prio_inh) ? prio_inh->comm : "nil", |
1124 | (prio_inh) ? prio_inh->pid : -1); | ||
1125 | WARN_ON(!prio_inh); | ||
1123 | } | 1126 | } |
1124 | #endif | 1127 | #endif |
1125 | } | 1128 | } |
@@ -1146,10 +1149,12 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1146 | raw_spin_unlock(&gsnedf_lock); | 1149 | raw_spin_unlock(&gsnedf_lock); |
1147 | 1150 | ||
1148 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1151 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
1149 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | 1152 | if(tsk_rt(t)->held_gpus) { |
1150 | i < NV_DEVICE_NUM; | 1153 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); |
1151 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i)) { | 1154 | i < NV_DEVICE_NUM; |
1152 | pai_check_priority_increase(t, i); | 1155 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { |
1156 | pai_check_priority_increase(t, i); | ||
1157 | } | ||
1153 | } | 1158 | } |
1154 | #endif | 1159 | #endif |
1155 | } | 1160 | } |
@@ -1230,10 +1235,12 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1230 | raw_spin_unlock(&gsnedf_lock); | 1235 | raw_spin_unlock(&gsnedf_lock); |
1231 | 1236 | ||
1232 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1237 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
1233 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | 1238 | if(tsk_rt(t)->held_gpus) { |
1234 | i < NV_DEVICE_NUM; | 1239 | for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); |
1235 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i)) { | 1240 | i < NV_DEVICE_NUM; |
1236 | pai_check_priority_decrease(t, i); | 1241 | i = find_next_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus), i+1)) { |
1242 | pai_check_priority_decrease(t, i); | ||
1243 | } | ||
1237 | } | 1244 | } |
1238 | #endif | 1245 | #endif |
1239 | } | 1246 | } |
@@ -1792,6 +1799,7 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1792 | .task_block = gsnedf_task_block, | 1799 | .task_block = gsnedf_task_block, |
1793 | .admit_task = gsnedf_admit_task, | 1800 | .admit_task = gsnedf_admit_task, |
1794 | .activate_plugin = gsnedf_activate_plugin, | 1801 | .activate_plugin = gsnedf_activate_plugin, |
1802 | .compare = edf_higher_prio, | ||
1795 | #ifdef CONFIG_LITMUS_LOCKING | 1803 | #ifdef CONFIG_LITMUS_LOCKING |
1796 | .allocate_lock = gsnedf_allocate_lock, | 1804 | .allocate_lock = gsnedf_allocate_lock, |
1797 | .increase_prio = increase_priority_inheritance, | 1805 | .increase_prio = increase_priority_inheritance, |
@@ -1800,6 +1808,7 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1800 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1808 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1801 | .nested_increase_prio = nested_increase_priority_inheritance, | 1809 | .nested_increase_prio = nested_increase_priority_inheritance, |
1802 | .nested_decrease_prio = nested_decrease_priority_inheritance, | 1810 | .nested_decrease_prio = nested_decrease_priority_inheritance, |
1811 | .__compare = __edf_higher_prio, | ||
1803 | #endif | 1812 | #endif |
1804 | #ifdef CONFIG_LITMUS_DGL_SUPPORT | 1813 | #ifdef CONFIG_LITMUS_DGL_SUPPORT |
1805 | .get_dgl_spinlock = gsnedf_get_dgl_spinlock, | 1814 | .get_dgl_spinlock = gsnedf_get_dgl_spinlock, |
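
The held_gpus loops above now resume `find_next_bit()` from i+1 so the walk actually advances past the current bit instead of re-finding it. A userspace sketch of the corrected iteration, with a trivial stand-in for the kernel's bit helpers:

```c
/* Illustrative set-bit walk; next_bit() is a stand-in for find_next_bit(). */
#include <stdio.h>

#define NV_DEVICE_NUM 8

static int next_bit(unsigned long mask, int from)
{
	int i;
	for (i = from; i < NV_DEVICE_NUM; ++i)
		if (mask & (1ul << i))
			return i;
	return NV_DEVICE_NUM;	/* no more set bits */
}

int main(void)
{
	unsigned long held_gpus = (1ul << 1) | (1ul << 5);
	int i;

	for (i = next_bit(held_gpus, 0);
	     i < NV_DEVICE_NUM;
	     i = next_bit(held_gpus, i + 1))	/* the "+1" is the fix */
		printf("held gpu %d\n", i);	/* prints 1, then 5, then stops */

	return 0;
}
```
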
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index de15e80743a8..a334fdf66c3b 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -112,6 +112,7 @@ static long litmus_dummy_deactivate_plugin(void) | |||
112 | 112 | ||
113 | static int litmus_dummy_compare(struct task_struct* a, struct task_struct* b) | 113 | static int litmus_dummy_compare(struct task_struct* a, struct task_struct* b) |
114 | { | 114 | { |
115 | TRACE_CUR("WARNING: Dummy compare function called!\n"); | ||
115 | return 0; | 116 | return 0; |
116 | } | 117 | } |
117 | 118 | ||
@@ -177,6 +178,7 @@ static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task | |||
177 | static int litmus_dummy___compare(struct task_struct* a, comparison_mode_t a_mod, | 178 | static int litmus_dummy___compare(struct task_struct* a, comparison_mode_t a_mod, |
178 | struct task_struct* b, comparison_mode_t b_mode) | 179 | struct task_struct* b, comparison_mode_t b_mode) |
179 | { | 180 | { |
181 | TRACE_CUR("WARNING: Dummy compare function called!\n"); | ||
180 | return 0; | 182 | return 0; |
181 | } | 183 | } |
182 | #endif | 184 | #endif |