-rw-r--r--  include/litmus/fdso.h                                                    |    6
-rw-r--r--  include/litmus/r2dglp_lock.h (renamed from include/litmus/ikglp_lock.h)  |  124
-rw-r--r--  include/litmus/rt_param.h                                                |   10
-rw-r--r--  litmus/Makefile                                                          |    2
-rw-r--r--  litmus/fdso.c                                                            |    6
-rw-r--r--  litmus/locking.c                                                         |    2
-rw-r--r--  litmus/r2dglp_lock.c (renamed from litmus/ikglp_lock.c)                  | 1016
-rw-r--r--  litmus/sched_cedf.c                                                      |   42
8 files changed, 604 insertions, 604 deletions
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 8b73285e09f3..6d6473eb55bd 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -27,11 +27,11 @@ typedef enum {
 	PCP_SEM = 5,
 
 	FIFO_MUTEX = 6,
-	IKGLP_SEM = 7,
+	R2DGLP_SEM = 7,
 	KFMLP_SEM = 8,
 
-	IKGLP_SIMPLE_GPU_AFF_OBS = 9,
-	IKGLP_GPU_AFF_OBS = 10,
+	R2DGLP_SIMPLE_GPU_AFF_OBS = 9,
+	R2DGLP_GPU_AFF_OBS = 10,
 	KFMLP_SIMPLE_GPU_AFF_OBS = 11,
 	KFMLP_GPU_AFF_OBS = 12,
 
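Note that these object-type IDs are visible to userspace, so the rename keeps R2DGLP_SEM at the old IKGLP_SEM slot (7) and leaves the ABI unchanged. As a rough sketch of what that means for callers, a task might open and lock the renamed type through liblitmus as below; od_openx(), litmus_lock()/litmus_unlock(), and the configuration details are assumptions based on how the other LITMUS^RT lock types are typically used, not code from this commit.

    /* Hypothetical userspace usage (not part of this patch). */
    #include <litmus.h>

    int use_r2dglp(int shared_mem_fd)
    {
        struct r2dglp_args args = {
            .nr_replicas  = 2,                 /* e.g., two GPU replicas */
            .max_in_fifos = R2DGLP_M_IN_FIFOS, /* 0u: default bound of m */
        };
        int od = od_openx(shared_mem_fd, R2DGLP_SEM, 0 /* obj id */, &args);
        int replica = litmus_lock(od);  /* blocks; returns replica index */
        /* ... use the assigned replica ... */
        litmus_unlock(od);
        return replica;
    }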
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/r2dglp_lock.h
index c85c8c280299..2773997b0ff1 100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/r2dglp_lock.h
@@ -1,5 +1,5 @@
-#ifndef LITMUS_IKGLP_H
-#define LITMUS_IKGLP_H
+#ifndef LITMUS_R2DGLP_H
+#define LITMUS_R2DGLP_H
 
 #include <litmus/litmus.h>
 #include <litmus/binheap.h>
@@ -7,51 +7,51 @@
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 #include <litmus/kexclu_affinity.h>
-struct ikglp_affinity;
+struct r2dglp_affinity;
 #endif
 
-typedef struct ikglp_heap_node
+typedef struct r2dglp_heap_node
 {
 	struct task_struct *task;
 	struct binheap_node node;
-} ikglp_heap_node_t;
+} r2dglp_heap_node_t;
 
 struct fifo_queue;
-struct ikglp_wait_state;
+struct r2dglp_wait_state;
 struct fifo_queue;
 
-typedef struct ikglp_donee_heap_node
+typedef struct r2dglp_donee_heap_node
 {
 	struct task_struct *task;
 	struct fifo_queue *fq;
 
-	/* cross-linked with ikglp_wait_state_t of donor */
-	struct ikglp_wait_state *donor_info;
+	/* cross-linked with r2dglp_wait_state_t of donor */
+	struct r2dglp_wait_state *donor_info;
 
 	struct binheap_node node;
-} ikglp_donee_heap_node_t;
+} r2dglp_donee_heap_node_t;
 
-typedef enum ikglp_states
+typedef enum r2dglp_states
 {
-	IKGLP_INVL = 0,
-	IKGLP_FQ,
-	IKGLP_PQ,
-	IKGLP_DONOR
-} ikglp_states_t;
+	R2DGLP_INVL = 0,
+	R2DGLP_FQ,
+	R2DGLP_PQ,
+	R2DGLP_DONOR
+} r2dglp_states_t;
 
 /*
- Maintains the state of a request as it goes through the IKGLP.
+ Maintains the state of a request as it goes through the R2DGLP.
  There are three exclusive wait states:
  (1) as a donor
  (2) in the PQ
  (3) in the FQ
 */
-typedef struct ikglp_wait_state {
+typedef struct r2dglp_wait_state {
 	struct task_struct *task;  /* pointer back to the requesting task */
 
-	ikglp_states_t cur_q;
+	r2dglp_states_t cur_q;
 	/* data for x-highest-prio tasks */
-	ikglp_heap_node_t global_heap_node;
+	r2dglp_heap_node_t global_heap_node;
 
 	/* TODO: put these fields in an appropriate union since wait
 	   states are exclusive. */
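The TODO carried over here could eventually be resolved along the lines of the sketch below; this is only an illustration of the idea (one anonymous struct per exclusive wait state sharing storage, discriminated by cur_q), not a layout this patch commits to.

    /* Illustrative only: the per-state fields of r2dglp_wait_state could
     * share storage, since cur_q records which state is live. */
    union r2dglp_wait_data {
        struct {                    /* whilst in a FIFO queue */
            wait_queue_t fq_node;
            struct fifo_queue *fq;
            r2dglp_donee_heap_node_t donee_heap_node;
        } fq;
        struct {                    /* whilst in the PQ */
            r2dglp_heap_node_t pq_node;
        } pq;
        struct {                    /* whilst a donor */
            r2dglp_donee_heap_node_t *donee_info;
            struct nested_info prio_donation;
            struct binheap_node node;
        } donor;
    };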
@@ -59,17 +59,17 @@ typedef struct ikglp_wait_state {
 	/** Data for whilst in FIFO Queue **/
 	wait_queue_t fq_node;
 	struct fifo_queue *fq;
-	ikglp_donee_heap_node_t donee_heap_node;
+	r2dglp_donee_heap_node_t donee_heap_node;
 
 	/** Data for whilst in PQ **/
-	ikglp_heap_node_t pq_node;
+	r2dglp_heap_node_t pq_node;
 
 	/** Data for whilst a donor **/
-	/* cross-linked with donee's ikglp_donee_heap_node_t */
-	ikglp_donee_heap_node_t *donee_info;
+	/* cross-linked with donee's r2dglp_donee_heap_node_t */
+	r2dglp_donee_heap_node_t *donee_info;
 	struct nested_info prio_donation;
 	struct binheap_node node;
-} ikglp_wait_state_t;
+} r2dglp_wait_state_t;
 
 /* struct for FIFO mutex with priority inheritance */
 struct fifo_queue
@@ -78,8 +78,8 @@ struct fifo_queue
 	struct task_struct* owner;
 
 	/* used for bookkeepping */
-	ikglp_heap_node_t global_heap_node;
-	ikglp_donee_heap_node_t donee_heap_node;
+	r2dglp_heap_node_t global_heap_node;
+	r2dglp_donee_heap_node_t donee_heap_node;
 
 	struct task_struct* hp_waiter;
 	unsigned int count;  /* number of waiters + holder */
@@ -96,8 +96,8 @@ struct fifo_queue
 	unsigned int is_vunlocked:1;
 };
 
-/* Main IKGLP data structure. */
-struct ikglp_semaphore
+/* Main R2DGLP data structure. */
+struct r2dglp_semaphore
 {
 	struct litmus_lock litmus_lock;
 
@@ -126,27 +126,27 @@ struct ikglp_semaphore
 	struct binheap donors;	/* max-heap, ordered by base priority */
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-	struct ikglp_affinity *aff_obs;  /* pointer to affinity observer */
+	struct r2dglp_affinity *aff_obs;  /* pointer to affinity observer */
 #endif
 };
 
-static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
+static inline struct r2dglp_semaphore* r2dglp_from_lock(struct litmus_lock* lock)
 {
-	return container_of(lock, struct ikglp_semaphore, litmus_lock);
+	return container_of(lock, struct r2dglp_semaphore, litmus_lock);
 }
 
-int ikglp_lock(struct litmus_lock* l);
-int ikglp_unlock(struct litmus_lock* l);
-void ikglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t);
-void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t);
+int r2dglp_lock(struct litmus_lock* l);
+int r2dglp_unlock(struct litmus_lock* l);
+void r2dglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t);
+void r2dglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t);
 
-int ikglp_close(struct litmus_lock* l);
-void ikglp_free(struct litmus_lock* l);
-struct litmus_lock* ikglp_new(unsigned int m, struct litmus_lock_ops*,
+int r2dglp_close(struct litmus_lock* l);
+void r2dglp_free(struct litmus_lock* l);
+struct litmus_lock* r2dglp_new(unsigned int m, struct litmus_lock_ops*,
 				void* __user arg);
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-struct ikglp_queue_info
+struct r2dglp_queue_info
 {
 	struct fifo_queue* q;
 	lt_t estimated_len;
@@ -154,33 +154,33 @@ struct ikglp_queue_info
 	unsigned int *nr_aff_users;
 };
 
-/* routines for IKGLP to call to get advice on queueing operations */
-typedef struct fifo_queue* (*advise_enqueue_t)(struct ikglp_affinity* aff,
+/* routines for R2DGLP to call to get advice on queueing operations */
+typedef struct fifo_queue* (*advise_enqueue_t)(struct r2dglp_affinity* aff,
 				struct task_struct* t);
-typedef ikglp_wait_state_t* (*advise_steal_t)(struct ikglp_affinity* aff,
+typedef r2dglp_wait_state_t* (*advise_steal_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* dst);
-typedef ikglp_donee_heap_node_t* (*advise_donee_t)(struct ikglp_affinity* aff,
+typedef r2dglp_donee_heap_node_t* (*advise_donee_t)(struct r2dglp_affinity* aff,
 				struct task_struct* t);
-typedef ikglp_wait_state_t* (*advise_donor_t)(struct ikglp_affinity* aff,
+typedef r2dglp_wait_state_t* (*advise_donor_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* dst);
 
-/* routines for IKGLP to notify the affinity observer about changes in mutex state */
-typedef void (*notify_enqueue_t)(struct ikglp_affinity* aff,
+/* routines for R2DGLP to notify the affinity observer about changes in mutex state */
+typedef void (*notify_enqueue_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* fq, struct task_struct* t);
-typedef void (*notify_dequeue_t)(struct ikglp_affinity* aff,
+typedef void (*notify_dequeue_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* fq, struct task_struct* t);
-typedef void (*notify_acquire_t)(struct ikglp_affinity* aff,
+typedef void (*notify_acquire_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* fq, struct task_struct* t);
-typedef void (*notify_free_t)(struct ikglp_affinity* aff,
+typedef void (*notify_free_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* fq, struct task_struct* t);
-typedef int (*notify_exit_t)(struct ikglp_affinity* aff,
+typedef int (*notify_exit_t)(struct r2dglp_affinity* aff,
 				struct task_struct* t);
 
 /* convert a replica # to a GPU (includes offsets & simult user folding) */
-typedef int (*replica_to_resource_t)(struct ikglp_affinity* aff,
+typedef int (*replica_to_resource_t)(struct r2dglp_affinity* aff,
 				struct fifo_queue* fq);
 
-struct ikglp_affinity_ops
+struct r2dglp_affinity_ops
 {
 	advise_enqueue_t advise_enqueue;
 	advise_steal_t advise_steal;
@@ -196,11 +196,11 @@ struct ikglp_affinity_ops
 	replica_to_resource_t replica_to_resource;
 };
 
-struct ikglp_affinity
+struct r2dglp_affinity
 {
 	struct affinity_observer obs;
-	struct ikglp_affinity_ops *ops;
-	struct ikglp_queue_info *q_info;
+	struct r2dglp_affinity_ops *ops;
+	struct r2dglp_queue_info *q_info;
 	unsigned int *nr_cur_users_on_rsrc;
 	unsigned int *nr_aff_on_rsrc;
 	unsigned int offset;
@@ -210,20 +210,20 @@ struct ikglp_affinity
 	unsigned int relax_max_fifo_len:1;
 };
 
-static inline struct ikglp_affinity* ikglp_aff_obs_from_aff_obs(
+static inline struct r2dglp_affinity* r2dglp_aff_obs_from_aff_obs(
 				struct affinity_observer* aff_obs)
 {
-	return container_of(aff_obs, struct ikglp_affinity, obs);
+	return container_of(aff_obs, struct r2dglp_affinity, obs);
 }
 
-int ikglp_aff_obs_close(struct affinity_observer*);
-void ikglp_aff_obs_free(struct affinity_observer*);
+int r2dglp_aff_obs_close(struct affinity_observer*);
+void r2dglp_aff_obs_free(struct affinity_observer*);
 
 #ifdef CONFIG_LITMUS_NVIDIA
-struct affinity_observer* ikglp_gpu_aff_obs_new(
+struct affinity_observer* r2dglp_gpu_aff_obs_new(
 				struct affinity_observer_ops* aff,
 				void* __user arg);
-struct affinity_observer* ikglp_simple_gpu_aff_obs_new(
+struct affinity_observer* r2dglp_simple_gpu_aff_obs_new(
 				struct affinity_observer_ops* aff,
 				void* __user arg);
 #endif /* end LITMUS_NVIDIA */
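Taken together, the typedefs above form a dispatch table: an affinity back-end fills a struct r2dglp_affinity_ops and the core calls through aff_obs->ops at each queueing event. A hypothetical wiring is sketched below; the gpu_* names are invented for illustration, and the hooks not visible in this hunk are left out rather than guessed at.

    /* Illustrative only (not from this patch). */
    static struct fifo_queue* gpu_advise_enqueue(struct r2dglp_affinity* aff,
                    struct task_struct* t);
    static r2dglp_wait_state_t* gpu_advise_steal(struct r2dglp_affinity* aff,
                    struct fifo_queue* dst);
    static int gpu_replica_to_resource(struct r2dglp_affinity* aff,
                    struct fifo_queue* fq);

    static struct r2dglp_affinity_ops gpu_affinity_ops = {
        .advise_enqueue      = gpu_advise_enqueue,
        .advise_steal        = gpu_advise_steal,
        /* remaining hooks elided; see struct r2dglp_affinity_ops */
        .replica_to_resource = gpu_replica_to_resource,
    };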
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index f59b5b91ec09..6fa133afe4af 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -135,12 +135,12 @@ struct gpu_affinity_observer_args
 	int relaxed_rules;
 };
 
-#define IKGLP_M_IN_FIFOS		(0u)
-#define IKGLP_UNLIMITED_IN_FIFOS	(~0u)
-#define IKGLP_OPTIMAL_FIFO_LEN		(0u)
-#define IKGLP_UNLIMITED_FIFO_LEN	(~0u)
+#define R2DGLP_M_IN_FIFOS		(0u)
+#define R2DGLP_UNLIMITED_IN_FIFOS	(~0u)
+#define R2DGLP_OPTIMAL_FIFO_LEN		(0u)
+#define R2DGLP_UNLIMITED_FIFO_LEN	(~0u)
 
-struct ikglp_args
+struct r2dglp_args
 {
 	unsigned int nr_replicas;
 	unsigned int max_in_fifos;
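The renamed sentinels keep their meaning: 0u requests the default (at most m requests in the FIFOs, the analytically optimal FIFO length), while ~0u removes the bound entirely. A minimal sketch of filling the fields visible in this hunk follows; the FIFO-length knob implied by the *_FIFO_LEN macros sits outside this hunk and is therefore omitted rather than guessed.

    /* Illustrative configuration using the sentinels above. */
    struct r2dglp_args args = {
        .nr_replicas  = 4,                          /* k resource replicas */
        .max_in_fifos = R2DGLP_UNLIMITED_IN_FIFOS,  /* ~0u: no PQ/donor diversion */
    };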
diff --git a/litmus/Makefile b/litmus/Makefile
index 76b59aa14374..160553f8fc03 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -30,7 +30,7 @@ obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
 obj-$(CONFIG_SCHED_CPU_AFFINITY) += affinity.o
 
 obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o
-obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o prioq_lock.o ikglp_lock.o
+obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o prioq_lock.o r2dglp_lock.o
 obj-$(CONFIG_LITMUS_AFFINITY_LOCKING) += kexclu_affinity.o
 
 obj-$(CONFIG_LITMUS_SOFTIRQD) += klmirqd.o
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 0f5ca022537a..c2e9fd0c241e 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -34,11 +34,11 @@ static const struct fdso_ops* fdso_ops[] = {
 	&generic_lock_ops, /* PCP_SEM */
 
 	&generic_lock_ops, /* FIFO_MUTEX */
-	&generic_lock_ops, /* IKGLP_SEM */
+	&generic_lock_ops, /* R2DGLP_SEM */
 	&generic_lock_ops, /* KFMLP_SEM */
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
-	&generic_affinity_ops, /* IKGLP_SIMPLE_GPU_AFF_OBS */
-	&generic_affinity_ops, /* IKGLP_GPU_AFF_OBS */
+	&generic_affinity_ops, /* R2DGLP_SIMPLE_GPU_AFF_OBS */
+	&generic_affinity_ops, /* R2DGLP_GPU_AFF_OBS */
 	&generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */
 	&generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */
 #else
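Since fdso_ops[] is indexed by obj_type_t, nothing but these comments ties slot 7 to R2DGLP_SEM. A compile-time guard along the lines below would catch the enum and the table drifting apart; it is not part of this patch, and the MAX_OBJ_TYPE terminator it assumes may not exist under that name in this tree.

    /* Hypothetical compile-time guard (not from this commit); assumes the
     * obj_type_t enum ends with a MAX_OBJ_TYPE marker. */
    static inline void fdso_ops_sanity_check(void)
    {
        BUILD_BUG_ON(ARRAY_SIZE(fdso_ops) != MAX_OBJ_TYPE + 1);
    }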
diff --git a/litmus/locking.c b/litmus/locking.c
index 0de007cc9732..6302bd47304b 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -67,7 +67,7 @@ static int create_generic_lock(void** obj_ref, obj_type_t type,
 	INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node);
 	if(!lock->nest.hp_waiter_ptr) {
 		TRACE_CUR("BEWARE: hp_waiter_ptr should probably not be NULL in "
-				  "most cases. (exception: IKGLP donors)\n");
+				  "most cases. (exception: R2DGLP donors)\n");
 	}
 #endif
 	lock->type = type;
diff --git a/litmus/ikglp_lock.c b/litmus/r2dglp_lock.c
index 6f6090591c58..857725c75984 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/r2dglp_lock.c
@@ -12,15 +12,15 @@
 #include <litmus/nvidia_info.h>
 #endif
 
-#include <litmus/ikglp_lock.h>
+#include <litmus/r2dglp_lock.h>
 
-#define IKGLP_INVAL_DISTANCE 0x7FFFFFFF
+#define R2DGLP_INVAL_DISTANCE 0x7FFFFFFF
 
-int ikglp_max_heap_base_priority_order(struct binheap_node *a,
+int r2dglp_max_heap_base_priority_order(struct binheap_node *a,
 				struct binheap_node *b)
 {
-	ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node);
-	ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node);
+	r2dglp_heap_node_t *d_a = binheap_entry(a, r2dglp_heap_node_t, node);
+	r2dglp_heap_node_t *d_b = binheap_entry(b, r2dglp_heap_node_t, node);
 
 	BUG_ON(!d_a);
 	BUG_ON(!d_b);
@@ -28,34 +28,34 @@ int ikglp_max_heap_base_priority_order(struct binheap_node *a,
 	return litmus->__compare(d_a->task, BASE, d_b->task, BASE);
 }
 
-int ikglp_min_heap_base_priority_order(struct binheap_node *a,
+int r2dglp_min_heap_base_priority_order(struct binheap_node *a,
 				struct binheap_node *b)
 {
-	ikglp_heap_node_t *d_a = binheap_entry(a, ikglp_heap_node_t, node);
-	ikglp_heap_node_t *d_b = binheap_entry(b, ikglp_heap_node_t, node);
+	r2dglp_heap_node_t *d_a = binheap_entry(a, r2dglp_heap_node_t, node);
+	r2dglp_heap_node_t *d_b = binheap_entry(b, r2dglp_heap_node_t, node);
 
 	return litmus->__compare(d_b->task, BASE, d_a->task, BASE);
 }
 
-int ikglp_donor_max_heap_base_priority_order(struct binheap_node *a,
+int r2dglp_donor_max_heap_base_priority_order(struct binheap_node *a,
 				struct binheap_node *b)
 {
-	ikglp_wait_state_t *d_a = binheap_entry(a, ikglp_wait_state_t, node);
-	ikglp_wait_state_t *d_b = binheap_entry(b, ikglp_wait_state_t, node);
+	r2dglp_wait_state_t *d_a = binheap_entry(a, r2dglp_wait_state_t, node);
+	r2dglp_wait_state_t *d_b = binheap_entry(b, r2dglp_wait_state_t, node);
 
 	return litmus->__compare(d_a->task, BASE, d_b->task, BASE);
 }
 
 
-int ikglp_min_heap_donee_order(struct binheap_node *a,
+int r2dglp_min_heap_donee_order(struct binheap_node *a,
 				struct binheap_node *b)
 {
 	struct task_struct *prio_a, *prio_b;
 
-	ikglp_donee_heap_node_t *d_a =
-		binheap_entry(a, ikglp_donee_heap_node_t, node);
-	ikglp_donee_heap_node_t *d_b =
-		binheap_entry(b, ikglp_donee_heap_node_t, node);
+	r2dglp_donee_heap_node_t *d_a =
+		binheap_entry(a, r2dglp_donee_heap_node_t, node);
+	r2dglp_donee_heap_node_t *d_b =
+		binheap_entry(b, r2dglp_donee_heap_node_t, node);
 
 	if(!d_a->donor_info) {
 		prio_a = d_a->task;
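Worth noting while reading the renamed comparators: the min-heap order is obtained by calling the same priority relation with the operands swapped (d_b before d_a), so a single relation serves both heap polarities. The same trick in miniature, outside any LITMUS^RT types:

    /* Generic illustration of the swapped-argument idiom above. */
    static int prio_higher(int a, int b)    { return a > b; }
    static int max_heap_order(int a, int b) { return prio_higher(a, b); }
    static int min_heap_order(int a, int b) { return prio_higher(b, a); }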
@@ -83,13 +83,13 @@ static inline unsigned int nominal_fq_len(struct fifo_queue *fq)
 	return (fq->count - fq->is_vunlocked);
 }
 
-static inline int ikglp_get_idx(struct ikglp_semaphore *sem,
+static inline int r2dglp_get_idx(struct r2dglp_semaphore *sem,
 				struct fifo_queue *queue)
 {
 	return (queue - &sem->fifo_queues[0]);
 }
 
-static inline struct fifo_queue* ikglp_get_queue(struct ikglp_semaphore *sem,
+static inline struct fifo_queue* r2dglp_get_queue(struct r2dglp_semaphore *sem,
 				struct task_struct *holder)
 {
 	struct fifo_queue *fq = NULL;
@@ -104,7 +104,7 @@ static inline struct fifo_queue* ikglp_get_queue(struct ikglp_semaphore *sem,
 	return(fq);
 }
 
-static struct task_struct* ikglp_find_hp_waiter(struct fifo_queue *kqueue,
+static struct task_struct* r2dglp_find_hp_waiter(struct fifo_queue *kqueue,
 				struct task_struct *skip)
 {
 	struct list_head *pos;
@@ -120,7 +120,7 @@ static struct task_struct* ikglp_find_hp_waiter(struct fifo_queue *kqueue,
 	return found;
 }
 
-static struct fifo_queue* ikglp_find_shortest(struct ikglp_semaphore *sem,
+static struct fifo_queue* r2dglp_find_shortest(struct r2dglp_semaphore *sem,
 				struct fifo_queue *search_start)
 {
 	/* we start our search at search_start instead of at the beginning of the
@@ -143,14 +143,14 @@ static struct fifo_queue* ikglp_find_shortest(struct ikglp_semaphore *sem,
 	return(shortest);
 }
 
-static inline struct task_struct* ikglp_mth_highest(struct ikglp_semaphore *sem)
+static inline struct task_struct* r2dglp_mth_highest(struct r2dglp_semaphore *sem)
 {
-	return binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node)->task;
+	return binheap_top_entry(&sem->top_m, r2dglp_heap_node_t, node)->task;
 }
 
-static void ikglp_add_global_list(struct ikglp_semaphore *sem,
+static void r2dglp_add_global_list(struct r2dglp_semaphore *sem,
 				struct task_struct *t,
-				ikglp_heap_node_t *node)
+				r2dglp_heap_node_t *node)
 {
 	node->task = t;
 	INIT_BINHEAP_NODE(&node->node);
@@ -158,35 +158,35 @@ static void ikglp_add_global_list(struct ikglp_semaphore *sem,
 	if(sem->top_m_size < sem->max_in_fifos) {
 		TRACE_CUR("Trivially adding %s/%d to top-m global list.\n",
 				  t->comm, t->pid);
-		binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node);
+		binheap_add(&node->node, &sem->top_m, r2dglp_heap_node_t, node);
 		++(sem->top_m_size);
 	}
-	else if(litmus->__compare(t, BASE, ikglp_mth_highest(sem), BASE)) {
-		ikglp_heap_node_t *evicted =
-			binheap_top_entry(&sem->top_m, ikglp_heap_node_t, node);
+	else if(litmus->__compare(t, BASE, r2dglp_mth_highest(sem), BASE)) {
+		r2dglp_heap_node_t *evicted =
+			binheap_top_entry(&sem->top_m, r2dglp_heap_node_t, node);
 
 		TRACE_CUR("Adding %s/%d to top-m and evicting %s/%d.\n",
 				  t->comm, t->pid,
 				  evicted->task->comm, evicted->task->pid);
 
-		binheap_delete_root(&sem->top_m, ikglp_heap_node_t, node);
+		binheap_delete_root(&sem->top_m, r2dglp_heap_node_t, node);
 		INIT_BINHEAP_NODE(&evicted->node);
-		binheap_add(&evicted->node, &sem->not_top_m, ikglp_heap_node_t, node);
+		binheap_add(&evicted->node, &sem->not_top_m, r2dglp_heap_node_t, node);
 
-		binheap_add(&node->node, &sem->top_m, ikglp_heap_node_t, node);
+		binheap_add(&node->node, &sem->top_m, r2dglp_heap_node_t, node);
 	}
 	else {
 		TRACE_CUR("Trivially adding %s/%d to not-top-m global list.\n",
 				  t->comm, t->pid);
 
-		binheap_add(&node->node, &sem->not_top_m, ikglp_heap_node_t, node);
+		binheap_add(&node->node, &sem->not_top_m, r2dglp_heap_node_t, node);
 	}
 }
 
 
-static void ikglp_del_global_list(struct ikglp_semaphore *sem,
+static void r2dglp_del_global_list(struct r2dglp_semaphore *sem,
 				struct task_struct *t,
-				ikglp_heap_node_t *node)
+				r2dglp_heap_node_t *node)
 {
 	BUG_ON(!binheap_is_in_heap(&node->node));
 
@@ -198,16 +198,16 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem,
 		binheap_delete(&node->node, &sem->top_m);
 
 		if(!binheap_empty(&sem->not_top_m)) {
-			ikglp_heap_node_t *promoted =
-				binheap_top_entry(&sem->not_top_m, ikglp_heap_node_t, node);
+			r2dglp_heap_node_t *promoted =
+				binheap_top_entry(&sem->not_top_m, r2dglp_heap_node_t, node);
 
 			TRACE_CUR("Promoting %s/%d to top-m\n",
 					  promoted->task->comm, promoted->task->pid);
 
-			binheap_delete_root(&sem->not_top_m, ikglp_heap_node_t, node);
+			binheap_delete_root(&sem->not_top_m, r2dglp_heap_node_t, node);
 			INIT_BINHEAP_NODE(&promoted->node);
 
-			binheap_add(&promoted->node, &sem->top_m, ikglp_heap_node_t, node);
+			binheap_add(&promoted->node, &sem->top_m, r2dglp_heap_node_t, node);
 		}
 		else {
 			TRACE_CUR("No one to promote to top-m.\n");
@@ -222,23 +222,23 @@ static void ikglp_del_global_list(struct ikglp_semaphore *sem,
222} 222}
223 223
224 224
225static void ikglp_add_donees(struct ikglp_semaphore *sem, 225static void r2dglp_add_donees(struct r2dglp_semaphore *sem,
226 struct fifo_queue *fq, 226 struct fifo_queue *fq,
227 struct task_struct *t, 227 struct task_struct *t,
228 ikglp_donee_heap_node_t* node) 228 r2dglp_donee_heap_node_t* node)
229{ 229{
230 node->task = t; 230 node->task = t;
231 node->donor_info = NULL; 231 node->donor_info = NULL;
232 node->fq = fq; 232 node->fq = fq;
233 INIT_BINHEAP_NODE(&node->node); 233 INIT_BINHEAP_NODE(&node->node);
234 234
235 binheap_add(&node->node, &sem->donees, ikglp_donee_heap_node_t, node); 235 binheap_add(&node->node, &sem->donees, r2dglp_donee_heap_node_t, node);
236} 236}
237 237
238 238
239static void ikglp_refresh_owners_prio_increase(struct task_struct *t, 239static void r2dglp_refresh_owners_prio_increase(struct task_struct *t,
240 struct fifo_queue *fq, 240 struct fifo_queue *fq,
241 struct ikglp_semaphore *sem, 241 struct r2dglp_semaphore *sem,
242 unsigned long flags) 242 unsigned long flags)
243{ 243{
244 /* priority of 't' has increased (note: 't' might already be hp_waiter). */ 244 /* priority of 't' has increased (note: 't' might already be hp_waiter). */
@@ -260,7 +260,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t,
 
 	if (unlikely(binheap_empty(&tsk_rt(owner)->hp_blocked_tasks))) {
 		TRACE_TASK(owner, "not drawing inheritance from fq %d.\n",
-				   ikglp_get_idx(sem, fq));
+				   r2dglp_get_idx(sem, fq));
 		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 		WARN_ON(1);
 		return;
@@ -320,8 +320,8 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t,
320} 320}
321 321
322/* hp_waiter has decreased */ 322/* hp_waiter has decreased */
323static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq, 323static void r2dglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
324 struct ikglp_semaphore *sem, 324 struct r2dglp_semaphore *sem,
325 unsigned long flags, 325 unsigned long flags,
326 int budget_triggered) 326 int budget_triggered)
327{ 327{
@@ -336,13 +336,13 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
 		return;
 	}
 
-	TRACE_CUR("ikglp_refresh_owners_prio_decrease\n");
+	TRACE_CUR("r2dglp_refresh_owners_prio_decrease\n");
 
 	raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 
 	if (unlikely(binheap_empty(&tsk_rt(owner)->hp_blocked_tasks))) {
 		TRACE_TASK(owner, "not drawing inheritance from fq %d.\n",
-				   ikglp_get_idx(sem, fq));
+				   r2dglp_get_idx(sem, fq));
 		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
 		unlock_fine_irqrestore(&sem->lock, flags);
 		WARN_ON(1);
@@ -366,7 +366,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
 		struct task_struct *decreased_prio;
 
 		TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n",
-				  ikglp_get_idx(sem, fq));
+				  r2dglp_get_idx(sem, fq));
 
 		if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) {
 			TRACE_CUR("%s/%d has greater base priority than base priority "
@@ -375,7 +375,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
 					  (new_max_eff_prio) ? new_max_eff_prio->pid : 0,
 					  owner->comm,
 					  owner->pid,
-					  ikglp_get_idx(sem, fq));
+					  r2dglp_get_idx(sem, fq));
 
 			decreased_prio = new_max_eff_prio;
 		}
@@ -386,7 +386,7 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
 					  (new_max_eff_prio) ? new_max_eff_prio->pid : 0,
 					  owner->comm,
 					  owner->pid,
-					  ikglp_get_idx(sem, fq));
+					  r2dglp_get_idx(sem, fq));
 
 			decreased_prio = NULL;
 		}
@@ -404,9 +404,9 @@ static void ikglp_refresh_owners_prio_decrease(struct fifo_queue *fq,
404} 404}
405 405
406 406
407static void ikglp_remove_donation_from_owner(struct binheap_node *n, 407static void r2dglp_remove_donation_from_owner(struct binheap_node *n,
408 struct fifo_queue *fq, 408 struct fifo_queue *fq,
409 struct ikglp_semaphore *sem, 409 struct r2dglp_semaphore *sem,
410 unsigned long flags) 410 unsigned long flags)
411{ 411{
412 struct task_struct *owner = fq->owner; 412 struct task_struct *owner = fq->owner;
@@ -431,18 +431,18 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n,
 		struct task_struct *decreased_prio;
 
 		TRACE_CUR("Propagating decreased inheritance to holder of fq %d.\n",
-				  ikglp_get_idx(sem, fq));
+				  r2dglp_get_idx(sem, fq));
 
 		if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) {
 			TRACE_CUR("has greater base priority than base priority of owner "
 					  "of fq %d.\n",
-					  ikglp_get_idx(sem, fq));
+					  r2dglp_get_idx(sem, fq));
 			decreased_prio = new_max_eff_prio;
 		}
 		else {
 			TRACE_CUR("has lesser base priority than base priority of owner of "
 					  "fq %d.\n",
-					  ikglp_get_idx(sem, fq));
+					  r2dglp_get_idx(sem, fq));
 			decreased_prio = NULL;
 		}
 
@@ -458,7 +458,7 @@ static void ikglp_remove_donation_from_owner(struct binheap_node *n,
 	}
 }
 
-static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t,
+static void r2dglp_remove_donation_from_fq_waiter(struct task_struct *t,
 				struct binheap_node *n)
 {
 	struct task_struct *old_max_eff_prio;
@@ -497,13 +497,13 @@ static void ikglp_remove_donation_from_fq_waiter(struct task_struct *t,
 	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
 }
 
-static void ikglp_get_immediate(struct task_struct* t,
+static void r2dglp_get_immediate(struct task_struct* t,
 				struct fifo_queue *fq,
-				struct ikglp_semaphore *sem,
+				struct r2dglp_semaphore *sem,
 				unsigned long flags)
 {
 	/* resource available now */
-	TRACE_CUR("queue %d: acquired immediately\n", ikglp_get_idx(sem, fq));
+	TRACE_CUR("queue %d: acquired immediately\n", r2dglp_get_idx(sem, fq));
 
 	fq->owner = t;
 
@@ -517,11 +517,11 @@ static void ikglp_get_immediate(struct task_struct* t,
 	/* even though we got the replica, we're still considered in the fifo */
 	++(sem->nr_in_fifos);
 
-	ikglp_add_global_list(sem, t, &fq->global_heap_node);
-	ikglp_add_donees(sem, fq, t, &fq->donee_heap_node);
+	r2dglp_add_global_list(sem, t, &fq->global_heap_node);
+	r2dglp_add_donees(sem, fq, t, &fq->donee_heap_node);
 
 	sem->shortest_fifo_queue =
-		ikglp_find_shortest(sem, sem->shortest_fifo_queue);
+		r2dglp_find_shortest(sem, sem->shortest_fifo_queue);
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	if(sem->aff_obs) {
@@ -534,17 +534,17 @@ static void ikglp_get_immediate(struct task_struct* t,
534} 534}
535 535
536 536
537static void __ikglp_enqueue_on_fq(struct ikglp_semaphore *sem, 537static void __r2dglp_enqueue_on_fq(struct r2dglp_semaphore *sem,
538 struct fifo_queue *fq, 538 struct fifo_queue *fq,
539 ikglp_wait_state_t *wait, 539 r2dglp_wait_state_t *wait,
540 ikglp_heap_node_t *global_heap_node, 540 r2dglp_heap_node_t *global_heap_node,
541 ikglp_donee_heap_node_t *donee_heap_node) 541 r2dglp_donee_heap_node_t *donee_heap_node)
542{ 542{
543 struct task_struct *t = wait->task; 543 struct task_struct *t = wait->task;
544 544
545 /* resource is not free => must suspend and wait */ 545 /* resource is not free => must suspend and wait */
546 TRACE_TASK(t, "Enqueuing on fq %d.\n", 546 TRACE_TASK(t, "Enqueuing on fq %d.\n",
547 ikglp_get_idx(sem, fq)); 547 r2dglp_get_idx(sem, fq));
548 548
549 init_waitqueue_entry(&wait->fq_node, t); 549 init_waitqueue_entry(&wait->fq_node, t);
550 550
@@ -557,80 +557,80 @@ static void __ikglp_enqueue_on_fq(struct ikglp_semaphore *sem,
 	if(likely(global_heap_node)) {
 		if(binheap_is_in_heap(&global_heap_node->node)) {
 			WARN_ON(1);
-			ikglp_del_global_list(sem, t, global_heap_node);
+			r2dglp_del_global_list(sem, t, global_heap_node);
 		}
-		ikglp_add_global_list(sem, t, global_heap_node);
+		r2dglp_add_global_list(sem, t, global_heap_node);
 	}
 	// update donor eligiblity list.
 	if(likely(donee_heap_node))
-		ikglp_add_donees(sem, fq, t, donee_heap_node);
+		r2dglp_add_donees(sem, fq, t, donee_heap_node);
 
 	if(sem->shortest_fifo_queue == fq)
-		sem->shortest_fifo_queue = ikglp_find_shortest(sem, fq);
+		sem->shortest_fifo_queue = r2dglp_find_shortest(sem, fq);
 
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	if(sem->aff_obs)
 		sem->aff_obs->ops->notify_enqueue(sem->aff_obs, fq, t);
 #endif
 
-	wait->cur_q = IKGLP_FQ;
+	wait->cur_q = R2DGLP_FQ;
 	wait->fq = fq;
 	mb();
 
-	TRACE_TASK(t, "shortest queue is now %d\n", ikglp_get_idx(sem, fq));
+	TRACE_TASK(t, "shortest queue is now %d\n", r2dglp_get_idx(sem, fq));
 }
 
 
-static void ikglp_enqueue_on_fq(struct ikglp_semaphore *sem,
+static void r2dglp_enqueue_on_fq(struct r2dglp_semaphore *sem,
 				struct fifo_queue *fq,
-				ikglp_wait_state_t *wait,
+				r2dglp_wait_state_t *wait,
 				unsigned long flags)
 {
 	/* resource is not free => must suspend and wait */
 	TRACE_TASK(wait->task, "queue %d: Resource is not free => must suspend "
 			   "and wait.\n",
-			   ikglp_get_idx(sem, fq));
+			   r2dglp_get_idx(sem, fq));
 
 	INIT_BINHEAP_NODE(&wait->global_heap_node.node);
 	INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
 
-	__ikglp_enqueue_on_fq(sem, fq, wait,
+	__r2dglp_enqueue_on_fq(sem, fq, wait,
 			&wait->global_heap_node, &wait->donee_heap_node);
 
 	/* call unlocks sem->lock */
-	ikglp_refresh_owners_prio_increase(wait->task, fq, sem, flags);
+	r2dglp_refresh_owners_prio_increase(wait->task, fq, sem, flags);
 }
 
 
-static void __ikglp_enqueue_on_pq(struct ikglp_semaphore *sem,
-				ikglp_wait_state_t *wait)
+static void __r2dglp_enqueue_on_pq(struct r2dglp_semaphore *sem,
+				r2dglp_wait_state_t *wait)
 {
 	TRACE_TASK(wait->task, "goes to PQ.\n");
 
 	wait->pq_node.task = wait->task;  /* copy over task (little redundant...) */
 
 	binheap_add(&wait->pq_node.node, &sem->priority_queue,
-			ikglp_heap_node_t, node);
+			r2dglp_heap_node_t, node);
 
-	wait->cur_q = IKGLP_PQ;
+	wait->cur_q = R2DGLP_PQ;
 }
 
-static void ikglp_enqueue_on_pq(struct ikglp_semaphore *sem,
-				ikglp_wait_state_t *wait)
+static void r2dglp_enqueue_on_pq(struct r2dglp_semaphore *sem,
+				r2dglp_wait_state_t *wait)
 {
 	INIT_BINHEAP_NODE(&wait->global_heap_node.node);
 	INIT_BINHEAP_NODE(&wait->donee_heap_node.node);
 	INIT_BINHEAP_NODE(&wait->pq_node.node);
 
-	__ikglp_enqueue_on_pq(sem, wait);
+	__r2dglp_enqueue_on_pq(sem, wait);
 }
 
-static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
-				ikglp_wait_state_t* wait,
+static void r2dglp_enqueue_on_donor(struct r2dglp_semaphore *sem,
+				r2dglp_wait_state_t* wait,
 				unsigned long flags)
 {
 	struct task_struct *t = wait->task;
-	ikglp_donee_heap_node_t *donee_node = NULL;
+	r2dglp_donee_heap_node_t *donee_node = NULL;
 	struct task_struct *donee;
 
 	struct task_struct *old_max_eff_prio;
@@ -643,15 +643,15 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 	INIT_BINHEAP_NODE(&wait->node);
 
 	/* Add donor to the global list. */
-	ikglp_add_global_list(sem, t, &wait->global_heap_node);
+	r2dglp_add_global_list(sem, t, &wait->global_heap_node);
 
 	/* Select a donee */
 #ifdef CONFIG_LITMUS_AFFINITY_LOCKING
 	donee_node = (sem->aff_obs) ?
 		sem->aff_obs->ops->advise_donee_selection(sem->aff_obs, t) :
-		binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
+		binheap_top_entry(&sem->donees, r2dglp_donee_heap_node_t, node);
 #else
-	donee_node = binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node);
+	donee_node = binheap_top_entry(&sem->donees, r2dglp_donee_heap_node_t, node);
 #endif
 
 	donee = donee_node->task;
@@ -667,7 +667,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 	wait->donee_info = donee_node;
 
 	/* Add t to donor heap. */
-	binheap_add(&wait->node, &sem->donors, ikglp_wait_state_t, node);
+	binheap_add(&wait->node, &sem->donors, r2dglp_wait_state_t, node);
 
 	/* Now adjust the donee's priority. */
 
@@ -680,7 +680,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 		/* Steal donation relation. Evict old donor to PQ. */
 
 		/* Remove old donor from donor heap */
-		ikglp_wait_state_t *old_wait = donee_node->donor_info;
+		r2dglp_wait_state_t *old_wait = donee_node->donor_info;
 		struct task_struct *old_donor = old_wait->task;
 
 		TRACE_TASK(t, "Donee (%s/%d) had donor %s/%d. "
@@ -695,10 +695,10 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 		/* WARNING: have not updated inh_prio! */
 
 		/* Add old donor to PQ. */
-		__ikglp_enqueue_on_pq(sem, old_wait);
+		__r2dglp_enqueue_on_pq(sem, old_wait);
 
 		/* Remove old donor from the global heap. */
-		ikglp_del_global_list(sem, old_donor, &old_wait->global_heap_node);
+		r2dglp_del_global_list(sem, old_donor, &old_wait->global_heap_node);
 	}
 
 	/* Add back donee's node to the donees heap with increased prio */
@@ -706,7 +706,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 
 	donee_node->donor_info = wait;
 	INIT_BINHEAP_NODE(&donee_node->node);
-	binheap_add(&donee_node->node, &sem->donees, ikglp_donee_heap_node_t, node);
+	binheap_add(&donee_node->node, &sem->donees, r2dglp_donee_heap_node_t, node);
 
 	/* Add an inheritance/donation to the donee's inheritance heap. */
 	wait->prio_donation.lock = (struct litmus_lock*)sem;
@@ -741,7 +741,7 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 		raw_spin_unlock(&tsk_rt(donee)->hp_blocked_tasks_lock);
 
 		/* call unlocks sem->lock */
-		ikglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags);
+		r2dglp_refresh_owners_prio_increase(donee, donee_fq, sem, flags);
 	}
 	else {
 		TRACE_TASK(t, "%s/%d is the owner. "
@@ -760,14 +760,14 @@ static void ikglp_enqueue_on_donor(struct ikglp_semaphore *sem,
 		unlock_fine_irqrestore(&sem->lock, flags);
 	}
 
-	wait->cur_q = IKGLP_DONOR;
+	wait->cur_q = R2DGLP_DONOR;
 }
 
 
-int ikglp_lock(struct litmus_lock* l)
+int r2dglp_lock(struct litmus_lock* l)
 {
 	struct task_struct* t = current;
-	struct ikglp_semaphore *sem = ikglp_from_lock(l);
+	struct r2dglp_semaphore *sem = r2dglp_from_lock(l);
 	unsigned long flags = 0, more_flags;
 	struct fifo_queue *fq = NULL;
 	int replica = -EINVAL;
@@ -776,7 +776,7 @@ int ikglp_lock(struct litmus_lock* l)
 	raw_spinlock_t *dgl_lock;
 #endif
 
-	ikglp_wait_state_t wait;
+	r2dglp_wait_state_t wait;
 
 	if (!is_realtime(t))
 		return -EPERM;
@@ -804,11 +804,11 @@ int ikglp_lock(struct litmus_lock* l)
 #endif
 	if(fq->count == 0) {
 		/* take available resource */
-		replica = ikglp_get_idx(sem, fq);
+		replica = r2dglp_get_idx(sem, fq);
 
 		TRACE_CUR("Getting replica %d\n", replica);
 
-		ikglp_get_immediate(t, fq, sem, flags);  /* unlocks sem->lock */
+		r2dglp_get_immediate(t, fq, sem, flags);  /* unlocks sem->lock */
 
 		raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
 		unlock_global_irqrestore(dgl_lock, flags);
@@ -825,7 +825,7 @@ int ikglp_lock(struct litmus_lock* l)
 
 			set_task_state(t, TASK_UNINTERRUPTIBLE);
 
-			ikglp_enqueue_on_fq(sem, fq, &wait, flags);  /* unlocks sem->lock */
+			r2dglp_enqueue_on_fq(sem, fq, &wait, flags);  /* unlocks sem->lock */
 		}
 	}
 	else {
@@ -840,16 +840,16 @@ int ikglp_lock(struct litmus_lock* l)
 		/* FIXME: interruptible would be nice some day */
 		set_task_state(t, TASK_UNINTERRUPTIBLE);
 
-		if(litmus->__compare(ikglp_mth_highest(sem), BASE, t, BASE)) {
+		if(litmus->__compare(r2dglp_mth_highest(sem), BASE, t, BASE)) {
 			TRACE_CUR("Going on PQ heap.\n");
 			/* enqueue on PQ */
-			ikglp_enqueue_on_pq(sem, &wait);
+			r2dglp_enqueue_on_pq(sem, &wait);
 			unlock_fine_irqrestore(&sem->lock, flags);
 		}
 		else {
 			/* enqueue as donor */
 			TRACE_CUR("Going on donor heap.\n");
-			ikglp_enqueue_on_donor(sem, &wait, flags);  /* unlocks sem->lock */
+			r2dglp_enqueue_on_donor(sem, &wait, flags);  /* unlocks sem->lock */
 		}
 	}
 
@@ -870,7 +870,7 @@ int ikglp_lock(struct litmus_lock* l)
 
 	tsk_rt(t)->blocked_lock_data = 0;
 
-	replica = ikglp_get_idx(sem, fq);
+	replica = r2dglp_get_idx(sem, fq);
 
 acquired:
 	TRACE_CUR("Acquired lock %d, queue %d\n", l->ident, replica);
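For orientation, the lock path ending at the acquired: label routes each request one of four ways. The following condensation is illustrative only: it reuses this file's function names, but simplifies the structure, and the exact FIFO-occupancy test lives in code outside this hunk, so it is marked as an assumption.

    /* Condensed, illustrative routing logic of r2dglp_lock(); not the
     * literal code (locking, affinity advice, and wakeup elided). */
    static void route_request(struct r2dglp_semaphore *sem,
                    r2dglp_wait_state_t *wait, unsigned long flags)
    {
        struct fifo_queue *fq = sem->shortest_fifo_queue;

        if (sem->nr_in_fifos < sem->max_in_fifos) {  /* room in FIFOs (assumed test) */
            if (fq->count == 0)
                r2dglp_get_immediate(wait->task, fq, sem, flags);
            else
                r2dglp_enqueue_on_fq(sem, fq, wait, flags);
        } else if (litmus->__compare(r2dglp_mth_highest(sem), BASE,
                                     wait->task, BASE)) {
            r2dglp_enqueue_on_pq(sem, wait);            /* below top-m: park in PQ */
        } else {
            r2dglp_enqueue_on_donor(sem, wait, flags);  /* top-m: donate priority */
        }
    }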
@@ -883,70 +883,70 @@ acquired:
 	return replica;
 }
 
-static void __drop_from_donor(struct ikglp_semaphore *sem,
-				ikglp_wait_state_t *wait)
+static void __drop_from_donor(struct r2dglp_semaphore *sem,
+				r2dglp_wait_state_t *wait)
 {
-	BUG_ON(wait->cur_q != IKGLP_DONOR);
+	BUG_ON(wait->cur_q != R2DGLP_DONOR);
 
 	TRACE_TASK(wait->task, "is being dropped from donor heap.\n");
 
 	binheap_delete(&wait->node, &sem->donors);
-	wait->cur_q = IKGLP_INVL;
+	wait->cur_q = R2DGLP_INVL;
 }
 
-static void ikglp_move_donor_to_fq(struct ikglp_semaphore *sem,
+static void r2dglp_move_donor_to_fq(struct r2dglp_semaphore *sem,
 				struct fifo_queue *fq,
-				ikglp_wait_state_t *donor_info)
+				r2dglp_wait_state_t *donor_info)
 {
 	struct task_struct *t = donor_info->task;
 
 	TRACE_CUR("Donor %s/%d being moved to fq %d\n",
 			  t->comm,
 			  t->pid,
-			  ikglp_get_idx(sem, fq));
+			  r2dglp_get_idx(sem, fq));
 
 	__drop_from_donor(sem, donor_info);
 
 	/* Already in global_list, so pass null to prevent adding 2nd time. */
-	__ikglp_enqueue_on_fq(sem, fq, donor_info,
+	__r2dglp_enqueue_on_fq(sem, fq, donor_info,
 			NULL, /* pass NULL */
 			&donor_info->donee_heap_node);
 
-	/* Note: ikglp_update_owners_prio() still needs to be called. */
+	/* Note: r2dglp_update_owners_prio() still needs to be called. */
 }
 
-static void __drop_from_pq(struct ikglp_semaphore *sem,
-				ikglp_wait_state_t *wait)
+static void __drop_from_pq(struct r2dglp_semaphore *sem,
+				r2dglp_wait_state_t *wait)
 {
-	BUG_ON(wait->cur_q != IKGLP_PQ);
+	BUG_ON(wait->cur_q != R2DGLP_PQ);
 
 	TRACE_TASK(wait->task, "is being dropped from the PQ.\n");
 
 	binheap_delete(&wait->pq_node.node, &sem->priority_queue);
-	wait->cur_q = IKGLP_INVL;
+	wait->cur_q = R2DGLP_INVL;
 }
 
-static void ikglp_move_pq_to_fq(struct ikglp_semaphore *sem,
+static void r2dglp_move_pq_to_fq(struct r2dglp_semaphore *sem,
 				struct fifo_queue *fq,
-				ikglp_wait_state_t *wait)
+				r2dglp_wait_state_t *wait)
 {
 	struct task_struct *t = wait->task;
 
 	TRACE_CUR("PQ request %s/%d being moved to fq %d\n",
 			  t->comm,
 			  t->pid,
-			  ikglp_get_idx(sem, fq));
+			  r2dglp_get_idx(sem, fq));
 
 	__drop_from_pq(sem, wait);
-	__ikglp_enqueue_on_fq(sem, fq, wait,
+	__r2dglp_enqueue_on_fq(sem, fq, wait,
 			&wait->global_heap_node,
 			&wait->donee_heap_node);
 
-	/* Note: ikglp_update_owners_prio() still needs to be called. */
+	/* Note: r2dglp_update_owners_prio() still needs to be called. */
 }
 
-static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
-				struct ikglp_semaphore* sem,
+static r2dglp_wait_state_t* r2dglp_find_hp_waiter_to_steal(
+				struct r2dglp_semaphore* sem,
 				struct fifo_queue* skip)
 {
 	/* must hold sem->lock */
@@ -962,10 +962,10 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
 
 			TRACE_CUR("hp_waiter on fq %d (%s/%d) has higher prio than "
 					  "hp_waiter on fq %d (%s/%d)\n",
-					  ikglp_get_idx(sem, &sem->fifo_queues[i]),
+					  r2dglp_get_idx(sem, &sem->fifo_queues[i]),
 					  sem->fifo_queues[i].hp_waiter->comm,
 					  sem->fifo_queues[i].hp_waiter->pid,
-					  (fq) ? ikglp_get_idx(sem, fq) : 0,
+					  (fq) ? r2dglp_get_idx(sem, fq) : 0,
 					  (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->comm : "null") : "nullXX",
 					  (fq) ? ((fq->hp_waiter) ? fq->hp_waiter->pid : 0) : -2);
 
@@ -977,12 +977,12 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
 
 	if(fq) {
 		struct task_struct *max_hp = fq->hp_waiter;
-		ikglp_wait_state_t* ret = NULL;
+		r2dglp_wait_state_t* ret = NULL;
 
 		TRACE_CUR("Searching for %s/%d on fq %d\n",
 				  max_hp->comm,
 				  max_hp->pid,
-				  ikglp_get_idx(sem, fq));
+				  r2dglp_get_idx(sem, fq));
 
 		BUG_ON(!max_hp);
 
@@ -992,14 +992,14 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
 			queued = (struct task_struct*) wait->private;
 
 			TRACE_CUR("fq %d entry: %s/%d\n",
-					  ikglp_get_idx(sem, fq),
+					  r2dglp_get_idx(sem, fq),
 					  queued->comm,
 					  queued->pid);
 
 			/* Compare task prios, find high prio task. */
 			if (queued == max_hp) {
 				TRACE_CUR("Found it!\n");
-				ret = container_of(wait, ikglp_wait_state_t, fq_node);
+				ret = container_of(wait, r2dglp_wait_state_t, fq_node);
 			}
 		}
 
@@ -1010,13 +1010,13 @@ static ikglp_wait_state_t* ikglp_find_hp_waiter_to_steal(
 	return(NULL);
 }
 
-static void __drop_from_fq(struct ikglp_semaphore *sem,
-				ikglp_wait_state_t *wait)
+static void __drop_from_fq(struct r2dglp_semaphore *sem,
+				r2dglp_wait_state_t *wait)
 {
 	struct task_struct *t = wait->task;
 	struct fifo_queue *fq = wait->fq;
 
-	BUG_ON(wait->cur_q != IKGLP_FQ);
+	BUG_ON(wait->cur_q != R2DGLP_FQ);
 	BUG_ON(!fq);
 
 	TRACE_TASK(t, "is being dropped from fq.\n");
@@ -1030,9 +1030,9 @@ static void __drop_from_fq(struct ikglp_semaphore *sem,
 #endif
 
 	if(t == fq->hp_waiter) {
-		fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL);
+		fq->hp_waiter = r2dglp_find_hp_waiter(fq, NULL);
 		TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
-				   ikglp_get_idx(sem, fq),
+				   r2dglp_get_idx(sem, fq),
 				   (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
 				   (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
 	}
@@ -1042,42 +1042,42 @@ static void __drop_from_fq(struct ikglp_semaphore *sem,
 		sem->shortest_fifo_queue = fq;
 	--(sem->nr_in_fifos);
 
-	wait->cur_q = IKGLP_INVL;
+	wait->cur_q = R2DGLP_INVL;
 }
 
-static void ikglp_steal_to_fq(struct ikglp_semaphore *sem,
+static void r2dglp_steal_to_fq(struct r2dglp_semaphore *sem,
 				struct fifo_queue *fq,
-				ikglp_wait_state_t *fq_wait)
+				r2dglp_wait_state_t *fq_wait)
 {
 	WARN_ON(fq_wait->fq != fq_wait->donee_heap_node.fq);
 	__drop_from_fq(sem, fq_wait);
 
 	fq_wait->donee_heap_node.fq = fq;  // just to be safe
-	__ikglp_enqueue_on_fq(sem, fq, fq_wait, NULL, NULL);
+	__r2dglp_enqueue_on_fq(sem, fq, fq_wait, NULL, NULL);
 
 	/* Note: We have not checked the priority inheritance of fq's owner yet. */
 }
 
 
-static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem,
+static void r2dglp_migrate_fq_to_owner_heap_nodes(struct r2dglp_semaphore *sem,
 				struct fifo_queue *fq,
-				ikglp_wait_state_t *old_wait)
+				r2dglp_wait_state_t *old_wait)
 {
 	struct task_struct *t = old_wait->task;
 
 	BUG_ON(old_wait->donee_heap_node.fq != fq);
 
 	TRACE_TASK(t, "Migrating wait_state to memory of queue %d.\n",
-			   ikglp_get_idx(sem, fq));
+			   r2dglp_get_idx(sem, fq));
 
 	/* Need to migrate global_heap_node and donee_heap_node off of the stack
 	   to the nodes allocated for the owner of this fq. */
 
 	/* TODO: Enhance binheap() to perform this operation in place. */
 
-	ikglp_del_global_list(sem, t, &old_wait->global_heap_node);  /* remove */
+	r2dglp_del_global_list(sem, t, &old_wait->global_heap_node);  /* remove */
 	fq->global_heap_node = old_wait->global_heap_node;  /* copy */
-	ikglp_add_global_list(sem, t, &fq->global_heap_node);  /* re-add */
+	r2dglp_add_global_list(sem, t, &fq->global_heap_node);  /* re-add */
 
 	binheap_delete(&old_wait->donee_heap_node.node, &sem->donees);  /* remove */
 	fq->donee_heap_node = old_wait->donee_heap_node;  /* copy */
@@ -1092,32 +1092,32 @@ static void ikglp_migrate_fq_to_owner_heap_nodes(struct ikglp_semaphore *sem,
1092 } 1092 }
1093 INIT_BINHEAP_NODE(&fq->donee_heap_node.node); 1093 INIT_BINHEAP_NODE(&fq->donee_heap_node.node);
1094 binheap_add(&fq->donee_heap_node.node, &sem->donees, 1094 binheap_add(&fq->donee_heap_node.node, &sem->donees,
1095 ikglp_donee_heap_node_t, node); /* re-add */ 1095 r2dglp_donee_heap_node_t, node); /* re-add */
1096} 1096}
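
The remove/copy/re-add sequence above exists because binheap, per the TODO, cannot relocate a linked node in place: each entry is unlinked, its payload copied from the waiter's stack into fq-owned storage, its intrusive link state re-initialized, and the copy re-inserted. A toy stand-alone model of the same idiom follows, using a singly linked list in place of binheap; all names are illustrative.

    #include <stdio.h>
    #include <string.h>

    struct node { int key; struct node *next; };

    static struct node *head;

    static void unlink_node(struct node *n) { head = n->next; /* demo: n == head */ }
    static void insert_node(struct node *n) { n->next = head; head = n; }

    int main(void)
    {
            struct node stack_node = { .key = 42, .next = NULL }; /* waiter's stack */
            struct node owner_node;                               /* fq-owned memory */

            insert_node(&stack_node);
            unlink_node(&stack_node);                   /* 1. remove              */
            memcpy(&owner_node, &stack_node,
                   sizeof(owner_node));                 /* 2. copy the payload    */
            owner_node.next = NULL;                     /* 3. re-init link state  */
            insert_node(&owner_node);                   /* 4. re-add the copy     */
            printf("%d\n", head->key);                  /* 42 */
            return 0;
    }
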
1097 1097
1098 1098
1099 1099
1100void ikglp_grant_replica_to_next(struct ikglp_semaphore *sem, 1100void r2dglp_grant_replica_to_next(struct r2dglp_semaphore *sem,
1101 struct fifo_queue *fq) 1101 struct fifo_queue *fq)
1102{ 1102{
1103 wait_queue_t *wait; 1103 wait_queue_t *wait;
1104 ikglp_wait_state_t *fq_wait; 1104 r2dglp_wait_state_t *fq_wait;
1105 struct task_struct *next; 1105 struct task_struct *next;
1106 1106
1107 BUG_ON(!waitqueue_active(&fq->wait)); 1107 BUG_ON(!waitqueue_active(&fq->wait));
1108 1108
1109 wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list); 1109 wait = list_entry(fq->wait.task_list.next, wait_queue_t, task_list);
1110 fq_wait = container_of(wait, ikglp_wait_state_t, fq_node); 1110 fq_wait = container_of(wait, r2dglp_wait_state_t, fq_node);
1111 next = (struct task_struct*) wait->private; 1111 next = (struct task_struct*) wait->private;
1112 1112
1113 __remove_wait_queue(&fq->wait, wait); 1113 __remove_wait_queue(&fq->wait, wait);
1114 1114
1115 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n", 1115 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
1116 ikglp_get_idx(sem, fq), 1116 r2dglp_get_idx(sem, fq),
1117 next->comm, next->pid); 1117 next->comm, next->pid);
1118 1118
1119 /* migrate wait-state to fifo-memory. */ 1119 /* migrate wait-state to fifo-memory. */
1120 ikglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait); 1120 r2dglp_migrate_fq_to_owner_heap_nodes(sem, fq, fq_wait);
1121 1121
1122 /* next becomes the resource holder */ 1122 /* next becomes the resource holder */
1123 fq->owner = next; 1123 fq->owner = next;
@@ -1135,9 +1135,9 @@ void ikglp_grant_replica_to_next(struct ikglp_semaphore *sem,
1135 * inherit. However, we need to make sure that the 1135 * inherit. However, we need to make sure that the
1136 * next-highest priority in the queue is reflected in 1136 * next-highest priority in the queue is reflected in
1137 * hp_waiter. */ 1137 * hp_waiter. */
1138 fq->hp_waiter = ikglp_find_hp_waiter(fq, NULL); 1138 fq->hp_waiter = r2dglp_find_hp_waiter(fq, NULL);
1139 TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n", 1139 TRACE_TASK(next, "New hp_waiter for fq %d is %s/%d!\n",
1140 ikglp_get_idx(sem, fq), 1140 r2dglp_get_idx(sem, fq),
1141 (fq->hp_waiter) ? fq->hp_waiter->comm : "null", 1141 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
1142 (fq->hp_waiter) ? fq->hp_waiter->pid : 0); 1142 (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
1143 1143
@@ -1161,7 +1161,7 @@ void ikglp_grant_replica_to_next(struct ikglp_semaphore *sem,
1161 * then it (probably) ought to inherit the highest-priority 1161 * then it (probably) ought to inherit the highest-priority
1162 * waiter's priority. */ 1162 * waiter's priority. */
1163 TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n", 1163 TRACE_TASK(next, "is not hp_waiter of replica %d. hp_waiter is %s/%d\n",
1164 ikglp_get_idx(sem, fq), 1164 r2dglp_get_idx(sem, fq),
1165 (fq->hp_waiter) ? fq->hp_waiter->comm : "null", 1165 (fq->hp_waiter) ? fq->hp_waiter->comm : "null",
1166 (fq->hp_waiter) ? fq->hp_waiter->pid : 0); 1166 (fq->hp_waiter) ? fq->hp_waiter->pid : 0);
1167 1167
@@ -1201,10 +1201,10 @@ void ikglp_grant_replica_to_next(struct ikglp_semaphore *sem,
1201#define ALLOW_STEALING 1 1201#define ALLOW_STEALING 1
1202#define ALWAYS_TERMINATE_DONATION 1 1202#define ALWAYS_TERMINATE_DONATION 1
1203 1203
1204void ikglp_move_next_to_fq(struct ikglp_semaphore *sem, 1204void r2dglp_move_next_to_fq(struct r2dglp_semaphore *sem,
1205 struct fifo_queue *fq, 1205 struct fifo_queue *fq,
1206 struct task_struct *t, 1206 struct task_struct *t,
1207 ikglp_donee_heap_node_t *donee_node, 1207 r2dglp_donee_heap_node_t *donee_node,
1208 unsigned long *flags, 1208 unsigned long *flags,
1209 int allow_stealing, 1209 int allow_stealing,
1210 int always_terminate_donation) 1210 int always_terminate_donation)
@@ -1213,12 +1213,12 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1213 struct task_struct *new_on_fq = NULL; 1213 struct task_struct *new_on_fq = NULL;
1214 struct fifo_queue *fq_of_new_on_fq = NULL; 1214 struct fifo_queue *fq_of_new_on_fq = NULL;
1215 1215
1216 ikglp_wait_state_t *other_donor_info = NULL; 1216 r2dglp_wait_state_t *other_donor_info = NULL;
1217 struct fifo_queue *to_steal = NULL; 1217 struct fifo_queue *to_steal = NULL;
1218 int need_steal_prio_reeval = 0; 1218 int need_steal_prio_reeval = 0;
1219 1219
1220 if (donee_node->donor_info) { 1220 if (donee_node->donor_info) {
1221 ikglp_wait_state_t *donor_info = donee_node->donor_info; 1221 r2dglp_wait_state_t *donor_info = donee_node->donor_info;
1222 1222
1223 new_on_fq = donor_info->task; 1223 new_on_fq = donor_info->task;
1224 1224
@@ -1245,10 +1245,10 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1245 TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d " 1245 TRACE_TASK(t, "Moving MY donor (%s/%d) to fq %d "
1246 "(non-aff wanted fq %d).\n", 1246 "(non-aff wanted fq %d).\n",
1247 new_on_fq->comm, new_on_fq->pid, 1247 new_on_fq->comm, new_on_fq->pid,
1248 ikglp_get_idx(sem, fq_of_new_on_fq), 1248 r2dglp_get_idx(sem, fq_of_new_on_fq),
1249 ikglp_get_idx(sem, fq)); 1249 r2dglp_get_idx(sem, fq));
1250 1250
1251 ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, donor_info); 1251 r2dglp_move_donor_to_fq(sem, fq_of_new_on_fq, donor_info);
1252 1252
1253 /* treat donor as if it had donated to a task other than 't'. 1253 /* treat donor as if it had donated to a task other than 't'.
1254 * this triggers the termination of the donation relationship. */ 1254 * this triggers the termination of the donation relationship. */
@@ -1259,10 +1259,10 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1259#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1259#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1260 other_donor_info = (sem->aff_obs) ? 1260 other_donor_info = (sem->aff_obs) ?
1261 sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) : 1261 sem->aff_obs->ops->advise_donor_to_fq(sem->aff_obs, fq) :
1262 binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 1262 binheap_top_entry(&sem->donors, r2dglp_wait_state_t, node);
1263#else 1263#else
1264 other_donor_info = 1264 other_donor_info =
1265 binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 1265 binheap_top_entry(&sem->donors, r2dglp_wait_state_t, node);
1266#endif 1266#endif
1267 1267
1268 new_on_fq = other_donor_info->task; 1268 new_on_fq = other_donor_info->task;
@@ -1292,15 +1292,15 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1292 TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d " 1292 TRACE_TASK(t, "Moving a donor (%s/%d) to fq %d "
1293 "(non-aff wanted fq %d).\n", 1293 "(non-aff wanted fq %d).\n",
1294 new_on_fq->comm, new_on_fq->pid, 1294 new_on_fq->comm, new_on_fq->pid,
1295 ikglp_get_idx(sem, fq_of_new_on_fq), 1295 r2dglp_get_idx(sem, fq_of_new_on_fq),
1296 ikglp_get_idx(sem, fq)); 1296 r2dglp_get_idx(sem, fq));
1297 1297
1298 ikglp_move_donor_to_fq(sem, fq_of_new_on_fq, other_donor_info); 1298 r2dglp_move_donor_to_fq(sem, fq_of_new_on_fq, other_donor_info);
1299 } 1299 }
1300 else if(!binheap_empty(&sem->priority_queue)) { /* No donors, so move PQ */ 1300 else if(!binheap_empty(&sem->priority_queue)) { /* No donors, so move PQ */
1301 ikglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue, 1301 r2dglp_heap_node_t *pq_node = binheap_top_entry(&sem->priority_queue,
1302 ikglp_heap_node_t, node); 1302 r2dglp_heap_node_t, node);
1303 ikglp_wait_state_t *pq_wait = container_of(pq_node, ikglp_wait_state_t, 1303 r2dglp_wait_state_t *pq_wait = container_of(pq_node, r2dglp_wait_state_t,
1304 pq_node); 1304 pq_node);
1305 1305
1306 new_on_fq = pq_wait->task; 1306 new_on_fq = pq_wait->task;
@@ -1325,25 +1325,25 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1325 TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d " 1325 TRACE_TASK(t, "Moving a pq waiter (%s/%d) to fq %d "
1326 "(non-aff wanted fq %d).\n", 1326 "(non-aff wanted fq %d).\n",
1327 new_on_fq->comm, new_on_fq->pid, 1327 new_on_fq->comm, new_on_fq->pid,
1328 ikglp_get_idx(sem, fq_of_new_on_fq), 1328 r2dglp_get_idx(sem, fq_of_new_on_fq),
1329 ikglp_get_idx(sem, fq)); 1329 r2dglp_get_idx(sem, fq));
1330 1330
1331 ikglp_move_pq_to_fq(sem, fq_of_new_on_fq, pq_wait); 1331 r2dglp_move_pq_to_fq(sem, fq_of_new_on_fq, pq_wait);
1332 } 1332 }
1333 else if(allow_stealing && fq->count == 0) { 1333 else if(allow_stealing && fq->count == 0) {
1334 /* No PQ and this queue is empty, so steal. */ 1334 /* No PQ and this queue is empty, so steal. */
1335 1335
1336 ikglp_wait_state_t *fq_wait; 1336 r2dglp_wait_state_t *fq_wait;
1337 1337
1338 TRACE_TASK(t, "Looking to steal a request for fq %d...\n", 1338 TRACE_TASK(t, "Looking to steal a request for fq %d...\n",
1339 ikglp_get_idx(sem, fq)); 1339 r2dglp_get_idx(sem, fq));
1340 1340
1341#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1341#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1342 fq_wait = (sem->aff_obs) ? 1342 fq_wait = (sem->aff_obs) ?
1343 sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) : 1343 sem->aff_obs->ops->advise_steal(sem->aff_obs, fq) :
1344 ikglp_find_hp_waiter_to_steal(sem, fq); 1344 r2dglp_find_hp_waiter_to_steal(sem, fq);
1345#else 1345#else
1346 fq_wait = ikglp_find_hp_waiter_to_steal(sem, fq); 1346 fq_wait = r2dglp_find_hp_waiter_to_steal(sem, fq);
1347#endif 1347#endif
1348 1348
1349 if(fq_wait) { 1349 if(fq_wait) {
@@ -1355,14 +1355,14 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1355 1355
1356 TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n", 1356 TRACE_TASK(t, "Found %s/%d of fq %d to steal for fq %d...\n",
1357 new_on_fq->comm, new_on_fq->pid, 1357 new_on_fq->comm, new_on_fq->pid,
1358 ikglp_get_idx(sem, to_steal), 1358 r2dglp_get_idx(sem, to_steal),
1359 ikglp_get_idx(sem, fq)); 1359 r2dglp_get_idx(sem, fq));
1360 1360
1361 ikglp_steal_to_fq(sem, fq, fq_wait); 1361 r2dglp_steal_to_fq(sem, fq, fq_wait);
1362 } 1362 }
1363 else { 1363 else {
1364 TRACE_TASK(t, "Found nothing to steal for fq %d.\n", 1364 TRACE_TASK(t, "Found nothing to steal for fq %d.\n",
1365 ikglp_get_idx(sem, fq)); 1365 r2dglp_get_idx(sem, fq));
1366 } 1366 }
1367 } 1367 }
1368 else { 1368 else {
@@ -1393,9 +1393,9 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1393 if(donee == other_fq->owner) { 1393 if(donee == other_fq->owner) {
1394 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", 1394 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
1395 donee->comm, donee->pid, 1395 donee->comm, donee->pid,
1396 ikglp_get_idx(sem, other_fq)); 1396 r2dglp_get_idx(sem, other_fq));
1397 1397
1398 ikglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node, 1398 r2dglp_remove_donation_from_owner(&other_donor_info->prio_donation.hp_binheap_node,
1399 other_fq, sem, *flags); 1399 other_fq, sem, *flags);
1400 1400
1401 /* there should be no contention!!!! */ 1401 /* there should be no contention!!!! */
@@ -1404,24 +1404,24 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1404 else { 1404 else {
1405 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", 1405 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n",
1406 donee->comm, donee->pid, 1406 donee->comm, donee->pid,
1407 ikglp_get_idx(sem, other_fq)); 1407 r2dglp_get_idx(sem, other_fq));
1408 1408
1409 ikglp_remove_donation_from_fq_waiter(donee, 1409 r2dglp_remove_donation_from_fq_waiter(donee,
1410 &other_donor_info->prio_donation.hp_binheap_node); 1410 &other_donor_info->prio_donation.hp_binheap_node);
1411 if(donee == other_fq->hp_waiter) { 1411 if(donee == other_fq->hp_waiter) {
1412 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. " 1412 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. "
1413 "Rechecking hp_waiter.\n", 1413 "Rechecking hp_waiter.\n",
1414 donee->comm, donee->pid, 1414 donee->comm, donee->pid,
1415 ikglp_get_idx(sem, other_fq)); 1415 r2dglp_get_idx(sem, other_fq));
1416 1416
1417 other_fq->hp_waiter = ikglp_find_hp_waiter(other_fq, NULL); 1417 other_fq->hp_waiter = r2dglp_find_hp_waiter(other_fq, NULL);
1418 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", 1418 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
1419 ikglp_get_idx(sem, other_fq), 1419 r2dglp_get_idx(sem, other_fq),
1420 (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "null", 1420 (other_fq->hp_waiter) ? other_fq->hp_waiter->comm : "null",
1421 (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : 0); 1421 (other_fq->hp_waiter) ? other_fq->hp_waiter->pid : 0);
1422 1422
1423 /* unlocks sem->lock. reacquire it. */ 1423 /* unlocks sem->lock. reacquire it. */
1424 ikglp_refresh_owners_prio_decrease(other_fq, sem, *flags, 0); 1424 r2dglp_refresh_owners_prio_decrease(other_fq, sem, *flags, 0);
1425 /* there should be no contention!!!! */ 1425 /* there should be no contention!!!! */
1426 lock_fine_irqsave(&sem->lock, *flags); 1426 lock_fine_irqsave(&sem->lock, *flags);
1427 } 1427 }
@@ -1430,11 +1430,11 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1430 else if(to_steal) { 1430 else if(to_steal) {
1431 TRACE_TASK(t, "Rechecking priority inheritance of fq %d, " 1431 TRACE_TASK(t, "Rechecking priority inheritance of fq %d, "
1432 "triggered by stealing.\n", 1432 "triggered by stealing.\n",
1433 ikglp_get_idx(sem, to_steal)); 1433 r2dglp_get_idx(sem, to_steal));
1434 1434
1435 if(need_steal_prio_reeval) { 1435 if(need_steal_prio_reeval) {
1436 /* unlocks sem->lock. reacquire it. */ 1436 /* unlocks sem->lock. reacquire it. */
1437 ikglp_refresh_owners_prio_decrease(to_steal, sem, *flags, 0); 1437 r2dglp_refresh_owners_prio_decrease(to_steal, sem, *flags, 0);
1438 /* there should be no contention!!!! */ 1438 /* there should be no contention!!!! */
1439 lock_fine_irqsave(&sem->lock, *flags); 1439 lock_fine_irqsave(&sem->lock, *flags);
1440 } 1440 }
@@ -1443,7 +1443,7 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1443 /* check for new HP waiter. */ 1443 /* check for new HP waiter. */
1444 if(new_on_fq) { 1444 if(new_on_fq) {
1445 /* unlocks sem->lock. reacquire it. */ 1445 /* unlocks sem->lock. reacquire it. */
1446 ikglp_refresh_owners_prio_increase(new_on_fq, fq_of_new_on_fq, 1446 r2dglp_refresh_owners_prio_increase(new_on_fq, fq_of_new_on_fq,
1447 sem, *flags); 1447 sem, *flags);
1448 /* there should be no contention!!!! */ 1448 /* there should be no contention!!!! */
1449 lock_fine_irqsave(&sem->lock, *flags); 1449 lock_fine_irqsave(&sem->lock, *flags);
@@ -1453,13 +1453,13 @@ void ikglp_move_next_to_fq(struct ikglp_semaphore *sem,
1453 if(unlikely(fq_of_new_on_fq && 1453 if(unlikely(fq_of_new_on_fq &&
1454 fq_of_new_on_fq != fq && 1454 fq_of_new_on_fq != fq &&
1455 fq_of_new_on_fq->count == 1)) { 1455 fq_of_new_on_fq->count == 1)) {
1456 ikglp_grant_replica_to_next(sem, fq_of_new_on_fq); 1456 r2dglp_grant_replica_to_next(sem, fq_of_new_on_fq);
1457 } 1457 }
1458} 1458}
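
r2dglp_move_next_to_fq() above fills a vacated FIFO slot from four sources in strict order: the released owner's own donor, then the highest-priority donor overall, then the head of the priority queue, and finally (if stealing is allowed and the queue went empty) a waiter stolen from another FIFO. The sketch below condenses that cascade; the predicates are simplified stand-ins, and the real code additionally consults affinity advice and re-evaluates priority inheritance after each move.

    #include <stdio.h>

    enum source { FROM_MY_DONOR, FROM_ANY_DONOR, FROM_PQ, FROM_STEAL, NONE };

    static enum source pick_replacement(int have_my_donor, int have_donors,
                                        int have_pq, int allow_stealing,
                                        int fq_empty)
    {
            if (have_my_donor) return FROM_MY_DONOR;  /* 1. donor aimed at t    */
            if (have_donors)   return FROM_ANY_DONOR; /* 2. highest-prio donor  */
            if (have_pq)       return FROM_PQ;        /* 3. head of the PQ      */
            if (allow_stealing && fq_empty)
                    return FROM_STEAL;                /* 4. steal a FIFO waiter */
            return NONE;                              /* leave the slot open    */
    }

    int main(void)
    {
            printf("%d\n", pick_replacement(0, 0, 1, 1, 1)); /* 2 == FROM_PQ */
            return 0;
    }
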
1459 1459
1460int ikglp_unlock(struct litmus_lock* l) 1460int r2dglp_unlock(struct litmus_lock* l)
1461{ 1461{
1462 struct ikglp_semaphore *sem = ikglp_from_lock(l); 1462 struct r2dglp_semaphore *sem = r2dglp_from_lock(l);
1463 struct task_struct *t = current; 1463 struct task_struct *t = current;
1464 struct fifo_queue *fq; 1464 struct fifo_queue *fq;
1465 1465
@@ -1471,7 +1471,7 @@ int ikglp_unlock(struct litmus_lock* l)
1471 1471
1472 int err = 0; 1472 int err = 0;
1473 1473
1474 fq = ikglp_get_queue(sem, t); /* returns NULL if 't' is not owner. */ 1474 fq = r2dglp_get_queue(sem, t); /* returns NULL if 't' is not owner. */
1475 1475
1476 if (!fq) { 1476 if (!fq) {
1477 TRACE_TASK(t, "does not hold a replica of lock %d\n", l->ident); 1477 TRACE_TASK(t, "does not hold a replica of lock %d\n", l->ident);
@@ -1486,10 +1486,10 @@ int ikglp_unlock(struct litmus_lock* l)
1486 raw_spin_lock_irqsave(&sem->real_lock, more_flags); 1486 raw_spin_lock_irqsave(&sem->real_lock, more_flags);
1487 lock_fine_irqsave(&sem->lock, flags); 1487 lock_fine_irqsave(&sem->lock, flags);
1488 1488
1489 TRACE_TASK(t, "Freeing replica %d.\n", ikglp_get_idx(sem, fq)); 1489 TRACE_TASK(t, "Freeing replica %d.\n", r2dglp_get_idx(sem, fq));
1490 1490
1491 /* Remove 't' from the heaps, but data in nodes will still be good. */ 1491 /* Remove 't' from the heaps, but data in nodes will still be good. */
1492 ikglp_del_global_list(sem, t, &fq->global_heap_node); 1492 r2dglp_del_global_list(sem, t, &fq->global_heap_node);
1493 binheap_delete(&fq->donee_heap_node.node, &sem->donees); 1493 binheap_delete(&fq->donee_heap_node.node, &sem->donees);
1494 1494
1495 fq->owner = NULL; /* no longer owned!! */ 1495 fq->owner = NULL; /* no longer owned!! */
@@ -1512,14 +1512,14 @@ int ikglp_unlock(struct litmus_lock* l)
1512 1512
1513 /* 't' must drop all priority and clean up data structures before hand-off. 1513 /* 't' must drop all priority and clean up data structures before hand-off.
1514 1514
1515 DROP ALL INHERITANCE. IKGLP MUST BE OUTERMOST 1515 DROP ALL INHERITANCE. R2DGLP MUST BE OUTERMOST
1516 This kills any inheritance from a donor. 1516 This kills any inheritance from a donor.
1517 */ 1517 */
1518 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); 1518 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
1519 { 1519 {
1520 int count = 0; 1520 int count = 0;
1521 1521
1522 TRACE_TASK(t, "discarding inheritance because IKGLP is outermost\n"); 1522 TRACE_TASK(t, "discarding inheritance because R2DGLP is outermost\n");
1523 1523
1524 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { 1524 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
1525 binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks, 1525 binheap_delete_root(&tsk_rt(t)->hp_blocked_tasks,
@@ -1535,7 +1535,7 @@ int ikglp_unlock(struct litmus_lock* l)
1535 if (likely(!fq->is_vunlocked)) { 1535 if (likely(!fq->is_vunlocked)) {
1536 /* Move the next request into the FQ and update heaps as needed. 1536 /* Move the next request into the FQ and update heaps as needed.
1537 Skip this step if we already did it during the virtual unlock. */ 1537 Skip this step if we already did it during the virtual unlock. */
1538 ikglp_move_next_to_fq(sem, fq, t, &fq->donee_heap_node, &flags, 1538 r2dglp_move_next_to_fq(sem, fq, t, &fq->donee_heap_node, &flags,
1539 ALLOW_STEALING, !ALWAYS_TERMINATE_DONATION); 1539 ALLOW_STEALING, !ALWAYS_TERMINATE_DONATION);
1540 } 1540 }
1541 else { 1541 else {
@@ -1544,7 +1544,7 @@ int ikglp_unlock(struct litmus_lock* l)
1544 } 1544 }
1545 1545
1546 if (waitqueue_active(&fq->wait)) 1546 if (waitqueue_active(&fq->wait))
1547 ikglp_grant_replica_to_next(sem, fq); 1547 r2dglp_grant_replica_to_next(sem, fq);
1548 1548
1549 unlock_fine_irqrestore(&sem->lock, flags); 1549 unlock_fine_irqrestore(&sem->lock, flags);
1550 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags); 1550 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
@@ -1558,30 +1558,30 @@ out:
1558 1558
1559 1559
1560 1560
1561void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t, 1561void r2dglp_abort_request(struct r2dglp_semaphore *sem, struct task_struct *t,
1562 unsigned long flags) 1562 unsigned long flags)
1563{ 1563{
1564 ikglp_wait_state_t *wait = 1564 r2dglp_wait_state_t *wait =
1565 (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data; 1565 (r2dglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
1566 ikglp_donee_heap_node_t *donee_info; 1566 r2dglp_donee_heap_node_t *donee_info;
1567 struct task_struct *donee; 1567 struct task_struct *donee;
1568 struct fifo_queue *donee_fq; 1568 struct fifo_queue *donee_fq;
1569 struct fifo_queue *fq; 1569 struct fifo_queue *fq;
1570 1570
1571 BUG_ON(!wait); 1571 BUG_ON(!wait);
1572 fq = wait->fq; /* dereference only after the check */ 1572 fq = wait->fq; /* dereference only after the check */
1573 /* drop the request from the proper IKGLP data structure and re-eval 1573 /* drop the request from the proper R2DGLP data structure and re-eval
1574 * priority relations */ 1574 * priority relations */
1575 switch(wait->cur_q) 1575 switch(wait->cur_q)
1576 { 1576 {
1577 case IKGLP_PQ: 1577 case R2DGLP_PQ:
1578 /* No one inherits from waiters in PQ. Just drop the request. */ 1578 /* No one inherits from waiters in PQ. Just drop the request. */
1579 __drop_from_pq(sem, wait); 1579 __drop_from_pq(sem, wait);
1580 break; 1580 break;
1581 1581
1582 1582
1583 case IKGLP_FQ: 1583 case R2DGLP_FQ:
1584 ikglp_del_global_list(sem, t, &wait->global_heap_node); 1584 r2dglp_del_global_list(sem, t, &wait->global_heap_node);
1585 binheap_delete(&wait->donee_heap_node.node, &sem->donees); 1585 binheap_delete(&wait->donee_heap_node.node, &sem->donees);
1586 1586
1587 /* remove the task from the FQ */ 1587 /* remove the task from the FQ */
@@ -1595,7 +1595,7 @@ void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t,
1595 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock); 1595 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
1596 { 1596 {
1597 int count = 0; 1597 int count = 0;
1598 TRACE_TASK(t, "discarding inheritance because IKGLP " 1598 TRACE_TASK(t, "discarding inheritance because R2DGLP "
1599 "is outermost\n"); 1599 "is outermost\n");
1600 1600
1601 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) { 1601 while(!binheap_empty(&tsk_rt(t)->hp_blocked_tasks)) {
@@ -1609,17 +1609,17 @@ void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t,
1609 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock); 1609 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
1610 1610
1611 /* unlocks sem->lock. reacquire it. */ 1611 /* unlocks sem->lock. reacquire it. */
1612 ikglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq, 1612 r2dglp_refresh_owners_prio_decrease(wait->donee_heap_node.fq,
1613 sem, flags, 1); 1613 sem, flags, 1);
1614 /* there should be no contention!!!! */ 1614 /* there should be no contention!!!! */
1615 lock_fine_irqsave(&sem->lock, flags); 1615 lock_fine_irqsave(&sem->lock, flags);
1616 ikglp_move_next_to_fq(sem, fq, t, &wait->donee_heap_node, &flags, 1616 r2dglp_move_next_to_fq(sem, fq, t, &wait->donee_heap_node, &flags,
1617 ALLOW_STEALING, !ALWAYS_TERMINATE_DONATION); 1617 ALLOW_STEALING, !ALWAYS_TERMINATE_DONATION);
1618 break; 1618 break;
1619 1619
1620 1620
1621 case IKGLP_DONOR: 1621 case R2DGLP_DONOR:
1622 ikglp_del_global_list(sem, t, &wait->global_heap_node); 1622 r2dglp_del_global_list(sem, t, &wait->global_heap_node);
1623 __drop_from_donor(sem, wait); 1623 __drop_from_donor(sem, wait);
1624 1624
1625 /* update donee */ 1625 /* update donee */
@@ -1632,9 +1632,9 @@ void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t,
1632 if (donee == donee_fq->owner) { 1632 if (donee == donee_fq->owner) {
1633 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n", 1633 TRACE_TASK(t, "Donee %s/%d is an owner of fq %d.\n",
1634 donee->comm, donee->pid, 1634 donee->comm, donee->pid,
1635 ikglp_get_idx(sem, donee_fq)); 1635 r2dglp_get_idx(sem, donee_fq));
1636 /* unlocks sem->lock. reacquire it. */ 1636 /* unlocks sem->lock. reacquire it. */
1637 ikglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node, 1637 r2dglp_remove_donation_from_owner(&wait->prio_donation.hp_binheap_node,
1638 donee_fq, sem, flags); 1638 donee_fq, sem, flags);
1639 /* there should be no contention!!!! */ 1639 /* there should be no contention!!!! */
1640 lock_fine_irqsave(&sem->lock, flags); 1640 lock_fine_irqsave(&sem->lock, flags);
@@ -1642,24 +1642,24 @@ void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t,
1642 else { 1642 else {
1643 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n", 1643 TRACE_TASK(t, "Donee %s/%d is blocked in fq %d.\n",
1644 donee->comm, donee->pid, 1644 donee->comm, donee->pid,
1645 ikglp_get_idx(sem, donee_fq)); 1645 r2dglp_get_idx(sem, donee_fq));
1646 1646
1647 ikglp_remove_donation_from_fq_waiter(donee, 1647 r2dglp_remove_donation_from_fq_waiter(donee,
1648 &wait->prio_donation.hp_binheap_node); 1648 &wait->prio_donation.hp_binheap_node);
1649 if(donee == donee_fq->hp_waiter) { 1649 if(donee == donee_fq->hp_waiter) {
1650 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. " 1650 TRACE_TASK(t, "Donee %s/%d was an hp_waiter of fq %d. "
1651 "Rechecking hp_waiter.\n", 1651 "Rechecking hp_waiter.\n",
1652 donee->comm, donee->pid, 1652 donee->comm, donee->pid,
1653 ikglp_get_idx(sem, donee_fq)); 1653 r2dglp_get_idx(sem, donee_fq));
1654 1654
1655 donee_fq->hp_waiter = ikglp_find_hp_waiter(donee_fq, NULL); 1655 donee_fq->hp_waiter = r2dglp_find_hp_waiter(donee_fq, NULL);
1656 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n", 1656 TRACE_TASK(t, "New hp_waiter for fq %d is %s/%d!\n",
1657 ikglp_get_idx(sem, donee_fq), 1657 r2dglp_get_idx(sem, donee_fq),
1658 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null", 1658 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->comm : "null",
1659 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->pid : 0); 1659 (donee_fq->hp_waiter) ? donee_fq->hp_waiter->pid : 0);
1660 1660
1661 /* unlocks sem->lock. reacquire it. */ 1661 /* unlocks sem->lock. reacquire it. */
1662 ikglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1); 1662 r2dglp_refresh_owners_prio_decrease(donee_fq, sem, flags, 1);
1663 /* there should be no contention!!!! */ 1663 /* there should be no contention!!!! */
1664 lock_fine_irqsave(&sem->lock, flags); 1664 lock_fine_irqsave(&sem->lock, flags);
1665 } 1665 }
@@ -1670,10 +1670,10 @@ void ikglp_abort_request(struct ikglp_semaphore *sem, struct task_struct *t,
1670 BUG(); 1670 BUG();
1671 } 1671 }
1672 1672
1673 BUG_ON(wait->cur_q != IKGLP_INVL); /* state should now be invalid */ 1673 BUG_ON(wait->cur_q != R2DGLP_INVL); /* state should now be invalid */
1674} 1674}
1675 1675
1676void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t) 1676void r2dglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1677{ 1677{
1678 /* 1678 /*
1679 * PRE: (1) Our deadline has already been postponed. 1679 * PRE: (1) Our deadline has already been postponed.
@@ -1686,7 +1686,7 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1686 * step 3: reissue the request 1686 * step 3: reissue the request
1687 */ 1687 */
1688 1688
1689 struct ikglp_semaphore *sem = ikglp_from_lock(l); 1689 struct r2dglp_semaphore *sem = r2dglp_from_lock(l);
1690 struct litmus_lock* blocked_lock; 1690 struct litmus_lock* blocked_lock;
1691 unsigned long flags = 0, more_flags; 1691 unsigned long flags = 0, more_flags;
1692 1692
@@ -1695,15 +1695,15 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1695 1695
1696 blocked_lock = tsk_rt(t)->blocked_lock; 1696 blocked_lock = tsk_rt(t)->blocked_lock;
1697 if (blocked_lock == l) { 1697 if (blocked_lock == l) {
1698 ikglp_wait_state_t *wait; 1698 r2dglp_wait_state_t *wait;
1699 ikglp_abort_request(sem, t, flags); 1699 r2dglp_abort_request(sem, t, flags);
1700 1700
1701 /* now re-issue the request */ 1701 /* now re-issue the request */
1702 1702
1703 TRACE_TASK(t, "Reissuing a request for replica from lock %d.\n", 1703 TRACE_TASK(t, "Reissuing a request for replica from lock %d.\n",
1704 l->ident); 1704 l->ident);
1705 1705
1706 wait = (ikglp_wait_state_t*)tsk_rt(t)->blocked_lock_data; 1706 wait = (r2dglp_wait_state_t*)tsk_rt(t)->blocked_lock_data;
1707 if(sem->nr_in_fifos < sem->max_in_fifos) { 1707 if(sem->nr_in_fifos < sem->max_in_fifos) {
1708 1708
1709 struct fifo_queue *fq; 1709 struct fifo_queue *fq;
@@ -1719,18 +1719,18 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1719 TRACE_TASK(t, "is going to an FQ.\n"); 1719 TRACE_TASK(t, "is going to an FQ.\n");
1720 /* if this were true, then we should have been blocked */ 1720 /* if this were true, then we should have been blocked */
1721 BUG_ON(fq->count == 0); 1721 BUG_ON(fq->count == 0);
1722 ikglp_enqueue_on_fq(sem, fq, wait, flags); /* unlocks sem->lock */ 1722 r2dglp_enqueue_on_fq(sem, fq, wait, flags); /* unlocks sem->lock */
1723 } 1723 }
1724 else if(litmus->__compare(ikglp_mth_highest(sem), BASE, t, BASE)) { 1724 else if(litmus->__compare(r2dglp_mth_highest(sem), BASE, t, BASE)) {
1725 TRACE_TASK(t, "is going to PQ.\n"); 1725 TRACE_TASK(t, "is going to PQ.\n");
1726 /* enqueue on PQ */ 1726 /* enqueue on PQ */
1727 ikglp_enqueue_on_pq(sem, wait); 1727 r2dglp_enqueue_on_pq(sem, wait);
1728 unlock_fine_irqrestore(&sem->lock, flags); 1728 unlock_fine_irqrestore(&sem->lock, flags);
1729 } 1729 }
1730 else { 1730 else {
1731 /* enqueue as donor */ 1731 /* enqueue as donor */
1732 TRACE_TASK(t, "is going to donor heap.\n"); 1732 TRACE_TASK(t, "is going to donor heap.\n");
1733 ikglp_enqueue_on_donor(sem, wait, flags); /* unlocks sem->lock */ 1733 r2dglp_enqueue_on_donor(sem, wait, flags); /* unlocks sem->lock */
1734 } 1734 }
1735 1735
1736 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags); 1736 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
@@ -1739,7 +1739,7 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1739 unlock_fine_irqrestore(&sem->lock, flags); 1739 unlock_fine_irqrestore(&sem->lock, flags);
1740 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags); 1740 raw_spin_unlock_irqrestore(&sem->real_lock, more_flags);
1741 1741
1742 TRACE_TASK(t, "is blocked, but not on IKGLP. Redirecting...\n"); 1742 TRACE_TASK(t, "is blocked, but not on R2DGLP. Redirecting...\n");
1743 if(blocked_lock->ops->supports_budget_exhaustion) { 1743 if(blocked_lock->ops->supports_budget_exhaustion) {
1744 TRACE_TASK(t, "Lock %d supports budget exhaustion.\n", 1744 TRACE_TASK(t, "Lock %d supports budget exhaustion.\n",
1745 blocked_lock->ident); 1745 blocked_lock->ident);
@@ -1755,12 +1755,12 @@ void ikglp_budget_exhausted(struct litmus_lock* l, struct task_struct* t)
1755 return; 1755 return;
1756} 1756}
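
The reissue path above places the aborted request by the same admission rule used on first issue: into a FIFO queue while nr_in_fifos < max_in_fifos, into the PQ when the m-th highest base priority still outranks the task, and into the donor heap otherwise. Below is a stand-alone model of that three-way decision; the types are simplified stand-ins, not the kernel API.

    #include <stdio.h>

    enum dest { TO_FQ, TO_PQ, TO_DONOR };

    static enum dest reissue_dest(unsigned nr_in_fifos, unsigned max_in_fifos,
                                  int mth_highest_outranks_t)
    {
            if (nr_in_fifos < max_in_fifos)
                    return TO_FQ;     /* room in the FIFOs: enqueue directly */
            if (mth_highest_outranks_t)
                    return TO_PQ;     /* t is outside the top m: park in PQ  */
            return TO_DONOR;          /* t belongs in the top m: donate      */
    }

    int main(void)
    {
            printf("%d\n", reissue_dest(4, 4, 0));  /* 2 == TO_DONOR */
            return 0;
    }
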
1757 1757
1758void ikglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t) 1758void r2dglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t)
1759{ 1759{
1760 /* PRE: DGL lock already held if DGLs are supported */ 1760 /* PRE: DGL lock already held if DGLs are supported */
1761 1761
1762 struct ikglp_semaphore *sem = ikglp_from_lock(l); 1762 struct r2dglp_semaphore *sem = r2dglp_from_lock(l);
1763 struct fifo_queue *fq = ikglp_get_queue(sem, t); 1763 struct fifo_queue *fq = r2dglp_get_queue(sem, t);
1764 unsigned long flags = 0, more_flags; 1764 unsigned long flags = 0, more_flags;
1765 1765
1766 TRACE_TASK(t, "virtual unlock!\n"); 1766 TRACE_TASK(t, "virtual unlock!\n");
@@ -1785,7 +1785,7 @@ void ikglp_virtual_unlock(struct litmus_lock* l, struct task_struct* t)
1785 * other FQs. Also, terminate donation relationship if we move 1785 * other FQs. Also, terminate donation relationship if we move
1786 * a donor to 't' to the FQ (we'll pick inheritance back up via 1786 * a donor to 't' to the FQ (we'll pick inheritance back up via
1787 * the FQ, if needed). */ 1787 * the FQ, if needed). */
1788 ikglp_move_next_to_fq(sem, fq, t, &fq->donee_heap_node, &flags, 1788 r2dglp_move_next_to_fq(sem, fq, t, &fq->donee_heap_node, &flags,
1789 !ALLOW_STEALING, ALWAYS_TERMINATE_DONATION); 1789 !ALLOW_STEALING, ALWAYS_TERMINATE_DONATION);
1790 1790
1791 /* decrement fifo count to simulate unlock. individual fifo 1791 /* decrement fifo count to simulate unlock. individual fifo
@@ -1800,10 +1800,10 @@ out:
1800 1800
1801 1801
1802 1802
1803int ikglp_close(struct litmus_lock* l) 1803int r2dglp_close(struct litmus_lock* l)
1804{ 1804{
1805 struct task_struct *t = current; 1805 struct task_struct *t = current;
1806 struct ikglp_semaphore *sem = ikglp_from_lock(l); 1806 struct r2dglp_semaphore *sem = r2dglp_from_lock(l);
1807 unsigned long flags; 1807 unsigned long flags;
1808 1808
1809 int owner = 0; 1809 int owner = 0;
@@ -1821,25 +1821,25 @@ int ikglp_close(struct litmus_lock* l)
1821 raw_spin_unlock_irqrestore(&sem->real_lock, flags); 1821 raw_spin_unlock_irqrestore(&sem->real_lock, flags);
1822 1822
1823 if (owner) 1823 if (owner)
1824 ikglp_unlock(l); 1824 r2dglp_unlock(l);
1825 1825
1826 return 0; 1826 return 0;
1827} 1827}
1828 1828
1829void ikglp_free(struct litmus_lock* l) 1829void r2dglp_free(struct litmus_lock* l)
1830{ 1830{
1831 struct ikglp_semaphore *sem = ikglp_from_lock(l); 1831 struct r2dglp_semaphore *sem = r2dglp_from_lock(l);
1832 1832
1833 kfree(sem->fifo_queues); 1833 kfree(sem->fifo_queues);
1834 kfree(sem); 1834 kfree(sem);
1835} 1835}
1836 1836
1837struct litmus_lock* ikglp_new(unsigned int m, 1837struct litmus_lock* r2dglp_new(unsigned int m,
1838 struct litmus_lock_ops* ops, 1838 struct litmus_lock_ops* ops,
1839 void* __user uarg) 1839 void* __user uarg)
1840{ 1840{
1841 struct ikglp_semaphore* sem; 1841 struct r2dglp_semaphore* sem;
1842 struct ikglp_args args; 1842 struct r2dglp_args args;
1843 unsigned int i; 1843 unsigned int i;
1844 1844
1845 BUG_ON(m <= 0); 1845 BUG_ON(m <= 0);
@@ -1856,16 +1856,16 @@ struct litmus_lock* ikglp_new(unsigned int m,
1856 printk("Invalid number of replicas.\n"); 1856 printk("Invalid number of replicas.\n");
1857 return(NULL); 1857 return(NULL);
1858 } 1858 }
1859 /* IKGLP_OPTIMAL_FIFO_LEN can only be determined if max_in_fifos 1859 /* R2DGLP_OPTIMAL_FIFO_LEN can only be determined if max_in_fifos
1860 is IKGLP_M_IN_FIFOS (number of CPUs) */ 1860 is R2DGLP_M_IN_FIFOS (number of CPUs) */
1861 if (args.max_fifo_len == IKGLP_OPTIMAL_FIFO_LEN && 1861 if (args.max_fifo_len == R2DGLP_OPTIMAL_FIFO_LEN &&
1862 args.max_in_fifos != IKGLP_M_IN_FIFOS) { 1862 args.max_in_fifos != R2DGLP_M_IN_FIFOS) {
1863 printk("Cannot compute optimal FIFO length if " 1863 printk("Cannot compute optimal FIFO length if "
1864 "max_in_fifos != IKGLP_M_IN_FIFOS\n"); 1864 "max_in_fifos != R2DGLP_M_IN_FIFOS\n");
1865 return(NULL); 1865 return(NULL);
1866 } 1866 }
1867 if ((args.max_in_fifos != IKGLP_UNLIMITED_IN_FIFOS) && 1867 if ((args.max_in_fifos != R2DGLP_UNLIMITED_IN_FIFOS) &&
1868 (args.max_fifo_len != IKGLP_UNLIMITED_FIFO_LEN) && 1868 (args.max_fifo_len != R2DGLP_UNLIMITED_FIFO_LEN) &&
1869 (args.max_in_fifos > args.nr_replicas*args.max_fifo_len)) { 1869 (args.max_in_fifos > args.nr_replicas*args.max_fifo_len)) {
1870 printk("Not enough total FIFO space for specified max requests " 1870 printk("Not enough total FIFO space for specified max requests "
1871 "in FIFOs.\n"); 1871 "in FIFOs.\n");
@@ -1886,7 +1886,7 @@ struct litmus_lock* ikglp_new(unsigned int m,
1886 } 1886 }
1887 1887
1888 sem->litmus_lock.ops = ops; 1888 sem->litmus_lock.ops = ops;
1889// sem->litmus_lock.proc = &ikglp_proc_ops; 1889// sem->litmus_lock.proc = &r2dglp_proc_ops;
1890 1890
1891 raw_spin_lock_init(&sem->lock); 1891 raw_spin_lock_init(&sem->lock);
1892 LOCKDEP_DYNAMIC_ALLOC(sem, &sem->lock); 1892 LOCKDEP_DYNAMIC_ALLOC(sem, &sem->lock);
@@ -1894,16 +1894,16 @@ struct litmus_lock* ikglp_new(unsigned int m,
1894 raw_spin_lock_init(&sem->real_lock); 1894 raw_spin_lock_init(&sem->real_lock);
1895 1895
1896 sem->nr_replicas = args.nr_replicas; 1896 sem->nr_replicas = args.nr_replicas;
1897 sem->max_in_fifos = (args.max_in_fifos == IKGLP_M_IN_FIFOS) ? 1897 sem->max_in_fifos = (args.max_in_fifos == R2DGLP_M_IN_FIFOS) ?
1898 m : 1898 m :
1899 args.max_in_fifos; 1899 args.max_in_fifos;
1900 sem->max_fifo_len = (args.max_fifo_len == IKGLP_OPTIMAL_FIFO_LEN) ? 1900 sem->max_fifo_len = (args.max_fifo_len == R2DGLP_OPTIMAL_FIFO_LEN) ?
1901 (sem->max_in_fifos/args.nr_replicas) + 1901 (sem->max_in_fifos/args.nr_replicas) +
1902 ((sem->max_in_fifos%args.nr_replicas) != 0) : 1902 ((sem->max_in_fifos%args.nr_replicas) != 0) :
1903 args.max_fifo_len; 1903 args.max_fifo_len;
1904 sem->nr_in_fifos = 0; 1904 sem->nr_in_fifos = 0;
1905 1905
1906 TRACE_CUR("New IKGLP Sem: m = %u, k = %u, max fifo_len = %u\n", 1906 TRACE_CUR("New R2DGLP Sem: m = %u, k = %u, max fifo_len = %u\n",
1907 sem->max_in_fifos, 1907 sem->max_in_fifos,
1908 sem->nr_replicas, 1908 sem->nr_replicas,
1909 sem->max_fifo_len); 1909 sem->max_fifo_len);
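
The max_fifo_len initialization above computes ceil(max_in_fifos / nr_replicas) in pure integer arithmetic: the truncating division plus one whenever there is a remainder. A worked example with assumed values, m = 8 in-FIFO requests over k = 3 replicas:

    #include <stdio.h>

    int main(void)
    {
            unsigned int m = 8, k = 3;
            unsigned int fifo_len = (m / k) + ((m % k) != 0);
            printf("%u\n", fifo_len);   /* 8/3 = 2 rem 2  =>  2 + 1 = 3 */
            return 0;
    }
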
@@ -1936,12 +1936,12 @@ struct litmus_lock* ikglp_new(unsigned int m,
1936 sem->top_m_size = 0; 1936 sem->top_m_size = 0;
1937 1937
1938 // init heaps 1938 // init heaps
1939 INIT_BINHEAP_HANDLE(&sem->top_m, ikglp_min_heap_base_priority_order); 1939 INIT_BINHEAP_HANDLE(&sem->top_m, r2dglp_min_heap_base_priority_order);
1940 INIT_BINHEAP_HANDLE(&sem->not_top_m, ikglp_max_heap_base_priority_order); 1940 INIT_BINHEAP_HANDLE(&sem->not_top_m, r2dglp_max_heap_base_priority_order);
1941 INIT_BINHEAP_HANDLE(&sem->donees, ikglp_min_heap_donee_order); 1941 INIT_BINHEAP_HANDLE(&sem->donees, r2dglp_min_heap_donee_order);
1942 INIT_BINHEAP_HANDLE(&sem->priority_queue, 1942 INIT_BINHEAP_HANDLE(&sem->priority_queue,
1943 ikglp_max_heap_base_priority_order); 1943 r2dglp_max_heap_base_priority_order);
1944 INIT_BINHEAP_HANDLE(&sem->donors, ikglp_donor_max_heap_base_priority_order); 1944 INIT_BINHEAP_HANDLE(&sem->donors, r2dglp_donor_max_heap_base_priority_order);
1945 1945
1946#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1946#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1947 sem->aff_obs = NULL; 1947 sem->aff_obs = NULL;
@@ -1960,31 +1960,31 @@ struct litmus_lock* ikglp_new(unsigned int m,
1960/****************************************************************************/ 1960/****************************************************************************/
1961 1961
1962 1962
1963static inline int __replica_to_gpu(struct ikglp_affinity* aff, int replica) 1963static inline int __replica_to_gpu(struct r2dglp_affinity* aff, int replica)
1964{ 1964{
1965 int gpu = replica % aff->nr_rsrc; 1965 int gpu = replica % aff->nr_rsrc;
1966 return gpu; 1966 return gpu;
1967} 1967}
1968 1968
1969static inline int replica_to_gpu(struct ikglp_affinity* aff, int replica) 1969static inline int replica_to_gpu(struct r2dglp_affinity* aff, int replica)
1970{ 1970{
1971 int gpu = __replica_to_gpu(aff, replica) + aff->offset; 1971 int gpu = __replica_to_gpu(aff, replica) + aff->offset;
1972 return gpu; 1972 return gpu;
1973} 1973}
1974 1974
1975static inline int gpu_to_base_replica(struct ikglp_affinity* aff, int gpu) 1975static inline int gpu_to_base_replica(struct r2dglp_affinity* aff, int gpu)
1976{ 1976{
1977 int replica = gpu - aff->offset; 1977 int replica = gpu - aff->offset;
1978 return replica; 1978 return replica;
1979} 1979}
1980 1980
1981static inline int same_gpu(struct ikglp_affinity* aff, 1981static inline int same_gpu(struct r2dglp_affinity* aff,
1982 int replica_a, int replica_b) 1982 int replica_a, int replica_b)
1983{ 1983{
1984 return(replica_to_gpu(aff, replica_a) == replica_to_gpu(aff, replica_b)); 1984 return(replica_to_gpu(aff, replica_a) == replica_to_gpu(aff, replica_b));
1985} 1985}
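
The helpers above encode the replica-to-GPU layout: nr_rsrc physical GPUs each back nr_simult (rho) replicas, a replica maps to a local GPU id via replica % nr_rsrc, and offset shifts local ids into the system-wide GPU numbering. A worked stand-alone example with assumed values (6 replicas, rho = 2, offset = 4):

    #include <stdio.h>

    int main(void)
    {
            int nr_rsrc = 6 / 2;   /* nr_replicas / nr_simult = 3 physical GPUs */
            int offset  = 4;       /* first system-wide GPU id for this lock    */

            for (int replica = 0; replica < 6; ++replica)
                    printf("replica %d -> gpu %d\n", replica,
                           replica % nr_rsrc + offset);
            /* prints gpus 4,5,6,4,5,6: replicas i and i+nr_rsrc share a GPU */
            return 0;
    }
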
1986 1986
1987static inline int has_affinity(struct ikglp_affinity* aff, 1987static inline int has_affinity(struct r2dglp_affinity* aff,
1988 struct task_struct* t, int replica) 1988 struct task_struct* t, int replica)
1989{ 1989{
1990 if(tsk_rt(t)->last_gpu >= 0) 1990 if(tsk_rt(t)->last_gpu >= 0)
@@ -1992,38 +1992,38 @@ static inline int has_affinity(struct ikglp_affinity* aff,
1992 return 0; 1992 return 0;
1993} 1993}
1994 1994
1995int ikglp_aff_obs_close(struct affinity_observer* obs) 1995int r2dglp_aff_obs_close(struct affinity_observer* obs)
1996{ 1996{
1997 return 0; 1997 return 0;
1998} 1998}
1999 1999
2000void ikglp_aff_obs_free(struct affinity_observer* obs) 2000void r2dglp_aff_obs_free(struct affinity_observer* obs)
2001{ 2001{
2002 struct ikglp_affinity *ikglp_aff = ikglp_aff_obs_from_aff_obs(obs); 2002 struct r2dglp_affinity *r2dglp_aff = r2dglp_aff_obs_from_aff_obs(obs);
2003 2003
2004 /* make sure the thread destroying this semaphore will not 2004 /* make sure the thread destroying this semaphore will not
2005 call the exit callback on a destroyed lock. */ 2005 call the exit callback on a destroyed lock. */
2006 struct task_struct *t = current; 2006 struct task_struct *t = current;
2007 if (is_realtime(t) && tsk_rt(t)->rsrc_exit_cb_args == ikglp_aff) 2007 if (is_realtime(t) && tsk_rt(t)->rsrc_exit_cb_args == r2dglp_aff)
2008 { 2008 {
2009 tsk_rt(t)->rsrc_exit_cb = NULL; 2009 tsk_rt(t)->rsrc_exit_cb = NULL;
2010 tsk_rt(t)->rsrc_exit_cb_args = NULL; 2010 tsk_rt(t)->rsrc_exit_cb_args = NULL;
2011 } 2011 }
2012 2012
2013 kfree(ikglp_aff->nr_cur_users_on_rsrc); 2013 kfree(r2dglp_aff->nr_cur_users_on_rsrc);
2014 kfree(ikglp_aff->nr_aff_on_rsrc); 2014 kfree(r2dglp_aff->nr_aff_on_rsrc);
2015 kfree(ikglp_aff->q_info); 2015 kfree(r2dglp_aff->q_info);
2016 kfree(ikglp_aff); 2016 kfree(r2dglp_aff);
2017} 2017}
2018 2018
2019static struct affinity_observer* ikglp_aff_obs_new( 2019static struct affinity_observer* r2dglp_aff_obs_new(
2020 struct affinity_observer_ops* ops, 2020 struct affinity_observer_ops* ops,
2021 struct ikglp_affinity_ops* ikglp_ops, 2021 struct r2dglp_affinity_ops* r2dglp_ops,
2022 void* __user args) 2022 void* __user args)
2023{ 2023{
2024 struct ikglp_affinity* ikglp_aff; 2024 struct r2dglp_affinity* r2dglp_aff;
2025 struct gpu_affinity_observer_args aff_args; 2025 struct gpu_affinity_observer_args aff_args;
2026 struct ikglp_semaphore* sem; 2026 struct r2dglp_semaphore* sem;
2027 unsigned int i; 2027 unsigned int i;
2028 unsigned long flags; 2028 unsigned long flags;
2029 2029
@@ -2034,9 +2034,9 @@ static struct affinity_observer* ikglp_aff_obs_new(
2034 return(NULL); 2034 return(NULL);
2035 } 2035 }
2036 2036
2037 sem = (struct ikglp_semaphore*) get_lock_from_od(aff_args.obs.lock_od); 2037 sem = (struct r2dglp_semaphore*) get_lock_from_od(aff_args.obs.lock_od);
2038 2038
2039 if(sem->litmus_lock.type != IKGLP_SEM) { 2039 if(sem->litmus_lock.type != R2DGLP_SEM) {
2040 TRACE_CUR("Lock type not supported. Type = %d\n", 2040 TRACE_CUR("Lock type not supported. Type = %d\n",
2041 sem->litmus_lock.type); 2041 sem->litmus_lock.type);
2042 return(NULL); 2042 return(NULL);
@@ -2053,80 +2053,80 @@ static struct affinity_observer* ikglp_aff_obs_new(
2053 return(NULL); 2053 return(NULL);
2054 } 2054 }
2055 2055
2056 ikglp_aff = kmalloc(sizeof(*ikglp_aff), GFP_KERNEL); 2056 r2dglp_aff = kmalloc(sizeof(*r2dglp_aff), GFP_KERNEL);
2057 if(!ikglp_aff) 2057 if(!r2dglp_aff)
2058 return(NULL); 2058 return(NULL);
2059 2059
2060 ikglp_aff->q_info = kmalloc( 2060 r2dglp_aff->q_info = kmalloc(
2061 sizeof(struct ikglp_queue_info)*sem->nr_replicas, 2061 sizeof(struct r2dglp_queue_info)*sem->nr_replicas,
2062 GFP_KERNEL); 2062 GFP_KERNEL);
2063 if(!ikglp_aff->q_info) { 2063 if(!r2dglp_aff->q_info) {
2064 kfree(ikglp_aff); 2064 kfree(r2dglp_aff);
2065 return(NULL); 2065 return(NULL);
2066 } 2066 }
2067 2067
2068 ikglp_aff->nr_cur_users_on_rsrc = kmalloc( 2068 r2dglp_aff->nr_cur_users_on_rsrc = kmalloc(
2069 sizeof(unsigned int)*(sem->nr_replicas / aff_args.rho), 2069 sizeof(unsigned int)*(sem->nr_replicas / aff_args.rho),
2070 GFP_KERNEL); 2070 GFP_KERNEL);
2071 if(!ikglp_aff->nr_cur_users_on_rsrc) { 2071 if(!r2dglp_aff->nr_cur_users_on_rsrc) {
2072 kfree(ikglp_aff->q_info); 2072 kfree(r2dglp_aff->q_info);
2073 kfree(ikglp_aff); 2073 kfree(r2dglp_aff);
2074 return(NULL); 2074 return(NULL);
2075 } 2075 }
2076 2076
2077 ikglp_aff->nr_aff_on_rsrc = kmalloc( 2077 r2dglp_aff->nr_aff_on_rsrc = kmalloc(
2078 sizeof(unsigned int)*(sem->nr_replicas / aff_args.rho), 2078 sizeof(unsigned int)*(sem->nr_replicas / aff_args.rho),
2079 GFP_KERNEL); 2079 GFP_KERNEL);
2080 if(!ikglp_aff->nr_aff_on_rsrc) { 2080 if(!r2dglp_aff->nr_aff_on_rsrc) {
2081 kfree(ikglp_aff->nr_cur_users_on_rsrc); 2081 kfree(r2dglp_aff->nr_cur_users_on_rsrc);
2082 kfree(ikglp_aff->q_info); 2082 kfree(r2dglp_aff->q_info);
2083 kfree(ikglp_aff); 2083 kfree(r2dglp_aff);
2084 return(NULL); 2084 return(NULL);
2085 } 2085 }
2086 2086
2087 affinity_observer_new(&ikglp_aff->obs, ops, &aff_args.obs); 2087 affinity_observer_new(&r2dglp_aff->obs, ops, &aff_args.obs);
2088 2088
2089 ikglp_aff->ops = ikglp_ops; 2089 r2dglp_aff->ops = r2dglp_ops;
2090 ikglp_aff->offset = aff_args.replica_to_gpu_offset; 2090 r2dglp_aff->offset = aff_args.replica_to_gpu_offset;
2091 ikglp_aff->nr_simult = aff_args.rho; 2091 r2dglp_aff->nr_simult = aff_args.rho;
2092 ikglp_aff->nr_rsrc = sem->nr_replicas / ikglp_aff->nr_simult; 2092 r2dglp_aff->nr_rsrc = sem->nr_replicas / r2dglp_aff->nr_simult;
2093 ikglp_aff->relax_max_fifo_len = (aff_args.relaxed_rules) ? 1 : 0; 2093 r2dglp_aff->relax_max_fifo_len = (aff_args.relaxed_rules) ? 1 : 0;
2094 2094
2095 TRACE_CUR("GPU affinity_observer: offset = %d, nr_simult = %d, " 2095 TRACE_CUR("GPU affinity_observer: offset = %d, nr_simult = %d, "
2096 "nr_rsrc = %d, relaxed_fifo_len = %d\n", 2096 "nr_rsrc = %d, relaxed_fifo_len = %d\n",
2097 ikglp_aff->offset, ikglp_aff->nr_simult, ikglp_aff->nr_rsrc, 2097 r2dglp_aff->offset, r2dglp_aff->nr_simult, r2dglp_aff->nr_rsrc,
2098 ikglp_aff->relax_max_fifo_len); 2098 r2dglp_aff->relax_max_fifo_len);
2099 2099
2100 memset(ikglp_aff->nr_cur_users_on_rsrc, 0, 2100 memset(r2dglp_aff->nr_cur_users_on_rsrc, 0,
2101 sizeof(int)*(ikglp_aff->nr_rsrc)); 2101 sizeof(int)*(r2dglp_aff->nr_rsrc));
2102 memset(ikglp_aff->nr_aff_on_rsrc, 0, 2102 memset(r2dglp_aff->nr_aff_on_rsrc, 0,
2103 sizeof(unsigned int)*(ikglp_aff->nr_rsrc)); 2103 sizeof(unsigned int)*(r2dglp_aff->nr_rsrc));
2104 2104
2105 for(i = 0; i < sem->nr_replicas; ++i) { 2105 for(i = 0; i < sem->nr_replicas; ++i) {
2106 ikglp_aff->q_info[i].q = &sem->fifo_queues[i]; 2106 r2dglp_aff->q_info[i].q = &sem->fifo_queues[i];
2107 ikglp_aff->q_info[i].estimated_len = 0; 2107 r2dglp_aff->q_info[i].estimated_len = 0;
2108 2108
2109 /* multiple q_info's will point to the same resource (aka GPU) if 2109 /* multiple q_info's will point to the same resource (aka GPU) if
2110 aff_args.nr_simult_users > 1 */ 2110 aff_args.nr_simult_users > 1 */
2111 ikglp_aff->q_info[i].nr_cur_users = 2111 r2dglp_aff->q_info[i].nr_cur_users =
2112 &ikglp_aff->nr_cur_users_on_rsrc[__replica_to_gpu(ikglp_aff,i)]; 2112 &r2dglp_aff->nr_cur_users_on_rsrc[__replica_to_gpu(r2dglp_aff,i)];
2113 ikglp_aff->q_info[i].nr_aff_users = 2113 r2dglp_aff->q_info[i].nr_aff_users =
2114 &ikglp_aff->nr_aff_on_rsrc[__replica_to_gpu(ikglp_aff,i)]; 2114 &r2dglp_aff->nr_aff_on_rsrc[__replica_to_gpu(r2dglp_aff,i)];
2115 } 2115 }
2116 2116
2117 /* attach observer to the lock */ 2117 /* attach observer to the lock */
2118 raw_spin_lock_irqsave(&sem->real_lock, flags); 2118 raw_spin_lock_irqsave(&sem->real_lock, flags);
2119 sem->aff_obs = ikglp_aff; 2119 sem->aff_obs = r2dglp_aff;
2120 raw_spin_unlock_irqrestore(&sem->real_lock, flags); 2120 raw_spin_unlock_irqrestore(&sem->real_lock, flags);
2121 2121
2122 return &ikglp_aff->obs; 2122 return &r2dglp_aff->obs;
2123} 2123}
2124 2124
2125static int gpu_replica_to_resource(struct ikglp_affinity* aff, 2125static int gpu_replica_to_resource(struct r2dglp_affinity* aff,
2126 struct fifo_queue* fq) 2126 struct fifo_queue* fq)
2127{ 2127{
2128 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2128 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2129 return(replica_to_gpu(aff, ikglp_get_idx(sem, fq))); 2129 return(replica_to_gpu(aff, r2dglp_get_idx(sem, fq)));
2130} 2130}
2131 2131
2132 2132
@@ -2147,10 +2147,10 @@ static int gpu_replica_to_resource(struct ikglp_affinity* aff,
2147/* - Task period */ 2147/* - Task period */
2148/*--------------------------------------------------------------------------*/ 2148/*--------------------------------------------------------------------------*/
2149 2149
2150struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, 2150struct fifo_queue* gpu_r2dglp_advise_enqueue(struct r2dglp_affinity* aff,
2151 struct task_struct* t) 2151 struct task_struct* t)
2152{ 2152{
2153 // advise_enqueue must be smart so as not to break IKGLP rules: 2153 // advise_enqueue must be smart so as not to break R2DGLP rules:
2154 // * No queue can be greater than ceil(m/k) in length, unless 2154 // * No queue can be greater than ceil(m/k) in length, unless
2155 // 'relax_max_fifo_len' is asserted 2155 // 'relax_max_fifo_len' is asserted
2156 // * Cannot let a queue idle if there exist waiting PQ/donors 2156 // * Cannot let a queue idle if there exist waiting PQ/donors
@@ -2161,10 +2161,10 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff,
2161 // 2161 //
2162 // Heuristic strategy: Find the shortest queue that is not full. 2162 // Heuristic strategy: Find the shortest queue that is not full.
2163 2163
2164 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2164 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2165 lt_t min_len; 2165 lt_t min_len;
2166 unsigned int min_nr_users, min_nr_aff_users; 2166 unsigned int min_nr_users, min_nr_aff_users;
2167 struct ikglp_queue_info *shortest, *aff_queue; 2167 struct r2dglp_queue_info *shortest, *aff_queue;
2168 struct fifo_queue *to_enqueue; 2168 struct fifo_queue *to_enqueue;
2169 unsigned int i; 2169 unsigned int i;
2170 int affinity_gpu; 2170 int affinity_gpu;
@@ -2205,7 +2205,7 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff,
2205 2205
2206 TRACE_CUR("cs is %llu on queue %d (count = %u): est len = %llu\n", 2206 TRACE_CUR("cs is %llu on queue %d (count = %u): est len = %llu\n",
2207 get_gpu_estimate(t, MIG_LOCAL), 2207 get_gpu_estimate(t, MIG_LOCAL),
2208 ikglp_get_idx(sem, shortest->q), 2208 r2dglp_get_idx(sem, shortest->q),
2209 shortest->q->count, 2209 shortest->q->count,
2210 min_len); 2210 min_len);
2211 2211
@@ -2241,7 +2241,7 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff,
2241 } 2241 }
2242 else if(unlikely(est_len == min_len)) { 2242 else if(unlikely(est_len == min_len)) {
2243 /* equal lengths */ 2243 /* equal lengths */
2244 if(!has_affinity(aff, t, ikglp_get_idx(sem, shortest->q))) { 2244 if(!has_affinity(aff, t, r2dglp_get_idx(sem, shortest->q))) {
2245 /* don't sacrifice affinity on tie */ 2245 /* don't sacrifice affinity on tie */
2246 if(has_affinity(aff, t, i)) { 2246 if(has_affinity(aff, t, i)) {
2247 /* switch to maintain affinity */ 2247 /* switch to maintain affinity */
@@ -2271,40 +2271,40 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff,
2271 get_gpu_estimate(t, 2271 get_gpu_estimate(t,
2272 gpu_migration_distance(tsk_rt(t)->last_gpu, 2272 gpu_migration_distance(tsk_rt(t)->last_gpu,
2273 replica_to_gpu(aff, i))), 2273 replica_to_gpu(aff, i))),
2274 ikglp_get_idx(sem, aff->q_info[i].q), 2274 r2dglp_get_idx(sem, aff->q_info[i].q),
2275 aff->q_info[i].q->count, 2275 aff->q_info[i].q->count,
2276 est_len); 2276 est_len);
2277 } 2277 }
2278 else { 2278 else {
2279 TRACE_CUR("queue %d is too long. ineligible for enqueue.\n", 2279 TRACE_CUR("queue %d is too long. ineligible for enqueue.\n",
2280 ikglp_get_idx(sem, aff->q_info[i].q)); 2280 r2dglp_get_idx(sem, aff->q_info[i].q));
2281 } 2281 }
2282 } 2282 }
2283 } 2283 }
2284 2284
2285 if(nominal_fq_len(shortest->q) >= max_fifo_len) { 2285 if(nominal_fq_len(shortest->q) >= max_fifo_len) {
2286 TRACE_CUR("selected fq %d is too long, but returning it anyway.\n", 2286 TRACE_CUR("selected fq %d is too long, but returning it anyway.\n",
2287 ikglp_get_idx(sem, shortest->q)); 2287 r2dglp_get_idx(sem, shortest->q));
2288 } 2288 }
2289 2289
2290 to_enqueue = shortest->q; 2290 to_enqueue = shortest->q;
2291 TRACE_CUR("enqueue on fq %d (count = %u) (non-aff wanted fq %d)\n", 2291 TRACE_CUR("enqueue on fq %d (count = %u) (non-aff wanted fq %d)\n",
2292 ikglp_get_idx(sem, to_enqueue), 2292 r2dglp_get_idx(sem, to_enqueue),
2293 to_enqueue->count, 2293 to_enqueue->count,
2294 ikglp_get_idx(sem, sem->shortest_fifo_queue)); 2294 r2dglp_get_idx(sem, sem->shortest_fifo_queue));
2295 2295
2296 return to_enqueue; 2296 return to_enqueue;
2297} 2297}
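
The enqueue advice above keeps a running "shortest" candidate whose estimated length is the sum of the waiters' migration-weighted critical-section estimates; the tie-break at equal lengths refuses to trade affinity away. Below is a stand-alone model of just that comparison rule, with simplified stand-ins for the kernel structures.

    #include <stdio.h>

    struct cand { unsigned long long est_len; int has_affinity; };

    static int prefer_alt(struct cand cur, struct cand alt)
    {
            if (alt.est_len < cur.est_len)
                    return 1;          /* strictly shorter queue wins        */
            if (alt.est_len == cur.est_len &&
                !cur.has_affinity && alt.has_affinity)
                    return 1;          /* don't sacrifice affinity on a tie  */
            return 0;
    }

    int main(void)
    {
            struct cand cur = { 10, 0 }, alt = { 10, 1 };
            printf("%d\n", prefer_alt(cur, alt));  /* 1: switch for affinity */
            return 0;
    }
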
2298 2298
2299 2299
2300static ikglp_wait_state_t* pick_steal(struct ikglp_affinity* aff, 2300static r2dglp_wait_state_t* pick_steal(struct r2dglp_affinity* aff,
2301 int dest_gpu, 2301 int dest_gpu,
2302 struct fifo_queue* fq) 2302 struct fifo_queue* fq)
2303{ 2303{
2304 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2304 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2305 ikglp_wait_state_t *wait = NULL; 2305 r2dglp_wait_state_t *wait = NULL;
2306 int max_improvement = -(MIG_NONE+1); 2306 int max_improvement = -(MIG_NONE+1);
2307 int replica = ikglp_get_idx(sem, fq); 2307 int replica = r2dglp_get_idx(sem, fq);
2308 2308
2309 if(waitqueue_active(&fq->wait)) { 2309 if(waitqueue_active(&fq->wait)) {
2310 int this_gpu = replica_to_gpu(aff, replica); 2310 int this_gpu = replica_to_gpu(aff, replica);
@@ -2312,8 +2312,8 @@ static ikglp_wait_state_t* pick_steal(struct ikglp_affinity* aff,
2312 2312
2313 list_for_each(pos, &fq->wait.task_list) { 2313 list_for_each(pos, &fq->wait.task_list) {
2314 wait_queue_t *fq_wait = list_entry(pos, wait_queue_t, task_list); 2314 wait_queue_t *fq_wait = list_entry(pos, wait_queue_t, task_list);
2315 ikglp_wait_state_t *tmp_wait = 2315 r2dglp_wait_state_t *tmp_wait =
2316 container_of(fq_wait, ikglp_wait_state_t, fq_node); 2316 container_of(fq_wait, r2dglp_wait_state_t, fq_node);
2317 2317
2318 int tmp_improvement = 2318 int tmp_improvement =
2319 gpu_migration_distance(this_gpu, 2319 gpu_migration_distance(this_gpu,
@@ -2349,22 +2349,22 @@ out:
2349} 2349}
2350 2350
2351 2351
2352ikglp_wait_state_t* gpu_ikglp_advise_steal(struct ikglp_affinity* aff, 2352r2dglp_wait_state_t* gpu_r2dglp_advise_steal(struct r2dglp_affinity* aff,
2353 struct fifo_queue* dst) 2353 struct fifo_queue* dst)
2354{ 2354{
2355 /* Heuristic strategy: Find task with greatest improvement in affinity. */ 2355 /* Heuristic strategy: Find task with greatest improvement in affinity. */
2356 2356
2357 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2357 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2358 ikglp_wait_state_t *to_steal_state = NULL; 2358 r2dglp_wait_state_t *to_steal_state = NULL;
2359 int max_improvement = -(MIG_NONE+1); 2359 int max_improvement = -(MIG_NONE+1);
2360 int replica, i; 2360 int replica, i;
2361 int dest_gpu; 2361 int dest_gpu;
2362 2362
2363 replica = ikglp_get_idx(sem, dst); 2363 replica = r2dglp_get_idx(sem, dst);
2364 dest_gpu = replica_to_gpu(aff, replica); 2364 dest_gpu = replica_to_gpu(aff, replica);
2365 2365
2366 for(i = 0; i < sem->nr_replicas; ++i) { 2366 for(i = 0; i < sem->nr_replicas; ++i) {
2367 ikglp_wait_state_t *tmp_to_steal_state = 2367 r2dglp_wait_state_t *tmp_to_steal_state =
2368 pick_steal(aff, dest_gpu, &sem->fifo_queues[i]); 2368 pick_steal(aff, dest_gpu, &sem->fifo_queues[i]);
2369 2369
2370 if(tmp_to_steal_state) { 2370 if(tmp_to_steal_state) {
@@ -2393,10 +2393,10 @@ out:
2393 TRACE_CUR("Selected victim %s/%d on fq %d (GPU %d) for fq %d " 2393 TRACE_CUR("Selected victim %s/%d on fq %d (GPU %d) for fq %d "
2394 "(GPU %d): improvement = %d\n", 2394 "(GPU %d): improvement = %d\n",
2395 to_steal_state->task->comm, to_steal_state->task->pid, 2395 to_steal_state->task->comm, to_steal_state->task->pid,
2396 ikglp_get_idx(sem, to_steal_state->donee_heap_node.fq), 2396 r2dglp_get_idx(sem, to_steal_state->donee_heap_node.fq),
2397 replica_to_gpu(aff, 2397 replica_to_gpu(aff,
2398 ikglp_get_idx(sem, to_steal_state->donee_heap_node.fq)), 2398 r2dglp_get_idx(sem, to_steal_state->donee_heap_node.fq)),
2399 ikglp_get_idx(sem, dst), 2399 r2dglp_get_idx(sem, dst),
2400 dest_gpu, 2400 dest_gpu,
2401 max_improvement); 2401 max_improvement);
2402 } 2402 }
@@ -2407,19 +2407,19 @@ out:
2407 2407
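
pick_steal() and gpu_r2dglp_advise_steal() above rank candidate victims by an affinity-improvement score. The exact expression is cut off by the hunk boundary, but from the surrounding lines it plausibly compares migration distances from the waiter's last-used GPU: one to the GPU of the queue it currently waits on, one to the destination's. The model below is a reconstruction under that assumption, not the kernel's formula.

    #include <stdio.h>

    /* positive result: the steal reduces the waiter's expected migration cost */
    static int improvement(int dist_cur_to_last, int dist_dest_to_last)
    {
            return dist_cur_to_last - dist_dest_to_last;
    }

    int main(void)
    {
            /* assumed distances: current queue far (3), destination near (1) */
            printf("%d\n", improvement(3, 1));  /* 2: good steal candidate */
            return 0;
    }
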
2408static inline int has_donor(wait_queue_t* fq_wait) 2408static inline int has_donor(wait_queue_t* fq_wait)
2409{ 2409{
2410 ikglp_wait_state_t *wait = 2410 r2dglp_wait_state_t *wait =
2411 container_of(fq_wait, ikglp_wait_state_t, fq_node); 2411 container_of(fq_wait, r2dglp_wait_state_t, fq_node);
2412 return(wait->donee_heap_node.donor_info != NULL); 2412 return(wait->donee_heap_node.donor_info != NULL);
2413} 2413}
2414 2414
2415static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff, 2415static r2dglp_donee_heap_node_t* pick_donee(struct r2dglp_affinity* aff,
2416 struct fifo_queue* fq, 2416 struct fifo_queue* fq,
2417 int* dist_from_head) 2417 int* dist_from_head)
2418{ 2418{
2419 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2419 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2420 struct task_struct *donee; 2420 struct task_struct *donee;
2421 ikglp_donee_heap_node_t *donee_node; 2421 r2dglp_donee_heap_node_t *donee_node;
2422 struct task_struct *mth_highest = ikglp_mth_highest(sem); 2422 struct task_struct *mth_highest = r2dglp_mth_highest(sem);
2423 2423
2424 if(fq->owner && 2424 if(fq->owner &&
2425 fq->donee_heap_node.donor_info == NULL && 2425 fq->donee_heap_node.donor_info == NULL &&
@@ -2432,14 +2432,14 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
2432 BUG_ON(donee != donee_node->task); 2432 BUG_ON(donee != donee_node->task);
2433 2433
2434 TRACE_CUR("picked owner of fq %d as donee\n", 2434 TRACE_CUR("picked owner of fq %d as donee\n",
2435 ikglp_get_idx(sem, fq)); 2435 r2dglp_get_idx(sem, fq));
2436 2436
2437 goto out; 2437 goto out;
2438 } 2438 }
2439 else if(waitqueue_active(&fq->wait)) { 2439 else if(waitqueue_active(&fq->wait)) {
2440 struct list_head *pos; 2440 struct list_head *pos;
2441 2441
2442 TRACE_CUR("searching fq %d for donee\n", ikglp_get_idx(sem, fq)); 2442 TRACE_CUR("searching fq %d for donee\n", r2dglp_get_idx(sem, fq));
2443 2443
2444 *dist_from_head = 1; 2444 *dist_from_head = 1;
2445 2445
@@ -2447,8 +2447,8 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
2447 the donee will be closer to obtaining a resource. */ 2447 the donee will be closer to obtaining a resource. */
2448 list_for_each(pos, &fq->wait.task_list) { 2448 list_for_each(pos, &fq->wait.task_list) {
2449 wait_queue_t *fq_wait = list_entry(pos, wait_queue_t, task_list); 2449 wait_queue_t *fq_wait = list_entry(pos, wait_queue_t, task_list);
2450 ikglp_wait_state_t *wait = 2450 r2dglp_wait_state_t *wait =
2451 container_of(fq_wait, ikglp_wait_state_t, fq_node); 2451 container_of(fq_wait, r2dglp_wait_state_t, fq_node);
2452 2452
2453 if(!has_donor(fq_wait) && 2453 if(!has_donor(fq_wait) &&
2454 mth_highest != wait->task && 2454 mth_highest != wait->task &&
@@ -2459,7 +2459,7 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
2459 BUG_ON(donee != donee_node->task); 2459 BUG_ON(donee != donee_node->task);
2460 2460
2461 TRACE_CUR("picked waiter in fq %d as donee\n", 2461 TRACE_CUR("picked waiter in fq %d as donee\n",
2462 ikglp_get_idx(sem, fq)); 2462 r2dglp_get_idx(sem, fq));
2463 2463
2464 goto out; 2464 goto out;
2465 } 2465 }
@@ -2469,14 +2469,14 @@ static ikglp_donee_heap_node_t* pick_donee(struct ikglp_affinity* aff,
2469 2469
2470 donee = NULL; 2470 donee = NULL;
2471 donee_node = NULL; 2471 donee_node = NULL;
2472 *dist_from_head = IKGLP_INVAL_DISTANCE; 2472 *dist_from_head = R2DGLP_INVAL_DISTANCE;
2473 2473
2474 TRACE_CUR("Found no one to be donee in fq %d!\n", ikglp_get_idx(sem, fq)); 2474 TRACE_CUR("Found no one to be donee in fq %d!\n", r2dglp_get_idx(sem, fq));
2475 2475
2476out: 2476out:
2477 2477
2478 TRACE_CUR("Candidate donee for fq %d is %s/%d (dist_from_head = %d)\n", 2478 TRACE_CUR("Candidate donee for fq %d is %s/%d (dist_from_head = %d)\n",
2479 ikglp_get_idx(sem, fq), 2479 r2dglp_get_idx(sem, fq),
2480 (donee) ? (donee)->comm : "null", 2480 (donee) ? (donee)->comm : "null",
2481 (donee) ? (donee)->pid : 0, 2481 (donee) ? (donee)->pid : 0,
2482 *dist_from_head); 2482 *dist_from_head);
@@ -2484,8 +2484,8 @@ out:
2484 return donee_node; 2484 return donee_node;
2485} 2485}
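For clarity, the test pick_donee() applies to both owners and waiters can be summarized in one predicate. A hedged sketch (donee_eligible_sketch() is a hypothetical helper; the additional priority comparison cut off by the hunk markers is not reproduced):

	/* A request can become a donee only if it has no donor attached
	 * yet and is not the m-th-highest-priority request tracked by
	 * r2dglp_mth_highest(). */
	static int donee_eligible_sketch(r2dglp_donee_heap_node_t *node,
					 struct task_struct *mth_highest)
	{
		return node->donor_info == NULL && node->task != mth_highest;
	}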
2486 2486
2487ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection( 2487r2dglp_donee_heap_node_t* gpu_r2dglp_advise_donee_selection(
2488 struct ikglp_affinity* aff, 2488 struct r2dglp_affinity* aff,
2489 struct task_struct* donor) 2489 struct task_struct* donor)
2490{ 2490{
2491 // Heuristic strategy: Find the highest-priority donee that is waiting on 2491 // Heuristic strategy: Find the highest-priority donee that is waiting on
@@ -2496,19 +2496,19 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
2496 // Further strategy: amongst eligible donees waiting for the same GPU, pick 2496 // Further strategy: amongst eligible donees waiting for the same GPU, pick
2497 // the one closest to the head of the FIFO queue (including owners). 2497 // the one closest to the head of the FIFO queue (including owners).
2498 2498
2499 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2499 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2500 ikglp_donee_heap_node_t *donee_node; 2500 r2dglp_donee_heap_node_t *donee_node;
2501 gpu_migration_dist_t distance; 2501 gpu_migration_dist_t distance;
2502 int start, i, j; 2502 int start, i, j;
2503 2503
2504 ikglp_donee_heap_node_t *default_donee; 2504 r2dglp_donee_heap_node_t *default_donee;
2505 ikglp_wait_state_t *default_donee_donor_info; 2505 r2dglp_wait_state_t *default_donee_donor_info;
2506 2506
2507 if(tsk_rt(donor)->last_gpu < 0) { 2507 if(tsk_rt(donor)->last_gpu < 0) {
2508 /* no affinity; just return the min-prio donee, as in standard IKGLP */ 2508 /* no affinity; just return the min-prio donee, as in standard R2DGLP */
2509 /* TODO: Find something closer to the head of the queue?? */ 2509 /* TODO: Find something closer to the head of the queue?? */
2510 donee_node = binheap_top_entry(&sem->donees, 2510 donee_node = binheap_top_entry(&sem->donees,
2511 ikglp_donee_heap_node_t, 2511 r2dglp_donee_heap_node_t,
2512 node); 2512 node);
2513 goto out; 2513 goto out;
2514 } 2514 }
@@ -2520,9 +2520,9 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
2520 // NOTE: The original donor relation *must* be restored, even if we select 2520 // NOTE: The original donor relation *must* be restored, even if we select
2521 // the default donee through affinity-aware selection, before returning 2521 // the default donee through affinity-aware selection, before returning
2522 // from this function so we don't screw up our heap ordering. 2522 // from this function so we don't screw up our heap ordering.
2523 // The standard IKGLP algorithm will steal the donor relationship if needed. 2523 // The standard R2DGLP algorithm will steal the donor relationship if needed.
2524 default_donee = 2524 default_donee =
2525 binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node); 2525 binheap_top_entry(&sem->donees, r2dglp_donee_heap_node_t, node);
2526 2526
2527 default_donee_donor_info = default_donee->donor_info; // back-up donor relation 2527 default_donee_donor_info = default_donee->donor_info; // back-up donor relation
2528 default_donee->donor_info = NULL; // temporarily break any donor relation. 2528 default_donee->donor_info = NULL; // temporarily break any donor relation.
@@ -2542,7 +2542,7 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
2542 2542
2543 // only interested in queues that will improve our distance 2543 // only interested in queues that will improve our distance
2544 if(temp_distance < distance || donee_node == NULL) { 2544 if(temp_distance < distance || donee_node == NULL) {
2545 int dist_from_head = IKGLP_INVAL_DISTANCE; 2545 int dist_from_head = R2DGLP_INVAL_DISTANCE;
2546 2546
2547 TRACE_CUR("searching for donor on GPU %d\n", i); 2547 TRACE_CUR("searching for donor on GPU %d\n", i);
2548 2548
@@ -2551,7 +2551,7 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
2551 2551
2552 for(j = 0; j < aff->nr_simult; ++j) { 2552 for(j = 0; j < aff->nr_simult; ++j) {
2553 int temp_dist_from_head; 2553 int temp_dist_from_head;
2554 ikglp_donee_heap_node_t *temp_donee_node; 2554 r2dglp_donee_heap_node_t *temp_donee_node;
2555 struct fifo_queue *fq; 2555 struct fifo_queue *fq;
2556 2556
2557 fq = &(sem->fifo_queues[i + j*aff->nr_rsrc]); 2557 fq = &(sem->fifo_queues[i + j*aff->nr_rsrc]);
@@ -2566,7 +2566,7 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
2566 } 2566 }
2567 } 2567 }
2568 2568
2569 if(dist_from_head != IKGLP_INVAL_DISTANCE) { 2569 if(dist_from_head != R2DGLP_INVAL_DISTANCE) {
2570 TRACE_CUR("found donee %s/%d and is the %d-th waiter.\n", 2570 TRACE_CUR("found donee %s/%d and is the %d-th waiter.\n",
2571 donee_node->task->comm, donee_node->task->pid, 2571 donee_node->task->comm, donee_node->task->pid,
2572 dist_from_head); 2572 dist_from_head);
@@ -2600,8 +2600,8 @@ out:
2600 TRACE_CUR("Selected donee %s/%d on fq %d " 2600 TRACE_CUR("Selected donee %s/%d on fq %d "
2601 "(GPU %d) for %s/%d with affinity for GPU %d\n", 2601 "(GPU %d) for %s/%d with affinity for GPU %d\n",
2602 donee_node->task->comm, donee_node->task->pid, 2602 donee_node->task->comm, donee_node->task->pid,
2603 ikglp_get_idx(sem, donee_node->fq), 2603 r2dglp_get_idx(sem, donee_node->fq),
2604 replica_to_gpu(aff, ikglp_get_idx(sem, donee_node->fq)), 2604 replica_to_gpu(aff, r2dglp_get_idx(sem, donee_node->fq)),
2605 donor->comm, donor->pid, tsk_rt(donor)->last_gpu); 2605 donor->comm, donor->pid, tsk_rt(donor)->last_gpu);
2606 2606
2607 return(donee_node); 2607 return(donee_node);
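The index arithmetic in the loop above encodes the replica layout: there are aff->nr_rsrc GPUs and aff->nr_simult simultaneous replicas per GPU, so replica (i + j*nr_rsrc) is the j-th slot on GPU i. A hedged sketch of the per-GPU scan (best_donee_on_gpu_sketch() is a hypothetical helper; the distance-ordered outer loop over GPUs is elided):

	/* Scan every replica queue belonging to GPU i and keep the
	 * eligible donee closest to the head of its FIFO queue, i.e.
	 * the one closest to actually holding a replica. */
	static r2dglp_donee_heap_node_t*
	best_donee_on_gpu_sketch(struct r2dglp_affinity *aff,
				 struct r2dglp_semaphore *sem, int i)
	{
		r2dglp_donee_heap_node_t *best = NULL;
		int best_dist = R2DGLP_INVAL_DISTANCE;
		int j;

		for (j = 0; j < aff->nr_simult; ++j) {
			int d;
			struct fifo_queue *fq =
				&sem->fifo_queues[i + j * aff->nr_rsrc];
			r2dglp_donee_heap_node_t *cand = pick_donee(aff, fq, &d);

			if (cand && (best == NULL || d < best_dist)) {
				best = cand;
				best_dist = d;
			}
		}
		return best;
	}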
@@ -2611,11 +2611,11 @@ out:
2611 2611
2612static void __find_closest_donor(int target_gpu, 2612static void __find_closest_donor(int target_gpu,
2613 struct binheap_node* donor_node, 2613 struct binheap_node* donor_node,
2614 ikglp_wait_state_t** cur_closest, 2614 r2dglp_wait_state_t** cur_closest,
2615 int* cur_dist) 2615 int* cur_dist)
2616{ 2616{
2617 ikglp_wait_state_t *this_donor = 2617 r2dglp_wait_state_t *this_donor =
2618 binheap_entry(donor_node, ikglp_wait_state_t, node); 2618 binheap_entry(donor_node, r2dglp_wait_state_t, node);
2619 2619
2620 int this_dist = 2620 int this_dist =
2621 gpu_migration_distance(target_gpu, tsk_rt(this_donor->task)->last_gpu); 2621 gpu_migration_distance(target_gpu, tsk_rt(this_donor->task)->last_gpu);
@@ -2645,7 +2645,7 @@ static void __find_closest_donor(int target_gpu,
2645 cur_closest, cur_dist); 2645 cur_closest, cur_dist);
2646} 2646}
2647 2647
2648ikglp_wait_state_t* gpu_ikglp_advise_donor_to_fq(struct ikglp_affinity* aff, 2648r2dglp_wait_state_t* gpu_r2dglp_advise_donor_to_fq(struct r2dglp_affinity* aff,
2649 struct fifo_queue* fq) 2649 struct fifo_queue* fq)
2650{ 2650{
2651 // Heuristic strategy: Find the donor with the closest affinity to fq. 2651 // Heuristic strategy: Find the donor with the closest affinity to fq.
@@ -2657,14 +2657,14 @@ ikglp_wait_state_t* gpu_ikglp_advise_donor_to_fq(struct ikglp_affinity* aff,
2657 // donors, at most. The recursion never goes deep enough for stack depth 2657 // donors, at most. The recursion never goes deep enough for stack depth
2658 // to be a concern (even with 128 CPUs, we nest at most 7 levels deep). 2658 // to be a concern (even with 128 CPUs, we nest at most 7 levels deep).
2659 2659
2660 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2660 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2661 ikglp_wait_state_t *donor = NULL; 2661 r2dglp_wait_state_t *donor = NULL;
2662 int distance = MIG_NONE; 2662 int distance = MIG_NONE;
2663 int gpu = replica_to_gpu(aff, ikglp_get_idx(sem, fq)); 2663 int gpu = replica_to_gpu(aff, r2dglp_get_idx(sem, fq));
2664 2664
2665#ifdef CONFIG_SCHED_DEBUG_TRACE 2665#ifdef CONFIG_SCHED_DEBUG_TRACE
2666 ikglp_wait_state_t* default_donor = 2666 r2dglp_wait_state_t* default_donor =
2667 binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 2667 binheap_top_entry(&sem->donors, r2dglp_wait_state_t, node);
2668#endif 2668#endif
2669 2669
2670 __find_closest_donor(gpu, sem->donors.root, &donor, &distance); 2670 __find_closest_donor(gpu, sem->donors.root, &donor, &distance);
@@ -2673,7 +2673,7 @@ ikglp_wait_state_t* gpu_ikglp_advise_donor_to_fq(struct ikglp_affinity* aff,
2673 "(non-aff wanted %s/%d). differs = %d\n", 2673 "(non-aff wanted %s/%d). differs = %d\n",
2674 donor->task->comm, donor->task->pid, 2674 donor->task->comm, donor->task->pid,
2675 distance, 2675 distance,
2676 ikglp_get_idx(sem, fq), 2676 r2dglp_get_idx(sem, fq),
2677 default_donor->task->comm, default_donor->task->pid, 2677 default_donor->task->comm, default_donor->task->pid,
2678 (donor->task != default_donor->task) 2678 (donor->task != default_donor->task)
2679 ); 2679 );
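__find_closest_donor() is a plain pre-order walk of the donors binheap, which is why the stack-depth comment above holds: a heap of d levels recurses at most d deep. A self-contained sketch of the same pattern (find_closest_donor_sketch() is illustrative, not verbatim):

	/* Visit every donor node, tracking the one whose last-used GPU
	 * is nearest to target_gpu. With <= 2^k donors the recursion
	 * depth is at most k. */
	static void find_closest_donor_sketch(int target_gpu,
					      struct binheap_node *n,
					      r2dglp_wait_state_t **closest,
					      int *closest_dist)
	{
		r2dglp_wait_state_t *d;
		int dist;

		if (!n)
			return;

		d = binheap_entry(n, r2dglp_wait_state_t, node);
		dist = gpu_migration_distance(target_gpu,
					      tsk_rt(d->task)->last_gpu);

		if (*closest == NULL || dist < *closest_dist) {
			*closest = d;
			*closest_dist = dist;
		}

		find_closest_donor_sketch(target_gpu, n->left,
					  closest, closest_dist);
		find_closest_donor_sketch(target_gpu, n->right,
					  closest, closest_dist);
	}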
@@ -2683,13 +2683,13 @@ ikglp_wait_state_t* gpu_ikglp_advise_donor_to_fq(struct ikglp_affinity* aff,
2683 2683
2684 2684
2685 2685
2686void gpu_ikglp_notify_enqueue(struct ikglp_affinity* aff, 2686void gpu_r2dglp_notify_enqueue(struct r2dglp_affinity* aff,
2687 struct fifo_queue* fq, struct task_struct* t) 2687 struct fifo_queue* fq, struct task_struct* t)
2688{ 2688{
2689 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2689 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2690 int replica = ikglp_get_idx(sem, fq); 2690 int replica = r2dglp_get_idx(sem, fq);
2691 int gpu = replica_to_gpu(aff, replica); 2691 int gpu = replica_to_gpu(aff, replica);
2692 struct ikglp_queue_info *info = &aff->q_info[replica]; 2692 struct r2dglp_queue_info *info = &aff->q_info[replica];
2693 lt_t est_time; 2693 lt_t est_time;
2694 lt_t est_len_before; 2694 lt_t est_len_before;
2695 2695
@@ -2702,18 +2702,18 @@ void gpu_ikglp_notify_enqueue(struct ikglp_affinity* aff,
2702 info->estimated_len += est_time; 2702 info->estimated_len += est_time;
2703 2703
2704 TRACE_CUR("fq %d: q_len (%llu) + est_cs (%llu) = %llu\n", 2704 TRACE_CUR("fq %d: q_len (%llu) + est_cs (%llu) = %llu\n",
2705 ikglp_get_idx(sem, info->q), 2705 r2dglp_get_idx(sem, info->q),
2706 est_len_before, est_time, 2706 est_len_before, est_time,
2707 info->estimated_len); 2707 info->estimated_len);
2708} 2708}
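gpu_r2dglp_notify_enqueue() above and gpu_r2dglp_notify_dequeue() below maintain, per replica queue, an estimate of the queue's total length in time units: each waiter contributes its expected critical-section length for the migration distance it would incur on that GPU. A hedged sketch of the bookkeeping (the saturating subtraction on the dequeue side is inferred from the elided lines):

	/* account_est_len_sketch() mirrors the two notify hooks: add
	 * the waiter's estimate on enqueue, remove it on dequeue,
	 * clamping at zero so estimation error cannot underflow the
	 * counter. */
	static void account_est_len_sketch(struct r2dglp_queue_info *info,
					   struct task_struct *t, int gpu,
					   int enqueue)
	{
		lt_t est = get_gpu_estimate(t,
				gpu_migration_distance(tsk_rt(t)->last_gpu,
						       gpu));

		if (enqueue)
			info->estimated_len += est;
		else if (info->estimated_len > est)
			info->estimated_len -= est;
		else
			info->estimated_len = 0;
	}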
2709 2709
2710void gpu_ikglp_notify_dequeue(struct ikglp_affinity* aff, struct fifo_queue* fq, 2710void gpu_r2dglp_notify_dequeue(struct r2dglp_affinity* aff, struct fifo_queue* fq,
2711 struct task_struct* t) 2711 struct task_struct* t)
2712{ 2712{
2713 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2713 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2714 int replica = ikglp_get_idx(sem, fq); 2714 int replica = r2dglp_get_idx(sem, fq);
2715 int gpu = replica_to_gpu(aff, replica); 2715 int gpu = replica_to_gpu(aff, replica);
2716 struct ikglp_queue_info *info = &aff->q_info[replica]; 2716 struct r2dglp_queue_info *info = &aff->q_info[replica];
2717 lt_t est_time = get_gpu_estimate(t, 2717 lt_t est_time = get_gpu_estimate(t,
2718 gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); 2718 gpu_migration_distance(tsk_rt(t)->last_gpu, gpu));
2719 2719
@@ -2726,13 +2726,13 @@ void gpu_ikglp_notify_dequeue(struct ikglp_affinity* aff, struct fifo_queue* fq,
2726 } 2726 }
2727 2727
2728 TRACE_CUR("fq %d est len is now %llu\n", 2728 TRACE_CUR("fq %d est len is now %llu\n",
2729 ikglp_get_idx(sem, info->q), 2729 r2dglp_get_idx(sem, info->q),
2730 info->estimated_len); 2730 info->estimated_len);
2731} 2731}
2732 2732
2733int gpu_ikglp_notify_exit(struct ikglp_affinity* aff, struct task_struct* t) 2733int gpu_r2dglp_notify_exit(struct r2dglp_affinity* aff, struct task_struct* t)
2734{ 2734{
2735 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2735 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2736 unsigned long flags = 0, more_flags; 2736 unsigned long flags = 0, more_flags;
2737 int aff_rsrc; 2737 int aff_rsrc;
2738#ifdef CONFIG_LITMUS_DGL_SUPPORT 2738#ifdef CONFIG_LITMUS_DGL_SUPPORT
@@ -2762,22 +2762,22 @@ int gpu_ikglp_notify_exit(struct ikglp_affinity* aff, struct task_struct* t)
2762 return 0; 2762 return 0;
2763} 2763}
2764 2764
2765int gpu_ikglp_notify_exit_trampoline(struct task_struct* t) 2765int gpu_r2dglp_notify_exit_trampoline(struct task_struct* t)
2766{ 2766{
2767 struct ikglp_affinity* aff = 2767 struct r2dglp_affinity* aff =
2768 (struct ikglp_affinity*)tsk_rt(t)->rsrc_exit_cb_args; 2768 (struct r2dglp_affinity*)tsk_rt(t)->rsrc_exit_cb_args;
2769 if(likely(aff)) 2769 if(likely(aff))
2770 return gpu_ikglp_notify_exit(aff, t); 2770 return gpu_r2dglp_notify_exit(aff, t);
2771 else 2771 else
2772 return -1; 2772 return -1;
2773} 2773}
2774 2774
2775void gpu_ikglp_notify_acquired(struct ikglp_affinity* aff, 2775void gpu_r2dglp_notify_acquired(struct r2dglp_affinity* aff,
2776 struct fifo_queue* fq, 2776 struct fifo_queue* fq,
2777 struct task_struct* t) 2777 struct task_struct* t)
2778{ 2778{
2779 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2779 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2780 int replica = ikglp_get_idx(sem, fq); 2780 int replica = r2dglp_get_idx(sem, fq);
2781 int gpu = replica_to_gpu(aff, replica); 2781 int gpu = replica_to_gpu(aff, replica);
2782 int last_gpu = tsk_rt(t)->last_gpu; 2782 int last_gpu = tsk_rt(t)->last_gpu;
2783 2783
@@ -2799,7 +2799,7 @@ void gpu_ikglp_notify_acquired(struct ikglp_affinity* aff,
2799 /* increment affinity count on new GPU */ 2799 /* increment affinity count on new GPU */
2800 ++(aff->nr_aff_on_rsrc[gpu - aff->offset]); 2800 ++(aff->nr_aff_on_rsrc[gpu - aff->offset]);
2801 tsk_rt(t)->rsrc_exit_cb_args = aff; 2801 tsk_rt(t)->rsrc_exit_cb_args = aff;
2802 tsk_rt(t)->rsrc_exit_cb = gpu_ikglp_notify_exit_trampoline; 2802 tsk_rt(t)->rsrc_exit_cb = gpu_r2dglp_notify_exit_trampoline;
2803 } 2803 }
2804 2804
2805 reg_nv_device(gpu, 1, t); /* register */ 2805 reg_nv_device(gpu, 1, t); /* register */
@@ -2809,12 +2809,12 @@ void gpu_ikglp_notify_acquired(struct ikglp_affinity* aff,
2809 start_gpu_tracker(t); 2809 start_gpu_tracker(t);
2810} 2810}
2811 2811
2812void gpu_ikglp_notify_freed(struct ikglp_affinity* aff, 2812void gpu_r2dglp_notify_freed(struct r2dglp_affinity* aff,
2813 struct fifo_queue* fq, 2813 struct fifo_queue* fq,
2814 struct task_struct* t) 2814 struct task_struct* t)
2815{ 2815{
2816 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2816 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2817 int replica = ikglp_get_idx(sem, fq); 2817 int replica = r2dglp_get_idx(sem, fq);
2818 int gpu = replica_to_gpu(aff, replica); 2818 int gpu = replica_to_gpu(aff, replica);
2819 lt_t est_time; 2819 lt_t est_time;
2820 2820
@@ -2844,28 +2844,28 @@ void gpu_ikglp_notify_freed(struct ikglp_affinity* aff,
2844 tsk_rt(t)->last_gpu = gpu; 2844 tsk_rt(t)->last_gpu = gpu;
2845} 2845}
2846 2846
2847struct ikglp_affinity_ops gpu_ikglp_affinity = 2847struct r2dglp_affinity_ops gpu_r2dglp_affinity =
2848{ 2848{
2849 .advise_enqueue = gpu_ikglp_advise_enqueue, 2849 .advise_enqueue = gpu_r2dglp_advise_enqueue,
2850 .advise_steal = gpu_ikglp_advise_steal, 2850 .advise_steal = gpu_r2dglp_advise_steal,
2851 .advise_donee_selection = gpu_ikglp_advise_donee_selection, 2851 .advise_donee_selection = gpu_r2dglp_advise_donee_selection,
2852 .advise_donor_to_fq = gpu_ikglp_advise_donor_to_fq, 2852 .advise_donor_to_fq = gpu_r2dglp_advise_donor_to_fq,
2853 2853
2854 .notify_enqueue = gpu_ikglp_notify_enqueue, 2854 .notify_enqueue = gpu_r2dglp_notify_enqueue,
2855 .notify_dequeue = gpu_ikglp_notify_dequeue, 2855 .notify_dequeue = gpu_r2dglp_notify_dequeue,
2856 .notify_acquired = gpu_ikglp_notify_acquired, 2856 .notify_acquired = gpu_r2dglp_notify_acquired,
2857 .notify_freed = gpu_ikglp_notify_freed, 2857 .notify_freed = gpu_r2dglp_notify_freed,
2858 2858
2859 .notify_exit = gpu_ikglp_notify_exit, 2859 .notify_exit = gpu_r2dglp_notify_exit,
2860 2860
2861 .replica_to_resource = gpu_replica_to_resource, 2861 .replica_to_resource = gpu_replica_to_resource,
2862}; 2862};
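gpu_r2dglp_affinity gathers the heuristics above into a callback table; the lock core consults it rather than hard-coding a queue choice. A hedged sketch of the dispatch pattern (the aff_obs field name and the fallback are assumptions, not verbatim):

	/* When affinity locking is compiled in and an observer is
	 * attached, defer queue selection to it; otherwise fall back
	 * to the shortest FIFO queue the semaphore already tracks. */
	static struct fifo_queue*
	choose_fq_sketch(struct r2dglp_semaphore *sem, struct task_struct *t)
	{
	#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
		if (sem->aff_obs)
			return sem->aff_obs->ops->advise_enqueue(sem->aff_obs, t);
	#endif
		return sem->shortest_fifo_queue;
	}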
2863 2863
2864struct affinity_observer* ikglp_gpu_aff_obs_new( 2864struct affinity_observer* r2dglp_gpu_aff_obs_new(
2865 struct affinity_observer_ops* ops, 2865 struct affinity_observer_ops* ops,
2866 void* __user args) 2866 void* __user args)
2867{ 2867{
2868 return ikglp_aff_obs_new(ops, &gpu_ikglp_affinity, args); 2868 return r2dglp_aff_obs_new(ops, &gpu_r2dglp_affinity, args);
2869} 2869}
2870 2870
2871 2871
@@ -2875,13 +2875,13 @@ struct affinity_observer* ikglp_gpu_aff_obs_new(
2875/* SIMPLE LOAD-BALANCING AFFINITY HEURISTIC */ 2875/* SIMPLE LOAD-BALANCING AFFINITY HEURISTIC */
2876/*--------------------------------------------------------------------------*/ 2876/*--------------------------------------------------------------------------*/
2877 2877
2878struct fifo_queue* simple_gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, 2878struct fifo_queue* simple_gpu_r2dglp_advise_enqueue(struct r2dglp_affinity* aff,
2879 struct task_struct* t) 2879 struct task_struct* t)
2880{ 2880{
2881 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2881 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2882 unsigned int min_count; 2882 unsigned int min_count;
2883 unsigned int min_nr_users; 2883 unsigned int min_nr_users;
2884 struct ikglp_queue_info *shortest; 2884 struct r2dglp_queue_info *shortest;
2885 struct fifo_queue *to_enqueue; 2885 struct fifo_queue *to_enqueue;
2886 unsigned int i; 2886 unsigned int i;
2887 2887
@@ -2890,7 +2890,7 @@ struct fifo_queue* simple_gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff,
2890 min_nr_users = *(shortest->nr_cur_users); 2890 min_nr_users = *(shortest->nr_cur_users);
2891 2891
2892 TRACE_CUR("queue %d: waiters = %u, total holders = %u\n", 2892 TRACE_CUR("queue %d: waiters = %u, total holders = %u\n",
2893 ikglp_get_idx(sem, shortest->q), 2893 r2dglp_get_idx(sem, shortest->q),
2894 shortest->q->count, 2894 shortest->q->count,
2895 min_nr_users); 2895 min_nr_users);
2896 2896
@@ -2910,59 +2910,59 @@ struct fifo_queue* simple_gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff,
2910 } 2910 }
2911 2911
2912 TRACE_CUR("queue %d: waiters = %d, total holders = %d\n", 2912 TRACE_CUR("queue %d: waiters = %d, total holders = %d\n",
2913 ikglp_get_idx(sem, aff->q_info[i].q), 2913 r2dglp_get_idx(sem, aff->q_info[i].q),
2914 aff->q_info[i].q->count, 2914 aff->q_info[i].q->count,
2915 *(aff->q_info[i].nr_cur_users)); 2915 *(aff->q_info[i].nr_cur_users));
2916 } 2916 }
2917 2917
2918 to_enqueue = shortest->q; 2918 to_enqueue = shortest->q;
2919 TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n", 2919 TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n",
2920 ikglp_get_idx(sem, to_enqueue), 2920 r2dglp_get_idx(sem, to_enqueue),
2921 ikglp_get_idx(sem, sem->shortest_fifo_queue)); 2921 r2dglp_get_idx(sem, sem->shortest_fifo_queue));
2922 2922
2923 return to_enqueue; 2923 return to_enqueue;
2924} 2924}
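For reference, the selection rule above condensed into one loop: the queue with the fewest waiters wins, with the number of current holders on the underlying GPU as the tie-break. A hedged sketch (pick_shortest_sketch() is hypothetical; the exact tie-break order sits in an elided hunk but is suggested by the TRACE output):

	/* Linear scan over all replica queues; q->count is the waiter
	 * count and *nr_cur_users the holders on the backing GPU. */
	static struct r2dglp_queue_info*
	pick_shortest_sketch(struct r2dglp_affinity *aff,
			     unsigned int nr_queues)
	{
		struct r2dglp_queue_info *best = &aff->q_info[0];
		unsigned int i;

		for (i = 1; i < nr_queues; ++i) {
			struct r2dglp_queue_info *q = &aff->q_info[i];
			if (q->q->count < best->q->count ||
			    (q->q->count == best->q->count &&
			     *(q->nr_cur_users) < *(best->nr_cur_users)))
				best = q;
		}
		return best;
	}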
2925 2925
2926ikglp_wait_state_t* simple_gpu_ikglp_advise_steal(struct ikglp_affinity* aff, 2926r2dglp_wait_state_t* simple_gpu_r2dglp_advise_steal(struct r2dglp_affinity* aff,
2927 struct fifo_queue* dst) 2927 struct fifo_queue* dst)
2928{ 2928{
2929 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2929 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2930 return ikglp_find_hp_waiter_to_steal(sem, NULL); 2930 return r2dglp_find_hp_waiter_to_steal(sem, NULL);
2931} 2931}
2932 2932
2933ikglp_donee_heap_node_t* simple_gpu_ikglp_advise_donee_selection( 2933r2dglp_donee_heap_node_t* simple_gpu_r2dglp_advise_donee_selection(
2934 struct ikglp_affinity* aff, struct task_struct* donor) 2934 struct r2dglp_affinity* aff, struct task_struct* donor)
2935{ 2935{
2936 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2936 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2937 ikglp_donee_heap_node_t *donee = 2937 r2dglp_donee_heap_node_t *donee =
2938 binheap_top_entry(&sem->donees, ikglp_donee_heap_node_t, node); 2938 binheap_top_entry(&sem->donees, r2dglp_donee_heap_node_t, node);
2939 return(donee); 2939 return(donee);
2940} 2940}
2941 2941
2942ikglp_wait_state_t* simple_gpu_ikglp_advise_donor_to_fq( 2942r2dglp_wait_state_t* simple_gpu_r2dglp_advise_donor_to_fq(
2943 struct ikglp_affinity* aff, struct fifo_queue* fq) 2943 struct r2dglp_affinity* aff, struct fifo_queue* fq)
2944{ 2944{
2945 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2945 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2946 ikglp_wait_state_t* donor = 2946 r2dglp_wait_state_t* donor =
2947 binheap_top_entry(&sem->donors, ikglp_wait_state_t, node); 2947 binheap_top_entry(&sem->donors, r2dglp_wait_state_t, node);
2948 return(donor); 2948 return(donor);
2949} 2949}
2950 2950
2951void simple_gpu_ikglp_notify_enqueue(struct ikglp_affinity* aff, 2951void simple_gpu_r2dglp_notify_enqueue(struct r2dglp_affinity* aff,
2952 struct fifo_queue* fq, struct task_struct* t) 2952 struct fifo_queue* fq, struct task_struct* t)
2953{ 2953{
2954} 2954}
2955 2955
2956void simple_gpu_ikglp_notify_dequeue(struct ikglp_affinity* aff, 2956void simple_gpu_r2dglp_notify_dequeue(struct r2dglp_affinity* aff,
2957 struct fifo_queue* fq, struct task_struct* t) 2957 struct fifo_queue* fq, struct task_struct* t)
2958{ 2958{
2959} 2959}
2960 2960
2961void simple_gpu_ikglp_notify_acquired(struct ikglp_affinity* aff, 2961void simple_gpu_r2dglp_notify_acquired(struct r2dglp_affinity* aff,
2962 struct fifo_queue* fq, struct task_struct* t) 2962 struct fifo_queue* fq, struct task_struct* t)
2963{ 2963{
2964 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2964 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2965 int replica = ikglp_get_idx(sem, fq); 2965 int replica = r2dglp_get_idx(sem, fq);
2966 int gpu = replica_to_gpu(aff, replica); 2966 int gpu = replica_to_gpu(aff, replica);
2967 2967
2968 /* count the number of resource holders */ 2968 /* count the number of resource holders */
@@ -2971,11 +2971,11 @@ void simple_gpu_ikglp_notify_acquired(struct ikglp_affinity* aff,
2971 reg_nv_device(gpu, 1, t); /* register */ 2971 reg_nv_device(gpu, 1, t); /* register */
2972} 2972}
2973 2973
2974void simple_gpu_ikglp_notify_freed(struct ikglp_affinity* aff, 2974void simple_gpu_r2dglp_notify_freed(struct r2dglp_affinity* aff,
2975 struct fifo_queue* fq, struct task_struct* t) 2975 struct fifo_queue* fq, struct task_struct* t)
2976{ 2976{
2977 struct ikglp_semaphore *sem = ikglp_from_lock(aff->obs.lock); 2977 struct r2dglp_semaphore *sem = r2dglp_from_lock(aff->obs.lock);
2978 int replica = ikglp_get_idx(sem, fq); 2978 int replica = r2dglp_get_idx(sem, fq);
2979 int gpu = replica_to_gpu(aff, replica); 2979 int gpu = replica_to_gpu(aff, replica);
2980 2980
2981 /* count the number of resource holders */ 2981 /* count the number of resource holders */
@@ -2984,37 +2984,37 @@ void simple_gpu_ikglp_notify_freed(struct ikglp_affinity* aff,
2984 reg_nv_device(gpu, 0, t); /* unregister */ 2984 reg_nv_device(gpu, 0, t); /* unregister */
2985} 2985}
2986 2986
2987struct ikglp_affinity_ops simple_gpu_ikglp_affinity = 2987struct r2dglp_affinity_ops simple_gpu_r2dglp_affinity =
2988{ 2988{
2989 .advise_enqueue = simple_gpu_ikglp_advise_enqueue, 2989 .advise_enqueue = simple_gpu_r2dglp_advise_enqueue,
2990 .advise_steal = simple_gpu_ikglp_advise_steal, 2990 .advise_steal = simple_gpu_r2dglp_advise_steal,
2991 .advise_donee_selection = simple_gpu_ikglp_advise_donee_selection, 2991 .advise_donee_selection = simple_gpu_r2dglp_advise_donee_selection,
2992 .advise_donor_to_fq = simple_gpu_ikglp_advise_donor_to_fq, 2992 .advise_donor_to_fq = simple_gpu_r2dglp_advise_donor_to_fq,
2993 2993
2994 .notify_enqueue = simple_gpu_ikglp_notify_enqueue, 2994 .notify_enqueue = simple_gpu_r2dglp_notify_enqueue,
2995 .notify_dequeue = simple_gpu_ikglp_notify_dequeue, 2995 .notify_dequeue = simple_gpu_r2dglp_notify_dequeue,
2996 .notify_acquired = simple_gpu_ikglp_notify_acquired, 2996 .notify_acquired = simple_gpu_r2dglp_notify_acquired,
2997 .notify_freed = simple_gpu_ikglp_notify_freed, 2997 .notify_freed = simple_gpu_r2dglp_notify_freed,
2998 2998
2999 .notify_exit = NULL, 2999 .notify_exit = NULL,
3000 3000
3001 .replica_to_resource = gpu_replica_to_resource, 3001 .replica_to_resource = gpu_replica_to_resource,
3002}; 3002};
3003 3003
3004struct affinity_observer* ikglp_simple_gpu_aff_obs_new( 3004struct affinity_observer* r2dglp_simple_gpu_aff_obs_new(
3005 struct affinity_observer_ops* ops, 3005 struct affinity_observer_ops* ops,
3006 void* __user args) 3006 void* __user args)
3007{ 3007{
3008 return ikglp_aff_obs_new(ops, &simple_gpu_ikglp_affinity, args); 3008 return r2dglp_aff_obs_new(ops, &simple_gpu_r2dglp_affinity, args);
3009} 3009}
3010#endif /* end LITMUS_AFFINITY_LOCKING && LITMUS_NVIDIA */ 3010#endif /* end LITMUS_AFFINITY_LOCKING && LITMUS_NVIDIA */
3011 3011
3012#if 0 3012#if 0
3013/* debugging routines */ 3013/* debugging routines */
3014 3014
3015static void __ikglp_dump_pq(struct binheap_node *n, int depth) 3015static void __r2dglp_dump_pq(struct binheap_node *n, int depth)
3016{ 3016{
3017 ikglp_heap_node_t *request; 3017 r2dglp_heap_node_t *request;
3018 char padding[81] = " "; 3018 char padding[81] = " ";
3019 3019
3020 if(n == NULL) { 3020 if(n == NULL) {
@@ -3022,7 +3022,7 @@ static void __ikglp_dump_pq(struct binheap_node *n, int depth)
3022 return; 3022 return;
3023 } 3023 }
3024 3024
3025 request = binheap_entry(n, ikglp_heap_node_t, node); 3025 request = binheap_entry(n, r2dglp_heap_node_t, node);
3026 3026
3027 if(depth*2 <= 80) 3027 if(depth*2 <= 80)
3028 padding[depth*2] = '\0'; 3028 padding[depth*2] = '\0';
@@ -3033,13 +3033,13 @@ static void __ikglp_dump_pq(struct binheap_node *n, int depth)
3033 request->task->comm, 3033 request->task->comm,
3034 request->task->pid); 3034 request->task->pid);
3035 3035
3036 if(n->left) __ikglp_dump_pq(n->left, depth+1); 3036 if(n->left) __r2dglp_dump_pq(n->left, depth+1);
3037 if(n->right) __ikglp_dump_pq(n->right, depth+1); 3037 if(n->right) __r2dglp_dump_pq(n->right, depth+1);
3038} 3038}
3039 3039
3040static void __ikglp_dump_donors(struct binheap_node *n, int depth) 3040static void __r2dglp_dump_donors(struct binheap_node *n, int depth)
3041{ 3041{
3042 ikglp_wait_state_t *donor_node; 3042 r2dglp_wait_state_t *donor_node;
3043 char padding[81] = " "; 3043 char padding[81] = " ";
3044 3044
3045 if(n == NULL) { 3045 if(n == NULL) {
@@ -3047,7 +3047,7 @@ static void __ikglp_dump_donors(struct binheap_node *n, int depth)
3047 return; 3047 return;
3048 } 3048 }
3049 3049
3050 donor_node = binheap_entry(n, ikglp_wait_state_t, node); 3050 donor_node = binheap_entry(n, r2dglp_wait_state_t, node);
3051 3051
3052 if(depth*2 <= 80) 3052 if(depth*2 <= 80)
3053 padding[depth*2] = '\0'; 3053 padding[depth*2] = '\0';
@@ -3060,11 +3060,11 @@ static void __ikglp_dump_donors(struct binheap_node *n, int depth)
3060 donor_node->donee_info->task->comm, 3060 donor_node->donee_info->task->comm,
3061 donor_node->donee_info->task->pid); 3061 donor_node->donee_info->task->pid);
3062 3062
3063 if(n->left) __ikglp_dump_donors(n->left, depth+1); 3063 if(n->left) __r2dglp_dump_donors(n->left, depth+1);
3064 if(n->right) __ikglp_dump_donors(n->right, depth+1); 3064 if(n->right) __r2dglp_dump_donors(n->right, depth+1);
3065} 3065}
3066 3066
3067static void __ikglp_dump_fifoq(int i, struct fifo_queue* fq) 3067static void __r2dglp_dump_fifoq(int i, struct fifo_queue* fq)
3068{ 3068{
3069 TRACE(" FIFO %d: Owner = %s/%d (Virtually Unlocked = %u), HP Waiter = %s/%d, Length = %u\n", 3069 TRACE(" FIFO %d: Owner = %s/%d (Virtually Unlocked = %u), HP Waiter = %s/%d, Length = %u\n",
3070 i, 3070 i,
@@ -3088,27 +3088,27 @@ static void __ikglp_dump_fifoq(int i, struct fifo_queue* fq)
3088} 3088}
3089 3089
3090__attribute__ ((unused)) 3090__attribute__ ((unused))
3091static void __ikglp_dump_state(struct ikglp_semaphore *sem) 3091static void __r2dglp_dump_state(struct r2dglp_semaphore *sem)
3092{ 3092{
3093 int i; 3093 int i;
3094 TRACE("IKGLP Lock %d\n", sem->litmus_lock.ident); 3094 TRACE("R2DGLP Lock %d\n", sem->litmus_lock.ident);
3095 TRACE("# Replicas: %u Max FIFO Len: %u Max in FIFOs: %u Cur # in FIFOs: %u\n", 3095 TRACE("# Replicas: %u Max FIFO Len: %u Max in FIFOs: %u Cur # in FIFOs: %u\n",
3096 sem->nr_replicas, sem->max_fifo_len, sem->max_in_fifos, sem->nr_in_fifos); 3096 sem->nr_replicas, sem->max_fifo_len, sem->max_in_fifos, sem->nr_in_fifos);
3097 TRACE("# requests in top-m: %u\n", sem->top_m_size); 3097 TRACE("# requests in top-m: %u\n", sem->top_m_size);
3098 3098
3099 for (i = 0; i < sem->nr_replicas; ++i) 3099 for (i = 0; i < sem->nr_replicas; ++i)
3100 __ikglp_dump_fifoq(i, &sem->fifo_queues[i]); 3100 __r2dglp_dump_fifoq(i, &sem->fifo_queues[i]);
3101 3101
3102 TRACE(" PQ:\n"); 3102 TRACE(" PQ:\n");
3103 __ikglp_dump_pq(sem->priority_queue.root, 1); 3103 __r2dglp_dump_pq(sem->priority_queue.root, 1);
3104 3104
3105 TRACE(" Donors:\n"); 3105 TRACE(" Donors:\n");
3106 __ikglp_dump_donors(sem->donors.root, 1); 3106 __r2dglp_dump_donors(sem->donors.root, 1);
3107} 3107}
3108 3108
3109static void print_global_list(struct binheap_node* n, int depth) 3109static void print_global_list(struct binheap_node* n, int depth)
3110{ 3110{
3111 ikglp_heap_node_t *global_heap_node; 3111 r2dglp_heap_node_t *global_heap_node;
3112 char padding[81] = " "; 3112 char padding[81] = " ";
3113 3113
3114 if(n == NULL) { 3114 if(n == NULL) {
@@ -3116,7 +3116,7 @@ static void print_global_list(struct binheap_node* n, int depth)
3116 return; 3116 return;
3117 } 3117 }
3118 3118
3119 global_heap_node = binheap_entry(n, ikglp_heap_node_t, node); 3119 global_heap_node = binheap_entry(n, r2dglp_heap_node_t, node);
3120 3120
3121 if(depth*2 <= 80) 3121 if(depth*2 <= 80)
3122 padding[depth*2] = '\0'; 3122 padding[depth*2] = '\0';
@@ -3130,9 +3130,9 @@ static void print_global_list(struct binheap_node* n, int depth)
3130 if(n->right) print_global_list(n->right, depth+1); 3130 if(n->right) print_global_list(n->right, depth+1);
3131} 3131}
3132 3132
3133static void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, int depth) 3133static void print_donees(struct r2dglp_semaphore *sem, struct binheap_node *n, int depth)
3134{ 3134{
3135 ikglp_donee_heap_node_t *donee_node; 3135 r2dglp_donee_heap_node_t *donee_node;
3136 char padding[81] = " "; 3136 char padding[81] = " ";
3137 struct task_struct* donor = NULL; 3137 struct task_struct* donor = NULL;
3138 3138
@@ -3141,7 +3141,7 @@ static void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, in
3141 return; 3141 return;
3142 } 3142 }
3143 3143
3144 donee_node = binheap_entry(n, ikglp_donee_heap_node_t, node); 3144 donee_node = binheap_entry(n, r2dglp_donee_heap_node_t, node);
3145 3145
3146 if(depth*2 <= 80) 3146 if(depth*2 <= 80)
3147 padding[depth*2] = '\0'; 3147 padding[depth*2] = '\0';
@@ -3156,7 +3156,7 @@ static void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, in
3156 donee_node->task->pid, 3156 donee_node->task->pid,
3157 (donor) ? donor->comm : "null", 3157 (donor) ? donor->comm : "null",
3158 (donor) ? donor->pid : 0, 3158 (donor) ? donor->pid : 0,
3159 ikglp_get_idx(sem, donee_node->fq)); 3159 r2dglp_get_idx(sem, donee_node->fq));
3160 3160
3161 if(n->left) print_donees(sem, n->left, depth+1); 3161 if(n->left) print_donees(sem, n->left, depth+1);
3162 if(n->right) print_donees(sem, n->right, depth+1); 3162 if(n->right) print_donees(sem, n->right, depth+1);
@@ -3164,7 +3164,7 @@ static void print_donees(struct ikglp_semaphore *sem, struct binheap_node *n, in
3164 3164
3165static void print_donors(struct binheap_node *n, int depth) 3165static void print_donors(struct binheap_node *n, int depth)
3166{ 3166{
3167 ikglp_wait_state_t *donor_node; 3167 r2dglp_wait_state_t *donor_node;
3168 char padding[81] = " "; 3168 char padding[81] = " ";
3169 3169
3170 if(n == NULL) { 3170 if(n == NULL) {
@@ -3172,7 +3172,7 @@ static void print_donors(struct binheap_node *n, int depth)
3172 return; 3172 return;
3173 } 3173 }
3174 3174
3175 donor_node = binheap_entry(n, ikglp_wait_state_t, node); 3175 donor_node = binheap_entry(n, r2dglp_wait_state_t, node);
3176 3176
3177 if(depth*2 <= 80) 3177 if(depth*2 <= 80)
3178 padding[depth*2] = '\0'; 3178 padding[depth*2] = '\0';
@@ -3191,24 +3191,24 @@ static void print_donors(struct binheap_node *n, int depth)
3191#endif 3191#endif
3192 3192
3193#if 0 3193#if 0
3194struct ikglp_proc_print_heap_args 3194struct r2dglp_proc_print_heap_args
3195{ 3195{
3196 struct ikglp_semaphore *sem; 3196 struct r2dglp_semaphore *sem;
3197 int *size; 3197 int *size;
3198 char **next; 3198 char **next;
3199}; 3199};
3200 3200
3201static void __ikglp_pq_to_proc(struct binheap_node *n, void *args) 3201static void __r2dglp_pq_to_proc(struct binheap_node *n, void *args)
3202{ 3202{
3203 struct ikglp_proc_print_heap_args *hargs; 3203 struct r2dglp_proc_print_heap_args *hargs;
3204 ikglp_heap_node_t *request; 3204 r2dglp_heap_node_t *request;
3205 int w; 3205 int w;
3206 3206
3207 if (!n) 3207 if (!n)
3208 return; 3208 return;
3209 3209
3210 hargs = (struct ikglp_proc_print_heap_args*) args; 3210 hargs = (struct r2dglp_proc_print_heap_args*) args;
3211 request = binheap_entry(n, ikglp_heap_node_t, node); 3211 request = binheap_entry(n, r2dglp_heap_node_t, node);
3212 3212
3213 w = scnprintf(*(hargs->next), *(hargs->size), "\t%s/%d\n", 3213 w = scnprintf(*(hargs->next), *(hargs->size), "\t%s/%d\n",
3214 request->task->comm, request->task->pid); 3214 request->task->comm, request->task->pid);
@@ -3216,17 +3216,17 @@ static void __ikglp_pq_to_proc(struct binheap_node *n, void *args)
3216 *(hargs->next) += w; 3216 *(hargs->next) += w;
3217} 3217}
3218 3218
3219static void __ikglp_donor_to_proc(struct binheap_node *n, void *args) 3219static void __r2dglp_donor_to_proc(struct binheap_node *n, void *args)
3220{ 3220{
3221 struct ikglp_proc_print_heap_args *hargs; 3221 struct r2dglp_proc_print_heap_args *hargs;
3222 ikglp_wait_state_t *donor_node; 3222 r2dglp_wait_state_t *donor_node;
3223 int w; 3223 int w;
3224 3224
3225 if (!n) 3225 if (!n)
3226 return; 3226 return;
3227 3227
3228 hargs = (struct ikglp_proc_print_heap_args*) args; 3228 hargs = (struct r2dglp_proc_print_heap_args*) args;
3229 donor_node = binheap_entry(n, ikglp_wait_state_t, node); 3229 donor_node = binheap_entry(n, r2dglp_wait_state_t, node);
3230 3230
3231 w = scnprintf(*(hargs->next), *(hargs->size), "\t%s/%d (donee: %s/%d)\n", 3231 w = scnprintf(*(hargs->next), *(hargs->size), "\t%s/%d (donee: %s/%d)\n",
3232 donor_node->task->comm, 3232 donor_node->task->comm,
@@ -3238,9 +3238,9 @@ static void __ikglp_donor_to_proc(struct binheap_node *n, void *args)
3238} 3238}
3239 3239
3240 3240
3241static int ikglp_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data) 3241static int r2dglp_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data)
3242{ 3242{
3243 struct ikglp_semaphore *sem = ikglp_from_lock((struct litmus_lock*)data); 3243 struct r2dglp_semaphore *sem = r2dglp_from_lock((struct litmus_lock*)data);
3244 3244
3245 int attempts = 0; 3245 int attempts = 0;
3246 const int max_attempts = 10; 3246 const int max_attempts = 10;
@@ -3250,7 +3250,7 @@ static int ikglp_proc_print(char *page, char **start, off_t off, int count, int
3250 int size = count; 3250 int size = count;
3251 char *next = page; 3251 char *next = page;
3252 3252
3253 struct ikglp_proc_print_heap_args heap_args = {sem, &size, &next}; 3253 struct r2dglp_proc_print_heap_args heap_args = {sem, &size, &next};
3254 3254
3255 int w; 3255 int w;
3256 int i; 3256 int i;
@@ -3325,7 +3325,7 @@ static int ikglp_proc_print(char *page, char **start, off_t off, int count, int
3325 } 3325 }
3326 else { 3326 else {
3327 w = scnprintf(next, size, "donors:\n"); size -= w; next += w; 3327 w = scnprintf(next, size, "donors:\n"); size -= w; next += w;
3328 binheap_for_each(&sem->priority_queue, __ikglp_pq_to_proc, &heap_args); 3328 binheap_for_each(&sem->priority_queue, __r2dglp_pq_to_proc, &heap_args);
3329 } 3329 }
3330 3330
3331 if (binheap_empty(&sem->donors)) { 3331 if (binheap_empty(&sem->donors)) {
@@ -3335,7 +3335,7 @@ static int ikglp_proc_print(char *page, char **start, off_t off, int count, int
3335 } 3335 }
3336 else { 3336 else {
3337 w = scnprintf(next, size, "donors:\n"); size -= w; next += w; 3337 w = scnprintf(next, size, "donors:\n"); size -= w; next += w;
3338 binheap_for_each(&sem->donors, __ikglp_donor_to_proc, &heap_args); 3338 binheap_for_each(&sem->donors, __r2dglp_donor_to_proc, &heap_args);
3339 } 3339 }
3340 3340
3341 raw_spin_unlock_irqrestore(&sem->real_lock, flags); 3341 raw_spin_unlock_irqrestore(&sem->real_lock, flags);
@@ -3343,15 +3343,15 @@ static int ikglp_proc_print(char *page, char **start, off_t off, int count, int
3343 return count - size; 3343 return count - size;
3344} 3344}
3345 3345
3346static void ikglp_proc_add(struct litmus_lock *l) 3346static void r2dglp_proc_add(struct litmus_lock *l)
3347{ 3347{
3348 if (!l->name) 3348 if (!l->name)
3349 l->name = kmalloc(LOCK_NAME_LEN*sizeof(char), GFP_KERNEL); 3349 l->name = kmalloc(LOCK_NAME_LEN*sizeof(char), GFP_KERNEL);
3350 snprintf(l->name, LOCK_NAME_LEN, "ikglp-%d", l->ident); 3350 snprintf(l->name, LOCK_NAME_LEN, "r2dglp-%d", l->ident);
3351 litmus_add_proc_lock(l, ikglp_proc_print); 3351 litmus_add_proc_lock(l, r2dglp_proc_print);
3352} 3352}
3353 3353
3354static void ikglp_proc_remove(struct litmus_lock *l) 3354static void r2dglp_proc_remove(struct litmus_lock *l)
3355{ 3355{
3356 if (l->name) { 3356 if (l->name) {
3357 litmus_remove_proc_lock(l); 3357 litmus_remove_proc_lock(l);
@@ -3361,9 +3361,9 @@ static void ikglp_proc_remove(struct litmus_lock *l)
3361 } 3361 }
3362} 3362}
3363 3363
3364static struct litmus_lock_proc_ops ikglp_proc_ops = 3364static struct litmus_lock_proc_ops r2dglp_proc_ops =
3365{ 3365{
3366 .add = ikglp_proc_add, 3366 .add = r2dglp_proc_add,
3367 .remove = ikglp_proc_remove 3367 .remove = r2dglp_proc_remove
3368}; 3368};
3369#endif 3369#endif
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 64e6a97bd6e9..6eb72d239f61 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -60,7 +60,7 @@
60#ifdef CONFIG_LITMUS_NESTED_LOCKING 60#ifdef CONFIG_LITMUS_NESTED_LOCKING
61#include <litmus/fifo_lock.h> 61#include <litmus/fifo_lock.h>
62#include <litmus/prioq_lock.h> 62#include <litmus/prioq_lock.h>
63#include <litmus/ikglp_lock.h> 63#include <litmus/r2dglp_lock.h>
64#endif 64#endif
65 65
66#ifdef CONFIG_REALTIME_AUX_TASKS 66#ifdef CONFIG_REALTIME_AUX_TASKS
@@ -2152,19 +2152,19 @@ static struct litmus_lock* cedf_new_prioq_mutex(void)
2152 return prioq_mutex_new(&cedf_prioq_mutex_lock_ops); 2152 return prioq_mutex_new(&cedf_prioq_mutex_lock_ops);
2153} 2153}
2154 2154
2155/* ******************** IKGLP ********************** */ 2155/* ******************** R2DGLP ********************** */
2156 2156
2157static struct litmus_lock_ops cedf_ikglp_lock_ops = { 2157static struct litmus_lock_ops cedf_r2dglp_lock_ops = {
2158 .lock = ikglp_lock, 2158 .lock = r2dglp_lock,
2159 .unlock = ikglp_unlock, 2159 .unlock = r2dglp_unlock,
2160 .should_yield_lock = NULL, 2160 .should_yield_lock = NULL,
2161 .close = ikglp_close, 2161 .close = r2dglp_close,
2162 .deallocate = ikglp_free, 2162 .deallocate = r2dglp_free,
2163 2163
2164 .budget_exhausted = ikglp_budget_exhausted, 2164 .budget_exhausted = r2dglp_budget_exhausted,
2165 .omlp_virtual_unlock = ikglp_virtual_unlock, 2165 .omlp_virtual_unlock = r2dglp_virtual_unlock,
2166 2166
2167 // ikglp can only be an outermost lock. 2167 // r2dglp can only be an outermost lock.
2168 .propagate_increase_inheritance = NULL, 2168 .propagate_increase_inheritance = NULL,
2169 .propagate_decrease_inheritance = NULL, 2169 .propagate_decrease_inheritance = NULL,
2170 2170
@@ -2177,10 +2177,10 @@ static struct litmus_lock_ops cedf_ikglp_lock_ops = {
2177 .is_omlp_family = 1, 2177 .is_omlp_family = 1,
2178}; 2178};
2179 2179
2180static struct litmus_lock* cedf_new_ikglp(void* __user arg) 2180static struct litmus_lock* cedf_new_r2dglp(void* __user arg)
2181{ 2181{
2182 /* assumes clusters of uniform size. */ 2182 /* assumes clusters of uniform size. */
2183 return ikglp_new(cluster_size, &cedf_ikglp_lock_ops, arg); 2183 return r2dglp_new(cluster_size, &cedf_r2dglp_lock_ops, arg);
2184} 2184}
2185#endif /* end LITMUS_NESTED_LOCKING */ 2185#endif /* end LITMUS_NESTED_LOCKING */
2186 2186
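For orientation before the allocation hunk below: user space requests a lock of type R2DGLP_SEM through the fdso layer, and the plugin constructs it with the cluster size as the bound on concurrent FIFO occupancy. A condensed, hedged sketch of that arm of the dispatch (the error codes and the surrounding switch cases are assumptions):

	/* Condensed from cedf_allocate_lock(); only the R2DGLP arm is
	 * shown. cluster_size assumes uniform clusters, per the
	 * comment in cedf_new_r2dglp(). */
	static long allocate_r2dglp_sketch(struct litmus_lock **lock,
					   void* __user args)
	{
		*lock = r2dglp_new(cluster_size, &cedf_r2dglp_lock_ops, args);
		return *lock ? 0 : -ENOMEM;
	}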
@@ -2229,8 +2229,8 @@ static long cedf_allocate_lock(struct litmus_lock **lock, int type,
2229 *lock = cedf_new_prioq_mutex(); 2229 *lock = cedf_new_prioq_mutex();
2230 break; 2230 break;
2231 2231
2232 case IKGLP_SEM: 2232 case R2DGLP_SEM:
2233 *lock = cedf_new_ikglp(args); 2233 *lock = cedf_new_r2dglp(args);
2234 break; 2234 break;
2235#endif 2235#endif
2236 case KFMLP_SEM: 2236 case KFMLP_SEM:
@@ -2262,9 +2262,9 @@ cedf_kfmlp_affinity_ops __attribute__ ((unused)) = {
2262 2262
2263#ifdef CONFIG_LITMUS_NESTED_LOCKING 2263#ifdef CONFIG_LITMUS_NESTED_LOCKING
2264static struct affinity_observer_ops 2264static struct affinity_observer_ops
2265cedf_ikglp_affinity_ops __attribute__ ((unused)) = { 2265cedf_r2dglp_affinity_ops __attribute__ ((unused)) = {
2266 .close = ikglp_aff_obs_close, 2266 .close = r2dglp_aff_obs_close,
2267 .deallocate = ikglp_aff_obs_free, 2267 .deallocate = r2dglp_aff_obs_free,
2268}; 2268};
2269#endif 2269#endif
2270 2270
@@ -2286,13 +2286,13 @@ static long cedf_allocate_affinity_observer(struct affinity_observer **aff_obs,
2286 break; 2286 break;
2287 2287
2288#ifdef CONFIG_LITMUS_NESTED_LOCKING 2288#ifdef CONFIG_LITMUS_NESTED_LOCKING
2289 case IKGLP_SIMPLE_GPU_AFF_OBS: 2289 case R2DGLP_SIMPLE_GPU_AFF_OBS:
2290 *aff_obs = ikglp_simple_gpu_aff_obs_new(&cedf_ikglp_affinity_ops, 2290 *aff_obs = r2dglp_simple_gpu_aff_obs_new(&cedf_r2dglp_affinity_ops,
2291 args); 2291 args);
2292 break; 2292 break;
2293 2293
2294 case IKGLP_GPU_AFF_OBS: 2294 case R2DGLP_GPU_AFF_OBS:
2295 *aff_obs = ikglp_gpu_aff_obs_new(&cedf_ikglp_affinity_ops, args); 2295 *aff_obs = r2dglp_gpu_aff_obs_new(&cedf_r2dglp_affinity_ops, args);
2296 break; 2296 break;
2297#endif /* end LITMUS_NESTED_LOCKING */ 2297#endif /* end LITMUS_NESTED_LOCKING */
2298#endif /* end LITMUS_NVIDIA */ 2298#endif /* end LITMUS_NVIDIA */