author    Glenn Elliott <gelliott@cs.unc.edu>  2013-02-08 13:06:54 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-02-08 13:06:54 -0500
commit    09939a5991fedc0d9f95e0ec9f26aa75e9c2da23 (patch)
tree      1ec099484be3d9f2235cbcc459ababdfbd139d28
parent    955683d320ad8d3ce232693e8870b134006c7771 (diff)
Add PRIOQ_MUTEX semaphore

PRIOQ_MUTEX is a binary semaphore whose wait queue is kept in priority
order (rather than FIFO). It supports nested priority-inheritance
propagation and provides the dgl_can_quick_lock()/dgl_quick_lock() hooks
used to acquire all locks of a dynamic group lock (DGL) request
atomically.
-rw-r--r--  include/litmus/fdso.h        |    6
-rw-r--r--  include/litmus/locking.h     |   36
-rw-r--r--  include/litmus/prioq_lock.h  |   61
-rw-r--r--  include/litmus/rt_param.h    |    9
-rw-r--r--  litmus/Makefile              |    2
-rw-r--r--  litmus/fdso.c                |    1
-rw-r--r--  litmus/locking.c             |   35
-rw-r--r--  litmus/prioq_lock.c          | 1162
8 files changed, 1302 insertions, 10 deletions
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index e1a0ac24b8a2..f7887288d8f5 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -35,7 +35,9 @@ typedef enum {
 	KFMLP_SIMPLE_GPU_AFF_OBS = 11,
 	KFMLP_GPU_AFF_OBS = 12,
 
-	MAX_OBJ_TYPE = 12
+	PRIOQ_MUTEX = 13,
+
+	MAX_OBJ_TYPE = 13
 } obj_type_t;
 
 struct inode_obj_id {
@@ -84,6 +86,6 @@ static inline void* od_lookup(int od, obj_type_t type)
 #define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
 
 #define lookup_fifo_mutex(od)((struct litmus_lock*) od_lookup(od, FIFO_MUTEX))
-
+#define lookup_prioq_mutex(od)((struct litmus_lock*) od_lookup(od, PRIOQ_MUTEX))
 
 #endif
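
With PRIOQ_MUTEX registered as an fdso object type and exposed through lookup_prioq_mutex(), userspace reaches the new lock through the generic file-descriptor-attached object interface. A hedged sketch of what that could look like, assuming a liblitmus build whose object-type constants mirror this header and its usual od_open()/od_close()/litmus_lock()/litmus_unlock() wrappers; the file name and object id are arbitrary, error handling is omitted, and nothing below is part of this patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <litmus.h>   /* od_open(), od_close(), litmus_lock(), litmus_unlock() */

/* The caller must already be a LITMUS^RT real-time task; the kernel side
 * (prioq_mutex_lock(), later in this patch) returns -EPERM otherwise. */
static void prioq_example(void)
{
    /* any shared file serves as the lock namespace */
    int fd = open("prioq_namespace", O_RDONLY | O_CREAT, S_IRUSR | S_IWUSR);
    int od = od_open(fd, PRIOQ_MUTEX, 0 /* object id within the file */);

    litmus_lock(od);
    /* ... critical section ... */
    litmus_unlock(od);

    od_close(od);
    close(fd);
}
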
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index cc62fa0cb044..660bfc7f8174 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -11,6 +11,7 @@ struct nested_info
 	struct litmus_lock *lock;
 	struct task_struct *hp_waiter_eff_prio;
 	struct task_struct **hp_waiter_ptr;
+	struct task_struct **owner_ptr;
 	struct binheap_node hp_binheap_node;
 };
 
@@ -30,6 +31,8 @@ struct litmus_lock_proc_ops {
 	void (*remove)(struct litmus_lock *l);
 };
 
+
+
 /* Generic base struct for LITMUS^RT userspace semaphores.
  * This structure should be embedded in protocol-specific semaphores.
  */
@@ -41,10 +44,8 @@ struct litmus_lock {
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	struct nested_info nest;
-//#ifdef CONFIG_DEBUG_SPINLOCK
 	char cheat_lockdep[2];
 	struct lock_class_key key;
-//#endif
 #endif
 
 	struct litmus_lock_proc_ops *proc;
@@ -71,7 +72,32 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lo
 void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait);
 int dgl_wake_up(wait_queue_t *wq_node, unsigned mode, int sync, void *key);
 void __waitqueue_dgl_remove_first(wait_queue_head_t *wq, dgl_wait_state_t** dgl_wait, struct task_struct **task);
+
+
+int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait);
+#endif
+
+
+
+
+static inline struct task_struct* get_queued_task(wait_queue_t* q)
+{
+	struct task_struct *queued;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+	if(q->func == dgl_wake_up) {
+		dgl_wait_state_t *dgl_wait = (dgl_wait_state_t*) q->private;
+		queued = dgl_wait->task;
+	}
+	else {
+		queued = (struct task_struct*) q->private;
+	}
+#else
+	queued = (struct task_struct*) q->private;
 #endif
+	return queued;
+}
+
+
 
 typedef int (*lock_op_t)(struct litmus_lock *l);
 typedef lock_op_t lock_close_t;
@@ -94,16 +120,22 @@ struct litmus_lock_ops {
 	/* The lock is no longer being referenced (mandatory method). */
 	lock_free_t deallocate;
 
+
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
 	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
 	void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
 #endif
 
+
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l);
 	int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
 	int (*is_owner)(struct litmus_lock *l, struct task_struct *t);
 	void (*enable_priority)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
+
+	int (*dgl_can_quick_lock)(struct litmus_lock *l, struct task_struct *t);
+	void (*dgl_quick_lock)(struct litmus_lock *l, struct litmus_lock *cur_lock,
+			struct task_struct* t, wait_queue_t *q);
 #endif
 };
 
diff --git a/include/litmus/prioq_lock.h b/include/litmus/prioq_lock.h
new file mode 100644
index 000000000000..f3c11d241991
--- /dev/null
+++ b/include/litmus/prioq_lock.h
@@ -0,0 +1,61 @@
1#ifndef LITMUS_PRIOQ_H
2#define LITMUS_PRIOQ_H
3
4#include <litmus/litmus.h>
5#include <litmus/binheap.h>
6#include <litmus/locking.h>
7
8/* struct for semaphore with priority inheritance */
9struct prioq_mutex {
10 struct litmus_lock litmus_lock;
11
12 /* current resource holder */
13 struct task_struct *owner;
14
15 /* highest-priority waiter */
16 struct task_struct *hp_waiter;
17
18 /* priority-ordered queue of waiting tasks.
19 * Ironically, we don't use a binheap because that would make DGL
20 * implementation a LOT harder. */
21 wait_queue_head_t wait;
22
23 /* we do some nesting within spinlocks, so we can't use the normal
24 sleeplocks found in wait_queue_head_t. */
25 raw_spinlock_t lock;
26};
27
28static inline struct prioq_mutex* prioq_mutex_from_lock(struct litmus_lock* lock)
29{
30 return container_of(lock, struct prioq_mutex, litmus_lock);
31}
32
33#ifdef CONFIG_LITMUS_DGL_SUPPORT
34int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t);
35int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
36void prioq_mutex_enable_priority(struct litmus_lock *l, dgl_wait_state_t* dgl_wait);
37void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock,
38 struct task_struct* t, wait_queue_t *q);
39int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t);
40#endif
41
42void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
43 struct task_struct* t,
44 raw_spinlock_t* to_unlock,
45 unsigned long irqflags);
46
47void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
48 struct task_struct* t,
49 raw_spinlock_t* to_unlock,
50 unsigned long irqflags);
51
52
53int prioq_mutex_lock(struct litmus_lock* l);
54int prioq_mutex_unlock(struct litmus_lock* l);
55int prioq_mutex_close(struct litmus_lock* l);
56void prioq_mutex_free(struct litmus_lock* l);
57struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops*);
58
59
60#endif
61
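
The header above only declares the PRIOQ_MUTEX entry points; as with the other LITMUS^RT locks, wiring them into a struct litmus_lock_ops is left to the scheduler plugin and is not part of this patch. A hedged sketch of what such a table could look like, assuming the ops struct also carries the usual .lock/.unlock/.close fields alongside the fields visible in the locking.h diff above; the variable name is illustrative:

static struct litmus_lock_ops example_prioq_mutex_lock_ops = {
    .lock       = prioq_mutex_lock,
    .unlock     = prioq_mutex_unlock,
    .close      = prioq_mutex_close,
    .deallocate = prioq_mutex_free,

#ifdef CONFIG_LITMUS_NESTED_LOCKING
    .propagate_increase_inheritance = prioq_mutex_propagate_increase_inheritance,
    .propagate_decrease_inheritance = prioq_mutex_propagate_decrease_inheritance,
#endif

#ifdef CONFIG_LITMUS_DGL_SUPPORT
    .dgl_lock           = prioq_mutex_dgl_lock,
    .is_owner           = prioq_mutex_is_owner,
    .enable_priority    = prioq_mutex_enable_priority,
    .dgl_can_quick_lock = prioq_mutex_dgl_can_quick_lock,
    .dgl_quick_lock     = prioq_mutex_dgl_quick_lock,
#endif
};

/* The plugin would then hand the table to the constructor declared above:
 *   struct litmus_lock *l = prioq_mutex_new(&example_prioq_mutex_lock_ops);
 */
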
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 144be3b6ee3d..716fc034c5f4 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -312,10 +312,6 @@ struct rt_param {
 
 	gpu_migration_dist_t gpu_migration;
 	int last_gpu;
-
-	notify_rsrc_exit_t rsrc_exit_cb;
-	void* rsrc_exit_cb_args;
-
 	lt_t accum_gpu_time;
 	lt_t gpu_time_stamp;
 
@@ -323,6 +319,11 @@ struct rt_param {
 #endif
 #endif
 
+#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
+	notify_rsrc_exit_t rsrc_exit_cb;
+	void* rsrc_exit_cb_args;
+#endif
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
 	unsigned int priority_boosted:1;
diff --git a/litmus/Makefile b/litmus/Makefile
index 6ad94f69a347..6b7acf0bbf2c 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -33,7 +33,7 @@ obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
 obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
 
 obj-$(CONFIG_LITMUS_LOCKING) += kfmlp_lock.o
-obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o ikglp_lock.o
+obj-$(CONFIG_LITMUS_NESTED_LOCKING) += fifo_lock.o prioq_lock.o ikglp_lock.o
 obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o
 obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o
 obj-$(CONFIG_LITMUS_NVIDIA) += nvidia_info.o sched_trace_external.o
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 1fcc47a6a62b..e23e7d5a5daa 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -42,6 +42,7 @@ static const struct fdso_ops* fdso_ops[] = {
 	&generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */
 	&generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */
 #endif
+	&generic_lock_ops, /* PRIOQ_MUTEX */
 };
 
 static int fdso_create(void** obj_ref, obj_type_t type, void* __user config)
diff --git a/litmus/locking.c b/litmus/locking.c
index 58b5edd9df32..f7d33156cf49 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -246,6 +246,11 @@ void select_next_lock(dgl_wait_state_t* dgl_wait /*, struct litmus_lock* prev_lo
 
 	//WARN_ON(dgl_wait->locks[dgl_wait->last_primary] != prev_lock);
 
+	if (dgl_wait->last_primary == 0) {
+		/* loop around */
+		dgl_wait->last_primary = dgl_wait->size;
+	}
+
 	// note reverse order
 	for(dgl_wait->last_primary = dgl_wait->last_primary - 1;
 		dgl_wait->last_primary >= 0;
@@ -327,6 +332,35 @@ static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[],
 }
 #endif
 
+
+/* only valid when locks are prioq locks!!!
+ * THE BIG DGL LOCK MUST BE HELD! */
+int __attempt_atomic_dgl_acquire(struct litmus_lock *cur_lock, dgl_wait_state_t *dgl_wait)
+{
+	int i;
+
+	/* check to see if we can take all the locks */
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+		if(!l->ops->dgl_can_quick_lock(l, dgl_wait->task))
+		{
+			return -1;
+		}
+	}
+
+	/* take the locks */
+	for(i = 0; i < dgl_wait->size; ++i) {
+		struct litmus_lock *l = dgl_wait->locks[i];
+
+		l->ops->dgl_quick_lock(l, cur_lock, dgl_wait->task, &dgl_wait->wq_nodes[i]);
+
+		BUG_ON(dgl_wait->task != *(l->nest.owner_ptr));
+	}
+
+	return 0;  /* success */
+}
+
+
 static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 {
 	int i;
@@ -394,7 +428,6 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 	raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
 
 	suspend_for_lock(); // suspend!!!
-	//schedule(); // suspend!!!
 
 	TS_DGL_LOCK_RESUME;
 
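
__attempt_atomic_dgl_acquire() above is a two-phase, all-or-nothing protocol: with the global DGL spinlock held, it first asks every lock in the request whether the blocked task could take it right now (dgl_can_quick_lock), and only if all of them agree does it take them all (dgl_quick_lock). A minimal standalone sketch of the same pattern, using plain integers in place of litmus_lock objects (illustrative only, not kernel code):

#include <stdio.h>

#define NR 3

static int owner[NR];   /* 0 = free, otherwise id of the owning task */

/* per-resource check and take; in the kernel these are the
 * dgl_can_quick_lock()/dgl_quick_lock() callbacks */
static int can_take(int i, int me) { return owner[i] == 0 || owner[i] == me; }
static void take(int i, int me)    { owner[i] = me; }

static int attempt_atomic_acquire(int me)
{
    int i;
    for (i = 0; i < NR; ++i)        /* phase 1: check every resource */
        if (!can_take(i, me))
            return -1;              /* fail without touching any state */
    for (i = 0; i < NR; ++i)        /* phase 2: take them all */
        take(i, me);
    return 0;
}

int main(void)
{
    printf("task 42: %d\n", attempt_atomic_acquire(42));  /* 0: got all */
    printf("task 43: %d\n", attempt_atomic_acquire(43));  /* -1: none taken */
    return 0;
}

In the kernel the whole routine runs under the coarse-grained DGL spinlock, so the check phase and the take phase cannot be interleaved with competing requests.
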
diff --git a/litmus/prioq_lock.c b/litmus/prioq_lock.c
new file mode 100644
index 000000000000..0091e4c1901e
--- /dev/null
+++ b/litmus/prioq_lock.c
@@ -0,0 +1,1162 @@
1#include <linux/slab.h>
2#include <linux/uaccess.h>
3
4#include <litmus/trace.h>
5#include <litmus/sched_plugin.h>
6#include <litmus/prioq_lock.h>
7
8#include <litmus/litmus_proc.h>
9
10
11#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
12#include <litmus/gpu_affinity.h>
13#endif
14
15static void __attribute__((unused))
16__dump_lock_info(struct prioq_mutex *mutex)
17{
18#ifdef CONFIG_SCHED_DEBUG_TRACE
19 TRACE_CUR("%s (mutex: %p):\n", mutex->litmus_lock.name, mutex);
20 TRACE_CUR("owner: %s/%d (inh: %s/%d)\n",
21 (mutex->owner) ?
22 mutex->owner->comm : "null",
23 (mutex->owner) ?
24 mutex->owner->pid : 0,
25 (mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
26 tsk_rt(mutex->owner)->inh_task->comm : "null",
27 (mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
28 tsk_rt(mutex->owner)->inh_task->pid : 0);
29 TRACE_CUR("hp waiter: %s/%d (inh: %s/%d)\n",
30 (mutex->hp_waiter) ?
31 mutex->hp_waiter->comm : "null",
32 (mutex->hp_waiter) ?
33 mutex->hp_waiter->pid : 0,
34 (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
35 tsk_rt(mutex->hp_waiter)->inh_task->comm : "null",
36 (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
37 tsk_rt(mutex->hp_waiter)->inh_task->pid : 0);
38 TRACE_CUR("blocked tasks, front to back:\n");
39 if (waitqueue_active(&mutex->wait)) {
40 wait_queue_t *q;
41 struct list_head *pos;
42#ifdef CONFIG_LITMUS_DGL_SUPPORT
43 dgl_wait_state_t *dgl_wait = NULL;
44#endif
45 list_for_each(pos, &mutex->wait.task_list) {
46 struct task_struct *blocked_task;
47#ifdef CONFIG_LITMUS_DGL_SUPPORT
48 int enabled = 1;
49#endif
50 q = list_entry(pos, wait_queue_t, task_list);
51
52#ifdef CONFIG_LITMUS_DGL_SUPPORT
53 if(q->func == dgl_wake_up) {
54 dgl_wait = (dgl_wait_state_t*) q->private;
55 blocked_task = dgl_wait->task;
56
57 if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock)
58 enabled = 0;
59 }
60 else {
61 blocked_task = (struct task_struct*) q->private;
62 }
63#else
64 blocked_task = (struct task_struct*) q->private;
65#endif
66 TRACE_CUR("\t%s/%d (inh: %s/%d)"
67#ifdef CONFIG_LITMUS_DGL_SUPPORT
68 " DGL enabled: %d"
69#endif
70 "\n",
71 blocked_task->comm, blocked_task->pid,
72 (tsk_rt(blocked_task)->inh_task) ?
73 tsk_rt(blocked_task)->inh_task->comm : "null",
74 (tsk_rt(blocked_task)->inh_task) ?
75 tsk_rt(blocked_task)->inh_task->pid : 0
76#ifdef CONFIG_LITMUS_DGL_SUPPORT
77 , enabled
78#endif
79 );
80 }
81 }
82 else {
83 TRACE_CUR("\t<NONE>\n");
84 }
85#endif
86}
87
88static void __add_wait_queue_sorted(wait_queue_head_t *q, wait_queue_t *add_node)
89{
90 struct list_head *pq = &(q->task_list);
91 wait_queue_t *q_node;
92 struct task_struct *queued_task;
93 struct task_struct *add_task;
94 struct list_head *pos;
95
96 if (list_empty(pq)) {
97 list_add_tail(&add_node->task_list, pq);
98 return;
99 }
100
101 add_task = get_queued_task(add_node);
102
103 /* less priority than tail? if so, go to tail */
104 q_node = list_entry(pq->prev, wait_queue_t, task_list);
105 queued_task = get_queued_task(q_node);
106 if (litmus->compare(queued_task, add_task)) {
107 list_add_tail(&add_node->task_list, pq);
108 return;
109 }
110
111 /* belongs at head or between nodes */
112 list_for_each(pos, pq) {
113 q_node = list_entry(pos, wait_queue_t, task_list);
114 queued_task = get_queued_task(q_node);
115 if(litmus->compare(add_task, queued_task)) {
116 list_add(&add_node->task_list, pos->prev);
117 return;
118 }
119 }
120
121 BUG();
122}
123
124static inline void __add_wait_queue_sorted_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
125{
126 wait->flags |= WQ_FLAG_EXCLUSIVE;
127 __add_wait_queue_sorted(q, wait);
128}
129
130static void __prioq_increase_pos(struct prioq_mutex *mutex, struct task_struct *t)
131{
132 wait_queue_t *q;
133 struct list_head *pos;
134 struct task_struct *queued;
135
136 /* TODO: Make this efficient instead of remove/add */
137 list_for_each(pos, &mutex->wait.task_list) {
138 q = list_entry(pos, wait_queue_t, task_list);
139 queued = get_queued_task(q);
140 if (queued == t) {
141 __remove_wait_queue(&mutex->wait, q);
142 __add_wait_queue_sorted(&mutex->wait, q);
143 return;
144 }
145 }
146
147 BUG();
148}
149
150static void __prioq_decrease_pos(struct prioq_mutex *mutex, struct task_struct *t)
151{
152 wait_queue_t *q;
153 struct list_head *pos;
154 struct task_struct *queued;
155
156 /* TODO: Make this efficient instead of remove/add */
157 list_for_each(pos, &mutex->wait.task_list) {
158 q = list_entry(pos, wait_queue_t, task_list);
159 queued = get_queued_task(q);
160 if (queued == t) {
161 __remove_wait_queue(&mutex->wait, q);
162 __add_wait_queue_sorted(&mutex->wait, q);
163 return;
164 }
165 }
166
167 BUG();
168}
169
170
171/* caller is responsible for locking */
172static struct task_struct* __prioq_mutex_find_hp_waiter(struct prioq_mutex *mutex,
173 struct task_struct* skip)
174{
175 wait_queue_t *q;
176 struct list_head *pos;
177 struct task_struct *queued = NULL, *found = NULL;
178
179 /* list in sorted order. higher-prio tasks likely at the front. */
180 list_for_each(pos, &mutex->wait.task_list) {
181 q = list_entry(pos, wait_queue_t, task_list);
182 queued = get_queued_task(q);
183
184 /* Compare task prios, find high prio task. */
185 if (queued && queued != skip && litmus->compare(queued, found)) {
186 found = queued;
187 }
188 }
189 return found;
190}
191
192
193#ifdef CONFIG_LITMUS_DGL_SUPPORT
194
195int prioq_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
196{
197 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
198 return(mutex->owner == t);
199}
200
201// return 1 if resource was immediately acquired.
202// Assumes mutex->lock is held.
203// Must set task state to TASK_UNINTERRUPTIBLE if task blocks.
204int prioq_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
205 wait_queue_t* wq_node)
206{
207 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
208 struct task_struct *t = dgl_wait->task;
209
210 int acquired_immediatly = 0;
211
212 BUG_ON(t != current);
213
214 if (mutex->owner) {
215 TRACE_TASK(t, "Enqueuing on lock %d (held by %s/%d).\n",
216 l->ident, mutex->owner->comm, mutex->owner->pid);
217
218 init_dgl_waitqueue_entry(wq_node, dgl_wait);
219
220 set_task_state(t, TASK_UNINTERRUPTIBLE);
221 __add_wait_queue_sorted_exclusive(&mutex->wait, wq_node);
222 } else {
223 TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);
224
225 /* it's ours now */
226 mutex->owner = t;
227
228 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
229 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
230 struct nested_info, hp_binheap_node);
231 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
232
233 acquired_immediatly = 1;
234 }
235
236 return acquired_immediatly;
237}
238
239void prioq_mutex_enable_priority(struct litmus_lock *l,
240 dgl_wait_state_t* dgl_wait)
241{
242 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
243 struct task_struct *t = dgl_wait->task;
244 struct task_struct *owner = mutex->owner;
245 unsigned long flags = 0; // these are unused under DGL coarse-grain locking
246
247 BUG_ON(owner == t);
248
249 tsk_rt(t)->blocked_lock = l;
250 mb();
251
252 if (litmus->compare(t, mutex->hp_waiter)) {
253 struct task_struct *old_max_eff_prio;
254 struct task_struct *new_max_eff_prio;
255 struct task_struct *new_prio = NULL;
256
257 if(mutex->hp_waiter)
258 TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
259 mutex->hp_waiter->comm, mutex->hp_waiter->pid);
260 else
261 TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
262
263 raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
264
265 old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
266 mutex->hp_waiter = t;
267 l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
268 binheap_decrease(&l->nest.hp_binheap_node,
269 &tsk_rt(owner)->hp_blocked_tasks);
270 new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
271
272 if(new_max_eff_prio != old_max_eff_prio) {
273 TRACE_TASK(t, "is new hp_waiter.\n");
274
275 if ((effective_priority(owner) == old_max_eff_prio) ||
276 (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){
277 new_prio = new_max_eff_prio;
278 }
279 }
280 else {
281 TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
282 }
283
284 if(new_prio) {
285 litmus->nested_increase_prio(owner, new_prio,
286 &mutex->lock, flags); // unlocks lock.
287 }
288 else {
289 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
290 unlock_fine_irqrestore(&mutex->lock, flags);
291 }
292 }
293 else {
294 TRACE_TASK(t, "no change in hp_waiter.\n");
295 unlock_fine_irqrestore(&mutex->lock, flags);
296 }
297}
298
299static void select_next_lock_if_primary(struct litmus_lock *l,
300 dgl_wait_state_t *dgl_wait)
301{
302 if(tsk_rt(dgl_wait->task)->blocked_lock == l) {
303 TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n",
304 l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
305 tsk_rt(dgl_wait->task)->blocked_lock = NULL;
306 mb();
307 select_next_lock(dgl_wait /*, l*/); // pick the next lock to be blocked on
308 }
309 else {
310 TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n",
311 l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
312 }
313}
314#endif
315
316
317
318
319#ifdef CONFIG_LITMUS_DGL_SUPPORT
320
321int prioq_mutex_dgl_can_quick_lock(struct litmus_lock *l, struct task_struct *t)
322{
323 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
324
325 if(!mutex->owner && mutex->hp_waiter == t) {
326 wait_queue_t *front = list_entry(mutex->wait.task_list.next, wait_queue_t, task_list);
327 struct task_struct *at_front = get_queued_task(front);
328 if(t == at_front) {
329 return 1;
330 }
331 }
332 return 0;
333}
334
335void prioq_mutex_dgl_quick_lock(struct litmus_lock *l, struct litmus_lock *cur_lock,
336 struct task_struct* t, wait_queue_t *q)
337{
338 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
339
340 BUG_ON(mutex->owner);
341 BUG_ON(mutex->hp_waiter != t);
342 BUG_ON(t != get_queued_task(list_entry(mutex->wait.task_list.next, wait_queue_t, task_list)));
343
344
345 mutex->owner = t;
346
347 if (l != cur_lock) {
348 /* we have to update the state of the other lock for it */
349 __remove_wait_queue(&mutex->wait, q);
350
351 mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, t);
352 l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
353 effective_priority(mutex->hp_waiter) :
354 NULL;
355
356 if (mutex->hp_waiter)
357 TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
358 else
359 TRACE("no further waiters\n");
360
361 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
362
363 binheap_add(&l->nest.hp_binheap_node,
364 &tsk_rt(t)->hp_blocked_tasks,
365 struct nested_info, hp_binheap_node);
366
367 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
368 }
369}
370#endif
371
372
373int prioq_mutex_lock(struct litmus_lock* l)
374{
375 struct task_struct *t = current;
376 struct task_struct *owner;
377 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
378 wait_queue_t wait;
379 unsigned long flags;
380
381#ifdef CONFIG_LITMUS_DGL_SUPPORT
382 raw_spinlock_t *dgl_lock;
383#endif
384
385 if (!is_realtime(t))
386 return -EPERM;
387
388#ifdef CONFIG_LITMUS_DGL_SUPPORT
389 dgl_lock = litmus->get_dgl_spinlock(t);
390#endif
391
392 lock_global_irqsave(dgl_lock, flags);
393 lock_fine_irqsave(&mutex->lock, flags);
394
395 /* block if there is an owner, or if hp_waiter is blocked for DGL and
396 * prio(t) < prio(hp_waiter) */
397 if (mutex->owner) {
398 TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n",
399 l->ident, mutex->owner->comm, mutex->owner->pid);
400
401#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
402 // KLUDGE: don't count this suspension as time in the gpu
403 // critical section
404 if(tsk_rt(t)->held_gpus) {
405 tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
406 }
407#endif
408
409 /* resource is not free => must suspend and wait */
410
411 owner = mutex->owner;
412
413 init_waitqueue_entry(&wait, t);
414
415 tsk_rt(t)->blocked_lock = l; /* record where we are blocked */
416 mb(); // needed?
417
418 /* FIXME: interruptible would be nice some day */
419 set_task_state(t, TASK_UNINTERRUPTIBLE);
420
421 __add_wait_queue_sorted_exclusive(&mutex->wait, &wait);
422
423 /* check if we need to activate priority inheritance */
424 if (litmus->compare(t, mutex->hp_waiter)) {
425
426 struct task_struct *old_max_eff_prio;
427 struct task_struct *new_max_eff_prio;
428 struct task_struct *new_prio = NULL;
429
430 if(mutex->hp_waiter)
431 TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
432 mutex->hp_waiter->comm, mutex->hp_waiter->pid);
433 else
434 TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");
435
436 raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
437
438 old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
439 mutex->hp_waiter = t;
440
441 TRACE_TASK(t, "prioq_mutex %d state after enqueue in priority queue\n", l->ident);
442 __dump_lock_info(mutex);
443
444 l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
445 binheap_decrease(&l->nest.hp_binheap_node,
446 &tsk_rt(owner)->hp_blocked_tasks);
447 new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
448
449 if(new_max_eff_prio != old_max_eff_prio) {
450 TRACE_TASK(t, "is new hp_waiter.\n");
451
452 if ((effective_priority(owner) == old_max_eff_prio) ||
453 (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){
454 new_prio = new_max_eff_prio;
455 }
456 }
457 else {
458 TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
459 }
460
461 if(new_prio) {
462 litmus->nested_increase_prio(owner, new_prio, &mutex->lock,
463 flags); // unlocks lock.
464 }
465 else {
466 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
467 unlock_fine_irqrestore(&mutex->lock, flags);
468 }
469 }
470 else {
471 TRACE_TASK(t, "no change in hp_waiter.\n");
472
473 TRACE_TASK(t, "prioq_mutex %d state after enqueue in priority queue\n", l->ident);
474 __dump_lock_info(mutex);
475
476 unlock_fine_irqrestore(&mutex->lock, flags);
477 }
478
479 unlock_global_irqrestore(dgl_lock, flags);
480
481 TS_LOCK_SUSPEND;
482
483 /* We depend on the FIFO order. Thus, we don't need to recheck
484 * when we wake up; we are guaranteed to have the lock since
485 * there is only one wake up per release.
486 */
487
488 suspend_for_lock();
489
490 TS_LOCK_RESUME;
491
492 /* Since we hold the lock, no other task will change
493 * ->owner. We can thus check it without acquiring the spin
494 * lock. */
495 BUG_ON(mutex->owner != t);
496
497 TRACE_TASK(t, "Acquired lock %d.\n", l->ident);
498
499 } else {
500 TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);
501
502 /* it's ours now */
503 mutex->owner = t;
504
505 raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
506 binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
507 struct nested_info, hp_binheap_node);
508 raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
509
510
511 unlock_fine_irqrestore(&mutex->lock, flags);
512 unlock_global_irqrestore(dgl_lock, flags);
513 }
514
515 return 0;
516}
517
518
519
520int prioq_mutex_unlock(struct litmus_lock* l)
521{
522 struct task_struct *t = current, *next = NULL;
523 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
524 unsigned long flags;
525
526 struct task_struct *old_max_eff_prio;
527
528 int wake_up_task = 1;
529
530#ifdef CONFIG_LITMUS_DGL_SUPPORT
531 dgl_wait_state_t *dgl_wait = NULL;
532 raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
533#endif
534
535 int err = 0;
536
537 if (mutex->owner != t) {
538 err = -EINVAL;
539 return err;
540 }
541
542 lock_global_irqsave(dgl_lock, flags);
543 lock_fine_irqsave(&mutex->lock, flags);
544
545 raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
546
547 TRACE_TASK(t, "Freeing lock %d\n", l->ident);
548
549 old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
550 binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks);
551
552 if(tsk_rt(t)->inh_task){
553 struct task_struct *new_max_eff_prio =
554 top_priority(&tsk_rt(t)->hp_blocked_tasks);
555
556 if((new_max_eff_prio == NULL) ||
557 /* there was a change in eff prio */
558 ( (new_max_eff_prio != old_max_eff_prio) &&
559 /* and owner had the old eff prio */
560 (effective_priority(t) == old_max_eff_prio)) )
561 {
562 // old_max_eff_prio > new_max_eff_prio
563
564 if(litmus->__compare(new_max_eff_prio, BASE, t, EFFECTIVE)) {
565 TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n",
566 new_max_eff_prio->comm, new_max_eff_prio->pid,
567 t->comm, t->pid, tsk_rt(t)->inh_task->comm,
568 tsk_rt(t)->inh_task->pid);
569 WARN_ON(1);
570 }
571
572 litmus->decrease_prio(t, new_max_eff_prio);
573 }
574 }
575
576 if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) &&
577 tsk_rt(t)->inh_task != NULL)
578 {
579 WARN_ON(tsk_rt(t)->inh_task != NULL);
580 TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n",
581 tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid);
582 }
583
584 raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);
585
586
587 /* check if there are jobs waiting for this resource */
588#ifdef CONFIG_LITMUS_DGL_SUPPORT
589 __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next);
590 if(dgl_wait) {
591 next = dgl_wait->task;
592 }
593#else
594
595 next = __waitqueue_remove_first(&mutex->wait);
596#endif
597 if (next) {
598 /* next becomes the resource holder */
599 mutex->owner = next;
600 TRACE_CUR("lock %d ownership passed to %s/%d\n", l->ident, next->comm, next->pid);
601
602 /* determine new hp_waiter if necessary */
603 if (next == mutex->hp_waiter) {
604
605 TRACE_TASK(next, "was highest-prio waiter\n");
606 /* next has the highest priority --- it doesn't need to
607 * inherit. However, we need to make sure that the
608 * next-highest priority in the queue is reflected in
609 * hp_waiter. */
610 mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, next);
611 l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
612 effective_priority(mutex->hp_waiter) :
613 NULL;
614
615 if (mutex->hp_waiter)
616 TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
617 else
618 TRACE("no further waiters\n");
619
620 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
621
622 binheap_add(&l->nest.hp_binheap_node,
623 &tsk_rt(next)->hp_blocked_tasks,
624 struct nested_info, hp_binheap_node);
625
626#ifdef CONFIG_LITMUS_DGL_SUPPORT
627 if(dgl_wait) {
628 select_next_lock_if_primary(l, dgl_wait);
629 --(dgl_wait->nr_remaining);
630 wake_up_task = (dgl_wait->nr_remaining == 0);
631 }
632#endif
633
634 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
635 }
636 else {
637 /* Well, if 'next' is not the highest-priority waiter,
638 * then it (probably) ought to inherit the highest-priority
639 * waiter's priority. */
640 TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident);
641
642 raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);
643
644 binheap_add(&l->nest.hp_binheap_node,
645 &tsk_rt(next)->hp_blocked_tasks,
646 struct nested_info, hp_binheap_node);
647
648 /* It is possible that 'next' *should* be the hp_waiter, but isn't
649 * because that update hasn't yet executed (update operation is
650 * probably blocked on mutex->lock). So only inherit if the top of
651 * 'next's top heap node is indeed the effective prio. of hp_waiter.
652 * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
653 * since the effective priority of hp_waiter can change (and the
654 * update has not made it to this lock).)
655 */
656#ifdef CONFIG_LITMUS_DGL_SUPPORT
657 if((l->nest.hp_waiter_eff_prio != NULL) &&
658 (top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio))
659 {
660 if(dgl_wait && tsk_rt(next)->blocked_lock) {
661 if(litmus->__compare(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) {
662 litmus->nested_increase_prio(next, l->nest.hp_waiter_eff_prio, &mutex->lock, flags); // unlocks lock && hp_blocked_tasks_lock.
663 goto out; // all spinlocks are released. bail out now.
664 }
665 }
666 else {
667 litmus->increase_prio(next, l->nest.hp_waiter_eff_prio);
668 }
669 }
670
671 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
672#else
673 if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) == l->nest.hp_waiter_eff_prio))
674 {
675 litmus->increase_prio(next, l->nest.hp_waiter_eff_prio);
676 }
677 raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
678#endif
679 }
680
681 if(wake_up_task) {
682 TRACE_TASK(next, "waking up since it is no longer blocked.\n");
683
684 tsk_rt(next)->blocked_lock = NULL;
685 mb();
686
687#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
688 // re-enable tracking
689 if(tsk_rt(next)->held_gpus) {
690 tsk_rt(next)->suspend_gpu_tracker_on_block = 0;
691 }
692#endif
693
694 wake_up_process(next);
695 }
696 else {
697 TRACE_TASK(next, "is still blocked.\n");
698 }
699 }
700 else {
701 /* becomes available */
702 mutex->owner = NULL;
703 }
704
705 unlock_fine_irqrestore(&mutex->lock, flags);
706
707#ifdef CONFIG_LITMUS_DGL_SUPPORT
708out:
709#endif
710 unlock_global_irqrestore(dgl_lock, flags);
711
712 TRACE_TASK(t, "-- Freed lock %d --\n", l->ident);
713
714 return err;
715}
716
717
718void prioq_mutex_propagate_increase_inheritance(struct litmus_lock* l,
719 struct task_struct* t,
720 raw_spinlock_t* to_unlock,
721 unsigned long irqflags)
722{
723 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
724
725 // relay-style locking
726 lock_fine(&mutex->lock);
727 unlock_fine(to_unlock);
728
729 __prioq_increase_pos(mutex, t);
730
731 if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked
732 struct task_struct *owner = mutex->owner;
733
734 struct task_struct *old_max_eff_prio;
735 struct task_struct *new_max_eff_prio;
736
737 raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
738
739 old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
740
741 if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) {
742 TRACE_TASK(t, "is new highest-prio waiter by propagation.\n");
743 mutex->hp_waiter = t;
744
745 TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident);
746 __dump_lock_info(mutex);
747 }
748 else {
749 TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident);
750 __dump_lock_info(mutex);
751 }
752
753 if(t == mutex->hp_waiter) {
754 // reflect the increased priority in the heap node.
755 l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
756
757 BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node));
758 BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node,
759 &tsk_rt(owner)->hp_blocked_tasks));
760
761 binheap_decrease(&l->nest.hp_binheap_node,
762 &tsk_rt(owner)->hp_blocked_tasks);
763 }
764
765 new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
766
767
768 if(new_max_eff_prio != old_max_eff_prio) {
769 // new_max_eff_prio > old_max_eff_prio holds.
770 if ((effective_priority(owner) == old_max_eff_prio) ||
771 (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
772 TRACE_CUR("Propagating inheritance to holder of lock %d.\n",
773 l->ident);
774
775 // beware: recursion
776 litmus->nested_increase_prio(owner, new_max_eff_prio,
777 &mutex->lock, irqflags); // unlocks mutex->lock
778 }
779 else {
780 TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n",
781 owner->comm, owner->pid);
782 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
783 unlock_fine_irqrestore(&mutex->lock, irqflags);
784 }
785 }
786 else {
787 TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n");
788 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
789 unlock_fine_irqrestore(&mutex->lock, irqflags);
790 }
791 }
792 else {
793 struct litmus_lock *still_blocked;
794
795 TRACE_TASK(t, "prioq_mutex %d state after prio increase in priority queue\n", l->ident);
796 __dump_lock_info(mutex);
797
798 still_blocked = tsk_rt(t)->blocked_lock;
799
800 TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
801 if(still_blocked) {
802 TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n",
803 still_blocked->ident);
804 if(still_blocked->ops->propagate_increase_inheritance) {
805 /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B)
806 we know that task 't' has not released any locks behind us in this
807 chain. Propagation just needs to catch up with task 't'. */
808 still_blocked->ops->propagate_increase_inheritance(still_blocked,
809 t,
810 &mutex->lock,
811 irqflags);
812 }
813 else {
814 TRACE_TASK(t,
815 "Inheritor is blocked on lock (%p) that does not "
816 "support nesting!\n",
817 still_blocked);
818 unlock_fine_irqrestore(&mutex->lock, irqflags);
819 }
820 }
821 else {
822 unlock_fine_irqrestore(&mutex->lock, irqflags);
823 }
824 }
825}
826
827
828void prioq_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
829 struct task_struct* t,
830 raw_spinlock_t* to_unlock,
831 unsigned long irqflags)
832{
833 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
834
835 // relay-style locking
836 lock_fine(&mutex->lock);
837 unlock_fine(to_unlock);
838
839 __prioq_decrease_pos(mutex, t);
840
841 if(tsk_rt(t)->blocked_lock == l) { // prevent race on tsk_rt(t)->blocked
842 if(t == mutex->hp_waiter) {
843 struct task_struct *owner = mutex->owner;
844
845 struct task_struct *old_max_eff_prio;
846 struct task_struct *new_max_eff_prio;
847
848 raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);
849
850 old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
851
852 binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
853 mutex->hp_waiter = __prioq_mutex_find_hp_waiter(mutex, NULL);
854
855 TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident);
856 __dump_lock_info(mutex);
857
858 l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
859 effective_priority(mutex->hp_waiter) : NULL;
860 binheap_add(&l->nest.hp_binheap_node,
861 &tsk_rt(owner)->hp_blocked_tasks,
862 struct nested_info, hp_binheap_node);
863
864 new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
865
866 if((old_max_eff_prio != new_max_eff_prio) &&
867 (effective_priority(owner) == old_max_eff_prio))
868 {
869 // Need to set new effective_priority for owner
870
871 struct task_struct *decreased_prio;
872
873 TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n",
874 l->ident);
875
876 if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) {
877 TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n",
878 (new_max_eff_prio) ? new_max_eff_prio->comm : "null",
879 (new_max_eff_prio) ? new_max_eff_prio->pid : 0,
880 owner->comm,
881 owner->pid,
882 l->ident);
883
884 decreased_prio = new_max_eff_prio;
885 }
886 else {
887 TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n",
888 (new_max_eff_prio) ? new_max_eff_prio->comm : "null",
889 (new_max_eff_prio) ? new_max_eff_prio->pid : 0,
890 owner->comm,
891 owner->pid,
892 l->ident);
893
894 decreased_prio = NULL;
895 }
896
897 // beware: recursion
898 litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock
899 }
900 else {
901 raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
902 unlock_fine_irqrestore(&mutex->lock, irqflags);
903 }
904 }
905 else {
906 TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident);
907 __dump_lock_info(mutex);
908
909 TRACE_TASK(t, "is not hp_waiter. No propagation.\n");
910 unlock_fine_irqrestore(&mutex->lock, irqflags);
911 }
912 }
913 else {
914 struct litmus_lock *still_blocked;
915
916 TRACE_TASK(t, "prioq_mutex %d state after prio decrease in priority queue\n", l->ident);
917 __dump_lock_info(mutex);
918
919 still_blocked = tsk_rt(t)->blocked_lock;
920
921 TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
922 if(still_blocked) {
923 TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n",
924 still_blocked->ident);
925 if(still_blocked->ops->propagate_decrease_inheritance) {
926 /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B)
927 we know that task 't' has not released any locks behind us in this
928 chain. propagation just needs to catch up with task 't' */
929 still_blocked->ops->propagate_decrease_inheritance(still_blocked,
930 t,
931 &mutex->lock,
932 irqflags);
933 }
934 else {
935 TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n",
936 still_blocked);
937 unlock_fine_irqrestore(&mutex->lock, irqflags);
938 }
939 }
940 else {
941 unlock_fine_irqrestore(&mutex->lock, irqflags);
942 }
943 }
944}
945
946
947int prioq_mutex_close(struct litmus_lock* l)
948{
949 struct task_struct *t = current;
950 struct prioq_mutex *mutex = prioq_mutex_from_lock(l);
951 unsigned long flags;
952
953 int owner;
954
955#ifdef CONFIG_LITMUS_DGL_SUPPORT
956 raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
957#endif
958
959 lock_global_irqsave(dgl_lock, flags);
960 lock_fine_irqsave(&mutex->lock, flags);
961
962 owner = (mutex->owner == t);
963
964 unlock_fine_irqrestore(&mutex->lock, flags);
965 unlock_global_irqrestore(dgl_lock, flags);
966
967 /*
968 TODO: Currently panic. FIX THIS!
969 if (owner)
970 prioq_mutex_unlock(l);
971 */
972
973 return 0;
974}
975
976void prioq_mutex_free(struct litmus_lock* lock)
977{
978 kfree(prioq_mutex_from_lock(lock));
979}
980
981
982/* The following may race if DGLs are enabled. Only examine /proc if things
983 appear to be locked up. TODO: FIX THIS! Must find an elegant way to transmit
984 DGL lock to function. */
985static int prioq_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data)
986{
987 struct prioq_mutex *mutex = prioq_mutex_from_lock((struct litmus_lock*)data);
988
989 int attempts = 0;
990 const int max_attempts = 10;
991 int locked = 0;
992 unsigned long flags;
993
994 int size = count;
995 char *next = page;
996 int w;
997
998 while(attempts < max_attempts)
999 {
1000 locked = raw_spin_trylock_irqsave(&mutex->lock, flags);
1001
1002 if (unlikely(!locked)) {
1003 ++attempts;
1004 cpu_relax();
1005 }
1006 else {
1007 break;
1008 }
1009 }
1010
1011 if (locked) {
1012 w = scnprintf(next, size, "%s (mutex: %p, data: %p):\n", mutex->litmus_lock.name, mutex, data);
1013 size -= w;
1014 next += w;
1015
1016 w = scnprintf(next, size,
1017 "owner: %s/%d (inh: %s/%d)\n",
1018 (mutex->owner) ?
1019 mutex->owner->comm : "null",
1020 (mutex->owner) ?
1021 mutex->owner->pid : 0,
1022 (mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
1023 tsk_rt(mutex->owner)->inh_task->comm : "null",
1024 (mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
1025 tsk_rt(mutex->owner)->inh_task->pid : 0);
1026 size -= w;
1027 next += w;
1028
1029 w = scnprintf(next, size,
1030 "hp waiter: %s/%d (inh: %s/%d)\n",
1031 (mutex->hp_waiter) ?
1032 mutex->hp_waiter->comm : "null",
1033 (mutex->hp_waiter) ?
1034 mutex->hp_waiter->pid : 0,
1035 (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
1036 tsk_rt(mutex->hp_waiter)->inh_task->comm : "null",
1037 (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
1038 tsk_rt(mutex->hp_waiter)->inh_task->pid : 0);
1039 size -= w;
1040 next += w;
1041
1042 w = scnprintf(next, size, "\nblocked tasks, front to back:\n");
1043 size -= w;
1044 next += w;
1045
1046 if (waitqueue_active(&mutex->wait)) {
1047 wait_queue_t *q;
1048 struct list_head *pos;
1049#ifdef CONFIG_LITMUS_DGL_SUPPORT
1050 dgl_wait_state_t *dgl_wait = NULL;
1051#endif
1052 list_for_each(pos, &mutex->wait.task_list) {
1053 struct task_struct *blocked_task;
1054#ifdef CONFIG_LITMUS_DGL_SUPPORT
1055 int enabled = 1;
1056#endif
1057 q = list_entry(pos, wait_queue_t, task_list);
1058
1059 blocked_task = get_queued_task(q);
1060#ifdef CONFIG_LITMUS_DGL_SUPPORT
1061 if(q->func == dgl_wake_up) {
1062 dgl_wait = (dgl_wait_state_t*) q->private;
1063 blocked_task = dgl_wait->task;
1064
1065 if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock)
1066 enabled = 0;
1067 }
1068 else {
1069 blocked_task = (struct task_struct*) q->private;
1070 }
1071#else
1072 blocked_task = (struct task_struct*) q->private;
1073#endif
1074
1075 w = scnprintf(next, size,
1076 "\t%s/%d (inh: %s/%d)"
1077#ifdef CONFIG_LITMUS_DGL_SUPPORT
1078 " DGL enabled: %d"
1079#endif
1080 "\n",
1081 blocked_task->comm, blocked_task->pid,
1082 (tsk_rt(blocked_task)->inh_task) ?
1083 tsk_rt(blocked_task)->inh_task->comm : "null",
1084 (tsk_rt(blocked_task)->inh_task) ?
1085 tsk_rt(blocked_task)->inh_task->pid : 0
1086#ifdef CONFIG_LITMUS_DGL_SUPPORT
1087 , enabled
1088#endif
1089 );
1090 size -= w;
1091 next += w;
1092 }
1093 }
1094 else {
1095 w = scnprintf(next, size, "\t<NONE>\n");
1096 size -= w;
1097 next += w;
1098 }
1099
1100 raw_spin_unlock_irqrestore(&mutex->lock, flags);
1101 }
1102 else {
1103 w = scnprintf(next, size, "%s is busy.\n", mutex->litmus_lock.name);
1104 size -= w;
1105 next += w;
1106 }
1107
1108 return count - size;
1109}
1110
1111static void prioq_proc_add(struct litmus_lock* l)
1112{
1113 snprintf(l->name, LOCK_NAME_LEN, "prioq-%d", l->ident);
1114
1115 l->proc_entry = litmus_add_proc_lock(l, prioq_proc_print);
1116}
1117
1118static void prioq_proc_remove(struct litmus_lock* l)
1119{
1120 litmus_remove_proc_lock(l);
1121}
1122
1123static struct litmus_lock_proc_ops prioq_proc_ops =
1124{
1125 .add = prioq_proc_add,
1126 .remove = prioq_proc_remove
1127};
1128
1129
1130struct litmus_lock* prioq_mutex_new(struct litmus_lock_ops* ops)
1131{
1132 struct prioq_mutex* mutex;
1133
1134 mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
1135 if (!mutex)
1136 return NULL;
1137 memset(mutex, 0, sizeof(*mutex));
1138
1139 mutex->litmus_lock.ops = ops;
1140 mutex->owner = NULL;
1141 mutex->hp_waiter = NULL;
1142 init_waitqueue_head(&mutex->wait);
1143
1144
1145#ifdef CONFIG_DEBUG_SPINLOCK
1146 {
1147 __raw_spin_lock_init(&mutex->lock,
1148 ((struct litmus_lock*)mutex)->cheat_lockdep,
1149 &((struct litmus_lock*)mutex)->key);
1150 }
1151#else
1152 raw_spin_lock_init(&mutex->lock);
1153#endif
1154
1155 ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;
1156 ((struct litmus_lock*)mutex)->nest.owner_ptr = &mutex->owner;
1157
1158 ((struct litmus_lock*)mutex)->proc = &prioq_proc_ops;
1159
1160 return &mutex->litmus_lock;
1161}
1162
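
The wait-queue discipline implemented by __add_wait_queue_sorted() above (and relied on by __prioq_increase_pos()/__prioq_decrease_pos(), which simply remove and re-insert a waiter after its priority changes) keeps the queue ordered highest-priority first, with a newly added waiter going behind existing waiters of equal priority. A small standalone sketch of that ordering, with an integer field standing in for litmus->compare() (illustrative only, not kernel code):

#include <stdio.h>

struct waiter {
    const char *name;
    int prio;                  /* larger value = higher priority */
    struct waiter *next;
};

/* Keep the list ordered highest priority first; a new waiter is inserted
 * before the first strictly lower-priority entry, so it queues behind
 * existing waiters of equal priority (same discipline as
 * __add_wait_queue_sorted(): head, middle, or tail insertion). */
static void insert_sorted(struct waiter **head, struct waiter *w)
{
    while (*head && (*head)->prio >= w->prio)
        head = &(*head)->next;
    w->next = *head;
    *head = w;
}

int main(void)
{
    struct waiter a = { "A", 10 }, b = { "B", 30 }, c = { "C", 20 }, d = { "D", 20 };
    struct waiter *q = NULL, *it;

    insert_sorted(&q, &a);
    insert_sorted(&q, &b);
    insert_sorted(&q, &c);
    insert_sorted(&q, &d);     /* ties with C, so it queues behind C */

    for (it = q; it; it = it->next)
        printf("%s(%d) ", it->name, it->prio);
    printf("\n");              /* prints: B(30) C(20) D(20) A(10) */
    return 0;
}
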