-rw-r--r--  include/litmus/litmus_proc.h |   7
-rw-r--r--  include/litmus/locking.h     |  17
-rw-r--r--  include/litmus/rsm_lock.h    |   3
-rw-r--r--  litmus/ikglp_lock.c          |   3
-rw-r--r--  litmus/kfmlp_lock.c          |   1
-rw-r--r--  litmus/litmus_proc.c         |  39
-rw-r--r--  litmus/locking.c             |  93
-rw-r--r--  litmus/nvidia_info.c         |  18
-rw-r--r--  litmus/rsm_lock.c            | 222
-rw-r--r--  litmus/sched_cedf.c          |   6
-rw-r--r--  litmus/sched_gsn_edf.c       |   1
-rw-r--r--  litmus/sched_pfp.c           |   4
-rw-r--r--  litmus/srp.c                 |   1
13 files changed, 339 insertions, 76 deletions
diff --git a/include/litmus/litmus_proc.h b/include/litmus/litmus_proc.h
index 6800e725d48c..0c5c07ea0ef5 100644
--- a/include/litmus/litmus_proc.h
+++ b/include/litmus/litmus_proc.h
@@ -23,3 +23,10 @@ void remove_plugin_proc_dir(struct sched_plugin* plugin);
  * -EFAULT. */
 int copy_and_chomp(char *kbuf, unsigned long ksize,
 		   __user const char* ubuf, unsigned long ulength);
+
+
+#ifdef CONFIG_LITMUS_LOCKING
+struct proc_dir_entry* litmus_add_proc_lock(struct litmus_lock *l, read_proc_t func);
+void litmus_remove_proc_lock(struct litmus_lock* l);
+#endif
+
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index 4a5f198a0407..22f7372bf621 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -24,6 +24,11 @@ static inline struct task_struct* top_priority(struct binheap* handle) {
 void print_hp_waiters(struct binheap_node* n, int depth);
 #endif
 
+#define LOCK_NAME_LEN 16
+struct litmus_lock_proc_ops {
+	void (*add)(struct litmus_lock *l);
+	void (*remove)(struct litmus_lock *l);
+};
 
 /* Generic base struct for LITMUS^RT userspace semaphores.
  * This structure should be embedded in protocol-specific semaphores.
@@ -41,6 +46,10 @@ struct litmus_lock {
 	struct lock_class_key key;
 //#endif
 #endif
+
+	struct litmus_lock_proc_ops *proc;
+	struct proc_dir_entry *proc_entry;
+	char name[LOCK_NAME_LEN];
 };
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
@@ -131,12 +140,16 @@ struct litmus_lock_ops {
 /* DGL requires a big lock to implement nested inheritance */
 #define lock_global_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags))
 #define lock_global(lock) raw_spin_lock((lock))
+#define trylock_global_irqsave(lock, flags) raw_spin_trylock_irqsave((lock), (flags))
+#define trylock_global(lock) raw_spin_trylock((lock))
 #define unlock_global_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags))
 #define unlock_global(lock) raw_spin_unlock((lock))
 
 /* fine-grain locking are no-ops with DGL support */
 #define lock_fine_irqsave(lock, flags)
 #define lock_fine(lock)
+#define trylock_fine_irqsave(lock, flags)
+#define trylock_fine(lock)
 #define unlock_fine_irqrestore(lock, flags)
 #define unlock_fine(lock)
 
@@ -145,11 +158,15 @@ struct litmus_lock_ops {
 /* global locking are no-ops without DGL support */
 #define lock_global_irqsave(lock, flags)
 #define lock_global(lock)
+#define trylock_global_irqsave(lock, flags)
+#define trylock_global(lock)
 #define unlock_global_irqrestore(lock, flags)
 #define unlock_global(lock)
 
 #define lock_fine_irqsave(lock, flags) raw_spin_lock_irqsave((lock), (flags))
 #define lock_fine(lock) raw_spin_lock((lock))
+#define trylock_fine_irqsave(lock, flags) raw_spin_trylock_irqsave((lock), (flags))
+#define trylock_fine(lock) raw_spin_trylock((lock))
 #define unlock_fine_irqrestore(lock, flags) raw_spin_unlock_irqrestore((lock), (flags))
 #define unlock_fine(lock) raw_spin_unlock((lock))
 
diff --git a/include/litmus/rsm_lock.h b/include/litmus/rsm_lock.h
index a15189683de4..f0d263322a69 100644
--- a/include/litmus/rsm_lock.h
+++ b/include/litmus/rsm_lock.h
@@ -51,4 +51,5 @@ void rsm_mutex_free(struct litmus_lock* l);
 struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops*);
 
 
-#endif
\ No newline at end of file
+#endif
+
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index a4ae74331782..aa6b659e437d 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -1768,6 +1768,7 @@ struct litmus_lock* ikglp_new(int m,
 	{
 		return NULL;
 	}
+	memset(sem, 0, sizeof(*sem));
 
 	sem->fifo_queues = kmalloc(sizeof(struct fifo_queue)*nr_replicas, GFP_KERNEL);
 	if(!sem->fifo_queues)
@@ -2484,7 +2485,7 @@ ikglp_donee_heap_node_t* gpu_ikglp_advise_donee_selection(
 	if(temp_distance < distance || donee_node == NULL) {
 		int dist_from_head = IKGLP_INVAL_DISTANCE;
 
-		TRACE_CUR("searching for donor on GPU %d", i);
+		TRACE_CUR("searching for donor on GPU %d\n", i);
 
 		// visit each queue and pick a donee. bail as soon as we find
 		// one for this class.
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index 785a095275e6..377e5a8f7456 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -486,6 +486,7 @@ struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user args)
 	{
 		return(NULL);
 	}
+	memset(sem, 0, sizeof(*sem));
 
 	sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
 	if(!sem->queues)
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index 136fecfb0b8b..be62d04da376 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -10,6 +10,10 @@
 
 #include <litmus/clustered.h>
 
+#ifdef CONFIG_LITMUS_LOCKING
+#include <litmus/locking.h>
+#endif
+
 /* in litmus/litmus.c */
 extern atomic_t rt_task_count;
 
@@ -23,6 +27,9 @@ static struct proc_dir_entry *litmus_dir = NULL,
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	*klmirqd_file = NULL,
 #endif
+#ifdef CONFIG_LITMUS_LOCKING
+	*locks_dir = NULL,
+#endif
 	*plugs_file = NULL;
 
 /* in litmus/sync.c */
@@ -187,6 +194,15 @@ int __init init_litmus_proc(void)
 	plugs_file = create_proc_read_entry("loaded", 0444, plugs_dir,
 					    proc_read_plugins, NULL);
 
+#ifdef CONFIG_LITMUS_LOCKING
+	locks_dir = proc_mkdir("locks", litmus_dir);
+	if (!locks_dir) {
+		printk(KERN_ERR "Could not allocate locks directory "
+				"procfs entry.\n");
+		return -ENOMEM;
+	}
+#endif
+
 	return 0;
 }
 
@@ -196,6 +212,8 @@ void exit_litmus_proc(void)
196 remove_proc_entry("loaded", plugs_dir); 212 remove_proc_entry("loaded", plugs_dir);
197 if (plugs_dir) 213 if (plugs_dir)
198 remove_proc_entry("plugins", litmus_dir); 214 remove_proc_entry("plugins", litmus_dir);
215 if (locks_dir)
216 remove_proc_entry("locks", litmus_dir);
199 if (stat_file) 217 if (stat_file)
200 remove_proc_entry("stats", litmus_dir); 218 remove_proc_entry("stats", litmus_dir);
201 if (curr_file) 219 if (curr_file)
@@ -362,3 +380,24 @@ struct proc_dir_entry* create_cluster_file(struct proc_dir_entry* parent,
 	return cluster_file;
 }
 
+#ifdef CONFIG_LITMUS_LOCKING
+struct proc_dir_entry* litmus_add_proc_lock(struct litmus_lock* l, read_proc_t func)
+{
+	struct proc_dir_entry* entry = NULL;
+
+	if (locks_dir)
+		entry = create_proc_read_entry(l->name, 0444, locks_dir, func, l);
+
+	return entry;
+}
+
+void litmus_remove_proc_lock(struct litmus_lock* l)
+{
+	if (locks_dir)
+		remove_proc_entry(l->name, locks_dir);
+}
+#endif
+
+
+
+
diff --git a/litmus/locking.c b/litmus/locking.c
index c21ec1ae36d7..b7f02f0e6b24 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -28,7 +28,7 @@ struct fdso_ops generic_lock_ops = {
 	.create = create_generic_lock,
 	.open = open_generic_lock,
 	.close = close_generic_lock,
-	.destroy = destroy_generic_lock
+	.destroy = destroy_generic_lock,
 };
 
 static atomic_t lock_id_gen = ATOMIC_INIT(0);
@@ -59,12 +59,18 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
 		INIT_BINHEAP_NODE(&lock->nest.hp_binheap_node);
 		if(!lock->nest.hp_waiter_ptr) {
 			TRACE_CUR("BEWARE: hp_waiter_ptr should probably not be NULL in "
-				  "most uses. (exception: IKGLP donors)\n");
+				  "most cases. (exception: IKGLP donors)\n");
 		}
 #endif
 		lock->type = type;
 		lock->ident = atomic_inc_return(&lock_id_gen);
 		*obj_ref = lock;
+
+		TRACE_CUR("Lock %d created. Type = %d\n.", lock->ident, type);
+
+		if (lock->proc && lock->proc->add) {
+			lock->proc->add(lock);
+		}
 	}
 	return err;
 }
@@ -81,8 +87,14 @@ static int open_generic_lock(struct od_table_entry* entry, void* __user arg)
 static int close_generic_lock(struct od_table_entry* entry)
 {
 	struct litmus_lock* lock = get_lock(entry);
-	if (lock->ops->close)
+	if (lock->ops->close) {
+
+		if (lock->proc && lock->proc->remove) {
+			lock->proc->remove(lock);
+		}
+
 		return lock->ops->close(lock);
+	}
 	else
 		return 0; /* default: closing succeeds */
 }
@@ -109,8 +121,11 @@ asmlinkage long sys_litmus_lock(int lock_od)
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
 		//TRACE_CUR("attempts to lock 0x%p\n", l);
-		TRACE_CUR("attempts to lock %d\n", l->ident);
+		TRACE_CUR("Attempts to lock %d\n", l->ident);
 		err = l->ops->lock(l);
+		if (!err) {
+			TRACE_CUR("Got lock %d\n", l->ident);
+		}
 	}
 
 	/* Note: task my have been suspended or preempted in between! Take
@@ -138,8 +153,11 @@ asmlinkage long sys_litmus_unlock(int lock_od)
 	if (entry && is_lock(entry)) {
 		l = get_lock(entry);
 		//TRACE_CUR("attempts to unlock 0x%p\n", l);
-		TRACE_CUR("attempts to unlock %d\n", l->ident);
+		TRACE_CUR("Attempts to unlock %d\n", l->ident);
 		err = l->ops->unlock(l);
+		if (!err) {
+			TRACE_CUR("Unlocked %d\n", l->ident);
+		}
 	}
 
 	/* Note: task my have been preempted in between! Take this into
@@ -285,22 +303,50 @@ void init_dgl_waitqueue_entry(wait_queue_t *wq_node, dgl_wait_state_t* dgl_wait)
285 wq_node->func = dgl_wake_up; 303 wq_node->func = dgl_wake_up;
286} 304}
287 305
306#ifdef CONFIG_SCHED_DEBUG_TRACE
307static void snprintf_dgl(char* buf, size_t bsz, struct litmus_lock* dgl_locks[], int sz)
308{
309 int i;
310 char* ptr;
311
312 ptr = buf;
313 for(i = 0; i < sz && ptr < buf+bsz; ++i)
314 {
315 struct litmus_lock *l = dgl_locks[i];
316 int remaining = bsz - (ptr-buf);
317 int written;
318
319 if(i == 0)
320 written = snprintf(ptr, remaining, "%d ", l->ident);
321 else if(i == sz - 1)
322 written = snprintf(ptr, remaining, " %d", l->ident);
323 else
324 written = snprintf(ptr, remaining, " %d ", l->ident);
325 ptr += written;
326 }
327}
328#endif
288 329
289static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait) 330static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
290{ 331{
291 int i; 332 int i;
292 unsigned long irqflags; //, dummyflags; 333 unsigned long irqflags; //, dummyflags;
293 raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task); 334 raw_spinlock_t *dgl_lock;
335
336#ifdef CONFIG_SCHED_DEBUG_TRACE
337 char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
338 snprintf_dgl(dglstr, sizeof(dglstr), dgl_wait->locks, dgl_wait->size);
339 TRACE_CUR("Locking DGL with size %d: %s\n", dgl_wait->size, dglstr);
340#endif
341
342 dgl_lock = litmus->get_dgl_spinlock(dgl_wait->task);
294 343
295 BUG_ON(dgl_wait->task != current); 344 BUG_ON(dgl_wait->task != current);
296 345
297 raw_spin_lock_irqsave(dgl_lock, irqflags); 346 raw_spin_lock_irqsave(dgl_lock, irqflags);
298 347
299
300 dgl_wait->nr_remaining = dgl_wait->size; 348 dgl_wait->nr_remaining = dgl_wait->size;
301 349
302 TRACE_CUR("Locking DGL with size %d\n", dgl_wait->size);
303
304 // try to acquire each lock. enqueue (non-blocking) if it is unavailable. 350 // try to acquire each lock. enqueue (non-blocking) if it is unavailable.
305 for(i = 0; i < dgl_wait->size; ++i) { 351 for(i = 0; i < dgl_wait->size; ++i) {
306 struct litmus_lock *l = dgl_wait->locks[i]; 352 struct litmus_lock *l = dgl_wait->locks[i];
@@ -347,7 +393,8 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
 
 	raw_spin_unlock_irqrestore(dgl_lock, irqflags); // free dgl_lock before suspending
 
-	schedule(); // suspend!!!
+	suspend_for_lock(); // suspend!!!
+	//schedule(); // suspend!!!
 
 	TS_DGL_LOCK_RESUME;
 
@@ -443,7 +490,11 @@ static long do_litmus_dgl_unlock(struct litmus_lock* dgl_locks[], int dgl_size)
 	int i;
 	long err = 0;
 
-	TRACE_CUR("Unlocking a DGL of %d size\n", dgl_size);
+#ifdef CONFIG_SCHED_DEBUG_TRACE
+	char dglstr[CONFIG_LITMUS_MAX_DGL_SIZE*5];
+	snprintf_dgl(dglstr, sizeof(dglstr), dgl_locks, dgl_size);
+	TRACE_CUR("Unlocking a DGL with size %d: %s\n", dgl_size, dglstr);
+#endif
 
 	for(i = dgl_size - 1; i >= 0; --i) { // unlock in reverse order
 
@@ -573,16 +624,16 @@ void suspend_for_lock(void)
 	unsigned int gpu_hide;
 #endif
 
-//#ifdef CONFIG_REALTIME_AUX_TASKS
-//	if (tsk_rt(t)->has_aux_tasks) {
-//		/* hide from aux tasks so they can't inherit our priority when we block
-//		 * for a litmus lock. inheritance is already going to a litmus lock
-//		 * holder. */
-//		aux_hide = tsk_rt(t)->hide_from_aux_tasks;
-//		aux_restore = 1;
-//		tsk_rt(t)->hide_from_aux_tasks = 1;
-//	}
-//#endif
+#ifdef CONFIG_REALTIME_AUX_TASKS
+	if (tsk_rt(t)->has_aux_tasks) {
+		/* hide from aux tasks so they can't inherit our priority when we block
+		 * for a litmus lock. inheritance is already going to a litmus lock
+		 * holder. */
+		aux_hide = tsk_rt(t)->hide_from_aux_tasks;
+		aux_restore = 1;
+		tsk_rt(t)->hide_from_aux_tasks = 1;
+	}
+#endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
 	if (tsk_rt(t)->held_gpus) {
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index ae4ad446408b..be0c09b19c6d 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -320,7 +320,6 @@ int init_nvidia_info(void)
 
 	init_nv_device_reg();
 	return(0);
-//	return(-1);
 	}
 }
 
@@ -650,7 +649,6 @@ static int gpu_klmirqd_decrease_priority(struct task_struct *klmirqd, struct tas
 long enable_gpu_owner(struct task_struct *t)
 {
 	long retval = 0;
-//	unsigned long flags;
 	int gpu;
 	nv_device_registry_t *reg;
 
@@ -675,8 +673,6 @@ long enable_gpu_owner(struct task_struct *t)
 	/* update the registration (and maybe klmirqd) */
 	reg = &NV_DEVICE_REG[gpu];
 
-//	raw_spin_lock_irqsave(&reg->lock, flags);
-
 	binheap_add(&tsk_rt(t)->gpu_owner_node, &reg->owners,
 		    struct rt_param, gpu_owner_node);
 
@@ -693,8 +689,6 @@ long enable_gpu_owner(struct task_struct *t)
 	}
 #endif
 
-//	raw_spin_unlock_irqsave(&reg->lock, flags);
-
 out:
 	return retval;
 }
@@ -703,7 +697,6 @@ out:
 long disable_gpu_owner(struct task_struct *t)
 {
 	long retval = 0;
-//	unsigned long flags;
 	int gpu;
 	nv_device_registry_t *reg;
 
@@ -731,9 +724,6 @@ long disable_gpu_owner(struct task_struct *t)
 
 	reg = &NV_DEVICE_REG[gpu];
 
-//	raw_spin_lock_irqsave(&reg->lock, flags);
-
-
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	hp = container_of(binheap_top_entry(&reg->owners, struct rt_param, gpu_owner_node),
 			  struct task_struct, rt_param);
@@ -761,9 +751,6 @@ long disable_gpu_owner(struct task_struct *t)
 	binheap_delete(&tsk_rt(t)->gpu_owner_node, &reg->owners);
 #endif
 
-//	raw_spin_unlock_irqsave(&reg->lock, flags);
-
-
 out:
 	return retval;
 }
@@ -792,15 +779,11 @@ int gpu_owner_increase_priority(struct task_struct *t)
 	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
 
 	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
-		WARN_ON(!is_running(t) && !tsk_rt(t)->hide_from_gpu);
 		TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n",
 			  t->comm, t->pid, gpu);
 		goto out;
 	}
 
-
-
-
 	TRACE_CUR("task %s/%d on GPU %d increasing priority.\n", t->comm, t->pid, gpu);
 	reg = &NV_DEVICE_REG[gpu];
 
@@ -842,7 +825,6 @@ int gpu_owner_decrease_priority(struct task_struct *t)
 	gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
 
 	if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) {
-		WARN_ON(!is_running(t) && !tsk_rt(t)->hide_from_gpu);
 		TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n",
 			  t->comm, t->pid, gpu);
 		goto out;
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c
index 3dfd8ae9d221..ae6dd3fb237b 100644
--- a/litmus/rsm_lock.c
+++ b/litmus/rsm_lock.c
@@ -5,6 +5,8 @@
5#include <litmus/sched_plugin.h> 5#include <litmus/sched_plugin.h>
6#include <litmus/rsm_lock.h> 6#include <litmus/rsm_lock.h>
7 7
8#include <litmus/litmus_proc.h>
9
8//#include <litmus/edf_common.h> 10//#include <litmus/edf_common.h>
9 11
10#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) 12#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
@@ -14,43 +16,43 @@
 
 /* caller is responsible for locking */
 static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
 						    struct task_struct* skip)
 {
 	wait_queue_t *q;
 	struct list_head *pos;
 	struct task_struct *queued = NULL, *found = NULL;
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 	dgl_wait_state_t *dgl_wait = NULL;
 #endif
 
 	list_for_each(pos, &mutex->wait.task_list) {
 		q = list_entry(pos, wait_queue_t, task_list);
 
 #ifdef CONFIG_LITMUS_DGL_SUPPORT
 		if(q->func == dgl_wake_up) {
 			dgl_wait = (dgl_wait_state_t*) q->private;
 			if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) {
 				queued = dgl_wait->task;
 			}
 			else {
 				queued = NULL; // skip it.
 			}
 		}
 		else {
 			queued = (struct task_struct*) q->private;
 		}
 #else
 		queued = (struct task_struct*) q->private;
 #endif
 
 		/* Compare task prios, find high prio task. */
 		//if (queued && queued != skip && edf_higher_prio(queued, found)) {
 		if (queued && queued != skip && litmus->compare(queued, found)) {
 			found = queued;
 		}
 	}
 	return found;
 }
 
 
@@ -76,7 +78,8 @@ int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
 	BUG_ON(t != current);
 
 	if (mutex->owner) {
-		TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident);
+		TRACE_TASK(t, "Enqueuing on lock %d (held by %s/%d).\n",
+			   l->ident, mutex->owner->comm, mutex->owner->pid);
 
 		init_dgl_waitqueue_entry(wq_node, dgl_wait);
 
@@ -205,7 +208,8 @@ int rsm_mutex_lock(struct litmus_lock* l)
 	lock_fine_irqsave(&mutex->lock, flags);
 
 	if (mutex->owner) {
-		TRACE_TASK(t, "Blocking on lock %d.\n", l->ident);
+		TRACE_TASK(t, "Blocking on lock %d (held by %s/%d).\n",
+			   l->ident, mutex->owner->comm, mutex->owner->pid);
 
 #if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 	// KLUDGE: don't count this suspension as time in the critical gpu
@@ -358,7 +362,7 @@ int rsm_mutex_unlock(struct litmus_lock* l)
 		top_priority(&tsk_rt(t)->hp_blocked_tasks);
 
 	if((new_max_eff_prio == NULL) ||
 	   /* there was a change in eff prio */
 	   ( (new_max_eff_prio != old_max_eff_prio) &&
 	   /* and owner had the old eff prio */
 	     (effective_priority(t) == old_max_eff_prio)) )
@@ -402,7 +406,7 @@ int rsm_mutex_unlock(struct litmus_lock* l)
 	if (next) {
 		/* next becomes the resouce holder */
 		mutex->owner = next;
-		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);
+		TRACE_CUR("lock %d ownership passed to %s/%d\n", l->ident, next->comm, next->pid);
 
 		/* determine new hp_waiter if necessary */
 		if (next == mutex->hp_waiter) {
@@ -459,7 +463,7 @@ int rsm_mutex_unlock(struct litmus_lock* l)
 #endif
 
 		/* It is possible that 'next' *should* be the hp_waiter, but isn't
 		 * because that update hasn't yet executed (update operation is
 		 * probably blocked on mutex->lock). So only inherit if the top of
 		 * 'next's top heap node is indeed the effective prio. of hp_waiter.
 		 * (We use l->hp_waiter_eff_prio instead of effective_priority(hp_waiter)
@@ -693,7 +697,7 @@ void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
 	}
 
 		// beware: recursion
 		litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags); // will unlock mutex->lock
 	}
 	else {
 		raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
@@ -754,8 +758,11 @@ int rsm_mutex_close(struct litmus_lock* l)
 	unlock_fine_irqrestore(&mutex->lock, flags);
 	unlock_global_irqrestore(dgl_lock, flags);
 
+	/*
+	TODO: Currently panic. FIX THIS!
 	if (owner)
 		rsm_mutex_unlock(l);
+	*/
 
 	return 0;
 }
@@ -765,6 +772,154 @@ void rsm_mutex_free(struct litmus_lock* lock)
 	kfree(rsm_mutex_from_lock(lock));
 }
 
+
+/* The following may race if DGLs are enabled. Only examine /proc if things
+   appear to be locked up. TODO: FIX THIS! Must find an elegant way to transmit
+   DGL lock to function. */
+static int rsm_proc_print(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	struct rsm_mutex *mutex = rsm_mutex_from_lock((struct litmus_lock*)data);
+
+	int attempts = 0;
+	const int max_attempts = 10;
+	int locked = 0;
+	unsigned long flags;
+
+	int size = count;
+	char *next = page;
+	int w;
+
+	while(attempts < max_attempts)
+	{
+		locked = raw_spin_trylock_irqsave(&mutex->lock, flags);
+
+		if (unlikely(!locked)) {
+			++attempts;
+			cpu_relax();
+		}
+		else {
+			break;
+		}
+	}
+
+	if (locked) {
+		w = scnprintf(next, size, "%s:\n", mutex->litmus_lock.name);
+		size -= w;
+		next += w;
+
+		w = scnprintf(next, size,
+			      "owner: %s/%d (inh: %s/%d)\n",
+			      (mutex->owner) ?
+			      mutex->owner->comm : "nil",
+			      (mutex->owner) ?
+			      mutex->owner->pid : -1,
+			      (mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
+			      tsk_rt(mutex->owner)->inh_task->comm : "nil",
+			      (mutex->owner && tsk_rt(mutex->owner)->inh_task) ?
+			      tsk_rt(mutex->owner)->inh_task->pid : -1);
+		size -= w;
+		next += w;
+
+		w = scnprintf(next, size,
+			      "hp waiter: %s/%d (inh: %s/%d)\n",
+			      (mutex->hp_waiter) ?
+			      mutex->hp_waiter->comm : "nil",
+			      (mutex->hp_waiter) ?
+			      mutex->hp_waiter->pid : -1,
+			      (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
+			      tsk_rt(mutex->hp_waiter)->inh_task->comm : "nil",
+			      (mutex->hp_waiter && tsk_rt(mutex->hp_waiter)->inh_task) ?
+			      tsk_rt(mutex->hp_waiter)->inh_task->pid : -1);
+		size -= w;
+		next += w;
+
+		w = scnprintf(next, size, "\nblocked tasks, front to back:\n");
+		size -= w;
+		next += w;
+
+		if (waitqueue_active(&mutex->wait)) {
+			wait_queue_t *q;
+			struct list_head *pos;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+			dgl_wait_state_t *dgl_wait = NULL;
+#endif
+			list_for_each(pos, &mutex->wait.task_list) {
+				struct task_struct *blocked_task;
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+				int enabled = 1;
+#endif
+				q = list_entry(pos, wait_queue_t, task_list);
+
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+				if(q->func == dgl_wake_up) {
+					dgl_wait = (dgl_wait_state_t*) q->private;
+					blocked_task = dgl_wait->task;
+
+					if(tsk_rt(blocked_task)->blocked_lock != &mutex->litmus_lock)
+						enabled = 0;
+				}
+				else {
+					blocked_task = (struct task_struct*) q->private;
+				}
+#else
+				blocked_task = (struct task_struct*) q->private;
+#endif
+
+				w = scnprintf(next, size,
+					      "\t%s/%d (inh: %s/%d)"
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+					      " DGL enabled: %d"
+#endif
+					      "\n",
+					      blocked_task->comm, blocked_task->pid,
+					      (tsk_rt(blocked_task)->inh_task) ?
+					      tsk_rt(blocked_task)->inh_task->comm : "nil",
+					      (tsk_rt(blocked_task)->inh_task) ?
+					      tsk_rt(blocked_task)->inh_task->pid : -1
+#ifdef CONFIG_LITMUS_DGL_SUPPORT
+					      , enabled
+#endif
+					      );
+				size -= w;
+				next += w;
+			}
+		}
+		else {
+			w = scnprintf(next, size, "\t<NONE>\n");
+			size -= w;
+			next += w;
+		}
+
+		raw_spin_unlock_irqrestore(&mutex->lock, flags);
+	}
+	else {
+		w = scnprintf(next, size, "%s is busy.\n", mutex->litmus_lock.name);
+		size -= w;
+		next += w;
+	}
+
+	return count - size;
+}
+
+static void rsm_proc_add(struct litmus_lock* l)
+{
+	snprintf(l->name, LOCK_NAME_LEN, "rsm-%d", l->ident);
+
+	l->proc_entry = litmus_add_proc_lock(l, rsm_proc_print);
+}
+
+static void rsm_proc_remove(struct litmus_lock* l)
+{
+	litmus_remove_proc_lock(l);
+}
+
+static struct litmus_lock_proc_ops rsm_proc_ops =
+{
+	.add = rsm_proc_add,
+	.remove = rsm_proc_remove
+};
+
+
 struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
 {
 	struct rsm_mutex* mutex;
@@ -772,6 +927,7 @@ struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
 	mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
 	if (!mutex)
 		return NULL;
+	memset(mutex, 0, sizeof(*mutex));
 
 	mutex->litmus_lock.ops = ops;
 	mutex->owner = NULL;
@@ -791,6 +947,8 @@ struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
 
 	((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;
 
+	((struct litmus_lock*)mutex)->proc = &rsm_proc_ops;
+
 	return &mutex->litmus_lock;
 }
 
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index db47f4413329..2ec919dc850c 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1068,7 +1068,7 @@ static void cedf_task_block(struct task_struct *t)
 #ifdef CONFIG_LITMUS_NVIDIA
 	if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) {
 
-		TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid);
+		TRACE_CUR("%s/%d is blocked so klmirqd threads may inherit.\n", t->comm, t->pid);
 		enable_gpu_owner(t);
 	}
 #endif
@@ -1118,7 +1118,7 @@ static void cedf_task_exit(struct task_struct * t)
 	raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
 
 	BUG_ON(!is_realtime(t));
 	TRACE_TASK(t, "RIP\n");
 }
 
 static long cedf_admit_task(struct task_struct* tsk)
@@ -1128,7 +1128,7 @@ static long cedf_admit_task(struct task_struct* tsk)
 					      edf_max_heap_base_priority_order);
 #endif
 
-	return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
+	return (task_cpu(tsk) == tsk->rt_param.task_params.cpu) ? 0 : -EINVAL;
 }
 
 
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 01791a18e8f3..b3309ee2561e 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1720,6 +1720,7 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
+	memset(sem, 0, sizeof(*sem));
 
 	sem->owner = NULL;
 	sem->hp_waiter = NULL;
diff --git a/litmus/sched_pfp.c b/litmus/sched_pfp.c
index a96c2b1aa26f..a435ed6621cf 100644
--- a/litmus/sched_pfp.c
+++ b/litmus/sched_pfp.c
@@ -692,6 +692,7 @@ static struct litmus_lock* pfp_new_fmlp(void)
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
+	memset(sem, 0, sizeof(*sem));
 
 	sem->owner = NULL;
 	init_waitqueue_head(&sem->wait);
@@ -971,6 +972,7 @@ static struct litmus_lock* pfp_new_mpcp(int vspin)
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
+	memset(sem, 0, sizeof(*sem));
 
 	sem->owner = NULL;
 	init_waitqueue_head(&sem->wait);
@@ -1362,6 +1364,7 @@ static struct litmus_lock* pfp_new_pcp(int on_cpu)
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
+	memset(sem, 0, sizeof(*sem));
 
 	sem->litmus_lock.ops = &pfp_pcp_lock_ops;
 	pcp_init_semaphore(sem, on_cpu);
@@ -1552,6 +1555,7 @@ static struct litmus_lock* pfp_new_dpcp(int on_cpu)
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
+	memset(sem, 0, sizeof(*sem));
 
 	sem->litmus_lock.ops = &pfp_dpcp_lock_ops;
 	sem->owner_cpu = NO_CPU;
diff --git a/litmus/srp.c b/litmus/srp.c
index 2ed4ec12a9d3..5ffdc9e7dc5b 100644
--- a/litmus/srp.c
+++ b/litmus/srp.c
@@ -219,6 +219,7 @@ struct srp_semaphore* allocate_srp_semaphore(void)
 	sem = kmalloc(sizeof(*sem), GFP_KERNEL);
 	if (!sem)
 		return NULL;
+	memset(sem, 0, sizeof(*sem));
 
 	INIT_LIST_HEAD(&sem->ceiling.list);
 	sem->ceiling.priority = 0;