author    Glenn Elliott <gelliott@cs.unc.edu>  2012-04-18 21:33:21 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-04-18 21:33:21 -0400
commit    149ef3b424a49e6b928c5e23fea83380ed95ea38 (patch)
tree      38b9a7397875be56f31f9f04f86fcf1f9e4966ac
parent    f916cdb8e6a9ee2c917fddb7351e6bb39f6c953e (diff)

    Zap line-endings
-rw-r--r--  include/litmus/fdso.h            |   4
-rw-r--r--  include/litmus/fpmath.h          |  14
-rw-r--r--  include/litmus/gpu_affinity.h    |   6
-rw-r--r--  include/litmus/ikglp_lock.h      |   8
-rw-r--r--  include/litmus/kexclu_affinity.h |   2
-rw-r--r--  include/litmus/kfmlp_lock.h      |  12
-rw-r--r--  include/litmus/litmus_softirq.h  |  18
-rw-r--r--  include/litmus/locking.h         |  30
-rw-r--r--  include/litmus/rt_param.h        |  20
-rw-r--r--  include/litmus/sched_plugin.h    |   4
-rw-r--r--  litmus/affinity.c                |   2
-rw-r--r--  litmus/binheap.c                 | 126
-rw-r--r--  litmus/edf_common.c              |  18
-rw-r--r--  litmus/fdso.c                    |   2
-rw-r--r--  litmus/gpu_affinity.c            |  26
-rw-r--r--  litmus/ikglp_lock.c              |   2
-rw-r--r--  litmus/kexclu_affinity.c         |   6
-rw-r--r--  litmus/kfmlp_lock.c              | 352
-rw-r--r--  litmus/litmus.c                  |  38
-rw-r--r--  litmus/litmus_pai_softirq.c      |   2
-rw-r--r--  litmus/litmus_proc.c             |   4
-rw-r--r--  litmus/litmus_softirq.c          | 338
-rw-r--r--  litmus/locking.c                 |   6
-rw-r--r--  litmus/nvidia_info.c             | 142
-rw-r--r--  litmus/rsm_lock.c                |   4
-rw-r--r--  litmus/sched_cedf.c              | 316
-rw-r--r--  litmus/sched_gsn_edf.c           | 228
-rw-r--r--  litmus/sched_plugin.c            |   6
-rw-r--r--  litmus/sched_task_trace.c        |  18
29 files changed, 877 insertions(+), 877 deletions(-)
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 552a1e731672..1f5d3bd1a1db 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -19,7 +19,7 @@ typedef enum {

	FMLP_SEM	= 0,
	SRP_SEM		= 1,

	RSM_MUTEX	= 2,
	IKGLP_SEM	= 3,
	KFMLP_SEM	= 4,
@@ -28,7 +28,7 @@ typedef enum {
	IKGLP_GPU_AFF_OBS = 6,
	KFMLP_SIMPLE_GPU_AFF_OBS = 7,
	KFMLP_GPU_AFF_OBS = 8,

	MAX_OBJ_TYPE	= 8
} obj_type_t;

diff --git a/include/litmus/fpmath.h b/include/litmus/fpmath.h
index d062b5ab5dc2..ba4121eaa1bf 100644
--- a/include/litmus/fpmath.h
+++ b/include/litmus/fpmath.h
@@ -33,11 +33,11 @@ static inline fp_t _frac(fpbuf_t a, fpbuf_t b)
	return _fp(FP(a).val / (b));
}

static inline fpbuf_t _point(fp_t x)
{
	return (x.val % (1 << FP_SHIFT));

}

#define fp2str(x) x.val
/*(x.val >> FP_SHIFT), (x.val % (1 << FP_SHIFT)) */
@@ -52,11 +52,11 @@ static inline fpbuf_t _floor(fp_t x)
static inline fpbuf_t _round(fp_t x)
{
	return _floor(x) + ((x.val >> ROUND_BIT) & 1);
}

/* multiply two fixed point values */
static inline fp_t _mul(fp_t a, fp_t b)
{
	return _fp((a.val * b.val) >> FP_SHIFT);
}

@@ -66,7 +66,7 @@ static inline fp_t _div(fp_t a, fp_t b)
#define unlikely(x) (x)
#define DO_UNDEF_UNLIKELY
#endif
	/* try not to overflow */
	if (unlikely( a.val > (2l << ((sizeof(fpbuf_t)*8) - FP_SHIFT)) ))
		return _fp((a.val / b.val) << FP_SHIFT);
	else
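To make the fixed-point convention used by _mul() concrete, here is a small self-contained sketch of the same scaling rule. It assumes FP_SHIFT is 10 and fpbuf_t is a 64-bit integer purely for illustration; the real values are defined earlier in fpmath.h and may differ.

    #include <stdio.h>
    #include <stdint.h>

    #define FP_SHIFT 10              /* assumed for illustration only */
    typedef int64_t fpbuf_t;

    /* mirror of _mul(): multiply two fixed-point values and rescale */
    static fpbuf_t fx_mul(fpbuf_t a, fpbuf_t b)
    {
    	return (a * b) >> FP_SHIFT;
    }

    int main(void)
    {
    	fpbuf_t one_half = 1 << (FP_SHIFT - 1);   /* 0.5 in fixed point */
    	fpbuf_t three    = 3 << FP_SHIFT;         /* 3.0 in fixed point */
    	fpbuf_t prod     = fx_mul(one_half, three);

    	/* prints integer part 1 and raw fraction 512/1024, i.e. 1.5 */
    	printf("0.5 * 3.0 -> int part %lld, frac raw %lld/%d\n",
    	       (long long)(prod >> FP_SHIFT),
    	       (long long)(prod % (1 << FP_SHIFT)),
    	       1 << FP_SHIFT);
    	return 0;
    }
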
diff --git a/include/litmus/gpu_affinity.h b/include/litmus/gpu_affinity.h
index ca4d10b93203..d4db2003ad86 100644
--- a/include/litmus/gpu_affinity.h
+++ b/include/litmus/gpu_affinity.h
@@ -34,15 +34,15 @@ static inline lt_t get_gpu_estimate(struct task_struct* t, gpu_migration_dist_t
	int i;
	fpbuf_t temp = _fp_to_integer(t->rt_param.gpu_migration_est[dist].est);
	lt_t val = (temp >= 0) ? temp : 0;  // never allow negative estimates...

	WARN_ON(temp < 0);

	// lower-bound a distant migration to be at least equal to the level
	// below it.
	for(i = dist-1; (val == 0) && (i >= MIG_LOCAL); --i) {
		val = _fp_to_integer(t->rt_param.gpu_migration_est[i].est);
	}

	// minimum value is 1 (val is 0 if we haven't run with local affinity yet)
	// TODO: pick a better default min-value. 1 is too small. perhaps
	// task execution time?
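The loop above falls back toward MIG_LOCAL until it finds a non-zero estimate. A stand-alone sketch of that lower-bounding rule on a plain array, with made-up numbers (the real code reads fixed-point estimates out of rt_param, as shown in the hunk):

    /* illustrative only: one estimate per migration distance, 0 means "no data yet" */
    enum sketch_dist { MIG_LOCAL = 0, MIG_NEAR, MIG_MED, MIG_FAR, MIG_NONE };

    static long lower_bounded_est(const long est[], int dist)
    {
    	long val = est[dist];
    	int i;

    	/* walk toward MIG_LOCAL until some level has data */
    	for (i = dist - 1; (val == 0) && (i >= MIG_LOCAL); --i)
    		val = est[i];

    	return val;
    }

    /* e.g. est[] = {5, 7, 0, 0, 0}: querying MIG_MED returns 7 (the MIG_NEAR value) */
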
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
index 2a75a1719815..08e73332c3d4 100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/ikglp_lock.h
@@ -87,10 +87,10 @@ struct ikglp_semaphore
	struct fifo_queue *fifo_queues;  // array nr_replicas in length
	struct binheap_handle priority_queue;  // max-heap, base prio
	struct binheap_handle donors;  // max-heap, base prio

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	struct ikglp_affinity *aff_obs;
#endif
};

static inline struct ikglp_semaphore* ikglp_from_lock(struct litmus_lock* lock)
@@ -121,7 +121,7 @@ struct ikglp_affinity_ops
	struct task_struct* (*advise_steal)(struct ikglp_affinity* aff, wait_queue_t** to_steal, struct fifo_queue** to_steal_from);  // select steal from FIFO
	struct task_struct* (*advise_donee_selection)(struct ikglp_affinity* aff, wait_queue_t** donee, struct fifo_queue** donee_queue);  // select a donee
	struct task_struct* (*advise_doner_to_fq)(struct ikglp_affinity* aff, ikglp_wait_state_t** donor);  // select a donor to move to PQ

	void (*notify_enqueue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);  // fifo enqueue
	void (*notify_dequeue)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);  // fifo dequeue
	void (*notify_acquired)(struct ikglp_affinity* aff, struct fifo_queue* fq, struct task_struct* t);  // replica acquired
@@ -132,7 +132,7 @@ struct ikglp_affinity_ops
struct ikglp_affinity
{
	struct affinity_observer obs;
	struct ikglp_affinity_ops *ops;
	struct fifo_queue *q_info;
	int *nr_cur_users_on_rsrc;
	int offset;
diff --git a/include/litmus/kexclu_affinity.h b/include/litmus/kexclu_affinity.h
index 51e097f8ec54..f6355de49074 100644
--- a/include/litmus/kexclu_affinity.h
+++ b/include/litmus/kexclu_affinity.h
@@ -10,7 +10,7 @@ struct affinity_observer
	struct affinity_observer_ops* ops;
	int type;
	int ident;

	struct litmus_lock* lock;  // the lock under observation
};

diff --git a/include/litmus/kfmlp_lock.h b/include/litmus/kfmlp_lock.h
index 6d7e24b2a3ad..5f0aae6e6f42 100644
--- a/include/litmus/kfmlp_lock.h
+++ b/include/litmus/kfmlp_lock.h
@@ -22,14 +22,14 @@ struct kfmlp_queue
struct kfmlp_semaphore
{
	struct litmus_lock litmus_lock;

	spinlock_t lock;

	int num_resources; /* aka k */

	struct kfmlp_queue *queues; /* array */
	struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	struct kfmlp_affinity *aff_obs;
#endif
@@ -59,7 +59,7 @@ struct kfmlp_affinity_ops
{
	struct kfmlp_queue* (*advise_enqueue)(struct kfmlp_affinity* aff, struct task_struct* t);
	struct task_struct* (*advise_steal)(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from);
	void (*notify_enqueue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
	void (*notify_dequeue)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
	void (*notify_acquired)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
	void (*notify_freed)(struct kfmlp_affinity* aff, struct kfmlp_queue* fq, struct task_struct* t);
@@ -69,7 +69,7 @@ struct kfmlp_affinity_ops
struct kfmlp_affinity
{
	struct affinity_observer obs;
	struct kfmlp_affinity_ops *ops;
	struct kfmlp_queue_info *q_info;
	int *nr_cur_users_on_rsrc;
	int offset;
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
index 34287f3cbb8d..1eb5ea1a6c4b 100644
--- a/include/litmus/litmus_softirq.h
+++ b/include/litmus/litmus_softirq.h
@@ -9,13 +9,13 @@
   are scheduled with the priority of the tasklet's
   owner---that is, the RT task on behalf the tasklet
   runs.

   Tasklets are current scheduled in FIFO order with
   NO priority inheritance for "blocked" tasklets.

   klitirqd assumes the priority of the owner of the
   tasklet when the tasklet is next to execute.

   Currently, hi-tasklets are scheduled before
   low-tasklets, regardless of priority of low-tasklets.
   And likewise, low-tasklets are scheduled before work
@@ -35,22 +35,22 @@
   workqueue, so daemons will likely not be immediately
   running when this function returns, though the required
   data will be initialized.

   @affinity_set: an array expressing the processor affinity
   for each of the NR_LITMUS_SOFTIRQD daemons. May be set
   to NULL for global scheduling.

   - Examples -
   8-CPU system with two CPU clusters:
     affinity[] = {0, 0, 0, 0, 3, 3, 3, 3}
     NOTE: Daemons not actually bound to specified CPU, but rather
     cluster in which the CPU resides.

   8-CPU system, partitioned:
     affinity[] = {0, 1, 2, 3, 4, 5, 6, 7}

   FIXME: change array to a CPU topology or array of cpumasks

 */
void spawn_klitirqd(int* affinity);

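A minimal usage sketch for the clustered example quoted above, assuming NR_LITMUS_SOFTIRQD is 8 and that the caller is kernel code that already includes litmus/litmus_softirq.h (the array contents come straight from the comment; nothing beyond that is implied by the header):

    /* daemons 0-3 follow the cluster containing CPU 0,
     * daemons 4-7 follow the cluster containing CPU 3 */
    static int klitirqd_affinity[8] = {0, 0, 0, 0, 3, 3, 3, 3};

    static void spawn_clustered_klitirqd(void)
    {
    	/* passing NULL instead of the array requests global scheduling */
    	spawn_klitirqd(klitirqd_affinity);
    }
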
@@ -176,7 +176,7 @@ static inline int litmus_schedule_work(


///////////// mutex operations for client threads.

void down_and_set_stat(struct task_struct* t,
		enum klitirqd_sem_status to_set,
		struct mutex* sem);
diff --git a/include/litmus/locking.h b/include/litmus/locking.h
index c2324c4ccb8a..36647fee03e4 100644
--- a/include/litmus/locking.h
+++ b/include/litmus/locking.h
@@ -11,7 +11,7 @@ struct nested_info
	struct litmus_lock *lock;
	struct task_struct *hp_waiter_eff_prio;
	struct task_struct **hp_waiter_ptr;
	struct binheap_node hp_binheap_node;
};

static inline struct task_struct* top_priority(struct binheap_handle* handle) {
@@ -31,12 +31,12 @@ void print_hp_waiters(struct binheap_node* n, int depth);
struct litmus_lock {
	struct litmus_lock_ops *ops;
	int type;

	int ident;

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	struct nested_info nest;
//#ifdef CONFIG_DEBUG_SPINLOCK
	char cheat_lockdep[2];
	struct lock_class_key key;
//#endif
@@ -81,15 +81,15 @@ struct litmus_lock_ops {
	/* Current tries to lock/unlock this lock (mandatory methods). */
	lock_lock_t lock;
	lock_unlock_t unlock;

	/* The lock is no longer being referenced (mandatory method). */
	lock_free_t deallocate;

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	void (*propagate_increase_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
	void (*propagate_decrease_inheritance)(struct litmus_lock* l, struct task_struct* t, raw_spinlock_t* to_unlock, unsigned long irqflags);
#endif

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	raw_spinlock_t* (*get_dgl_spin_lock)(struct litmus_lock *l);
	int (*dgl_lock)(struct litmus_lock *l, dgl_wait_state_t* dgl_wait, wait_queue_t* wq_node);
@@ -105,26 +105,26 @@ struct litmus_lock_ops {
   (no cycles!). However, DGLs allow locks to be acquired in any order. This
   makes nested inheritance very difficult (we don't yet know a solution) to
   realize with fine-grain locks, so we use a big lock instead.

   Code contains both fine-grain and coarse-grain methods together, side-by-side.
   Each lock operation *IS NOT* surrounded by ifdef/endif to help make code more
   readable. However, this leads to the odd situation where both code paths
   appear together in code as if they were both active together.

   THIS IS NOT REALLY THE CASE! ONLY ONE CODE PATH IS ACTUALLY ACTIVE!

   Example:
     lock_global_irqsave(coarseLock, flags);
     lock_fine_irqsave(fineLock, flags);

   Reality (coarse):
     lock_global_irqsave(coarseLock, flags);
     //lock_fine_irqsave(fineLock, flags);

   Reality (fine):
     //lock_global_irqsave(coarseLock, flags);
     lock_fine_irqsave(fineLock, flags);

   Be careful when you read code involving nested inheritance.
 */
#if defined(CONFIG_LITMUS_DGL_SUPPORT)
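One way to read the comment above: lock_global_irqsave()/lock_fine_irqsave() are configuration-selected macros, so only one of the two side-by-side calls does real work in any given build. A purely illustrative sketch of that pattern, assuming the coarse path is the one that is active when CONFIG_LITMUS_DGL_SUPPORT is set; these are not the actual definitions in locking.h:

    /* illustrative only: the inactive path compiles to a no-op, so writing
     * both calls side-by-side never takes two locks. */
    #ifdef CONFIG_LITMUS_DGL_SUPPORT
    #define lock_global_irqsave(lock, flags)  raw_spin_lock_irqsave((lock), (flags))
    #define lock_fine_irqsave(lock, flags)    do { (void)(flags); } while (0)
    #else
    #define lock_global_irqsave(lock, flags)  do { (void)(flags); } while (0)
    #define lock_fine_irqsave(lock, flags)    raw_spin_lock_irqsave((lock), (flags))
    #endif
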
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index e832ffcba17c..f2f3bfb6a6db 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -126,14 +126,14 @@ enum klitirqd_sem_status
};

typedef enum gpu_migration_dist
{
	// TODO: Make this variable against NR_NVIDIA_GPUS
	MIG_LOCAL = 0,
	MIG_NEAR = 1,
	MIG_MED = 2,
	MIG_FAR = 3,  // 8 GPUs in a binary tree hierarchy
	MIG_NONE = 4,

	MIG_LAST = MIG_NONE
} gpu_migration_dist_t;

@@ -159,11 +159,11 @@ struct rt_param {
#ifdef CONFIG_LITMUS_SOFTIRQD
	/* proxy threads have minimum priority by default */
	unsigned int is_proxy_thread:1;

	/* pointer to klitirqd currently working on this
	   task_struct's behalf. only set by the task pointed
	   to by klitirqd.

	   ptr only valid if is_proxy_thread == 0
	 */
	struct task_struct* cur_klitirqd;
@@ -190,14 +190,14 @@ struct rt_param {
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	fp_t gpu_fb_param_a;
	fp_t gpu_fb_param_b;

	gpu_migration_dist_t gpu_migration;
	int last_gpu;
	feedback_est_t gpu_migration_est[MIG_LAST+1];  // local, near, med, far

	lt_t accum_gpu_time;
	lt_t gpu_time_stamp;

	unsigned int suspend_gpu_tracker_on_block:1;
#endif
#endif
@@ -222,15 +222,15 @@ struct rt_param {
	 * an increased task priority.
	 */
	struct task_struct* inh_task;

#ifdef CONFIG_LITMUS_NESTED_LOCKING
	raw_spinlock_t hp_blocked_tasks_lock;
	struct binheap_handle hp_blocked_tasks;

	/* pointer to lock upon which is currently blocked */
	struct litmus_lock* blocked_lock;
#endif

#ifdef CONFIG_NP_SECTION
	/* For the FMLP under PSN-EDF, it is required to make the task
	 * non-preemptive from kernel space. In order not to interfere with
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index d14f705ef414..f0b464207e14 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -139,7 +139,7 @@ struct sched_plugin {
	task_exit_t task_exit;

	higher_prio_t compare;

#ifdef CONFIG_LITMUS_LOCKING
	/* locking protocols */
	allocate_lock_t allocate_lock;
@@ -158,7 +158,7 @@ struct sched_plugin {
#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	allocate_affinity_observer_t allocate_aff_obs;
#endif

#ifdef CONFIG_LITMUS_SOFTIRQD
	increase_prio_klitirq_t increase_prio_klitirqd;
	decrease_prio_klitirqd_t decrease_prio_klitirqd;
diff --git a/litmus/affinity.c b/litmus/affinity.c
index 3fa6dd789400..cd93249b5506 100644
--- a/litmus/affinity.c
+++ b/litmus/affinity.c
@@ -26,7 +26,7 @@ void init_topology(void) {
				cpumask_weight((struct cpumask *)&neigh_info[cpu].neighbors[i]);
		}
		printk("CPU %d has %d neighbors at level %d. (mask = %lx)\n",
				cpu, neigh_info[cpu].size[i], i,
				*cpumask_bits(neigh_info[cpu].neighbors[i]));
	}

diff --git a/litmus/binheap.c b/litmus/binheap.c
index 22feea614e50..8d42403ad52c 100644
--- a/litmus/binheap.c
+++ b/litmus/binheap.c
@@ -9,11 +9,11 @@ int binheap_is_in_this_heap(struct binheap_node *node,
	if(!binheap_is_in_heap(node)) {
		return 0;
	}

	while(node->parent != NULL) {
		node = node->parent;
	}

	return (node == heap->root);
}

@@ -23,7 +23,7 @@ static void __update_ref(struct binheap_node *parent,
{
	*(parent->ref_ptr) = child;
	*(child->ref_ptr) = parent;

	swap(parent->ref_ptr, child->ref_ptr);
}

@@ -35,7 +35,7 @@ static void __binheap_swap(struct binheap_node *parent,
//		dump_node_data(parent, child);
//		BUG();
//	}

	swap(parent->data, child->data);
	__update_ref(parent, child);
}
@@ -50,14 +50,14 @@ static void __binheap_swap_safe(struct binheap_handle *handle,
{
	swap(a->data, b->data);
	__update_ref(a, b);

	if((a->parent != NULL) && (a->parent == b->parent)) {
		/* special case: shared parent */
		swap(a->parent->left, a->parent->right);
	}
	else {
		/* Update pointers to swap parents. */

		if(a->parent) {
			if(a == a->parent->left) {
				a->parent->left = b;
@@ -66,7 +66,7 @@ static void __binheap_swap_safe(struct binheap_handle *handle,
				a->parent->right = b;
			}
		}

		if(b->parent) {
			if(b == b->parent->left) {
				b->parent->left = a;
@@ -75,48 +75,48 @@ static void __binheap_swap_safe(struct binheap_handle *handle,
				b->parent->right = a;
			}
		}

		swap(a->parent, b->parent);
	}

	/* swap children */

	if(a->left) {
		a->left->parent = b;

		if(a->right) {
			a->right->parent = b;
		}
	}

	if(b->left) {
		b->left->parent = a;

		if(b->right) {
			b->right->parent = a;
		}
	}

	swap(a->left, b->left);
	swap(a->right, b->right);


	/* update next/last/root pointers */

	if(a == handle->next) {
		handle->next = b;
	}
	else if(b == handle->next) {
		handle->next = a;
	}

	if(a == handle->last) {
		handle->last = b;
	}
	else if(b == handle->last) {
		handle->last = a;
	}

	if(a == handle->root) {
		handle->root = b;
	}
@@ -133,29 +133,29 @@ static void __binheap_swap_safe(struct binheap_handle *handle,
static void __binheap_update_last(struct binheap_handle *handle)
{
	struct binheap_node *temp = handle->last;

	/* find a "bend" in the tree. */
	while(temp->parent && (temp == temp->parent->left)) {
		temp = temp->parent;
	}

	/* step over to sibling if we're not at root */
	if(temp->parent != NULL) {
		temp = temp->parent->left;
	}

	/* now travel right as far as possible. */
	while(temp->right != NULL) {
		temp = temp->right;
	}

	/* take one step to the left if we're not at the bottom-most level. */
	if(temp->left != NULL) {
		temp = temp->left;
	}

	//BUG_ON(!(temp->left == NULL && temp->right == NULL));

	handle->last = temp;
}

@@ -166,22 +166,22 @@ static void __binheap_update_last(struct binheap_handle *handle)
static void __binheap_update_next(struct binheap_handle *handle)
{
	struct binheap_node *temp = handle->next;

	/* find a "bend" in the tree. */
	while(temp->parent && (temp == temp->parent->right)) {
		temp = temp->parent;
	}

	/* step over to sibling if we're not at root */
	if(temp->parent != NULL) {
		temp = temp->parent->right;
	}

	/* now travel left as far as possible. */
	while(temp->left != NULL) {
		temp = temp->left;
	}

	handle->next = temp;
}

@@ -198,13 +198,13 @@ static void __binheap_bubble_up(
//		dump_node_data2(handle, node);
//		BUG();
//	}

	while((node->parent != NULL) &&
		  ((node->data == BINHEAP_POISON) /* let BINHEAP_POISON data bubble to the top */ ||
		   handle->compare(node, node->parent))) {
		__binheap_swap(node->parent, node);
		node = node->parent;

//		if(!binheap_is_in_heap(node))
//		{
//			dump_node_data2(handle, node);
@@ -218,7 +218,7 @@ static void __binheap_bubble_up(
static void __binheap_bubble_down(struct binheap_handle *handle)
{
	struct binheap_node *node = handle->root;

	while(node->left != NULL) {
		if(node->right && handle->compare(node->right, node->left)) {
			if(handle->compare(node->right, node)) {
@@ -252,11 +252,11 @@ void __binheap_add(struct binheap_node *new_node,
//		dump_node_data2(handle, new_node);
//		BUG();
//	}

	new_node->data = data;
	new_node->ref = new_node;
	new_node->ref_ptr = &(new_node->ref);

	if(!binheap_empty(handle)) {
		/* insert left side first */
		if(handle->next->left == NULL) {
@@ -264,9 +264,9 @@ void __binheap_add(struct binheap_node *new_node,
			new_node->parent = handle->next;
			new_node->left = NULL;
			new_node->right = NULL;

			handle->last = new_node;

			__binheap_bubble_up(handle, new_node);
		}
		else {
@@ -275,20 +275,20 @@ void __binheap_add(struct binheap_node *new_node,
			new_node->parent = handle->next;
			new_node->left = NULL;
			new_node->right = NULL;

			handle->last = new_node;

			__binheap_update_next(handle);
			__binheap_bubble_up(handle, new_node);
		}
	}
	else {
		/* first node in heap */

		new_node->parent = NULL;
		new_node->left = NULL;
		new_node->right = NULL;

		handle->root = new_node;
		handle->next = new_node;
		handle->last = new_node;
@@ -308,27 +308,27 @@ void __binheap_delete_root(struct binheap_handle *handle,
		struct binheap_node *container)
{
	struct binheap_node *root = handle->root;

//	if(!binheap_is_in_heap(container))
//	{
//		dump_node_data2(handle, container);
//		BUG();
//	}

	if(root != container) {
		/* coalesce */
		__binheap_swap_safe(handle, root, container);
		root = container;
	}

	if(handle->last != root) {
		/* swap 'last' node up to root and bubble it down. */

		struct binheap_node *to_move = handle->last;

		if(to_move->parent != root) {
			handle->next = to_move->parent;

			if(handle->next->right == to_move) {
				/* disconnect from parent */
				to_move->parent->right = NULL;
@@ -337,16 +337,16 @@ void __binheap_delete_root(struct binheap_handle *handle,
			else {
				/* find new 'last' before we disconnect */
				__binheap_update_last(handle);

				/* disconnect from parent */
				to_move->parent->left = NULL;
			}
		}
		else {
			/* 'last' is direct child of root */

			handle->next = to_move;

			if(to_move == to_move->parent->right) {
				to_move->parent->right = NULL;
				handle->last = to_move->parent->left;
@@ -357,7 +357,7 @@ void __binheap_delete_root(struct binheap_handle *handle,
			}
		}
		to_move->parent = NULL;

		/* reconnect as root. We can't just swap data ptrs since root node
		 * may be freed after this function returns.
		 */
@@ -369,9 +369,9 @@ void __binheap_delete_root(struct binheap_handle *handle,
		if(to_move->right != NULL) {
			to_move->right->parent = to_move;
		}

		handle->root = to_move;

		/* bubble down */
		__binheap_bubble_down(handle);
	}
@@ -381,7 +381,7 @@ void __binheap_delete_root(struct binheap_handle *handle,
		handle->next = NULL;
		handle->last = NULL;
	}

	/* mark as removed */
	container->parent = BINHEAP_POISON;
}
@@ -396,25 +396,25 @@ void __binheap_delete(struct binheap_node *node_to_delete,
{
	struct binheap_node *target = node_to_delete->ref;
	void *temp_data = target->data;

//	if(!binheap_is_in_heap(node_to_delete))
//	{
//		dump_node_data2(handle, node_to_delete);
//		BUG();
//	}
//
//	if(!binheap_is_in_heap(target))
//	{
//		dump_node_data2(handle, target);
//		BUG();
//	}

	/* temporarily set data to null to allow node to bubble up to the top. */
	target->data = BINHEAP_POISON;

	__binheap_bubble_up(handle, target);
	__binheap_delete_root(handle, node_to_delete);

	node_to_delete->data = temp_data;  /* restore node data pointer */
	//node_to_delete->parent = BINHEAP_POISON;  /* poison the node */
}
@@ -426,18 +426,18 @@ void __binheap_decrease(struct binheap_node *orig_node,
		struct binheap_handle *handle)
{
	struct binheap_node *target = orig_node->ref;

//	if(!binheap_is_in_heap(orig_node))
//	{
//		dump_node_data2(handle, orig_node);
//		BUG();
//	}
//
//	if(!binheap_is_in_heap(target))
//	{
//		dump_node_data2(handle, target);
//		BUG();
//	}
//
	__binheap_bubble_up(handle, target);
}
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 250808e934a6..b346bdd65b3b 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -92,13 +92,13 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
//	if (!is_realtime(second_task)) {
//		return true;
//	}
//
//	if (shorter_period(first_task, second_task)) {
//		return true;
//	}
//
//	if (get_period(first_task) == get_period(second_task)) {
//		if (first_task->pid < second_task->pid) {
//			return true;
//		}
//		else if (first_task->pid == second_task->pid) {
@@ -114,12 +114,12 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
		return true;
	}
	if (get_deadline(first_task) == get_deadline(second_task)) {

		if (shorter_period(first_task, second_task)) {
			return true;
		}
		if (get_rt_period(first_task) == get_rt_period(second_task)) {
			if (first_task->pid < second_task->pid) {
				return true;
			}
			if (first_task->pid == second_task->pid) {
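The comparison visible in this hunk is a lexicographic tie-break: earlier deadline wins, then shorter period, then lower PID. A stand-alone sketch of just that ordering on plain values (the real edf_higher_prio() additionally handles priority inheritance and, under CONFIG_LITMUS_SOFTIRQD, proxy threads, as the next hunk shows):

    /* illustrative only: (deadline, period, pid) compared lexicographically */
    struct edf_key {
    	unsigned long long deadline;
    	unsigned long long period;
    	int pid;
    };

    static int edf_key_higher_prio(const struct edf_key *a, const struct edf_key *b)
    {
    	if (a->deadline != b->deadline)
    		return a->deadline < b->deadline;
    	if (a->period != b->period)
    		return a->period < b->period;
    	return a->pid < b->pid;
    }
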
@@ -131,14 +131,14 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
				if(first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread) {
					return !second->rt_param.inh_task;
				}
#else
				return !second->rt_param.inh_task;
#endif
			}

		}
	}

	return false;
}

diff --git a/litmus/fdso.c b/litmus/fdso.c
index fb328db77dec..0fc74be7f5ee 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -29,7 +29,7 @@ static const struct fdso_ops* fdso_ops[] = {
	&generic_lock_ops, /* IKGLP_SEM */
	&generic_lock_ops, /* KFMLP_SEM */
	&generic_affinity_ops, /* IKGLP_SIMPLE_GPU_AFF_OBS */
	&generic_affinity_ops, /* IKGLP_GPU_AFF_OBS */
	&generic_affinity_ops, /* KFMLP_SIMPLE_GPU_AFF_OBS */
	&generic_affinity_ops, /* KFMLP_GPU_AFF_OBS */
};
diff --git a/litmus/gpu_affinity.c b/litmus/gpu_affinity.c
index 87349fe10a9b..70a86bdd9aec 100644
--- a/litmus/gpu_affinity.c
+++ b/litmus/gpu_affinity.c
@@ -11,7 +11,7 @@ static void update_estimate(feedback_est_t* fb, fp_t a, fp_t b, lt_t observed)
{
	fp_t err, new;
	fp_t actual = _integer_to_fp(observed);

	err = _sub(actual, fb->est);
	new = _add(_mul(a, err), _mul(b, fb->accum_err));

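In plain notation, the visible part of update_estimate() forms the estimation error and a correction term weighted by the two feedback parameters; how that correction is folded back into fb->est and fb->accum_err lies outside this hunk:

    \[
    \mathrm{err} = \mathrm{observed} - \mathrm{est},
    \qquad
    \mathrm{new} = a \cdot \mathrm{err} + b \cdot \mathrm{accum\_err}
    \]
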
@@ -22,9 +22,9 @@ static void update_estimate(feedback_est_t* fb, fp_t a, fp_t b, lt_t observed)
void update_gpu_estimate(struct task_struct *t, lt_t observed)
{
	feedback_est_t *fb = &(tsk_rt(t)->gpu_migration_est[tsk_rt(t)->gpu_migration]);

	WARN_ON(tsk_rt(t)->gpu_migration > MIG_LAST);

	if(unlikely(fb->est.val == 0)) {
		// kludge-- cap observed values to prevent whacky estimations.
		// whacky stuff happens during the first few jobs.
@@ -32,8 +32,8 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed)
			TRACE_TASK(t, "Crazy observation was capped: %llu -> %llu\n",
				observed, OBSERVATION_CAP);
			observed = OBSERVATION_CAP;
		}

		// take the first observation as our estimate
		// (initial value of 0 was bogus anyhow)
		fb->est = _integer_to_fp(observed);
@@ -44,21 +44,21 @@ void update_gpu_estimate(struct task_struct *t, lt_t observed)
			tsk_rt(t)->gpu_fb_param_a,
			tsk_rt(t)->gpu_fb_param_b,
			observed);

		if(_fp_to_integer(fb->est) <= 0) {
			// TODO: talk to Jonathan about how well this works.
			// Maybe we should average the observed and est instead?
			TRACE_TASK(t, "Invalid estimate. Patching.\n");
			fb->est = _integer_to_fp(observed);
			fb->accum_err = _div(fb->est, _integer_to_fp(2));  // ...seems to work.
		}
	}

	TRACE_TASK(t, "GPU est update after (dist = %d, obs = %llu): %d.%d\n",
		tsk_rt(t)->gpu_migration,
		observed,
		_fp_to_integer(fb->est),
		_point(fb->est));
}

gpu_migration_dist_t gpu_migration_distance(int a, int b)
@@ -66,7 +66,7 @@ gpu_migration_dist_t gpu_migration_distance(int a, int b)
	// GPUs organized in a binary hierarchy, no more than 2^MIG_FAR GPUs
	int i;
	int dist;

	if(likely(a >= 0 && b >= 0)) {
		for(i = 0; i <= MIG_FAR; ++i) {
			if(a>>i == b>>i) {
@@ -80,11 +80,11 @@ gpu_migration_dist_t gpu_migration_distance(int a, int b)
	else {
		dist = MIG_NONE;
	}

out:
	TRACE_CUR("Distance %d -> %d is %d\n",
		a, b, dist);

	return dist;
}

diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index a41a9d9a3627..94c954464a96 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -368,7 +368,7 @@ static void ikglp_refresh_owners_prio_increase(struct task_struct *t,
{
	// priority of 't' has increased (note: 't' might already be hp_waiter).
//	if ((t == fq->hp_waiter) || edf_higher_prio(t, fq->hp_waiter)) {
	if ((t == fq->hp_waiter) || litmus->compare(t, fq->hp_waiter)) {
		struct task_struct *old_max_eff_prio;
		struct task_struct *new_max_eff_prio;
		struct task_struct *new_prio = NULL;
diff --git a/litmus/kexclu_affinity.c b/litmus/kexclu_affinity.c
index 552179bf797d..5ef5e54d600d 100644
--- a/litmus/kexclu_affinity.c
+++ b/litmus/kexclu_affinity.c
@@ -35,7 +35,7 @@ static int create_generic_aff_obs(void** obj_ref, obj_type_t type, void* __user
{
	struct affinity_observer* aff_obs;
	int err;

	err = litmus->allocate_aff_obs(&aff_obs, type, arg);
	if (err == 0) {
		BUG_ON(!aff_obs->lock);
@@ -73,9 +73,9 @@ static void destroy_generic_aff_obs(obj_type_t type, void* obj)
struct litmus_lock* get_lock_from_od(int od)
{
	extern struct fdso_ops generic_lock_ops;

	struct od_table_entry *entry = get_entry_for_od(od);

	if(entry && entry->class == &generic_lock_ops) {
		return (struct litmus_lock*) entry->obj->obj;
	}
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index 9bbe31a05b97..d0a6bd364c43 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -34,11 +34,11 @@ static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
{
	struct list_head *pos;
	struct task_struct *queued, *found = NULL;

	list_for_each(pos, &kqueue->wait.task_list) {
		queued = (struct task_struct*) list_entry(pos, wait_queue_t,
			task_list)->private;

		/* Compare task prios, find high prio task. */
		//if (queued != skip && edf_higher_prio(queued, found))
		if (queued != skip && litmus->compare(queued, found))
@@ -54,21 +54,21 @@ static inline struct kfmlp_queue* kfmlp_find_shortest(struct kfmlp_semaphore* se
	// queue list to load-balance across all resources.
	struct kfmlp_queue* step = search_start;
	struct kfmlp_queue* shortest = sem->shortest_queue;

	do
	{
		step = (step+1 != &sem->queues[sem->num_resources]) ?
			step+1 : &sem->queues[0];

		if(step->count < shortest->count)
		{
			shortest = step;
			if(step->count == 0)
				break; /* can't get any shorter */
		}

	}while(step != search_start);

	return(shortest);
}

@@ -77,13 +77,13 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem,
	wait_queue_t** to_steal,
	struct kfmlp_queue** to_steal_from)
{
	/* must hold sem->lock */

	int i;

	*to_steal = NULL;
	*to_steal_from = NULL;

	for(i = 0; i < sem->num_resources; ++i)
	{
		if( (sem->queues[i].count > 1) &&
@@ -94,17 +94,17 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem,
			*to_steal_from = &sem->queues[i];
		}
	}

	if(*to_steal_from)
	{
		struct list_head *pos;
		struct task_struct *target = (*to_steal_from)->hp_waiter;

		TRACE_CUR("want to steal hp_waiter (%s/%d) from queue %d\n",
			target->comm,
			target->pid,
			kfmlp_get_idx(sem, *to_steal_from));

		list_for_each(pos, &(*to_steal_from)->wait.task_list)
		{
			wait_queue_t *node = list_entry(pos, wait_queue_t, task_list);
@@ -113,21 +113,21 @@ static struct task_struct* kfmlp_select_hp_steal(struct kfmlp_semaphore* sem,
			if (queued == target)
			{
				*to_steal = node;

				TRACE_CUR("steal: selected %s/%d from queue %d\n",
					queued->comm, queued->pid,
					kfmlp_get_idx(sem, *to_steal_from));

				return queued;
			}
		}

		TRACE_CUR("Could not find %s/%d in queue %d!!! THIS IS A BUG!\n",
			target->comm,
			target->pid,
			kfmlp_get_idx(sem, *to_steal_from));
	}

	return NULL;
}

@@ -137,13 +137,13 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem,
	struct kfmlp_queue *src)
{
	struct task_struct* t = (struct task_struct*) wait->private;

	__remove_wait_queue(&src->wait, wait);
	--(src->count);

	if(t == src->hp_waiter) {
		src->hp_waiter = kfmlp_find_hp_waiter(src, NULL);

		TRACE_CUR("queue %d: %s/%d is new hp_waiter\n",
			kfmlp_get_idx(sem, src),
			(src->hp_waiter) ? src->hp_waiter->comm : "nil",
@@ -153,40 +153,40 @@ static void kfmlp_steal_node(struct kfmlp_semaphore *sem,
			litmus->decrease_prio(src->owner, src->hp_waiter);
		}
	}

	if(sem->shortest_queue->count > src->count) {
		sem->shortest_queue = src;
		TRACE_CUR("queue %d is the shortest\n", kfmlp_get_idx(sem, sem->shortest_queue));
	}

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	if(sem->aff_obs) {
		sem->aff_obs->ops->notify_dequeue(sem->aff_obs, src, t);
	}
#endif

	init_waitqueue_entry(wait, t);
	__add_wait_queue_tail_exclusive(&dst->wait, wait);
	++(dst->count);

	if(litmus->compare(t, dst->hp_waiter)) {
		dst->hp_waiter = t;

		TRACE_CUR("queue %d: %s/%d is new hp_waiter\n",
			kfmlp_get_idx(sem, dst),
			t->comm, t->pid);

		if(dst->owner && litmus->compare(t, dst->owner))
		{
			litmus->increase_prio(dst->owner, t);
		}
	}

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	if(sem->aff_obs) {
		sem->aff_obs->ops->notify_enqueue(sem->aff_obs, dst, t);
	}
#endif
}


@@ -197,13 +197,13 @@ int kfmlp_lock(struct litmus_lock* l)
	struct kfmlp_queue* my_queue = NULL;
	wait_queue_t wait;
	unsigned long flags;

	if (!is_realtime(t))
		return -EPERM;

	spin_lock_irqsave(&sem->lock, flags);

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
	if(sem->aff_obs) {
		my_queue = sem->aff_obs->ops->advise_enqueue(sem->aff_obs, t);
	}
@@ -213,25 +213,25 @@ int kfmlp_lock(struct litmus_lock* l)
#else
	my_queue = sem->shortest_queue;
#endif

	if (my_queue->owner) {
		/* resource is not free => must suspend and wait */
		TRACE_CUR("queue %d: Resource is not free => must suspend and wait. (queue size = %d)\n",
			kfmlp_get_idx(sem, my_queue),
			my_queue->count);

		init_waitqueue_entry(&wait, t);

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&my_queue->wait, &wait);

		TRACE_CUR("queue %d: hp_waiter is currently %s/%d\n",
			kfmlp_get_idx(sem, my_queue),
			(my_queue->hp_waiter) ? my_queue->hp_waiter->comm : "nil",
			(my_queue->hp_waiter) ? my_queue->hp_waiter->pid : -1);

		/* check if we need to activate priority inheritance */
		//if (edf_higher_prio(t, my_queue->hp_waiter))
		if (litmus->compare(t, my_queue->hp_waiter)) {
@@ -239,37 +239,37 @@ int kfmlp_lock(struct litmus_lock* l)
			TRACE_CUR("queue %d: %s/%d is new hp_waiter\n",
				kfmlp_get_idx(sem, my_queue),
				t->comm, t->pid);

			//if (edf_higher_prio(t, my_queue->owner))
			if (litmus->compare(t, my_queue->owner)) {
				litmus->increase_prio(my_queue->owner, my_queue->hp_waiter);
			}
		}

		++(my_queue->count);

		if(my_queue == sem->shortest_queue) {
			sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
			TRACE_CUR("queue %d is the shortest\n",
				kfmlp_get_idx(sem, sem->shortest_queue));
		}

#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
		if(sem->aff_obs) {
			sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t);
		}
#endif

		/* release lock before sleeping */
264 spin_unlock_irqrestore(&sem->lock, flags); 264 spin_unlock_irqrestore(&sem->lock, flags);
265 265
266 /* We depend on the FIFO order. Thus, we don't need to recheck 266 /* We depend on the FIFO order. Thus, we don't need to recheck
267 * when we wake up; we are guaranteed to have the lock since 267 * when we wake up; we are guaranteed to have the lock since
268 * there is only one wake up per release (or steal). 268 * there is only one wake up per release (or steal).
269 */ 269 */
270 schedule(); 270 schedule();
271 271
272 272
273 if(my_queue->owner == t) { 273 if(my_queue->owner == t) {
274 TRACE_CUR("queue %d: acquired through waiting\n", 274 TRACE_CUR("queue %d: acquired through waiting\n",
275 kfmlp_get_idx(sem, my_queue)); 275 kfmlp_get_idx(sem, my_queue));
@@ -278,7 +278,7 @@ int kfmlp_lock(struct litmus_lock* l)
278 /* this case may happen if our wait entry was stolen 278 /* this case may happen if our wait entry was stolen
279 between queues. record where we went. */ 279 between queues. record where we went. */
280 my_queue = kfmlp_get_queue(sem, t); 280 my_queue = kfmlp_get_queue(sem, t);
281 281
282 BUG_ON(!my_queue); 282 BUG_ON(!my_queue);
283 TRACE_CUR("queue %d: acquired through stealing\n", 283 TRACE_CUR("queue %d: acquired through stealing\n",
284 kfmlp_get_idx(sem, my_queue)); 284 kfmlp_get_idx(sem, my_queue));
@@ -287,28 +287,28 @@ int kfmlp_lock(struct litmus_lock* l)
287 else { 287 else {
288 TRACE_CUR("queue %d: acquired immediately\n", 288 TRACE_CUR("queue %d: acquired immediately\n",
289 kfmlp_get_idx(sem, my_queue)); 289 kfmlp_get_idx(sem, my_queue));
290 290
291 my_queue->owner = t; 291 my_queue->owner = t;
292 292
293 ++(my_queue->count); 293 ++(my_queue->count);
294 294
295 if(my_queue == sem->shortest_queue) { 295 if(my_queue == sem->shortest_queue) {
296 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); 296 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
297 TRACE_CUR("queue %d is the shortest\n", 297 TRACE_CUR("queue %d is the shortest\n",
298 kfmlp_get_idx(sem, sem->shortest_queue)); 298 kfmlp_get_idx(sem, sem->shortest_queue));
299 } 299 }
300 300
301#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 301#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
302 if(sem->aff_obs) { 302 if(sem->aff_obs) {
303 sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t); 303 sem->aff_obs->ops->notify_enqueue(sem->aff_obs, my_queue, t);
304 sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, t); 304 sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, t);
305 } 305 }
306#endif 306#endif
307 307
308 spin_unlock_irqrestore(&sem->lock, flags); 308 spin_unlock_irqrestore(&sem->lock, flags);
309 } 309 }
310 310
311 311
312#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 312#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
313 if(sem->aff_obs) { 313 if(sem->aff_obs) {
314 return sem->aff_obs->ops->replica_to_resource(sem->aff_obs, my_queue); 314 return sem->aff_obs->ops->replica_to_resource(sem->aff_obs, my_queue);
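The enqueue path above falls back to sem->shortest_queue when no affinity observer is attached, and re-derives it with kfmlp_find_shortest() once its own queue grows. kfmlp_find_shortest() itself is not part of this diff, so the following is only a minimal sketch of such a scan, assuming the kfmlp_semaphore/kfmlp_queue fields (num_resources, queues[], count) that appear elsewhere in this file:

	/* Illustrative sketch only -- not the function from this patch.
	 * Scan every replica queue and return the one with the fewest
	 * waiters/holders. */
	static struct kfmlp_queue* kfmlp_find_shortest_sketch(
		struct kfmlp_semaphore *sem,
		struct kfmlp_queue *search_start)
	{
		struct kfmlp_queue *shortest = search_start;
		int i;

		for (i = 0; i < sem->num_resources; ++i) {
			if (sem->queues[i].count < shortest->count)
				shortest = &sem->queues[i];
		}
		return shortest;
	}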
@@ -325,56 +325,56 @@ int kfmlp_unlock(struct litmus_lock* l)
325 struct kfmlp_queue *my_queue, *to_steal_from; 325 struct kfmlp_queue *my_queue, *to_steal_from;
326 unsigned long flags; 326 unsigned long flags;
327 int err = 0; 327 int err = 0;
328 328
329 my_queue = kfmlp_get_queue(sem, t); 329 my_queue = kfmlp_get_queue(sem, t);
330 330
331 if (!my_queue) { 331 if (!my_queue) {
332 err = -EINVAL; 332 err = -EINVAL;
333 goto out; 333 goto out;
334 } 334 }
335 335
336 spin_lock_irqsave(&sem->lock, flags); 336 spin_lock_irqsave(&sem->lock, flags);
337 337
338 TRACE_CUR("queue %d: unlocking\n", kfmlp_get_idx(sem, my_queue)); 338 TRACE_CUR("queue %d: unlocking\n", kfmlp_get_idx(sem, my_queue));
339 339
340 my_queue->owner = NULL; // clear ownership 340 my_queue->owner = NULL; // clear ownership
341 --(my_queue->count); 341 --(my_queue->count);
342 342
343 if(my_queue->count < sem->shortest_queue->count) 343 if(my_queue->count < sem->shortest_queue->count)
344 { 344 {
345 sem->shortest_queue = my_queue; 345 sem->shortest_queue = my_queue;
346 TRACE_CUR("queue %d is the shortest\n", 346 TRACE_CUR("queue %d is the shortest\n",
347 kfmlp_get_idx(sem, sem->shortest_queue)); 347 kfmlp_get_idx(sem, sem->shortest_queue));
348 } 348 }
349 349
350#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 350#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
351 if(sem->aff_obs) { 351 if(sem->aff_obs) {
352 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, my_queue, t); 352 sem->aff_obs->ops->notify_dequeue(sem->aff_obs, my_queue, t);
353 sem->aff_obs->ops->notify_freed(sem->aff_obs, my_queue, t); 353 sem->aff_obs->ops->notify_freed(sem->aff_obs, my_queue, t);
354 } 354 }
355#endif 355#endif
356 356
357 /* we lose the benefit of priority inheritance (if any) */ 357 /* we lose the benefit of priority inheritance (if any) */
358 if (tsk_rt(t)->inh_task) 358 if (tsk_rt(t)->inh_task)
359 litmus->decrease_prio(t, NULL); 359 litmus->decrease_prio(t, NULL);
360 360
361 361
362 /* check if there are jobs waiting for this resource */ 362 /* check if there are jobs waiting for this resource */
363RETRY: 363RETRY:
364 next = __waitqueue_remove_first(&my_queue->wait); 364 next = __waitqueue_remove_first(&my_queue->wait);
365 if (next) { 365 if (next) {
366 /* next becomes the resource holder */ 366 /* next becomes the resource holder */
367 my_queue->owner = next; 367 my_queue->owner = next;
368 368
369#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 369#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
370 if(sem->aff_obs) { 370 if(sem->aff_obs) {
371 sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, next); 371 sem->aff_obs->ops->notify_acquired(sem->aff_obs, my_queue, next);
372 } 372 }
373#endif 373#endif
374 374
375 TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", 375 TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
376 kfmlp_get_idx(sem, my_queue), next->comm, next->pid); 376 kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
377 377
378 /* determine new hp_waiter if necessary */ 378 /* determine new hp_waiter if necessary */
379 if (next == my_queue->hp_waiter) { 379 if (next == my_queue->hp_waiter) {
380 TRACE_TASK(next, "was highest-prio waiter\n"); 380 TRACE_TASK(next, "was highest-prio waiter\n");
@@ -389,7 +389,7 @@ RETRY:
389 * waiter's priority. */ 389 * waiter's priority. */
390 litmus->increase_prio(next, my_queue->hp_waiter); 390 litmus->increase_prio(next, my_queue->hp_waiter);
391 } 391 }
392 392
393 /* wake up next */ 393 /* wake up next */
394 wake_up_process(next); 394 wake_up_process(next);
395 } 395 }
@@ -397,7 +397,7 @@ RETRY:
397 // TODO: put this stealing logic before we attempt to release 397 // TODO: put this stealing logic before we attempt to release
398 // our resource. (simplifies code and gets rid of ugly goto RETRY.) 398 // our resource. (simplifies code and gets rid of ugly goto RETRY.)
399 wait_queue_t *wait; 399 wait_queue_t *wait;
400 400
401 TRACE_CUR("queue %d: looking to steal someone...\n", 401 TRACE_CUR("queue %d: looking to steal someone...\n",
402 kfmlp_get_idx(sem, my_queue)); 402 kfmlp_get_idx(sem, my_queue));
403 403
@@ -408,26 +408,26 @@ RETRY:
408#else 408#else
409 next = kfmlp_select_hp_steal(sem, &wait, &to_steal_from); 409 next = kfmlp_select_hp_steal(sem, &wait, &to_steal_from);
410#endif 410#endif
411 411
412 if(next) { 412 if(next) {
413 TRACE_CUR("queue %d: stealing %s/%d from queue %d\n", 413 TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
414 kfmlp_get_idx(sem, my_queue), 414 kfmlp_get_idx(sem, my_queue),
415 next->comm, next->pid, 415 next->comm, next->pid,
416 kfmlp_get_idx(sem, to_steal_from)); 416 kfmlp_get_idx(sem, to_steal_from));
417 417
418 kfmlp_steal_node(sem, my_queue, wait, to_steal_from); 418 kfmlp_steal_node(sem, my_queue, wait, to_steal_from);
419 419
420 goto RETRY; // will succeed this time. 420 goto RETRY; // will succeed this time.
421 } 421 }
422 else { 422 else {
423 TRACE_CUR("queue %d: no one to steal.\n", 423 TRACE_CUR("queue %d: no one to steal.\n",
424 kfmlp_get_idx(sem, my_queue)); 424 kfmlp_get_idx(sem, my_queue));
425 } 425 }
426 } 426 }
427 427
428 spin_unlock_irqrestore(&sem->lock, flags); 428 spin_unlock_irqrestore(&sem->lock, flags);
429 429
430out: 430out:
431 return err; 431 return err;
432} 432}
433 433
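The TODO in the hunk above proposes moving the stealing step ahead of the hand-over so the RETRY label can go away. One possible shape of that restructuring, kept as a hedged sketch that reuses only helpers visible in this diff (__waitqueue_remove_first, kfmlp_select_hp_steal, kfmlp_steal_node) and omits the CONFIG_LITMUS_AFFINITY_LOCKING advise_steal branch:

	/* Sketch only: steal a waiter into an empty queue first, then use a
	 * single hand-over path; not code from this patch. */
	next = __waitqueue_remove_first(&my_queue->wait);
	if (!next) {
		wait_queue_t *steal_wait;
		if (kfmlp_select_hp_steal(sem, &steal_wait, &to_steal_from)) {
			kfmlp_steal_node(sem, my_queue, steal_wait, to_steal_from);
			next = __waitqueue_remove_first(&my_queue->wait);
		}
	}
	if (next) {
		my_queue->owner = next;
		/* hp_waiter/priority-inheritance update and
		 * wake_up_process(next) proceed as in the hunk above. */
	}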
@@ -437,19 +437,19 @@ int kfmlp_close(struct litmus_lock* l)
437 struct kfmlp_semaphore *sem = kfmlp_from_lock(l); 437 struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
438 struct kfmlp_queue *my_queue; 438 struct kfmlp_queue *my_queue;
439 unsigned long flags; 439 unsigned long flags;
440 440
441 int owner; 441 int owner;
442 442
443 spin_lock_irqsave(&sem->lock, flags); 443 spin_lock_irqsave(&sem->lock, flags);
444 444
445 my_queue = kfmlp_get_queue(sem, t); 445 my_queue = kfmlp_get_queue(sem, t);
446 owner = (my_queue) ? (my_queue->owner == t) : 0; 446 owner = (my_queue) ? (my_queue->owner == t) : 0;
447 447
448 spin_unlock_irqrestore(&sem->lock, flags); 448 spin_unlock_irqrestore(&sem->lock, flags);
449 449
450 if (owner) 450 if (owner)
451 kfmlp_unlock(l); 451 kfmlp_unlock(l);
452 452
453 return 0; 453 return 0;
454} 454}
455 455
@@ -467,7 +467,7 @@ struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user args)
467 struct kfmlp_semaphore* sem; 467 struct kfmlp_semaphore* sem;
468 int num_resources = 0; 468 int num_resources = 0;
469 int i; 469 int i;
470 470
471 if(!access_ok(VERIFY_READ, args, sizeof(num_resources))) 471 if(!access_ok(VERIFY_READ, args, sizeof(num_resources)))
472 { 472 {
473 return(NULL); 473 return(NULL);
@@ -478,26 +478,26 @@ struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user args)
478 } 478 }
479 if(num_resources < 1) 479 if(num_resources < 1)
480 { 480 {
481 return(NULL); 481 return(NULL);
482 } 482 }
483 483
484 sem = kmalloc(sizeof(*sem), GFP_KERNEL); 484 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
485 if(!sem) 485 if(!sem)
486 { 486 {
487 return(NULL); 487 return(NULL);
488 } 488 }
489 489
490 sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); 490 sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
491 if(!sem->queues) 491 if(!sem->queues)
492 { 492 {
493 kfree(sem); 493 kfree(sem);
494 return(NULL); 494 return(NULL);
495 } 495 }
496 496
497 sem->litmus_lock.ops = ops; 497 sem->litmus_lock.ops = ops;
498 spin_lock_init(&sem->lock); 498 spin_lock_init(&sem->lock);
499 sem->num_resources = num_resources; 499 sem->num_resources = num_resources;
500 500
501 for(i = 0; i < num_resources; ++i) 501 for(i = 0; i < num_resources; ++i)
502 { 502 {
503 sem->queues[i].owner = NULL; 503 sem->queues[i].owner = NULL;
@@ -505,7 +505,7 @@ struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user args)
505 init_waitqueue_head(&sem->queues[i].wait); 505 init_waitqueue_head(&sem->queues[i].wait);
506 sem->queues[i].count = 0; 506 sem->queues[i].count = 0;
507 } 507 }
508 508
509 sem->shortest_queue = &sem->queues[0]; 509 sem->shortest_queue = &sem->queues[0];
510 510
511 return &sem->litmus_lock; 511 return &sem->litmus_lock;
@@ -517,13 +517,13 @@ struct litmus_lock* kfmlp_new(struct litmus_lock_ops* ops, void* __user args)
517#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) 517#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
518 518
519static inline int __replica_to_gpu(struct kfmlp_affinity* aff, int replica) 519static inline int __replica_to_gpu(struct kfmlp_affinity* aff, int replica)
520{ 520{
521 int gpu = replica % aff->nr_rsrc; 521 int gpu = replica % aff->nr_rsrc;
522 return gpu; 522 return gpu;
523} 523}
524 524
525static inline int replica_to_gpu(struct kfmlp_affinity* aff, int replica) 525static inline int replica_to_gpu(struct kfmlp_affinity* aff, int replica)
526{ 526{
527 int gpu = __replica_to_gpu(aff, replica) + aff->offset; 527 int gpu = __replica_to_gpu(aff, replica) + aff->offset;
528 return gpu; 528 return gpu;
529} 529}
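Since nr_rsrc = num_resources / nr_simult (set further down in kfmlp_aff_obs_new), the pair of helpers above folds several replicas onto one physical GPU and then applies the configured offset. A small worked example with made-up values (nr_simult = 2, num_resources = 4, offset = 1):

	/* Example values only, not from the patch:
	 *   nr_rsrc = 4 / 2 = 2
	 *   replica 0 -> (0 % 2) + 1 = GPU 1
	 *   replica 1 -> (1 % 2) + 1 = GPU 2
	 *   replica 2 -> (2 % 2) + 1 = GPU 1
	 *   replica 3 -> (3 % 2) + 1 = GPU 2
	 * Replicas {0, 2} share GPU 1 and replicas {1, 3} share GPU 2,
	 * i.e. each GPU serves nr_simult = 2 replicas. */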
@@ -557,21 +557,21 @@ static struct affinity_observer* kfmlp_aff_obs_new(struct affinity_observer_ops*
557 struct kfmlp_semaphore* sem; 557 struct kfmlp_semaphore* sem;
558 int i; 558 int i;
559 unsigned long flags; 559 unsigned long flags;
560 560
561 if(!access_ok(VERIFY_READ, args, sizeof(aff_args))) { 561 if(!access_ok(VERIFY_READ, args, sizeof(aff_args))) {
562 return(NULL); 562 return(NULL);
563 } 563 }
564 if(__copy_from_user(&aff_args, args, sizeof(aff_args))) { 564 if(__copy_from_user(&aff_args, args, sizeof(aff_args))) {
565 return(NULL); 565 return(NULL);
566 } 566 }
567 567
568 sem = (struct kfmlp_semaphore*) get_lock_from_od(aff_args.obs.lock_od); 568 sem = (struct kfmlp_semaphore*) get_lock_from_od(aff_args.obs.lock_od);
569 569
570 if(sem->litmus_lock.type != KFMLP_SEM) { 570 if(sem->litmus_lock.type != KFMLP_SEM) {
571 TRACE_CUR("Lock type not supported. Type = %d\n", sem->litmus_lock.type); 571 TRACE_CUR("Lock type not supported. Type = %d\n", sem->litmus_lock.type);
572 return(NULL); 572 return(NULL);
573 } 573 }
574 574
575 if((aff_args.nr_simult_users <= 0) || 575 if((aff_args.nr_simult_users <= 0) ||
576 (sem->num_resources%aff_args.nr_simult_users != 0)) { 576 (sem->num_resources%aff_args.nr_simult_users != 0)) {
577 TRACE_CUR("Lock %d does not support #replicas (%d) for #simult_users " 577 TRACE_CUR("Lock %d does not support #replicas (%d) for #simult_users "
@@ -582,62 +582,62 @@ static struct affinity_observer* kfmlp_aff_obs_new(struct affinity_observer_ops*
582 aff_args.nr_simult_users); 582 aff_args.nr_simult_users);
583 return(NULL); 583 return(NULL);
584 } 584 }
585 585
586 if(aff_args.nr_simult_users > NV_MAX_SIMULT_USERS) { 586 if(aff_args.nr_simult_users > NV_MAX_SIMULT_USERS) {
587 TRACE_CUR("System does not support #simult_users >%d. %d requested.\n", 587 TRACE_CUR("System does not support #simult_users >%d. %d requested.\n",
588 NV_MAX_SIMULT_USERS, aff_args.nr_simult_users); 588 NV_MAX_SIMULT_USERS, aff_args.nr_simult_users);
589 return(NULL); 589 return(NULL);
590 } 590 }
591 591
592 kfmlp_aff = kmalloc(sizeof(*kfmlp_aff), GFP_KERNEL); 592 kfmlp_aff = kmalloc(sizeof(*kfmlp_aff), GFP_KERNEL);
593 if(!kfmlp_aff) { 593 if(!kfmlp_aff) {
594 return(NULL); 594 return(NULL);
595 } 595 }
596 596
597 kfmlp_aff->q_info = kmalloc(sizeof(struct kfmlp_queue_info)*sem->num_resources, GFP_KERNEL); 597 kfmlp_aff->q_info = kmalloc(sizeof(struct kfmlp_queue_info)*sem->num_resources, GFP_KERNEL);
598 if(!kfmlp_aff->q_info) { 598 if(!kfmlp_aff->q_info) {
599 kfree(kfmlp_aff); 599 kfree(kfmlp_aff);
600 return(NULL); 600 return(NULL);
601 } 601 }
602 602
603 kfmlp_aff->nr_cur_users_on_rsrc = kmalloc(sizeof(int)*(sem->num_resources / aff_args.nr_simult_users), GFP_KERNEL); 603 kfmlp_aff->nr_cur_users_on_rsrc = kmalloc(sizeof(int)*(sem->num_resources / aff_args.nr_simult_users), GFP_KERNEL);
604 if(!kfmlp_aff->nr_cur_users_on_rsrc) { 604 if(!kfmlp_aff->nr_cur_users_on_rsrc) {
605 kfree(kfmlp_aff->q_info); 605 kfree(kfmlp_aff->q_info);
606 kfree(kfmlp_aff); 606 kfree(kfmlp_aff);
607 return(NULL); 607 return(NULL);
608 } 608 }
609 609
610 affinity_observer_new(&kfmlp_aff->obs, ops, &aff_args.obs); 610 affinity_observer_new(&kfmlp_aff->obs, ops, &aff_args.obs);
611 611
612 kfmlp_aff->ops = kfmlp_ops; 612 kfmlp_aff->ops = kfmlp_ops;
613 kfmlp_aff->offset = aff_args.replica_to_gpu_offset; 613 kfmlp_aff->offset = aff_args.replica_to_gpu_offset;
614 kfmlp_aff->nr_simult = aff_args.nr_simult_users; 614 kfmlp_aff->nr_simult = aff_args.nr_simult_users;
615 kfmlp_aff->nr_rsrc = sem->num_resources / kfmlp_aff->nr_simult; 615 kfmlp_aff->nr_rsrc = sem->num_resources / kfmlp_aff->nr_simult;
616 616
617 memset(kfmlp_aff->nr_cur_users_on_rsrc, 0, sizeof(int)*(sem->num_resources / kfmlp_aff->nr_simult)); 617 memset(kfmlp_aff->nr_cur_users_on_rsrc, 0, sizeof(int)*(sem->num_resources / kfmlp_aff->nr_simult));
618 618
619 for(i = 0; i < sem->num_resources; ++i) { 619 for(i = 0; i < sem->num_resources; ++i) {
620 kfmlp_aff->q_info[i].q = &sem->queues[i]; 620 kfmlp_aff->q_info[i].q = &sem->queues[i];
621 kfmlp_aff->q_info[i].estimated_len = 0; 621 kfmlp_aff->q_info[i].estimated_len = 0;
622 622
623 // multiple q_info's will point to the same resource (aka GPU) if 623 // multiple q_info's will point to the same resource (aka GPU) if
624 // aff_args.nr_simult_users > 1 624 // aff_args.nr_simult_users > 1
625 kfmlp_aff->q_info[i].nr_cur_users = &kfmlp_aff->nr_cur_users_on_rsrc[__replica_to_gpu(kfmlp_aff,i)]; 625 kfmlp_aff->q_info[i].nr_cur_users = &kfmlp_aff->nr_cur_users_on_rsrc[__replica_to_gpu(kfmlp_aff,i)];
626 } 626 }
627 627
628 // attach observer to the lock 628 // attach observer to the lock
629 spin_lock_irqsave(&sem->lock, flags); 629 spin_lock_irqsave(&sem->lock, flags);
630 sem->aff_obs = kfmlp_aff; 630 sem->aff_obs = kfmlp_aff;
631 //kfmlp_aff->shortest_queue = &kfmlp_aff->q_info[kfmlp_get_idx(sem, sem->shortest_queue)]; 631 //kfmlp_aff->shortest_queue = &kfmlp_aff->q_info[kfmlp_get_idx(sem, sem->shortest_queue)];
632 spin_unlock_irqrestore(&sem->lock, flags); 632 spin_unlock_irqrestore(&sem->lock, flags);
633 633
634 return &kfmlp_aff->obs; 634 return &kfmlp_aff->obs;
635} 635}
636 636
637 637
638 638
639 639
640static int gpu_replica_to_resource(struct kfmlp_affinity* aff, 640static int gpu_replica_to_resource(struct kfmlp_affinity* aff,
641 struct kfmlp_queue* fq) { 641 struct kfmlp_queue* fq) {
642 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); 642 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
643 return(replica_to_gpu(aff, kfmlp_get_idx(sem, fq))); 643 return(replica_to_gpu(aff, kfmlp_get_idx(sem, fq)));
@@ -651,13 +651,13 @@ static inline struct kfmlp_queue_info* kfmlp_aff_find_shortest(struct kfmlp_affi
651 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); 651 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
652 struct kfmlp_queue_info *shortest = &aff->q_info[0]; 652 struct kfmlp_queue_info *shortest = &aff->q_info[0];
653 int i; 653 int i;
654 654
655 for(i = 1; i < sem->num_resources; ++i) { 655 for(i = 1; i < sem->num_resources; ++i) {
656 if(aff->q_info[i].estimated_len < shortest->estimated_len) { 656 if(aff->q_info[i].estimated_len < shortest->estimated_len) {
657 shortest = &aff->q_info[i]; 657 shortest = &aff->q_info[i];
658 } 658 }
659 } 659 }
660 660
661 return(shortest); 661 return(shortest);
662} 662}
663 663
@@ -670,7 +670,7 @@ struct kfmlp_queue* gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct
670 struct kfmlp_queue *to_enqueue; 670 struct kfmlp_queue *to_enqueue;
671 int i; 671 int i;
672 int affinity_gpu; 672 int affinity_gpu;
673 673
674 // simply pick the shortest queue if we have no affinity, or if we have 674 // simply pick the shortest queue if we have no affinity, or if we have
675 // affinity with the shortest 675 // affinity with the shortest
676 if(unlikely(tsk_rt(t)->last_gpu < 0)) { 676 if(unlikely(tsk_rt(t)->last_gpu < 0)) {
@@ -680,20 +680,20 @@ struct kfmlp_queue* gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct
680 else { 680 else {
681 affinity_gpu = tsk_rt(t)->last_gpu; 681 affinity_gpu = tsk_rt(t)->last_gpu;
682 } 682 }
683 683
684 // all things being equal, let's start with the queue with which we have 684 // all things being equal, let's start with the queue with which we have
685 // affinity. this helps us maintain affinity even when we don't have 685 // affinity. this helps us maintain affinity even when we don't have
686 // an estimate for local-affinity execution time (i.e., 2nd time on GPU) 686 // an estimate for local-affinity execution time (i.e., 2nd time on GPU)
687 shortest = &aff->q_info[gpu_to_base_replica(aff, affinity_gpu)]; 687 shortest = &aff->q_info[gpu_to_base_replica(aff, affinity_gpu)];
688 688
689// if(shortest == aff->shortest_queue) { 689// if(shortest == aff->shortest_queue) {
690// TRACE_CUR("special case: have affinity with shortest queue\n"); 690// TRACE_CUR("special case: have affinity with shortest queue\n");
691// goto out; 691// goto out;
692// } 692// }
693 693
694 min_len = shortest->estimated_len + get_gpu_estimate(t, MIG_LOCAL); 694 min_len = shortest->estimated_len + get_gpu_estimate(t, MIG_LOCAL);
695 min_nr_users = *(shortest->nr_cur_users); 695 min_nr_users = *(shortest->nr_cur_users);
696 696
697 TRACE_CUR("cs is %llu on queue %d: est len = %llu\n", 697 TRACE_CUR("cs is %llu on queue %d: est len = %llu\n",
698 get_gpu_estimate(t, MIG_LOCAL), 698 get_gpu_estimate(t, MIG_LOCAL),
699 kfmlp_get_idx(sem, shortest->q), 699 kfmlp_get_idx(sem, shortest->q),
@@ -701,11 +701,11 @@ struct kfmlp_queue* gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct
701 701
702 for(i = 0; i < sem->num_resources; ++i) { 702 for(i = 0; i < sem->num_resources; ++i) {
703 if(&aff->q_info[i] != shortest) { 703 if(&aff->q_info[i] != shortest) {
704 704
705 lt_t est_len = 705 lt_t est_len =
706 aff->q_info[i].estimated_len + 706 aff->q_info[i].estimated_len +
707 get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, replica_to_gpu(aff, i))); 707 get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, replica_to_gpu(aff, i)));
708 708
709 // queue is smaller, or they're equal and the other has a smaller number 709 // queue is smaller, or they're equal and the other has a smaller number
710 // of total users. 710 // of total users.
711 // 711 //
@@ -717,29 +717,29 @@ struct kfmlp_queue* gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff, struct
717 min_len = est_len; 717 min_len = est_len;
718 min_nr_users = *(aff->q_info[i].nr_cur_users); 718 min_nr_users = *(aff->q_info[i].nr_cur_users);
719 } 719 }
720 720
721 TRACE_CUR("cs is %llu on queue %d: est len = %llu\n", 721 TRACE_CUR("cs is %llu on queue %d: est len = %llu\n",
722 get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, replica_to_gpu(aff, i))), 722 get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, replica_to_gpu(aff, i))),
723 kfmlp_get_idx(sem, aff->q_info[i].q), 723 kfmlp_get_idx(sem, aff->q_info[i].q),
724 est_len); 724 est_len);
725 } 725 }
726 } 726 }
727 727
728 to_enqueue = shortest->q; 728 to_enqueue = shortest->q;
729 TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n", 729 TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n",
730 kfmlp_get_idx(sem, to_enqueue), 730 kfmlp_get_idx(sem, to_enqueue),
731 kfmlp_get_idx(sem, sem->shortest_queue)); 731 kfmlp_get_idx(sem, sem->shortest_queue));
732 732
733 return to_enqueue; 733 return to_enqueue;
734} 734}
735 735
736struct task_struct* gpu_kfmlp_advise_steal(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from) 736struct task_struct* gpu_kfmlp_advise_steal(struct kfmlp_affinity* aff, wait_queue_t** to_steal, struct kfmlp_queue** to_steal_from)
737{ 737{
738 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); 738 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
739 739
740 // For now, just steal highest priority waiter 740 // For now, just steal highest priority waiter
741 // TODO: Implement affinity-aware stealing. 741 // TODO: Implement affinity-aware stealing.
742 742
743 return kfmlp_select_hp_steal(sem, to_steal, to_steal_from); 743 return kfmlp_select_hp_steal(sem, to_steal, to_steal_from);
744} 744}
745 745
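The loop in gpu_kfmlp_advise_enqueue above minimizes estimated_len plus the migration-adjusted critical-section estimate, breaking ties in favor of the GPU with fewer current holders. A worked example with invented numbers (the estimates and distances are illustrative, not measured):

	/* Suppose the task last ran on GPU 0:
	 *   replica 0 (GPU 0): est len 300us + 100us (MIG_LOCAL)      = 400us
	 *   replica 1 (GPU 1): est len 250us + 200us (near migration) = 450us
	 *   replica 2 (GPU 2): est len 150us + 400us (far migration)  = 550us
	 * Replica 0 is chosen even though replica 2 has the shortest raw
	 * queue, because the migration penalty dominates. If two totals were
	 * equal, the replica whose GPU has the smaller *nr_cur_users wins. */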
@@ -752,24 +752,24 @@ void gpu_kfmlp_notify_enqueue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq
752 struct kfmlp_queue_info *info = &aff->q_info[replica]; 752 struct kfmlp_queue_info *info = &aff->q_info[replica];
753 lt_t est_time; 753 lt_t est_time;
754 lt_t est_len_before; 754 lt_t est_len_before;
755 755
756 if(current == t) { 756 if(current == t) {
757 tsk_rt(t)->suspend_gpu_tracker_on_block = 1; 757 tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
758 } 758 }
759 759
760 est_len_before = info->estimated_len; 760 est_len_before = info->estimated_len;
761 est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); 761 est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu));
762 info->estimated_len += est_time; 762 info->estimated_len += est_time;
763 763
764 TRACE_CUR("fq %d: q_len (%llu) + est_cs (%llu) = %llu\n", 764 TRACE_CUR("fq %d: q_len (%llu) + est_cs (%llu) = %llu\n",
765 kfmlp_get_idx(sem, info->q), 765 kfmlp_get_idx(sem, info->q),
766 est_len_before, est_time, 766 est_len_before, est_time,
767 info->estimated_len); 767 info->estimated_len);
768 768
769// if(aff->shortest_queue == info) { 769// if(aff->shortest_queue == info) {
770// // we may no longer be the shortest 770// // we may no longer be the shortest
771// aff->shortest_queue = kfmlp_aff_find_shortest(aff); 771// aff->shortest_queue = kfmlp_aff_find_shortest(aff);
772// 772//
773// TRACE_CUR("shortest queue is fq %d (with %d in queue) has est len %llu\n", 773// TRACE_CUR("shortest queue is fq %d (with %d in queue) has est len %llu\n",
774// kfmlp_get_idx(sem, aff->shortest_queue->q), 774// kfmlp_get_idx(sem, aff->shortest_queue->q),
775// aff->shortest_queue->q->count, 775// aff->shortest_queue->q->count,
@@ -784,7 +784,7 @@ void gpu_kfmlp_notify_dequeue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq
784 int gpu = replica_to_gpu(aff, replica); 784 int gpu = replica_to_gpu(aff, replica);
785 struct kfmlp_queue_info *info = &aff->q_info[replica]; 785 struct kfmlp_queue_info *info = &aff->q_info[replica];
786 lt_t est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); 786 lt_t est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu));
787 787
788 if(est_time > info->estimated_len) { 788 if(est_time > info->estimated_len) {
789 WARN_ON(1); 789 WARN_ON(1);
790 info->estimated_len = 0; 790 info->estimated_len = 0;
@@ -792,21 +792,21 @@ void gpu_kfmlp_notify_dequeue(struct kfmlp_affinity* aff, struct kfmlp_queue* fq
792 else { 792 else {
793 info->estimated_len -= est_time; 793 info->estimated_len -= est_time;
794 } 794 }
795 795
796 TRACE_CUR("fq %d est len is now %llu\n", 796 TRACE_CUR("fq %d est len is now %llu\n",
797 kfmlp_get_idx(sem, info->q), 797 kfmlp_get_idx(sem, info->q),
798 info->estimated_len); 798 info->estimated_len);
799 799
800 // check to see if we're the shortest queue now. 800 // check to see if we're the shortest queue now.
801// if((aff->shortest_queue != info) && 801// if((aff->shortest_queue != info) &&
802// (aff->shortest_queue->estimated_len > info->estimated_len)) { 802// (aff->shortest_queue->estimated_len > info->estimated_len)) {
803// 803//
804// aff->shortest_queue = info; 804// aff->shortest_queue = info;
805// 805//
806// TRACE_CUR("shortest queue is fq %d (with %d in queue) has est len %llu\n", 806// TRACE_CUR("shortest queue is fq %d (with %d in queue) has est len %llu\n",
807// kfmlp_get_idx(sem, info->q), 807// kfmlp_get_idx(sem, info->q),
808// info->q->count, 808// info->q->count,
809// info->estimated_len); 809// info->estimated_len);
810// } 810// }
811} 811}
812 812
@@ -815,17 +815,17 @@ void gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* f
815 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); 815 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
816 int replica = kfmlp_get_idx(sem, fq); 816 int replica = kfmlp_get_idx(sem, fq);
817 int gpu = replica_to_gpu(aff, replica); 817 int gpu = replica_to_gpu(aff, replica);
818 818
819 tsk_rt(t)->gpu_migration = gpu_migration_distance(tsk_rt(t)->last_gpu, gpu); // record the type of migration 819 tsk_rt(t)->gpu_migration = gpu_migration_distance(tsk_rt(t)->last_gpu, gpu); // record the type of migration
820 820
821 TRACE_CUR("%s/%d acquired gpu %d. migration type = %d\n", 821 TRACE_CUR("%s/%d acquired gpu %d. migration type = %d\n",
822 t->comm, t->pid, gpu, tsk_rt(t)->gpu_migration); 822 t->comm, t->pid, gpu, tsk_rt(t)->gpu_migration);
823 823
824 // count the number of resource holders 824 // count the number of resource holders
825 ++(*(aff->q_info[replica].nr_cur_users)); 825 ++(*(aff->q_info[replica].nr_cur_users));
826 826
827 reg_nv_device(gpu, 1, t); // register 827 reg_nv_device(gpu, 1, t); // register
828 828
829 tsk_rt(t)->suspend_gpu_tracker_on_block = 0; 829 tsk_rt(t)->suspend_gpu_tracker_on_block = 0;
830 reset_gpu_tracker(t); 830 reset_gpu_tracker(t);
831 start_gpu_tracker(t); 831 start_gpu_tracker(t);
@@ -837,21 +837,21 @@ void gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queue* fq,
837 int replica = kfmlp_get_idx(sem, fq); 837 int replica = kfmlp_get_idx(sem, fq);
838 int gpu = replica_to_gpu(aff, replica); 838 int gpu = replica_to_gpu(aff, replica);
839 lt_t est_time; 839 lt_t est_time;
840 840
841 stop_gpu_tracker(t); // stop the tracker before we do anything else. 841 stop_gpu_tracker(t); // stop the tracker before we do anything else.
842 842
843 est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu)); 843 est_time = get_gpu_estimate(t, gpu_migration_distance(tsk_rt(t)->last_gpu, gpu));
844 844
845 tsk_rt(t)->last_gpu = gpu; 845 tsk_rt(t)->last_gpu = gpu;
846 846
847 // count the number of resource holders 847 // count the number of resource holders
848 --(*(aff->q_info[replica].nr_cur_users)); 848 --(*(aff->q_info[replica].nr_cur_users));
849 849
850 reg_nv_device(gpu, 0, t); // unregister 850 reg_nv_device(gpu, 0, t); // unregister
851 851
852 // update estimates 852 // update estimates
853 update_gpu_estimate(t, get_gpu_time(t)); 853 update_gpu_estimate(t, get_gpu_time(t));
854 854
855 TRACE_CUR("%s/%d freed gpu %d. actual time was %llu. estimated was %llu. diff is %d\n", 855 TRACE_CUR("%s/%d freed gpu %d. actual time was %llu. estimated was %llu. diff is %d\n",
856 t->comm, t->pid, gpu, 856 t->comm, t->pid, gpu,
857 get_gpu_time(t), 857 get_gpu_time(t),
@@ -893,9 +893,9 @@ struct kfmlp_queue* simple_gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff,
893 struct kfmlp_queue_info *shortest; 893 struct kfmlp_queue_info *shortest;
894 struct kfmlp_queue *to_enqueue; 894 struct kfmlp_queue *to_enqueue;
895 int i; 895 int i;
896 896
897// TRACE_CUR("Simple GPU KFMLP advise_enqueue invoked\n"); 897// TRACE_CUR("Simple GPU KFMLP advise_enqueue invoked\n");
898 898
899 shortest = &aff->q_info[0]; 899 shortest = &aff->q_info[0];
900 min_count = shortest->q->count; 900 min_count = shortest->q->count;
901 min_nr_users = *(shortest->nr_cur_users); 901 min_nr_users = *(shortest->nr_cur_users);
@@ -904,10 +904,10 @@ struct kfmlp_queue* simple_gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff,
904 kfmlp_get_idx(sem, shortest->q), 904 kfmlp_get_idx(sem, shortest->q),
905 shortest->q->count, 905 shortest->q->count,
906 min_nr_users); 906 min_nr_users);
907 907
908 for(i = 1; i < sem->num_resources; ++i) { 908 for(i = 1; i < sem->num_resources; ++i) {
909 int len = aff->q_info[i].q->count; 909 int len = aff->q_info[i].q->count;
910 910
911 // queue is smaller, or they're equal and the other has a smaller number 911 // queue is smaller, or they're equal and the other has a smaller number
912 // of total users. 912 // of total users.
913 // 913 //
@@ -919,18 +919,18 @@ struct kfmlp_queue* simple_gpu_kfmlp_advise_enqueue(struct kfmlp_affinity* aff,
919 min_count = shortest->q->count; 919 min_count = shortest->q->count;
920 min_nr_users = *(aff->q_info[i].nr_cur_users); 920 min_nr_users = *(aff->q_info[i].nr_cur_users);
921 } 921 }
922 922
923 TRACE_CUR("queue %d: waiters = %d, total holders = %d\n", 923 TRACE_CUR("queue %d: waiters = %d, total holders = %d\n",
924 kfmlp_get_idx(sem, aff->q_info[i].q), 924 kfmlp_get_idx(sem, aff->q_info[i].q),
925 aff->q_info[i].q->count, 925 aff->q_info[i].q->count,
926 *(aff->q_info[i].nr_cur_users)); 926 *(aff->q_info[i].nr_cur_users));
927 } 927 }
928 928
929 to_enqueue = shortest->q; 929 to_enqueue = shortest->q;
930 TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n", 930 TRACE_CUR("enqueue on fq %d (non-aff wanted fq %d)\n",
931 kfmlp_get_idx(sem, to_enqueue), 931 kfmlp_get_idx(sem, to_enqueue),
932 kfmlp_get_idx(sem, sem->shortest_queue)); 932 kfmlp_get_idx(sem, sem->shortest_queue));
933 933
934 return to_enqueue; 934 return to_enqueue;
935} 935}
936 936
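The simple (affinity-oblivious) variant above compares raw queue counts instead of time estimates; the comparison itself falls in the elided context lines of this hunk, so the following is only an approximation of its shape, built from the variables that are visible (len, min_count, min_nr_users, shortest):

	/* Approximate form of the elided test: prefer a strictly shorter
	 * queue; on a tie, prefer the replica whose GPU currently has fewer
	 * holders. */
	if ((len < min_count) ||
	    ((len == min_count) &&
	     (*(aff->q_info[i].nr_cur_users) < min_nr_users))) {
		shortest = &aff->q_info[i];
		min_count = shortest->q->count;
		min_nr_users = *(aff->q_info[i].nr_cur_users);
	}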
@@ -956,12 +956,12 @@ void simple_gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_q
956 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); 956 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
957 int replica = kfmlp_get_idx(sem, fq); 957 int replica = kfmlp_get_idx(sem, fq);
958 int gpu = replica_to_gpu(aff, replica); 958 int gpu = replica_to_gpu(aff, replica);
959 959
960// TRACE_CUR("Simple GPU KFMLP notify_acquired invoked\n"); 960// TRACE_CUR("Simple GPU KFMLP notify_acquired invoked\n");
961 961
962 // count the number of resource holders 962 // count the number of resource holders
963 ++(*(aff->q_info[replica].nr_cur_users)); 963 ++(*(aff->q_info[replica].nr_cur_users));
964 964
965 reg_nv_device(gpu, 1, t); // register 965 reg_nv_device(gpu, 1, t); // register
966} 966}
967 967
@@ -970,11 +970,11 @@ void simple_gpu_kfmlp_notify_freed(struct kfmlp_affinity* aff, struct kfmlp_queu
970 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock); 970 struct kfmlp_semaphore *sem = kfmlp_from_lock(aff->obs.lock);
971 int replica = kfmlp_get_idx(sem, fq); 971 int replica = kfmlp_get_idx(sem, fq);
972 int gpu = replica_to_gpu(aff, replica); 972 int gpu = replica_to_gpu(aff, replica);
973 973
974// TRACE_CUR("Simple GPU KFMLP notify_freed invoked\n"); 974// TRACE_CUR("Simple GPU KFMLP notify_freed invoked\n");
975 // count the number of resource holders 975 // count the number of resource holders
976 --(*(aff->q_info[replica].nr_cur_users)); 976 --(*(aff->q_info[replica].nr_cur_users));
977 977
978 reg_nv_device(gpu, 0, t); // unregister 978 reg_nv_device(gpu, 0, t); // unregister
979} 979}
980 980
@@ -986,7 +986,7 @@ struct kfmlp_affinity_ops simple_gpu_kfmlp_affinity =
986 .notify_dequeue = simple_gpu_kfmlp_notify_dequeue, 986 .notify_dequeue = simple_gpu_kfmlp_notify_dequeue,
987 .notify_acquired = simple_gpu_kfmlp_notify_acquired, 987 .notify_acquired = simple_gpu_kfmlp_notify_acquired,
988 .notify_freed = simple_gpu_kfmlp_notify_freed, 988 .notify_freed = simple_gpu_kfmlp_notify_freed,
989 .replica_to_resource = gpu_replica_to_resource, 989 .replica_to_resource = gpu_replica_to_resource,
990}; 990};
991 991
992struct affinity_observer* kfmlp_simple_gpu_aff_obs_new(struct affinity_observer_ops* ops, 992struct affinity_observer* kfmlp_simple_gpu_aff_obs_new(struct affinity_observer_ops* ops,
diff --git a/litmus/litmus.c b/litmus/litmus.c
index b876e67b7a9b..5b301c418b96 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -309,7 +309,7 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
309 now = get_cycles(); 309 now = get_cycles();
310 ret = put_user(now, ts); 310 ret = put_user(now, ts);
311 } 311 }
312 312
313 return ret; 313 return ret;
314} 314}
315 315
@@ -323,9 +323,9 @@ void init_gpu_affinity_state(struct task_struct* p)
323 // critically-damped 323 // critically-damped
324 // p->rt_param.gpu_fb_param_a = _frac(102, 1000); 324 // p->rt_param.gpu_fb_param_a = _frac(102, 1000);
325 // p->rt_param.gpu_fb_param_b = _frac(303, 1000); 325 // p->rt_param.gpu_fb_param_b = _frac(303, 1000);
326 326
327 p->rt_param.gpu_migration = MIG_NONE; 327 p->rt_param.gpu_migration = MIG_NONE;
328 p->rt_param.last_gpu = -1; 328 p->rt_param.last_gpu = -1;
329} 329}
330#endif 330#endif
331 331
@@ -334,11 +334,11 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
334{ 334{
335 struct rt_task user_config = {}; 335 struct rt_task user_config = {};
336 void* ctrl_page = NULL; 336 void* ctrl_page = NULL;
337 337
338#ifdef CONFIG_LITMUS_NESTED_LOCKING 338#ifdef CONFIG_LITMUS_NESTED_LOCKING
339 binheap_order_t prio_order = NULL; 339 binheap_order_t prio_order = NULL;
340#endif 340#endif
341 341
342 if (restore) { 342 if (restore) {
343 /* Save user-space provided configuration data. 343 /* Save user-space provided configuration data.
344 * and allocated page. */ 344 * and allocated page. */
@@ -346,15 +346,15 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
346 ctrl_page = p->rt_param.ctrl_page; 346 ctrl_page = p->rt_param.ctrl_page;
347 } 347 }
348 348
349#ifdef CONFIG_LITMUS_NESTED_LOCKING 349#ifdef CONFIG_LITMUS_NESTED_LOCKING
350 prio_order = p->rt_param.hp_blocked_tasks.compare; 350 prio_order = p->rt_param.hp_blocked_tasks.compare;
351#endif 351#endif
352 352
353 /* We probably should not be inheriting any task's priority 353 /* We probably should not be inheriting any task's priority
354 * at this point in time. 354 * at this point in time.
355 */ 355 */
356 WARN_ON(p->rt_param.inh_task); 356 WARN_ON(p->rt_param.inh_task);
357 357
358#ifdef CONFIG_LITMUS_NESTED_LOCKING 358#ifdef CONFIG_LITMUS_NESTED_LOCKING
359 WARN_ON(p->rt_param.blocked_lock); 359 WARN_ON(p->rt_param.blocked_lock);
360 WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks)); 360 WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks));
@@ -363,7 +363,7 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
363#ifdef CONFIG_LITMUS_SOFTIRQD 363#ifdef CONFIG_LITMUS_SOFTIRQD
364 /* We probably should not have any tasklets executing for 364 /* We probably should not have any tasklets executing for
365 * us at this time. 365 * us at this time.
366 */ 366 */
367 WARN_ON(p->rt_param.cur_klitirqd); 367 WARN_ON(p->rt_param.cur_klitirqd);
368 WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD); 368 WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD);
369 369
@@ -377,24 +377,24 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
377#ifdef CONFIG_LITMUS_NVIDIA 377#ifdef CONFIG_LITMUS_NVIDIA
378 WARN_ON(p->rt_param.held_gpus != 0); 378 WARN_ON(p->rt_param.held_gpus != 0);
379#endif 379#endif
380 380
381 /* Cleanup everything else. */ 381 /* Cleanup everything else. */
382 memset(&p->rt_param, 0, sizeof(p->rt_param)); 382 memset(&p->rt_param, 0, sizeof(p->rt_param));
383 383
384 /* Restore preserved fields. */ 384 /* Restore preserved fields. */
385 if (restore) { 385 if (restore) {
386 p->rt_param.task_params = user_config; 386 p->rt_param.task_params = user_config;
387 p->rt_param.ctrl_page = ctrl_page; 387 p->rt_param.ctrl_page = ctrl_page;
388 } 388 }
389 389
390#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) 390#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
391 init_gpu_affinity_state(p); 391 init_gpu_affinity_state(p);
392#endif 392#endif
393 393
394#ifdef CONFIG_LITMUS_NESTED_LOCKING 394#ifdef CONFIG_LITMUS_NESTED_LOCKING
395 INIT_BINHEAP_HANDLE(&p->rt_param.hp_blocked_tasks, prio_order); 395 INIT_BINHEAP_HANDLE(&p->rt_param.hp_blocked_tasks, prio_order);
396 raw_spin_lock_init(&p->rt_param.hp_blocked_tasks_lock); 396 raw_spin_lock_init(&p->rt_param.hp_blocked_tasks_lock);
397#endif 397#endif
398} 398}
399 399
400long litmus_admit_task(struct task_struct* tsk) 400long litmus_admit_task(struct task_struct* tsk)
@@ -440,8 +440,8 @@ long litmus_admit_task(struct task_struct* tsk)
440 } else { 440 } else {
441 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); 441 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
442 } 442 }
443 443
444 444
445#ifdef CONFIG_LITMUS_NVIDIA 445#ifdef CONFIG_LITMUS_NVIDIA
446 atomic_set(&tsk_rt(tsk)->nv_int_count, 0); 446 atomic_set(&tsk_rt(tsk)->nv_int_count, 0);
447#endif 447#endif
@@ -460,7 +460,7 @@ long litmus_admit_task(struct task_struct* tsk)
460 mutex_init(&tsk_rt(tsk)->klitirqd_sem); 460 mutex_init(&tsk_rt(tsk)->klitirqd_sem);
461 atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); 461 atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD);
462#endif 462#endif
463 463
464 retval = litmus->admit_task(tsk); 464 retval = litmus->admit_task(tsk);
465 465
466 if (!retval) { 466 if (!retval) {
diff --git a/litmus/litmus_pai_softirq.c b/litmus/litmus_pai_softirq.c
index b31eeb8a2538..300571a81bbd 100644
--- a/litmus/litmus_pai_softirq.c
+++ b/litmus/litmus_pai_softirq.c
@@ -28,7 +28,7 @@ int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
28 } 28 }
29 29
30 ret = litmus->enqueue_pai_tasklet(t); 30 ret = litmus->enqueue_pai_tasklet(t);
31 31
32 return(ret); 32 return(ret);
33} 33}
34 34
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index 381513366c7a..9ab7e015a3c1 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -172,8 +172,8 @@ int __init init_litmus_proc(void)
172 klitirqd_file = 172 klitirqd_file =
173 create_proc_read_entry("klitirqd_stats", 0444, litmus_dir, 173 create_proc_read_entry("klitirqd_stats", 0444, litmus_dir,
174 proc_read_klitirqd_stats, NULL); 174 proc_read_klitirqd_stats, NULL);
175#endif 175#endif
176 176
177 stat_file = create_proc_read_entry("stats", 0444, litmus_dir, 177 stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
178 proc_read_stats, NULL); 178 proc_read_stats, NULL);
179 179
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index 6b033508877d..9f7d9da5facb 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -44,7 +44,7 @@ struct klitirqd_info
44 44
45 45
46 raw_spinlock_t lock; 46 raw_spinlock_t lock;
47 47
48 u32 pending; 48 u32 pending;
49 atomic_t num_hi_pending; 49 atomic_t num_hi_pending;
50 atomic_t num_low_pending; 50 atomic_t num_low_pending;
@@ -70,7 +70,7 @@ int proc_read_klitirqd_stats(char *page, char **start,
70 int len = snprintf(page, PAGE_SIZE, 70 int len = snprintf(page, PAGE_SIZE,
71 "num ready klitirqds: %d\n\n", 71 "num ready klitirqds: %d\n\n",
72 atomic_read(&num_ready_klitirqds)); 72 atomic_read(&num_ready_klitirqds));
73 73
74 if(klitirqd_is_ready()) 74 if(klitirqd_is_ready())
75 { 75 {
76 int i; 76 int i;
@@ -100,7 +100,7 @@ int proc_read_klitirqd_stats(char *page, char **start,
100 return(len); 100 return(len);
101} 101}
102 102
103 103
104 104
105 105
106 106
@@ -185,9 +185,9 @@ inline unsigned int klitirqd_id(struct task_struct* tsk)
185 return i; 185 return i;
186 } 186 }
187 } 187 }
188 188
189 BUG(); 189 BUG();
190 190
191 return 0; 191 return 0;
192} 192}
193 193
@@ -217,11 +217,11 @@ inline static u32 litirq_pending(struct klitirqd_info* which)
217{ 217{
218 unsigned long flags; 218 unsigned long flags;
219 u32 pending; 219 u32 pending;
220 220
221 raw_spin_lock_irqsave(&which->lock, flags); 221 raw_spin_lock_irqsave(&which->lock, flags);
222 pending = litirq_pending_irqoff(which); 222 pending = litirq_pending_irqoff(which);
223 raw_spin_unlock_irqrestore(&which->lock, flags); 223 raw_spin_unlock_irqrestore(&which->lock, flags);
224 224
225 return pending; 225 return pending;
226}; 226};
227 227
@@ -296,13 +296,13 @@ static int tasklet_ownership_change(
296 int ret = 0; 296 int ret = 0;
297 297
298 raw_spin_lock_irqsave(&which->lock, flags); 298 raw_spin_lock_irqsave(&which->lock, flags);
299 299
300 switch(taskletQ) 300 switch(taskletQ)
301 { 301 {
302 case LIT_TASKLET_HI: 302 case LIT_TASKLET_HI:
303 if(litirq_pending_hi_irqoff(which)) 303 if(litirq_pending_hi_irqoff(which))
304 { 304 {
305 ret = (which->pending_tasklets_hi.head->owner != 305 ret = (which->pending_tasklets_hi.head->owner !=
306 which->current_owner); 306 which->current_owner);
307 } 307 }
308 break; 308 break;
@@ -316,11 +316,11 @@ static int tasklet_ownership_change(
316 default: 316 default:
317 break; 317 break;
318 } 318 }
319 319
320 raw_spin_unlock_irqrestore(&which->lock, flags); 320 raw_spin_unlock_irqrestore(&which->lock, flags);
321 321
322 TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret); 322 TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret);
323 323
324 return ret; 324 return ret;
325} 325}
326 326
@@ -329,12 +329,12 @@ static void __reeval_prio(struct klitirqd_info* which)
329{ 329{
330 struct task_struct* next_owner = NULL; 330 struct task_struct* next_owner = NULL;
331 struct task_struct* klitirqd = which->klitirqd; 331 struct task_struct* klitirqd = which->klitirqd;
332 332
333 /* Check in prio-order */ 333 /* Check in prio-order */
334 u32 pending = litirq_pending_irqoff(which); 334 u32 pending = litirq_pending_irqoff(which);
335 335
336 //__dump_state(which, "__reeval_prio: before"); 336 //__dump_state(which, "__reeval_prio: before");
337 337
338 if(pending) 338 if(pending)
339 { 339 {
340 if(pending & LIT_TASKLET_HI) 340 if(pending & LIT_TASKLET_HI)
@@ -375,9 +375,9 @@ static void __reeval_prio(struct klitirqd_info* which)
375 TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, 375 TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
376 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, 376 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
377 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, 377 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
378 next_owner->comm, next_owner->pid); 378 next_owner->comm, next_owner->pid);
379 } 379 }
380 380
381 litmus->increase_prio_inheritance_klitirqd(klitirqd, old_owner, next_owner); 381 litmus->increase_prio_inheritance_klitirqd(klitirqd, old_owner, next_owner);
382 } 382 }
383 else 383 else
@@ -391,21 +391,21 @@ static void __reeval_prio(struct klitirqd_info* which)
391 { 391 {
392 // is this a bug? 392 // is this a bug?
393 TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n", 393 TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n",
394 __FUNCTION__, klitirqd->comm, klitirqd->pid); 394 __FUNCTION__, klitirqd->comm, klitirqd->pid);
395 } 395 }
396 396
397 BUG_ON(pending != 0); 397 BUG_ON(pending != 0);
398 litmus->decrease_prio_inheritance_klitirqd(klitirqd, old_owner, NULL); 398 litmus->decrease_prio_inheritance_klitirqd(klitirqd, old_owner, NULL);
399 } 399 }
400 } 400 }
401 401
402 //__dump_state(which, "__reeval_prio: after"); 402 //__dump_state(which, "__reeval_prio: after");
403} 403}
404 404
405static void reeval_prio(struct klitirqd_info* which) 405static void reeval_prio(struct klitirqd_info* which)
406{ 406{
407 unsigned long flags; 407 unsigned long flags;
408 408
409 raw_spin_lock_irqsave(&which->lock, flags); 409 raw_spin_lock_irqsave(&which->lock, flags);
410 __reeval_prio(which); 410 __reeval_prio(which);
411 raw_spin_unlock_irqrestore(&which->lock, flags); 411 raw_spin_unlock_irqrestore(&which->lock, flags);
@@ -438,41 +438,41 @@ static void do_lit_tasklet(struct klitirqd_info* which,
438 atomic_t* count; 438 atomic_t* count;
439 439
440 raw_spin_lock_irqsave(&which->lock, flags); 440 raw_spin_lock_irqsave(&which->lock, flags);
441 441
442 //__dump_state(which, "do_lit_tasklet: before steal"); 442 //__dump_state(which, "do_lit_tasklet: before steal");
443 443
444 /* copy out the tasklets for our private use. */ 444 /* copy out the tasklets for our private use. */
445 list = pending_tasklets->head; 445 list = pending_tasklets->head;
446 pending_tasklets->head = NULL; 446 pending_tasklets->head = NULL;
447 pending_tasklets->tail = &pending_tasklets->head; 447 pending_tasklets->tail = &pending_tasklets->head;
448 448
449 /* remove pending flag */ 449 /* remove pending flag */
450 which->pending &= (pending_tasklets == &which->pending_tasklets) ? 450 which->pending &= (pending_tasklets == &which->pending_tasklets) ?
451 ~LIT_TASKLET_LOW : 451 ~LIT_TASKLET_LOW :
452 ~LIT_TASKLET_HI; 452 ~LIT_TASKLET_HI;
453 453
454 count = (pending_tasklets == &which->pending_tasklets) ? 454 count = (pending_tasklets == &which->pending_tasklets) ?
455 &which->num_low_pending: 455 &which->num_low_pending:
456 &which->num_hi_pending; 456 &which->num_hi_pending;
457 457
458 //__dump_state(which, "do_lit_tasklet: after steal"); 458 //__dump_state(which, "do_lit_tasklet: after steal");
459 459
460 raw_spin_unlock_irqrestore(&which->lock, flags); 460 raw_spin_unlock_irqrestore(&which->lock, flags);
461 461
462 462
463 while(list) 463 while(list)
464 { 464 {
465 struct tasklet_struct *t = list; 465 struct tasklet_struct *t = list;
466 466
467 /* advance, lest we forget */ 467 /* advance, lest we forget */
468 list = list->next; 468 list = list->next;
469 469
470 /* execute tasklet if it has my priority and is free */ 470 /* execute tasklet if it has my priority and is free */
471 if ((t->owner == which->current_owner) && tasklet_trylock(t)) { 471 if ((t->owner == which->current_owner) && tasklet_trylock(t)) {
472 if (!atomic_read(&t->count)) { 472 if (!atomic_read(&t->count)) {
473 473
474 sched_trace_tasklet_begin(t->owner); 474 sched_trace_tasklet_begin(t->owner);
475 475
476 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 476 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
477 { 477 {
478 BUG(); 478 BUG();
@@ -480,18 +480,18 @@ static void do_lit_tasklet(struct klitirqd_info* which,
480 TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__); 480 TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__);
481 t->func(t->data); 481 t->func(t->data);
482 tasklet_unlock(t); 482 tasklet_unlock(t);
483 483
484 atomic_dec(count); 484 atomic_dec(count);
485 485
486 sched_trace_tasklet_end(t->owner, 0ul); 486 sched_trace_tasklet_end(t->owner, 0ul);
487 487
488 continue; /* process more tasklets */ 488 continue; /* process more tasklets */
489 } 489 }
490 tasklet_unlock(t); 490 tasklet_unlock(t);
491 } 491 }
492 492
493 TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__); 493 TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__);
494 494
495 /* couldn't process tasklet. put it back at the end of the queue. */ 495 /* couldn't process tasklet. put it back at the end of the queue. */
496 if(pending_tasklets == &which->pending_tasklets) 496 if(pending_tasklets == &which->pending_tasklets)
497 ___litmus_tasklet_schedule(t, which, 0); 497 ___litmus_tasklet_schedule(t, which, 0);
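do_lit_tasklet above follows a drain-then-process pattern: detach the whole pending list and clear the matching pending bit while holding the lock, then invoke the handlers with the lock released so new tasklets can be queued concurrently. A condensed sketch of that pattern, using the names from the hunk above but omitting the ownership, tasklet_trylock() and state-bit checks:

	/* Sketch only -- mirrors the structure of do_lit_tasklet. */
	raw_spin_lock_irqsave(&which->lock, flags);
	list = pending_tasklets->head;            /* take the whole list */
	pending_tasklets->head = NULL;
	pending_tasklets->tail = &pending_tasklets->head;
	which->pending &= ~LIT_TASKLET_LOW;       /* or ~LIT_TASKLET_HI */
	raw_spin_unlock_irqrestore(&which->lock, flags);

	while (list) {
		struct tasklet_struct *t = list;
		list = list->next;                /* advance before invoking */
		t->func(t->data);                 /* real code first checks the
						     owner, tasklet_trylock()
						     and TASKLET_STATE_SCHED */
	}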
@@ -507,13 +507,13 @@ static int do_litirq(struct klitirqd_info* which)
507{ 507{
508 u32 pending; 508 u32 pending;
509 int resched = 0; 509 int resched = 0;
510 510
511 if(in_interrupt()) 511 if(in_interrupt())
512 { 512 {
513 TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); 513 TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__);
514 return(0); 514 return(0);
515 } 515 }
516 516
517 if(which->klitirqd != current) 517 if(which->klitirqd != current)
518 { 518 {
519 TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", 519 TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n",
@@ -521,7 +521,7 @@ static int do_litirq(struct klitirqd_info* which)
521 which->klitirqd->comm, which->klitirqd->pid); 521 which->klitirqd->comm, which->klitirqd->pid);
522 return(0); 522 return(0);
523 } 523 }
524 524
525 if(!is_realtime(current)) 525 if(!is_realtime(current))
526 { 526 {
527 TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n", 527 TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n",
@@ -529,9 +529,9 @@ static int do_litirq(struct klitirqd_info* which)
529 return(0); 529 return(0);
530 } 530 }
531 531
532 532
533 /* We only handle tasklets & work objects, no need for RCU triggers? */ 533 /* We only handle tasklets & work objects, no need for RCU triggers? */
534 534
535 pending = litirq_pending(which); 535 pending = litirq_pending(which);
536 if(pending) 536 if(pending)
537 { 537 {
@@ -541,20 +541,20 @@ static int do_litirq(struct klitirqd_info* which)
541 TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); 541 TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__);
542 do_lit_tasklet(which, &which->pending_tasklets_hi); 542 do_lit_tasklet(which, &which->pending_tasklets_hi);
543 resched = tasklet_ownership_change(which, LIT_TASKLET_HI); 543 resched = tasklet_ownership_change(which, LIT_TASKLET_HI);
544 544
545 if(resched) 545 if(resched)
546 { 546 {
547 TRACE_CUR("%s: HI tasklets of another owner remain. " 547 TRACE_CUR("%s: HI tasklets of another owner remain. "
548 "Skipping any LOW tasklets.\n", __FUNCTION__); 548 "Skipping any LOW tasklets.\n", __FUNCTION__);
549 } 549 }
550 } 550 }
551 551
552 if(!resched && (pending & LIT_TASKLET_LOW)) 552 if(!resched && (pending & LIT_TASKLET_LOW))
553 { 553 {
554 TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); 554 TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__);
555 do_lit_tasklet(which, &which->pending_tasklets); 555 do_lit_tasklet(which, &which->pending_tasklets);
556 resched = tasklet_ownership_change(which, LIT_TASKLET_LOW); 556 resched = tasklet_ownership_change(which, LIT_TASKLET_LOW);
557 557
558 if(resched) 558 if(resched)
559 { 559 {
560 TRACE_CUR("%s: LOW tasklets of another owner remain. " 560 TRACE_CUR("%s: LOW tasklets of another owner remain. "
@@ -562,7 +562,7 @@ static int do_litirq(struct klitirqd_info* which)
562 } 562 }
563 } 563 }
564 } 564 }
565 565
566 return(resched); 566 return(resched);
567} 567}
568 568
@@ -572,11 +572,11 @@ static void do_work(struct klitirqd_info* which)
572 unsigned long flags; 572 unsigned long flags;
573 work_func_t f; 573 work_func_t f;
574 struct work_struct* work; 574 struct work_struct* work;
575 575
576 // only execute one work-queue item to yield to tasklets. 576 // only execute one work-queue item to yield to tasklets.
577 // ...is this a good idea, or should we just batch them? 577 // ...is this a good idea, or should we just batch them?
578 raw_spin_lock_irqsave(&which->lock, flags); 578 raw_spin_lock_irqsave(&which->lock, flags);
579 579
580 if(!litirq_pending_work_irqoff(which)) 580 if(!litirq_pending_work_irqoff(which))
581 { 581 {
582 raw_spin_unlock_irqrestore(&which->lock, flags); 582 raw_spin_unlock_irqrestore(&which->lock, flags);
@@ -585,16 +585,16 @@ static void do_work(struct klitirqd_info* which)
585 585
586 work = list_first_entry(&which->worklist, struct work_struct, entry); 586 work = list_first_entry(&which->worklist, struct work_struct, entry);
587 list_del_init(&work->entry); 587 list_del_init(&work->entry);
588 588
589 if(list_empty(&which->worklist)) 589 if(list_empty(&which->worklist))
590 { 590 {
591 which->pending &= ~LIT_WORK; 591 which->pending &= ~LIT_WORK;
592 } 592 }
593 593
594 raw_spin_unlock_irqrestore(&which->lock, flags); 594 raw_spin_unlock_irqrestore(&which->lock, flags);
595 595
596 596
597 597
598 /* safe to read current_owner outside of lock since only this thread 598 /* safe to read current_owner outside of lock since only this thread
599 may write to the pointer. */ 599 may write to the pointer. */
600 if(work->owner == which->current_owner) 600 if(work->owner == which->current_owner)
@@ -605,7 +605,7 @@ static void do_work(struct klitirqd_info* which)
605 f = work->func; 605 f = work->func;
606 f(work); /* can't touch 'work' after this point, 606 f(work); /* can't touch 'work' after this point,
607 the user may have freed it. */ 607 the user may have freed it. */
608 608
609 atomic_dec(&which->num_work_pending); 609 atomic_dec(&which->num_work_pending);
610 } 610 }
611 else 611 else
@@ -614,7 +614,7 @@ static void do_work(struct klitirqd_info* which)
614 __FUNCTION__); 614 __FUNCTION__);
615 ___litmus_schedule_work(work, which, 0); 615 ___litmus_schedule_work(work, which, 0);
616 } 616 }
617 617
618no_work: 618no_work:
619 return; 619 return;
620} 620}
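
do_work() above dequeues a single work item per invocation so pending tasklets get another turn, and it drops the lock before invoking the callback because the callback may free the item. A userspace sketch of that pop-one, run-unlocked pattern; the list layout and run_one() are illustrative, not the kernel work-queue API.

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct work { struct work *next; void (*func)(struct work *); };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *worklist;          /* singly linked, head insertion */

/* pop at most one item and run it with the lock released */
static int run_one(void)
{
    struct work *w;

    pthread_mutex_lock(&lock);
    w = worklist;
    if (w)
        worklist = w->next;            /* detach before unlocking */
    pthread_mutex_unlock(&lock);

    if (!w)
        return 0;
    w->func(w);                        /* may free w; don't touch it after */
    return 1;
}

static void hello(struct work *w) { printf("work item ran\n"); free(w); }

int main(void)
{
    struct work *w = malloc(sizeof(*w));
    w->next = NULL; w->func = hello;
    worklist = w;
    while (run_one())
        ;
    return 0;
}
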
@@ -628,7 +628,7 @@ static int set_litmus_daemon_sched(void)
628 628
629 TODO: Transition to a new job whenever a 629 TODO: Transition to a new job whenever a
630 new tasklet is handled */ 630 new tasklet is handled */
631 631
632 int ret = 0; 632 int ret = 0;
633 633
634 struct rt_task tp = { 634 struct rt_task tp = {
@@ -639,20 +639,20 @@ static int set_litmus_daemon_sched(void)
639 .budget_policy = NO_ENFORCEMENT, 639 .budget_policy = NO_ENFORCEMENT,
640 .cls = RT_CLASS_BEST_EFFORT 640 .cls = RT_CLASS_BEST_EFFORT
641 }; 641 };
642 642
643 struct sched_param param = { .sched_priority = 0}; 643 struct sched_param param = { .sched_priority = 0};
644 644
645 645
646 /* set task params, mark as proxy thread, and init other data */ 646 /* set task params, mark as proxy thread, and init other data */
647 tsk_rt(current)->task_params = tp; 647 tsk_rt(current)->task_params = tp;
648 tsk_rt(current)->is_proxy_thread = 1; 648 tsk_rt(current)->is_proxy_thread = 1;
649 tsk_rt(current)->cur_klitirqd = NULL; 649 tsk_rt(current)->cur_klitirqd = NULL;
650 mutex_init(&tsk_rt(current)->klitirqd_sem); 650 mutex_init(&tsk_rt(current)->klitirqd_sem);
651 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD); 651 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD);
652 652
653 /* inform the OS we're SCHED_LITMUS -- 653 /* inform the OS we're SCHED_LITMUS --
654 sched_setscheduler_nocheck() calls litmus_admit_task(). */ 654 sched_setscheduler_nocheck() calls litmus_admit_task(). */
655 sched_setscheduler_nocheck(current, SCHED_LITMUS, &param); 655 sched_setscheduler_nocheck(current, SCHED_LITMUS, &param);
656 656
657 return ret; 657 return ret;
658} 658}
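
set_litmus_daemon_sched() above parks the daemon in a best-effort reservation (NO_ENFORCEMENT, RT_CLASS_BEST_EFFORT, priority 0) and then lets sched_setscheduler_nocheck() admit it as a SCHED_LITMUS task. As a rough userspace analogue of configure-then-switch, assuming only the standard sched_setscheduler() call:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(void)
{
    struct sched_param param;
    memset(&param, 0, sizeof(param));
    param.sched_priority = 0;              /* best-effort: no RT priority */

    /* pid 0 == calling thread; the kernel admits it to the new class */
    if (sched_setscheduler(0, SCHED_OTHER, &param) != 0) {
        fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
        return 1;
    }
    printf("now running as a best-effort task\n");
    return 0;
}
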
@@ -682,7 +682,7 @@ static void exit_execution_phase(struct klitirqd_info* which,
682 up_and_set_stat(current, NOT_HELD, sem); 682 up_and_set_stat(current, NOT_HELD, sem);
683 TRACE_CUR("%s: Execution phase exited! " 683 TRACE_CUR("%s: Execution phase exited! "
684 "Released semaphore of %s/%d\n", __FUNCTION__, 684 "Released semaphore of %s/%d\n", __FUNCTION__,
685 t->comm, t->pid); 685 t->comm, t->pid);
686 } 686 }
687 else 687 else
688 { 688 {
@@ -704,11 +704,11 @@ static int run_klitirqd(void* unused)
704 TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); 704 TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__);
705 goto rt_failed; 705 goto rt_failed;
706 } 706 }
707 707
708 atomic_inc(&num_ready_klitirqds); 708 atomic_inc(&num_ready_klitirqds);
709 709
710 set_current_state(TASK_INTERRUPTIBLE); 710 set_current_state(TASK_INTERRUPTIBLE);
711 711
712 while (!kthread_should_stop()) 712 while (!kthread_should_stop())
713 { 713 {
714 preempt_disable(); 714 preempt_disable();
@@ -728,7 +728,7 @@ static int run_klitirqd(void* unused)
728 728
729 preempt_disable(); 729 preempt_disable();
730 } 730 }
731 731
732 __set_current_state(TASK_RUNNING); 732 __set_current_state(TASK_RUNNING);
733 733
734 while (litirq_pending_and_sem_and_owner(which, &sem, &owner)) 734 while (litirq_pending_and_sem_and_owner(which, &sem, &owner))
@@ -736,7 +736,7 @@ static int run_klitirqd(void* unused)
736 int needs_resched = 0; 736 int needs_resched = 0;
737 737
738 preempt_enable_no_resched(); 738 preempt_enable_no_resched();
739 739
740 BUG_ON(sem == NULL); 740 BUG_ON(sem == NULL);
741 741
742 // wait to enter execution phase; wait for 'current_owner' to block. 742 // wait to enter execution phase; wait for 'current_owner' to block.
@@ -749,28 +749,28 @@ static int run_klitirqd(void* unused)
749 } 749 }
750 750
751 preempt_disable(); 751 preempt_disable();
752 752
753 /* Double check that there's still pending work and the owner hasn't 753 /* Double check that there's still pending work and the owner hasn't
754 * changed. Pending items may have been flushed while we were sleeping. 754 * changed. Pending items may have been flushed while we were sleeping.
755 */ 755 */
756 if(litirq_pending_with_owner(which, owner)) 756 if(litirq_pending_with_owner(which, owner))
757 { 757 {
758 TRACE_CUR("%s: Executing tasklets and/or work objects.\n", 758 TRACE_CUR("%s: Executing tasklets and/or work objects.\n",
759 __FUNCTION__); 759 __FUNCTION__);
760 760
761 needs_resched = do_litirq(which); 761 needs_resched = do_litirq(which);
762 762
763 preempt_enable_no_resched(); 763 preempt_enable_no_resched();
764 764
765 // work objects are preemptible. 765 // work objects are preemptible.
766 if(!needs_resched) 766 if(!needs_resched)
767 { 767 {
768 do_work(which); 768 do_work(which);
769 } 769 }
770 770
771 // exit execution phase. 771 // exit execution phase.
772 exit_execution_phase(which, sem, owner); 772 exit_execution_phase(which, sem, owner);
773 773
774 TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__); 774 TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__);
775 reeval_prio(which); /* check if we need to change priority here */ 775 reeval_prio(which); /* check if we need to change priority here */
776 } 776 }
@@ -778,7 +778,7 @@ static int run_klitirqd(void* unused)
778 { 778 {
779 TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n", 779 TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n",
780 __FUNCTION__, 780 __FUNCTION__,
781 owner->comm, owner->pid); 781 owner->comm, owner->pid);
782 preempt_enable_no_resched(); 782 preempt_enable_no_resched();
783 783
784 // exit execution phase. 784 // exit execution phase.
@@ -792,12 +792,12 @@ static int run_klitirqd(void* unused)
792 set_current_state(TASK_INTERRUPTIBLE); 792 set_current_state(TASK_INTERRUPTIBLE);
793 } 793 }
794 __set_current_state(TASK_RUNNING); 794 __set_current_state(TASK_RUNNING);
795 795
796 atomic_dec(&num_ready_klitirqds); 796 atomic_dec(&num_ready_klitirqds);
797 797
798rt_failed: 798rt_failed:
799 litmus_exit_task(current); 799 litmus_exit_task(current);
800 800
801 return rt_status; 801 return rt_status;
802} 802}
803 803
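
run_klitirqd() above is the daemon's main loop: sleep interruptibly, and on wake-up enter the execution phase by taking the current owner's semaphore, drain tasklets plus at most one work item, release the semaphore, and re-evaluate the inherited priority. A compressed pthread sketch of that sleep/drain cycle; klit_pending, enter_phase() and exit_phase() are illustrative stand-ins.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static int klit_pending;          /* set by the scheduling side */
static int stopping;

static void enter_phase(void) { /* down(owner->klitirqd_sem) in the kernel */ }
static void exit_phase(void)  { /* up(owner->klitirqd_sem) */ }
static void drain(void)       { printf("draining pending bottom halves\n"); }

static void *klitirqd_sketch(void *unused)
{
    (void)unused;
    pthread_mutex_lock(&lock);
    while (!stopping || klit_pending) {
        while (!klit_pending && !stopping)
            pthread_cond_wait(&wake, &lock);   /* TASK_INTERRUPTIBLE sleep */
        while (klit_pending) {
            klit_pending = 0;
            pthread_mutex_unlock(&lock);
            enter_phase();                     /* serialize with the owner */
            drain();
            exit_phase();                      /* then re-evaluate priority */
            pthread_mutex_lock(&lock);
        }
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t th;
    pthread_create(&th, NULL, klitirqd_sketch, NULL);
    pthread_mutex_lock(&lock);
    klit_pending = 1;
    stopping = 1;
    pthread_cond_signal(&wake);
    pthread_mutex_unlock(&lock);
    pthread_join(th, NULL);
    return 0;
}
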
@@ -812,18 +812,18 @@ struct klitirqd_launch_data
812static void launch_klitirqd(struct work_struct *work) 812static void launch_klitirqd(struct work_struct *work)
813{ 813{
814 int i; 814 int i;
815 815
816 struct klitirqd_launch_data* launch_data = 816 struct klitirqd_launch_data* launch_data =
817 container_of(work, struct klitirqd_launch_data, work); 817 container_of(work, struct klitirqd_launch_data, work);
818 818
819 TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); 819 TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
820 820
821 /* create the daemon threads */ 821 /* create the daemon threads */
822 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 822 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
823 { 823 {
824 if(launch_data->cpu_affinity) 824 if(launch_data->cpu_affinity)
825 { 825 {
826 klitirqds[i].klitirqd = 826 klitirqds[i].klitirqd =
827 kthread_create( 827 kthread_create(
828 run_klitirqd, 828 run_klitirqd,
829 /* treat the affinity as a pointer, we'll cast it back later */ 829 /* treat the affinity as a pointer, we'll cast it back later */
@@ -831,13 +831,13 @@ static void launch_klitirqd(struct work_struct *work)
831 "klitirqd_th%d/%d", 831 "klitirqd_th%d/%d",
832 i, 832 i,
833 launch_data->cpu_affinity[i]); 833 launch_data->cpu_affinity[i]);
834 834
 835	 /* litmus will put us in the right cluster. */	 835	 /* litmus will put us in the right cluster. */
836 kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]); 836 kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]);
837 } 837 }
838 else 838 else
839 { 839 {
840 klitirqds[i].klitirqd = 840 klitirqds[i].klitirqd =
841 kthread_create( 841 kthread_create(
842 run_klitirqd, 842 run_klitirqd,
843 /* treat the affinity as a pointer, we'll cast it back later */ 843 /* treat the affinity as a pointer, we'll cast it back later */
@@ -845,16 +845,16 @@ static void launch_klitirqd(struct work_struct *work)
845 "klitirqd_th%d", 845 "klitirqd_th%d",
846 i); 846 i);
847 } 847 }
848 } 848 }
849 849
850 TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); 850 TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
851 851
852 /* unleash the daemons */ 852 /* unleash the daemons */
853 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 853 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
854 { 854 {
855 wake_up_process(klitirqds[i].klitirqd); 855 wake_up_process(klitirqds[i].klitirqd);
856 } 856 }
857 857
858 if(launch_data->cpu_affinity) 858 if(launch_data->cpu_affinity)
859 kfree(launch_data->cpu_affinity); 859 kfree(launch_data->cpu_affinity);
860 kfree(launch_data); 860 kfree(launch_data);
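
launch_klitirqd() above creates each daemon with kthread_create() and, when an affinity table was supplied, pins it with kthread_bind() before waking it. A userspace sketch of the same create, pin, then start sequence; NR_DAEMONS, the affinity table and the worker body are illustrative.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_DAEMONS 2

static void *daemon_body(void *arg)
{
    printf("daemon %ld running on its assigned CPU\n", (long)arg);
    return NULL;
}

int main(void)
{
    pthread_t th[NR_DAEMONS];
    int cpu_affinity[NR_DAEMONS] = { 0, 0 };   /* pin everything to CPU 0 */
    long i;

    for (i = 0; i < NR_DAEMONS; ++i) {
        pthread_attr_t attr;
        cpu_set_t cpus;

        pthread_attr_init(&attr);
        CPU_ZERO(&cpus);
        CPU_SET(cpu_affinity[i], &cpus);
        /* bind before the thread runs, like kthread_bind() before wake_up_process() */
        pthread_attr_setaffinity_np(&attr, sizeof(cpus), &cpus);
        pthread_create(&th[i], &attr, daemon_body, (void *)i);
        pthread_attr_destroy(&attr);
    }
    for (i = 0; i < NR_DAEMONS; ++i)
        pthread_join(th[i], NULL);
    return 0;
}
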
@@ -865,38 +865,38 @@ void spawn_klitirqd(int* affinity)
865{ 865{
866 int i; 866 int i;
867 struct klitirqd_launch_data* delayed_launch; 867 struct klitirqd_launch_data* delayed_launch;
868 868
869 if(atomic_read(&num_ready_klitirqds) != 0) 869 if(atomic_read(&num_ready_klitirqds) != 0)
870 { 870 {
871 TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n"); 871 TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n");
872 return; 872 return;
873 } 873 }
874 874
875 /* init the tasklet & work queues */ 875 /* init the tasklet & work queues */
876 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 876 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
877 { 877 {
878 klitirqds[i].terminating = 0; 878 klitirqds[i].terminating = 0;
879 klitirqds[i].pending = 0; 879 klitirqds[i].pending = 0;
880 880
881 klitirqds[i].num_hi_pending.counter = 0; 881 klitirqds[i].num_hi_pending.counter = 0;
882 klitirqds[i].num_low_pending.counter = 0; 882 klitirqds[i].num_low_pending.counter = 0;
883 klitirqds[i].num_work_pending.counter = 0; 883 klitirqds[i].num_work_pending.counter = 0;
884 884
885 klitirqds[i].pending_tasklets_hi.head = NULL; 885 klitirqds[i].pending_tasklets_hi.head = NULL;
886 klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head; 886 klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head;
887 887
888 klitirqds[i].pending_tasklets.head = NULL; 888 klitirqds[i].pending_tasklets.head = NULL;
889 klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head; 889 klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head;
890 890
891 INIT_LIST_HEAD(&klitirqds[i].worklist); 891 INIT_LIST_HEAD(&klitirqds[i].worklist);
892 892
893 raw_spin_lock_init(&klitirqds[i].lock); 893 raw_spin_lock_init(&klitirqds[i].lock);
894 } 894 }
895 895
896 /* wait to flush the initializations to memory since other threads 896 /* wait to flush the initializations to memory since other threads
897 will access it. */ 897 will access it. */
898 mb(); 898 mb();
899 899
900 /* tell a work queue to launch the threads. we can't make scheduling 900 /* tell a work queue to launch the threads. we can't make scheduling
901 calls since we're in an atomic state. */ 901 calls since we're in an atomic state. */
902 TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__); 902 TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__);
@@ -905,7 +905,7 @@ void spawn_klitirqd(int* affinity)
905 { 905 {
906 delayed_launch->cpu_affinity = 906 delayed_launch->cpu_affinity =
907 kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC); 907 kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC);
908 908
909 memcpy(delayed_launch->cpu_affinity, affinity, 909 memcpy(delayed_launch->cpu_affinity, affinity,
910 sizeof(int)*NR_LITMUS_SOFTIRQD); 910 sizeof(int)*NR_LITMUS_SOFTIRQD);
911 } 911 }
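
spawn_klitirqd() above cannot create threads directly because it may run in an atomic context, so it initializes the per-daemon state, issues mb() so other CPUs observe the initialization, and defers the actual kthread creation to a work queue. The publish-then-hand-off idea in portable C11 atomics; init_state() and the launcher stand in for the real deferred work.

#include <stdatomic.h>
#include <stdio.h>

struct daemon_state { int pending; int terminating; };

static struct daemon_state state;
static atomic_int published;           /* launcher polls this flag */

static void init_state(void)
{
    state.pending = 0;
    state.terminating = 0;
    /* release fence: flush the initialization before the flag, like mb() */
    atomic_store_explicit(&published, 1, memory_order_release);
}

static void launcher(void)
{
    /* acquire pairs with the release store above */
    if (atomic_load_explicit(&published, memory_order_acquire))
        printf("state is visible: pending=%d terminating=%d\n",
               state.pending, state.terminating);
}

int main(void)
{
    init_state();   /* "atomic" caller: only publishes the state */
    launcher();     /* deferred work: safe to create/wake threads here */
    return 0;
}
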
@@ -923,9 +923,9 @@ void kill_klitirqd(void)
923 if(!klitirqd_is_dead()) 923 if(!klitirqd_is_dead())
924 { 924 {
925 int i; 925 int i;
926 926
927 TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); 927 TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
928 928
929 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 929 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
930 { 930 {
931 if(klitirqds[i].terminating != 1) 931 if(klitirqds[i].terminating != 1)
@@ -961,35 +961,35 @@ struct task_struct* get_klitirqd(unsigned int k_id)
961 961
962void flush_pending(struct task_struct* klitirqd_thread, 962void flush_pending(struct task_struct* klitirqd_thread,
963 struct task_struct* owner) 963 struct task_struct* owner)
964{ 964{
965 unsigned int k_id = klitirqd_id(klitirqd_thread); 965 unsigned int k_id = klitirqd_id(klitirqd_thread);
966 struct klitirqd_info *which = &klitirqds[k_id]; 966 struct klitirqd_info *which = &klitirqds[k_id];
967 967
968 unsigned long flags; 968 unsigned long flags;
969 struct tasklet_struct *list; 969 struct tasklet_struct *list;
970 970
971 u32 work_flushed = 0; 971 u32 work_flushed = 0;
972 972
973 raw_spin_lock_irqsave(&which->lock, flags); 973 raw_spin_lock_irqsave(&which->lock, flags);
974 974
975 //__dump_state(which, "flush_pending: before"); 975 //__dump_state(which, "flush_pending: before");
976 976
977 // flush hi tasklets. 977 // flush hi tasklets.
978 if(litirq_pending_hi_irqoff(which)) 978 if(litirq_pending_hi_irqoff(which))
979 { 979 {
980 which->pending &= ~LIT_TASKLET_HI; 980 which->pending &= ~LIT_TASKLET_HI;
981 981
982 list = which->pending_tasklets_hi.head; 982 list = which->pending_tasklets_hi.head;
983 which->pending_tasklets_hi.head = NULL; 983 which->pending_tasklets_hi.head = NULL;
984 which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head; 984 which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head;
985 985
986 TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__); 986 TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__);
987 987
988 while(list) 988 while(list)
989 { 989 {
990 struct tasklet_struct *t = list; 990 struct tasklet_struct *t = list;
991 list = list->next; 991 list = list->next;
992 992
993 if(likely((t->owner == owner) || (owner == NULL))) 993 if(likely((t->owner == owner) || (owner == NULL)))
994 { 994 {
995 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) 995 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
@@ -1021,23 +1021,23 @@ void flush_pending(struct task_struct* klitirqd_thread,
1021 } 1021 }
1022 } 1022 }
1023 } 1023 }
1024 1024
1025 // flush low tasklets. 1025 // flush low tasklets.
1026 if(litirq_pending_low_irqoff(which)) 1026 if(litirq_pending_low_irqoff(which))
1027 { 1027 {
1028 which->pending &= ~LIT_TASKLET_LOW; 1028 which->pending &= ~LIT_TASKLET_LOW;
1029 1029
1030 list = which->pending_tasklets.head; 1030 list = which->pending_tasklets.head;
1031 which->pending_tasklets.head = NULL; 1031 which->pending_tasklets.head = NULL;
1032 which->pending_tasklets.tail = &which->pending_tasklets.head; 1032 which->pending_tasklets.tail = &which->pending_tasklets.head;
1033 1033
1034 TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__); 1034 TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__);
1035 1035
1036 while(list) 1036 while(list)
1037 { 1037 {
1038 struct tasklet_struct *t = list; 1038 struct tasklet_struct *t = list;
1039 list = list->next; 1039 list = list->next;
1040 1040
1041 if(likely((t->owner == owner) || (owner == NULL))) 1041 if(likely((t->owner == owner) || (owner == NULL)))
1042 { 1042 {
1043 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) 1043 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
@@ -1046,7 +1046,7 @@ void flush_pending(struct task_struct* klitirqd_thread,
1046 } 1046 }
1047 1047
1048 work_flushed |= LIT_TASKLET_LOW; 1048 work_flushed |= LIT_TASKLET_LOW;
1049 1049
1050 t->owner = NULL; 1050 t->owner = NULL;
1051 sched_trace_tasklet_end(owner, 1ul); 1051 sched_trace_tasklet_end(owner, 1ul);
1052 1052
@@ -1069,20 +1069,20 @@ void flush_pending(struct task_struct* klitirqd_thread,
1069 } 1069 }
1070 } 1070 }
1071 } 1071 }
1072 1072
1073 // flush work objects 1073 // flush work objects
1074 if(litirq_pending_work_irqoff(which)) 1074 if(litirq_pending_work_irqoff(which))
1075 { 1075 {
1076 which->pending &= ~LIT_WORK; 1076 which->pending &= ~LIT_WORK;
1077 1077
1078 TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__); 1078 TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__);
1079 1079
1080 while(!list_empty(&which->worklist)) 1080 while(!list_empty(&which->worklist))
1081 { 1081 {
1082 struct work_struct* work = 1082 struct work_struct* work =
1083 list_first_entry(&which->worklist, struct work_struct, entry); 1083 list_first_entry(&which->worklist, struct work_struct, entry);
1084 list_del_init(&work->entry); 1084 list_del_init(&work->entry);
1085 1085
1086 if(likely((work->owner == owner) || (owner == NULL))) 1086 if(likely((work->owner == owner) || (owner == NULL)))
1087 { 1087 {
1088 work_flushed |= LIT_WORK; 1088 work_flushed |= LIT_WORK;
@@ -1100,9 +1100,9 @@ void flush_pending(struct task_struct* klitirqd_thread,
1100 } 1100 }
1101 } 1101 }
1102 } 1102 }
1103 1103
1104 //__dump_state(which, "flush_pending: after (before reeval prio)"); 1104 //__dump_state(which, "flush_pending: after (before reeval prio)");
1105 1105
1106 1106
1107 mb(); /* commit changes to pending flags */ 1107 mb(); /* commit changes to pending flags */
1108 1108
@@ -1122,7 +1122,7 @@ void flush_pending(struct task_struct* klitirqd_thread,
1122 TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__); 1122 TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__);
1123 } 1123 }
1124 1124
1125 raw_spin_unlock_irqrestore(&which->lock, flags); 1125 raw_spin_unlock_irqrestore(&which->lock, flags);
1126} 1126}
1127 1127
1128 1128
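
flush_pending() above clears the pending bit, detaches the whole tasklet list by resetting head and the tail pointer, and then walks the detached entries, handing back those whose owner matches. A self-contained sketch of that detach-and-filter step on a head/tail-pointer list; struct tl and the integer owner ids are illustrative, and what happens to non-matching entries is elided in the hunks above.

#include <stdio.h>

struct tl { struct tl *next; int owner; };
struct tl_head { struct tl *head; struct tl **tail; };

static void enqueue(struct tl_head *q, struct tl *t)
{
    t->next = NULL;
    *q->tail = t;              /* append at the position tail points to */
    q->tail = &t->next;
}

/* detach everything, then hand back entries owned by 'owner' (or all if -1) */
static void flush(struct tl_head *q, int owner)
{
    struct tl *list = q->head;
    q->head = NULL;
    q->tail = &q->head;        /* queue is empty again */

    while (list) {
        struct tl *t = list;
        list = list->next;
        if (owner < 0 || t->owner == owner)
            printf("handing tasklet of owner %d back to Linux\n", t->owner);
        else
            printf("keeping tasklet of owner %d\n", t->owner);
    }
}

int main(void)
{
    struct tl_head q = { NULL, &q.head };
    struct tl a = { NULL, 1 }, b = { NULL, 2 };
    enqueue(&q, &a);
    enqueue(&q, &b);
    flush(&q, 1);
    return 0;
}
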
@@ -1136,29 +1136,29 @@ static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
1136 u32 old_pending; 1136 u32 old_pending;
1137 1137
1138 t->next = NULL; 1138 t->next = NULL;
1139 1139
1140 raw_spin_lock_irqsave(&which->lock, flags); 1140 raw_spin_lock_irqsave(&which->lock, flags);
1141 1141
1142 //__dump_state(which, "___litmus_tasklet_schedule: before queuing"); 1142 //__dump_state(which, "___litmus_tasklet_schedule: before queuing");
1143 1143
1144 *(which->pending_tasklets.tail) = t; 1144 *(which->pending_tasklets.tail) = t;
1145 which->pending_tasklets.tail = &t->next; 1145 which->pending_tasklets.tail = &t->next;
1146 1146
1147 old_pending = which->pending; 1147 old_pending = which->pending;
1148 which->pending |= LIT_TASKLET_LOW; 1148 which->pending |= LIT_TASKLET_LOW;
1149 1149
1150 atomic_inc(&which->num_low_pending); 1150 atomic_inc(&which->num_low_pending);
1151 1151
1152 mb(); 1152 mb();
1153 1153
1154 if(!old_pending && wakeup) 1154 if(!old_pending && wakeup)
1155 { 1155 {
1156 wakeup_litirqd_locked(which); /* wake up the klitirqd */ 1156 wakeup_litirqd_locked(which); /* wake up the klitirqd */
1157 } 1157 }
1158 1158
1159 //__dump_state(which, "___litmus_tasklet_schedule: after queuing"); 1159 //__dump_state(which, "___litmus_tasklet_schedule: after queuing");
1160 1160
1161 raw_spin_unlock_irqrestore(&which->lock, flags); 1161 raw_spin_unlock_irqrestore(&which->lock, flags);
1162} 1162}
1163 1163
1164int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id) 1164int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
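
___litmus_tasklet_schedule() above appends through the tail pointer, raises LIT_TASKLET_LOW in the pending mask, and wakes the daemon only when the mask was previously zero, so a daemon that already has work is not signalled again. A compact sketch of that wake-only-on-empty-transition test; pending_mask and wake_daemon() are illustrative.

#include <stdio.h>

#define LIT_TASKLET_LOW 0x1u
#define LIT_TASKLET_HI  0x2u
#define LIT_WORK        0x4u

static unsigned int pending_mask;

static void wake_daemon(void) { printf("waking klitirqd\n"); }

/* caller holds the per-daemon lock in the real code */
static void schedule_low(void)
{
    unsigned int old_pending = pending_mask;

    pending_mask |= LIT_TASKLET_LOW;
    if (!old_pending)           /* empty -> non-empty: daemon was asleep */
        wake_daemon();
    else
        printf("daemon already has work; no wakeup\n");
}

int main(void)
{
    schedule_low();             /* first enqueue wakes the daemon */
    schedule_low();             /* second one does not */
    return 0;
}
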
@@ -1173,7 +1173,7 @@ int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
1173 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1173 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1174 { 1174 {
1175 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); 1175 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
1176 BUG(); 1176 BUG();
1177 } 1177 }
1178 1178
1179 if(likely(!klitirqds[k_id].terminating)) 1179 if(likely(!klitirqds[k_id].terminating))
@@ -1209,25 +1209,25 @@ static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
1209 u32 old_pending; 1209 u32 old_pending;
1210 1210
1211 t->next = NULL; 1211 t->next = NULL;
1212 1212
1213 raw_spin_lock_irqsave(&which->lock, flags); 1213 raw_spin_lock_irqsave(&which->lock, flags);
1214 1214
1215 *(which->pending_tasklets_hi.tail) = t; 1215 *(which->pending_tasklets_hi.tail) = t;
1216 which->pending_tasklets_hi.tail = &t->next; 1216 which->pending_tasklets_hi.tail = &t->next;
1217 1217
1218 old_pending = which->pending; 1218 old_pending = which->pending;
1219 which->pending |= LIT_TASKLET_HI; 1219 which->pending |= LIT_TASKLET_HI;
1220 1220
1221 atomic_inc(&which->num_hi_pending); 1221 atomic_inc(&which->num_hi_pending);
1222 1222
1223 mb(); 1223 mb();
1224 1224
1225 if(!old_pending && wakeup) 1225 if(!old_pending && wakeup)
1226 { 1226 {
1227 wakeup_litirqd_locked(which); /* wake up the klitirqd */ 1227 wakeup_litirqd_locked(which); /* wake up the klitirqd */
1228 } 1228 }
1229 1229
1230 raw_spin_unlock_irqrestore(&which->lock, flags); 1230 raw_spin_unlock_irqrestore(&which->lock, flags);
1231} 1231}
1232 1232
1233int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id) 1233int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
@@ -1238,19 +1238,19 @@ int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
1238 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); 1238 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1239 BUG(); 1239 BUG();
1240 } 1240 }
1241 1241
1242 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1242 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1243 { 1243 {
1244 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); 1244 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
1245 BUG(); 1245 BUG();
1246 } 1246 }
1247 1247
1248 if(unlikely(!klitirqd_is_ready())) 1248 if(unlikely(!klitirqd_is_ready()))
1249 { 1249 {
1250 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); 1250 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id);
1251 BUG(); 1251 BUG();
1252 } 1252 }
1253 1253
1254 if(likely(!klitirqds[k_id].terminating)) 1254 if(likely(!klitirqds[k_id].terminating))
1255 { 1255 {
1256 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) 1256 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
@@ -1276,41 +1276,41 @@ int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_
1276 u32 old_pending; 1276 u32 old_pending;
1277 1277
1278 BUG_ON(!irqs_disabled()); 1278 BUG_ON(!irqs_disabled());
1279 1279
1280 if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) 1280 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1281 { 1281 {
1282 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); 1282 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1283 BUG(); 1283 BUG();
1284 } 1284 }
1285 1285
1286 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1286 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1287 { 1287 {
1288 TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id); 1288 TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
1289 BUG(); 1289 BUG();
1290 } 1290 }
1291 1291
1292 if(unlikely(!klitirqd_is_ready())) 1292 if(unlikely(!klitirqd_is_ready()))
1293 { 1293 {
1294 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); 1294 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id);
1295 BUG(); 1295 BUG();
1296 } 1296 }
1297 1297
1298 if(likely(!klitirqds[k_id].terminating)) 1298 if(likely(!klitirqds[k_id].terminating))
1299 { 1299 {
1300 raw_spin_lock(&klitirqds[k_id].lock); 1300 raw_spin_lock(&klitirqds[k_id].lock);
1301 1301
1302 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) 1302 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1303 { 1303 {
1304 ret = 1; // success! 1304 ret = 1; // success!
1305 1305
1306 t->next = klitirqds[k_id].pending_tasklets_hi.head; 1306 t->next = klitirqds[k_id].pending_tasklets_hi.head;
1307 klitirqds[k_id].pending_tasklets_hi.head = t; 1307 klitirqds[k_id].pending_tasklets_hi.head = t;
1308 1308
1309 old_pending = klitirqds[k_id].pending; 1309 old_pending = klitirqds[k_id].pending;
1310 klitirqds[k_id].pending |= LIT_TASKLET_HI; 1310 klitirqds[k_id].pending |= LIT_TASKLET_HI;
1311 1311
1312 atomic_inc(&klitirqds[k_id].num_hi_pending); 1312 atomic_inc(&klitirqds[k_id].num_hi_pending);
1313 1313
1314 mb(); 1314 mb();
1315 1315
1316 if(!old_pending) 1316 if(!old_pending)
@@ -1339,22 +1339,22 @@ static void ___litmus_schedule_work(struct work_struct *w,
1339 u32 old_pending; 1339 u32 old_pending;
1340 1340
1341 raw_spin_lock_irqsave(&which->lock, flags); 1341 raw_spin_lock_irqsave(&which->lock, flags);
1342 1342
1343 work_pending(w); 1343 work_pending(w);
1344 list_add_tail(&w->entry, &which->worklist); 1344 list_add_tail(&w->entry, &which->worklist);
1345 1345
1346 old_pending = which->pending; 1346 old_pending = which->pending;
1347 which->pending |= LIT_WORK; 1347 which->pending |= LIT_WORK;
1348 1348
1349 atomic_inc(&which->num_work_pending); 1349 atomic_inc(&which->num_work_pending);
1350 1350
1351 mb(); 1351 mb();
1352 1352
1353 if(!old_pending && wakeup) 1353 if(!old_pending && wakeup)
1354 { 1354 {
1355 wakeup_litirqd_locked(which); /* wakeup the klitirqd */ 1355 wakeup_litirqd_locked(which); /* wakeup the klitirqd */
1356 } 1356 }
1357 1357
1358 raw_spin_unlock_irqrestore(&which->lock, flags); 1358 raw_spin_unlock_irqrestore(&which->lock, flags);
1359} 1359}
1360 1360
@@ -1366,18 +1366,18 @@ int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
1366 TRACE("%s: No owner associated with this work object!\n", __FUNCTION__); 1366 TRACE("%s: No owner associated with this work object!\n", __FUNCTION__);
1367 BUG(); 1367 BUG();
1368 } 1368 }
1369 1369
1370 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1370 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1371 { 1371 {
1372 TRACE("%s: No klitirqd_th%u!\n", k_id); 1372 TRACE("%s: No klitirqd_th%u!\n", k_id);
1373 BUG(); 1373 BUG();
1374 } 1374 }
1375 1375
1376 if(unlikely(!klitirqd_is_ready())) 1376 if(unlikely(!klitirqd_is_ready()))
1377 { 1377 {
1378 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); 1378 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id);
1379 BUG(); 1379 BUG();
1380 } 1380 }
1381 1381
1382 if(likely(!klitirqds[k_id].terminating)) 1382 if(likely(!klitirqds[k_id].terminating))
1383 ___litmus_schedule_work(w, &klitirqds[k_id], 1); 1383 ___litmus_schedule_work(w, &klitirqds[k_id], 1);
@@ -1485,13 +1485,13 @@ void release_klitirqd_lock(struct task_struct* t)
1485 { 1485 {
1486 struct mutex* sem; 1486 struct mutex* sem;
1487 struct task_struct* owner = t; 1487 struct task_struct* owner = t;
1488 1488
1489 if(t->state == TASK_RUNNING) 1489 if(t->state == TASK_RUNNING)
1490 { 1490 {
1491 TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n"); 1491 TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n");
1492 return; 1492 return;
1493 } 1493 }
1494 1494
1495 if(likely(!tsk_rt(t)->is_proxy_thread)) 1495 if(likely(!tsk_rt(t)->is_proxy_thread))
1496 { 1496 {
1497 sem = &tsk_rt(t)->klitirqd_sem; 1497 sem = &tsk_rt(t)->klitirqd_sem;
@@ -1510,7 +1510,7 @@ void release_klitirqd_lock(struct task_struct* t)
1510 else 1510 else
1511 { 1511 {
1512 BUG(); 1512 BUG();
1513 1513
1514 // We had the rug pulled out from under us. Abort attempt 1514 // We had the rug pulled out from under us. Abort attempt
1515 // to reacquire the lock since our client no longer needs us. 1515 // to reacquire the lock since our client no longer needs us.
1516 TRACE_CUR("HUH?! How did this happen?\n"); 1516 TRACE_CUR("HUH?! How did this happen?\n");
@@ -1518,7 +1518,7 @@ void release_klitirqd_lock(struct task_struct* t)
1518 return; 1518 return;
1519 } 1519 }
1520 } 1520 }
1521 1521
1522 //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid); 1522 //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid);
1523 up_and_set_stat(t, NEED_TO_REACQUIRE, sem); 1523 up_and_set_stat(t, NEED_TO_REACQUIRE, sem);
1524 //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid); 1524 //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid);
@@ -1539,17 +1539,17 @@ int reacquire_klitirqd_lock(struct task_struct* t)
1539 { 1539 {
1540 struct mutex* sem; 1540 struct mutex* sem;
1541 struct task_struct* owner = t; 1541 struct task_struct* owner = t;
1542 1542
1543 if(likely(!tsk_rt(t)->is_proxy_thread)) 1543 if(likely(!tsk_rt(t)->is_proxy_thread))
1544 { 1544 {
1545 sem = &tsk_rt(t)->klitirqd_sem; 1545 sem = &tsk_rt(t)->klitirqd_sem;
1546 } 1546 }
1547 else 1547 else
1548 { 1548 {
1549 unsigned int k_id = klitirqd_id(t); 1549 unsigned int k_id = klitirqd_id(t);
1550 //struct task_struct* owner = klitirqds[k_id].current_owner; 1550 //struct task_struct* owner = klitirqds[k_id].current_owner;
1551 owner = klitirqds[k_id].current_owner; 1551 owner = klitirqds[k_id].current_owner;
1552 1552
1553 BUG_ON(t != klitirqds[k_id].klitirqd); 1553 BUG_ON(t != klitirqds[k_id].klitirqd);
1554 1554
1555 if(likely(owner)) 1555 if(likely(owner))
@@ -1565,7 +1565,7 @@ int reacquire_klitirqd_lock(struct task_struct* t)
1565 return(0); 1565 return(0);
1566 } 1566 }
1567 } 1567 }
1568 1568
1569 //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid); 1569 //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid);
1570 __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem); 1570 __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem);
1571 //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid); 1571 //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid);
diff --git a/litmus/locking.c b/litmus/locking.c
index fd3c7260319f..e754b2fa2634 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -57,7 +57,7 @@ static int create_generic_lock(void** obj_ref, obj_type_t type, void* __user ar
57 "most uses. (exception: IKGLP donors)\n"); 57 "most uses. (exception: IKGLP donors)\n");
58 } 58 }
59#endif 59#endif
60 lock->type = type; 60 lock->type = type;
61 lock->ident = atomic_inc_return(&lock_id_gen); 61 lock->ident = atomic_inc_return(&lock_id_gen);
62 *obj_ref = lock; 62 *obj_ref = lock;
63 } 63 }
@@ -300,14 +300,14 @@ static long do_litmus_dgl_lock(dgl_wait_state_t *dgl_wait)
300 300
301 TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n", 301 TRACE_CUR("As many as %d locks in DGL are pending. Suspending.\n",
302 dgl_wait->nr_remaining); 302 dgl_wait->nr_remaining);
303 303
304#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) 304#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 305	 // KLUDGE: don't count this suspension as time in the gpu	 305	 // KLUDGE: don't count this suspension as time in the gpu
306 // critical section 306 // critical section
307 if(tsk_rt(dgl_wait->task)->held_gpus) { 307 if(tsk_rt(dgl_wait->task)->held_gpus) {
308 tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1; 308 tsk_rt(dgl_wait->task)->suspend_gpu_tracker_on_block = 1;
309 } 309 }
310#endif 310#endif
311 311
312 // note reverse order. see comments in select_next_lock for reason. 312 // note reverse order. see comments in select_next_lock for reason.
313 for(i = dgl_wait->size - 1; i >= 0; --i) { 313 for(i = dgl_wait->size - 1; i >= 0; --i) {
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index fd6398121fbf..889313c854e6 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -41,10 +41,10 @@ typedef struct
41{ 41{
42 void *priv; /* private data */ 42 void *priv; /* private data */
43 void *os_state; /* os-specific device state */ 43 void *os_state; /* os-specific device state */
44 44
45 int rmInitialized; 45 int rmInitialized;
46 int flags; 46 int flags;
47 47
48 /* PCI config info */ 48 /* PCI config info */
49 NvU32 domain; 49 NvU32 domain;
50 NvU16 bus; 50 NvU16 bus;
@@ -54,31 +54,31 @@ typedef struct
54 NvU16 subsystem_id; 54 NvU16 subsystem_id;
55 NvU32 gpu_id; 55 NvU32 gpu_id;
56 void *handle; 56 void *handle;
57 57
58 NvU32 pci_cfg_space[16]; 58 NvU32 pci_cfg_space[16];
59 59
60 /* physical characteristics */ 60 /* physical characteristics */
61 litmus_nv_aperture_t bars[3]; 61 litmus_nv_aperture_t bars[3];
62 litmus_nv_aperture_t *regs; 62 litmus_nv_aperture_t *regs;
63 litmus_nv_aperture_t *fb, ud; 63 litmus_nv_aperture_t *fb, ud;
64 litmus_nv_aperture_t agp; 64 litmus_nv_aperture_t agp;
65 65
66 NvU32 interrupt_line; 66 NvU32 interrupt_line;
67 67
68 NvU32 agp_config; 68 NvU32 agp_config;
69 NvU32 agp_status; 69 NvU32 agp_status;
70 70
71 NvU32 primary_vga; 71 NvU32 primary_vga;
72 72
73 NvU32 sim_env; 73 NvU32 sim_env;
74 74
75 NvU32 rc_timer_enabled; 75 NvU32 rc_timer_enabled;
76 76
77 /* list of events allocated for this device */ 77 /* list of events allocated for this device */
78 void *event_list; 78 void *event_list;
79 79
80 void *kern_mappings; 80 void *kern_mappings;
81 81
82} litmus_nv_state_t; 82} litmus_nv_state_t;
83 83
84typedef struct work_struct litmus_nv_task_t; 84typedef struct work_struct litmus_nv_task_t;
@@ -91,11 +91,11 @@ typedef struct litmus_nv_work_s {
91typedef struct litmus_nv_linux_state_s { 91typedef struct litmus_nv_linux_state_s {
92 litmus_nv_state_t nv_state; 92 litmus_nv_state_t nv_state;
93 atomic_t usage_count; 93 atomic_t usage_count;
94 94
95 struct pci_dev *dev; 95 struct pci_dev *dev;
96 void *agp_bridge; 96 void *agp_bridge;
97 void *alloc_queue; 97 void *alloc_queue;
98 98
99 void *timer_sp; 99 void *timer_sp;
100 void *isr_sp; 100 void *isr_sp;
101 void *pci_cfgchk_sp; 101 void *pci_cfgchk_sp;
@@ -108,29 +108,29 @@ typedef struct litmus_nv_linux_state_s {
 108	 /* keep track of any pending bottom halves */	 108	 /* keep track of any pending bottom halves */
109 struct tasklet_struct tasklet; 109 struct tasklet_struct tasklet;
110 litmus_nv_work_t work; 110 litmus_nv_work_t work;
111 111
112 /* get a timer callback every second */ 112 /* get a timer callback every second */
113 struct timer_list rc_timer; 113 struct timer_list rc_timer;
114 114
115 /* lock for linux-specific data, not used by core rm */ 115 /* lock for linux-specific data, not used by core rm */
116 struct semaphore ldata_lock; 116 struct semaphore ldata_lock;
117 117
118 /* lock for linux-specific alloc queue */ 118 /* lock for linux-specific alloc queue */
119 struct semaphore at_lock; 119 struct semaphore at_lock;
120 120
121#if 0 121#if 0
122#if defined(NV_USER_MAP) 122#if defined(NV_USER_MAP)
123 /* list of user mappings */ 123 /* list of user mappings */
124 struct nv_usermap_s *usermap_list; 124 struct nv_usermap_s *usermap_list;
125 125
126 /* lock for VMware-specific mapping list */ 126 /* lock for VMware-specific mapping list */
127 struct semaphore mt_lock; 127 struct semaphore mt_lock;
128#endif /* defined(NV_USER_MAP) */ 128#endif /* defined(NV_USER_MAP) */
129#if defined(NV_PM_SUPPORT_OLD_STYLE_APM) 129#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
130 void *apm_nv_dev; 130 void *apm_nv_dev;
131#endif 131#endif
132#endif 132#endif
133 133
134 NvU32 device_num; 134 NvU32 device_num;
135 struct litmus_nv_linux_state_s *next; 135 struct litmus_nv_linux_state_s *next;
136} litmus_nv_linux_state_t; 136} litmus_nv_linux_state_t;
@@ -140,9 +140,9 @@ void dump_nvidia_info(const struct tasklet_struct *t)
140 litmus_nv_state_t* nvstate = NULL; 140 litmus_nv_state_t* nvstate = NULL;
141 litmus_nv_linux_state_t* linuxstate = NULL; 141 litmus_nv_linux_state_t* linuxstate = NULL;
142 struct pci_dev* pci = NULL; 142 struct pci_dev* pci = NULL;
143 143
144 nvstate = (litmus_nv_state_t*)(t->data); 144 nvstate = (litmus_nv_state_t*)(t->data);
145 145
146 if(nvstate) 146 if(nvstate)
147 { 147 {
148 TRACE("NV State:\n" 148 TRACE("NV State:\n"
@@ -170,21 +170,21 @@ void dump_nvidia_info(const struct tasklet_struct *t)
170 nvstate->subsystem_id, 170 nvstate->subsystem_id,
171 nvstate->gpu_id, 171 nvstate->gpu_id,
172 nvstate->interrupt_line); 172 nvstate->interrupt_line);
173 173
174 linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state); 174 linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state);
175 } 175 }
176 else 176 else
177 { 177 {
178 TRACE("INVALID NVSTATE????\n"); 178 TRACE("INVALID NVSTATE????\n");
179 } 179 }
180 180
181 if(linuxstate) 181 if(linuxstate)
182 { 182 {
183 int ls_offset = (void*)(&(linuxstate->device_num)) - (void*)(linuxstate); 183 int ls_offset = (void*)(&(linuxstate->device_num)) - (void*)(linuxstate);
184 int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state)); 184 int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state));
185 int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); 185 int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate);
186 186
187 187
188 TRACE("LINUX NV State:\n" 188 TRACE("LINUX NV State:\n"
189 "\tlinux nv state ptr: %p\n" 189 "\tlinux nv state ptr: %p\n"
190 "\taddress of tasklet: %p\n" 190 "\taddress of tasklet: %p\n"
@@ -200,9 +200,9 @@ void dump_nvidia_info(const struct tasklet_struct *t)
200 linuxstate->device_num, 200 linuxstate->device_num,
201 (t == &(linuxstate->tasklet)), 201 (t == &(linuxstate->tasklet)),
202 linuxstate->dev); 202 linuxstate->dev);
203 203
204 pci = linuxstate->dev; 204 pci = linuxstate->dev;
205 205
206 TRACE("Offsets:\n" 206 TRACE("Offsets:\n"
207 "\tOffset from LinuxState: %d, %x\n" 207 "\tOffset from LinuxState: %d, %x\n"
208 "\tOffset from NVState: %d, %x\n" 208 "\tOffset from NVState: %d, %x\n"
@@ -249,7 +249,7 @@ int init_nvidia_info(void)
249{ 249{
250 mutex_lock(&module_mutex); 250 mutex_lock(&module_mutex);
251 nvidia_mod = find_module("nvidia"); 251 nvidia_mod = find_module("nvidia");
252 mutex_unlock(&module_mutex); 252 mutex_unlock(&module_mutex);
253 if(nvidia_mod != NULL) 253 if(nvidia_mod != NULL)
254 { 254 {
255 TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__, 255 TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__,
@@ -280,7 +280,7 @@ int is_nvidia_func(void* func_addr)
280 __FUNCTION__, func_addr, ret); 280 __FUNCTION__, func_addr, ret);
281 }*/ 281 }*/
282 } 282 }
283 283
284 return(ret); 284 return(ret);
285} 285}
286 286
@@ -303,7 +303,7 @@ u32 get_tasklet_nv_device_num(const struct tasklet_struct *t)
303 303
304 void* state = (void*)(t->data); 304 void* state = (void*)(t->data);
305 void* device_num_ptr = state + DEVICE_NUM_OFFSET; 305 void* device_num_ptr = state + DEVICE_NUM_OFFSET;
306 306
307 //dump_nvidia_info(t); 307 //dump_nvidia_info(t);
308 return(*((u32*)device_num_ptr)); 308 return(*((u32*)device_num_ptr));
309#endif 309#endif
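
get_tasklet_nv_device_num() above recovers the device number by adding a precomputed byte offset (DEVICE_NUM_OFFSET) to the opaque state pointer stored in tasklet->data; the offsets printed by dump_nvidia_info() exist to sanity-check that arithmetic. The same idea in plain C with offsetof(), using a stand-in outer/inner struct pair rather than the real NVIDIA state layout.

#include <stdio.h>
#include <stddef.h>

struct nv_state { int flags; };                   /* embedded "inner" state */
struct linux_state {
    struct nv_state nv_state;
    unsigned int    device_num;
};

/* recover device_num from a pointer to the embedded nv_state */
static unsigned int device_num_from_nv(void *nv)
{
    /* byte distance between the embedded member and device_num */
    ptrdiff_t off = offsetof(struct linux_state, device_num) -
                    offsetof(struct linux_state, nv_state);
    return *(unsigned int *)((char *)nv + off);
}

int main(void)
{
    struct linux_state s = { { 0 }, 3 };
    printf("device %u\n", device_num_from_nv(&s.nv_state));
    return 0;
}
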
@@ -334,14 +334,14 @@ static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM];
334int init_nv_device_reg(void) 334int init_nv_device_reg(void)
335{ 335{
336 int i; 336 int i;
337 337
338 memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); 338 memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG));
339 339
340 for(i = 0; i < NV_DEVICE_NUM; ++i) 340 for(i = 0; i < NV_DEVICE_NUM; ++i)
341 { 341 {
342 raw_spin_lock_init(&NV_DEVICE_REG[i].lock); 342 raw_spin_lock_init(&NV_DEVICE_REG[i].lock);
343 } 343 }
344 344
345 return(1); 345 return(1);
346} 346}
347 347
@@ -360,7 +360,7 @@ int get_nv_device_id(struct task_struct* owner)
360 if(NV_DEVICE_REG[i].device_owner == owner) 360 if(NV_DEVICE_REG[i].device_owner == owner)
361 return(i); 361 return(i);
362 } 362 }
363 return(-1); 363 return(-1);
364} 364}
365*/ 365*/
366 366
@@ -380,42 +380,42 @@ void pai_check_priority_increase(struct task_struct *t, int reg_device_id)
380{ 380{
381 unsigned long flags; 381 unsigned long flags;
382 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; 382 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
383 383
384 if(reg->max_prio_owner != t) { 384 if(reg->max_prio_owner != t) {
385 385
386 raw_spin_lock_irqsave(&reg->lock, flags); 386 raw_spin_lock_irqsave(&reg->lock, flags);
387 387
388 if(reg->max_prio_owner != t) { 388 if(reg->max_prio_owner != t) {
389 if(litmus->compare(t, reg->max_prio_owner)) { 389 if(litmus->compare(t, reg->max_prio_owner)) {
390 litmus->change_prio_pai_tasklet(reg->max_prio_owner, t); 390 litmus->change_prio_pai_tasklet(reg->max_prio_owner, t);
391 reg->max_prio_owner = t; 391 reg->max_prio_owner = t;
392 } 392 }
393 } 393 }
394 394
395 raw_spin_unlock_irqrestore(&reg->lock, flags); 395 raw_spin_unlock_irqrestore(&reg->lock, flags);
396 } 396 }
397} 397}
398 398
399 399
400void pai_check_priority_decrease(struct task_struct *t, int reg_device_id) 400void pai_check_priority_decrease(struct task_struct *t, int reg_device_id)
401{ 401{
402 unsigned long flags; 402 unsigned long flags;
403 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; 403 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
404 404
405 if(reg->max_prio_owner == t) { 405 if(reg->max_prio_owner == t) {
406 406
407 raw_spin_lock_irqsave(&reg->lock, flags); 407 raw_spin_lock_irqsave(&reg->lock, flags);
408 408
409 if(reg->max_prio_owner == t) { 409 if(reg->max_prio_owner == t) {
410 reg->max_prio_owner = find_hp_owner(reg, NULL); 410 reg->max_prio_owner = find_hp_owner(reg, NULL);
411 if(reg->max_prio_owner != t) { 411 if(reg->max_prio_owner != t) {
412 litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); 412 litmus->change_prio_pai_tasklet(t, reg->max_prio_owner);
413 } 413 }
414 } 414 }
415 415
416 raw_spin_unlock_irqrestore(&reg->lock, flags); 416 raw_spin_unlock_irqrestore(&reg->lock, flags);
417 } 417 }
418} 418}
419#endif 419#endif
420 420
421static int __reg_nv_device(int reg_device_id, struct task_struct *t) 421static int __reg_nv_device(int reg_device_id, struct task_struct *t)
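
pai_check_priority_increase() and pai_check_priority_decrease() above use a double-checked pattern: test max_prio_owner without the lock, and only when that cheap test suggests a change take the registry lock and test again before updating. A minimal sketch of that check, lock, re-check shape; higher_prio() and the registry struct are illustrative, not the LITMUS comparison API.

#include <pthread.h>
#include <stdio.h>

struct task { int prio; };
struct registry {
    pthread_mutex_t lock;
    struct task *max_prio_owner;
};

static int higher_prio(struct task *a, struct task *b)
{
    return b == NULL || a->prio > b->prio;
}

static void check_priority_increase(struct registry *reg, struct task *t)
{
    if (reg->max_prio_owner != t) {          /* cheap unlocked test */
        pthread_mutex_lock(&reg->lock);
        if (reg->max_prio_owner != t &&      /* re-test under the lock */
            higher_prio(t, reg->max_prio_owner)) {
            reg->max_prio_owner = t;
            printf("new max-priority owner: prio %d\n", t->prio);
        }
        pthread_mutex_unlock(&reg->lock);
    }
}

int main(void)
{
    struct registry reg = { PTHREAD_MUTEX_INITIALIZER, NULL };
    struct task a = { 1 }, b = { 5 };
    check_priority_increase(&reg, &a);
    check_priority_increase(&reg, &b);
    check_priority_increase(&reg, &b);       /* no-op: already the owner */
    return 0;
}
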
@@ -430,30 +430,30 @@ static int __reg_nv_device(int reg_device_id, struct task_struct *t)
 430	 // TODO: check if task is already registered.	 430	 // TODO: check if task is already registered.
431 return ret; // assume already registered. 431 return ret; // assume already registered.
432 } 432 }
433 433
434 raw_spin_lock_irqsave(&reg->lock, flags); 434 raw_spin_lock_irqsave(&reg->lock, flags);
435 435
436 if(reg->nr_owners < NV_MAX_SIMULT_USERS) { 436 if(reg->nr_owners < NV_MAX_SIMULT_USERS) {
437 TRACE_TASK(t, "registers GPU %d\n", reg_device_id); 437 TRACE_TASK(t, "registers GPU %d\n", reg_device_id);
438 for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { 438 for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) {
439 if(reg->owners[i] == NULL) { 439 if(reg->owners[i] == NULL) {
440 reg->owners[i] = t; 440 reg->owners[i] = t;
441 441
442 //if(edf_higher_prio(t, reg->max_prio_owner)) { 442 //if(edf_higher_prio(t, reg->max_prio_owner)) {
443 if(litmus->compare(t, reg->max_prio_owner)) { 443 if(litmus->compare(t, reg->max_prio_owner)) {
444 old_max = reg->max_prio_owner; 444 old_max = reg->max_prio_owner;
445 reg->max_prio_owner = t; 445 reg->max_prio_owner = t;
446 446
447#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 447#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
448 litmus->change_prio_pai_tasklet(old_max, t); 448 litmus->change_prio_pai_tasklet(old_max, t);
449#endif 449#endif
450 } 450 }
451 451
452#ifdef CONFIG_LITMUS_SOFTIRQD 452#ifdef CONFIG_LITMUS_SOFTIRQD
453 down_and_set_stat(t, HELD, &tsk_rt(t)->klitirqd_sem); 453 down_and_set_stat(t, HELD, &tsk_rt(t)->klitirqd_sem);
454#endif 454#endif
455 ++(reg->nr_owners); 455 ++(reg->nr_owners);
456 456
457 break; 457 break;
458 } 458 }
459 } 459 }
@@ -461,13 +461,13 @@ static int __reg_nv_device(int reg_device_id, struct task_struct *t)
461 else 461 else
462 { 462 {
463 TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); 463 TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id);
464 ret = -EBUSY; 464 ret = -EBUSY;
465 } 465 }
466 466
467 raw_spin_unlock_irqrestore(&reg->lock, flags); 467 raw_spin_unlock_irqrestore(&reg->lock, flags);
468 468
469 __set_bit(reg_device_id, &tsk_rt(t)->held_gpus); 469 __set_bit(reg_device_id, &tsk_rt(t)->held_gpus);
470 470
471 return(ret); 471 return(ret);
472} 472}
473 473
@@ -476,43 +476,43 @@ static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t)
476 int ret = 0; 476 int ret = 0;
477 int i; 477 int i;
478 unsigned long flags; 478 unsigned long flags;
479 nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; 479 nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id];
480 480
481#ifdef CONFIG_LITMUS_SOFTIRQD 481#ifdef CONFIG_LITMUS_SOFTIRQD
482 struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id); 482 struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id);
483#endif 483#endif
484 484
485 WARN_ON(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)); 485 WARN_ON(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus));
486 486
487 raw_spin_lock_irqsave(&reg->lock, flags); 487 raw_spin_lock_irqsave(&reg->lock, flags);
488 488
489 TRACE_TASK(t, "unregisters GPU %d\n", de_reg_device_id); 489 TRACE_TASK(t, "unregisters GPU %d\n", de_reg_device_id);
490 490
491 for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { 491 for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) {
492 if(reg->owners[i] == t) { 492 if(reg->owners[i] == t) {
493#ifdef CONFIG_LITMUS_SOFTIRQD 493#ifdef CONFIG_LITMUS_SOFTIRQD
494 flush_pending(klitirqd_th, t); 494 flush_pending(klitirqd_th, t);
495#endif 495#endif
496 if(reg->max_prio_owner == t) { 496 if(reg->max_prio_owner == t) {
497 reg->max_prio_owner = find_hp_owner(reg, t); 497 reg->max_prio_owner = find_hp_owner(reg, t);
498#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 498#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
499 litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); 499 litmus->change_prio_pai_tasklet(t, reg->max_prio_owner);
500#endif 500#endif
501 } 501 }
502 502
503#ifdef CONFIG_LITMUS_SOFTIRQD 503#ifdef CONFIG_LITMUS_SOFTIRQD
504 up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klitirqd_sem); 504 up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klitirqd_sem);
505#endif 505#endif
506 506
507 reg->owners[i] = NULL; 507 reg->owners[i] = NULL;
508 --(reg->nr_owners); 508 --(reg->nr_owners);
509 509
510 break; 510 break;
511 } 511 }
512 } 512 }
513 513
514 raw_spin_unlock_irqrestore(&reg->lock, flags); 514 raw_spin_unlock_irqrestore(&reg->lock, flags);
515 515
516 __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus); 516 __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus);
517 517
518 return(ret); 518 return(ret);
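
__reg_nv_device() and __clear_reg_nv_device() above keep two pieces of bookkeeping in sync under the registry lock: a per-device owners[] slot array and a per-task held_gpus bitmask updated with __set_bit()/__clear_bit(). A small sketch of that pairing with a plain unsigned long bitmask; MAX_USERS and the slot scan are illustrative, and locking is omitted.

#include <stdio.h>

#define MAX_USERS 4

struct task { unsigned long held_gpus; };
struct device_reg { struct task *owners[MAX_USERS]; int nr_owners; };

static int reg_device(struct device_reg *reg, int dev, struct task *t)
{
    int i;
    for (i = 0; i < MAX_USERS; ++i) {
        if (reg->owners[i] == NULL) {
            reg->owners[i] = t;
            ++reg->nr_owners;
            t->held_gpus |= 1ul << dev;        /* __set_bit() analogue */
            return 0;
        }
    }
    return -1;                                  /* device full, like -EBUSY */
}

static void unreg_device(struct device_reg *reg, int dev, struct task *t)
{
    int i;
    for (i = 0; i < MAX_USERS; ++i) {
        if (reg->owners[i] == t) {
            reg->owners[i] = NULL;
            --reg->nr_owners;
            t->held_gpus &= ~(1ul << dev);      /* __clear_bit() analogue */
            break;
        }
    }
}

int main(void)
{
    struct device_reg reg = { { NULL }, 0 };
    struct task t = { 0 };
    reg_device(&reg, 2, &t);
    printf("held_gpus=%#lx owners=%d\n", t.held_gpus, reg.nr_owners);
    unreg_device(&reg, 2, &t);
    printf("held_gpus=%#lx owners=%d\n", t.held_gpus, reg.nr_owners);
    return 0;
}
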
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c
index 0a851cd430a7..6a4bb500c4ae 100644
--- a/litmus/rsm_lock.c
+++ b/litmus/rsm_lock.c
@@ -206,7 +206,7 @@ int rsm_mutex_lock(struct litmus_lock* l)
206 206
207 if (mutex->owner) { 207 if (mutex->owner) {
208 TRACE_TASK(t, "Blocking on lock %d.\n", l->ident); 208 TRACE_TASK(t, "Blocking on lock %d.\n", l->ident);
209 209
210#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA) 210#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
 211	 // KLUDGE: don't count this suspension as time in the gpu	 211	 // KLUDGE: don't count this suspension as time in the gpu
212 // critical section 212 // critical section
@@ -214,7 +214,7 @@ int rsm_mutex_lock(struct litmus_lock* l)
214 tsk_rt(t)->suspend_gpu_tracker_on_block = 1; 214 tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
215 } 215 }
216#endif 216#endif
217 217
218 /* resource is not free => must suspend and wait */ 218 /* resource is not free => must suspend and wait */
219 219
220 owner = mutex->owner; 220 owner = mutex->owner;
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index a55fc894340d..132c44a43564 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -131,7 +131,7 @@ typedef struct clusterdomain {
131 131
132#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 132#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
133 struct tasklet_head pending_tasklets; 133 struct tasklet_head pending_tasklets;
134#endif 134#endif
135} cedf_domain_t; 135} cedf_domain_t;
136 136
137/* a cedf_domain per cluster; allocation is done at init/activation time */ 137/* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -442,7 +442,7 @@ static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flush
442 if(tasklet->owner) { 442 if(tasklet->owner) {
443 sched_trace_tasklet_begin(tasklet->owner); 443 sched_trace_tasklet_begin(tasklet->owner);
444 } 444 }
445 445
446 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) 446 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
447 { 447 {
448 BUG(); 448 BUG();
@@ -453,7 +453,7 @@ static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flush
453 (tasklet->owner) ? 0 : 1); 453 (tasklet->owner) ? 0 : 1);
454 tasklet->func(tasklet->data); 454 tasklet->func(tasklet->data);
455 tasklet_unlock(tasklet); 455 tasklet_unlock(tasklet);
456 456
457 if(tasklet->owner) { 457 if(tasklet->owner) {
458 sched_trace_tasklet_end(tasklet->owner, flushed); 458 sched_trace_tasklet_end(tasklet->owner, flushed);
459 } 459 }
@@ -469,20 +469,20 @@ static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
469 // lazy flushing. 469 // lazy flushing.
470 // just change ownership to NULL and let an idle processor 470 // just change ownership to NULL and let an idle processor
471 // take care of it. :P 471 // take care of it. :P
472 472
473 struct tasklet_struct* step; 473 struct tasklet_struct* step;
474 unsigned long flags; 474 unsigned long flags;
475 475
476 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 476 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
477 477
478 for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) { 478 for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
479 if(step->owner == task) { 479 if(step->owner == task) {
480 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid); 480 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
481 step->owner = NULL; 481 step->owner = NULL;
482 } 482 }
483 } 483 }
484 484
485 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); 485 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
486} 486}
487 487
488 488
@@ -491,18 +491,18 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
491 int work_to_do = 1; 491 int work_to_do = 1;
492 struct tasklet_struct *tasklet = NULL; 492 struct tasklet_struct *tasklet = NULL;
493 unsigned long flags; 493 unsigned long flags;
494 494
495 while(work_to_do) { 495 while(work_to_do) {
496 496
497 TS_NV_SCHED_BOTISR_START; 497 TS_NV_SCHED_BOTISR_START;
498 498
499 // remove tasklet at head of list if it has higher priority. 499 // remove tasklet at head of list if it has higher priority.
500 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 500 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
501 501
502 if(cluster->pending_tasklets.head != NULL) { 502 if(cluster->pending_tasklets.head != NULL) {
503 // remove tasklet at head. 503 // remove tasklet at head.
504 tasklet = cluster->pending_tasklets.head; 504 tasklet = cluster->pending_tasklets.head;
505 505
506 if(edf_higher_prio(tasklet->owner, sched_task)) { 506 if(edf_higher_prio(tasklet->owner, sched_task)) {
507 507
508 if(NULL == tasklet->next) { 508 if(NULL == tasklet->next) {
@@ -526,12 +526,12 @@ static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_ta
526 } 526 }
527 527
528 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); 528 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
529 529
530 TS_NV_SCHED_BOTISR_END; 530 TS_NV_SCHED_BOTISR_END;
531 531
532 if(tasklet) { 532 if(tasklet) {
533 __do_lit_tasklet(tasklet, 0ul); 533 __do_lit_tasklet(tasklet, 0ul);
534 tasklet = NULL; 534 tasklet = NULL;
535 } 535 }
536 else { 536 else {
537 work_to_do = 0; 537 work_to_do = 0;
@@ -562,7 +562,7 @@ static void run_tasklets(struct task_struct* sched_task)
562static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster) 562static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
563{ 563{
564 struct tasklet_struct* step; 564 struct tasklet_struct* step;
565 565
566 tasklet->next = NULL; // make sure there are no old values floating around 566 tasklet->next = NULL; // make sure there are no old values floating around
567 567
568 step = cluster->pending_tasklets.head; 568 step = cluster->pending_tasklets.head;
@@ -570,13 +570,13 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* clu
570 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); 570 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
571 // insert at tail. 571 // insert at tail.
572 *(cluster->pending_tasklets.tail) = tasklet; 572 *(cluster->pending_tasklets.tail) = tasklet;
573 cluster->pending_tasklets.tail = &(tasklet->next); 573 cluster->pending_tasklets.tail = &(tasklet->next);
574 } 574 }
575 else if((*(cluster->pending_tasklets.tail) != NULL) && 575 else if((*(cluster->pending_tasklets.tail) != NULL) &&
576 edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) { 576 edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
577 // insert at tail. 577 // insert at tail.
578 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); 578 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
579 579
580 *(cluster->pending_tasklets.tail) = tasklet; 580 *(cluster->pending_tasklets.tail) = tasklet;
581 cluster->pending_tasklets.tail = &(tasklet->next); 581 cluster->pending_tasklets.tail = &(tasklet->next);
582 } 582 }
@@ -589,9 +589,9 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* clu
589 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { 589 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
590 step = step->next; 590 step = step->next;
591 } 591 }
592 592
593 // insert tasklet right before step->next. 593 // insert tasklet right before step->next.
594 594
595 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, 595 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
596 tasklet->owner->pid, 596 tasklet->owner->pid,
597 (step->owner) ? 597 (step->owner) ?
@@ -602,7 +602,7 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* clu
602 step->next->owner->pid : 602 step->next->owner->pid :
603 -1) : 603 -1) :
604 -1); 604 -1);
605 605
606 tasklet->next = step->next; 606 tasklet->next = step->next;
607 step->next = tasklet; 607 step->next = tasklet;
608 608
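
__add_pai_tasklet() above keeps the cluster's PAI tasklet list sorted by owner priority: the empty and lowest-priority cases append through the tail pointer, and otherwise the code walks step->next until the new owner outranks the successor and splices in there. A self-contained pointer-to-pointer variant of the same ordered insert (the kernel code special-cases the empty and append-at-tail paths explicitly, this sketch folds them into one walk); an integer priority stands in for edf_higher_prio().

#include <stdio.h>

struct tasklet { struct tasklet *next; int prio; };
struct tasklet_head { struct tasklet *head; struct tasklet **tail; };

static int higher_prio(const struct tasklet *a, const struct tasklet *b)
{
    return a->prio > b->prio;
}

static void add_sorted(struct tasklet_head *q, struct tasklet *t)
{
    struct tasklet **pp = &q->head;

    /* walk past entries whose owner still outranks the new tasklet */
    while (*pp && higher_prio(*pp, t))
        pp = &(*pp)->next;
    t->next = *pp;
    *pp = t;
    if (t->next == NULL)
        q->tail = &t->next;     /* appended at the end: fix the tail pointer */
}

int main(void)
{
    struct tasklet_head q = { NULL, &q.head };
    struct tasklet a = { NULL, 3 }, b = { NULL, 5 }, c = { NULL, 1 };
    struct tasklet *s;
    add_sorted(&q, &a);
    add_sorted(&q, &b);
    add_sorted(&q, &c);
    for (s = q.head; s; s = s->next)
        printf("prio %d\n", s->prio);   /* expect 5, 3, 1 */
    return 0;
}
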
@@ -623,23 +623,23 @@ static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
623 int runLocal = 0; 623 int runLocal = 0;
624 int runNow = 0; 624 int runNow = 0;
625 unsigned long flags; 625 unsigned long flags;
626 626
627 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) 627 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
628 { 628 {
629 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); 629 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
630 return 0; 630 return 0;
631 } 631 }
632 632
633 cluster = task_cpu_cluster(tasklet->owner); 633 cluster = task_cpu_cluster(tasklet->owner);
634 634
635 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 635 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
636 636
637 thisCPU = smp_processor_id(); 637 thisCPU = smp_processor_id();
638 638
639#ifdef CONFIG_SCHED_CPU_AFFINITY 639#ifdef CONFIG_SCHED_CPU_AFFINITY
640 { 640 {
641 cpu_entry_t* affinity = NULL; 641 cpu_entry_t* affinity = NULL;
642 642
643 // use this CPU if it is in our cluster and isn't running any RT work. 643 // use this CPU if it is in our cluster and isn't running any RT work.
644 if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) { 644 if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) {
645 affinity = &(__get_cpu_var(cedf_cpu_entries)); 645 affinity = &(__get_cpu_var(cedf_cpu_entries));
@@ -648,7 +648,7 @@ static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
648 // this CPU is busy or shouldn't run tasklet in this cluster. 648 // this CPU is busy or shouldn't run tasklet in this cluster.
 649 // look for available nearby CPUs. 649 // look for available nearby CPUs.
650 // NOTE: Affinity towards owner and not this CPU. Is this right? 650 // NOTE: Affinity towards owner and not this CPU. Is this right?
651 affinity = 651 affinity =
652 cedf_get_nearest_available_cpu(cluster, 652 cedf_get_nearest_available_cpu(cluster,
653 &per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner))); 653 &per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner)));
654 } 654 }
@@ -677,15 +677,15 @@ static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
677 runLocal = 0; 677 runLocal = 0;
678 runNow = 0; 678 runNow = 0;
679 } 679 }
680 680
681 if(!runLocal) { 681 if(!runLocal) {
682 // enqueue the tasklet 682 // enqueue the tasklet
683 __add_pai_tasklet(tasklet, cluster); 683 __add_pai_tasklet(tasklet, cluster);
684 } 684 }
685 685
686 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); 686 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
687 687
688 688
689 if (runLocal /*&& runNow */) { // runNow == 1 is implied 689 if (runLocal /*&& runNow */) { // runNow == 1 is implied
690 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); 690 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
691 __do_lit_tasklet(tasklet, 0ul); 691 __do_lit_tasklet(tasklet, 0ul);
@@ -697,7 +697,7 @@ static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
697 else { 697 else {
698 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); 698 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
699 } 699 }
700 700
701 return(1); // success 701 return(1); // success
702} 702}
703 703
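
enqueue_pai_tasklet() reduces to a three-way decision: if the tasklet's owner does not outrank the job linked to the chosen CPU, the tasklet is only queued; if it does outrank it, the tasklet either runs immediately on this CPU or is queued and the target CPU is preempted. A purely illustrative helper capturing that classification (the enum and function names are invented for this sketch):

#include <stdio.h>

/* Hypothetical classification of the enqueue decision sketched above. */
enum pai_action {
	PAI_DEFER,		/* owner does not outrank the target CPU's linked job */
	PAI_RUN_LOCAL,		/* it does, and the target CPU is the current CPU */
	PAI_PREEMPT_REMOTE	/* it does, on another CPU: queue, then preempt that CPU */
};

static enum pai_action classify(int owner_outranks_target, int target_is_this_cpu)
{
	if (!owner_outranks_target)
		return PAI_DEFER;
	return target_is_this_cpu ? PAI_RUN_LOCAL : PAI_PREEMPT_REMOTE;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1, 1),	/* run right away on the receiving CPU */
	       classify(1, 0),	/* enqueue and kick the remote CPU */
	       classify(0, 1));	/* enqueue only; runs when it becomes eligible */
	return 0;
}
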
@@ -965,8 +965,8 @@ static void cedf_task_exit(struct task_struct * t)
965 965
966#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 966#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
967 flush_tasklets(cluster, t); 967 flush_tasklets(cluster, t);
968#endif 968#endif
969 969
970 /* unlink if necessary */ 970 /* unlink if necessary */
971 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 971 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
972 unlink(t); 972 unlink(t);
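
cedf_task_exit() calls flush_tasklets(cluster, t) before unlinking the exiting task; the body of flush_tasklets is not part of this diff, but the underlying list operation, removing every node owned by a given pid while keeping the tail pointer consistent, can be sketched generically (all names here are illustrative and this is not the kernel's implementation):

#include <stddef.h>

struct node {
	int owner_pid;
	struct node *next;
};

struct pending_list {
	struct node *head;
	struct node **tail;	/* address of the last node's next field */
};

/* Unlink every node owned by 'pid'; return a chain of the removed nodes so the
 * caller can still run or free them after dropping the list lock. */
struct node *remove_by_owner(struct pending_list *l, int pid)
{
	struct node **link = &l->head;
	struct node *removed = NULL;

	while (*link) {
		if ((*link)->owner_pid == pid) {
			struct node *victim = *link;

			*link = victim->next;	/* splice the victim out */
			victim->next = removed;
			removed = victim;
		} else {
			link = &(*link)->next;
		}
	}
	l->tail = link;			/* link now addresses the final next field */
	return removed;
}
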
@@ -997,21 +997,21 @@ static long cedf_admit_task(struct task_struct* tsk)
997static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) 997static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
998{ 998{
999 int linked_on; 999 int linked_on;
1000 int check_preempt = 0; 1000 int check_preempt = 0;
1001 1001
1002 cedf_domain_t* cluster = task_cpu_cluster(t); 1002 cedf_domain_t* cluster = task_cpu_cluster(t);
1003 1003
1004 if(prio_inh != NULL) 1004 if(prio_inh != NULL)
1005 TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid); 1005 TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
1006 else 1006 else
1007 TRACE_TASK(t, "inherits priority from %p\n", prio_inh); 1007 TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
1008 1008
1009 sched_trace_eff_prio_change(t, prio_inh); 1009 sched_trace_eff_prio_change(t, prio_inh);
1010 1010
1011 tsk_rt(t)->inh_task = prio_inh; 1011 tsk_rt(t)->inh_task = prio_inh;
1012 1012
1013 linked_on = tsk_rt(t)->linked_on; 1013 linked_on = tsk_rt(t)->linked_on;
1014 1014
1015 /* If it is scheduled, then we need to reorder the CPU heap. */ 1015 /* If it is scheduled, then we need to reorder the CPU heap. */
1016 if (linked_on != NO_CPU) { 1016 if (linked_on != NO_CPU) {
1017 TRACE_TASK(t, "%s: linked on %d\n", 1017 TRACE_TASK(t, "%s: linked on %d\n",
@@ -1029,12 +1029,12 @@ static void __set_priority_inheritance(struct task_struct* t, struct task_struct
1029 raw_spin_lock(&cluster->domain.release_lock); 1029 raw_spin_lock(&cluster->domain.release_lock);
1030 if (is_queued(t)) { 1030 if (is_queued(t)) {
1031 TRACE_TASK(t, "%s: is queued\n", __FUNCTION__); 1031 TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
1032 1032
1033 /* We need to update the position of holder in some 1033 /* We need to update the position of holder in some
 1034 * heap. Note that this could be a release heap if 1034 * heap. Note that this could be a release heap if
1035 * budget enforcement is used and this job overran. */ 1035 * budget enforcement is used and this job overran. */
1036 check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node); 1036 check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);
1037 1037
1038 } else { 1038 } else {
1039 /* Nothing to do: if it is not queued and not linked 1039 /* Nothing to do: if it is not queued and not linked
1040 * then it is either sleeping or currently being moved 1040 * then it is either sleeping or currently being moved
@@ -1044,7 +1044,7 @@ static void __set_priority_inheritance(struct task_struct* t, struct task_struct
1044 TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__); 1044 TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
1045 } 1045 }
1046 raw_spin_unlock(&cluster->domain.release_lock); 1046 raw_spin_unlock(&cluster->domain.release_lock);
1047 1047
1048 /* If holder was enqueued in a release heap, then the following 1048 /* If holder was enqueued in a release heap, then the following
1049 * preemption check is pointless, but we can't easily detect 1049 * preemption check is pointless, but we can't easily detect
1050 * that case. If you want to fix this, then consider that 1050 * that case. If you want to fix this, then consider that
@@ -1065,21 +1065,21 @@ static void __set_priority_inheritance(struct task_struct* t, struct task_struct
1065static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) 1065static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
1066{ 1066{
1067 cedf_domain_t* cluster = task_cpu_cluster(t); 1067 cedf_domain_t* cluster = task_cpu_cluster(t);
1068 1068
1069 raw_spin_lock(&cluster->cluster_lock); 1069 raw_spin_lock(&cluster->cluster_lock);
1070 1070
1071 __set_priority_inheritance(t, prio_inh); 1071 __set_priority_inheritance(t, prio_inh);
1072 1072
1073#ifdef CONFIG_LITMUS_SOFTIRQD 1073#ifdef CONFIG_LITMUS_SOFTIRQD
1074 if(tsk_rt(t)->cur_klitirqd != NULL) 1074 if(tsk_rt(t)->cur_klitirqd != NULL)
1075 { 1075 {
1076 TRACE_TASK(t, "%s/%d inherits a new priority!\n", 1076 TRACE_TASK(t, "%s/%d inherits a new priority!\n",
1077 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); 1077 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1078 1078
1079 __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); 1079 __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
1080 } 1080 }
1081#endif 1081#endif
1082 1082
1083 raw_spin_unlock(&cluster->cluster_lock); 1083 raw_spin_unlock(&cluster->cluster_lock);
1084} 1084}
1085 1085
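
set_priority_inheritance() applies the new effective priority to the task and, when a klitirqd interrupt-handling proxy is currently serving it, pushes the same priority onto that proxy, all under the cluster lock. A stripped-down sketch of that propagate-to-proxy shape (the fake_task type and apply_inheritance() are placeholders, not LITMUS^RT API):

#include <stddef.h>

/* Placeholder task representation for this sketch only. */
struct fake_task {
	struct fake_task *inh_task;	/* whose priority is currently inherited */
	struct fake_task *cur_proxy;	/* proxy thread serving this task's softirqs, if any */
};

static void apply_inheritance(struct fake_task *t, struct fake_task *prio_inh)
{
	t->inh_task = prio_inh;		/* record the donor (NULL clears inheritance) */
	/* a real scheduler would also reposition t in its ready/CPU heaps here */
}

/* Propagate-to-proxy pattern: whatever effective priority the task receives is
 * also pushed onto the proxy thread currently working on its behalf. */
void set_inheritance(struct fake_task *t, struct fake_task *prio_inh)
{
	apply_inheritance(t, prio_inh);
	if (t->cur_proxy != NULL)
		apply_inheritance(t->cur_proxy, prio_inh);
}
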
@@ -1088,13 +1088,13 @@ static void set_priority_inheritance(struct task_struct* t, struct task_struct*
1088static void __clear_priority_inheritance(struct task_struct* t) 1088static void __clear_priority_inheritance(struct task_struct* t)
1089{ 1089{
1090 TRACE_TASK(t, "priority restored\n"); 1090 TRACE_TASK(t, "priority restored\n");
1091 1091
1092 if(tsk_rt(t)->scheduled_on != NO_CPU) 1092 if(tsk_rt(t)->scheduled_on != NO_CPU)
1093 { 1093 {
1094 sched_trace_eff_prio_change(t, NULL); 1094 sched_trace_eff_prio_change(t, NULL);
1095 1095
1096 tsk_rt(t)->inh_task = NULL; 1096 tsk_rt(t)->inh_task = NULL;
1097 1097
1098 /* Check if rescheduling is necessary. We can't use heap_decrease() 1098 /* Check if rescheduling is necessary. We can't use heap_decrease()
1099 * since the priority was effectively lowered. */ 1099 * since the priority was effectively lowered. */
1100 unlink(t); 1100 unlink(t);
@@ -1104,19 +1104,19 @@ static void __clear_priority_inheritance(struct task_struct* t)
1104 { 1104 {
1105 __set_priority_inheritance(t, NULL); 1105 __set_priority_inheritance(t, NULL);
1106 } 1106 }
1107 1107
1108#ifdef CONFIG_LITMUS_SOFTIRQD 1108#ifdef CONFIG_LITMUS_SOFTIRQD
1109 if(tsk_rt(t)->cur_klitirqd != NULL) 1109 if(tsk_rt(t)->cur_klitirqd != NULL)
1110 { 1110 {
1111 TRACE_TASK(t, "%s/%d inheritance set back to owner.\n", 1111 TRACE_TASK(t, "%s/%d inheritance set back to owner.\n",
1112 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); 1112 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1113 1113
1114 if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU) 1114 if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU)
1115 { 1115 {
1116 sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t); 1116 sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t);
1117 1117
1118 tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t; 1118 tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t;
1119 1119
1120 /* Check if rescheduling is necessary. We can't use heap_decrease() 1120 /* Check if rescheduling is necessary. We can't use heap_decrease()
1121 * since the priority was effectively lowered. */ 1121 * since the priority was effectively lowered. */
1122 unlink(tsk_rt(t)->cur_klitirqd); 1122 unlink(tsk_rt(t)->cur_klitirqd);
@@ -1134,7 +1134,7 @@ static void __clear_priority_inheritance(struct task_struct* t)
1134static void clear_priority_inheritance(struct task_struct* t) 1134static void clear_priority_inheritance(struct task_struct* t)
1135{ 1135{
1136 cedf_domain_t* cluster = task_cpu_cluster(t); 1136 cedf_domain_t* cluster = task_cpu_cluster(t);
1137 1137
1138 raw_spin_lock(&cluster->cluster_lock); 1138 raw_spin_lock(&cluster->cluster_lock);
1139 __clear_priority_inheritance(t); 1139 __clear_priority_inheritance(t);
1140 raw_spin_unlock(&cluster->cluster_lock); 1140 raw_spin_unlock(&cluster->cluster_lock);
@@ -1149,11 +1149,11 @@ static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1149 struct task_struct* new_owner) 1149 struct task_struct* new_owner)
1150{ 1150{
1151 cedf_domain_t* cluster = task_cpu_cluster(klitirqd); 1151 cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
1152 1152
1153 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); 1153 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1154 1154
1155 raw_spin_lock(&cluster->cluster_lock); 1155 raw_spin_lock(&cluster->cluster_lock);
1156 1156
1157 if(old_owner != new_owner) 1157 if(old_owner != new_owner)
1158 { 1158 {
1159 if(old_owner) 1159 if(old_owner)
@@ -1161,18 +1161,18 @@ static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1161 // unreachable? 1161 // unreachable?
1162 tsk_rt(old_owner)->cur_klitirqd = NULL; 1162 tsk_rt(old_owner)->cur_klitirqd = NULL;
1163 } 1163 }
1164 1164
1165 TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", 1165 TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
1166 new_owner->comm, new_owner->pid); 1166 new_owner->comm, new_owner->pid);
1167 1167
1168 tsk_rt(new_owner)->cur_klitirqd = klitirqd; 1168 tsk_rt(new_owner)->cur_klitirqd = klitirqd;
1169 } 1169 }
1170 1170
1171 __set_priority_inheritance(klitirqd, 1171 __set_priority_inheritance(klitirqd,
1172 (tsk_rt(new_owner)->inh_task == NULL) ? 1172 (tsk_rt(new_owner)->inh_task == NULL) ?
1173 new_owner : 1173 new_owner :
1174 tsk_rt(new_owner)->inh_task); 1174 tsk_rt(new_owner)->inh_task);
1175 1175
1176 raw_spin_unlock(&cluster->cluster_lock); 1176 raw_spin_unlock(&cluster->cluster_lock);
1177} 1177}
1178 1178
@@ -1181,17 +1181,17 @@ static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1181 struct task_struct* old_owner) 1181 struct task_struct* old_owner)
1182{ 1182{
1183 cedf_domain_t* cluster = task_cpu_cluster(klitirqd); 1183 cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
1184 1184
1185 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); 1185 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1186 1186
1187 raw_spin_lock(&cluster->cluster_lock); 1187 raw_spin_lock(&cluster->cluster_lock);
1188 1188
1189 TRACE_TASK(klitirqd, "priority restored\n"); 1189 TRACE_TASK(klitirqd, "priority restored\n");
1190 1190
1191 if(tsk_rt(klitirqd)->scheduled_on != NO_CPU) 1191 if(tsk_rt(klitirqd)->scheduled_on != NO_CPU)
1192 { 1192 {
1193 tsk_rt(klitirqd)->inh_task = NULL; 1193 tsk_rt(klitirqd)->inh_task = NULL;
1194 1194
1195 /* Check if rescheduling is necessary. We can't use heap_decrease() 1195 /* Check if rescheduling is necessary. We can't use heap_decrease()
1196 * since the priority was effectively lowered. */ 1196 * since the priority was effectively lowered. */
1197 unlink(klitirqd); 1197 unlink(klitirqd);
@@ -1201,9 +1201,9 @@ static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1201 { 1201 {
1202 __set_priority_inheritance(klitirqd, NULL); 1202 __set_priority_inheritance(klitirqd, NULL);
1203 } 1203 }
1204 1204
1205 tsk_rt(old_owner)->cur_klitirqd = NULL; 1205 tsk_rt(old_owner)->cur_klitirqd = NULL;
1206 1206
1207 raw_spin_unlock(&cluster->cluster_lock); 1207 raw_spin_unlock(&cluster->cluster_lock);
1208} 1208}
1209#endif // CONFIG_LITMUS_SOFTIRQD 1209#endif // CONFIG_LITMUS_SOFTIRQD
@@ -1223,9 +1223,9 @@ struct kfmlp_queue
1223struct kfmlp_semaphore 1223struct kfmlp_semaphore
1224{ 1224{
1225 struct litmus_lock litmus_lock; 1225 struct litmus_lock litmus_lock;
1226 1226
1227 spinlock_t lock; 1227 spinlock_t lock;
1228 1228
1229 int num_resources; /* aka k */ 1229 int num_resources; /* aka k */
1230 struct kfmlp_queue *queues; /* array */ 1230 struct kfmlp_queue *queues; /* array */
1231 struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */ 1231 struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
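
This structure is the core of the k-FMLP support: the semaphore holds one FIFO queue per replica of the protected resource plus a cached pointer to the currently shortest queue, so a new request can usually be routed without a full scan. A compact userspace model of that layout (names simplified, waiters reduced to a count):

/* Userspace model of the k-FMLP bookkeeping: one queue per replica of the
 * protected resource, plus a cached pointer to the shortest queue. */
struct replica_queue {
	int owner_pid;		/* current holder, or -1 if this replica is free */
	int hp_waiter_pid;	/* highest-priority waiter, or -1 if none */
	int count;		/* holder plus queued waiters */
};

struct kfmlp_model {
	int num_resources;		/* aka k */
	struct replica_queue *queues;	/* array of k replica queues */
	struct replica_queue *shortest;	/* where new requests are routed first */
};
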
@@ -1258,11 +1258,11 @@ static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
1258{ 1258{
1259 struct list_head *pos; 1259 struct list_head *pos;
1260 struct task_struct *queued, *found = NULL; 1260 struct task_struct *queued, *found = NULL;
1261 1261
1262 list_for_each(pos, &kqueue->wait.task_list) { 1262 list_for_each(pos, &kqueue->wait.task_list) {
1263 queued = (struct task_struct*) list_entry(pos, wait_queue_t, 1263 queued = (struct task_struct*) list_entry(pos, wait_queue_t,
1264 task_list)->private; 1264 task_list)->private;
1265 1265
1266 /* Compare task prios, find high prio task. */ 1266 /* Compare task prios, find high prio task. */
1267 if (queued != skip && edf_higher_prio(queued, found)) 1267 if (queued != skip && edf_higher_prio(queued, found))
1268 found = queued; 1268 found = queued;
@@ -1278,7 +1278,7 @@ static inline struct kfmlp_queue* kfmlp_find_shortest(
1278 // queue list to load-balance across all resources. 1278 // queue list to load-balance across all resources.
1279 struct kfmlp_queue* step = search_start; 1279 struct kfmlp_queue* step = search_start;
1280 struct kfmlp_queue* shortest = sem->shortest_queue; 1280 struct kfmlp_queue* shortest = sem->shortest_queue;
1281 1281
1282 do 1282 do
1283 { 1283 {
1284 step = (step+1 != &sem->queues[sem->num_resources]) ? 1284 step = (step+1 != &sem->queues[sem->num_resources]) ?
@@ -1290,22 +1290,22 @@ static inline struct kfmlp_queue* kfmlp_find_shortest(
1290 break; /* can't get any shorter */ 1290 break; /* can't get any shorter */
1291 } 1291 }
1292 }while(step != search_start); 1292 }while(step != search_start);
1293 1293
1294 return(shortest); 1294 return(shortest);
1295} 1295}
1296 1296
1297static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem) 1297static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1298{ 1298{
1299 /* must hold sem->lock */ 1299 /* must hold sem->lock */
1300 1300
1301 struct kfmlp_queue *my_queue = NULL; 1301 struct kfmlp_queue *my_queue = NULL;
1302 struct task_struct *max_hp = NULL; 1302 struct task_struct *max_hp = NULL;
1303 1303
1304 1304
1305 struct list_head *pos; 1305 struct list_head *pos;
1306 struct task_struct *queued; 1306 struct task_struct *queued;
1307 int i; 1307 int i;
1308 1308
1309 for(i = 0; i < sem->num_resources; ++i) 1309 for(i = 0; i < sem->num_resources; ++i)
1310 { 1310 {
1311 if( (sem->queues[i].count > 1) && 1311 if( (sem->queues[i].count > 1) &&
@@ -1315,11 +1315,11 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1315 my_queue = &sem->queues[i]; 1315 my_queue = &sem->queues[i];
1316 } 1316 }
1317 } 1317 }
1318 1318
1319 if(my_queue) 1319 if(my_queue)
1320 { 1320 {
1321 cedf_domain_t* cluster; 1321 cedf_domain_t* cluster;
1322 1322
1323 max_hp = my_queue->hp_waiter; 1323 max_hp = my_queue->hp_waiter;
1324 BUG_ON(!max_hp); 1324 BUG_ON(!max_hp);
1325 1325
@@ -1327,9 +1327,9 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1327 kfmlp_get_idx(sem, my_queue), 1327 kfmlp_get_idx(sem, my_queue),
1328 max_hp->comm, max_hp->pid, 1328 max_hp->comm, max_hp->pid,
1329 kfmlp_get_idx(sem, my_queue)); 1329 kfmlp_get_idx(sem, my_queue));
1330 1330
1331 my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp); 1331 my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
1332 1332
1333 /* 1333 /*
1334 if(my_queue->hp_waiter) 1334 if(my_queue->hp_waiter)
1335 TRACE_CUR("queue %d: new hp_waiter is %s/%d\n", 1335 TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
@@ -1340,11 +1340,11 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1340 TRACE_CUR("queue %d: new hp_waiter is %p\n", 1340 TRACE_CUR("queue %d: new hp_waiter is %p\n",
1341 kfmlp_get_idx(sem, my_queue), NULL); 1341 kfmlp_get_idx(sem, my_queue), NULL);
1342 */ 1342 */
1343 1343
1344 cluster = task_cpu_cluster(max_hp); 1344 cluster = task_cpu_cluster(max_hp);
1345 1345
1346 raw_spin_lock(&cluster->cluster_lock); 1346 raw_spin_lock(&cluster->cluster_lock);
1347 1347
1348 /* 1348 /*
1349 if(my_queue->owner) 1349 if(my_queue->owner)
1350 TRACE_CUR("queue %d: owner is %s/%d\n", 1350 TRACE_CUR("queue %d: owner is %s/%d\n",
@@ -1356,7 +1356,7 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1356 kfmlp_get_idx(sem, my_queue), 1356 kfmlp_get_idx(sem, my_queue),
1357 NULL); 1357 NULL);
1358 */ 1358 */
1359 1359
1360 if(tsk_rt(my_queue->owner)->inh_task == max_hp) 1360 if(tsk_rt(my_queue->owner)->inh_task == max_hp)
1361 { 1361 {
1362 __clear_priority_inheritance(my_queue->owner); 1362 __clear_priority_inheritance(my_queue->owner);
@@ -1366,7 +1366,7 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1366 } 1366 }
1367 } 1367 }
1368 raw_spin_unlock(&cluster->cluster_lock); 1368 raw_spin_unlock(&cluster->cluster_lock);
1369 1369
1370 list_for_each(pos, &my_queue->wait.task_list) 1370 list_for_each(pos, &my_queue->wait.task_list)
1371 { 1371 {
1372 queued = (struct task_struct*) list_entry(pos, wait_queue_t, 1372 queued = (struct task_struct*) list_entry(pos, wait_queue_t,
@@ -1385,7 +1385,7 @@ static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1385 } 1385 }
1386 --(my_queue->count); 1386 --(my_queue->count);
1387 } 1387 }
1388 1388
1389 return(max_hp); 1389 return(max_hp);
1390} 1390}
1391 1391
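
kfmlp_find_shortest(), defined earlier in this diff and used by the lock path that follows, scans the queue array circularly starting just after a given queue, so ties are broken round-robin instead of always favoring queue 0, and it stops early once an empty queue is found. A standalone version of that scan (minimal illustrative types, not the kernel code):

#include <stdio.h>

struct replica_queue { int count; };

struct kfmlp_model {
	int num_resources;
	struct replica_queue *queues;
	struct replica_queue *shortest;
};

/* Circular scan starting just after 'start': breaks ties round-robin rather
 * than always picking low-numbered queues, and bails out on an empty queue. */
struct replica_queue *find_shortest(struct kfmlp_model *sem,
				    struct replica_queue *start)
{
	struct replica_queue *step = start;
	struct replica_queue *shortest = sem->shortest;

	do {
		step = (step + 1 != &sem->queues[sem->num_resources]) ?
		       step + 1 : &sem->queues[0];
		if (step->count < shortest->count) {
			shortest = step;
			if (shortest->count == 0)
				break;		/* cannot get any shorter */
		}
	} while (step != start);

	return shortest;
}

int main(void)
{
	struct replica_queue q[3] = { { 2 }, { 1 }, { 3 } };
	struct kfmlp_model sem = { 3, q, &q[0] };

	printf("shortest has count %d\n", find_shortest(&sem, &q[0])->count);
	return 0;
}

Starting the scan at different queues spreads contending tasks over the replicas even when several queues are equally short.
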
@@ -1396,26 +1396,26 @@ int cedf_kfmlp_lock(struct litmus_lock* l)
1396 struct kfmlp_queue* my_queue; 1396 struct kfmlp_queue* my_queue;
1397 wait_queue_t wait; 1397 wait_queue_t wait;
1398 unsigned long flags; 1398 unsigned long flags;
1399 1399
1400 if (!is_realtime(t)) 1400 if (!is_realtime(t))
1401 return -EPERM; 1401 return -EPERM;
1402 1402
1403 spin_lock_irqsave(&sem->lock, flags); 1403 spin_lock_irqsave(&sem->lock, flags);
1404 1404
1405 my_queue = sem->shortest_queue; 1405 my_queue = sem->shortest_queue;
1406 1406
1407 if (my_queue->owner) { 1407 if (my_queue->owner) {
1408 /* resource is not free => must suspend and wait */ 1408 /* resource is not free => must suspend and wait */
1409 TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n", 1409 TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
1410 kfmlp_get_idx(sem, my_queue)); 1410 kfmlp_get_idx(sem, my_queue));
1411 1411
1412 init_waitqueue_entry(&wait, t); 1412 init_waitqueue_entry(&wait, t);
1413 1413
1414 /* FIXME: interruptible would be nice some day */ 1414 /* FIXME: interruptible would be nice some day */
1415 set_task_state(t, TASK_UNINTERRUPTIBLE); 1415 set_task_state(t, TASK_UNINTERRUPTIBLE);
1416 1416
1417 __add_wait_queue_tail_exclusive(&my_queue->wait, &wait); 1417 __add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
1418 1418
1419 /* check if we need to activate priority inheritance */ 1419 /* check if we need to activate priority inheritance */
1420 if (edf_higher_prio(t, my_queue->hp_waiter)) 1420 if (edf_higher_prio(t, my_queue->hp_waiter))
1421 { 1421 {
@@ -1425,20 +1425,20 @@ int cedf_kfmlp_lock(struct litmus_lock* l)
1425 set_priority_inheritance(my_queue->owner, my_queue->hp_waiter); 1425 set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
1426 } 1426 }
1427 } 1427 }
1428 1428
1429 ++(my_queue->count); 1429 ++(my_queue->count);
1430 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); 1430 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
1431 1431
1432 /* release lock before sleeping */ 1432 /* release lock before sleeping */
1433 spin_unlock_irqrestore(&sem->lock, flags); 1433 spin_unlock_irqrestore(&sem->lock, flags);
1434 1434
1435 /* We depend on the FIFO order. Thus, we don't need to recheck 1435 /* We depend on the FIFO order. Thus, we don't need to recheck
1436 * when we wake up; we are guaranteed to have the lock since 1436 * when we wake up; we are guaranteed to have the lock since
1437 * there is only one wake up per release (or steal). 1437 * there is only one wake up per release (or steal).
1438 */ 1438 */
1439 schedule(); 1439 schedule();
1440 1440
1441 1441
1442 if(my_queue->owner == t) 1442 if(my_queue->owner == t)
1443 { 1443 {
1444 TRACE_CUR("queue %d: acquired through waiting\n", 1444 TRACE_CUR("queue %d: acquired through waiting\n",
@@ -1458,15 +1458,15 @@ int cedf_kfmlp_lock(struct litmus_lock* l)
1458 { 1458 {
1459 TRACE_CUR("queue %d: acquired immediately\n", 1459 TRACE_CUR("queue %d: acquired immediately\n",
1460 kfmlp_get_idx(sem, my_queue)); 1460 kfmlp_get_idx(sem, my_queue));
1461 1461
1462 my_queue->owner = t; 1462 my_queue->owner = t;
1463 1463
1464 ++(my_queue->count); 1464 ++(my_queue->count);
1465 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue); 1465 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
1466 1466
1467 spin_unlock_irqrestore(&sem->lock, flags); 1467 spin_unlock_irqrestore(&sem->lock, flags);
1468 } 1468 }
1469 1469
1470 return kfmlp_get_idx(sem, my_queue); 1470 return kfmlp_get_idx(sem, my_queue);
1471} 1471}
1472 1472
@@ -1477,16 +1477,16 @@ int cedf_kfmlp_unlock(struct litmus_lock* l)
1477 struct kfmlp_queue *my_queue; 1477 struct kfmlp_queue *my_queue;
1478 unsigned long flags; 1478 unsigned long flags;
1479 int err = 0; 1479 int err = 0;
1480 1480
1481 spin_lock_irqsave(&sem->lock, flags); 1481 spin_lock_irqsave(&sem->lock, flags);
1482 1482
1483 my_queue = kfmlp_get_queue(sem, t); 1483 my_queue = kfmlp_get_queue(sem, t);
1484 1484
1485 if (!my_queue) { 1485 if (!my_queue) {
1486 err = -EINVAL; 1486 err = -EINVAL;
1487 goto out; 1487 goto out;
1488 } 1488 }
1489 1489
1490 /* check if there are jobs waiting for this resource */ 1490 /* check if there are jobs waiting for this resource */
1491 next = __waitqueue_remove_first(&my_queue->wait); 1491 next = __waitqueue_remove_first(&my_queue->wait);
1492 if (next) { 1492 if (next) {
@@ -1497,16 +1497,16 @@ int cedf_kfmlp_unlock(struct litmus_lock* l)
1497 */ 1497 */
 1498 /* next becomes the resource holder */ 1498 /* next becomes the resource holder */
1499 my_queue->owner = next; 1499 my_queue->owner = next;
1500 1500
1501 --(my_queue->count); 1501 --(my_queue->count);
1502 if(my_queue->count < sem->shortest_queue->count) 1502 if(my_queue->count < sem->shortest_queue->count)
1503 { 1503 {
1504 sem->shortest_queue = my_queue; 1504 sem->shortest_queue = my_queue;
1505 } 1505 }
1506 1506
1507 TRACE_CUR("queue %d: lock ownership passed to %s/%d\n", 1507 TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
1508 kfmlp_get_idx(sem, my_queue), next->comm, next->pid); 1508 kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
1509 1509
1510 /* determine new hp_waiter if necessary */ 1510 /* determine new hp_waiter if necessary */
1511 if (next == my_queue->hp_waiter) { 1511 if (next == my_queue->hp_waiter) {
1512 TRACE_TASK(next, "was highest-prio waiter\n"); 1512 TRACE_TASK(next, "was highest-prio waiter\n");
@@ -1525,38 +1525,38 @@ int cedf_kfmlp_unlock(struct litmus_lock* l)
1525 * waiter's priority. */ 1525 * waiter's priority. */
1526 set_priority_inheritance(next, my_queue->hp_waiter); 1526 set_priority_inheritance(next, my_queue->hp_waiter);
1527 } 1527 }
1528 1528
1529 /* wake up next */ 1529 /* wake up next */
1530 wake_up_process(next); 1530 wake_up_process(next);
1531 } 1531 }
1532 else 1532 else
1533 { 1533 {
1534 TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue)); 1534 TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
1535 1535
1536 next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */ 1536 next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
1537 1537
1538 /* 1538 /*
1539 if(next) 1539 if(next)
1540 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n", 1540 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
1541 kfmlp_get_idx(sem, my_queue), 1541 kfmlp_get_idx(sem, my_queue),
1542 next->comm, next->pid); 1542 next->comm, next->pid);
1543 */ 1543 */
1544 1544
1545 my_queue->owner = next; 1545 my_queue->owner = next;
1546 1546
1547 if(next) 1547 if(next)
1548 { 1548 {
1549 TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n", 1549 TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
1550 kfmlp_get_idx(sem, my_queue), 1550 kfmlp_get_idx(sem, my_queue),
1551 next->comm, next->pid); 1551 next->comm, next->pid);
1552 1552
1553 /* wake up next */ 1553 /* wake up next */
1554 wake_up_process(next); 1554 wake_up_process(next);
1555 } 1555 }
1556 else 1556 else
1557 { 1557 {
1558 TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue)); 1558 TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
1559 1559
1560 --(my_queue->count); 1560 --(my_queue->count);
1561 if(my_queue->count < sem->shortest_queue->count) 1561 if(my_queue->count < sem->shortest_queue->count)
1562 { 1562 {
@@ -1564,14 +1564,14 @@ int cedf_kfmlp_unlock(struct litmus_lock* l)
1564 } 1564 }
1565 } 1565 }
1566 } 1566 }
1567 1567
1568 /* we lose the benefit of priority inheritance (if any) */ 1568 /* we lose the benefit of priority inheritance (if any) */
1569 if (tsk_rt(t)->inh_task) 1569 if (tsk_rt(t)->inh_task)
1570 clear_priority_inheritance(t); 1570 clear_priority_inheritance(t);
1571 1571
1572out: 1572out:
1573 spin_unlock_irqrestore(&sem->lock, flags); 1573 spin_unlock_irqrestore(&sem->lock, flags);
1574 1574
1575 return err; 1575 return err;
1576} 1576}
1577 1577
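
The unlock path above hands the replica directly to the oldest waiter in FIFO order and only falls back to stealing a high-priority waiter from another queue when its own queue is empty; because ownership is transferred while the waiter still sleeps, the woken task never has to re-compete for the lock. A toy FIFO handoff illustrating that transfer (array-based queue, invented names, stealing omitted):

#include <stdio.h>

#define QCAP 8

/* Tiny FIFO model of one replica queue: a holder plus waiting pids. */
struct replica {
	int owner;		/* pid of the holder, or -1 if free */
	int waiters[QCAP];	/* FIFO of waiting pids */
	int head, len;
};

/* Release the replica: hand it to the oldest waiter so the woken task already
 * owns the lock before it runs; if nobody waits, the replica becomes free (a
 * real k-FMLP release would then try to steal a waiter from another queue). */
int release_replica(struct replica *r)
{
	if (r->len == 0) {
		r->owner = -1;
		return -1;			/* nothing handed over */
	}
	r->owner = r->waiters[r->head];
	r->head = (r->head + 1) % QCAP;
	r->len--;
	return r->owner;			/* pid to wake up */
}

int main(void)
{
	struct replica r = { 100, { 101, 102 }, 0, 2 };

	printf("handed to %d\n", release_replica(&r));	/* 101 */
	printf("handed to %d\n", release_replica(&r));	/* 102 */
	printf("handed to %d\n", release_replica(&r));	/* -1: now free */
	return 0;
}
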
@@ -1581,19 +1581,19 @@ int cedf_kfmlp_close(struct litmus_lock* l)
1581 struct kfmlp_semaphore *sem = kfmlp_from_lock(l); 1581 struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
1582 struct kfmlp_queue *my_queue; 1582 struct kfmlp_queue *my_queue;
1583 unsigned long flags; 1583 unsigned long flags;
1584 1584
1585 int owner; 1585 int owner;
1586 1586
1587 spin_lock_irqsave(&sem->lock, flags); 1587 spin_lock_irqsave(&sem->lock, flags);
1588 1588
1589 my_queue = kfmlp_get_queue(sem, t); 1589 my_queue = kfmlp_get_queue(sem, t);
1590 owner = (my_queue) ? (my_queue->owner == t) : 0; 1590 owner = (my_queue) ? (my_queue->owner == t) : 0;
1591 1591
1592 spin_unlock_irqrestore(&sem->lock, flags); 1592 spin_unlock_irqrestore(&sem->lock, flags);
1593 1593
1594 if (owner) 1594 if (owner)
1595 cedf_kfmlp_unlock(l); 1595 cedf_kfmlp_unlock(l);
1596 1596
1597 return 0; 1597 return 0;
1598} 1598}
1599 1599
@@ -1616,7 +1616,7 @@ static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code)
1616 struct kfmlp_semaphore* sem; 1616 struct kfmlp_semaphore* sem;
1617 int num_resources = 0; 1617 int num_resources = 0;
1618 int i; 1618 int i;
1619 1619
1620 if(!access_ok(VERIFY_READ, arg, sizeof(num_resources))) 1620 if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
1621 { 1621 {
1622 *ret_code = -EINVAL; 1622 *ret_code = -EINVAL;
@@ -1630,28 +1630,28 @@ static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code)
1630 if(num_resources < 1) 1630 if(num_resources < 1)
1631 { 1631 {
1632 *ret_code = -EINVAL; 1632 *ret_code = -EINVAL;
1633 return(NULL); 1633 return(NULL);
1634 } 1634 }
1635 1635
1636 sem = kmalloc(sizeof(*sem), GFP_KERNEL); 1636 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
1637 if(!sem) 1637 if(!sem)
1638 { 1638 {
1639 *ret_code = -ENOMEM; 1639 *ret_code = -ENOMEM;
1640 return NULL; 1640 return NULL;
1641 } 1641 }
1642 1642
1643 sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL); 1643 sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
1644 if(!sem->queues) 1644 if(!sem->queues)
1645 { 1645 {
1646 kfree(sem); 1646 kfree(sem);
1647 *ret_code = -ENOMEM; 1647 *ret_code = -ENOMEM;
1648 return NULL; 1648 return NULL;
1649 } 1649 }
1650 1650
1651 sem->litmus_lock.ops = &cedf_kfmlp_lock_ops; 1651 sem->litmus_lock.ops = &cedf_kfmlp_lock_ops;
1652 spin_lock_init(&sem->lock); 1652 spin_lock_init(&sem->lock);
1653 sem->num_resources = num_resources; 1653 sem->num_resources = num_resources;
1654 1654
1655 for(i = 0; i < num_resources; ++i) 1655 for(i = 0; i < num_resources; ++i)
1656 { 1656 {
1657 sem->queues[i].owner = NULL; 1657 sem->queues[i].owner = NULL;
@@ -1659,9 +1659,9 @@ static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code)
1659 init_waitqueue_head(&sem->queues[i].wait); 1659 init_waitqueue_head(&sem->queues[i].wait);
1660 sem->queues[i].count = 0; 1660 sem->queues[i].count = 0;
1661 } 1661 }
1662 1662
1663 sem->shortest_queue = &sem->queues[0]; 1663 sem->shortest_queue = &sem->queues[0];
1664 1664
1665 *ret_code = 0; 1665 *ret_code = 0;
1666 return &sem->litmus_lock; 1666 return &sem->litmus_lock;
1667} 1667}
@@ -1673,7 +1673,7 @@ static long cedf_allocate_lock(struct litmus_lock **lock, int type,
1673 void* __user arg) 1673 void* __user arg)
1674{ 1674{
1675 int err = -ENXIO; 1675 int err = -ENXIO;
1676 1676
1677 /* C-EDF currently only supports the FMLP for global resources 1677 /* C-EDF currently only supports the FMLP for global resources
1678 WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */ 1678 WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */
1679 switch (type) { 1679 switch (type) {
@@ -1681,7 +1681,7 @@ static long cedf_allocate_lock(struct litmus_lock **lock, int type,
1681 *lock = cedf_new_kfmlp(arg, &err); 1681 *lock = cedf_new_kfmlp(arg, &err);
1682 break; 1682 break;
1683 }; 1683 };
1684 1684
1685 return err; 1685 return err;
1686} 1686}
1687 1687
@@ -1781,13 +1781,13 @@ static long cedf_activate_plugin(void)
1781 INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio); 1781 INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio);
1782 edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); 1782 edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
1783 1783
1784 1784
1785#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 1785#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1786 cedf[i].pending_tasklets.head = NULL; 1786 cedf[i].pending_tasklets.head = NULL;
1787 cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head); 1787 cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head);
1788#endif 1788#endif
1789 1789
1790 1790
1791 if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) 1791 if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
1792 return -ENOMEM; 1792 return -ENOMEM;
1793#ifdef CONFIG_RELEASE_MASTER 1793#ifdef CONFIG_RELEASE_MASTER
@@ -1846,14 +1846,14 @@ static long cedf_activate_plugin(void)
1846 break; 1846 break;
1847 } 1847 }
1848 } 1848 }
1849 1849
1850#ifdef CONFIG_LITMUS_SOFTIRQD 1850#ifdef CONFIG_LITMUS_SOFTIRQD
1851 { 1851 {
1852 /* distribute the daemons evenly across the clusters. */ 1852 /* distribute the daemons evenly across the clusters. */
1853 int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC); 1853 int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC);
1854 int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters; 1854 int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters;
1855 int left_over = NR_LITMUS_SOFTIRQD % num_clusters; 1855 int left_over = NR_LITMUS_SOFTIRQD % num_clusters;
1856 1856
1857 int daemon = 0; 1857 int daemon = 0;
1858 for(i = 0; i < num_clusters; ++i) 1858 for(i = 0; i < num_clusters; ++i)
1859 { 1859 {
@@ -1863,23 +1863,23 @@ static long cedf_activate_plugin(void)
1863 ++num_on_this_cluster; 1863 ++num_on_this_cluster;
1864 --left_over; 1864 --left_over;
1865 } 1865 }
1866 1866
1867 for(j = 0; j < num_on_this_cluster; ++j) 1867 for(j = 0; j < num_on_this_cluster; ++j)
1868 { 1868 {
1869 // first CPU of this cluster 1869 // first CPU of this cluster
1870 affinity[daemon++] = i*cluster_size; 1870 affinity[daemon++] = i*cluster_size;
1871 } 1871 }
1872 } 1872 }
1873 1873
1874 spawn_klitirqd(affinity); 1874 spawn_klitirqd(affinity);
1875 1875
1876 kfree(affinity); 1876 kfree(affinity);
1877 } 1877 }
1878#endif 1878#endif
1879 1879
1880#ifdef CONFIG_LITMUS_NVIDIA 1880#ifdef CONFIG_LITMUS_NVIDIA
1881 init_nvidia_info(); 1881 init_nvidia_info();
1882#endif 1882#endif
1883 1883
1884 free_cpumask_var(mask); 1884 free_cpumask_var(mask);
1885 clusters_allocated = 1; 1885 clusters_allocated = 1;
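
The CONFIG_LITMUS_SOFTIRQD block above spreads NR_LITMUS_SOFTIRQD klitirqd daemons over the clusters: every cluster receives the integer quotient, the first "remainder" clusters receive one extra, and each daemon is pinned to the first CPU of its cluster. The arithmetic is easy to check in isolation (a standalone sketch; none of these names are the kernel's):

#include <stdio.h>

/* Fill affinity[i] with the CPU that daemon i should be pinned to, spreading
 * num_daemons daemons as evenly as possible over num_clusters clusters of
 * cluster_size CPUs; each daemon goes to the first CPU of its cluster. */
static void distribute_daemons(int *affinity, int num_daemons,
			       int num_clusters, int cluster_size)
{
	int per_cluster = num_daemons / num_clusters;
	int left_over = num_daemons % num_clusters;
	int daemon = 0;
	int i, j;

	for (i = 0; i < num_clusters; i++) {
		int here = per_cluster;

		if (left_over > 0) {	/* early clusters absorb the remainder */
			here++;
			left_over--;
		}
		for (j = 0; j < here; j++)
			affinity[daemon++] = i * cluster_size;
	}
}

int main(void)
{
	int affinity[8];
	int i;

	distribute_daemons(affinity, 8, 3, 4);	/* e.g. 8 daemons, 3 clusters of 4 CPUs */
	for (i = 0; i < 8; i++)
		printf("daemon %d -> CPU %d\n", i, affinity[i]);
	return 0;
}
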
@@ -1902,7 +1902,7 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
1902#ifdef CONFIG_LITMUS_LOCKING 1902#ifdef CONFIG_LITMUS_LOCKING
1903 .allocate_lock = cedf_allocate_lock, 1903 .allocate_lock = cedf_allocate_lock,
1904 .set_prio_inh = set_priority_inheritance, 1904 .set_prio_inh = set_priority_inheritance,
1905 .clear_prio_inh = clear_priority_inheritance, 1905 .clear_prio_inh = clear_priority_inheritance,
1906#endif 1906#endif
1907#ifdef CONFIG_LITMUS_SOFTIRQD 1907#ifdef CONFIG_LITMUS_SOFTIRQD
1908 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd, 1908 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 09334aea43ac..422ad0395099 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -366,7 +366,7 @@ static void check_for_preemptions(void)
366static noinline void gsnedf_job_arrival(struct task_struct* task) 366static noinline void gsnedf_job_arrival(struct task_struct* task)
367{ 367{
368 BUG_ON(!task); 368 BUG_ON(!task);
369 369
370 requeue(task); 370 requeue(task);
371 check_for_preemptions(); 371 check_for_preemptions();
372} 372}
@@ -387,7 +387,7 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
387static noinline void job_completion(struct task_struct *t, int forced) 387static noinline void job_completion(struct task_struct *t, int forced)
388{ 388{
389 BUG_ON(!t); 389 BUG_ON(!t);
390 390
391 sched_trace_task_completion(t, forced); 391 sched_trace_task_completion(t, forced);
392 392
393#ifdef CONFIG_LITMUS_NVIDIA 393#ifdef CONFIG_LITMUS_NVIDIA
@@ -446,7 +446,7 @@ static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flush
446{ 446{
447 if (!atomic_read(&tasklet->count)) { 447 if (!atomic_read(&tasklet->count)) {
448 sched_trace_tasklet_begin(tasklet->owner); 448 sched_trace_tasklet_begin(tasklet->owner);
449 449
450 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) 450 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
451 { 451 {
452 BUG(); 452 BUG();
@@ -454,7 +454,7 @@ static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flush
454 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed); 454 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n", __FUNCTION__, tasklet->owner->pid, flushed);
455 tasklet->func(tasklet->data); 455 tasklet->func(tasklet->data);
456 tasklet_unlock(tasklet); 456 tasklet_unlock(tasklet);
457 457
458 sched_trace_tasklet_end(tasklet->owner, flushed); 458 sched_trace_tasklet_end(tasklet->owner, flushed);
459 } 459 }
460 else { 460 else {
@@ -467,54 +467,54 @@ static void do_lit_tasklets(struct task_struct* sched_task)
467 int work_to_do = 1; 467 int work_to_do = 1;
468 struct tasklet_struct *tasklet = NULL; 468 struct tasklet_struct *tasklet = NULL;
469 unsigned long flags; 469 unsigned long flags;
470 470
471 while(work_to_do) { 471 while(work_to_do) {
472 472
473 TS_NV_SCHED_BOTISR_START; 473 TS_NV_SCHED_BOTISR_START;
474 474
475 // execute one tasklet that has higher priority 475 // execute one tasklet that has higher priority
476 raw_spin_lock_irqsave(&gsnedf_lock, flags); 476 raw_spin_lock_irqsave(&gsnedf_lock, flags);
477 477
478 if(gsnedf_pending_tasklets.head != NULL) { 478 if(gsnedf_pending_tasklets.head != NULL) {
479 struct tasklet_struct *prev = NULL; 479 struct tasklet_struct *prev = NULL;
480 tasklet = gsnedf_pending_tasklets.head; 480 tasklet = gsnedf_pending_tasklets.head;
481 481
482 while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) { 482 while(tasklet && edf_higher_prio(sched_task, tasklet->owner)) {
483 prev = tasklet; 483 prev = tasklet;
484 tasklet = tasklet->next; 484 tasklet = tasklet->next;
485 } 485 }
486 486
487 // remove the tasklet from the queue 487 // remove the tasklet from the queue
488 if(prev) { 488 if(prev) {
489 prev->next = tasklet->next; 489 prev->next = tasklet->next;
490 if(prev->next == NULL) { 490 if(prev->next == NULL) {
491 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); 491 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
492 gsnedf_pending_tasklets.tail = &(prev); 492 gsnedf_pending_tasklets.tail = &(prev);
493 } 493 }
494 } 494 }
495 else { 495 else {
496 gsnedf_pending_tasklets.head = tasklet->next; 496 gsnedf_pending_tasklets.head = tasklet->next;
497 if(tasklet->next == NULL) { 497 if(tasklet->next == NULL) {
498 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); 498 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
499 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); 499 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
500 } 500 }
501 } 501 }
502 } 502 }
503 else { 503 else {
504 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); 504 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
505 } 505 }
506 506
507 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 507 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
508 508
509 if(tasklet) { 509 if(tasklet) {
510 __do_lit_tasklet(tasklet, 0ul); 510 __do_lit_tasklet(tasklet, 0ul);
511 tasklet = NULL; 511 tasklet = NULL;
512 } 512 }
513 else { 513 else {
514 work_to_do = 0; 514 work_to_do = 0;
515 } 515 }
516 516
517 TS_NV_SCHED_BOTISR_END; 517 TS_NV_SCHED_BOTISR_END;
518 } 518 }
519} 519}
520 520
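
do_lit_tasklets() repeatedly pops, under gsnedf_lock, the head of the priority-sorted pending list whenever the locally scheduled task does not outrank its owner, runs it with the lock dropped, and loops until nothing eligible remains. A simplified model of the pop step (since the list is kept sorted, only the head can be eligible; the types and comparison are illustrative):

#include <stddef.h>

struct node {
	long deadline;		/* smaller = higher priority (EDF) */
	struct node *next;
};

struct pending_list {
	struct node *head;
	struct node **tail;	/* address of the last node's next field */
};

static int higher_prio(const struct node *a, const struct node *b)
{
	return a->deadline < b->deadline;
}

/* Pop the head of the priority-sorted list unless the currently scheduled job
 * outranks it; return NULL when there is nothing eligible to run. */
struct node *pop_eligible(struct pending_list *l, const struct node *current)
{
	struct node *head = l->head;

	if (head == NULL || higher_prio(current, head))
		return NULL;		/* the running job still beats everything pending */

	l->head = head->next;
	if (l->head == NULL)		/* list became empty: reset the tail pointer */
		l->tail = &l->head;
	head->next = NULL;
	return head;
}
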
@@ -524,29 +524,29 @@ static void do_lit_tasklets(struct task_struct* sched_task)
524// struct tasklet_struct *tasklet = NULL; 524// struct tasklet_struct *tasklet = NULL;
525// //struct tasklet_struct *step; 525// //struct tasklet_struct *step;
526// unsigned long flags; 526// unsigned long flags;
527// 527//
528// while(work_to_do) { 528// while(work_to_do) {
529// 529//
530// TS_NV_SCHED_BOTISR_START; 530// TS_NV_SCHED_BOTISR_START;
531// 531//
532// // remove tasklet at head of list if it has higher priority. 532// // remove tasklet at head of list if it has higher priority.
533// raw_spin_lock_irqsave(&gsnedf_lock, flags); 533// raw_spin_lock_irqsave(&gsnedf_lock, flags);
534// 534//
535// if(gsnedf_pending_tasklets.head != NULL) { 535// if(gsnedf_pending_tasklets.head != NULL) {
536// // remove tasklet at head. 536// // remove tasklet at head.
537// tasklet = gsnedf_pending_tasklets.head; 537// tasklet = gsnedf_pending_tasklets.head;
538// 538//
539// if(edf_higher_prio(tasklet->owner, sched_task)) { 539// if(edf_higher_prio(tasklet->owner, sched_task)) {
540// 540//
541// if(NULL == tasklet->next) { 541// if(NULL == tasklet->next) {
542// // tasklet is at the head, list only has one element 542// // tasklet is at the head, list only has one element
543// TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); 543// TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
544// gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); 544// gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
545// } 545// }
546// 546//
547// // remove the tasklet from the queue 547// // remove the tasklet from the queue
548// gsnedf_pending_tasklets.head = tasklet->next; 548// gsnedf_pending_tasklets.head = tasklet->next;
549// 549//
550// TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid); 550// TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
551// } 551// }
552// else { 552// else {
@@ -557,60 +557,60 @@ static void do_lit_tasklets(struct task_struct* sched_task)
557// else { 557// else {
558// TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__); 558// TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
559// } 559// }
560// 560//
561// raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 561// raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
562// 562//
563// TS_NV_SCHED_BOTISR_END; 563// TS_NV_SCHED_BOTISR_END;
564// 564//
565// if(tasklet) { 565// if(tasklet) {
566// __do_lit_tasklet(tasklet, 0ul); 566// __do_lit_tasklet(tasklet, 0ul);
567// tasklet = NULL; 567// tasklet = NULL;
568// } 568// }
569// else { 569// else {
570// work_to_do = 0; 570// work_to_do = 0;
571// } 571// }
572// } 572// }
573// 573//
574// //TRACE("%s: exited.\n", __FUNCTION__); 574// //TRACE("%s: exited.\n", __FUNCTION__);
575//} 575//}
576 576
577static void __add_pai_tasklet(struct tasklet_struct* tasklet) 577static void __add_pai_tasklet(struct tasklet_struct* tasklet)
578{ 578{
579 struct tasklet_struct* step; 579 struct tasklet_struct* step;
580 580
581 tasklet->next = NULL; // make sure there are no old values floating around 581 tasklet->next = NULL; // make sure there are no old values floating around
582 582
583 step = gsnedf_pending_tasklets.head; 583 step = gsnedf_pending_tasklets.head;
584 if(step == NULL) { 584 if(step == NULL) {
585 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid); 585 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
586 // insert at tail. 586 // insert at tail.
587 *(gsnedf_pending_tasklets.tail) = tasklet; 587 *(gsnedf_pending_tasklets.tail) = tasklet;
588 gsnedf_pending_tasklets.tail = &(tasklet->next); 588 gsnedf_pending_tasklets.tail = &(tasklet->next);
589 } 589 }
590 else if((*(gsnedf_pending_tasklets.tail) != NULL) && 590 else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
591 edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) { 591 edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
592 // insert at tail. 592 // insert at tail.
593 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid); 593 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
594 594
595 *(gsnedf_pending_tasklets.tail) = tasklet; 595 *(gsnedf_pending_tasklets.tail) = tasklet;
596 gsnedf_pending_tasklets.tail = &(tasklet->next); 596 gsnedf_pending_tasklets.tail = &(tasklet->next);
597 } 597 }
598 else { 598 else {
599 // insert the tasklet somewhere in the middle. 599 // insert the tasklet somewhere in the middle.
600 600
601 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__); 601 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
602 602
603 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) { 603 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
604 step = step->next; 604 step = step->next;
605 } 605 }
606 606
607 // insert tasklet right before step->next. 607 // insert tasklet right before step->next.
608 608
609 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1); 609 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
610 610
611 tasklet->next = step->next; 611 tasklet->next = step->next;
612 step->next = tasklet; 612 step->next = tasklet;
613 613
614 // patch up the head if needed. 614 // patch up the head if needed.
615 if(gsnedf_pending_tasklets.head == step) 615 if(gsnedf_pending_tasklets.head == step)
616 { 616 {
@@ -623,12 +623,12 @@ static void __add_pai_tasklet(struct tasklet_struct* tasklet)
623static void gsnedf_run_tasklets(struct task_struct* sched_task) 623static void gsnedf_run_tasklets(struct task_struct* sched_task)
624{ 624{
625 preempt_disable(); 625 preempt_disable();
626 626
627 if(gsnedf_pending_tasklets.head != NULL) { 627 if(gsnedf_pending_tasklets.head != NULL) {
628 TRACE("%s: There are tasklets to process.\n", __FUNCTION__); 628 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
629 do_lit_tasklets(sched_task); 629 do_lit_tasklets(sched_task);
630 } 630 }
631 631
632 preempt_enable_no_resched(); 632 preempt_enable_no_resched();
633} 633}
634 634
@@ -639,22 +639,22 @@ static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
639 int runLocal = 0; 639 int runLocal = 0;
640 int runNow = 0; 640 int runNow = 0;
641 unsigned long flags; 641 unsigned long flags;
642 642
643 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner))) 643 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
644 { 644 {
645 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); 645 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
646 return 0; 646 return 0;
647 } 647 }
 648 648
 649 649
 650 raw_spin_lock_irqsave(&gsnedf_lock, flags); 650 raw_spin_lock_irqsave(&gsnedf_lock, flags);
 651 651
652 thisCPU = smp_processor_id(); 652 thisCPU = smp_processor_id();
653 653
654#ifdef CONFIG_SCHED_CPU_AFFINITY 654#ifdef CONFIG_SCHED_CPU_AFFINITY
655 { 655 {
656 cpu_entry_t* affinity = NULL; 656 cpu_entry_t* affinity = NULL;
657 657
658 // use this CPU if it is in our cluster and isn't running any RT work. 658 // use this CPU if it is in our cluster and isn't running any RT work.
659 if( 659 if(
660#ifdef CONFIG_RELEASE_MASTER 660#ifdef CONFIG_RELEASE_MASTER
@@ -666,20 +666,20 @@ static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
666 else { 666 else {
667 // this CPU is busy or shouldn't run tasklet in this cluster. 667 // this CPU is busy or shouldn't run tasklet in this cluster.
 668 // look for available nearby CPUs. 668 // look for available nearby CPUs.
669 // NOTE: Affinity towards owner and not this CPU. Is this right? 669 // NOTE: Affinity towards owner and not this CPU. Is this right?
670 affinity = 670 affinity =
671 gsnedf_get_nearest_available_cpu( 671 gsnedf_get_nearest_available_cpu(
672 &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner))); 672 &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
673 } 673 }
674 674
675 targetCPU = affinity; 675 targetCPU = affinity;
676 } 676 }
677#endif 677#endif
678 678
679 if (targetCPU == NULL) { 679 if (targetCPU == NULL) {
680 targetCPU = lowest_prio_cpu(); 680 targetCPU = lowest_prio_cpu();
681 } 681 }
682 682
683 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) { 683 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
684 if (thisCPU == targetCPU->cpu) { 684 if (thisCPU == targetCPU->cpu) {
685 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__); 685 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
@@ -696,15 +696,15 @@ static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
696 runLocal = 0; 696 runLocal = 0;
697 runNow = 0; 697 runNow = 0;
698 } 698 }
699 699
700 if(!runLocal) { 700 if(!runLocal) {
701 // enqueue the tasklet 701 // enqueue the tasklet
702 __add_pai_tasklet(tasklet); 702 __add_pai_tasklet(tasklet);
703 } 703 }
704 704
705 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 705 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
706 706
707 707
708 if (runLocal /*&& runNow */) { // runNow == 1 is implied 708 if (runLocal /*&& runNow */) { // runNow == 1 is implied
709 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__); 709 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
710 __do_lit_tasklet(tasklet, 0ul); 710 __do_lit_tasklet(tasklet, 0ul);
@@ -716,7 +716,7 @@ static int gsnedf_enqueue_pai_tasklet(struct tasklet_struct* tasklet)
716 else { 716 else {
717 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__); 717 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
718 } 718 }
719 719
720 return(1); // success 720 return(1); // success
721} 721}
722 722
@@ -725,7 +725,7 @@ static void gsnedf_change_prio_pai_tasklet(struct task_struct *old_prio,
725{ 725{
726 struct tasklet_struct* step; 726 struct tasklet_struct* step;
727 unsigned long flags; 727 unsigned long flags;
728 728
729 if(gsnedf_pending_tasklets.head != NULL) { 729 if(gsnedf_pending_tasklets.head != NULL) {
730 raw_spin_lock_irqsave(&gsnedf_lock, flags); 730 raw_spin_lock_irqsave(&gsnedf_lock, flags);
731 for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) { 731 for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) {
@@ -807,7 +807,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
807 blocks, out_of_time, np, sleep, preempt, 807 blocks, out_of_time, np, sleep, preempt,
808 prev->state, signal_pending(prev)); 808 prev->state, signal_pending(prev));
809 */ 809 */
810 810
811 if (entry->linked && preempt) 811 if (entry->linked && preempt)
812 TRACE_TASK(prev, "will be preempted by %s/%d\n", 812 TRACE_TASK(prev, "will be preempted by %s/%d\n",
813 entry->linked->comm, entry->linked->pid); 813 entry->linked->comm, entry->linked->pid);
@@ -817,8 +817,8 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
817 if (blocks) { 817 if (blocks) {
818 unlink(entry->scheduled); 818 unlink(entry->scheduled);
819 } 819 }
820 820
821#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING) 821#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_AFFINITY_LOCKING)
822 if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) { 822 if(exists && is_realtime(entry->scheduled) && tsk_rt(entry->scheduled)->held_gpus) {
823 if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) { 823 if(!blocks || tsk_rt(entry->scheduled)->suspend_gpu_tracker_on_block) {
824 stop_gpu_tracker(entry->scheduled); 824 stop_gpu_tracker(entry->scheduled);
@@ -874,7 +874,7 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
874 if (exists) 874 if (exists)
875 next = prev; 875 next = prev;
876 } 876 }
877 877
878 sched_state_task_picked(); 878 sched_state_task_picked();
879 879
880 raw_spin_unlock(&gsnedf_lock); 880 raw_spin_unlock(&gsnedf_lock);
@@ -898,9 +898,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
898static void gsnedf_finish_switch(struct task_struct *prev) 898static void gsnedf_finish_switch(struct task_struct *prev)
899{ 899{
900 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); 900 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
901 901
902 entry->scheduled = is_realtime(current) ? current : NULL; 902 entry->scheduled = is_realtime(current) ? current : NULL;
903 903
904#ifdef WANT_ALL_SCHED_EVENTS 904#ifdef WANT_ALL_SCHED_EVENTS
905 TRACE_TASK(prev, "switched away from\n"); 905 TRACE_TASK(prev, "switched away from\n");
906#endif 906#endif
@@ -949,13 +949,13 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
949static void gsnedf_task_wake_up(struct task_struct *task) 949static void gsnedf_task_wake_up(struct task_struct *task)
950{ 950{
951 unsigned long flags; 951 unsigned long flags;
952 //lt_t now; 952 //lt_t now;
953 953
954 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 954 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
955 955
956 raw_spin_lock_irqsave(&gsnedf_lock, flags); 956 raw_spin_lock_irqsave(&gsnedf_lock, flags);
957 957
958 958
959#if 0 // sporadic task model 959#if 0 // sporadic task model
960 /* We need to take suspensions because of semaphores into 960 /* We need to take suspensions because of semaphores into
961 * account! If a job resumes after being suspended due to acquiring 961 * account! If a job resumes after being suspended due to acquiring
@@ -981,7 +981,7 @@ static void gsnedf_task_wake_up(struct task_struct *task)
981#else // periodic task model 981#else // periodic task model
982 set_rt_flags(task, RT_F_RUNNING); 982 set_rt_flags(task, RT_F_RUNNING);
983#endif 983#endif
984 984
985 gsnedf_job_arrival(task); 985 gsnedf_job_arrival(task);
986 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 986 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
987} 987}
@@ -995,9 +995,9 @@ static void gsnedf_task_block(struct task_struct *t)
995 995
996 /* unlink if necessary */ 996 /* unlink if necessary */
997 raw_spin_lock_irqsave(&gsnedf_lock, flags); 997 raw_spin_lock_irqsave(&gsnedf_lock, flags);
998 998
999 unlink(t); 999 unlink(t);
1000 1000
1001 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1001 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
1002 1002
1003 BUG_ON(!is_realtime(t)); 1003 BUG_ON(!is_realtime(t));
@@ -1010,8 +1010,8 @@ static void gsnedf_task_exit(struct task_struct * t)
1010 1010
1011#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 1011#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1012 gsnedf_change_prio_pai_tasklet(t, NULL); 1012 gsnedf_change_prio_pai_tasklet(t, NULL);
1013#endif 1013#endif
1014 1014
1015 /* unlink if necessary */ 1015 /* unlink if necessary */
1016 raw_spin_lock_irqsave(&gsnedf_lock, flags); 1016 raw_spin_lock_irqsave(&gsnedf_lock, flags);
1017 unlink(t); 1017 unlink(t);
@@ -1020,7 +1020,7 @@ static void gsnedf_task_exit(struct task_struct * t)
1020 tsk_rt(t)->scheduled_on = NO_CPU; 1020 tsk_rt(t)->scheduled_on = NO_CPU;
1021 } 1021 }
1022 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 1022 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
1023 1023
1024 BUG_ON(!is_realtime(t)); 1024 BUG_ON(!is_realtime(t));
1025 TRACE_TASK(t, "RIP\n"); 1025 TRACE_TASK(t, "RIP\n");
1026} 1026}
@@ -1131,7 +1131,7 @@ static void __increase_priority_inheritance(struct task_struct* t,
1131static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh) 1131static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
1132{ 1132{
1133 int i = 0; 1133 int i = 0;
1134 1134
1135 raw_spin_lock(&gsnedf_lock); 1135 raw_spin_lock(&gsnedf_lock);
1136 1136
1137 __increase_priority_inheritance(t, prio_inh); 1137 __increase_priority_inheritance(t, prio_inh);
@@ -1141,13 +1141,13 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
1141 { 1141 {
1142 TRACE_TASK(t, "%s/%d inherits a new priority!\n", 1142 TRACE_TASK(t, "%s/%d inherits a new priority!\n",
1143 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); 1143 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1144 1144
1145 __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); 1145 __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
1146 } 1146 }
1147#endif 1147#endif
1148 1148
1149 raw_spin_unlock(&gsnedf_lock); 1149 raw_spin_unlock(&gsnedf_lock);
1150 1150
1151#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) 1151#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
1152 if(tsk_rt(t)->held_gpus) { 1152 if(tsk_rt(t)->held_gpus) {
1153 for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); 1153 for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
@@ -1218,22 +1218,22 @@ static void decrease_priority_inheritance(struct task_struct* t,
1218 struct task_struct* prio_inh) 1218 struct task_struct* prio_inh)
1219{ 1219{
1220 int i; 1220 int i;
1221 1221
1222 raw_spin_lock(&gsnedf_lock); 1222 raw_spin_lock(&gsnedf_lock);
1223 __decrease_priority_inheritance(t, prio_inh); 1223 __decrease_priority_inheritance(t, prio_inh);
1224 1224
1225#ifdef CONFIG_LITMUS_SOFTIRQD 1225#ifdef CONFIG_LITMUS_SOFTIRQD
1226 if(tsk_rt(t)->cur_klitirqd != NULL) 1226 if(tsk_rt(t)->cur_klitirqd != NULL)
1227 { 1227 {
1228 TRACE_TASK(t, "%s/%d decreases in priority!\n", 1228 TRACE_TASK(t, "%s/%d decreases in priority!\n",
1229 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid); 1229 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1230 1230
1231 __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh); 1231 __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
1232 } 1232 }
1233#endif 1233#endif
1234 1234
1235 raw_spin_unlock(&gsnedf_lock); 1235 raw_spin_unlock(&gsnedf_lock);
1236 1236
1237#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) 1237#if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA)
1238 if(tsk_rt(t)->held_gpus) { 1238 if(tsk_rt(t)->held_gpus) {
1239 for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); 1239 for(i = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus));
@@ -1242,7 +1242,7 @@ static void decrease_priority_inheritance(struct task_struct* t,
1242 pai_check_priority_decrease(t, i); 1242 pai_check_priority_decrease(t, i);
1243 } 1243 }
1244 } 1244 }
1245#endif 1245#endif
1246} 1246}
1247 1247
1248 1248
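Both increase_priority_inheritance() and decrease_priority_inheritance() above follow the same pattern: apply the change under gsnedf_lock, mirror it onto the klitirqd proxy thread currently serving the task (cur_klitirqd, when CONFIG_LITMUS_SOFTIRQD is set), drop the lock, and then re-check PAI tasklet priorities for every GPU recorded in held_gpus. The toy userspace sketch below models only the mirroring step; the types and helpers are invented for illustration and are not the plugin's API.

#include <stdio.h>

/* Toy model: a task and the worker thread acting on its behalf should
 * expose the same effective priority, so a change to the task is
 * immediately mirrored onto its current proxy. */
struct toy_task {
	int prio;			/* smaller value = higher priority */
	struct toy_task *proxy;		/* worker currently serving this task, if any */
};

static void toy_set_effective_prio(struct toy_task *t, int prio)
{
	t->prio = prio;
	if (t->proxy)
		t->proxy->prio = prio;	/* mirror, as the plugin does for cur_klitirqd */
}

int main(void)
{
	struct toy_task worker = { .prio = 90, .proxy = NULL };
	struct toy_task task   = { .prio = 50, .proxy = &worker };

	toy_set_effective_prio(&task, 10);	/* e.g. inheritance raised the task */
	printf("task=%d worker=%d\n", task.prio, worker.prio);
	return 0;
}
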
@@ -1253,9 +1253,9 @@ static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1253 struct task_struct* new_owner) 1253 struct task_struct* new_owner)
1254{ 1254{
1255 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); 1255 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1256 1256
1257 raw_spin_lock(&gsnedf_lock); 1257 raw_spin_lock(&gsnedf_lock);
1258 1258
1259 if(old_owner != new_owner) 1259 if(old_owner != new_owner)
1260 { 1260 {
1261 if(old_owner) 1261 if(old_owner)
@@ -1263,20 +1263,20 @@ static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1263 // unreachable? 1263 // unreachable?
1264 tsk_rt(old_owner)->cur_klitirqd = NULL; 1264 tsk_rt(old_owner)->cur_klitirqd = NULL;
1265 } 1265 }
1266 1266
1267 TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n", 1267 TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
1268 new_owner->comm, new_owner->pid); 1268 new_owner->comm, new_owner->pid);
1269 1269
1270 tsk_rt(new_owner)->cur_klitirqd = klitirqd; 1270 tsk_rt(new_owner)->cur_klitirqd = klitirqd;
1271 } 1271 }
1272 1272
1273 __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio. 1273 __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio.
1274 1274
1275 __increase_priority_inheritance(klitirqd, 1275 __increase_priority_inheritance(klitirqd,
1276 (tsk_rt(new_owner)->inh_task == NULL) ? 1276 (tsk_rt(new_owner)->inh_task == NULL) ?
1277 new_owner : 1277 new_owner :
1278 tsk_rt(new_owner)->inh_task); 1278 tsk_rt(new_owner)->inh_task);
1279 1279
1280 raw_spin_unlock(&gsnedf_lock); 1280 raw_spin_unlock(&gsnedf_lock);
1281} 1281}
1282 1282
@@ -1287,15 +1287,15 @@ static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1287 struct task_struct* new_owner) 1287 struct task_struct* new_owner)
1288{ 1288{
1289 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread)); 1289 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1290 1290
1291 raw_spin_lock(&gsnedf_lock); 1291 raw_spin_lock(&gsnedf_lock);
1292 1292
1293 TRACE_TASK(klitirqd, "priority restored\n"); 1293 TRACE_TASK(klitirqd, "priority restored\n");
1294 1294
1295 __decrease_priority_inheritance(klitirqd, new_owner); 1295 __decrease_priority_inheritance(klitirqd, new_owner);
1296 1296
1297 tsk_rt(old_owner)->cur_klitirqd = NULL; 1297 tsk_rt(old_owner)->cur_klitirqd = NULL;
1298 1298
1299 raw_spin_unlock(&gsnedf_lock); 1299 raw_spin_unlock(&gsnedf_lock);
1300} 1300}
1301#endif 1301#endif
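When ownership of a klitirqd thread moves to a new task, the code above first clears the thread's current inheritance (the "kludge" comment) and then makes it inherit from either the new owner or, if the new owner is itself inheriting, from the owner's inh_task. The effect is that the proxy always inherits from the top of the owner's inheritance chain rather than chaining through it. A minimal sketch of that donor selection, with invented names:

#include <stdio.h>

struct toy_task {
	const char *name;
	struct toy_task *inh_task;	/* task this one currently inherits from */
};

/* Same selection as in the patch: inherit from the owner's donor if it
 * has one, otherwise from the owner itself. */
static struct toy_task *toy_donor(struct toy_task *owner)
{
	return owner->inh_task ? owner->inh_task : owner;
}

int main(void)
{
	struct toy_task high  = { .name = "high",  .inh_task = NULL };
	struct toy_task owner = { .name = "owner", .inh_task = &high };

	printf("proxy inherits from: %s\n", toy_donor(&owner)->name);
	owner.inh_task = NULL;
	printf("proxy inherits from: %s\n", toy_donor(&owner)->name);
	return 0;
}
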
@@ -1435,10 +1435,10 @@ static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
1435 .unlock = kfmlp_unlock, 1435 .unlock = kfmlp_unlock,
1436 .close = kfmlp_close, 1436 .close = kfmlp_close,
1437 .deallocate = kfmlp_free, 1437 .deallocate = kfmlp_free,
1438 1438
1439 // kfmlp can only be an outer-most lock. 1439 // kfmlp can only be an outer-most lock.
1440 .propagate_increase_inheritance = NULL, 1440 .propagate_increase_inheritance = NULL,
1441 .propagate_decrease_inheritance = NULL, 1441 .propagate_decrease_inheritance = NULL,
1442}; 1442};
1443 1443
1444 1444
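Per the comment in the ops table above, KFMLP is only used as an outermost lock, so the nesting-related propagation hooks are deliberately left NULL. Presumably any code invoking these optional hooks must first check for NULL; the toy table below illustrates that convention with invented names and is not the LITMUS lock API.

#include <stdio.h>
#include <stddef.h>

/* Toy ops table with an optional hook left NULL, mirroring how the KFMLP
 * ops leave the nesting-related propagation callbacks unset. */
struct toy_lock_ops {
	void (*unlock)(void);
	void (*propagate)(void);	/* optional: only meaningful for nestable locks */
};

static void toy_unlock(void) { puts("unlock"); }

static const struct toy_lock_ops toy_outermost_ops = {
	.unlock    = toy_unlock,
	.propagate = NULL,		/* outermost-only lock: nothing to propagate */
};

static void toy_do_propagate(const struct toy_lock_ops *ops)
{
	if (ops->propagate)		/* callers tolerate the missing hook */
		ops->propagate();
	else
		puts("no propagation hook; outermost-only lock");
}

int main(void)
{
	toy_do_propagate(&toy_outermost_ops);
	toy_outermost_ops.unlock();
	return 0;
}
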
@@ -1709,16 +1709,16 @@ static long gsnedf_allocate_affinity_observer(
1709 void* __user args) 1709 void* __user args)
1710{ 1710{
1711 int err; 1711 int err;
1712 1712
1713 /* GSN-EDF currently only supports the FMLP for global resources. */ 1713 /* GSN-EDF currently only supports the FMLP for global resources. */
1714 switch (type) { 1714 switch (type) {
1715 1715
1716 case KFMLP_SIMPLE_GPU_AFF_OBS: 1716 case KFMLP_SIMPLE_GPU_AFF_OBS:
1717 *aff_obs = kfmlp_simple_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args); 1717 *aff_obs = kfmlp_simple_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args);
1718 break; 1718 break;
1719 case KFMLP_GPU_AFF_OBS: 1719 case KFMLP_GPU_AFF_OBS:
1720 *aff_obs = kfmlp_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args); 1720 *aff_obs = kfmlp_gpu_aff_obs_new(&gsnedf_kfmlp_affinity_ops, args);
1721 break; 1721 break;
1722#ifdef CONFIG_LITMUS_NESTED_LOCKING 1722#ifdef CONFIG_LITMUS_NESTED_LOCKING
1723// case IKGLP_GPU_AFF_OBS: 1723// case IKGLP_GPU_AFF_OBS:
1724// *aff_obs = gsnedf_new_ikglp_aff(arg); 1724// *aff_obs = gsnedf_new_ikglp_aff(arg);
@@ -1728,12 +1728,12 @@ static long gsnedf_allocate_affinity_observer(
1728 err = -ENXIO; 1728 err = -ENXIO;
1729 goto UNSUPPORTED_AFF_OBS; 1729 goto UNSUPPORTED_AFF_OBS;
1730 }; 1730 };
1731 1731
1732 if (*aff_obs) 1732 if (*aff_obs)
1733 err = 0; 1733 err = 0;
1734 else 1734 else
1735 err = -ENOMEM; 1735 err = -ENOMEM;
1736 1736
1737UNSUPPORTED_AFF_OBS: 1737UNSUPPORTED_AFF_OBS:
1738 return err; 1738 return err;
1739} 1739}
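gsnedf_allocate_affinity_observer() above distinguishes two failure modes: an observer type the plugin does not support takes the goto path and yields -ENXIO, while a supported type whose constructor returns NULL yields -ENOMEM. The standalone sketch below mirrors that shape; the toy types and allocator are invented and merely stand in for the kfmlp_*_aff_obs_new() constructors.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

enum toy_obs_type { TOY_SIMPLE_OBS, TOY_FULL_OBS, TOY_UNKNOWN };

static long toy_allocate_obs(enum toy_obs_type type, void **obs)
{
	long err;

	switch (type) {
	case TOY_SIMPLE_OBS:
	case TOY_FULL_OBS:
		*obs = malloc(16);	/* stands in for the observer constructors */
		break;
	default:
		err = -ENXIO;		/* type not supported by this plugin */
		goto unsupported;
	}

	err = *obs ? 0 : -ENOMEM;	/* constructor failure vs. success */
unsupported:
	return err;
}

int main(void)
{
	void *obs = NULL;

	printf("supported type:   %ld\n", toy_allocate_obs(TOY_FULL_OBS, &obs));
	printf("unsupported type: %ld\n", toy_allocate_obs(TOY_UNKNOWN, &obs));
	free(obs);
	return 0;
}
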
@@ -1769,12 +1769,12 @@ static long gsnedf_activate_plugin(void)
1769 } 1769 }
1770#endif 1770#endif
1771 } 1771 }
1772 1772
1773#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 1773#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1774 gsnedf_pending_tasklets.head = NULL; 1774 gsnedf_pending_tasklets.head = NULL;
1775 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head); 1775 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
1776#endif 1776#endif
1777 1777
1778#ifdef CONFIG_LITMUS_SOFTIRQD 1778#ifdef CONFIG_LITMUS_SOFTIRQD
1779 spawn_klitirqd(NULL); 1779 spawn_klitirqd(NULL);
1780#endif 1780#endif
@@ -1782,7 +1782,7 @@ static long gsnedf_activate_plugin(void)
1782#ifdef CONFIG_LITMUS_NVIDIA 1782#ifdef CONFIG_LITMUS_NVIDIA
1783 init_nvidia_info(); 1783 init_nvidia_info();
1784#endif 1784#endif
1785 1785
1786 return 0; 1786 return 0;
1787} 1787}
1788 1788
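The PAI initialization in gsnedf_activate_plugin() above resets the pending-tasklet queue with head = NULL and tail = &head, the classic singly linked list with a pointer-to-pointer tail. The self-contained example below shows why that pairing is useful; the list type and helpers are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

struct list {
	struct node *head;
	struct node **tail;
};

static void list_init(struct list *l)
{
	l->head = NULL;
	l->tail = &l->head;	/* same shape as pending_tasklets.tail = &head */
}

static void list_append(struct list *l, int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val  = val;
	n->next = NULL;
	*l->tail = n;		/* works for empty and non-empty lists alike */
	l->tail  = &n->next;
}

int main(void)
{
	struct list l;
	struct node *it;

	list_init(&l);
	list_append(&l, 1);
	list_append(&l, 2);
	for (it = l.head; it; it = it->next)
		printf("%d\n", it->val);
	return 0;
}

With tail always pointing at the last next pointer (or at head when the queue is empty), appends need no "is the list empty?" special case, which is presumably why the plugin resets both fields together on activation.
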
@@ -1815,7 +1815,7 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
1815#endif 1815#endif
1816#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 1816#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
1817 .allocate_aff_obs = gsnedf_allocate_affinity_observer, 1817 .allocate_aff_obs = gsnedf_allocate_affinity_observer,
1818#endif 1818#endif
1819#ifdef CONFIG_LITMUS_SOFTIRQD 1819#ifdef CONFIG_LITMUS_SOFTIRQD
1820 .increase_prio_klitirqd = increase_priority_inheritance_klitirqd, 1820 .increase_prio_klitirqd = increase_priority_inheritance_klitirqd,
1821 .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd, 1821 .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd,
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index a334fdf66c3b..2433297b7482 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -240,8 +240,8 @@ struct sched_plugin linux_sched_plugin = {
240#endif 240#endif
241#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 241#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
242 .allocate_aff_obs = litmus_dummy_allocate_aff_obs, 242 .allocate_aff_obs = litmus_dummy_allocate_aff_obs,
243#endif 243#endif
244 244
245 .admit_task = litmus_dummy_admit_task 245 .admit_task = litmus_dummy_admit_task
246}; 246};
247 247
@@ -302,7 +302,7 @@ int register_sched_plugin(struct sched_plugin* plugin)
302#endif 302#endif
303#ifdef CONFIG_LITMUS_AFFINITY_LOCKING 303#ifdef CONFIG_LITMUS_AFFINITY_LOCKING
304 CHECK(allocate_aff_obs); 304 CHECK(allocate_aff_obs);
305#endif 305#endif
306 CHECK(admit_task); 306 CHECK(admit_task);
307 307
308 if (!plugin->release_at) 308 if (!plugin->release_at)
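The register_sched_plugin() hunk above runs CHECK() over each optional hook, including allocate_aff_obs, before the trailing release_at test. The macro itself is not shown in this diff; assuming it substitutes the corresponding litmus_dummy_* fallback whenever a plugin leaves a hook NULL (as the table of dummies in linux_sched_plugin suggests), a self-contained sketch of that token-pasting pattern looks like the following, with invented names.

#include <stdio.h>

struct toy_plugin {
	void (*activate)(void);
	void (*admit_task)(void);
};

static void dummy_activate(void)   { puts("dummy activate"); }
static void dummy_admit_task(void) { puts("dummy admit_task"); }

/* Fill in a dummy implementation when the plugin leaves a hook unset. */
#define TOY_CHECK(p, hook)				\
	do {						\
		if (!(p)->hook)				\
			(p)->hook = dummy_##hook;	\
	} while (0)

int main(void)
{
	struct toy_plugin p = { .activate = NULL, .admit_task = NULL };

	TOY_CHECK(&p, activate);
	TOY_CHECK(&p, admit_task);

	p.activate();
	p.admit_task();
	return 0;
}
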
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index d079df2b292a..2bd3a787611b 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -252,7 +252,7 @@ feather_callback void do_sched_trace_tasklet_release(unsigned long id,
252{ 252{
253 struct task_struct *t = (struct task_struct*) _owner; 253 struct task_struct *t = (struct task_struct*) _owner;
254 struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t); 254 struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t);
255 255
256 if (rec) { 256 if (rec) {
257 rec->data.tasklet_release.when = now(); 257 rec->data.tasklet_release.when = now();
258 put_record(rec); 258 put_record(rec);
@@ -265,7 +265,7 @@ feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
265{ 265{
266 struct task_struct *t = (struct task_struct*) _owner; 266 struct task_struct *t = (struct task_struct*) _owner;
267 struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t); 267 struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t);
268 268
269 if (rec) { 269 if (rec) {
270 rec->data.tasklet_begin.when = now(); 270 rec->data.tasklet_begin.when = now();
271 271
@@ -286,7 +286,7 @@ feather_callback void do_sched_trace_tasklet_end(unsigned long id,
286{ 286{
287 struct task_struct *t = (struct task_struct*) _owner; 287 struct task_struct *t = (struct task_struct*) _owner;
288 struct st_event_record *rec = get_record(ST_TASKLET_END, t); 288 struct st_event_record *rec = get_record(ST_TASKLET_END, t);
289 289
290 if (rec) { 290 if (rec) {
291 rec->data.tasklet_end.when = now(); 291 rec->data.tasklet_end.when = now();
292 rec->data.tasklet_end.flushed = _flushed; 292 rec->data.tasklet_end.flushed = _flushed;
@@ -307,7 +307,7 @@ feather_callback void do_sched_trace_work_release(unsigned long id,
307{ 307{
308 struct task_struct *t = (struct task_struct*) _owner; 308 struct task_struct *t = (struct task_struct*) _owner;
309 struct st_event_record *rec = get_record(ST_WORK_RELEASE, t); 309 struct st_event_record *rec = get_record(ST_WORK_RELEASE, t);
310 310
311 if (rec) { 311 if (rec) {
312 rec->data.work_release.when = now(); 312 rec->data.work_release.when = now();
313 put_record(rec); 313 put_record(rec);
@@ -321,7 +321,7 @@ feather_callback void do_sched_trace_work_begin(unsigned long id,
321{ 321{
322 struct task_struct *t = (struct task_struct*) _owner; 322 struct task_struct *t = (struct task_struct*) _owner;
323 struct st_event_record *rec = get_record(ST_WORK_BEGIN, t); 323 struct st_event_record *rec = get_record(ST_WORK_BEGIN, t);
324 324
325 if (rec) { 325 if (rec) {
326 struct task_struct *exe = (struct task_struct*) _exe; 326 struct task_struct *exe = (struct task_struct*) _exe;
327 rec->data.work_begin.exe_pid = exe->pid; 327 rec->data.work_begin.exe_pid = exe->pid;
@@ -339,7 +339,7 @@ feather_callback void do_sched_trace_work_end(unsigned long id,
339{ 339{
340 struct task_struct *t = (struct task_struct*) _owner; 340 struct task_struct *t = (struct task_struct*) _owner;
341 struct st_event_record *rec = get_record(ST_WORK_END, t); 341 struct st_event_record *rec = get_record(ST_WORK_END, t);
342 342
343 if (rec) { 343 if (rec) {
344 struct task_struct *exe = (struct task_struct*) _exe; 344 struct task_struct *exe = (struct task_struct*) _exe;
345 rec->data.work_end.exe_pid = exe->pid; 345 rec->data.work_end.exe_pid = exe->pid;
@@ -357,14 +357,14 @@ feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
357{ 357{
358 struct task_struct *t = (struct task_struct*) _task; 358 struct task_struct *t = (struct task_struct*) _task;
359 struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t); 359 struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t);
360 360
361 if (rec) { 361 if (rec) {
362 struct task_struct *inh = (struct task_struct*) _inh; 362 struct task_struct *inh = (struct task_struct*) _inh;
363 rec->data.effective_priority_change.when = now(); 363 rec->data.effective_priority_change.when = now();
364 rec->data.effective_priority_change.inh_pid = (inh != NULL) ? 364 rec->data.effective_priority_change.inh_pid = (inh != NULL) ?
365 inh->pid : 365 inh->pid :
366 0xffff; 366 0xffff;
367 367
368 put_record(rec); 368 put_record(rec);
369 } 369 }
370} 370}
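Every do_sched_trace_*() callback in this file follows the same reserve/fill/commit shape: get_record() may return NULL when no slot is available, so the event is only filled in and committed with put_record() when a record was actually reserved; do_sched_trace_eff_prio_change() additionally encodes "no inheritance" as the sentinel 0xffff. The standalone toy below models that shape only; the fixed-size buffer and names are invented and are not the kernel's tracing implementation.

#include <stdio.h>

struct toy_record {
	unsigned long when;
	unsigned int  inh_pid;
};

#define TOY_SLOTS 4
static struct toy_record toy_buf[TOY_SLOTS];
static unsigned int toy_next;

/* Reserve a slot; may fail when the buffer is full. */
static struct toy_record *toy_get_record(void)
{
	return (toy_next < TOY_SLOTS) ? &toy_buf[toy_next] : NULL;
}

/* Commit the reserved slot so it becomes visible. */
static void toy_put_record(struct toy_record *rec)
{
	(void)rec;
	toy_next++;
}

static void toy_trace_prio_change(unsigned long now, int inh_pid)
{
	struct toy_record *rec = toy_get_record();

	if (rec) {
		rec->when    = now;
		/* 0xffff plays the same role as in the patch: "no inheritance" */
		rec->inh_pid = (inh_pid >= 0) ? (unsigned int)inh_pid : 0xffff;
		toy_put_record(rec);
	}
}

int main(void)
{
	toy_trace_prio_change(100, 42);
	toy_trace_prio_change(200, -1);
	printf("%lu %u\n", toy_buf[0].when, toy_buf[0].inh_pid);
	printf("%lu %u\n", toy_buf[1].when, toy_buf[1].inh_pid);
	return 0;
}
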
@@ -401,7 +401,7 @@ feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
401 int_map->count = 1; 401 int_map->count = 1;
402 } 402 }
403 //int_map->data[int_map->count-1] = _device; 403 //int_map->data[int_map->count-1] = _device;
404 404
405 serial = &per_cpu(intCounter, smp_processor_id()); 405 serial = &per_cpu(intCounter, smp_processor_id());
406 *serial += num_online_cpus(); 406 *serial += num_online_cpus();
407 serialNum = *serial; 407 serialNum = *serial;