author     Glenn Elliott <gelliott@cs.unc.edu>  2012-12-09 16:53:50 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2012-12-09 16:55:39 -0500
commit     fbd9574e298157b54c38f82f536e5cea8f766dff (patch)
tree       ab8985a838b6b4c69b99f28d6338a3b9a7721dc4
parent     fccce270a5540021b544d439595fa0a736242ff0 (diff)
Rename klitirqd to klmirqd.
-rw-r--r--  [-rwxr-xr-x]  include/litmus/aux_tasks.h         0
-rw-r--r--  [-rwxr-xr-x]  include/litmus/ikglp_lock.h        0
-rw-r--r--  [-rwxr-xr-x]  include/litmus/litmus.h            0
-rw-r--r--                include/litmus/litmus_softirq.h   42
-rw-r--r--  [-rwxr-xr-x]  include/litmus/rt_param.h         18
-rw-r--r--                include/litmus/sched_plugin.h      8
-rw-r--r--  [-rwxr-xr-x]  litmus/Kconfig                    10
-rw-r--r--  [-rwxr-xr-x]  litmus/aux_tasks.c                 0
-rw-r--r--  [-rwxr-xr-x]  litmus/edf_common.c                0
-rw-r--r--  [-rwxr-xr-x]  litmus/ikglp_lock.c                0
-rw-r--r--  [-rwxr-xr-x]  litmus/litmus.c                   22
-rw-r--r--                litmus/litmus_proc.c              14
-rw-r--r--                litmus/litmus_softirq.c          388
-rw-r--r--                litmus/nvidia_info.c               8
-rw-r--r--                litmus/sched_cedf.c               48
-rw-r--r--                litmus/sched_gsn_edf.c            44
-rw-r--r--                litmus/sched_plugin.c             12
17 files changed, 307 insertions(+), 307 deletions(-)
diff --git a/include/litmus/aux_tasks.h b/include/litmus/aux_tasks.h
index 87745c1c0df0..87745c1c0df0 100755..100644
--- a/include/litmus/aux_tasks.h
+++ b/include/litmus/aux_tasks.h
diff --git a/include/litmus/ikglp_lock.h b/include/litmus/ikglp_lock.h
index af155eadbb35..af155eadbb35 100755..100644
--- a/include/litmus/ikglp_lock.h
+++ b/include/litmus/ikglp_lock.h
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 711b88e2b3d1..711b88e2b3d1 100755..100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
index 1eb5ea1a6c4b..46fe89148505 100644
--- a/include/litmus/litmus_softirq.h
+++ b/include/litmus/litmus_softirq.h
@@ -13,7 +13,7 @@
13 Tasklets are current scheduled in FIFO order with 13 Tasklets are current scheduled in FIFO order with
14 NO priority inheritance for "blocked" tasklets. 14 NO priority inheritance for "blocked" tasklets.
15 15
16 klitirqd assumes the priority of the owner of the 16 klmirqd assumes the priority of the owner of the
17 tasklet when the tasklet is next to execute. 17 tasklet when the tasklet is next to execute.
18 18
19 Currently, hi-tasklets are scheduled before 19 Currently, hi-tasklets are scheduled before
@@ -21,7 +21,7 @@
21 And likewise, low-tasklets are scheduled before work 21 And likewise, low-tasklets are scheduled before work
22 queue objects. This priority inversion probably needs 22 queue objects. This priority inversion probably needs
23 to be fixed, though it is not an issue if our work with 23 to be fixed, though it is not an issue if our work with
24 GPUs as GPUs are owned (and associated klitirqds) for 24 GPUs as GPUs are owned (and associated klmirqds) for
25 exclusive time periods, thus no inversions can 25 exclusive time periods, thus no inversions can
26 occur. 26 occur.
27 */ 27 */
@@ -30,7 +30,7 @@
30 30
31#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD 31#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
32 32
33/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons. 33/* Spawns NR_LITMUS_SOFTIRQD klmirqd daemons.
34 Actual launch of threads is deffered to kworker's 34 Actual launch of threads is deffered to kworker's
35 workqueue, so daemons will likely not be immediately 35 workqueue, so daemons will likely not be immediately
36 running when this function returns, though the required 36 running when this function returns, though the required
@@ -52,43 +52,43 @@
52 FIXME: change array to a CPU topology or array of cpumasks 52 FIXME: change array to a CPU topology or array of cpumasks
53 53
54 */ 54 */
55void spawn_klitirqd(int* affinity); 55void spawn_klmirqd(int* affinity);
56 56
57 57
58/* Raises a flag to tell klitirqds to terminate. 58/* Raises a flag to tell klmirqds to terminate.
59 Termination is async, so some threads may be running 59 Termination is async, so some threads may be running
60 after function return. */ 60 after function return. */
61void kill_klitirqd(void); 61void kill_klmirqd(void);
62 62
63 63
64/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqs are ready 64/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqs are ready
65 to handle tasklets. 0, otherwise.*/ 65 to handle tasklets. 0, otherwise.*/
66int klitirqd_is_ready(void); 66int klmirqd_is_ready(void);
67 67
68/* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqs are ready 68/* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqs are ready
69 to handle tasklets. 0, otherwise.*/ 69 to handle tasklets. 0, otherwise.*/
70int klitirqd_is_dead(void); 70int klmirqd_is_dead(void);
71 71
72/* Flushes all pending work out to the OS for regular 72/* Flushes all pending work out to the OS for regular
73 * tasklet/work processing of the specified 'owner' 73 * tasklet/work processing of the specified 'owner'
74 * 74 *
75 * PRECOND: klitirqd_thread must have a clear entry 75 * PRECOND: klmirqd_thread must have a clear entry
76 * in the GPU registry, otherwise this call will become 76 * in the GPU registry, otherwise this call will become
77 * a no-op as work will loop back to the klitirqd_thread. 77 * a no-op as work will loop back to the klmirqd_thread.
78 * 78 *
79 * Pass NULL for owner to flush ALL pending items. 79 * Pass NULL for owner to flush ALL pending items.
80 */ 80 */
81void flush_pending(struct task_struct* klitirqd_thread, 81void flush_pending(struct task_struct* klmirqd_thread,
82 struct task_struct* owner); 82 struct task_struct* owner);
83 83
84struct task_struct* get_klitirqd(unsigned int k_id); 84struct task_struct* get_klmirqd(unsigned int k_id);
85 85
86 86
87extern int __litmus_tasklet_schedule( 87extern int __litmus_tasklet_schedule(
88 struct tasklet_struct *t, 88 struct tasklet_struct *t,
89 unsigned int k_id); 89 unsigned int k_id);
90 90
91/* schedule a tasklet on klitirqd #k_id */ 91/* schedule a tasklet on klmirqd #k_id */
92static inline int litmus_tasklet_schedule( 92static inline int litmus_tasklet_schedule(
93 struct tasklet_struct *t, 93 struct tasklet_struct *t,
94 unsigned int k_id) 94 unsigned int k_id)
@@ -113,7 +113,7 @@ static inline int _litmus_tasklet_schedule(
113extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, 113extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
114 unsigned int k_id); 114 unsigned int k_id);
115 115
116/* schedule a hi tasklet on klitirqd #k_id */ 116/* schedule a hi tasklet on klmirqd #k_id */
117static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t, 117static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
118 unsigned int k_id) 118 unsigned int k_id)
119{ 119{
@@ -138,7 +138,7 @@ extern int __litmus_tasklet_hi_schedule_first(
138 struct tasklet_struct *t, 138 struct tasklet_struct *t,
139 unsigned int k_id); 139 unsigned int k_id);
140 140
141/* schedule a hi tasklet on klitirqd #k_id on next go-around */ 141/* schedule a hi tasklet on klmirqd #k_id on next go-around */
142/* PRECONDITION: Interrupts must be disabled. */ 142/* PRECONDITION: Interrupts must be disabled. */
143static inline int litmus_tasklet_hi_schedule_first( 143static inline int litmus_tasklet_hi_schedule_first(
144 struct tasklet_struct *t, 144 struct tasklet_struct *t,
@@ -178,22 +178,22 @@ static inline int litmus_schedule_work(
178///////////// mutex operations for client threads. 178///////////// mutex operations for client threads.
179 179
180void down_and_set_stat(struct task_struct* t, 180void down_and_set_stat(struct task_struct* t,
181 enum klitirqd_sem_status to_set, 181 enum klmirqd_sem_status to_set,
182 struct mutex* sem); 182 struct mutex* sem);
183 183
184void __down_and_reset_and_set_stat(struct task_struct* t, 184void __down_and_reset_and_set_stat(struct task_struct* t,
185 enum klitirqd_sem_status to_reset, 185 enum klmirqd_sem_status to_reset,
186 enum klitirqd_sem_status to_set, 186 enum klmirqd_sem_status to_set,
187 struct mutex* sem); 187 struct mutex* sem);
188 188
189void up_and_set_stat(struct task_struct* t, 189void up_and_set_stat(struct task_struct* t,
190 enum klitirqd_sem_status to_set, 190 enum klmirqd_sem_status to_set,
191 struct mutex* sem); 191 struct mutex* sem);
192 192
193 193
194 194
195void release_klitirqd_lock(struct task_struct* t); 195void release_klmirqd_lock(struct task_struct* t);
196 196
197int reacquire_klitirqd_lock(struct task_struct* t); 197int reacquire_klmirqd_lock(struct task_struct* t);
198 198
199#endif 199#endif
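
To make the renamed interface concrete, here is a minimal usage sketch; the tasklet, its handler, and the driver context are hypothetical, and only spawn_klmirqd(), klmirqd_is_ready(), litmus_tasklet_schedule(), and kill_klmirqd() come from the header above:

    /* Hypothetical caller -- not part of this patch. Only the klmirqd
     * calls below are declared in litmus_softirq.h. */
    #include <linux/interrupt.h>
    #include <litmus/litmus_softirq.h>

    static void demo_handler(unsigned long data)
    {
        /* hypothetical tasklet body */
    }

    static DECLARE_TASKLET(demo_tasklet, demo_handler, 0);

    static void demo_dispatch(void)
    {
        spawn_klmirqd(NULL);        /* NULL affinity: let Litmus place the daemons */

        while (!klmirqd_is_ready()) /* launch is deferred to a kworker, so poll */
            cpu_relax();

        litmus_tasklet_schedule(&demo_tasklet, 0); /* queue on klmirqd #0 */

        kill_klmirqd();             /* termination is asynchronous */
    }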
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index aca78a835529..47301c04d862 100755..100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -166,7 +166,7 @@ struct rt_job {
166 166
167struct pfair_param; 167struct pfair_param;
168 168
169enum klitirqd_sem_status 169enum klmirqd_sem_status
170{ 170{
171 NEED_TO_REACQUIRE, 171 NEED_TO_REACQUIRE,
172 REACQUIRING, 172 REACQUIRING,
@@ -223,26 +223,26 @@ struct rt_param {
223 /* proxy threads have minimum priority by default */ 223 /* proxy threads have minimum priority by default */
224 unsigned int is_proxy_thread:1; 224 unsigned int is_proxy_thread:1;
225 225
226 /* pointer to klitirqd currently working on this 226 /* pointer to klmirqd currently working on this
227 task_struct's behalf. only set by the task pointed 227 task_struct's behalf. only set by the task pointed
228 to by klitirqd. 228 to by klmirqd.
229 229
230 ptr only valid if is_proxy_thread == 0 230 ptr only valid if is_proxy_thread == 0
231 */ 231 */
232 struct task_struct* cur_klitirqd; 232 struct task_struct* cur_klmirqd;
233 233
234 /* Used to implement mutual execution exclusion between 234 /* Used to implement mutual execution exclusion between
235 * job and klitirqd execution. Job must always hold 235 * job and klmirqd execution. Job must always hold
236 * it's klitirqd_sem to execute. klitirqd instance 236 * it's klmirqd_sem to execute. klmirqd instance
237 * must hold the semaphore before executing on behalf 237 * must hold the semaphore before executing on behalf
238 * of a job. 238 * of a job.
239 */ 239 */
240 struct mutex klitirqd_sem; 240 struct mutex klmirqd_sem;
241 241
242 /* status of held klitirqd_sem, even if the held klitirqd_sem is from 242 /* status of held klmirqd_sem, even if the held klmirqd_sem is from
243 another task (only proxy threads do this though). 243 another task (only proxy threads do this though).
244 */ 244 */
245 atomic_t klitirqd_sem_stat; 245 atomic_t klmirqd_sem_stat;
246#endif 246#endif
247 247
248#ifdef CONFIG_LITMUS_NVIDIA 248#ifdef CONFIG_LITMUS_NVIDIA
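
The three fields above encode a simple exclusion protocol: a job must hold its own klmirqd_sem to execute, and a klmirqd daemon must take that same semaphore before working on the job's behalf. A sketch of the daemon side, assuming a hypothetical helper; HELD/NOT_HELD and the down/up_and_set_stat() wrappers are declared in litmus_softirq.h:

    /* Hypothetical daemon-side helper illustrating the klmirqd_sem
     * protocol; not part of this patch. */
    static void serve_owner(struct task_struct* owner)
    {
        struct mutex* sem = &tsk_rt(owner)->klmirqd_sem;

        /* may not execute on owner's behalf until the semaphore is held */
        down_and_set_stat(current, HELD, sem);

        /* ... run owner's pending tasklets and work items ... */

        up_and_set_stat(current, NOT_HELD, sem);
    }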
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 65736b2a9199..e8127f427d56 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -79,10 +79,10 @@ typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct
79typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, 79typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh,
80 raw_spinlock_t *to_unlock, unsigned long irqflags); 80 raw_spinlock_t *to_unlock, unsigned long irqflags);
81 81
82typedef void (*increase_prio_klitirq_t)(struct task_struct* klitirqd, 82typedef void (*increase_prio_klitirq_t)(struct task_struct* klmirqd,
83 struct task_struct* old_owner, 83 struct task_struct* old_owner,
84 struct task_struct* new_owner); 84 struct task_struct* new_owner);
85typedef void (*decrease_prio_klitirqd_t)(struct task_struct* klitirqd, 85typedef void (*decrease_prio_klmirqd_t)(struct task_struct* klmirqd,
86 struct task_struct* old_owner); 86 struct task_struct* old_owner);
87 87
88 88
@@ -168,8 +168,8 @@ struct sched_plugin {
168#endif 168#endif
169 169
170#ifdef CONFIG_LITMUS_SOFTIRQD 170#ifdef CONFIG_LITMUS_SOFTIRQD
171 increase_prio_klitirq_t increase_prio_klitirqd; 171 increase_prio_klitirq_t increase_prio_klmirqd;
172 decrease_prio_klitirqd_t decrease_prio_klitirqd; 172 decrease_prio_klmirqd_t decrease_prio_klmirqd;
173#endif 173#endif
174#ifdef CONFIG_LITMUS_PAI_SOFTIRQD 174#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
175 enqueue_pai_tasklet_t enqueue_pai_tasklet; 175 enqueue_pai_tasklet_t enqueue_pai_tasklet;
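
A sketch of how a plugin would fill in these hooks; the handler bodies are placeholders, but the field names and signatures are the ones declared above (note that the patch leaves the first typedef named increase_prio_klitirq_t):

    /* Hypothetical plugin wiring for the renamed callbacks. */
    static void demo_increase_prio_klmirqd(struct task_struct* klmirqd,
                                           struct task_struct* old_owner,
                                           struct task_struct* new_owner)
    {
        /* let the daemon inherit new_owner's priority */
    }

    static void demo_decrease_prio_klmirqd(struct task_struct* klmirqd,
                                           struct task_struct* old_owner)
    {
        /* revert the daemon to its base priority */
    }

    static struct sched_plugin demo_plugin = {
        /* ... other callbacks elided ... */
    #ifdef CONFIG_LITMUS_SOFTIRQD
        .increase_prio_klmirqd = demo_increase_prio_klmirqd,
        .decrease_prio_klmirqd = demo_decrease_prio_klmirqd,
    #endif
    };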
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 16087b9e4e81..f2434b87239b 100755..100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -392,11 +392,11 @@ config LITMUS_SOFTIRQD_NONE
392 Don't schedule tasklets in Litmus. Default. 392 Don't schedule tasklets in Litmus. Default.
393 393
394config LITMUS_SOFTIRQD 394config LITMUS_SOFTIRQD
395 bool "Spawn klitirqd interrupt handling threads." 395 bool "Spawn klmirqd interrupt handling threads."
396 help 396 help
397 Create klitirqd interrupt handling threads. Work must be 397 Create klmirqd interrupt handling threads. Work must be
398 specifically dispatched to these workers. (Softirqs for 398 specifically dispatched to these workers. (Softirqs for
399 Litmus tasks are not magically redirected to klitirqd.) 399 Litmus tasks are not magically redirected to klmirqd.)
400 400
401 G-EDF/RM, C-EDF/RM ONLY for now! 401 G-EDF/RM, C-EDF/RM ONLY for now!
402 402
@@ -415,7 +415,7 @@ endchoice
415 415
416 416
417config NR_LITMUS_SOFTIRQD 417config NR_LITMUS_SOFTIRQD
418 int "Number of klitirqd." 418 int "Number of klmirqd."
419 depends on LITMUS_SOFTIRQD 419 depends on LITMUS_SOFTIRQD
420 range 1 4096 420 range 1 4096
421 default "1" 421 default "1"
@@ -426,7 +426,7 @@ config LITMUS_NVIDIA
426 bool "Litmus handling of NVIDIA interrupts." 426 bool "Litmus handling of NVIDIA interrupts."
427 default n 427 default n
428 help 428 help
429 Direct tasklets from NVIDIA devices to Litmus's klitirqd 429 Direct tasklets from NVIDIA devices to Litmus's klmirqd
430 or PAI interrupt handling routines. 430 or PAI interrupt handling routines.
431 431
432 If unsure, say No. 432 If unsure, say No.
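
Taken together, a kernel configuration exercising these options might contain the following fragment; the values are illustrative, the option names are the ones defined above:

    CONFIG_LITMUS_SOFTIRQD=y
    CONFIG_NR_LITMUS_SOFTIRQD=4
    CONFIG_LITMUS_NVIDIA=y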
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index 20f477f6e3bc..20f477f6e3bc 100755..100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index c279bf12a7f5..c279bf12a7f5 100755..100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index b29828344dd1..b29828344dd1 100755..100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 065ef7d3192a..3b8017397e80 100755..100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -397,14 +397,14 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
397 /* We probably should not have any tasklets executing for 397 /* We probably should not have any tasklets executing for
398 * us at this time. 398 * us at this time.
399 */ 399 */
400 WARN_ON(p->rt_param.cur_klitirqd); 400 WARN_ON(p->rt_param.cur_klmirqd);
401 WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD); 401 WARN_ON(atomic_read(&p->rt_param.klmirqd_sem_stat) == HELD);
402 402
403 if(p->rt_param.cur_klitirqd) 403 if(p->rt_param.cur_klmirqd)
404 flush_pending(p->rt_param.cur_klitirqd, p); 404 flush_pending(p->rt_param.cur_klmirqd, p);
405 405
406 if(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD) 406 if(atomic_read(&p->rt_param.klmirqd_sem_stat) == HELD)
407 up_and_set_stat(p, NOT_HELD, &p->rt_param.klitirqd_sem); 407 up_and_set_stat(p, NOT_HELD, &p->rt_param.klmirqd_sem);
408#endif 408#endif
409 409
410#ifdef CONFIG_LITMUS_NVIDIA 410#ifdef CONFIG_LITMUS_NVIDIA
@@ -479,9 +479,9 @@ long __litmus_admit_task(struct task_struct* tsk)
479#ifdef CONFIG_LITMUS_SOFTIRQD 479#ifdef CONFIG_LITMUS_SOFTIRQD
480 /* proxy thread off by default */ 480 /* proxy thread off by default */
481 tsk_rt(tsk)->is_proxy_thread = 0; 481 tsk_rt(tsk)->is_proxy_thread = 0;
482 tsk_rt(tsk)->cur_klitirqd = NULL; 482 tsk_rt(tsk)->cur_klmirqd = NULL;
483 mutex_init(&tsk_rt(tsk)->klitirqd_sem); 483 mutex_init(&tsk_rt(tsk)->klmirqd_sem);
484 atomic_set(&tsk_rt(tsk)->klitirqd_sem_stat, NOT_HELD); 484 atomic_set(&tsk_rt(tsk)->klmirqd_sem_stat, NOT_HELD);
485#endif 485#endif
486 486
487 retval = litmus->admit_task(tsk); 487 retval = litmus->admit_task(tsk);
@@ -580,9 +580,9 @@ int switch_sched_plugin(struct sched_plugin* plugin)
580 cpu_relax(); 580 cpu_relax();
581 581
582#ifdef CONFIG_LITMUS_SOFTIRQD 582#ifdef CONFIG_LITMUS_SOFTIRQD
583 if(!klitirqd_is_dead()) 583 if(!klmirqd_is_dead())
584 { 584 {
585 kill_klitirqd(); 585 kill_klmirqd();
586 } 586 }
587#endif 587#endif
588 588
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index 9ab7e015a3c1..136fecfb0b8b 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -21,14 +21,14 @@ static struct proc_dir_entry *litmus_dir = NULL,
21 *release_master_file = NULL, 21 *release_master_file = NULL,
22#endif 22#endif
23#ifdef CONFIG_LITMUS_SOFTIRQD 23#ifdef CONFIG_LITMUS_SOFTIRQD
24 *klitirqd_file = NULL, 24 *klmirqd_file = NULL,
25#endif 25#endif
26 *plugs_file = NULL; 26 *plugs_file = NULL;
27 27
28/* in litmus/sync.c */ 28/* in litmus/sync.c */
29int count_tasks_waiting_for_release(void); 29int count_tasks_waiting_for_release(void);
30 30
31extern int proc_read_klitirqd_stats(char *page, char **start, 31extern int proc_read_klmirqd_stats(char *page, char **start,
32 off_t off, int count, 32 off_t off, int count,
33 int *eof, void *data); 33 int *eof, void *data);
34 34
@@ -169,9 +169,9 @@ int __init init_litmus_proc(void)
169#endif 169#endif
170 170
171#ifdef CONFIG_LITMUS_SOFTIRQD 171#ifdef CONFIG_LITMUS_SOFTIRQD
172 klitirqd_file = 172 klmirqd_file =
173 create_proc_read_entry("klitirqd_stats", 0444, litmus_dir, 173 create_proc_read_entry("klmirqd_stats", 0444, litmus_dir,
174 proc_read_klitirqd_stats, NULL); 174 proc_read_klmirqd_stats, NULL);
175#endif 175#endif
176 176
177 stat_file = create_proc_read_entry("stats", 0444, litmus_dir, 177 stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
@@ -201,8 +201,8 @@ void exit_litmus_proc(void)
201 if (curr_file) 201 if (curr_file)
202 remove_proc_entry("active_plugin", litmus_dir); 202 remove_proc_entry("active_plugin", litmus_dir);
203#ifdef CONFIG_LITMUS_SOFTIRQD 203#ifdef CONFIG_LITMUS_SOFTIRQD
204 if (klitirqd_file) 204 if (klmirqd_file)
205 remove_proc_entry("klitirqd_stats", litmus_dir); 205 remove_proc_entry("klmirqd_stats", litmus_dir);
206#endif 206#endif
207#ifdef CONFIG_RELEASE_MASTER 207#ifdef CONFIG_RELEASE_MASTER
208 if (release_master_file) 208 if (release_master_file)
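
Reading the resulting /proc/litmus/klmirqd_stats file yields output in the format assembled by proc_read_klmirqd_stats() in litmus_softirq.c below; a sample with a hypothetical PID and a single idle daemon:

    num ready klmirqds: 1

    klmirqd_th0: klmirqd_th0/1234
        current_owner: (null)/0
        pending: 0
        num hi: 0
        num low: 0
        num work: 0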
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index 9f7d9da5facb..73a3053e662b 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -20,7 +20,7 @@
20 20
21 21
22/* counts number of daemons ready to handle litmus irqs. */ 22/* counts number of daemons ready to handle litmus irqs. */
23static atomic_t num_ready_klitirqds = ATOMIC_INIT(0); 23static atomic_t num_ready_klmirqds = ATOMIC_INIT(0);
24 24
25enum pending_flags 25enum pending_flags
26{ 26{
@@ -36,9 +36,9 @@ struct tasklet_head
36 struct tasklet_struct **tail; 36 struct tasklet_struct **tail;
37}; 37};
38 38
39struct klitirqd_info 39struct klmirqd_info
40{ 40{
41 struct task_struct* klitirqd; 41 struct task_struct* klmirqd;
42 struct task_struct* current_owner; 42 struct task_struct* current_owner;
43 int terminating; 43 int terminating;
44 44
@@ -56,44 +56,44 @@ struct klitirqd_info
56 struct list_head worklist; 56 struct list_head worklist;
57}; 57};
58 58
59/* one list for each klitirqd */ 59/* one list for each klmirqd */
60static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD]; 60static struct klmirqd_info klmirqds[NR_LITMUS_SOFTIRQD];
61 61
62 62
63 63
64 64
65 65
66int proc_read_klitirqd_stats(char *page, char **start, 66int proc_read_klmirqd_stats(char *page, char **start,
67 off_t off, int count, 67 off_t off, int count,
68 int *eof, void *data) 68 int *eof, void *data)
69{ 69{
70 int len = snprintf(page, PAGE_SIZE, 70 int len = snprintf(page, PAGE_SIZE,
71 "num ready klitirqds: %d\n\n", 71 "num ready klmirqds: %d\n\n",
72 atomic_read(&num_ready_klitirqds)); 72 atomic_read(&num_ready_klmirqds));
73 73
74 if(klitirqd_is_ready()) 74 if(klmirqd_is_ready())
75 { 75 {
76 int i; 76 int i;
77 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 77 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
78 { 78 {
79 len += 79 len +=
80 snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ 80 snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */
81 "klitirqd_th%d: %s/%d\n" 81 "klmirqd_th%d: %s/%d\n"
82 "\tcurrent_owner: %s/%d\n" 82 "\tcurrent_owner: %s/%d\n"
83 "\tpending: %x\n" 83 "\tpending: %x\n"
84 "\tnum hi: %d\n" 84 "\tnum hi: %d\n"
85 "\tnum low: %d\n" 85 "\tnum low: %d\n"
86 "\tnum work: %d\n\n", 86 "\tnum work: %d\n\n",
87 i, 87 i,
88 klitirqds[i].klitirqd->comm, klitirqds[i].klitirqd->pid, 88 klmirqds[i].klmirqd->comm, klmirqds[i].klmirqd->pid,
89 (klitirqds[i].current_owner != NULL) ? 89 (klmirqds[i].current_owner != NULL) ?
90 klitirqds[i].current_owner->comm : "(null)", 90 klmirqds[i].current_owner->comm : "(null)",
91 (klitirqds[i].current_owner != NULL) ? 91 (klmirqds[i].current_owner != NULL) ?
92 klitirqds[i].current_owner->pid : 0, 92 klmirqds[i].current_owner->pid : 0,
93 klitirqds[i].pending, 93 klmirqds[i].pending,
94 atomic_read(&klitirqds[i].num_hi_pending), 94 atomic_read(&klmirqds[i].num_hi_pending),
95 atomic_read(&klitirqds[i].num_low_pending), 95 atomic_read(&klmirqds[i].num_low_pending),
96 atomic_read(&klitirqds[i].num_work_pending)); 96 atomic_read(&klmirqds[i].num_work_pending));
97 } 97 }
98 } 98 }
99 99
@@ -107,7 +107,7 @@ int proc_read_klitirqd_stats(char *page, char **start,
107#if 0 107#if 0
108static atomic_t dump_id = ATOMIC_INIT(0); 108static atomic_t dump_id = ATOMIC_INIT(0);
109 109
110static void __dump_state(struct klitirqd_info* which, const char* caller) 110static void __dump_state(struct klmirqd_info* which, const char* caller)
111{ 111{
112 struct tasklet_struct* list; 112 struct tasklet_struct* list;
113 113
@@ -118,22 +118,22 @@ static void __dump_state(struct klitirqd_info* which, const char* caller)
118 if(which->current_owner) 118 if(which->current_owner)
119 { 119 {
120 TRACE("(id: %d caller: %s)\n" 120 TRACE("(id: %d caller: %s)\n"
121 "klitirqd: %s/%d\n" 121 "klmirqd: %s/%d\n"
122 "current owner: %s/%d\n" 122 "current owner: %s/%d\n"
123 "pending: %x\n", 123 "pending: %x\n",
124 id, caller, 124 id, caller,
125 which->klitirqd->comm, which->klitirqd->pid, 125 which->klmirqd->comm, which->klmirqd->pid,
126 which->current_owner->comm, which->current_owner->pid, 126 which->current_owner->comm, which->current_owner->pid,
127 which->pending); 127 which->pending);
128 } 128 }
129 else 129 else
130 { 130 {
131 TRACE("(id: %d caller: %s)\n" 131 TRACE("(id: %d caller: %s)\n"
132 "klitirqd: %s/%d\n" 132 "klmirqd: %s/%d\n"
133 "current owner: %p\n" 133 "current owner: %p\n"
134 "pending: %x\n", 134 "pending: %x\n",
135 id, caller, 135 id, caller,
136 which->klitirqd->comm, which->klitirqd->pid, 136 which->klmirqd->comm, which->klmirqd->pid,
137 NULL, 137 NULL,
138 which->pending); 138 which->pending);
139 } 139 }
@@ -151,7 +151,7 @@ static void __dump_state(struct klitirqd_info* which, const char* caller)
151 } 151 }
152} 152}
153 153
154static void dump_state(struct klitirqd_info* which, const char* caller) 154static void dump_state(struct klmirqd_info* which, const char* caller)
155{ 155{
156 unsigned long flags; 156 unsigned long flags;
157 157
@@ -164,23 +164,23 @@ static void dump_state(struct klitirqd_info* which, const char* caller)
164 164
165/* forward declarations */ 165/* forward declarations */
166static void ___litmus_tasklet_schedule(struct tasklet_struct *t, 166static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
167 struct klitirqd_info *which, 167 struct klmirqd_info *which,
168 int wakeup); 168 int wakeup);
169static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, 169static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
170 struct klitirqd_info *which, 170 struct klmirqd_info *which,
171 int wakeup); 171 int wakeup);
172static void ___litmus_schedule_work(struct work_struct *w, 172static void ___litmus_schedule_work(struct work_struct *w,
173 struct klitirqd_info *which, 173 struct klmirqd_info *which,
174 int wakeup); 174 int wakeup);
175 175
176 176
177 177
178inline unsigned int klitirqd_id(struct task_struct* tsk) 178inline unsigned int klmirqd_id(struct task_struct* tsk)
179{ 179{
180 int i; 180 int i;
181 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 181 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
182 { 182 {
183 if(klitirqds[i].klitirqd == tsk) 183 if(klmirqds[i].klmirqd == tsk)
184 { 184 {
185 return i; 185 return i;
186 } 186 }
@@ -192,28 +192,28 @@ inline unsigned int klitirqd_id(struct task_struct* tsk)
192} 192}
193 193
194 194
195inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which) 195inline static u32 litirq_pending_hi_irqoff(struct klmirqd_info* which)
196{ 196{
197 return (which->pending & LIT_TASKLET_HI); 197 return (which->pending & LIT_TASKLET_HI);
198} 198}
199 199
200inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which) 200inline static u32 litirq_pending_low_irqoff(struct klmirqd_info* which)
201{ 201{
202 return (which->pending & LIT_TASKLET_LOW); 202 return (which->pending & LIT_TASKLET_LOW);
203} 203}
204 204
205inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which) 205inline static u32 litirq_pending_work_irqoff(struct klmirqd_info* which)
206{ 206{
207 return (which->pending & LIT_WORK); 207 return (which->pending & LIT_WORK);
208} 208}
209 209
210inline static u32 litirq_pending_irqoff(struct klitirqd_info* which) 210inline static u32 litirq_pending_irqoff(struct klmirqd_info* which)
211{ 211{
212 return(which->pending); 212 return(which->pending);
213} 213}
214 214
215 215
216inline static u32 litirq_pending(struct klitirqd_info* which) 216inline static u32 litirq_pending(struct klmirqd_info* which)
217{ 217{
218 unsigned long flags; 218 unsigned long flags;
219 u32 pending; 219 u32 pending;
@@ -225,7 +225,7 @@ inline static u32 litirq_pending(struct klitirqd_info* which)
225 return pending; 225 return pending;
226}; 226};
227 227
228inline static u32 litirq_pending_with_owner(struct klitirqd_info* which, struct task_struct* owner) 228inline static u32 litirq_pending_with_owner(struct klmirqd_info* which, struct task_struct* owner)
229{ 229{
230 unsigned long flags; 230 unsigned long flags;
231 u32 pending; 231 u32 pending;
@@ -245,7 +245,7 @@ inline static u32 litirq_pending_with_owner(struct klitirqd_info* which, struct
245} 245}
246 246
247 247
248inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which, 248inline static u32 litirq_pending_and_sem_and_owner(struct klmirqd_info* which,
249 struct mutex** sem, 249 struct mutex** sem,
250 struct task_struct** t) 250 struct task_struct** t)
251{ 251{
@@ -264,7 +264,7 @@ inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which,
264 if(which->current_owner != NULL) 264 if(which->current_owner != NULL)
265 { 265 {
266 *t = which->current_owner; 266 *t = which->current_owner;
267 *sem = &tsk_rt(which->current_owner)->klitirqd_sem; 267 *sem = &tsk_rt(which->current_owner)->klmirqd_sem;
268 } 268 }
269 else 269 else
270 { 270 {
@@ -286,7 +286,7 @@ inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which,
286/* returns true if the next piece of work to do is from a different owner. 286/* returns true if the next piece of work to do is from a different owner.
287 */ 287 */
288static int tasklet_ownership_change( 288static int tasklet_ownership_change(
289 struct klitirqd_info* which, 289 struct klmirqd_info* which,
290 enum pending_flags taskletQ) 290 enum pending_flags taskletQ)
291{ 291{
292 /* this function doesn't have to look at work objects since they have 292 /* this function doesn't have to look at work objects since they have
@@ -319,16 +319,16 @@ static int tasklet_ownership_change(
319 319
320 raw_spin_unlock_irqrestore(&which->lock, flags); 320 raw_spin_unlock_irqrestore(&which->lock, flags);
321 321
322 TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret); 322 TRACE_TASK(which->klmirqd, "ownership change needed: %d\n", ret);
323 323
324 return ret; 324 return ret;
325} 325}
326 326
327 327
328static void __reeval_prio(struct klitirqd_info* which) 328static void __reeval_prio(struct klmirqd_info* which)
329{ 329{
330 struct task_struct* next_owner = NULL; 330 struct task_struct* next_owner = NULL;
331 struct task_struct* klitirqd = which->klitirqd; 331 struct task_struct* klmirqd = which->klmirqd;
332 332
333 /* Check in prio-order */ 333 /* Check in prio-order */
334 u32 pending = litirq_pending_irqoff(which); 334 u32 pending = litirq_pending_irqoff(which);
@@ -366,43 +366,43 @@ static void __reeval_prio(struct klitirqd_info* which)
366 if(!in_interrupt()) 366 if(!in_interrupt())
367 { 367 {
368 TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, 368 TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
369 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, 369 ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->comm,
370 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, 370 ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->pid,
371 next_owner->comm, next_owner->pid); 371 next_owner->comm, next_owner->pid);
372 } 372 }
373 else 373 else
374 { 374 {
375 TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, 375 TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
376 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm, 376 ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->comm,
377 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid, 377 ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->pid,
378 next_owner->comm, next_owner->pid); 378 next_owner->comm, next_owner->pid);
379 } 379 }
380 380
381 litmus->increase_prio_inheritance_klitirqd(klitirqd, old_owner, next_owner); 381 litmus->increase_prio_inheritance_klmirqd(klmirqd, old_owner, next_owner);
382 } 382 }
383 else 383 else
384 { 384 {
385 if(likely(!in_interrupt())) 385 if(likely(!in_interrupt()))
386 { 386 {
387 TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n", 387 TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n",
388 __FUNCTION__, klitirqd->comm, klitirqd->pid); 388 __FUNCTION__, klmirqd->comm, klmirqd->pid);
389 } 389 }
390 else 390 else
391 { 391 {
392 // is this a bug? 392 // is this a bug?
393 TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n", 393 TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n",
394 __FUNCTION__, klitirqd->comm, klitirqd->pid); 394 __FUNCTION__, klmirqd->comm, klmirqd->pid);
395 } 395 }
396 396
397 BUG_ON(pending != 0); 397 BUG_ON(pending != 0);
398 litmus->decrease_prio_inheritance_klitirqd(klitirqd, old_owner, NULL); 398 litmus->decrease_prio_inheritance_klmirqd(klmirqd, old_owner, NULL);
399 } 399 }
400 } 400 }
401 401
402 //__dump_state(which, "__reeval_prio: after"); 402 //__dump_state(which, "__reeval_prio: after");
403} 403}
404 404
405static void reeval_prio(struct klitirqd_info* which) 405static void reeval_prio(struct klmirqd_info* which)
406{ 406{
407 unsigned long flags; 407 unsigned long flags;
408 408
@@ -412,25 +412,25 @@ static void reeval_prio(struct klitirqd_info* which)
412} 412}
413 413
414 414
415static void wakeup_litirqd_locked(struct klitirqd_info* which) 415static void wakeup_litirqd_locked(struct klmirqd_info* which)
416{ 416{
417 /* Interrupts are disabled: no need to stop preemption */ 417 /* Interrupts are disabled: no need to stop preemption */
418 if (which && which->klitirqd) 418 if (which && which->klmirqd)
419 { 419 {
420 __reeval_prio(which); /* configure the proper priority */ 420 __reeval_prio(which); /* configure the proper priority */
421 421
422 if(which->klitirqd->state != TASK_RUNNING) 422 if(which->klmirqd->state != TASK_RUNNING)
423 { 423 {
424 TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__, 424 TRACE("%s: Waking up klmirqd: %s/%d\n", __FUNCTION__,
425 which->klitirqd->comm, which->klitirqd->pid); 425 which->klmirqd->comm, which->klmirqd->pid);
426 426
427 wake_up_process(which->klitirqd); 427 wake_up_process(which->klmirqd);
428 } 428 }
429 } 429 }
430} 430}
431 431
432 432
433static void do_lit_tasklet(struct klitirqd_info* which, 433static void do_lit_tasklet(struct klmirqd_info* which,
434 struct tasklet_head* pending_tasklets) 434 struct tasklet_head* pending_tasklets)
435{ 435{
436 unsigned long flags; 436 unsigned long flags;
@@ -503,7 +503,7 @@ static void do_lit_tasklet(struct klitirqd_info* which,
503 503
504// returns 1 if priorities need to be changed to continue processing 504// returns 1 if priorities need to be changed to continue processing
505// pending tasklets. 505// pending tasklets.
506static int do_litirq(struct klitirqd_info* which) 506static int do_litirq(struct klmirqd_info* which)
507{ 507{
508 u32 pending; 508 u32 pending;
509 int resched = 0; 509 int resched = 0;
@@ -514,17 +514,17 @@ static int do_litirq(struct klitirqd_info* which)
514 return(0); 514 return(0);
515 } 515 }
516 516
517 if(which->klitirqd != current) 517 if(which->klmirqd != current)
518 { 518 {
519 TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", 519 TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n",
520 __FUNCTION__, current->comm, current->pid, 520 __FUNCTION__, current->comm, current->pid,
521 which->klitirqd->comm, which->klitirqd->pid); 521 which->klmirqd->comm, which->klmirqd->pid);
522 return(0); 522 return(0);
523 } 523 }
524 524
525 if(!is_realtime(current)) 525 if(!is_realtime(current))
526 { 526 {
527 TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n", 527 TRACE_CUR("%s: exiting early: klmirqd is not real-time. Sched Policy = %d\n",
528 __FUNCTION__, current->policy); 528 __FUNCTION__, current->policy);
529 return(0); 529 return(0);
530 } 530 }
@@ -567,7 +567,7 @@ static int do_litirq(struct klitirqd_info* which)
567} 567}
568 568
569 569
570static void do_work(struct klitirqd_info* which) 570static void do_work(struct klmirqd_info* which)
571{ 571{
572 unsigned long flags; 572 unsigned long flags;
573 work_func_t f; 573 work_func_t f;
@@ -646,9 +646,9 @@ static int set_litmus_daemon_sched(void)
646 /* set task params, mark as proxy thread, and init other data */ 646 /* set task params, mark as proxy thread, and init other data */
647 tsk_rt(current)->task_params = tp; 647 tsk_rt(current)->task_params = tp;
648 tsk_rt(current)->is_proxy_thread = 1; 648 tsk_rt(current)->is_proxy_thread = 1;
649 tsk_rt(current)->cur_klitirqd = NULL; 649 tsk_rt(current)->cur_klmirqd = NULL;
650 mutex_init(&tsk_rt(current)->klitirqd_sem); 650 mutex_init(&tsk_rt(current)->klmirqd_sem);
651 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD); 651 atomic_set(&tsk_rt(current)->klmirqd_sem_stat, NOT_HELD);
652 652
653 /* inform the OS we're SCHED_LITMUS -- 653 /* inform the OS we're SCHED_LITMUS --
654 sched_setscheduler_nocheck() calls litmus_admit_task(). */ 654 sched_setscheduler_nocheck() calls litmus_admit_task(). */
@@ -657,7 +657,7 @@ static int set_litmus_daemon_sched(void)
657 return ret; 657 return ret;
658} 658}
659 659
660static void enter_execution_phase(struct klitirqd_info* which, 660static void enter_execution_phase(struct klmirqd_info* which,
661 struct mutex* sem, 661 struct mutex* sem,
662 struct task_struct* t) 662 struct task_struct* t)
663{ 663{
@@ -670,14 +670,14 @@ static void enter_execution_phase(struct klitirqd_info* which,
670 t->comm, t->pid); 670 t->comm, t->pid);
671} 671}
672 672
673static void exit_execution_phase(struct klitirqd_info* which, 673static void exit_execution_phase(struct klmirqd_info* which,
674 struct mutex* sem, 674 struct mutex* sem,
675 struct task_struct* t) 675 struct task_struct* t)
676{ 676{
677 TRACE_CUR("%s: Exiting execution phase. " 677 TRACE_CUR("%s: Exiting execution phase. "
678 "Releasing semaphore of %s/%d\n", __FUNCTION__, 678 "Releasing semaphore of %s/%d\n", __FUNCTION__,
679 t->comm, t->pid); 679 t->comm, t->pid);
680 if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) == HELD) 680 if(atomic_read(&tsk_rt(current)->klmirqd_sem_stat) == HELD)
681 { 681 {
682 up_and_set_stat(current, NOT_HELD, sem); 682 up_and_set_stat(current, NOT_HELD, sem);
683 TRACE_CUR("%s: Execution phase exited! " 683 TRACE_CUR("%s: Execution phase exited! "
@@ -691,9 +691,9 @@ static void exit_execution_phase(struct klitirqd_info* which,
691} 691}
692 692
693/* main loop for klitsoftirqd */ 693/* main loop for klitsoftirqd */
694static int run_klitirqd(void* unused) 694static int run_klmirqd(void* unused)
695{ 695{
696 struct klitirqd_info* which = &klitirqds[klitirqd_id(current)]; 696 struct klmirqd_info* which = &klmirqds[klmirqd_id(current)];
697 struct mutex* sem; 697 struct mutex* sem;
698 struct task_struct* owner; 698 struct task_struct* owner;
699 699
@@ -705,7 +705,7 @@ static int run_klitirqd(void* unused)
705 goto rt_failed; 705 goto rt_failed;
706 } 706 }
707 707
708 atomic_inc(&num_ready_klitirqds); 708 atomic_inc(&num_ready_klmirqds);
709 709
710 set_current_state(TASK_INTERRUPTIBLE); 710 set_current_state(TASK_INTERRUPTIBLE);
711 711
@@ -793,7 +793,7 @@ static int run_klitirqd(void* unused)
793 } 793 }
794 __set_current_state(TASK_RUNNING); 794 __set_current_state(TASK_RUNNING);
795 795
796 atomic_dec(&num_ready_klitirqds); 796 atomic_dec(&num_ready_klmirqds);
797 797
798rt_failed: 798rt_failed:
799 litmus_exit_task(current); 799 litmus_exit_task(current);
@@ -802,57 +802,57 @@ rt_failed:
802} 802}
803 803
804 804
805struct klitirqd_launch_data 805struct klmirqd_launch_data
806{ 806{
807 int* cpu_affinity; 807 int* cpu_affinity;
808 struct work_struct work; 808 struct work_struct work;
809}; 809};
810 810
811/* executed by a kworker from workqueues */ 811/* executed by a kworker from workqueues */
812static void launch_klitirqd(struct work_struct *work) 812static void launch_klmirqd(struct work_struct *work)
813{ 813{
814 int i; 814 int i;
815 815
816 struct klitirqd_launch_data* launch_data = 816 struct klmirqd_launch_data* launch_data =
817 container_of(work, struct klitirqd_launch_data, work); 817 container_of(work, struct klmirqd_launch_data, work);
818 818
819 TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); 819 TRACE("%s: Creating %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
820 820
821 /* create the daemon threads */ 821 /* create the daemon threads */
822 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 822 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
823 { 823 {
824 if(launch_data->cpu_affinity) 824 if(launch_data->cpu_affinity)
825 { 825 {
826 klitirqds[i].klitirqd = 826 klmirqds[i].klmirqd =
827 kthread_create( 827 kthread_create(
828 run_klitirqd, 828 run_klmirqd,
829 /* treat the affinity as a pointer, we'll cast it back later */ 829 /* treat the affinity as a pointer, we'll cast it back later */
830 (void*)(long long)launch_data->cpu_affinity[i], 830 (void*)(long long)launch_data->cpu_affinity[i],
831 "klitirqd_th%d/%d", 831 "klmirqd_th%d/%d",
832 i, 832 i,
833 launch_data->cpu_affinity[i]); 833 launch_data->cpu_affinity[i]);
834 834
835 /* litmus will put is in the right cluster. */ 835 /* litmus will put is in the right cluster. */
836 kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]); 836 kthread_bind(klmirqds[i].klmirqd, launch_data->cpu_affinity[i]);
837 } 837 }
838 else 838 else
839 { 839 {
840 klitirqds[i].klitirqd = 840 klmirqds[i].klmirqd =
841 kthread_create( 841 kthread_create(
842 run_klitirqd, 842 run_klmirqd,
843 /* treat the affinity as a pointer, we'll cast it back later */ 843 /* treat the affinity as a pointer, we'll cast it back later */
844 (void*)(long long)(-1), 844 (void*)(long long)(-1),
845 "klitirqd_th%d", 845 "klmirqd_th%d",
846 i); 846 i);
847 } 847 }
848 } 848 }
849 849
850 TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); 850 TRACE("%s: Launching %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
851 851
852 /* unleash the daemons */ 852 /* unleash the daemons */
853 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 853 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
854 { 854 {
855 wake_up_process(klitirqds[i].klitirqd); 855 wake_up_process(klmirqds[i].klmirqd);
856 } 856 }
857 857
858 if(launch_data->cpu_affinity) 858 if(launch_data->cpu_affinity)
@@ -861,36 +861,36 @@ static void launch_klitirqd(struct work_struct *work)
861} 861}
862 862
863 863
864void spawn_klitirqd(int* affinity) 864void spawn_klmirqd(int* affinity)
865{ 865{
866 int i; 866 int i;
867 struct klitirqd_launch_data* delayed_launch; 867 struct klmirqd_launch_data* delayed_launch;
868 868
869 if(atomic_read(&num_ready_klitirqds) != 0) 869 if(atomic_read(&num_ready_klmirqds) != 0)
870 { 870 {
871 TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n"); 871 TRACE("%s: At least one klmirqd is already running! Need to call kill_klmirqd()?\n");
872 return; 872 return;
873 } 873 }
874 874
875 /* init the tasklet & work queues */ 875 /* init the tasklet & work queues */
876 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 876 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
877 { 877 {
878 klitirqds[i].terminating = 0; 878 klmirqds[i].terminating = 0;
879 klitirqds[i].pending = 0; 879 klmirqds[i].pending = 0;
880 880
881 klitirqds[i].num_hi_pending.counter = 0; 881 klmirqds[i].num_hi_pending.counter = 0;
882 klitirqds[i].num_low_pending.counter = 0; 882 klmirqds[i].num_low_pending.counter = 0;
883 klitirqds[i].num_work_pending.counter = 0; 883 klmirqds[i].num_work_pending.counter = 0;
884 884
885 klitirqds[i].pending_tasklets_hi.head = NULL; 885 klmirqds[i].pending_tasklets_hi.head = NULL;
886 klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head; 886 klmirqds[i].pending_tasklets_hi.tail = &klmirqds[i].pending_tasklets_hi.head;
887 887
888 klitirqds[i].pending_tasklets.head = NULL; 888 klmirqds[i].pending_tasklets.head = NULL;
889 klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head; 889 klmirqds[i].pending_tasklets.tail = &klmirqds[i].pending_tasklets.head;
890 890
891 INIT_LIST_HEAD(&klitirqds[i].worklist); 891 INIT_LIST_HEAD(&klmirqds[i].worklist);
892 892
893 raw_spin_lock_init(&klitirqds[i].lock); 893 raw_spin_lock_init(&klmirqds[i].lock);
894 } 894 }
895 895
896 /* wait to flush the initializations to memory since other threads 896 /* wait to flush the initializations to memory since other threads
@@ -899,8 +899,8 @@ void spawn_klitirqd(int* affinity)
899 899
900 /* tell a work queue to launch the threads. we can't make scheduling 900 /* tell a work queue to launch the threads. we can't make scheduling
901 calls since we're in an atomic state. */ 901 calls since we're in an atomic state. */
902 TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__); 902 TRACE("%s: Setting callback up to launch klmirqds\n", __FUNCTION__);
903 delayed_launch = kmalloc(sizeof(struct klitirqd_launch_data), GFP_ATOMIC); 903 delayed_launch = kmalloc(sizeof(struct klmirqd_launch_data), GFP_ATOMIC);
904 if(affinity) 904 if(affinity)
905 { 905 {
906 delayed_launch->cpu_affinity = 906 delayed_launch->cpu_affinity =
@@ -913,57 +913,57 @@ void spawn_klitirqd(int* affinity)
913 { 913 {
914 delayed_launch->cpu_affinity = NULL; 914 delayed_launch->cpu_affinity = NULL;
915 } 915 }
916 INIT_WORK(&delayed_launch->work, launch_klitirqd); 916 INIT_WORK(&delayed_launch->work, launch_klmirqd);
917 schedule_work(&delayed_launch->work); 917 schedule_work(&delayed_launch->work);
918} 918}
919 919
920 920
921void kill_klitirqd(void) 921void kill_klmirqd(void)
922{ 922{
923 if(!klitirqd_is_dead()) 923 if(!klmirqd_is_dead())
924 { 924 {
925 int i; 925 int i;
926 926
927 TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); 927 TRACE("%s: Killing %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
928 928
929 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) 929 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
930 { 930 {
931 if(klitirqds[i].terminating != 1) 931 if(klmirqds[i].terminating != 1)
932 { 932 {
933 klitirqds[i].terminating = 1; 933 klmirqds[i].terminating = 1;
934 mb(); /* just to be sure? */ 934 mb(); /* just to be sure? */
935 flush_pending(klitirqds[i].klitirqd, NULL); 935 flush_pending(klmirqds[i].klmirqd, NULL);
936 936
937 /* signal termination */ 937 /* signal termination */
938 kthread_stop(klitirqds[i].klitirqd); 938 kthread_stop(klmirqds[i].klmirqd);
939 } 939 }
940 } 940 }
941 } 941 }
942} 942}
943 943
944 944
945int klitirqd_is_ready(void) 945int klmirqd_is_ready(void)
946{ 946{
947 return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD); 947 return(atomic_read(&num_ready_klmirqds) == NR_LITMUS_SOFTIRQD);
948} 948}
949 949
950int klitirqd_is_dead(void) 950int klmirqd_is_dead(void)
951{ 951{
952 return(atomic_read(&num_ready_klitirqds) == 0); 952 return(atomic_read(&num_ready_klmirqds) == 0);
953} 953}
954 954
955 955
956struct task_struct* get_klitirqd(unsigned int k_id) 956struct task_struct* get_klmirqd(unsigned int k_id)
957{ 957{
958 return(klitirqds[k_id].klitirqd); 958 return(klmirqds[k_id].klmirqd);
959} 959}
960 960
961 961
962void flush_pending(struct task_struct* klitirqd_thread, 962void flush_pending(struct task_struct* klmirqd_thread,
963 struct task_struct* owner) 963 struct task_struct* owner)
964{ 964{
965 unsigned int k_id = klitirqd_id(klitirqd_thread); 965 unsigned int k_id = klmirqd_id(klmirqd_thread);
966 struct klitirqd_info *which = &klitirqds[k_id]; 966 struct klmirqd_info *which = &klmirqds[k_id];
967 967
968 unsigned long flags; 968 unsigned long flags;
969 struct tasklet_struct *list; 969 struct tasklet_struct *list;
@@ -1129,7 +1129,7 @@ void flush_pending(struct task_struct* klitirqd_thread,
1129 1129
1130 1130
1131static void ___litmus_tasklet_schedule(struct tasklet_struct *t, 1131static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
1132 struct klitirqd_info *which, 1132 struct klmirqd_info *which,
1133 int wakeup) 1133 int wakeup)
1134{ 1134{
1135 unsigned long flags; 1135 unsigned long flags;
@@ -1153,7 +1153,7 @@ static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
1153 1153
1154 if(!old_pending && wakeup) 1154 if(!old_pending && wakeup)
1155 { 1155 {
1156 wakeup_litirqd_locked(which); /* wake up the klitirqd */ 1156 wakeup_litirqd_locked(which); /* wake up the klmirqd */
1157 } 1157 }
1158 1158
1159 //__dump_state(which, "___litmus_tasklet_schedule: after queuing"); 1159 //__dump_state(which, "___litmus_tasklet_schedule: after queuing");
@@ -1172,11 +1172,11 @@ int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
1172 1172
1173 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1173 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1174 { 1174 {
1175 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); 1175 TRACE("%s: No klmirqd_th%d!\n", __FUNCTION__, k_id);
1176 BUG(); 1176 BUG();
1177 } 1177 }
1178 1178
1179 if(likely(!klitirqds[k_id].terminating)) 1179 if(likely(!klmirqds[k_id].terminating))
1180 { 1180 {
1181 /* Can't accept tasklets while we're processing a workqueue 1181 /* Can't accept tasklets while we're processing a workqueue
1182 because they're handled by the same thread. This case is 1182 because they're handled by the same thread. This case is
@@ -1184,10 +1184,10 @@ int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
1184 1184
1185 TODO: Use a separate thread for work objects!!!!!! 1185 TODO: Use a separate thread for work objects!!!!!!
1186 */ 1186 */
1187 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) 1187 if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0))
1188 { 1188 {
1189 ret = 1; 1189 ret = 1;
1190 ___litmus_tasklet_schedule(t, &klitirqds[k_id], 1); 1190 ___litmus_tasklet_schedule(t, &klmirqds[k_id], 1);
1191 } 1191 }
1192 else 1192 else
1193 { 1193 {
@@ -1202,7 +1202,7 @@ EXPORT_SYMBOL(__litmus_tasklet_schedule);
1202 1202
1203 1203
1204static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, 1204static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
1205 struct klitirqd_info *which, 1205 struct klmirqd_info *which,
1206 int wakeup) 1206 int wakeup)
1207{ 1207{
1208 unsigned long flags; 1208 unsigned long flags;
@@ -1224,7 +1224,7 @@ static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
1224 1224
1225 if(!old_pending && wakeup) 1225 if(!old_pending && wakeup)
1226 { 1226 {
1227 wakeup_litirqd_locked(which); /* wake up the klitirqd */ 1227 wakeup_litirqd_locked(which); /* wake up the klmirqd */
1228 } 1228 }
1229 1229
1230 raw_spin_unlock_irqrestore(&which->lock, flags); 1230 raw_spin_unlock_irqrestore(&which->lock, flags);
@@ -1241,22 +1241,22 @@ int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
1241 1241
1242 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1242 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1243 { 1243 {
1244 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id); 1244 TRACE("%s: No klmirqd_th%d!\n", __FUNCTION__, k_id);
1245 BUG(); 1245 BUG();
1246 } 1246 }
1247 1247
1248 if(unlikely(!klitirqd_is_ready())) 1248 if(unlikely(!klmirqd_is_ready()))
1249 { 1249 {
1250 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); 1250 TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id);
1251 BUG(); 1251 BUG();
1252 } 1252 }
1253 1253
1254 if(likely(!klitirqds[k_id].terminating)) 1254 if(likely(!klmirqds[k_id].terminating))
1255 { 1255 {
1256 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) 1256 if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0))
1257 { 1257 {
1258 ret = 1; 1258 ret = 1;
1259 ___litmus_tasklet_hi_schedule(t, &klitirqds[k_id], 1); 1259 ___litmus_tasklet_hi_schedule(t, &klmirqds[k_id], 1);
1260 } 1260 }
1261 else 1261 else
1262 { 1262 {
@@ -1285,36 +1285,36 @@ int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_
1285 1285
1286 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1286 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1287 { 1287 {
1288 TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id); 1288 TRACE("%s: No klmirqd_th%u!\n", __FUNCTION__, k_id);
1289 BUG(); 1289 BUG();
1290 } 1290 }
1291 1291
1292 if(unlikely(!klitirqd_is_ready())) 1292 if(unlikely(!klmirqd_is_ready()))
1293 { 1293 {
1294 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); 1294 TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id);
1295 BUG(); 1295 BUG();
1296 } 1296 }
1297 1297
1298 if(likely(!klitirqds[k_id].terminating)) 1298 if(likely(!klmirqds[k_id].terminating))
1299 { 1299 {
1300 raw_spin_lock(&klitirqds[k_id].lock); 1300 raw_spin_lock(&klmirqds[k_id].lock);
1301 1301
1302 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0)) 1302 if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0))
1303 { 1303 {
1304 ret = 1; // success! 1304 ret = 1; // success!
1305 1305
1306 t->next = klitirqds[k_id].pending_tasklets_hi.head; 1306 t->next = klmirqds[k_id].pending_tasklets_hi.head;
1307 klitirqds[k_id].pending_tasklets_hi.head = t; 1307 klmirqds[k_id].pending_tasklets_hi.head = t;
1308 1308
1309 old_pending = klitirqds[k_id].pending; 1309 old_pending = klmirqds[k_id].pending;
1310 klitirqds[k_id].pending |= LIT_TASKLET_HI; 1310 klmirqds[k_id].pending |= LIT_TASKLET_HI;
1311 1311
1312 atomic_inc(&klitirqds[k_id].num_hi_pending); 1312 atomic_inc(&klmirqds[k_id].num_hi_pending);
1313 1313
1314 mb(); 1314 mb();
1315 1315
1316 if(!old_pending) 1316 if(!old_pending)
1317 wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */ 1317 wakeup_litirqd_locked(&klmirqds[k_id]); /* wake up the klmirqd */
1318 } 1318 }
1319 else 1319 else
1320 { 1320 {
@@ -1322,7 +1322,7 @@ int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_
1322 __FUNCTION__); 1322 __FUNCTION__);
1323 } 1323 }
1324 1324
1325 raw_spin_unlock(&klitirqds[k_id].lock); 1325 raw_spin_unlock(&klmirqds[k_id].lock);
1326 } 1326 }
1327 return(ret); 1327 return(ret);
1328} 1328}
@@ -1332,7 +1332,7 @@ EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
1332 1332
1333 1333
1334static void ___litmus_schedule_work(struct work_struct *w, 1334static void ___litmus_schedule_work(struct work_struct *w,
1335 struct klitirqd_info *which, 1335 struct klmirqd_info *which,
1336 int wakeup) 1336 int wakeup)
1337{ 1337{
1338 unsigned long flags; 1338 unsigned long flags;
@@ -1352,7 +1352,7 @@ static void ___litmus_schedule_work(struct work_struct *w,
1352 1352
1353 if(!old_pending && wakeup) 1353 if(!old_pending && wakeup)
1354 { 1354 {
1355 wakeup_litirqd_locked(which); /* wakeup the klitirqd */ 1355 wakeup_litirqd_locked(which); /* wakeup the klmirqd */
1356 } 1356 }
1357 1357
1358 raw_spin_unlock_irqrestore(&which->lock, flags); 1358 raw_spin_unlock_irqrestore(&which->lock, flags);
@@ -1369,18 +1369,18 @@ int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
1369 1369
1370 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) 1370 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1371 { 1371 {
1372 TRACE("%s: No klitirqd_th%u!\n", k_id); 1372 TRACE("%s: No klmirqd_th%u!\n", k_id);
1373 BUG(); 1373 BUG();
1374 } 1374 }
1375 1375
1376 if(unlikely(!klitirqd_is_ready())) 1376 if(unlikely(!klmirqd_is_ready()))
1377 { 1377 {
1378 TRACE("%s: klitirqd is not ready!\n", __FUNCTION__, k_id); 1378 TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id);
1379 BUG(); 1379 BUG();
1380 } 1380 }
1381 1381
1382 if(likely(!klitirqds[k_id].terminating)) 1382 if(likely(!klmirqds[k_id].terminating))
1383 ___litmus_schedule_work(w, &klitirqds[k_id], 1); 1383 ___litmus_schedule_work(w, &klmirqds[k_id], 1);
1384 else 1384 else
1385 ret = 0; 1385 ret = 0;
1386 return(ret); 1386 return(ret);
@@ -1388,34 +1388,34 @@ int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
1388EXPORT_SYMBOL(__litmus_schedule_work); 1388EXPORT_SYMBOL(__litmus_schedule_work);
1389 1389
1390 1390
1391static int set_klitirqd_sem_status(unsigned long stat) 1391static int set_klmirqd_sem_status(unsigned long stat)
1392{ 1392{
1393 TRACE_CUR("SETTING STATUS FROM %d TO %d\n", 1393 TRACE_CUR("SETTING STATUS FROM %d TO %d\n",
1394 atomic_read(&tsk_rt(current)->klitirqd_sem_stat), 1394 atomic_read(&tsk_rt(current)->klmirqd_sem_stat),
1395 stat); 1395 stat);
1396 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, stat); 1396 atomic_set(&tsk_rt(current)->klmirqd_sem_stat, stat);
1397 //mb(); 1397 //mb();
1398 1398
1399 return(0); 1399 return(0);
1400} 1400}
1401 1401
1402static int set_klitirqd_sem_status_if_not_held(unsigned long stat) 1402static int set_klmirqd_sem_status_if_not_held(unsigned long stat)
1403{ 1403{
1404 if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) != HELD) 1404 if(atomic_read(&tsk_rt(current)->klmirqd_sem_stat) != HELD)
1405 { 1405 {
1406 return(set_klitirqd_sem_status(stat)); 1406 return(set_klmirqd_sem_status(stat));
1407 } 1407 }
1408 return(-1); 1408 return(-1);
1409} 1409}
1410 1410
1411 1411
1412void __down_and_reset_and_set_stat(struct task_struct* t, 1412void __down_and_reset_and_set_stat(struct task_struct* t,
1413 enum klitirqd_sem_status to_reset, 1413 enum klmirqd_sem_status to_reset,
1414 enum klitirqd_sem_status to_set, 1414 enum klmirqd_sem_status to_set,
1415 struct mutex* sem) 1415 struct mutex* sem)
1416{ 1416{
1417#if 0 1417#if 0
1418 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); 1418 struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem);
1419 struct task_struct* task = container_of(param, struct task_struct, rt_param); 1419 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1420 1420
1421 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", 1421 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
@@ -1423,8 +1423,8 @@ void __down_and_reset_and_set_stat(struct task_struct* t,
1423#endif 1423#endif
1424 1424
1425 mutex_lock_sfx(sem, 1425 mutex_lock_sfx(sem,
1426 set_klitirqd_sem_status_if_not_held, to_reset, 1426 set_klmirqd_sem_status_if_not_held, to_reset,
1427 set_klitirqd_sem_status, to_set); 1427 set_klmirqd_sem_status, to_set);
1428#if 0 1428#if 0
1429 TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", 1429 TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
1430 __FUNCTION__, task->comm, task->pid); 1430 __FUNCTION__, task->comm, task->pid);
@@ -1432,11 +1432,11 @@ void __down_and_reset_and_set_stat(struct task_struct* t,
1432} 1432}
1433 1433
1434void down_and_set_stat(struct task_struct* t, 1434void down_and_set_stat(struct task_struct* t,
1435 enum klitirqd_sem_status to_set, 1435 enum klmirqd_sem_status to_set,
1436 struct mutex* sem) 1436 struct mutex* sem)
1437{ 1437{
1438#if 0 1438#if 0
1439 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem); 1439 struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem);
1440 struct task_struct* task = container_of(param, struct task_struct, rt_param); 1440 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1441 1441
1442 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", 1442 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
@@ -1445,7 +1445,7 @@ void down_and_set_stat(struct task_struct* t,
 
     mutex_lock_sfx(sem,
                    NULL, 0,
-                   set_klitirqd_sem_status, to_set);
+                   set_klmirqd_sem_status, to_set);
 
 #if 0
     TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
@@ -1455,11 +1455,11 @@ void down_and_set_stat(struct task_struct* t,
 
 
 void up_and_set_stat(struct task_struct* t,
-                     enum klitirqd_sem_status to_set,
+                     enum klmirqd_sem_status to_set,
                      struct mutex* sem)
 {
 #if 0
-    struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
+    struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem);
     struct task_struct* task = container_of(param, struct task_struct, rt_param);
 
     TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n",
@@ -1468,7 +1468,7 @@ void up_and_set_stat(struct task_struct* t,
 #endif
 
     mutex_unlock_sfx(sem, NULL, 0,
-                     set_klitirqd_sem_status, to_set);
+                     set_klmirqd_sem_status, to_set);
 
 #if 0
     TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n",
@@ -1479,33 +1479,33 @@ void up_and_set_stat(struct task_struct* t,
 
 
 
-void release_klitirqd_lock(struct task_struct* t)
+void release_klmirqd_lock(struct task_struct* t)
 {
-    if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == HELD))
+    if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klmirqd_sem_stat) == HELD))
     {
         struct mutex* sem;
         struct task_struct* owner = t;
 
         if(t->state == TASK_RUNNING)
         {
-            TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n");
+            TRACE_TASK(t, "NOT giving up klmirqd_sem because we're not blocked!\n");
             return;
         }
 
         if(likely(!tsk_rt(t)->is_proxy_thread))
         {
-            sem = &tsk_rt(t)->klitirqd_sem;
+            sem = &tsk_rt(t)->klmirqd_sem;
         }
         else
         {
-            unsigned int k_id = klitirqd_id(t);
-            owner = klitirqds[k_id].current_owner;
+            unsigned int k_id = klmirqd_id(t);
+            owner = klmirqds[k_id].current_owner;
 
-            BUG_ON(t != klitirqds[k_id].klitirqd);
+            BUG_ON(t != klmirqds[k_id].klmirqd);
 
             if(likely(owner))
             {
-                sem = &tsk_rt(owner)->klitirqd_sem;
+                sem = &tsk_rt(owner)->klmirqd_sem;
             }
             else
             {
@@ -1514,7 +1514,7 @@ void release_klitirqd_lock(struct task_struct* t)
                 // We had the rug pulled out from under us. Abort attempt
                 // to reacquire the lock since our client no longer needs us.
                 TRACE_CUR("HUH?! How did this happen?\n");
-                atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
+                atomic_set(&tsk_rt(t)->klmirqd_sem_stat, NOT_HELD);
                 return;
             }
         }
@@ -1526,42 +1526,42 @@ void release_klitirqd_lock(struct task_struct* t)
     /*
     else if(is_realtime(t))
     {
-        TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
+        TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klmirqd_sem_stat);
     }
     */
 }
 
-int reacquire_klitirqd_lock(struct task_struct* t)
+int reacquire_klmirqd_lock(struct task_struct* t)
 {
     int ret = 0;
 
-    if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == NEED_TO_REACQUIRE))
+    if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klmirqd_sem_stat) == NEED_TO_REACQUIRE))
     {
         struct mutex* sem;
         struct task_struct* owner = t;
 
         if(likely(!tsk_rt(t)->is_proxy_thread))
         {
-            sem = &tsk_rt(t)->klitirqd_sem;
+            sem = &tsk_rt(t)->klmirqd_sem;
         }
         else
         {
-            unsigned int k_id = klitirqd_id(t);
-            //struct task_struct* owner = klitirqds[k_id].current_owner;
-            owner = klitirqds[k_id].current_owner;
+            unsigned int k_id = klmirqd_id(t);
+            //struct task_struct* owner = klmirqds[k_id].current_owner;
+            owner = klmirqds[k_id].current_owner;
 
-            BUG_ON(t != klitirqds[k_id].klitirqd);
+            BUG_ON(t != klmirqds[k_id].klmirqd);
 
             if(likely(owner))
             {
-                sem = &tsk_rt(owner)->klitirqd_sem;
+                sem = &tsk_rt(owner)->klmirqd_sem;
             }
             else
             {
                 // We had the rug pulled out from under us. Abort attempt
                 // to reacquire the lock since our client no longer needs us.
-                TRACE_CUR("No longer needs to reacquire klitirqd_sem!\n");
-                atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
+                TRACE_CUR("No longer needs to reacquire klmirqd_sem!\n");
+                atomic_set(&tsk_rt(t)->klmirqd_sem_stat, NOT_HELD);
                 return(0);
             }
         }
@@ -1573,7 +1573,7 @@ int reacquire_klitirqd_lock(struct task_struct* t)
     /*
     else if(is_realtime(t))
     {
-        TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
+        TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klmirqd_sem_stat);
     }
     */
 
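
Note: the hunks above lean on mutex_lock_sfx()/mutex_unlock_sfx(), helpers that
run caller-supplied side-effect callbacks together with the lock transition, so
klmirqd_sem_stat is never observed out of sync with the mutex itself. Below is
a minimal user-space analogue of that pattern, assuming pthreads; sfx_mutex,
sfx_lock, and sfx_unlock are illustrative names, not the kernel API:

#include <pthread.h>

typedef int (*side_effect_fn)(unsigned long arg);

struct sfx_mutex {
    pthread_mutex_t lock;
};

/* Acquire: run `pre` before possibly blocking and `post` once the lock is
   held, mirroring how __down_and_reset_and_set_stat() resets the status on
   the way in and publishes the new status (e.g. HELD) after acquisition. */
static void sfx_lock(struct sfx_mutex *m,
                     side_effect_fn pre, unsigned long pre_arg,
                     side_effect_fn post, unsigned long post_arg)
{
    if (pre)
        pre(pre_arg);
    pthread_mutex_lock(&m->lock);
    if (post)
        post(post_arg);
}

/* Release: run `post` with the unlock, as up_and_set_stat() does when it
   publishes NOT_HELD. */
static void sfx_unlock(struct sfx_mutex *m,
                       side_effect_fn post, unsigned long post_arg)
{
    pthread_mutex_unlock(&m->lock);
    if (post)
        post(post_arg);
}
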
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index d04c6efa5f05..22586cde8255 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -506,7 +506,7 @@ static int __reg_nv_device(int reg_device_id, struct task_struct *t)
     }
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    down_and_set_stat(t, HELD, &tsk_rt(t)->klitirqd_sem);
+    down_and_set_stat(t, HELD, &tsk_rt(t)->klmirqd_sem);
 #endif
     ++(reg->nr_owners);
 
@@ -535,7 +535,7 @@ static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t)
     nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id];
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id);
+    struct task_struct* klmirqd_th = get_klmirqd(de_reg_device_id);
 #endif
 
     if(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)) {
@@ -549,7 +549,7 @@ static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t)
     for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) {
         if(reg->owners[i] == t) {
 #ifdef CONFIG_LITMUS_SOFTIRQD
-            flush_pending(klitirqd_th, t);
+            flush_pending(klmirqd_th, t);
 #endif
             if(reg->max_prio_owner == t) {
                 reg->max_prio_owner = find_hp_owner(reg, t);
@@ -559,7 +559,7 @@ static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t)
             }
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-            up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klitirqd_sem);
+            up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klmirqd_sem);
 #endif
 
             reg->owners[i] = NULL;
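
Note: taken together, the nvidia_info.c call sites pair the semaphore
operations: registration takes the per-task klmirqd_sem and publishes HELD;
deregistration flushes any tasklets still queued on the task's behalf before
releasing with NOT_HELD. A condensed restatement of that flow (a sketch only;
the registry locking and error paths of the real functions are omitted):

/* Registration side: serialize with the daemon and mark the sem HELD. */
static void reg_nv_device_sketch(struct task_struct *t)
{
    down_and_set_stat(t, HELD, &tsk_rt(t)->klmirqd_sem);
}

/* Deregistration side: drop pending work for t, then release. */
static void clear_reg_nv_device_sketch(int device_id, struct task_struct *t)
{
    struct task_struct *klmirqd_th = get_klmirqd(device_id);

    flush_pending(klmirqd_th, t);
    up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klmirqd_sem);
}
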
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 6746d4d6033e..44c8336c5061 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1143,7 +1143,7 @@ static int __increase_priority_inheritance(struct task_struct* t,
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
     /* this sanity check allows for weaker locking in protocols */
-    /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */
+    /* TODO (klmirqd): Skip this check if 't' is a proxy thread (???) */
     if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
 #endif
         TRACE_TASK(t, "inherits priority from %s/%d\n",
@@ -1238,12 +1238,12 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
     __increase_priority_inheritance(t, prio_inh);
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    if(tsk_rt(t)->cur_klitirqd != NULL)
+    if(tsk_rt(t)->cur_klmirqd != NULL)
     {
         TRACE_TASK(t, "%s/%d inherits a new priority!\n",
-                   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+                   tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid);
 
-        __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
+        __increase_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh);
     }
 #endif
 
@@ -1347,12 +1347,12 @@ static void decrease_priority_inheritance(struct task_struct* t,
     __decrease_priority_inheritance(t, prio_inh);
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    if(tsk_rt(t)->cur_klitirqd != NULL)
+    if(tsk_rt(t)->cur_klmirqd != NULL)
     {
         TRACE_TASK(t, "%s/%d decreases in priority!\n",
-                   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+                   tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid);
 
-        __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
+        __decrease_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh);
     }
 #endif
 
@@ -1376,13 +1376,13 @@ static void decrease_priority_inheritance(struct task_struct* t,
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
 /* called with IRQs off */
-static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+static void increase_priority_inheritance_klmirqd(struct task_struct* klmirqd,
                                                    struct task_struct* old_owner,
                                                    struct task_struct* new_owner)
 {
-    cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
+    cedf_domain_t* cluster = task_cpu_cluster(klmirqd);
 
-    BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+    BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread));
 
     raw_spin_lock(&cluster->cluster_lock);
 
@@ -1391,18 +1391,18 @@ static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
         if(old_owner)
         {
             // unreachable?
-            tsk_rt(old_owner)->cur_klitirqd = NULL;
+            tsk_rt(old_owner)->cur_klmirqd = NULL;
         }
 
-        TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
+        TRACE_TASK(klmirqd, "giving ownership to %s/%d.\n",
                    new_owner->comm, new_owner->pid);
 
-        tsk_rt(new_owner)->cur_klitirqd = klitirqd;
+        tsk_rt(new_owner)->cur_klmirqd = klmirqd;
     }
 
-    __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio.
+    __decrease_priority_inheritance(klmirqd, NULL); // kludge to clear out cur prio.
 
-    __increase_priority_inheritance(klitirqd,
+    __increase_priority_inheritance(klmirqd,
                                     (tsk_rt(new_owner)->inh_task == NULL) ?
                                         new_owner :
                                         tsk_rt(new_owner)->inh_task);
@@ -1412,21 +1412,21 @@ static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
 
 
 /* called with IRQs off */
-static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+static void decrease_priority_inheritance_klmirqd(struct task_struct* klmirqd,
                                                    struct task_struct* old_owner,
                                                    struct task_struct* new_owner)
 {
-    cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
+    cedf_domain_t* cluster = task_cpu_cluster(klmirqd);
 
-    BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+    BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread));
 
     raw_spin_lock(&cluster->cluster_lock);
 
-    TRACE_TASK(klitirqd, "priority restored\n");
+    TRACE_TASK(klmirqd, "priority restored\n");
 
-    __decrease_priority_inheritance(klitirqd, new_owner);
+    __decrease_priority_inheritance(klmirqd, new_owner);
 
-    tsk_rt(old_owner)->cur_klitirqd = NULL;
+    tsk_rt(old_owner)->cur_klmirqd = NULL;
 
     raw_spin_unlock(&cluster->cluster_lock);
 }
@@ -1859,7 +1859,7 @@ static long cedf_activate_plugin(void)
             }
         }
 
-        spawn_klitirqd(affinity);
+        spawn_klmirqd(affinity);
 
         kfree(affinity);
     }
@@ -1907,8 +1907,8 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
     .allocate_aff_obs = cedf_allocate_affinity_observer,
 #endif
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    .increase_prio_klitirqd = increase_priority_inheritance_klitirqd,
-    .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd,
+    .increase_prio_klmirqd = increase_priority_inheritance_klmirqd,
+    .decrease_prio_klmirqd = decrease_priority_inheritance_klmirqd,
 #endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
     .enqueue_pai_tasklet = cedf_enqueue_pai_tasklet,
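
Note: the heart of the klmirqd handoff is the donor choice: the daemon must
run at its owner's *effective* priority, which is the owner itself unless the
owner is in turn inheriting from someone else. Isolated from the hunk above
(C-EDF here and GSN-EDF below use the same two calls, differing only in which
lock they hold):

/* Clear any stale inherited priority, then re-base the daemon on the
   owner's effective priority. */
__decrease_priority_inheritance(klmirqd, NULL);
__increase_priority_inheritance(klmirqd,
                                (tsk_rt(new_owner)->inh_task == NULL) ?
                                    new_owner :
                                    tsk_rt(new_owner)->inh_task);
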
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 04b189e54b03..d52be9325044 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -1154,7 +1154,7 @@ static int __increase_priority_inheritance(struct task_struct* t,
 
 #ifdef CONFIG_LITMUS_NESTED_LOCKING
     /* this sanity check allows for weaker locking in protocols */
-    /* TODO (klitirqd): Skip this check if 't' is a proxy thread (???) */
+    /* TODO (klmirqd): Skip this check if 't' is a proxy thread (???) */
     if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
 #endif
         TRACE_TASK(t, "inherits priority from %s/%d\n",
@@ -1248,12 +1248,12 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str
     success = __increase_priority_inheritance(t, prio_inh);
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    if(tsk_rt(t)->cur_klitirqd != NULL)
+    if(tsk_rt(t)->cur_klmirqd != NULL)
     {
         TRACE_TASK(t, "%s/%d inherits a new priority!\n",
-                   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+                   tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid);
 
-        __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
+        __increase_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh);
     }
 #endif
 
@@ -1358,12 +1358,12 @@ static void decrease_priority_inheritance(struct task_struct* t,
     success = __decrease_priority_inheritance(t, prio_inh);
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    if(tsk_rt(t)->cur_klitirqd != NULL)
+    if(tsk_rt(t)->cur_klmirqd != NULL)
     {
         TRACE_TASK(t, "%s/%d decreases in priority!\n",
-                   tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
+                   tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid);
 
-        __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
+        __decrease_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh);
     }
 #endif
 
@@ -1384,11 +1384,11 @@ static void decrease_priority_inheritance(struct task_struct* t,
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
 /* called with IRQs off */
-static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+static void increase_priority_inheritance_klmirqd(struct task_struct* klmirqd,
                                                    struct task_struct* old_owner,
                                                    struct task_struct* new_owner)
 {
-    BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+    BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread));
 
     raw_spin_lock(&gsnedf_lock);
 
@@ -1397,18 +1397,18 @@ static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
         if(old_owner)
         {
             // unreachable?
-            tsk_rt(old_owner)->cur_klitirqd = NULL;
+            tsk_rt(old_owner)->cur_klmirqd = NULL;
         }
 
-        TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
+        TRACE_TASK(klmirqd, "giving ownership to %s/%d.\n",
                    new_owner->comm, new_owner->pid);
 
-        tsk_rt(new_owner)->cur_klitirqd = klitirqd;
+        tsk_rt(new_owner)->cur_klmirqd = klmirqd;
     }
 
-    __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio.
+    __decrease_priority_inheritance(klmirqd, NULL); // kludge to clear out cur prio.
 
-    __increase_priority_inheritance(klitirqd,
+    __increase_priority_inheritance(klmirqd,
                                     (tsk_rt(new_owner)->inh_task == NULL) ?
                                         new_owner :
                                         tsk_rt(new_owner)->inh_task);
@@ -1418,19 +1418,19 @@ static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
 
 
 /* called with IRQs off */
-static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd,
+static void decrease_priority_inheritance_klmirqd(struct task_struct* klmirqd,
                                                    struct task_struct* old_owner,
                                                    struct task_struct* new_owner)
 {
-    BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
+    BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread));
 
     raw_spin_lock(&gsnedf_lock);
 
-    TRACE_TASK(klitirqd, "priority restored\n");
+    TRACE_TASK(klmirqd, "priority restored\n");
 
-    __decrease_priority_inheritance(klitirqd, new_owner);
+    __decrease_priority_inheritance(klmirqd, new_owner);
 
-    tsk_rt(old_owner)->cur_klitirqd = NULL;
+    tsk_rt(old_owner)->cur_klmirqd = NULL;
 
     raw_spin_unlock(&gsnedf_lock);
 }
@@ -1923,7 +1923,7 @@ static long gsnedf_activate_plugin(void)
 #endif
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    spawn_klitirqd(NULL);
+    spawn_klmirqd(NULL);
 #endif
 
 #ifdef CONFIG_LITMUS_NVIDIA
@@ -1966,8 +1966,8 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
     .allocate_aff_obs = gsnedf_allocate_affinity_observer,
 #endif
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    .increase_prio_klitirqd = increase_priority_inheritance_klitirqd,
-    .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd,
+    .increase_prio_klmirqd = increase_priority_inheritance_klmirqd,
+    .decrease_prio_klmirqd = decrease_priority_inheritance_klmirqd,
 #endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
     .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet,
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index f9423861eb1f..cda67e0f6bc8 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -168,13 +168,13 @@ static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struc
 #endif
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-static void litmus_dummy_increase_prio_klitirq(struct task_struct* klitirqd,
+static void litmus_dummy_increase_prio_klmirqd(struct task_struct* klmirqd,
                                                struct task_struct* old_owner,
                                                struct task_struct* new_owner)
 {
 }
 
-static void litmus_dummy_decrease_prio_klitirqd(struct task_struct* klitirqd,
+static void litmus_dummy_decrease_prio_klmirqd(struct task_struct* klmirqd,
                                                 struct task_struct* old_owner)
 {
 }
@@ -264,8 +264,8 @@ struct sched_plugin linux_sched_plugin = {
     .__compare = litmus_dummy___compare,
 #endif
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    .increase_prio_klitirqd = litmus_dummy_increase_prio_klitirqd,
-    .decrease_prio_klitirqd = litmus_dummy_decrease_prio_klitirqd,
+    .increase_prio_klmirqd = litmus_dummy_increase_prio_klmirqd,
+    .decrease_prio_klmirqd = litmus_dummy_decrease_prio_klmirqd,
 #endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
     .enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet,
@@ -328,8 +328,8 @@ int register_sched_plugin(struct sched_plugin* plugin)
     CHECK(__compare);
 #endif
 #ifdef CONFIG_LITMUS_SOFTIRQD
-    CHECK(increase_prio_klitirqd);
-    CHECK(decrease_prio_klitirqd);
+    CHECK(increase_prio_klmirqd);
+    CHECK(decrease_prio_klmirqd);
 #endif
 #ifdef CONFIG_LITMUS_PAI_SOFTIRQD
     CHECK(enqueue_pai_tasklet);
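
Note: register_sched_plugin() relies on CHECK() to backfill any callback a
plugin leaves NULL with the matching litmus_dummy_* stub, which is why the
do-nothing klmirqd dummies above must exist at all. Assuming the usual
LITMUS^RT definition earlier in sched_plugin.c, CHECK() expands to roughly:

/* Substitute the dummy implementation if the plugin does not provide
   `func`, so callers never have to NULL-check plugin methods. */
#define CHECK(func) {\
    if (!plugin->func) \
        plugin->func = litmus_dummy_ ## func;}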