aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-04-15 15:03:33 -0400
committerGlenn Elliott <gelliott@cs.unc.edu>2012-04-15 15:03:33 -0400
commitb3ae67412531cbc583d5697d2366fc58d6dd07e7 (patch)
treefba83da4fee9c8d5fa06ce253af50ce7c39197d9
parentc0667dc4894e913048cf8904f0ce9a79b481b556 (diff)
parentadeff95dcdcf88789e983f20b0657f29286de8d7 (diff)
Merge branch 'wip-gpu-interrupts' into wip-gpu-rtss12
Conflicts: include/litmus/fdso.h include/litmus/rt_param.h include/litmus/sched_plugin.h include/litmus/unistd_32.h include/litmus/unistd_64.h litmus/Makefile litmus/edf_common.c litmus/litmus.c litmus/locking.c litmus/sched_gsn_edf.c litmus/sched_plugin.c
-rw-r--r--arch/x86/kernel/irq.c4
-rw-r--r--arch/x86/kernel/syscall_table_32.S1
-rw-r--r--include/linux/completion.h1
-rw-r--r--include/linux/interrupt.h10
-rw-r--r--include/linux/mutex.h10
-rw-r--r--include/linux/semaphore.h9
-rw-r--r--include/linux/workqueue.h18
-rw-r--r--include/litmus/fdso.h4
-rw-r--r--include/litmus/fifo_common.h25
-rw-r--r--include/litmus/litmus.h5
-rw-r--r--include/litmus/litmus_softirq.h199
-rw-r--r--include/litmus/nvidia_info.h38
-rw-r--r--include/litmus/preempt.h1
-rw-r--r--include/litmus/rm_common.h25
-rw-r--r--include/litmus/rm_srt_common.h25
-rw-r--r--include/litmus/rt_param.h46
-rw-r--r--include/litmus/sched_plugin.h24
-rw-r--r--include/litmus/sched_trace.h174
-rw-r--r--include/litmus/sched_trace_external.h78
-rw-r--r--include/litmus/trace.h20
-rw-r--r--include/litmus/unistd_32.h3
-rw-r--r--include/litmus/unistd_64.h5
-rw-r--r--kernel/lockdep.c3
-rw-r--r--kernel/mutex.c125
-rw-r--r--kernel/sched.c27
-rw-r--r--kernel/semaphore.c13
-rw-r--r--kernel/softirq.c288
-rw-r--r--kernel/workqueue.c71
-rw-r--r--litmus/Kconfig86
-rw-r--r--litmus/Makefile6
-rw-r--r--litmus/edf_common.c45
-rw-r--r--litmus/fdso.c1
-rw-r--r--litmus/litmus.c76
-rw-r--r--litmus/litmus_pai_softirq.c64
-rw-r--r--litmus/litmus_proc.c17
-rw-r--r--litmus/litmus_softirq.c1582
-rw-r--r--litmus/locking.c5
-rw-r--r--litmus/nvidia_info.c536
-rw-r--r--litmus/preempt.c5
-rw-r--r--litmus/sched_cedf.c1102
-rw-r--r--litmus/sched_gsn_edf.c490
-rw-r--r--litmus/sched_litmus.c2
-rw-r--r--litmus/sched_plugin.c43
-rw-r--r--litmus/sched_task_trace.c232
-rw-r--r--litmus/sched_trace_external.c64
45 files changed, 5449 insertions, 159 deletions
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 6c0802eb2f7f..680a5cb4b585 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -10,6 +10,10 @@
10#include <linux/ftrace.h> 10#include <linux/ftrace.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12 12
13#ifdef CONFIG_LITMUS_NVIDIA
14#include <litmus/sched_trace.h>
15#endif
16
13#include <asm/apic.h> 17#include <asm/apic.h>
14#include <asm/io_apic.h> 18#include <asm/io_apic.h>
15#include <asm/irq.h> 19#include <asm/irq.h>
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index d0126222b394..0cb4373698e7 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -358,3 +358,4 @@ ENTRY(sys_call_table)
358 .long sys_wait_for_ts_release 358 .long sys_wait_for_ts_release
359 .long sys_release_ts /* +10 */ 359 .long sys_release_ts /* +10 */
360 .long sys_null_call 360 .long sys_null_call
361 .long sys_register_nv_device
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 9d727271c9fe..cff405c4dd3a 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -76,6 +76,7 @@ static inline void init_completion(struct completion *x)
76 init_waitqueue_head(&x->wait); 76 init_waitqueue_head(&x->wait);
77} 77}
78 78
79extern void __wait_for_completion_locked(struct completion *);
79extern void wait_for_completion(struct completion *); 80extern void wait_for_completion(struct completion *);
80extern int wait_for_completion_interruptible(struct completion *x); 81extern int wait_for_completion_interruptible(struct completion *x);
81extern int wait_for_completion_killable(struct completion *x); 82extern int wait_for_completion_killable(struct completion *x);
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index f6efed0039ed..57a7bc8807be 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -445,6 +445,7 @@ static inline void __raise_softirq_irqoff(unsigned int nr)
445 445
446extern void raise_softirq_irqoff(unsigned int nr); 446extern void raise_softirq_irqoff(unsigned int nr);
447extern void raise_softirq(unsigned int nr); 447extern void raise_softirq(unsigned int nr);
448extern void wakeup_softirqd(void);
448 449
449/* This is the worklist that queues up per-cpu softirq work. 450/* This is the worklist that queues up per-cpu softirq work.
450 * 451 *
@@ -500,6 +501,10 @@ struct tasklet_struct
500 atomic_t count; 501 atomic_t count;
501 void (*func)(unsigned long); 502 void (*func)(unsigned long);
502 unsigned long data; 503 unsigned long data;
504
505#if defined(CONFIG_LITMUS_SOFTIRQD) || defined(CONFIG_LITMUS_PAI_SOFTIRQD)
506 struct task_struct *owner;
507#endif
503}; 508};
504 509
505#define DECLARE_TASKLET(name, func, data) \ 510#define DECLARE_TASKLET(name, func, data) \
@@ -537,6 +542,7 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
537#define tasklet_unlock(t) do { } while (0) 542#define tasklet_unlock(t) do { } while (0)
538#endif 543#endif
539 544
545extern void ___tasklet_schedule(struct tasklet_struct *t);
540extern void __tasklet_schedule(struct tasklet_struct *t); 546extern void __tasklet_schedule(struct tasklet_struct *t);
541 547
542static inline void tasklet_schedule(struct tasklet_struct *t) 548static inline void tasklet_schedule(struct tasklet_struct *t)
@@ -545,6 +551,7 @@ static inline void tasklet_schedule(struct tasklet_struct *t)
545 __tasklet_schedule(t); 551 __tasklet_schedule(t);
546} 552}
547 553
554extern void ___tasklet_hi_schedule(struct tasklet_struct *t);
548extern void __tasklet_hi_schedule(struct tasklet_struct *t); 555extern void __tasklet_hi_schedule(struct tasklet_struct *t);
549 556
550static inline void tasklet_hi_schedule(struct tasklet_struct *t) 557static inline void tasklet_hi_schedule(struct tasklet_struct *t)
@@ -553,6 +560,7 @@ static inline void tasklet_hi_schedule(struct tasklet_struct *t)
553 __tasklet_hi_schedule(t); 560 __tasklet_hi_schedule(t);
554} 561}
555 562
563extern void ___tasklet_hi_schedule_first(struct tasklet_struct *t);
556extern void __tasklet_hi_schedule_first(struct tasklet_struct *t); 564extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
557 565
558/* 566/*
@@ -582,7 +590,7 @@ static inline void tasklet_disable(struct tasklet_struct *t)
582} 590}
583 591
584static inline void tasklet_enable(struct tasklet_struct *t) 592static inline void tasklet_enable(struct tasklet_struct *t)
585{ 593{
586 smp_mb__before_atomic_dec(); 594 smp_mb__before_atomic_dec();
587 atomic_dec(&t->count); 595 atomic_dec(&t->count);
588} 596}
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index a940fe435aca..cb47debbf24d 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -126,6 +126,15 @@ static inline int mutex_is_locked(struct mutex *lock)
126 return atomic_read(&lock->count) != 1; 126 return atomic_read(&lock->count) != 1;
127} 127}
128 128
129/* return non-zero to abort. only pre-side-effects may abort */
130typedef int (*side_effect_t)(unsigned long);
131extern void mutex_lock_sfx(struct mutex *lock,
132 side_effect_t pre, unsigned long pre_arg,
133 side_effect_t post, unsigned long post_arg);
134extern void mutex_unlock_sfx(struct mutex *lock,
135 side_effect_t pre, unsigned long pre_arg,
136 side_effect_t post, unsigned long post_arg);
137
129/* 138/*
130 * See kernel/mutex.c for detailed documentation of these APIs. 139 * See kernel/mutex.c for detailed documentation of these APIs.
131 * Also see Documentation/mutex-design.txt. 140 * Also see Documentation/mutex-design.txt.
@@ -153,6 +162,7 @@ extern void mutex_lock(struct mutex *lock);
153extern int __must_check mutex_lock_interruptible(struct mutex *lock); 162extern int __must_check mutex_lock_interruptible(struct mutex *lock);
154extern int __must_check mutex_lock_killable(struct mutex *lock); 163extern int __must_check mutex_lock_killable(struct mutex *lock);
155 164
165
156# define mutex_lock_nested(lock, subclass) mutex_lock(lock) 166# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
157# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock) 167# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
158# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock) 168# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 39fa04966aa8..c83fc2b65f01 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -43,4 +43,13 @@ extern int __must_check down_trylock(struct semaphore *sem);
43extern int __must_check down_timeout(struct semaphore *sem, long jiffies); 43extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
44extern void up(struct semaphore *sem); 44extern void up(struct semaphore *sem);
45 45
46extern void __down(struct semaphore *sem);
47extern void __up(struct semaphore *sem);
48
49struct semaphore_waiter {
50 struct list_head list;
51 struct task_struct *task;
52 int up;
53};
54
46#endif /* __LINUX_SEMAPHORE_H */ 55#endif /* __LINUX_SEMAPHORE_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f584aba78ca9..1ec2ec7d4e3b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -83,6 +83,9 @@ struct work_struct {
83#ifdef CONFIG_LOCKDEP 83#ifdef CONFIG_LOCKDEP
84 struct lockdep_map lockdep_map; 84 struct lockdep_map lockdep_map;
85#endif 85#endif
86#ifdef CONFIG_LITMUS_SOFTIRQD
87 struct task_struct *owner;
88#endif
86}; 89};
87 90
88#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) 91#define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
@@ -115,11 +118,25 @@ struct execute_work {
115#define __WORK_INIT_LOCKDEP_MAP(n, k) 118#define __WORK_INIT_LOCKDEP_MAP(n, k)
116#endif 119#endif
117 120
121#ifdef CONFIG_LITMUS_SOFTIRQD
122#define __WORK_INIT_OWNER() \
123 .owner = NULL,
124
125#define PREPARE_OWNER(_work, _owner) \
126 do { \
127 (_work)->owner = (_owner); \
128 } while(0)
129#else
130#define __WORK_INIT_OWNER()
131#define PREPARE_OWNER(_work, _owner)
132#endif
133
118#define __WORK_INITIALIZER(n, f) { \ 134#define __WORK_INITIALIZER(n, f) { \
119 .data = WORK_DATA_STATIC_INIT(), \ 135 .data = WORK_DATA_STATIC_INIT(), \
120 .entry = { &(n).entry, &(n).entry }, \ 136 .entry = { &(n).entry, &(n).entry }, \
121 .func = (f), \ 137 .func = (f), \
122 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \ 138 __WORK_INIT_LOCKDEP_MAP(#n, &(n)) \
139 __WORK_INIT_OWNER() \
123 } 140 }
124 141
125#define __DELAYED_WORK_INITIALIZER(n, f) { \ 142#define __DELAYED_WORK_INITIALIZER(n, f) { \
@@ -357,6 +374,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
357extern void flush_workqueue(struct workqueue_struct *wq); 374extern void flush_workqueue(struct workqueue_struct *wq);
358extern void flush_scheduled_work(void); 375extern void flush_scheduled_work(void);
359 376
377extern int __schedule_work(struct work_struct *work);
360extern int schedule_work(struct work_struct *work); 378extern int schedule_work(struct work_struct *work);
361extern int schedule_work_on(int cpu, struct work_struct *work); 379extern int schedule_work_on(int cpu, struct work_struct *work);
362extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); 380extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index 4e7a6bf06134..2bff9cc3909d 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -22,8 +22,9 @@ typedef enum {
22 22
23 RSM_MUTEX = 2, 23 RSM_MUTEX = 2,
24 IKGLP_SEM = 3, 24 IKGLP_SEM = 3,
25 KFMLP_SEM = 4,
25 26
26 MAX_OBJ_TYPE = 3 27 MAX_OBJ_TYPE = 4
27} obj_type_t; 28} obj_type_t;
28 29
29struct inode_obj_id { 30struct inode_obj_id {
@@ -67,6 +68,7 @@ static inline void* od_lookup(int od, obj_type_t type)
67} 68}
68 69
69#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM)) 70#define lookup_fmlp_sem(od)((struct pi_semaphore*) od_lookup(od, FMLP_SEM))
71#define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM))
70#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM)) 72#define lookup_srp_sem(od) ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
71#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID)) 73#define lookup_ics(od) ((struct ics*) od_lookup(od, ICS_ID))
72 74
diff --git a/include/litmus/fifo_common.h b/include/litmus/fifo_common.h
new file mode 100644
index 000000000000..12cfbfea41ee
--- /dev/null
+++ b/include/litmus/fifo_common.h
@@ -0,0 +1,25 @@
1/*
2 * EDF common data structures and utility functions shared by all EDF
3 * based scheduler plugins
4 */
5
6/* CLEANUP: Add comments and make it less messy.
7 *
8 */
9
10#ifndef __UNC_FIFO_COMMON_H__
11#define __UNC_FIFO_COMMON_H__
12
13#include <litmus/rt_domain.h>
14
15void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
16 release_jobs_t release);
17
18int fifo_higher_prio(struct task_struct* first,
19 struct task_struct* second);
20
21int fifo_ready_order(struct bheap_node* a, struct bheap_node* b);
22
23int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t);
24
25#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 9a7334f1eb00..e2df49b171c5 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,6 +26,7 @@ static inline int in_list(struct list_head* list)
26 ); 26 );
27} 27}
28 28
29
29struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq); 30struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
30 31
31#define NO_CPU 0xffffffff 32#define NO_CPU 0xffffffff
@@ -124,7 +125,9 @@ static inline lt_t litmus_clock(void)
124#define earlier_release(a, b) (lt_before(\ 125#define earlier_release(a, b) (lt_before(\
125 (a)->rt_param.job_params.release,\ 126 (a)->rt_param.job_params.release,\
126 (b)->rt_param.job_params.release)) 127 (b)->rt_param.job_params.release))
127 128#define shorter_period(a, b) (lt_before(\
129 (a)->rt_param.task_params.period,\
130 (b)->rt_param.task_params.period))
128void preempt_if_preemptable(struct task_struct* t, int on_cpu); 131void preempt_if_preemptable(struct task_struct* t, int on_cpu);
129 132
130#ifdef CONFIG_LITMUS_LOCKING 133#ifdef CONFIG_LITMUS_LOCKING
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
new file mode 100644
index 000000000000..34287f3cbb8d
--- /dev/null
+++ b/include/litmus/litmus_softirq.h
@@ -0,0 +1,199 @@
1#ifndef __LITMUS_SOFTIRQ_H
2#define __LITMUS_SOFTIRQ_H
3
4#include <linux/interrupt.h>
5#include <linux/workqueue.h>
6
7/*
8 Threaded tasklet handling for Litmus. Tasklets
9 are scheduled with the priority of the tasklet's
10 owner---that is, the RT task on behalf the tasklet
11 runs.
12
13 Tasklets are current scheduled in FIFO order with
14 NO priority inheritance for "blocked" tasklets.
15
16 klitirqd assumes the priority of the owner of the
17 tasklet when the tasklet is next to execute.
18
19 Currently, hi-tasklets are scheduled before
20 low-tasklets, regardless of priority of low-tasklets.
21 And likewise, low-tasklets are scheduled before work
22 queue objects. This priority inversion probably needs
23 to be fixed, though it is not an issue if our work with
24 GPUs as GPUs are owned (and associated klitirqds) for
25 exclusive time periods, thus no inversions can
26 occur.
27 */
28
29
30
31#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
32
33/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
34 Actual launch of threads is deffered to kworker's
35 workqueue, so daemons will likely not be immediately
36 running when this function returns, though the required
37 data will be initialized.
38
39 @affinity_set: an array expressing the processor affinity
40 for each of the NR_LITMUS_SOFTIRQD daemons. May be set
41 to NULL for global scheduling.
42
43 - Examples -
44 8-CPU system with two CPU clusters:
45 affinity[] = {0, 0, 0, 0, 3, 3, 3, 3}
46 NOTE: Daemons not actually bound to specified CPU, but rather
47 cluster in which the CPU resides.
48
49 8-CPU system, partitioned:
50 affinity[] = {0, 1, 2, 3, 4, 5, 6, 7}
51
52 FIXME: change array to a CPU topology or array of cpumasks
53
54 */
55void spawn_klitirqd(int* affinity);
56
57
58/* Raises a flag to tell klitirqds to terminate.
59 Termination is async, so some threads may be running
60 after function return. */
61void kill_klitirqd(void);
62
63
64/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqs are ready
65 to handle tasklets. 0, otherwise.*/
66int klitirqd_is_ready(void);
67
68/* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqs are ready
69 to handle tasklets. 0, otherwise.*/
70int klitirqd_is_dead(void);
71
72/* Flushes all pending work out to the OS for regular
73 * tasklet/work processing of the specified 'owner'
74 *
75 * PRECOND: klitirqd_thread must have a clear entry
76 * in the GPU registry, otherwise this call will become
77 * a no-op as work will loop back to the klitirqd_thread.
78 *
79 * Pass NULL for owner to flush ALL pending items.
80 */
81void flush_pending(struct task_struct* klitirqd_thread,
82 struct task_struct* owner);
83
84struct task_struct* get_klitirqd(unsigned int k_id);
85
86
87extern int __litmus_tasklet_schedule(
88 struct tasklet_struct *t,
89 unsigned int k_id);
90
91/* schedule a tasklet on klitirqd #k_id */
92static inline int litmus_tasklet_schedule(
93 struct tasklet_struct *t,
94 unsigned int k_id)
95{
96 int ret = 0;
97 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
98 ret = __litmus_tasklet_schedule(t, k_id);
99 return(ret);
100}
101
102/* for use by __tasklet_schedule() */
103static inline int _litmus_tasklet_schedule(
104 struct tasklet_struct *t,
105 unsigned int k_id)
106{
107 return(__litmus_tasklet_schedule(t, k_id));
108}
109
110
111
112
113extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
114 unsigned int k_id);
115
116/* schedule a hi tasklet on klitirqd #k_id */
117static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
118 unsigned int k_id)
119{
120 int ret = 0;
121 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
122 ret = __litmus_tasklet_hi_schedule(t, k_id);
123 return(ret);
124}
125
126/* for use by __tasklet_hi_schedule() */
127static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t,
128 unsigned int k_id)
129{
130 return(__litmus_tasklet_hi_schedule(t, k_id));
131}
132
133
134
135
136
137extern int __litmus_tasklet_hi_schedule_first(
138 struct tasklet_struct *t,
139 unsigned int k_id);
140
141/* schedule a hi tasklet on klitirqd #k_id on next go-around */
142/* PRECONDITION: Interrupts must be disabled. */
143static inline int litmus_tasklet_hi_schedule_first(
144 struct tasklet_struct *t,
145 unsigned int k_id)
146{
147 int ret = 0;
148 if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
149 ret = __litmus_tasklet_hi_schedule_first(t, k_id);
150 return(ret);
151}
152
153/* for use by __tasklet_hi_schedule_first() */
154static inline int _litmus_tasklet_hi_schedule_first(
155 struct tasklet_struct *t,
156 unsigned int k_id)
157{
158 return(__litmus_tasklet_hi_schedule_first(t, k_id));
159}
160
161
162
163//////////////
164
165extern int __litmus_schedule_work(
166 struct work_struct* w,
167 unsigned int k_id);
168
169static inline int litmus_schedule_work(
170 struct work_struct* w,
171 unsigned int k_id)
172{
173 return(__litmus_schedule_work(w, k_id));
174}
175
176
177
178///////////// mutex operations for client threads.
179
180void down_and_set_stat(struct task_struct* t,
181 enum klitirqd_sem_status to_set,
182 struct mutex* sem);
183
184void __down_and_reset_and_set_stat(struct task_struct* t,
185 enum klitirqd_sem_status to_reset,
186 enum klitirqd_sem_status to_set,
187 struct mutex* sem);
188
189void up_and_set_stat(struct task_struct* t,
190 enum klitirqd_sem_status to_set,
191 struct mutex* sem);
192
193
194
195void release_klitirqd_lock(struct task_struct* t);
196
197int reacquire_klitirqd_lock(struct task_struct* t);
198
199#endif
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
new file mode 100644
index 000000000000..9e07a27fdee3
--- /dev/null
+++ b/include/litmus/nvidia_info.h
@@ -0,0 +1,38 @@
1#ifndef __LITMUS_NVIDIA_H
2#define __LITMUS_NVIDIA_H
3
4#include <linux/interrupt.h>
5
6
7#include <litmus/litmus_softirq.h>
8
9
10//#define NV_DEVICE_NUM NR_LITMUS_SOFTIRQD
11#define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM
12
13int init_nvidia_info(void);
14
15int is_nvidia_func(void* func_addr);
16
17void dump_nvidia_info(const struct tasklet_struct *t);
18
19
20// Returns the Nvidia device # associated with provided tasklet and work_struct.
21u32 get_tasklet_nv_device_num(const struct tasklet_struct *t);
22u32 get_work_nv_device_num(const struct work_struct *t);
23
24
25int init_nv_device_reg(void);
26//int get_nv_device_id(struct task_struct* owner);
27
28
29int reg_nv_device(int reg_device_id, int register_device);
30
31struct task_struct* get_nv_device_owner(u32 target_device_id);
32
33void lock_nv_registry(u32 reg_device_id, unsigned long* flags);
34void unlock_nv_registry(u32 reg_device_id, unsigned long* flags);
35
36void increment_nv_int_count(u32 device);
37
38#endif
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 61f4db18e49d..8f3a9ca2d4e3 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -26,6 +26,7 @@ const char* sched_state_name(int s);
26 (x), #x, __FUNCTION__); \ 26 (x), #x, __FUNCTION__); \
27 } while (0); 27 } while (0);
28 28
29//#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */
29#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \ 30#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) \
30 TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \ 31 TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n", \
31 cpu, (x), sched_state_name(x), \ 32 cpu, (x), sched_state_name(x), \
diff --git a/include/litmus/rm_common.h b/include/litmus/rm_common.h
new file mode 100644
index 000000000000..5991b0b4e758
--- /dev/null
+++ b/include/litmus/rm_common.h
@@ -0,0 +1,25 @@
1/*
2 * EDF common data structures and utility functions shared by all EDF
3 * based scheduler plugins
4 */
5
6/* CLEANUP: Add comments and make it less messy.
7 *
8 */
9
10#ifndef __UNC_RM_COMMON_H__
11#define __UNC_RM_COMMON_H__
12
13#include <litmus/rt_domain.h>
14
15void rm_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
16 release_jobs_t release);
17
18int rm_higher_prio(struct task_struct* first,
19 struct task_struct* second);
20
21int rm_ready_order(struct bheap_node* a, struct bheap_node* b);
22
23int rm_preemption_needed(rt_domain_t* rt, struct task_struct *t);
24
25#endif
diff --git a/include/litmus/rm_srt_common.h b/include/litmus/rm_srt_common.h
new file mode 100644
index 000000000000..78aa287327a2
--- /dev/null
+++ b/include/litmus/rm_srt_common.h
@@ -0,0 +1,25 @@
1/*
2 * EDF common data structures and utility functions shared by all EDF
3 * based scheduler plugins
4 */
5
6/* CLEANUP: Add comments and make it less messy.
7 *
8 */
9
10#ifndef __UNC_RM_SRT_COMMON_H__
11#define __UNC_RM_SRT_COMMON_H__
12
13#include <litmus/rt_domain.h>
14
15void rm_srt_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
16 release_jobs_t release);
17
18int rm_srt_higher_prio(struct task_struct* first,
19 struct task_struct* second);
20
21int rm_srt_ready_order(struct bheap_node* a, struct bheap_node* b);
22
23int rm_srt_preemption_needed(rt_domain_t* rt, struct task_struct *t);
24
25#endif
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 307de81db0fd..d0040bfd2d0c 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -76,6 +76,7 @@ struct control_page {
76#ifdef __KERNEL__ 76#ifdef __KERNEL__
77 77
78#include <litmus/binheap.h> 78#include <litmus/binheap.h>
79#include <linux/semaphore.h>
79 80
80struct _rt_domain; 81struct _rt_domain;
81struct bheap_node; 82struct bheap_node;
@@ -102,6 +103,14 @@ struct rt_job {
102 103
103struct pfair_param; 104struct pfair_param;
104 105
106enum klitirqd_sem_status
107{
108 NEED_TO_REACQUIRE,
109 REACQUIRING,
110 NOT_HELD,
111 HELD
112};
113
105/* RT task parameters for scheduling extensions 114/* RT task parameters for scheduling extensions
106 * These parameters are inherited during clone and therefore must 115 * These parameters are inherited during clone and therefore must
107 * be explicitly set up before the task set is launched. 116 * be explicitly set up before the task set is launched.
@@ -116,6 +125,37 @@ struct rt_param {
116 /* is the task present? (true if it can be scheduled) */ 125 /* is the task present? (true if it can be scheduled) */
117 unsigned int present:1; 126 unsigned int present:1;
118 127
128#ifdef CONFIG_LITMUS_SOFTIRQD
129 /* proxy threads have minimum priority by default */
130 unsigned int is_proxy_thread:1;
131
132 /* pointer to klitirqd currently working on this
133 task_struct's behalf. only set by the task pointed
134 to by klitirqd.
135
136 ptr only valid if is_proxy_thread == 0
137 */
138 struct task_struct* cur_klitirqd;
139
140 /* Used to implement mutual execution exclusion between
141 * job and klitirqd execution. Job must always hold
142 * it's klitirqd_sem to execute. klitirqd instance
143 * must hold the semaphore before executing on behalf
144 * of a job.
145 */
146 struct mutex klitirqd_sem;
147
148 /* status of held klitirqd_sem, even if the held klitirqd_sem is from
149 another task (only proxy threads do this though).
150 */
151 atomic_t klitirqd_sem_stat;
152#endif
153
154#ifdef CONFIG_LITMUS_NVIDIA
155 /* number of top-half interrupts handled on behalf of current job */
156 atomic_t nv_int_count;
157#endif
158
119#ifdef CONFIG_LITMUS_LOCKING 159#ifdef CONFIG_LITMUS_LOCKING
120 /* Is the task being priority-boosted by a locking protocol? */ 160 /* Is the task being priority-boosted by a locking protocol? */
121 unsigned int priority_boosted:1; 161 unsigned int priority_boosted:1;
@@ -141,16 +181,10 @@ struct rt_param {
141 raw_spinlock_t hp_blocked_tasks_lock; 181 raw_spinlock_t hp_blocked_tasks_lock;
142 struct binheap_handle hp_blocked_tasks; 182 struct binheap_handle hp_blocked_tasks;
143 183
144
145 /* pointer to lock upon which is currently blocked */ 184 /* pointer to lock upon which is currently blocked */
146 struct litmus_lock* blocked_lock; 185 struct litmus_lock* blocked_lock;
147
148
149 //void* donor_data;
150#endif 186#endif
151 187
152
153
154#ifdef CONFIG_NP_SECTION 188#ifdef CONFIG_NP_SECTION
155 /* For the FMLP under PSN-EDF, it is required to make the task 189 /* For the FMLP under PSN-EDF, it is required to make the task
156 * non-preemptive from kernel space. In order not to interfere with 190 * non-preemptive from kernel space. In order not to interfere with
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 8e5167970340..e31008fcdd59 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,8 @@
11#include <litmus/locking.h> 11#include <litmus/locking.h>
12#endif 12#endif
13 13
14#include <linux/interrupt.h>
15
14/************************ setup/tear down ********************/ 16/************************ setup/tear down ********************/
15 17
16typedef long (*activate_plugin_t) (void); 18typedef long (*activate_plugin_t) (void);
@@ -29,7 +31,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
29 */ 31 */
30typedef void (*finish_switch_t)(struct task_struct *prev); 32typedef void (*finish_switch_t)(struct task_struct *prev);
31 33
32
33/********************* task state changes ********************/ 34/********************* task state changes ********************/
34 35
35/* Called to setup a new real-time task. 36/* Called to setup a new real-time task.
@@ -65,6 +66,16 @@ typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct
65typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, 66typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh,
66 raw_spinlock_t *to_unlock, unsigned long irqflags); 67 raw_spinlock_t *to_unlock, unsigned long irqflags);
67 68
69typedef void (*increase_prio_klitirq_t)(struct task_struct* klitirqd,
70 struct task_struct* old_owner,
71 struct task_struct* new_owner);
72typedef void (*decrease_prio_klitirqd_t)(struct task_struct* klitirqd,
73 struct task_struct* old_owner);
74
75
76typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet);
77typedef void (*run_tasklets_t)(struct task_struct* next);
78
68typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t); 79typedef raw_spinlock_t* (*get_dgl_spinlock_t) (struct task_struct *t);
69 80
70/********************* sys call backends ********************/ 81/********************* sys call backends ********************/
@@ -96,7 +107,7 @@ struct sched_plugin {
96 /* task state changes */ 107 /* task state changes */
97 admit_task_t admit_task; 108 admit_task_t admit_task;
98 109
99 task_new_t task_new; 110 task_new_t task_new;
100 task_wake_up_t task_wake_up; 111 task_wake_up_t task_wake_up;
101 task_block_t task_block; 112 task_block_t task_block;
102 task_exit_t task_exit; 113 task_exit_t task_exit;
@@ -114,6 +125,15 @@ struct sched_plugin {
114#ifdef CONFIG_LITMUS_DGL_SUPPORT 125#ifdef CONFIG_LITMUS_DGL_SUPPORT
115 get_dgl_spinlock_t get_dgl_spinlock; 126 get_dgl_spinlock_t get_dgl_spinlock;
116#endif 127#endif
128
129#ifdef CONFIG_LITMUS_SOFTIRQD
130 increase_prio_klitirq_t increase_prio_klitirqd;
131 decrease_prio_klitirqd_t decrease_prio_klitirqd;
132#endif
133#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
134 enqueue_pai_tasklet_t enqueue_pai_tasklet;
135 run_tasklets_t run_tasklets;
136#endif
117} __attribute__ ((__aligned__(SMP_CACHE_BYTES))); 137} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
118 138
119 139
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 7ca34cb13881..232c7588d103 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -11,12 +11,12 @@ struct st_trace_header {
11 u8 cpu; /* On which CPU was it recorded? */ 11 u8 cpu; /* On which CPU was it recorded? */
12 u16 pid; /* PID of the task. */ 12 u16 pid; /* PID of the task. */
13 u32 job; /* The job sequence number. */ 13 u32 job; /* The job sequence number. */
14}; 14} __attribute__((packed));
15 15
16#define ST_NAME_LEN 16 16#define ST_NAME_LEN 16
17struct st_name_data { 17struct st_name_data {
18 char cmd[ST_NAME_LEN];/* The name of the executable of this process. */ 18 char cmd[ST_NAME_LEN];/* The name of the executable of this process. */
19}; 19} __attribute__((packed));
20 20
21struct st_param_data { /* regular params */ 21struct st_param_data { /* regular params */
22 u32 wcet; 22 u32 wcet;
@@ -25,30 +25,29 @@ struct st_param_data { /* regular params */
25 u8 partition; 25 u8 partition;
26 u8 class; 26 u8 class;
27 u8 __unused[2]; 27 u8 __unused[2];
28}; 28} __attribute__((packed));
29 29
30struct st_release_data { /* A job is was/is going to be released. */ 30struct st_release_data { /* A job is was/is going to be released. */
31 u64 release; /* What's the release time? */ 31 u64 release; /* What's the release time? */
32 u64 deadline; /* By when must it finish? */ 32 u64 deadline; /* By when must it finish? */
33}; 33} __attribute__((packed));
34 34
35struct st_assigned_data { /* A job was asigned to a CPU. */ 35struct st_assigned_data { /* A job was asigned to a CPU. */
36 u64 when; 36 u64 when;
37 u8 target; /* Where should it execute? */ 37 u8 target; /* Where should it execute? */
38 u8 __unused[7]; 38 u8 __unused[7];
39}; 39} __attribute__((packed));
40 40
41struct st_switch_to_data { /* A process was switched to on a given CPU. */ 41struct st_switch_to_data { /* A process was switched to on a given CPU. */
42 u64 when; /* When did this occur? */ 42 u64 when; /* When did this occur? */
43 u32 exec_time; /* Time the current job has executed. */ 43 u32 exec_time; /* Time the current job has executed. */
44 u8 __unused[4]; 44 u8 __unused[4];
45 45} __attribute__((packed));
46};
47 46
48struct st_switch_away_data { /* A process was switched away from on a given CPU. */ 47struct st_switch_away_data { /* A process was switched away from on a given CPU. */
49 u64 when; 48 u64 when;
50 u64 exec_time; 49 u64 exec_time;
51}; 50} __attribute__((packed));
52 51
53struct st_completion_data { /* A job completed. */ 52struct st_completion_data { /* A job completed. */
54 u64 when; 53 u64 when;
@@ -56,35 +55,92 @@ struct st_completion_data { /* A job completed. */
56 * next task automatically; set to 0 otherwise. 55 * next task automatically; set to 0 otherwise.
57 */ 56 */
58 u8 __uflags:7; 57 u8 __uflags:7;
59 u8 __unused[7]; 58 u16 nv_int_count;
60}; 59 u8 __unused[5];
60} __attribute__((packed));
61 61
62struct st_block_data { /* A task blocks. */ 62struct st_block_data { /* A task blocks. */
63 u64 when; 63 u64 when;
64 u64 __unused; 64 u64 __unused;
65}; 65} __attribute__((packed));
66 66
67struct st_resume_data { /* A task resumes. */ 67struct st_resume_data { /* A task resumes. */
68 u64 when; 68 u64 when;
69 u64 __unused; 69 u64 __unused;
70}; 70} __attribute__((packed));
71 71
72struct st_action_data { 72struct st_action_data {
73 u64 when; 73 u64 when;
74 u8 action; 74 u8 action;
75 u8 __unused[7]; 75 u8 __unused[7];
76}; 76} __attribute__((packed));
77 77
78struct st_sys_release_data { 78struct st_sys_release_data {
79 u64 when; 79 u64 when;
80 u64 release; 80 u64 release;
81}; 81} __attribute__((packed));
82
83
84struct st_tasklet_release_data {
85 u64 when;
86 u64 __unused;
87} __attribute__((packed));
88
89struct st_tasklet_begin_data {
90 u64 when;
91 u16 exe_pid;
92 u8 __unused[6];
93} __attribute__((packed));
94
95struct st_tasklet_end_data {
96 u64 when;
97 u16 exe_pid;
98 u8 flushed;
99 u8 __unused[5];
100} __attribute__((packed));
101
102
103struct st_work_release_data {
104 u64 when;
105 u64 __unused;
106} __attribute__((packed));
107
108struct st_work_begin_data {
109 u64 when;
110 u16 exe_pid;
111 u8 __unused[6];
112} __attribute__((packed));
113
114struct st_work_end_data {
115 u64 when;
116 u16 exe_pid;
117 u8 flushed;
118 u8 __unused[5];
119} __attribute__((packed));
120
121struct st_effective_priority_change_data {
122 u64 when;
123 u16 inh_pid;
124 u8 __unused[6];
125} __attribute__((packed));
126
127struct st_nv_interrupt_begin_data {
128 u64 when;
129 u32 device;
130 u32 serialNumber;
131} __attribute__((packed));
132
133struct st_nv_interrupt_end_data {
134 u64 when;
135 u32 device;
136 u32 serialNumber;
137} __attribute__((packed));
82 138
83#define DATA(x) struct st_ ## x ## _data x; 139#define DATA(x) struct st_ ## x ## _data x;
84 140
85typedef enum { 141typedef enum {
86 ST_NAME = 1, /* Start at one, so that we can spot 142 ST_NAME = 1, /* Start at one, so that we can spot
87 * uninitialized records. */ 143 * uninitialized records. */
88 ST_PARAM, 144 ST_PARAM,
89 ST_RELEASE, 145 ST_RELEASE,
90 ST_ASSIGNED, 146 ST_ASSIGNED,
@@ -94,7 +150,16 @@ typedef enum {
94 ST_BLOCK, 150 ST_BLOCK,
95 ST_RESUME, 151 ST_RESUME,
96 ST_ACTION, 152 ST_ACTION,
97 ST_SYS_RELEASE 153 ST_SYS_RELEASE,
154 ST_TASKLET_RELEASE,
155 ST_TASKLET_BEGIN,
156 ST_TASKLET_END,
157 ST_WORK_RELEASE,
158 ST_WORK_BEGIN,
159 ST_WORK_END,
160 ST_EFF_PRIO_CHANGE,
161 ST_NV_INTERRUPT_BEGIN,
162 ST_NV_INTERRUPT_END,
98} st_event_record_type_t; 163} st_event_record_type_t;
99 164
100struct st_event_record { 165struct st_event_record {
@@ -113,8 +178,17 @@ struct st_event_record {
113 DATA(resume); 178 DATA(resume);
114 DATA(action); 179 DATA(action);
115 DATA(sys_release); 180 DATA(sys_release);
181 DATA(tasklet_release);
182 DATA(tasklet_begin);
183 DATA(tasklet_end);
184 DATA(work_release);
185 DATA(work_begin);
186 DATA(work_end);
187 DATA(effective_priority_change);
188 DATA(nv_interrupt_begin);
189 DATA(nv_interrupt_end);
116 } data; 190 } data;
117}; 191} __attribute__((packed));
118 192
119#undef DATA 193#undef DATA
120 194
@@ -129,6 +203,8 @@ struct st_event_record {
129 ft_event1(id, callback, task) 203 ft_event1(id, callback, task)
130#define SCHED_TRACE2(id, callback, task, xtra) \ 204#define SCHED_TRACE2(id, callback, task, xtra) \
131 ft_event2(id, callback, task, xtra) 205 ft_event2(id, callback, task, xtra)
206#define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \
207 ft_event3(id, callback, task, xtra1, xtra2)
132 208
133/* provide prototypes; needed on sparc64 */ 209/* provide prototypes; needed on sparc64 */
134#ifndef NO_TASK_TRACE_DECLS 210#ifndef NO_TASK_TRACE_DECLS
@@ -155,12 +231,45 @@ feather_callback void do_sched_trace_action(unsigned long id,
155feather_callback void do_sched_trace_sys_release(unsigned long id, 231feather_callback void do_sched_trace_sys_release(unsigned long id,
156 lt_t* start); 232 lt_t* start);
157 233
234
235feather_callback void do_sched_trace_tasklet_release(unsigned long id,
236 struct task_struct* owner);
237feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
238 struct task_struct* owner);
239feather_callback void do_sched_trace_tasklet_end(unsigned long id,
240 struct task_struct* owner,
241 unsigned long flushed);
242
243feather_callback void do_sched_trace_work_release(unsigned long id,
244 struct task_struct* owner);
245feather_callback void do_sched_trace_work_begin(unsigned long id,
246 struct task_struct* owner,
247 struct task_struct* exe);
248feather_callback void do_sched_trace_work_end(unsigned long id,
249 struct task_struct* owner,
250 struct task_struct* exe,
251 unsigned long flushed);
252
253feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
254 struct task_struct* task,
255 struct task_struct* inh);
256
257feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
258 u32 device);
259feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
260 unsigned long unused);
261
262
263/* returns true if we're tracing an interrupt on current CPU */
264/* int is_interrupt_tracing_active(void); */
265
158#endif 266#endif
159 267
160#else 268#else
161 269
162#define SCHED_TRACE(id, callback, task) /* no tracing */ 270#define SCHED_TRACE(id, callback, task) /* no tracing */
163#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */ 271#define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
272#define SCHED_TRACE3(id, callback, task, xtra1, xtra2)
164 273
165#endif 274#endif
166 275
@@ -193,6 +302,35 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
193 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when) 302 SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
194 303
195 304
305#define sched_trace_tasklet_release(t) \
306 SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t)
307
308#define sched_trace_tasklet_begin(t) \
309 SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t)
310
311#define sched_trace_tasklet_end(t, flushed) \
312 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed)
313
314
315#define sched_trace_work_release(t) \
316 SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t)
317
318#define sched_trace_work_begin(t, e) \
319 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e)
320
321#define sched_trace_work_end(t, e, flushed) \
322 SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed)
323
324
325#define sched_trace_eff_prio_change(t, inh) \
326 SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh)
327
328
329#define sched_trace_nv_interrupt_begin(d) \
330 SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d)
331#define sched_trace_nv_interrupt_end(d) \
332 SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d)
333
196#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */ 334#define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
197 335
198#endif /* __KERNEL__ */ 336#endif /* __KERNEL__ */
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h
new file mode 100644
index 000000000000..e70e45e4cf51
--- /dev/null
+++ b/include/litmus/sched_trace_external.h
@@ -0,0 +1,78 @@
1/*
2 * sched_trace.h -- record scheduler events to a byte stream for offline analysis.
3 */
4#ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_
5#define _LINUX_SCHED_TRACE_EXTERNAL_H_
6
7
8#ifdef CONFIG_SCHED_TASK_TRACE
9extern void __sched_trace_tasklet_begin_external(struct task_struct* t);
10static inline void sched_trace_tasklet_begin_external(struct task_struct* t)
11{
12 __sched_trace_tasklet_begin_external(t);
13}
14
15extern void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed);
16static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed)
17{
18 __sched_trace_tasklet_end_external(t, flushed);
19}
20
21extern void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e);
22static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e)
23{
24 __sched_trace_work_begin_external(t, e);
25}
26
27extern void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f);
28static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f)
29{
30 __sched_trace_work_end_external(t, e, f);
31}
32
33#ifdef CONFIG_LITMUS_NVIDIA
34extern void __sched_trace_nv_interrupt_begin_external(u32 device);
35static inline void sched_trace_nv_interrupt_begin_external(u32 device)
36{
37 __sched_trace_nv_interrupt_begin_external(device);
38}
39
40extern void __sched_trace_nv_interrupt_end_external(u32 device);
41static inline void sched_trace_nv_interrupt_end_external(u32 device)
42{
43 __sched_trace_nv_interrupt_end_external(device);
44}
45#endif
46
47#else
48
49// no tracing.
50static inline void sched_trace_tasklet_begin_external(struct task_struct* t){}
51static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){}
52static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){}
53static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){}
54
55#ifdef CONFIG_LITMUS_NVIDIA
56static inline void sched_trace_nv_interrupt_begin_external(u32 device){}
57static inline void sched_trace_nv_interrupt_end_external(u32 device){}
58#endif
59
60#endif
61
62
63#ifdef CONFIG_LITMUS_NVIDIA
64
65#define EX_TS(evt) \
66extern void __##evt(void); \
67static inline void EX_##evt(void) { __##evt(); }
68
69EX_TS(TS_NV_TOPISR_START)
70EX_TS(TS_NV_TOPISR_END)
71EX_TS(TS_NV_BOTISR_START)
72EX_TS(TS_NV_BOTISR_END)
73EX_TS(TS_NV_RELEASE_BOTISR_START)
74EX_TS(TS_NV_RELEASE_BOTISR_END)
75
76#endif
77
78#endif
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index 1a1b0d479f61..e078aee4234d 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -125,4 +125,24 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
125 125
126#define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when)) 126#define TS_RELEASE_LATENCY(when) LTIMESTAMP(208, &(when))
127 127
128
129#ifdef CONFIG_LITMUS_NVIDIA
130
131#define TS_NV_TOPISR_START TIMESTAMP(200)
132#define TS_NV_TOPISR_END TIMESTAMP(201)
133
134#define TS_NV_BOTISR_START TIMESTAMP(202)
135#define TS_NV_BOTISR_END TIMESTAMP(203)
136
137#define TS_NV_RELEASE_BOTISR_START TIMESTAMP(204)
138#define TS_NV_RELEASE_BOTISR_END TIMESTAMP(205)
139
140#endif
141
142#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
143#define TS_NV_SCHED_BOTISR_START TIMESTAMP(206)
144#define TS_NV_SCHED_BOTISR_END TIMESTAMP(207)
145#endif
146
147
128#endif /* !_SYS_TRACE_H_ */ 148#endif /* !_SYS_TRACE_H_ */
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 941231c8184b..4fa514c89605 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -19,5 +19,6 @@
19#define __NR_null_call __LSC(11) 19#define __NR_null_call __LSC(11)
20#define __NR_litmus_dgl_lock __LSC(12) 20#define __NR_litmus_dgl_lock __LSC(12)
21#define __NR_litmus_dgl_unlock __LSC(13) 21#define __NR_litmus_dgl_unlock __LSC(13)
22#define __NR_register_nv_device __LSC(14)
22 23
23#define NR_litmus_syscalls 14 24#define NR_litmus_syscalls 15
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index bf2ffeac2dbb..f80dc45dc185 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -33,5 +33,8 @@ __SYSCALL(__NR_null_call, sys_null_call)
33__SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock) 33__SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock)
34#define __NR_litmus_dgl_unlock __LSC(13) 34#define __NR_litmus_dgl_unlock __LSC(13)
35__SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock) 35__SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock)
36#define __NR_register_nv_device __LSC(14)
37__SYSCALL(__NR_register_nv_device, sys_register_nv_device)
36 38
37#define NR_litmus_syscalls 14 39
40#define NR_litmus_syscalls 15
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4b3107a244fa..2bdcdc3691e5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -542,7 +542,7 @@ static void print_lock(struct held_lock *hlock)
542 print_ip_sym(hlock->acquire_ip); 542 print_ip_sym(hlock->acquire_ip);
543} 543}
544 544
545static void lockdep_print_held_locks(struct task_struct *curr) 545void lockdep_print_held_locks(struct task_struct *curr)
546{ 546{
547 int i, depth = curr->lockdep_depth; 547 int i, depth = curr->lockdep_depth;
548 548
@@ -558,6 +558,7 @@ static void lockdep_print_held_locks(struct task_struct *curr)
558 print_lock(curr->held_locks + i); 558 print_lock(curr->held_locks + i);
559 } 559 }
560} 560}
561EXPORT_SYMBOL(lockdep_print_held_locks);
561 562
562static void print_kernel_version(void) 563static void print_kernel_version(void)
563{ 564{
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d607ed5dd441..2f363b9bfc1f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
498 return 1; 498 return 1;
499} 499}
500EXPORT_SYMBOL(atomic_dec_and_mutex_lock); 500EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
501
502
503
504
505void mutex_lock_sfx(struct mutex *lock,
506 side_effect_t pre, unsigned long pre_arg,
507 side_effect_t post, unsigned long post_arg)
508{
509 long state = TASK_UNINTERRUPTIBLE;
510
511 struct task_struct *task = current;
512 struct mutex_waiter waiter;
513 unsigned long flags;
514
515 preempt_disable();
516 mutex_acquire(&lock->dep_map, subclass, 0, ip);
517
518 spin_lock_mutex(&lock->wait_lock, flags);
519
520 if(pre)
521 {
522 if(unlikely(pre(pre_arg)))
523 {
524 // this will fuck with lockdep's CONFIG_PROVE_LOCKING...
525 spin_unlock_mutex(&lock->wait_lock, flags);
526 preempt_enable();
527 return;
528 }
529 }
530
531 debug_mutex_lock_common(lock, &waiter);
532 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
533
534 /* add waiting tasks to the end of the waitqueue (FIFO): */
535 list_add_tail(&waiter.list, &lock->wait_list);
536 waiter.task = task;
537
538 if (atomic_xchg(&lock->count, -1) == 1)
539 goto done;
540
541 lock_contended(&lock->dep_map, ip);
542
543 for (;;) {
544 /*
545 * Lets try to take the lock again - this is needed even if
546 * we get here for the first time (shortly after failing to
547 * acquire the lock), to make sure that we get a wakeup once
548 * it's unlocked. Later on, if we sleep, this is the
549 * operation that gives us the lock. We xchg it to -1, so
550 * that when we release the lock, we properly wake up the
551 * other waiters:
552 */
553 if (atomic_xchg(&lock->count, -1) == 1)
554 break;
555
556 __set_task_state(task, state);
557
558 /* didnt get the lock, go to sleep: */
559 spin_unlock_mutex(&lock->wait_lock, flags);
560 preempt_enable_no_resched();
561 schedule();
562 preempt_disable();
563 spin_lock_mutex(&lock->wait_lock, flags);
564 }
565
566done:
567 lock_acquired(&lock->dep_map, ip);
568 /* got the lock - rejoice! */
569 mutex_remove_waiter(lock, &waiter, current_thread_info());
570 mutex_set_owner(lock);
571
572 /* set it to 0 if there are no waiters left: */
573 if (likely(list_empty(&lock->wait_list)))
574 atomic_set(&lock->count, 0);
575
576 if(post)
577 post(post_arg);
578
579 spin_unlock_mutex(&lock->wait_lock, flags);
580
581 debug_mutex_free_waiter(&waiter);
582 preempt_enable();
583}
584EXPORT_SYMBOL(mutex_lock_sfx);
585
586void mutex_unlock_sfx(struct mutex *lock,
587 side_effect_t pre, unsigned long pre_arg,
588 side_effect_t post, unsigned long post_arg)
589{
590 unsigned long flags;
591
592 spin_lock_mutex(&lock->wait_lock, flags);
593
594 if(pre)
595 pre(pre_arg);
596
597 //mutex_release(&lock->dep_map, nested, _RET_IP_);
598 mutex_release(&lock->dep_map, 1, _RET_IP_);
599 debug_mutex_unlock(lock);
600
601 /*
602 * some architectures leave the lock unlocked in the fastpath failure
603 * case, others need to leave it locked. In the later case we have to
604 * unlock it here
605 */
606 if (__mutex_slowpath_needs_to_unlock())
607 atomic_set(&lock->count, 1);
608
609 if (!list_empty(&lock->wait_list)) {
610 /* get the first entry from the wait-list: */
611 struct mutex_waiter *waiter =
612 list_entry(lock->wait_list.next,
613 struct mutex_waiter, list);
614
615 debug_mutex_wake_waiter(lock, waiter);
616
617 wake_up_process(waiter->task);
618 }
619
620 if(post)
621 post(post_arg);
622
623 spin_unlock_mutex(&lock->wait_lock, flags);
624}
625EXPORT_SYMBOL(mutex_unlock_sfx);
diff --git a/kernel/sched.c b/kernel/sched.c
index baaca61bc3a3..f3d9a69a3777 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -83,6 +83,10 @@
83#include <litmus/sched_trace.h> 83#include <litmus/sched_trace.h>
84#include <litmus/trace.h> 84#include <litmus/trace.h>
85 85
86#ifdef CONFIG_LITMUS_SOFTIRQD
87#include <litmus/litmus_softirq.h>
88#endif
89
86static void litmus_tick(struct rq*, struct task_struct*); 90static void litmus_tick(struct rq*, struct task_struct*);
87 91
88#define CREATE_TRACE_POINTS 92#define CREATE_TRACE_POINTS
@@ -4305,6 +4309,7 @@ pick_next_task(struct rq *rq)
4305 BUG(); /* the idle class will always have a runnable task */ 4309 BUG(); /* the idle class will always have a runnable task */
4306} 4310}
4307 4311
4312
4308/* 4313/*
4309 * schedule() is the main scheduler function. 4314 * schedule() is the main scheduler function.
4310 */ 4315 */
@@ -4323,6 +4328,10 @@ need_resched:
4323 rcu_note_context_switch(cpu); 4328 rcu_note_context_switch(cpu);
4324 prev = rq->curr; 4329 prev = rq->curr;
4325 4330
4331#ifdef CONFIG_LITMUS_SOFTIRQD
4332 release_klitirqd_lock(prev);
4333#endif
4334
4326 /* LITMUS^RT: quickly re-evaluate the scheduling decision 4335 /* LITMUS^RT: quickly re-evaluate the scheduling decision
4327 * if the previous one is no longer valid after CTX. 4336 * if the previous one is no longer valid after CTX.
4328 */ 4337 */
@@ -4411,13 +4420,24 @@ litmus_need_resched_nonpreemptible:
4411 goto litmus_need_resched_nonpreemptible; 4420 goto litmus_need_resched_nonpreemptible;
4412 4421
4413 preempt_enable_no_resched(); 4422 preempt_enable_no_resched();
4423
4414 if (need_resched()) 4424 if (need_resched())
4415 goto need_resched; 4425 goto need_resched;
4416 4426
4427#ifdef LITMUS_SOFTIRQD
4428 reacquire_klitirqd_lock(prev);
4429#endif
4430
4431#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
4432 litmus->run_tasklets(prev);
4433#endif
4434
4417 srp_ceiling_block(); 4435 srp_ceiling_block();
4418} 4436}
4419EXPORT_SYMBOL(schedule); 4437EXPORT_SYMBOL(schedule);
4420 4438
4439
4440
4421#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 4441#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
4422 4442
4423static inline bool owner_running(struct mutex *lock, struct task_struct *owner) 4443static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -4561,6 +4581,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4561 } 4581 }
4562} 4582}
4563 4583
4584
4564/** 4585/**
4565 * __wake_up - wake up threads blocked on a waitqueue. 4586 * __wake_up - wake up threads blocked on a waitqueue.
4566 * @q: the waitqueue 4587 * @q: the waitqueue
@@ -4747,6 +4768,12 @@ void __sched wait_for_completion(struct completion *x)
4747} 4768}
4748EXPORT_SYMBOL(wait_for_completion); 4769EXPORT_SYMBOL(wait_for_completion);
4749 4770
4771void __sched __wait_for_completion_locked(struct completion *x)
4772{
4773 do_wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4774}
4775EXPORT_SYMBOL(__wait_for_completion_locked);
4776
4750/** 4777/**
4751 * wait_for_completion_timeout: - waits for completion of a task (w/timeout) 4778 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4752 * @x: holds the state of this particular completion 4779 * @x: holds the state of this particular completion
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..c947a046a6d7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -33,11 +33,11 @@
33#include <linux/spinlock.h> 33#include <linux/spinlock.h>
34#include <linux/ftrace.h> 34#include <linux/ftrace.h>
35 35
36static noinline void __down(struct semaphore *sem); 36noinline void __down(struct semaphore *sem);
37static noinline int __down_interruptible(struct semaphore *sem); 37static noinline int __down_interruptible(struct semaphore *sem);
38static noinline int __down_killable(struct semaphore *sem); 38static noinline int __down_killable(struct semaphore *sem);
39static noinline int __down_timeout(struct semaphore *sem, long jiffies); 39static noinline int __down_timeout(struct semaphore *sem, long jiffies);
40static noinline void __up(struct semaphore *sem); 40noinline void __up(struct semaphore *sem);
41 41
42/** 42/**
43 * down - acquire the semaphore 43 * down - acquire the semaphore
@@ -190,11 +190,13 @@ EXPORT_SYMBOL(up);
190 190
191/* Functions for the contended case */ 191/* Functions for the contended case */
192 192
193/*
193struct semaphore_waiter { 194struct semaphore_waiter {
194 struct list_head list; 195 struct list_head list;
195 struct task_struct *task; 196 struct task_struct *task;
196 int up; 197 int up;
197}; 198};
199 */
198 200
199/* 201/*
200 * Because this function is inlined, the 'state' parameter will be 202 * Because this function is inlined, the 'state' parameter will be
@@ -233,10 +235,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
233 return -EINTR; 235 return -EINTR;
234} 236}
235 237
236static noinline void __sched __down(struct semaphore *sem) 238noinline void __sched __down(struct semaphore *sem)
237{ 239{
238 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 240 __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
239} 241}
242EXPORT_SYMBOL(__down);
243
240 244
241static noinline int __sched __down_interruptible(struct semaphore *sem) 245static noinline int __sched __down_interruptible(struct semaphore *sem)
242{ 246{
@@ -253,7 +257,7 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long jiffies)
253 return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies); 257 return __down_common(sem, TASK_UNINTERRUPTIBLE, jiffies);
254} 258}
255 259
256static noinline void __sched __up(struct semaphore *sem) 260noinline void __sched __up(struct semaphore *sem)
257{ 261{
258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, 262 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
259 struct semaphore_waiter, list); 263 struct semaphore_waiter, list);
@@ -261,3 +265,4 @@ static noinline void __sched __up(struct semaphore *sem)
261 waiter->up = 1; 265 waiter->up = 1;
262 wake_up_process(waiter->task); 266 wake_up_process(waiter->task);
263} 267}
268EXPORT_SYMBOL(__up); \ No newline at end of file
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fca82c32042b..7c562558a863 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,15 @@
29#include <trace/events/irq.h> 29#include <trace/events/irq.h>
30 30
31#include <asm/irq.h> 31#include <asm/irq.h>
32
33#include <litmus/litmus.h>
34#include <litmus/sched_trace.h>
35
36#ifdef CONFIG_LITMUS_NVIDIA
37#include <litmus/nvidia_info.h>
38#include <litmus/trace.h>
39#endif
40
32/* 41/*
33 - No shared variables, all the data are CPU local. 42 - No shared variables, all the data are CPU local.
34 - If a softirq needs serialization, let it serialize itself 43 - If a softirq needs serialization, let it serialize itself
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
67 * to the pending events, so lets the scheduler to balance 76 * to the pending events, so lets the scheduler to balance
68 * the softirq load for us. 77 * the softirq load for us.
69 */ 78 */
70static void wakeup_softirqd(void) 79void wakeup_softirqd(void)
71{ 80{
72 /* Interrupts are disabled: no need to stop preemption */ 81 /* Interrupts are disabled: no need to stop preemption */
73 struct task_struct *tsk = __this_cpu_read(ksoftirqd); 82 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip)
193} 202}
194EXPORT_SYMBOL(local_bh_enable_ip); 203EXPORT_SYMBOL(local_bh_enable_ip);
195 204
205
196/* 206/*
197 * We restart softirq processing MAX_SOFTIRQ_RESTART times, 207 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
198 * and we fall back to softirqd after that. 208 * and we fall back to softirqd after that.
@@ -206,65 +216,65 @@ EXPORT_SYMBOL(local_bh_enable_ip);
206 216
207asmlinkage void __do_softirq(void) 217asmlinkage void __do_softirq(void)
208{ 218{
209 struct softirq_action *h; 219 struct softirq_action *h;
210 __u32 pending; 220 __u32 pending;
211 int max_restart = MAX_SOFTIRQ_RESTART; 221 int max_restart = MAX_SOFTIRQ_RESTART;
212 int cpu; 222 int cpu;
213 223
214 pending = local_softirq_pending(); 224 pending = local_softirq_pending();
215 account_system_vtime(current); 225 account_system_vtime(current);
216 226
217 __local_bh_disable((unsigned long)__builtin_return_address(0), 227 __local_bh_disable((unsigned long)__builtin_return_address(0),
218 SOFTIRQ_OFFSET); 228 SOFTIRQ_OFFSET);
219 lockdep_softirq_enter(); 229 lockdep_softirq_enter();
220 230
221 cpu = smp_processor_id(); 231 cpu = smp_processor_id();
222restart: 232restart:
223 /* Reset the pending bitmask before enabling irqs */ 233 /* Reset the pending bitmask before enabling irqs */
224 set_softirq_pending(0); 234 set_softirq_pending(0);
225 235
226 local_irq_enable(); 236 local_irq_enable();
227 237
228 h = softirq_vec; 238 h = softirq_vec;
229
230 do {
231 if (pending & 1) {
232 unsigned int vec_nr = h - softirq_vec;
233 int prev_count = preempt_count();
234
235 kstat_incr_softirqs_this_cpu(vec_nr);
236
237 trace_softirq_entry(vec_nr);
238 h->action(h);
239 trace_softirq_exit(vec_nr);
240 if (unlikely(prev_count != preempt_count())) {
241 printk(KERN_ERR "huh, entered softirq %u %s %p"
242 "with preempt_count %08x,"
243 " exited with %08x?\n", vec_nr,
244 softirq_to_name[vec_nr], h->action,
245 prev_count, preempt_count());
246 preempt_count() = prev_count;
247 }
248 239
249 rcu_bh_qs(cpu); 240 do {
250 } 241 if (pending & 1) {
251 h++; 242 unsigned int vec_nr = h - softirq_vec;
252 pending >>= 1; 243 int prev_count = preempt_count();
253 } while (pending);
254 244
255 local_irq_disable(); 245 kstat_incr_softirqs_this_cpu(vec_nr);
256 246
257 pending = local_softirq_pending(); 247 trace_softirq_entry(vec_nr);
258 if (pending && --max_restart) 248 h->action(h);
259 goto restart; 249 trace_softirq_exit(vec_nr);
250 if (unlikely(prev_count != preempt_count())) {
251 printk(KERN_ERR "huh, entered softirq %u %s %p"
252 "with preempt_count %08x,"
253 " exited with %08x?\n", vec_nr,
254 softirq_to_name[vec_nr], h->action,
255 prev_count, preempt_count());
256 preempt_count() = prev_count;
257 }
260 258
261 if (pending) 259 rcu_bh_qs(cpu);
262 wakeup_softirqd(); 260 }
261 h++;
262 pending >>= 1;
263 } while (pending);
263 264
264 lockdep_softirq_exit(); 265 local_irq_disable();
265 266
266 account_system_vtime(current); 267 pending = local_softirq_pending();
267 __local_bh_enable(SOFTIRQ_OFFSET); 268 if (pending && --max_restart)
269 goto restart;
270
271 if (pending)
272 wakeup_softirqd();
273
274 lockdep_softirq_exit();
275
276 account_system_vtime(current);
277 __local_bh_enable(SOFTIRQ_OFFSET);
268} 278}
269 279
270#ifndef __ARCH_HAS_DO_SOFTIRQ 280#ifndef __ARCH_HAS_DO_SOFTIRQ
@@ -402,8 +412,65 @@ struct tasklet_head
402static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); 412static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
403static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); 413static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
404 414
415
405void __tasklet_schedule(struct tasklet_struct *t) 416void __tasklet_schedule(struct tasklet_struct *t)
406{ 417{
418#ifdef CONFIG_LITMUS_NVIDIA
419 if(is_nvidia_func(t->func))
420 {
421 u32 nvidia_device = get_tasklet_nv_device_num(t);
422 // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
423 // __FUNCTION__, nvidia_device,litmus_clock());
424
425 unsigned long flags;
426 struct task_struct* device_owner;
427
428 lock_nv_registry(nvidia_device, &flags);
429
430 device_owner = get_nv_device_owner(nvidia_device);
431
432 if(device_owner==NULL)
433 {
434 t->owner = NULL;
435 }
436 else
437 {
438 if(is_realtime(device_owner))
439 {
440 TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
441 __FUNCTION__, nvidia_device,litmus_clock());
442 TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
443 __FUNCTION__,device_owner->pid,nvidia_device);
444
445 t->owner = device_owner;
446 sched_trace_tasklet_release(t->owner);
447
448 if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
449 {
450 unlock_nv_registry(nvidia_device, &flags);
451 return;
452 }
453 else
454 {
455 t->owner = NULL; /* fall through to normal scheduling */
456 }
457 }
458 else
459 {
460 t->owner = NULL;
461 }
462 }
463 unlock_nv_registry(nvidia_device, &flags);
464 }
465#endif
466
467 ___tasklet_schedule(t);
468}
469EXPORT_SYMBOL(__tasklet_schedule);
470
471
472void ___tasklet_schedule(struct tasklet_struct *t)
473{
407 unsigned long flags; 474 unsigned long flags;
408 475
409 local_irq_save(flags); 476 local_irq_save(flags);
@@ -413,11 +480,65 @@ void __tasklet_schedule(struct tasklet_struct *t)
413 raise_softirq_irqoff(TASKLET_SOFTIRQ); 480 raise_softirq_irqoff(TASKLET_SOFTIRQ);
414 local_irq_restore(flags); 481 local_irq_restore(flags);
415} 482}
483EXPORT_SYMBOL(___tasklet_schedule);
416 484
417EXPORT_SYMBOL(__tasklet_schedule);
418 485
419void __tasklet_hi_schedule(struct tasklet_struct *t) 486void __tasklet_hi_schedule(struct tasklet_struct *t)
420{ 487{
488#ifdef CONFIG_LITMUS_NVIDIA
489 if(is_nvidia_func(t->func))
490 {
491 u32 nvidia_device = get_tasklet_nv_device_num(t);
492 // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
493 // __FUNCTION__, nvidia_device,litmus_clock());
494
495 unsigned long flags;
496 struct task_struct* device_owner;
497
498 lock_nv_registry(nvidia_device, &flags);
499
500 device_owner = get_nv_device_owner(nvidia_device);
501
502 if(device_owner==NULL)
503 {
504 t->owner = NULL;
505 }
506 else
507 {
508 if( is_realtime(device_owner))
509 {
510 TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
511 __FUNCTION__, nvidia_device,litmus_clock());
512 TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
513 __FUNCTION__,device_owner->pid,nvidia_device);
514
515 t->owner = device_owner;
516 sched_trace_tasklet_release(t->owner);
517 if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device)))
518 {
519 unlock_nv_registry(nvidia_device, &flags);
520 return;
521 }
522 else
523 {
524 t->owner = NULL; /* fall through to normal scheduling */
525 }
526 }
527 else
528 {
529 t->owner = NULL;
530 }
531 }
532 unlock_nv_registry(nvidia_device, &flags);
533 }
534#endif
535
536 ___tasklet_hi_schedule(t);
537}
538EXPORT_SYMBOL(__tasklet_hi_schedule);
539
540void ___tasklet_hi_schedule(struct tasklet_struct* t)
541{
421 unsigned long flags; 542 unsigned long flags;
422 543
423 local_irq_save(flags); 544 local_irq_save(flags);
@@ -427,19 +548,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
427 raise_softirq_irqoff(HI_SOFTIRQ); 548 raise_softirq_irqoff(HI_SOFTIRQ);
428 local_irq_restore(flags); 549 local_irq_restore(flags);
429} 550}
430 551EXPORT_SYMBOL(___tasklet_hi_schedule);
431EXPORT_SYMBOL(__tasklet_hi_schedule);
432 552
433void __tasklet_hi_schedule_first(struct tasklet_struct *t) 553void __tasklet_hi_schedule_first(struct tasklet_struct *t)
434{ 554{
435 BUG_ON(!irqs_disabled()); 555 BUG_ON(!irqs_disabled());
556#ifdef CONFIG_LITMUS_NVIDIA
557 if(is_nvidia_func(t->func))
558 {
559 u32 nvidia_device = get_tasklet_nv_device_num(t);
560 // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
561 // __FUNCTION__, nvidia_device,litmus_clock());
562 unsigned long flags;
563 struct task_struct* device_owner;
564
565 lock_nv_registry(nvidia_device, &flags);
566
567 device_owner = get_nv_device_owner(nvidia_device);
568
569 if(device_owner==NULL)
570 {
571 t->owner = NULL;
572 }
573 else
574 {
575 if(is_realtime(device_owner))
576 {
577 TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
578 __FUNCTION__, nvidia_device,litmus_clock());
579
580 TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
581 __FUNCTION__,device_owner->pid,nvidia_device);
582
583 t->owner = device_owner;
584 sched_trace_tasklet_release(t->owner);
585 if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device)))
586 {
587 unlock_nv_registry(nvidia_device, &flags);
588 return;
589 }
590 else
591 {
592 t->owner = NULL; /* fall through to normal scheduling */
593 }
594 }
595 else
596 {
597 t->owner = NULL;
598 }
599 }
600 unlock_nv_registry(nvidia_device, &flags);
601 }
602#endif
603
604 ___tasklet_hi_schedule_first(t);
605}
606EXPORT_SYMBOL(__tasklet_hi_schedule_first);
607
608void ___tasklet_hi_schedule_first(struct tasklet_struct* t)
609{
610 BUG_ON(!irqs_disabled());
436 611
437 t->next = __this_cpu_read(tasklet_hi_vec.head); 612 t->next = __this_cpu_read(tasklet_hi_vec.head);
438 __this_cpu_write(tasklet_hi_vec.head, t); 613 __this_cpu_write(tasklet_hi_vec.head, t);
439 __raise_softirq_irqoff(HI_SOFTIRQ); 614 __raise_softirq_irqoff(HI_SOFTIRQ);
440} 615}
441 616EXPORT_SYMBOL(___tasklet_hi_schedule_first);
442EXPORT_SYMBOL(__tasklet_hi_schedule_first);
443 617
444static void tasklet_action(struct softirq_action *a) 618static void tasklet_action(struct softirq_action *a)
445{ 619{
@@ -495,6 +669,7 @@ static void tasklet_hi_action(struct softirq_action *a)
495 if (!atomic_read(&t->count)) { 669 if (!atomic_read(&t->count)) {
496 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 670 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
497 BUG(); 671 BUG();
672
498 t->func(t->data); 673 t->func(t->data);
499 tasklet_unlock(t); 674 tasklet_unlock(t);
500 continue; 675 continue;
@@ -518,8 +693,13 @@ void tasklet_init(struct tasklet_struct *t,
518 t->next = NULL; 693 t->next = NULL;
519 t->state = 0; 694 t->state = 0;
520 atomic_set(&t->count, 0); 695 atomic_set(&t->count, 0);
696
521 t->func = func; 697 t->func = func;
522 t->data = data; 698 t->data = data;
699
700#ifdef CONFIG_LITMUS_SOFTIRQD
701 t->owner = NULL;
702#endif
523} 703}
524 704
525EXPORT_SYMBOL(tasklet_init); 705EXPORT_SYMBOL(tasklet_init);
@@ -534,6 +714,7 @@ void tasklet_kill(struct tasklet_struct *t)
534 yield(); 714 yield();
535 } while (test_bit(TASKLET_STATE_SCHED, &t->state)); 715 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
536 } 716 }
717
537 tasklet_unlock_wait(t); 718 tasklet_unlock_wait(t);
538 clear_bit(TASKLET_STATE_SCHED, &t->state); 719 clear_bit(TASKLET_STATE_SCHED, &t->state);
539} 720}
@@ -808,6 +989,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
808 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { 989 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
809 if (*i == t) { 990 if (*i == t) {
810 *i = t->next; 991 *i = t->next;
992
811 /* If this was the tail element, move the tail ptr */ 993 /* If this was the tail element, move the tail ptr */
812 if (*i == NULL) 994 if (*i == NULL)
813 per_cpu(tasklet_vec, cpu).tail = i; 995 per_cpu(tasklet_vec, cpu).tail = i;
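The three __tasklet_schedule() variants above repeat the same NVIDIA owner-lookup pattern before falling back to the stock ___tasklet_*() paths. A condensed sketch of that control flow, using only identifiers that appear in the hunks above; the helper itself (litmus_nv_dispatch_tasklet) is hypothetical, and if _litmus_tasklet_schedule() and friends are macros rather than functions, the function-pointer parameter would need a thin wrapper:

#include <linux/interrupt.h>
#include <litmus/litmus.h>
#include <litmus/sched_trace.h>
#include <litmus/nvidia_info.h>

#ifdef CONFIG_LITMUS_NVIDIA
/* Returns nonzero if the tasklet was handed to klitirqd/PAI, zero if the
 * caller should fall through to the normal Linux scheduling path. */
static int litmus_nv_dispatch_tasklet(struct tasklet_struct *t,
		int (*litmus_sched)(struct tasklet_struct *, unsigned int))
{
	u32 nvidia_device;
	unsigned long flags;
	struct task_struct *device_owner;
	int handled = 0;

	if (!is_nvidia_func(t->func))
		return 0;

	nvidia_device = get_tasklet_nv_device_num(t);
	lock_nv_registry(nvidia_device, &flags);

	device_owner = get_nv_device_owner(nvidia_device);
	if (device_owner && is_realtime(device_owner)) {
		/* bind the tasklet to the real-time device owner and
		 * try to hand it off to LITMUS */
		t->owner = device_owner;
		sched_trace_tasklet_release(t->owner);
		handled = litmus_sched(t, nvidia_device);
	}
	if (!handled)
		t->owner = NULL;	/* fall through to normal scheduling */

	unlock_nv_registry(nvidia_device, &flags);
	return handled;
}
#endif

With such a helper, __tasklet_schedule() would reduce to "if the dispatch succeeded, return; otherwise ___tasklet_schedule(t);", which is the behavior the expanded hunks implement inline.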
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d04..2ceb7b43a045 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -44,6 +44,13 @@
44 44
45#include "workqueue_sched.h" 45#include "workqueue_sched.h"
46 46
47#ifdef CONFIG_LITMUS_NVIDIA
48#include <litmus/litmus.h>
49#include <litmus/sched_trace.h>
50#include <litmus/nvidia_info.h>
51#endif
52
53
47enum { 54enum {
48 /* global_cwq flags */ 55 /* global_cwq flags */
49 GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ 56 GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
@@ -1047,9 +1054,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
1047 work_flags |= WORK_STRUCT_DELAYED; 1054 work_flags |= WORK_STRUCT_DELAYED;
1048 worklist = &cwq->delayed_works; 1055 worklist = &cwq->delayed_works;
1049 } 1056 }
1050
1051 insert_work(cwq, work, worklist, work_flags); 1057 insert_work(cwq, work, worklist, work_flags);
1052
1053 spin_unlock_irqrestore(&gcwq->lock, flags); 1058 spin_unlock_irqrestore(&gcwq->lock, flags);
1054} 1059}
1055 1060
@@ -2687,10 +2692,70 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
2687 */ 2692 */
2688int schedule_work(struct work_struct *work) 2693int schedule_work(struct work_struct *work)
2689{ 2694{
2690 return queue_work(system_wq, work); 2695#if 0
2696#if defined(CONFIG_LITMUS_NVIDIA) && defined(CONFIG_LITMUS_SOFTIRQD)
2697 if(is_nvidia_func(work->func))
2698 {
2699 u32 nvidiaDevice = get_work_nv_device_num(work);
2700
2701 //1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.)
2702 unsigned long flags;
2703 struct task_struct* device_owner;
2704
2705 lock_nv_registry(nvidiaDevice, &flags);
2706
2707 device_owner = get_nv_device_owner(nvidiaDevice);
2708
2709 //2) If there is an owner, set work->owner to the owner's task struct.
2710 if(device_owner==NULL)
2711 {
2712 work->owner = NULL;
2713 //TRACE("%s: the owner task of NVIDIA Device %u is NULL\n",__FUNCTION__,nvidiaDevice);
2714 }
2715 else
2716 {
2717 if( is_realtime(device_owner))
2718 {
2719 TRACE("%s: Handling NVIDIA work for device\t%u\tat\t%llu\n",
2720 __FUNCTION__, nvidiaDevice,litmus_clock());
2721 TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
2722 __FUNCTION__,
2723 device_owner->pid,
2724 nvidiaDevice);
2725
2726 //3) Call litmus_schedule_work() and return (don't execute the rest
2727 // of schedule_work()).
2728 work->owner = device_owner;
2729 sched_trace_work_release(work->owner);
2730 if(likely(litmus_schedule_work(work, nvidiaDevice)))
2731 {
2732 unlock_nv_registry(nvidiaDevice, &flags);
2733 return 1;
2734 }
2735 else
2736 {
2737 work->owner = NULL; /* fall through to normal work scheduling */
2738 }
2739 }
2740 else
2741 {
2742 work->owner = NULL;
2743 }
2744 }
2745 unlock_nv_registry(nvidiaDevice, &flags);
2746 }
2747#endif
2748#endif
2749 return(__schedule_work(work));
2691} 2750}
2692EXPORT_SYMBOL(schedule_work); 2751EXPORT_SYMBOL(schedule_work);
2693 2752
2753int __schedule_work(struct work_struct* work)
2754{
2755 return queue_work(system_wq, work);
2756}
2757EXPORT_SYMBOL(__schedule_work);
2758
2694/* 2759/*
2695 * schedule_work_on - put work task on a specific cpu 2760 * schedule_work_on - put work task on a specific cpu
2696 * @cpu: cpu to put the work task on 2761 * @cpu: cpu to put the work task on
diff --git a/litmus/Kconfig b/litmus/Kconfig
index 97200506e31c..6cf4d7eaa96f 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -241,4 +241,90 @@ config PREEMPT_STATE_TRACE
241 241
242endmenu 242endmenu
243 243
244menu "Interrupt Handling"
245
246choice
247 prompt "Scheduling of interrupt bottom-halves in Litmus."
248 default LITMUS_SOFTIRQD_NONE
249 depends on LITMUS_LOCKING && !LITMUS_THREAD_ALL_SOFTIRQ
250 help
251 Schedule tasklets with known priorities in Litmus.
252
253config LITMUS_SOFTIRQD_NONE
254 bool "No tasklet scheduling in Litmus."
255 help
256 Don't schedule tasklets in Litmus. Default.
257
258config LITMUS_SOFTIRQD
259 bool "Spawn klitirqd interrupt handling threads."
260 help
261 Create klitirqd interrupt handling threads. Work must be
262 specifically dispatched to these workers. (Softirqs for
263 Litmus tasks are not magically redirected to klitirqd.)
264
265 G-EDF/RM, C-EDF/RM ONLY for now!
266
267
268config LITMUS_PAI_SOFTIRQD
269 bool "Defer tasklets to context switch points."
270 help
271 Only execute scheduled tasklet bottom halves at
272 scheduling points. Trades context switch overhead
273 at the cost of non-preemptive durations of bottom half
274 processing.
275
276 G-EDF/RM, C-EDF/RM ONLY for now!
277
278endchoice
279
280
281config NR_LITMUS_SOFTIRQD
282 int "Number of klitirqd threads."
283 depends on LITMUS_SOFTIRQD
284 range 1 4096
285 default "1"
286 help
287 Should be <= the number of CPUs in your system.
288
289config LITMUS_NVIDIA
290 bool "Litmus handling of NVIDIA interrupts."
291 depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD
292 default n
293 help
294 Direct tasklets from NVIDIA devices to Litmus's klitirqd.
295
296 If unsure, say No.
297
298config NV_DEVICE_NUM
299 int "Number of NVIDIA GPUs."
300 depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD
301 range 1 4096
302 default "1"
303 help
304 Should be <= the number of CPUs and
305 <= the number of GPUs in your system.
306
307choice
308 prompt "CUDA/Driver Version Support"
309 default CUDA_4_0
310 depends on LITMUS_NVIDIA
311 help
312 Select the version of CUDA/driver to support.
313
314config CUDA_4_0
315 bool "CUDA 4.0"
316 depends on LITMUS_NVIDIA
317 help
318 Support CUDA 4.0 RC2 (dev. driver version: x86_64-270.40)
319
320config CUDA_3_2
321 bool "CUDA 3.2"
322 depends on LITMUS_NVIDIA
323 help
324 Support CUDA 3.2 (dev. driver version: x86_64-260.24)
325
326endchoice
327
328endmenu
329
244endmenu 330endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
index c2449a761ea4..91fd32cb979d 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -18,7 +18,8 @@ obj-y = sched_plugin.o litmus.o \
18 binheap.o \ 18 binheap.o \
19 ctrldev.o \ 19 ctrldev.o \
20 sched_gsn_edf.o \ 20 sched_gsn_edf.o \
21 sched_psn_edf.o 21 sched_psn_edf.o \
22 kfmlp_lock.o
22 23
23obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o 24obj-$(CONFIG_PLUGIN_CEDF) += sched_cedf.o
24obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o 25obj-$(CONFIG_PLUGIN_PFAIR) += sched_pfair.o
@@ -30,3 +31,6 @@ obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
30obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o 31obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
31 32
32obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o 33obj-$(CONFIG_LITMUS_NESTED_LOCKING) += rsm_lock.o ikglp_lock.o
34obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o
35obj-$(CONFIG_LITMUS_PAI_SOFTIRQD) += litmus_pai_softirq.o
36obj-$(CONFIG_LITMUS_NVIDIA) += nvidia_info.o sched_trace_external.o
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index 4b65be7302be..989757cdcc5c 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -103,24 +103,33 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second)
103// (first_task->pid == second_task->pid && 103// (first_task->pid == second_task->pid &&
104// !second->rt_param.inh_task))); 104// !second->rt_param.inh_task)));
105 105
106 return !is_realtime(second_task) || 106 if (!is_realtime(second_task))
107 107 return true;
108 /* is the deadline of the first task earlier? 108
109 * Then it has higher priority. 109 if (earlier_deadline(first_task, second_task))
110 */ 110 return true;
111 earlier_deadline(first_task, second_task) || 111
112 112 if (get_deadline(first_task) == get_deadline(second_task)) {
113 /* Do we have a deadline tie? 113 if (shorter_period(first_task, second_task)) {
114 * Then break by PID. 114 return true;
115 */ 115 }
116 (get_deadline(first_task) == get_deadline(second_task) && 116 if (get_rt_period(first_task) == get_rt_period(second_task)) {
117 (first_task->pid < second_task->pid || 117#ifdef CONFIG_LITMUS_SOFTIRQD
118 118 if (first_task->rt_param.is_proxy_thread <
119 /* If the PIDs are the same then the task with the EFFECTIVE 119 second_task->rt_param.is_proxy_thread) {
120 * priority wins. 120 return true;
121 */ 121 }
122 (first_task->pid == second_task->pid && 122#endif
123 !second->rt_param.inh_task))); 123 if (first_task->pid < second_task->pid) {
124 return true;
125 }
126 if (first_task->pid == second_task->pid) {
127 return !second->rt_param.inh_task;
128 }
129 }
130 }
131
132 return false;
124} 133}
125 134
126 135
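The rewritten edf_higher_prio() above is, in effect, a lexicographic comparison on (deadline, period, proxy-thread flag, PID), with inheritance breaking the final equal-PID tie. A compact, hypothetical restatement of that ordering with integer stand-ins for the accessors; note that the hunk only tests the proxy flag in one direction (a non-proxy first task beats a proxy second task), so this sketch shows the symmetric reading of that tie-break:

/* Illustration only: fields mirror get_deadline(), get_rt_period(),
 * rt_param.is_proxy_thread, and pid as used in edf_higher_prio() above. */
struct edf_prio_key {
	unsigned long long deadline;
	unsigned long long period;
	int is_proxy_thread;	/* 0 = regular task, 1 = klitirqd proxy */
	int pid;
};

/* Returns nonzero iff 'a' has higher priority than 'b' (smaller key wins). */
static int edf_key_higher_prio(const struct edf_prio_key *a,
			       const struct edf_prio_key *b)
{
	if (a->deadline != b->deadline)
		return a->deadline < b->deadline;
	if (a->period != b->period)
		return a->period < b->period;
	if (a->is_proxy_thread != b->is_proxy_thread)
		return a->is_proxy_thread < b->is_proxy_thread;
	return a->pid < b->pid;
}

The equal-PID case (a task compared against an inheriting instance of itself) is handled in the hunk by !second->rt_param.inh_task and is intentionally left out of the sketch.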
diff --git a/litmus/fdso.c b/litmus/fdso.c
index 2000ba8a92f5..5ef858e59ab0 100644
--- a/litmus/fdso.c
+++ b/litmus/fdso.c
@@ -22,6 +22,7 @@ extern struct fdso_ops generic_lock_ops;
22 22
23static const struct fdso_ops* fdso_ops[] = { 23static const struct fdso_ops* fdso_ops[] = {
24 &generic_lock_ops, /* FMLP_SEM */ 24 &generic_lock_ops, /* FMLP_SEM */
25 &generic_lock_ops, /* KFMLP_SEM */
25 &generic_lock_ops, /* SRP_SEM */ 26 &generic_lock_ops, /* SRP_SEM */
26 &generic_lock_ops, /* RSM_MUTEX */ 27 &generic_lock_ops, /* RSM_MUTEX */
27 &generic_lock_ops, /* IKGLP_SEM */ 28 &generic_lock_ops, /* IKGLP_SEM */
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 7271af09a188..4a40c571d8c6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -21,6 +21,10 @@
21#include <litmus/affinity.h> 21#include <litmus/affinity.h>
22#endif 22#endif
23 23
24#ifdef CONFIG_LITMUS_NVIDIA
25#include <litmus/nvidia_info.h>
26#endif
27
24/* Number of RT tasks that exist in the system */ 28/* Number of RT tasks that exist in the system */
25atomic_t rt_task_count = ATOMIC_INIT(0); 29atomic_t rt_task_count = ATOMIC_INIT(0);
26static DEFINE_RAW_SPINLOCK(task_transition_lock); 30static DEFINE_RAW_SPINLOCK(task_transition_lock);
@@ -51,6 +55,28 @@ void bheap_node_free(struct bheap_node* hn)
51struct release_heap* release_heap_alloc(int gfp_flags); 55struct release_heap* release_heap_alloc(int gfp_flags);
52void release_heap_free(struct release_heap* rh); 56void release_heap_free(struct release_heap* rh);
53 57
58#ifdef CONFIG_LITMUS_NVIDIA
59/*
60 * sys_register_nv_device
61 * @nv_device_id: The Nvidia device id that the task want to register
62 * @reg_action: set to '1' to register the specified device. zero otherwise.
63 * Syscall for register task's designated nvidia device into NV_DEVICE_REG array
64 * Returns EFAULT if nv_device_id is out of range.
65 * 0 if success
66 */
67asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action)
68{
69 /* register the device to caller (aka 'current') */
70 return(reg_nv_device(nv_device_id, reg_action));
71}
72#else
73asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action)
74{
75 return(-EINVAL);
76}
77#endif
78
79
54/* 80/*
55 * sys_set_task_rt_param 81 * sys_set_task_rt_param
56 * @pid: Pid of the task which scheduling parameters must be changed 82 * @pid: Pid of the task which scheduling parameters must be changed
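Since sys_register_nv_device() is wired into the syscall table (see the arch/x86 hunk near the top of this patch) and exposed through the patched litmus/unistd_*.h headers, a task can claim a GPU from user space before submitting work to it. A minimal, hypothetical user-space sketch; the include path for the syscall number and the assumption that reg_action == 0 undoes the registration follow from the comment block above rather than from code shown here:

/* Build against the patched LITMUS headers so that __NR_register_nv_device
 * is defined (exact include path is installation-dependent; assumption). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

static long register_nv_device(int nv_device_id, int reg_action)
{
	return syscall(__NR_register_nv_device, nv_device_id, reg_action);
}

int main(void)
{
	const int gpu = 0;

	if (register_nv_device(gpu, 1) != 0) {	/* claim GPU 0 */
		perror("register_nv_device");
		return 1;
	}

	/* ... submit GPU work; interrupts for this device are now
	 *     attributed to the calling task ... */

	register_nv_device(gpu, 0);		/* drop the registration */
	return 0;
}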
@@ -135,6 +161,22 @@ asmlinkage long sys_set_rt_task_param(pid_t pid, struct rt_task __user * param)
135 161
136 target->rt_param.task_params = tp; 162 target->rt_param.task_params = tp;
137 163
164#ifdef CONFIG_LITMUS_SOFTIRQD
165 /* proxy thread off by default */
166 target->rt_param.is_proxy_thread = 0;
167 target->rt_param.cur_klitirqd = NULL;
168 //init_MUTEX(&target->rt_param.klitirqd_sem);
169 mutex_init(&target->rt_param.klitirqd_sem);
170 //init_completion(&target->rt_param.klitirqd_sem);
171 //target->rt_param.klitirqd_sem_stat = NOT_HELD;
172 atomic_set(&target->rt_param.klitirqd_sem_stat, NOT_HELD);
173#endif
174
175#ifdef CONFIG_LITMUS_NVIDIA
176 atomic_set(&target->rt_param.nv_int_count, 0);
177#endif
178
179
138 retval = 0; 180 retval = 0;
139 out_unlock: 181 out_unlock:
140 read_unlock_irq(&tasklist_lock); 182 read_unlock_irq(&tasklist_lock);
@@ -269,6 +311,7 @@ asmlinkage long sys_query_job_no(unsigned int __user *job)
269 return retval; 311 return retval;
270} 312}
271 313
314
272/* sys_null_call() is only used for determining raw system call 315/* sys_null_call() is only used for determining raw system call
273 * overheads (kernel entry, kernel exit). It has no useful side effects. 316 * overheads (kernel entry, kernel exit). It has no useful side effects.
274 * If ts is non-NULL, then the current Feather-Trace time is recorded. 317 * If ts is non-NULL, then the current Feather-Trace time is recorded.
@@ -282,7 +325,7 @@ asmlinkage long sys_null_call(cycles_t __user *ts)
282 now = get_cycles(); 325 now = get_cycles();
283 ret = put_user(now, ts); 326 ret = put_user(now, ts);
284 } 327 }
285 328
286 return ret; 329 return ret;
287} 330}
288 331
@@ -296,7 +339,6 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
296 binheap_order_t prio_order = NULL; 339 binheap_order_t prio_order = NULL;
297#endif 340#endif
298 341
299
300 if (restore) { 342 if (restore) {
301 /* Safe user-space provided configuration data. 343 /* Safe user-space provided configuration data.
302 * and allocated page. */ 344 * and allocated page. */
@@ -316,8 +358,20 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
316#ifdef CONFIG_LITMUS_NESTED_LOCKING 358#ifdef CONFIG_LITMUS_NESTED_LOCKING
317 WARN_ON(p->rt_param.blocked_lock); 359 WARN_ON(p->rt_param.blocked_lock);
318 WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks)); 360 WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks));
319 361#endif
320 //WARN_ON(p->rt_param.donor_data); 362
363#ifdef CONFIG_LITMUS_SOFTIRQD
364 /* We probably should not have any tasklets executing for
365 * us at this time.
366 */
367 WARN_ON(p->rt_param.cur_klitirqd);
368 WARN_ON(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD);
369
370 if(p->rt_param.cur_klitirqd)
371 flush_pending(p->rt_param.cur_klitirqd, p);
372
373 if(atomic_read(&p->rt_param.klitirqd_sem_stat) == HELD)
374 up_and_set_stat(p, NOT_HELD, &p->rt_param.klitirqd_sem);
321#endif 375#endif
322 376
323 /* Cleanup everything else. */ 377 /* Cleanup everything else. */
@@ -429,7 +483,7 @@ static void synch_on_plugin_switch(void* info)
429 */ 483 */
430int switch_sched_plugin(struct sched_plugin* plugin) 484int switch_sched_plugin(struct sched_plugin* plugin)
431{ 485{
432 unsigned long flags; 486 //unsigned long flags;
433 int ret = 0; 487 int ret = 0;
434 488
435 BUG_ON(!plugin); 489 BUG_ON(!plugin);
@@ -443,8 +497,15 @@ int switch_sched_plugin(struct sched_plugin* plugin)
443 while (atomic_read(&cannot_use_plugin) < num_online_cpus()) 497 while (atomic_read(&cannot_use_plugin) < num_online_cpus())
444 cpu_relax(); 498 cpu_relax();
445 499
500#ifdef CONFIG_LITMUS_SOFTIRQD
501 if(!klitirqd_is_dead())
502 {
503 kill_klitirqd();
504 }
505#endif
506
446 /* stop task transitions */ 507 /* stop task transitions */
447 raw_spin_lock_irqsave(&task_transition_lock, flags); 508 //raw_spin_lock_irqsave(&task_transition_lock, flags);
448 509
449 /* don't switch if there are active real-time tasks */ 510 /* don't switch if there are active real-time tasks */
450 if (atomic_read(&rt_task_count) == 0) { 511 if (atomic_read(&rt_task_count) == 0) {
@@ -462,7 +523,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
462 } else 523 } else
463 ret = -EBUSY; 524 ret = -EBUSY;
464out: 525out:
465 raw_spin_unlock_irqrestore(&task_transition_lock, flags); 526 //raw_spin_unlock_irqrestore(&task_transition_lock, flags);
466 atomic_set(&cannot_use_plugin, 0); 527 atomic_set(&cannot_use_plugin, 0);
467 return ret; 528 return ret;
468} 529}
@@ -495,7 +556,6 @@ void litmus_exec(void)
495 556
496 if (is_realtime(p)) { 557 if (is_realtime(p)) {
497 WARN_ON(p->rt_param.inh_task); 558 WARN_ON(p->rt_param.inh_task);
498 //WARN_ON(p->rt_param.donor_data);
499 if (tsk_rt(p)->ctrl_page) { 559 if (tsk_rt(p)->ctrl_page) {
500 free_page((unsigned long) tsk_rt(p)->ctrl_page); 560 free_page((unsigned long) tsk_rt(p)->ctrl_page);
501 tsk_rt(p)->ctrl_page = NULL; 561 tsk_rt(p)->ctrl_page = NULL;
diff --git a/litmus/litmus_pai_softirq.c b/litmus/litmus_pai_softirq.c
new file mode 100644
index 000000000000..b31eeb8a2538
--- /dev/null
+++ b/litmus/litmus_pai_softirq.c
@@ -0,0 +1,64 @@
1#include <linux/interrupt.h>
2#include <linux/percpu.h>
3#include <linux/cpu.h>
4#include <linux/kthread.h>
5#include <linux/ftrace.h>
6#include <linux/smp.h>
7#include <linux/slab.h>
8#include <linux/mutex.h>
9
10#include <linux/sched.h>
11#include <linux/cpuset.h>
12
13#include <litmus/litmus.h>
14#include <litmus/sched_trace.h>
15#include <litmus/jobs.h>
16#include <litmus/sched_plugin.h>
17#include <litmus/litmus_softirq.h>
18
19
20
21int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
22{
23 int ret = 0; /* assume failure */
24 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
25 {
26 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
27 BUG();
28 }
29
30 ret = litmus->enqueue_pai_tasklet(t);
31
32 return(ret);
33}
34
35EXPORT_SYMBOL(__litmus_tasklet_schedule);
36
37
38
39// failure causes default Linux handling.
40int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
41{
42 int ret = 0; /* assume failure */
43 return(ret);
44}
45EXPORT_SYMBOL(__litmus_tasklet_hi_schedule);
46
47
48// failure causes default Linux handling.
49int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id)
50{
51 int ret = 0; /* assume failure */
52 return(ret);
53}
54EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
55
56
57// failure causes default Linux handling.
58int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
59{
60 int ret = 0; /* assume failure */
61 return(ret);
62}
63EXPORT_SYMBOL(__litmus_schedule_work);
64
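__litmus_tasklet_schedule() above simply forwards the tasklet to the active plugin through litmus->enqueue_pai_tasklet(). Inferred only from that call site (a nonzero return tells the caller not to fall back to Linux), a plugin-side stub could look like the following; the queue, lock, and function name are hypothetical, and a real plugin (the C-EDF/G-EDF changes elsewhere in this patch) would drain the queue at its next scheduling point, as described in the Kconfig help text:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <litmus/litmus.h>

static DEFINE_RAW_SPINLOCK(demo_pai_lock);
static struct tasklet_struct *demo_pai_head;	/* simple LIFO, illustration only */

/* Hypothetical enqueue_pai_tasklet implementation: queue the tasklet for
 * deferred execution and return nonzero so the caller does not hand it
 * back to the regular softirq machinery. */
static int demo_enqueue_pai_tasklet(struct tasklet_struct *t)
{
	unsigned long flags;

	if (!t->owner || !is_realtime(t->owner))
		return 0;	/* let Linux handle it */

	raw_spin_lock_irqsave(&demo_pai_lock, flags);
	t->next = demo_pai_head;
	demo_pai_head = t;
	raw_spin_unlock_irqrestore(&demo_pai_lock, flags);

	return 1;
}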
diff --git a/litmus/litmus_proc.c b/litmus/litmus_proc.c
index 4bf725a36c9c..381513366c7a 100644
--- a/litmus/litmus_proc.c
+++ b/litmus/litmus_proc.c
@@ -20,11 +20,18 @@ static struct proc_dir_entry *litmus_dir = NULL,
20#ifdef CONFIG_RELEASE_MASTER 20#ifdef CONFIG_RELEASE_MASTER
21 *release_master_file = NULL, 21 *release_master_file = NULL,
22#endif 22#endif
23#ifdef CONFIG_LITMUS_SOFTIRQD
24 *klitirqd_file = NULL,
25#endif
23 *plugs_file = NULL; 26 *plugs_file = NULL;
24 27
25/* in litmus/sync.c */ 28/* in litmus/sync.c */
26int count_tasks_waiting_for_release(void); 29int count_tasks_waiting_for_release(void);
27 30
31extern int proc_read_klitirqd_stats(char *page, char **start,
32 off_t off, int count,
33 int *eof, void *data);
34
28static int proc_read_stats(char *page, char **start, 35static int proc_read_stats(char *page, char **start,
29 off_t off, int count, 36 off_t off, int count,
30 int *eof, void *data) 37 int *eof, void *data)
@@ -161,6 +168,12 @@ int __init init_litmus_proc(void)
161 release_master_file->write_proc = proc_write_release_master; 168 release_master_file->write_proc = proc_write_release_master;
162#endif 169#endif
163 170
171#ifdef CONFIG_LITMUS_SOFTIRQD
172 klitirqd_file =
173 create_proc_read_entry("klitirqd_stats", 0444, litmus_dir,
174 proc_read_klitirqd_stats, NULL);
175#endif
176
164 stat_file = create_proc_read_entry("stats", 0444, litmus_dir, 177 stat_file = create_proc_read_entry("stats", 0444, litmus_dir,
165 proc_read_stats, NULL); 178 proc_read_stats, NULL);
166 179
@@ -187,6 +200,10 @@ void exit_litmus_proc(void)
187 remove_proc_entry("stats", litmus_dir); 200 remove_proc_entry("stats", litmus_dir);
188 if (curr_file) 201 if (curr_file)
189 remove_proc_entry("active_plugin", litmus_dir); 202 remove_proc_entry("active_plugin", litmus_dir);
203#ifdef CONFIG_LITMUS_SOFTIRQD
204 if (klitirqd_file)
205 remove_proc_entry("klitirqd_stats", litmus_dir);
206#endif
190#ifdef CONFIG_RELEASE_MASTER 207#ifdef CONFIG_RELEASE_MASTER
191 if (release_master_file) 208 if (release_master_file)
192 remove_proc_entry("release_master", litmus_dir); 209 remove_proc_entry("release_master", litmus_dir);
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
new file mode 100644
index 000000000000..6b033508877d
--- /dev/null
+++ b/litmus/litmus_softirq.c
@@ -0,0 +1,1582 @@
1#include <linux/interrupt.h>
2#include <linux/percpu.h>
3#include <linux/cpu.h>
4#include <linux/kthread.h>
5#include <linux/ftrace.h>
6#include <linux/smp.h>
7#include <linux/slab.h>
8#include <linux/mutex.h>
9
10#include <linux/sched.h>
11#include <linux/cpuset.h>
12
13#include <litmus/litmus.h>
14#include <litmus/sched_trace.h>
15#include <litmus/jobs.h>
16#include <litmus/sched_plugin.h>
17#include <litmus/litmus_softirq.h>
18
19/* TODO: Remove unneeded mb() and other barriers. */
20
21
22/* counts number of daemons ready to handle litmus irqs. */
23static atomic_t num_ready_klitirqds = ATOMIC_INIT(0);
24
25enum pending_flags
26{
27 LIT_TASKLET_LOW = 0x1,
28 LIT_TASKLET_HI = LIT_TASKLET_LOW<<1,
29 LIT_WORK = LIT_TASKLET_HI<<1
30};
31
32/* only support tasklet processing for now. */
33struct tasklet_head
34{
35 struct tasklet_struct *head;
36 struct tasklet_struct **tail;
37};
38
39struct klitirqd_info
40{
41 struct task_struct* klitirqd;
42 struct task_struct* current_owner;
43 int terminating;
44
45
46 raw_spinlock_t lock;
47
48 u32 pending;
49 atomic_t num_hi_pending;
50 atomic_t num_low_pending;
51 atomic_t num_work_pending;
52
53 /* in order of priority */
54 struct tasklet_head pending_tasklets_hi;
55 struct tasklet_head pending_tasklets;
56 struct list_head worklist;
57};
58
59/* one list for each klitirqd */
60static struct klitirqd_info klitirqds[NR_LITMUS_SOFTIRQD];
61
62
63
64
65
66int proc_read_klitirqd_stats(char *page, char **start,
67 off_t off, int count,
68 int *eof, void *data)
69{
70 int len = snprintf(page, PAGE_SIZE,
71 "num ready klitirqds: %d\n\n",
72 atomic_read(&num_ready_klitirqds));
73
74 if(klitirqd_is_ready())
75 {
76 int i;
77 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
78 {
79 len +=
80 snprintf(page + len, PAGE_SIZE - len,
81 "klitirqd_th%d: %s/%d\n"
82 "\tcurrent_owner: %s/%d\n"
83 "\tpending: %x\n"
84 "\tnum hi: %d\n"
85 "\tnum low: %d\n"
86 "\tnum work: %d\n\n",
87 i,
88 klitirqds[i].klitirqd->comm, klitirqds[i].klitirqd->pid,
89 (klitirqds[i].current_owner != NULL) ?
90 klitirqds[i].current_owner->comm : "(null)",
91 (klitirqds[i].current_owner != NULL) ?
92 klitirqds[i].current_owner->pid : 0,
93 klitirqds[i].pending,
94 atomic_read(&klitirqds[i].num_hi_pending),
95 atomic_read(&klitirqds[i].num_low_pending),
96 atomic_read(&klitirqds[i].num_work_pending));
97 }
98 }
99
100 return(len);
101}
102
103
104
105
106
107#if 0
108static atomic_t dump_id = ATOMIC_INIT(0);
109
110static void __dump_state(struct klitirqd_info* which, const char* caller)
111{
112 struct tasklet_struct* list;
113
114 int id = atomic_inc_return(&dump_id);
115
116 //if(in_interrupt())
117 {
118 if(which->current_owner)
119 {
120 TRACE("(id: %d caller: %s)\n"
121 "klitirqd: %s/%d\n"
122 "current owner: %s/%d\n"
123 "pending: %x\n",
124 id, caller,
125 which->klitirqd->comm, which->klitirqd->pid,
126 which->current_owner->comm, which->current_owner->pid,
127 which->pending);
128 }
129 else
130 {
131 TRACE("(id: %d caller: %s)\n"
132 "klitirqd: %s/%d\n"
133 "current owner: %p\n"
134 "pending: %x\n",
135 id, caller,
136 which->klitirqd->comm, which->klitirqd->pid,
137 NULL,
138 which->pending);
139 }
140
141 list = which->pending_tasklets.head;
142 while(list)
143 {
144 struct tasklet_struct *t = list;
145 list = list->next; /* advance */
146 if(t->owner)
147 TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %s/%d\n", id, caller, t, t->owner->comm, t->owner->pid);
148 else
149 TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %p\n", id, caller, t, NULL);
150 }
151 }
152}
153
154static void dump_state(struct klitirqd_info* which, const char* caller)
155{
156 unsigned long flags;
157
158 raw_spin_lock_irqsave(&which->lock, flags);
159 __dump_state(which, caller);
160 raw_spin_unlock_irqrestore(&which->lock, flags);
161}
162#endif
163
164
165/* forward declarations */
166static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
167 struct klitirqd_info *which,
168 int wakeup);
169static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
170 struct klitirqd_info *which,
171 int wakeup);
172static void ___litmus_schedule_work(struct work_struct *w,
173 struct klitirqd_info *which,
174 int wakeup);
175
176
177
178inline unsigned int klitirqd_id(struct task_struct* tsk)
179{
180 int i;
181 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
182 {
183 if(klitirqds[i].klitirqd == tsk)
184 {
185 return i;
186 }
187 }
188
189 BUG();
190
191 return 0;
192}
193
194
195inline static u32 litirq_pending_hi_irqoff(struct klitirqd_info* which)
196{
197 return (which->pending & LIT_TASKLET_HI);
198}
199
200inline static u32 litirq_pending_low_irqoff(struct klitirqd_info* which)
201{
202 return (which->pending & LIT_TASKLET_LOW);
203}
204
205inline static u32 litirq_pending_work_irqoff(struct klitirqd_info* which)
206{
207 return (which->pending & LIT_WORK);
208}
209
210inline static u32 litirq_pending_irqoff(struct klitirqd_info* which)
211{
212 return(which->pending);
213}
214
215
216inline static u32 litirq_pending(struct klitirqd_info* which)
217{
218 unsigned long flags;
219 u32 pending;
220
221 raw_spin_lock_irqsave(&which->lock, flags);
222 pending = litirq_pending_irqoff(which);
223 raw_spin_unlock_irqrestore(&which->lock, flags);
224
225 return pending;
226};
227
228inline static u32 litirq_pending_with_owner(struct klitirqd_info* which, struct task_struct* owner)
229{
230 unsigned long flags;
231 u32 pending;
232
233 raw_spin_lock_irqsave(&which->lock, flags);
234 pending = litirq_pending_irqoff(which);
235 if(pending)
236 {
237 if(which->current_owner != owner)
238 {
239 pending = 0; // owner switch!
240 }
241 }
242 raw_spin_unlock_irqrestore(&which->lock, flags);
243
244 return pending;
245}
246
247
248inline static u32 litirq_pending_and_sem_and_owner(struct klitirqd_info* which,
249 struct mutex** sem,
250 struct task_struct** t)
251{
252 unsigned long flags;
253 u32 pending;
254
255 /* init values */
256 *sem = NULL;
257 *t = NULL;
258
259 raw_spin_lock_irqsave(&which->lock, flags);
260
261 pending = litirq_pending_irqoff(which);
262 if(pending)
263 {
264 if(which->current_owner != NULL)
265 {
266 *t = which->current_owner;
267 *sem = &tsk_rt(which->current_owner)->klitirqd_sem;
268 }
269 else
270 {
271 BUG();
272 }
273 }
274 raw_spin_unlock_irqrestore(&which->lock, flags);
275
276 if(likely(*sem))
277 {
278 return pending;
279 }
280 else
281 {
282 return 0;
283 }
284}
285
286/* returns true if the next piece of work to do is from a different owner.
287 */
288static int tasklet_ownership_change(
289 struct klitirqd_info* which,
290 enum pending_flags taskletQ)
291{
292 /* this function doesn't have to look at work objects since they have
293 priority below tasklets. */
294
295 unsigned long flags;
296 int ret = 0;
297
298 raw_spin_lock_irqsave(&which->lock, flags);
299
300 switch(taskletQ)
301 {
302 case LIT_TASKLET_HI:
303 if(litirq_pending_hi_irqoff(which))
304 {
305 ret = (which->pending_tasklets_hi.head->owner !=
306 which->current_owner);
307 }
308 break;
309 case LIT_TASKLET_LOW:
310 if(litirq_pending_low_irqoff(which))
311 {
312 ret = (which->pending_tasklets.head->owner !=
313 which->current_owner);
314 }
315 break;
316 default:
317 break;
318 }
319
320 raw_spin_unlock_irqrestore(&which->lock, flags);
321
322 TRACE_TASK(which->klitirqd, "ownership change needed: %d\n", ret);
323
324 return ret;
325}
326
327
328static void __reeval_prio(struct klitirqd_info* which)
329{
330 struct task_struct* next_owner = NULL;
331 struct task_struct* klitirqd = which->klitirqd;
332
333 /* Check in prio-order */
334 u32 pending = litirq_pending_irqoff(which);
335
336 //__dump_state(which, "__reeval_prio: before");
337
338 if(pending)
339 {
340 if(pending & LIT_TASKLET_HI)
341 {
342 next_owner = which->pending_tasklets_hi.head->owner;
343 }
344 else if(pending & LIT_TASKLET_LOW)
345 {
346 next_owner = which->pending_tasklets.head->owner;
347 }
348 else if(pending & LIT_WORK)
349 {
350 struct work_struct* work =
351 list_first_entry(&which->worklist, struct work_struct, entry);
352 next_owner = work->owner;
353 }
354 }
355
356 if(next_owner != which->current_owner)
357 {
358 struct task_struct* old_owner = which->current_owner;
359
360 /* bind the next owner. */
361 which->current_owner = next_owner;
362 mb();
363
364 if(next_owner != NULL)
365 {
366 if(!in_interrupt())
367 {
368 TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
369 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
370 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
371 next_owner->comm, next_owner->pid);
372 }
373 else
374 {
375 TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__,
376 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->comm,
377 ((tsk_rt(klitirqd)->inh_task) ? tsk_rt(klitirqd)->inh_task : klitirqd)->pid,
378 next_owner->comm, next_owner->pid);
379 }
380
381 litmus->increase_prio_inheritance_klitirqd(klitirqd, old_owner, next_owner);
382 }
383 else
384 {
385 if(likely(!in_interrupt()))
386 {
387 TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n",
388 __FUNCTION__, klitirqd->comm, klitirqd->pid);
389 }
390 else
391 {
392 // is this a bug?
393 TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n",
394 __FUNCTION__, klitirqd->comm, klitirqd->pid);
395 }
396
397 BUG_ON(pending != 0);
398 litmus->decrease_prio_inheritance_klitirqd(klitirqd, old_owner, NULL);
399 }
400 }
401
402 //__dump_state(which, "__reeval_prio: after");
403}
404
405static void reeval_prio(struct klitirqd_info* which)
406{
407 unsigned long flags;
408
409 raw_spin_lock_irqsave(&which->lock, flags);
410 __reeval_prio(which);
411 raw_spin_unlock_irqrestore(&which->lock, flags);
412}
413
414
415static void wakeup_litirqd_locked(struct klitirqd_info* which)
416{
417 /* Interrupts are disabled: no need to stop preemption */
418 if (which && which->klitirqd)
419 {
420 __reeval_prio(which); /* configure the proper priority */
421
422 if(which->klitirqd->state != TASK_RUNNING)
423 {
424 TRACE("%s: Waking up klitirqd: %s/%d\n", __FUNCTION__,
425 which->klitirqd->comm, which->klitirqd->pid);
426
427 wake_up_process(which->klitirqd);
428 }
429 }
430}
431
432
433static void do_lit_tasklet(struct klitirqd_info* which,
434 struct tasklet_head* pending_tasklets)
435{
436 unsigned long flags;
437 struct tasklet_struct *list;
438 atomic_t* count;
439
440 raw_spin_lock_irqsave(&which->lock, flags);
441
442 //__dump_state(which, "do_lit_tasklet: before steal");
443
444 /* copy out the tasklets for our private use. */
445 list = pending_tasklets->head;
446 pending_tasklets->head = NULL;
447 pending_tasklets->tail = &pending_tasklets->head;
448
449 /* remove pending flag */
450 which->pending &= (pending_tasklets == &which->pending_tasklets) ?
451 ~LIT_TASKLET_LOW :
452 ~LIT_TASKLET_HI;
453
454 count = (pending_tasklets == &which->pending_tasklets) ?
455 &which->num_low_pending:
456 &which->num_hi_pending;
457
458 //__dump_state(which, "do_lit_tasklet: after steal");
459
460 raw_spin_unlock_irqrestore(&which->lock, flags);
461
462
463 while(list)
464 {
465 struct tasklet_struct *t = list;
466
467 /* advance, lest we forget */
468 list = list->next;
469
470 /* execute tasklet if it has my priority and is free */
471 if ((t->owner == which->current_owner) && tasklet_trylock(t)) {
472 if (!atomic_read(&t->count)) {
473
474 sched_trace_tasklet_begin(t->owner);
475
476 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
477 {
478 BUG();
479 }
480 TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__);
481 t->func(t->data);
482 tasklet_unlock(t);
483
484 atomic_dec(count);
485
486 sched_trace_tasklet_end(t->owner, 0ul);
487
488 continue; /* process more tasklets */
489 }
490 tasklet_unlock(t);
491 }
492
493 TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__);
494
495 /* couldn't process tasklet. put it back at the end of the queue. */
496 if(pending_tasklets == &which->pending_tasklets)
497 ___litmus_tasklet_schedule(t, which, 0);
498 else
499 ___litmus_tasklet_hi_schedule(t, which, 0);
500 }
501}
502
503
504// returns 1 if priorities need to be changed to continue processing
505// pending tasklets.
506static int do_litirq(struct klitirqd_info* which)
507{
508 u32 pending;
509 int resched = 0;
510
511 if(in_interrupt())
512 {
513 TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__);
514 return(0);
515 }
516
517 if(which->klitirqd != current)
518 {
519 TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n",
520 __FUNCTION__, current->comm, current->pid,
521 which->klitirqd->comm, which->klitirqd->pid);
522 return(0);
523 }
524
525 if(!is_realtime(current))
526 {
527 TRACE_CUR("%s: exiting early: klitirqd is not real-time. Sched Policy = %d\n",
528 __FUNCTION__, current->policy);
529 return(0);
530 }
531
532
533 /* We only handle tasklets & work objects, no need for RCU triggers? */
534
535 pending = litirq_pending(which);
536 if(pending)
537 {
538 /* extract the work to do and do it! */
539 if(pending & LIT_TASKLET_HI)
540 {
541 TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__);
542 do_lit_tasklet(which, &which->pending_tasklets_hi);
543 resched = tasklet_ownership_change(which, LIT_TASKLET_HI);
544
545 if(resched)
546 {
547 TRACE_CUR("%s: HI tasklets of another owner remain. "
548 "Skipping any LOW tasklets.\n", __FUNCTION__);
549 }
550 }
551
552 if(!resched && (pending & LIT_TASKLET_LOW))
553 {
554 TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__);
555 do_lit_tasklet(which, &which->pending_tasklets);
556 resched = tasklet_ownership_change(which, LIT_TASKLET_LOW);
557
558 if(resched)
559 {
560 TRACE_CUR("%s: LOW tasklets of another owner remain. "
561 "Skipping any work objects.\n", __FUNCTION__);
562 }
563 }
564 }
565
566 return(resched);
567}
568
569
570static void do_work(struct klitirqd_info* which)
571{
572 unsigned long flags;
573 work_func_t f;
574 struct work_struct* work;
575
576 // only execute one work-queue item to yield to tasklets.
577 // ...is this a good idea, or should we just batch them?
578 raw_spin_lock_irqsave(&which->lock, flags);
579
580 if(!litirq_pending_work_irqoff(which))
581 {
582 raw_spin_unlock_irqrestore(&which->lock, flags);
583 goto no_work;
584 }
585
586 work = list_first_entry(&which->worklist, struct work_struct, entry);
587 list_del_init(&work->entry);
588
589 if(list_empty(&which->worklist))
590 {
591 which->pending &= ~LIT_WORK;
592 }
593
594 raw_spin_unlock_irqrestore(&which->lock, flags);
595
596
597
598 /* safe to read current_owner outside of lock since only this thread
599 may write to the pointer. */
600 if(work->owner == which->current_owner)
601 {
602 TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__);
603 // do the work!
604 work_clear_pending(work);
605 f = work->func;
606 f(work); /* can't touch 'work' after this point,
607 the user may have freed it. */
608
609 atomic_dec(&which->num_work_pending);
610 }
611 else
612 {
613 TRACE_CUR("%s: Could not invoke work object. Requeuing.\n",
614 __FUNCTION__);
615 ___litmus_schedule_work(work, which, 0);
616 }
617
618no_work:
619 return;
620}
621
622
623static int set_litmus_daemon_sched(void)
624{
625 /* set up a daemon job that will never complete.
626 it should only ever run on behalf of another
627 real-time task.
628
629 TODO: Transition to a new job whenever a
630 new tasklet is handled */
631
632 int ret = 0;
633
634 struct rt_task tp = {
635 .exec_cost = 0,
636 .period = 1000000000, /* dummy 1 second period */
637 .phase = 0,
638 .cpu = task_cpu(current),
639 .budget_policy = NO_ENFORCEMENT,
640 .cls = RT_CLASS_BEST_EFFORT
641 };
642
643 struct sched_param param = { .sched_priority = 0};
644
645
646 /* set task params, mark as proxy thread, and init other data */
647 tsk_rt(current)->task_params = tp;
648 tsk_rt(current)->is_proxy_thread = 1;
649 tsk_rt(current)->cur_klitirqd = NULL;
650 mutex_init(&tsk_rt(current)->klitirqd_sem);
651 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, NOT_HELD);
652
653 /* inform the OS we're SCHED_LITMUS --
654 sched_setscheduler_nocheck() calls litmus_admit_task(). */
655 sched_setscheduler_nocheck(current, SCHED_LITMUS, &param);
656
657 return ret;
658}
659
660static void enter_execution_phase(struct klitirqd_info* which,
661 struct mutex* sem,
662 struct task_struct* t)
663{
664 TRACE_CUR("%s: Trying to enter execution phase. "
665 "Acquiring semaphore of %s/%d\n", __FUNCTION__,
666 t->comm, t->pid);
667 down_and_set_stat(current, HELD, sem);
668 TRACE_CUR("%s: Execution phase entered! "
669 "Acquired semaphore of %s/%d\n", __FUNCTION__,
670 t->comm, t->pid);
671}
672
673static void exit_execution_phase(struct klitirqd_info* which,
674 struct mutex* sem,
675 struct task_struct* t)
676{
677 TRACE_CUR("%s: Exiting execution phase. "
678 "Releasing semaphore of %s/%d\n", __FUNCTION__,
679 t->comm, t->pid);
680 if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) == HELD)
681 {
682 up_and_set_stat(current, NOT_HELD, sem);
683 TRACE_CUR("%s: Execution phase exited! "
684 "Released semaphore of %s/%d\n", __FUNCTION__,
685 t->comm, t->pid);
686 }
687 else
688 {
689 TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__);
690 }
691}
692
693/* main loop for klitsoftirqd */
694static int run_klitirqd(void* unused)
695{
696 struct klitirqd_info* which = &klitirqds[klitirqd_id(current)];
697 struct mutex* sem;
698 struct task_struct* owner;
699
700 int rt_status = set_litmus_daemon_sched();
701
702 if(rt_status != 0)
703 {
704 TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__);
705 goto rt_failed;
706 }
707
708 atomic_inc(&num_ready_klitirqds);
709
710 set_current_state(TASK_INTERRUPTIBLE);
711
712 while (!kthread_should_stop())
713 {
714 preempt_disable();
715 if (!litirq_pending(which))
716 {
717 /* sleep for work */
718 TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n",
719 __FUNCTION__);
720 preempt_enable_no_resched();
721 schedule();
722
723 if(kthread_should_stop()) /* bail out */
724 {
725 TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__);
726 continue;
727 }
728
729 preempt_disable();
730 }
731
732 __set_current_state(TASK_RUNNING);
733
734 while (litirq_pending_and_sem_and_owner(which, &sem, &owner))
735 {
736 int needs_resched = 0;
737
738 preempt_enable_no_resched();
739
740 BUG_ON(sem == NULL);
741
742 // wait to enter execution phase; wait for 'current_owner' to block.
743 enter_execution_phase(which, sem, owner);
744
745 if(kthread_should_stop())
746 {
747 TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__);
748 break;
749 }
750
751 preempt_disable();
752
753 /* Double check that there's still pending work and the owner hasn't
754 * changed. Pending items may have been flushed while we were sleeping.
755 */
756 if(litirq_pending_with_owner(which, owner))
757 {
758 TRACE_CUR("%s: Executing tasklets and/or work objects.\n",
759 __FUNCTION__);
760
761 needs_resched = do_litirq(which);
762
763 preempt_enable_no_resched();
764
765 // work objects are preemptible.
766 if(!needs_resched)
767 {
768 do_work(which);
769 }
770
771 // exit execution phase.
772 exit_execution_phase(which, sem, owner);
773
774 TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__);
775 reeval_prio(which); /* check if we need to change priority here */
776 }
777 else
778 {
779 TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n",
780 __FUNCTION__,
781 owner->comm, owner->pid);
782 preempt_enable_no_resched();
783
784 // exit execution phase.
785 exit_execution_phase(which, sem, owner);
786 }
787
788 cond_resched();
789 preempt_disable();
790 }
791 preempt_enable();
792 set_current_state(TASK_INTERRUPTIBLE);
793 }
794 __set_current_state(TASK_RUNNING);
795
796 atomic_dec(&num_ready_klitirqds);
797
798rt_failed:
799 litmus_exit_task(current);
800
801 return rt_status;
802}
803
804
805struct klitirqd_launch_data
806{
807 int* cpu_affinity;
808 struct work_struct work;
809};
810
811/* executed by a kworker from workqueues */
812static void launch_klitirqd(struct work_struct *work)
813{
814 int i;
815
816 struct klitirqd_launch_data* launch_data =
817 container_of(work, struct klitirqd_launch_data, work);
818
819 TRACE("%s: Creating %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
820
821 /* create the daemon threads */
822 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
823 {
824 if(launch_data->cpu_affinity)
825 {
826 klitirqds[i].klitirqd =
827 kthread_create(
828 run_klitirqd,
829 /* treat the affinity as a pointer, we'll cast it back later */
830 (void*)(long long)launch_data->cpu_affinity[i],
831 "klitirqd_th%d/%d",
832 i,
833 launch_data->cpu_affinity[i]);
834
835 /* litmus will put it in the right cluster. */
836 kthread_bind(klitirqds[i].klitirqd, launch_data->cpu_affinity[i]);
837 }
838 else
839 {
840 klitirqds[i].klitirqd =
841 kthread_create(
842 run_klitirqd,
843 /* treat the affinity as a pointer, we'll cast it back later */
844 (void*)(long long)(-1),
845 "klitirqd_th%d",
846 i);
847 }
848 }
849
850 TRACE("%s: Launching %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
851
852 /* unleash the daemons */
853 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
854 {
855 wake_up_process(klitirqds[i].klitirqd);
856 }
857
858 if(launch_data->cpu_affinity)
859 kfree(launch_data->cpu_affinity);
860 kfree(launch_data);
861}
862
863
864void spawn_klitirqd(int* affinity)
865{
866 int i;
867 struct klitirqd_launch_data* delayed_launch;
868
869 if(atomic_read(&num_ready_klitirqds) != 0)
870 {
871 TRACE("%s: At least one klitirqd is already running! Need to call kill_klitirqd()?\n", __FUNCTION__);
872 return;
873 }
874
875 /* init the tasklet & work queues */
876 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
877 {
878 klitirqds[i].terminating = 0;
879 klitirqds[i].pending = 0;
880
881 klitirqds[i].num_hi_pending.counter = 0;
882 klitirqds[i].num_low_pending.counter = 0;
883 klitirqds[i].num_work_pending.counter = 0;
884
885 klitirqds[i].pending_tasklets_hi.head = NULL;
886 klitirqds[i].pending_tasklets_hi.tail = &klitirqds[i].pending_tasklets_hi.head;
887
888 klitirqds[i].pending_tasklets.head = NULL;
889 klitirqds[i].pending_tasklets.tail = &klitirqds[i].pending_tasklets.head;
890
891 INIT_LIST_HEAD(&klitirqds[i].worklist);
892
893 raw_spin_lock_init(&klitirqds[i].lock);
894 }
895
896 /* wait to flush the initializations to memory since other threads
897 will access it. */
898 mb();
899
900 /* tell a work queue to launch the threads. we can't make scheduling
901 calls since we're in an atomic state. */
902 TRACE("%s: Setting callback up to launch klitirqds\n", __FUNCTION__);
903 delayed_launch = kmalloc(sizeof(struct klitirqd_launch_data), GFP_ATOMIC);
904 if(affinity)
905 {
906 delayed_launch->cpu_affinity =
907 kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC);
908
909 memcpy(delayed_launch->cpu_affinity, affinity,
910 sizeof(int)*NR_LITMUS_SOFTIRQD);
911 }
912 else
913 {
914 delayed_launch->cpu_affinity = NULL;
915 }
916 INIT_WORK(&delayed_launch->work, launch_klitirqd);
917 schedule_work(&delayed_launch->work);
918}
919
920
921void kill_klitirqd(void)
922{
923 if(!klitirqd_is_dead())
924 {
925 int i;
926
927 TRACE("%s: Killing %d klitirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD);
928
929 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
930 {
931 if(klitirqds[i].terminating != 1)
932 {
933 klitirqds[i].terminating = 1;
934 mb(); /* just to be sure? */
935 flush_pending(klitirqds[i].klitirqd, NULL);
936
937 /* signal termination */
938 kthread_stop(klitirqds[i].klitirqd);
939 }
940 }
941 }
942}
943
944
945int klitirqd_is_ready(void)
946{
947 return(atomic_read(&num_ready_klitirqds) == NR_LITMUS_SOFTIRQD);
948}
949
950int klitirqd_is_dead(void)
951{
952 return(atomic_read(&num_ready_klitirqds) == 0);
953}
954
955
956struct task_struct* get_klitirqd(unsigned int k_id)
957{
958 return(klitirqds[k_id].klitirqd);
959}
960
961
962void flush_pending(struct task_struct* klitirqd_thread,
963 struct task_struct* owner)
964{
965 unsigned int k_id = klitirqd_id(klitirqd_thread);
966 struct klitirqd_info *which = &klitirqds[k_id];
967
968 unsigned long flags;
969 struct tasklet_struct *list;
970
971 u32 work_flushed = 0;
972
973 raw_spin_lock_irqsave(&which->lock, flags);
974
975 //__dump_state(which, "flush_pending: before");
976
977 // flush hi tasklets.
978 if(litirq_pending_hi_irqoff(which))
979 {
980 which->pending &= ~LIT_TASKLET_HI;
981
982 list = which->pending_tasklets_hi.head;
983 which->pending_tasklets_hi.head = NULL;
984 which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head;
985
986 TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__);
987
988 while(list)
989 {
990 struct tasklet_struct *t = list;
991 list = list->next;
992
993 if(likely((t->owner == owner) || (owner == NULL)))
994 {
995 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
996 {
997 BUG();
998 }
999
1000 work_flushed |= LIT_TASKLET_HI;
1001
1002 t->owner = NULL;
1003
1004 // WTF?
1005 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
1006 {
1007 atomic_dec(&which->num_hi_pending);
1008 ___tasklet_hi_schedule(t);
1009 }
1010 else
1011 {
1012 TRACE("%s: dropped hi tasklet??\n", __FUNCTION__);
1013 BUG();
1014 }
1015 }
1016 else
1017 {
1018 TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__);
1019 // put back on queue.
1020 ___litmus_tasklet_hi_schedule(t, which, 0);
1021 }
1022 }
1023 }
1024
1025 // flush low tasklets.
1026 if(litirq_pending_low_irqoff(which))
1027 {
1028 which->pending &= ~LIT_TASKLET_LOW;
1029
1030 list = which->pending_tasklets.head;
1031 which->pending_tasklets.head = NULL;
1032 which->pending_tasklets.tail = &which->pending_tasklets.head;
1033
1034 TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__);
1035
1036 while(list)
1037 {
1038 struct tasklet_struct *t = list;
1039 list = list->next;
1040
1041 if(likely((t->owner == owner) || (owner == NULL)))
1042 {
1043 if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)))
1044 {
1045 BUG();
1046 }
1047
1048 work_flushed |= LIT_TASKLET_LOW;
1049
1050 t->owner = NULL;
1051 sched_trace_tasklet_end(owner, 1ul);
1052
1053 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
1054 {
1055 atomic_dec(&which->num_low_pending);
1056 ___tasklet_schedule(t);
1057 }
1058 else
1059 {
1060 TRACE("%s: dropped tasklet??\n", __FUNCTION__);
1061 BUG();
1062 }
1063 }
1064 else
1065 {
1066 TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__);
1067 // put back on queue
1068 ___litmus_tasklet_schedule(t, which, 0);
1069 }
1070 }
1071 }
1072
1073 // flush work objects
1074 if(litirq_pending_work_irqoff(which))
1075 {
1076 which->pending &= ~LIT_WORK;
1077
1078 TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__);
1079
1080 while(!list_empty(&which->worklist))
1081 {
1082 struct work_struct* work =
1083 list_first_entry(&which->worklist, struct work_struct, entry);
1084 list_del_init(&work->entry);
1085
1086 if(likely((work->owner == owner) || (owner == NULL)))
1087 {
1088 work_flushed |= LIT_WORK;
1089 atomic_dec(&which->num_work_pending);
1090
1091 work->owner = NULL;
1092 sched_trace_work_end(owner, current, 1ul);
1093 __schedule_work(work);
1094 }
1095 else
1096 {
1097 TRACE("%s: Could not flush a work object.\n", __FUNCTION__);
1098 // put back on queue
1099 ___litmus_schedule_work(work, which, 0);
1100 }
1101 }
1102 }
1103
1104 //__dump_state(which, "flush_pending: after (before reeval prio)");
1105
1106
1107 mb(); /* commit changes to pending flags */
1108
1109 /* reset the scheduling priority */
1110 if(work_flushed)
1111 {
1112 __reeval_prio(which);
1113
1114 /* Try to offload flushed tasklets to Linux's ksoftirqd. */
1115 if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI))
1116 {
1117 wakeup_softirqd();
1118 }
1119 }
1120 else
1121 {
1122 TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__);
1123 }
1124
1125 raw_spin_unlock_irqrestore(&which->lock, flags);
1126}
1127
1128
1129
1130
1131static void ___litmus_tasklet_schedule(struct tasklet_struct *t,
1132 struct klitirqd_info *which,
1133 int wakeup)
1134{
1135 unsigned long flags;
1136 u32 old_pending;
1137
1138 t->next = NULL;
1139
1140 raw_spin_lock_irqsave(&which->lock, flags);
1141
1142 //__dump_state(which, "___litmus_tasklet_schedule: before queuing");
1143
1144 *(which->pending_tasklets.tail) = t;
1145 which->pending_tasklets.tail = &t->next;
1146
1147 old_pending = which->pending;
1148 which->pending |= LIT_TASKLET_LOW;
1149
1150 atomic_inc(&which->num_low_pending);
1151
1152 mb();
1153
1154 if(!old_pending && wakeup)
1155 {
1156 wakeup_litirqd_locked(which); /* wake up the klitirqd */
1157 }
1158
1159 //__dump_state(which, "___litmus_tasklet_schedule: after queuing");
1160
1161 raw_spin_unlock_irqrestore(&which->lock, flags);
1162}
1163
1164int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id)
1165{
1166 int ret = 0; /* assume failure */
1167 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1168 {
1169 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1170 BUG();
1171 }
1172
1173 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1174 {
1175 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
1176 BUG();
1177 }
1178
1179 if(likely(!klitirqds[k_id].terminating))
1180 {
1181 /* Can't accept tasklets while we're processing a workqueue
1182 because they're handled by the same thread. This case is
1183 very RARE.
1184
1185		   TODO: Use a separate thread for work objects.
1186 */
1187 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1188 {
1189 ret = 1;
1190 ___litmus_tasklet_schedule(t, &klitirqds[k_id], 1);
1191 }
1192 else
1193 {
1194 TRACE("%s: rejected tasklet because of pending work.\n",
1195 __FUNCTION__);
1196 }
1197 }
1198 return(ret);
1199}
1200
1201EXPORT_SYMBOL(__litmus_tasklet_schedule);
1202
1203
1204static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t,
1205 struct klitirqd_info *which,
1206 int wakeup)
1207{
1208 unsigned long flags;
1209 u32 old_pending;
1210
1211 t->next = NULL;
1212
1213 raw_spin_lock_irqsave(&which->lock, flags);
1214
1215 *(which->pending_tasklets_hi.tail) = t;
1216 which->pending_tasklets_hi.tail = &t->next;
1217
1218 old_pending = which->pending;
1219 which->pending |= LIT_TASKLET_HI;
1220
1221 atomic_inc(&which->num_hi_pending);
1222
1223 mb();
1224
1225 if(!old_pending && wakeup)
1226 {
1227 wakeup_litirqd_locked(which); /* wake up the klitirqd */
1228 }
1229
1230 raw_spin_unlock_irqrestore(&which->lock, flags);
1231}
1232
1233int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id)
1234{
1235 int ret = 0; /* assume failure */
1236 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1237 {
1238 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1239 BUG();
1240 }
1241
1242 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1243 {
1244 TRACE("%s: No klitirqd_th%d!\n", __FUNCTION__, k_id);
1245 BUG();
1246 }
1247
1248 if(unlikely(!klitirqd_is_ready()))
1249 {
1250		TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
1251 BUG();
1252 }
1253
1254 if(likely(!klitirqds[k_id].terminating))
1255 {
1256 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1257 {
1258 ret = 1;
1259 ___litmus_tasklet_hi_schedule(t, &klitirqds[k_id], 1);
1260 }
1261 else
1262 {
1263 TRACE("%s: rejected tasklet because of pending work.\n",
1264 __FUNCTION__);
1265 }
1266 }
1267 return(ret);
1268}
1269
1270EXPORT_SYMBOL(__litmus_tasklet_hi_schedule);
1271
1272
1273int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id)
1274{
1275 int ret = 0; /* assume failure */
1276 u32 old_pending;
1277
1278 BUG_ON(!irqs_disabled());
1279
1280 if(unlikely((t->owner == NULL) || !is_realtime(t->owner)))
1281 {
1282 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
1283 BUG();
1284 }
1285
1286 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1287 {
1288 TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
1289 BUG();
1290 }
1291
1292 if(unlikely(!klitirqd_is_ready()))
1293 {
1294		TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
1295 BUG();
1296 }
1297
1298 if(likely(!klitirqds[k_id].terminating))
1299 {
1300 raw_spin_lock(&klitirqds[k_id].lock);
1301
1302 if(likely(atomic_read(&klitirqds[k_id].num_work_pending) == 0))
1303 {
1304 ret = 1; // success!
1305
1306 t->next = klitirqds[k_id].pending_tasklets_hi.head;
1307 klitirqds[k_id].pending_tasklets_hi.head = t;
1308
1309 old_pending = klitirqds[k_id].pending;
1310 klitirqds[k_id].pending |= LIT_TASKLET_HI;
1311
1312 atomic_inc(&klitirqds[k_id].num_hi_pending);
1313
1314 mb();
1315
1316 if(!old_pending)
1317 wakeup_litirqd_locked(&klitirqds[k_id]); /* wake up the klitirqd */
1318 }
1319 else
1320 {
1321 TRACE("%s: rejected tasklet because of pending work.\n",
1322 __FUNCTION__);
1323 }
1324
1325 raw_spin_unlock(&klitirqds[k_id].lock);
1326 }
1327 return(ret);
1328}
1329
1330EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first);
1331
1332
1333
1334static void ___litmus_schedule_work(struct work_struct *w,
1335 struct klitirqd_info *which,
1336 int wakeup)
1337{
1338 unsigned long flags;
1339 u32 old_pending;
1340
1341 raw_spin_lock_irqsave(&which->lock, flags);
1342
1343 work_pending(w);
1344 list_add_tail(&w->entry, &which->worklist);
1345
1346 old_pending = which->pending;
1347 which->pending |= LIT_WORK;
1348
1349 atomic_inc(&which->num_work_pending);
1350
1351 mb();
1352
1353 if(!old_pending && wakeup)
1354 {
1355 wakeup_litirqd_locked(which); /* wakeup the klitirqd */
1356 }
1357
1358 raw_spin_unlock_irqrestore(&which->lock, flags);
1359}
1360
1361int __litmus_schedule_work(struct work_struct *w, unsigned int k_id)
1362{
1363 int ret = 1; /* assume success */
1364	if(unlikely((w->owner == NULL) || !is_realtime(w->owner)))
1365 {
1366 TRACE("%s: No owner associated with this work object!\n", __FUNCTION__);
1367 BUG();
1368 }
1369
1370 if(unlikely(k_id >= NR_LITMUS_SOFTIRQD))
1371 {
1372		TRACE("%s: No klitirqd_th%u!\n", __FUNCTION__, k_id);
1373 BUG();
1374 }
1375
1376 if(unlikely(!klitirqd_is_ready()))
1377 {
1378		TRACE("%s: klitirqd is not ready!\n", __FUNCTION__);
1379 BUG();
1380 }
1381
1382 if(likely(!klitirqds[k_id].terminating))
1383 ___litmus_schedule_work(w, &klitirqds[k_id], 1);
1384 else
1385 ret = 0;
1386 return(ret);
1387}
1388EXPORT_SYMBOL(__litmus_schedule_work);
1389
1390
1391static int set_klitirqd_sem_status(unsigned long stat)
1392{
1393 TRACE_CUR("SETTING STATUS FROM %d TO %d\n",
1394 atomic_read(&tsk_rt(current)->klitirqd_sem_stat),
1395 stat);
1396 atomic_set(&tsk_rt(current)->klitirqd_sem_stat, stat);
1397 //mb();
1398
1399 return(0);
1400}
1401
1402static int set_klitirqd_sem_status_if_not_held(unsigned long stat)
1403{
1404 if(atomic_read(&tsk_rt(current)->klitirqd_sem_stat) != HELD)
1405 {
1406 return(set_klitirqd_sem_status(stat));
1407 }
1408 return(-1);
1409}
1410
1411
1412void __down_and_reset_and_set_stat(struct task_struct* t,
1413 enum klitirqd_sem_status to_reset,
1414 enum klitirqd_sem_status to_set,
1415 struct mutex* sem)
1416{
1417#if 0
1418 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
1419 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1420
1421 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
1422 __FUNCTION__, task->comm, task->pid);
1423#endif
1424
1425 mutex_lock_sfx(sem,
1426 set_klitirqd_sem_status_if_not_held, to_reset,
1427 set_klitirqd_sem_status, to_set);
1428#if 0
1429 TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
1430 __FUNCTION__, task->comm, task->pid);
1431#endif
1432}
1433
1434void down_and_set_stat(struct task_struct* t,
1435 enum klitirqd_sem_status to_set,
1436 struct mutex* sem)
1437{
1438#if 0
1439 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
1440 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1441
1442 TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n",
1443 __FUNCTION__, task->comm, task->pid);
1444#endif
1445
1446 mutex_lock_sfx(sem,
1447 NULL, 0,
1448 set_klitirqd_sem_status, to_set);
1449
1450#if 0
1451 TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n",
1452 __FUNCTION__, task->comm, task->pid);
1453#endif
1454}
1455
1456
1457void up_and_set_stat(struct task_struct* t,
1458 enum klitirqd_sem_status to_set,
1459 struct mutex* sem)
1460{
1461#if 0
1462 struct rt_param* param = container_of(sem, struct rt_param, klitirqd_sem);
1463 struct task_struct* task = container_of(param, struct task_struct, rt_param);
1464
1465 TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n",
1466 __FUNCTION__,
1467 task->comm, task->pid);
1468#endif
1469
1470 mutex_unlock_sfx(sem, NULL, 0,
1471 set_klitirqd_sem_status, to_set);
1472
1473#if 0
1474 TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n",
1475 __FUNCTION__,
1476 task->comm, task->pid);
1477#endif
1478}
1479
1480
1481
1482void release_klitirqd_lock(struct task_struct* t)
1483{
1484 if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == HELD))
1485 {
1486 struct mutex* sem;
1487 struct task_struct* owner = t;
1488
1489 if(t->state == TASK_RUNNING)
1490 {
1491 TRACE_TASK(t, "NOT giving up klitirqd_sem because we're not blocked!\n");
1492 return;
1493 }
1494
1495 if(likely(!tsk_rt(t)->is_proxy_thread))
1496 {
1497 sem = &tsk_rt(t)->klitirqd_sem;
1498 }
1499 else
1500 {
1501 unsigned int k_id = klitirqd_id(t);
1502 owner = klitirqds[k_id].current_owner;
1503
1504 BUG_ON(t != klitirqds[k_id].klitirqd);
1505
1506 if(likely(owner))
1507 {
1508 sem = &tsk_rt(owner)->klitirqd_sem;
1509 }
1510 else
1511 {
1512 BUG();
1513
1514 // We had the rug pulled out from under us. Abort attempt
1515 // to reacquire the lock since our client no longer needs us.
1516 TRACE_CUR("HUH?! How did this happen?\n");
1517 atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
1518 return;
1519 }
1520 }
1521
1522 //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid);
1523 up_and_set_stat(t, NEED_TO_REACQUIRE, sem);
1524 //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid);
1525 }
1526 /*
1527 else if(is_realtime(t))
1528 {
1529 TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
1530 }
1531 */
1532}
1533
1534int reacquire_klitirqd_lock(struct task_struct* t)
1535{
1536 int ret = 0;
1537
1538 if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klitirqd_sem_stat) == NEED_TO_REACQUIRE))
1539 {
1540 struct mutex* sem;
1541 struct task_struct* owner = t;
1542
1543 if(likely(!tsk_rt(t)->is_proxy_thread))
1544 {
1545 sem = &tsk_rt(t)->klitirqd_sem;
1546 }
1547 else
1548 {
1549 unsigned int k_id = klitirqd_id(t);
1550 //struct task_struct* owner = klitirqds[k_id].current_owner;
1551 owner = klitirqds[k_id].current_owner;
1552
1553 BUG_ON(t != klitirqds[k_id].klitirqd);
1554
1555 if(likely(owner))
1556 {
1557 sem = &tsk_rt(owner)->klitirqd_sem;
1558 }
1559 else
1560 {
1561 // We had the rug pulled out from under us. Abort attempt
1562 // to reacquire the lock since our client no longer needs us.
1563 TRACE_CUR("No longer needs to reacquire klitirqd_sem!\n");
1564 atomic_set(&tsk_rt(t)->klitirqd_sem_stat, NOT_HELD);
1565 return(0);
1566 }
1567 }
1568
1569 //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid);
1570 __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem);
1571 //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid);
1572 }
1573 /*
1574 else if(is_realtime(t))
1575 {
1576 TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klitirqd_sem_stat);
1577 }
1578 */
1579
1580 return(ret);
1581}
1582
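/*
 * Illustrative sketch (not part of the patch): how a bottom-half producer
 * could hand a tasklet to a klitirqd thread through the interface above
 * instead of Linux's ordinary softirq path.  The tasklet_struct 'owner'
 * field exists only with this patch applied; 'my_rt_task' and 'my_k_id'
 * are hypothetical placeholders supplied by the caller
 * (my_k_id < NR_LITMUS_SOFTIRQD, my_rt_task a LITMUS^RT real-time task).
 */
#include <linux/interrupt.h>
#include <litmus/litmus.h>
#include <litmus/litmus_softirq.h>

static struct tasklet_struct my_bh;

static void my_bh_func(unsigned long data)
{
	/* deferred interrupt work runs here, scheduled at the owner's priority */
}

static void my_defer_bh(struct task_struct *my_rt_task, unsigned int my_k_id)
{
	tasklet_init(&my_bh, my_bh_func, 0);
	my_bh.owner = my_rt_task;	/* must be a LITMUS^RT real-time task */

	/* __litmus_tasklet_schedule() returns 0 if klitirqd rejects the
	 * tasklet (e.g., because work objects are pending); fall back to
	 * Linux's regular tasklet machinery in that case. */
	if (!__litmus_tasklet_schedule(&my_bh, my_k_id))
		tasklet_schedule(&my_bh);
}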
diff --git a/litmus/locking.c b/litmus/locking.c
index f78169dbbeef..6d28efe97c91 100644
--- a/litmus/locking.c
+++ b/litmus/locking.c
@@ -473,7 +473,7 @@ out:
473 return err; 473 return err;
474} 474}
475 475
476#else 476#else // CONFIG_LITMUS_DGL_SUPPORT
477 477
478asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size) 478asmlinkage long sys_litmus_dgl_lock(void* __user usr_dgl_ods, int dgl_size)
479{ 479{
@@ -487,8 +487,7 @@ asmlinkage long sys_litmus_dgl_unlock(void* __user usr_dgl_ods, int dgl_size)
487 487
488#endif 488#endif
489 489
490 490#else // CONFIG_LITMUS_LOCKING
491#else
492 491
493struct fdso_ops generic_lock_ops = {}; 492struct fdso_ops generic_lock_ops = {};
494 493
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
new file mode 100644
index 000000000000..d17152138c63
--- /dev/null
+++ b/litmus/nvidia_info.c
@@ -0,0 +1,536 @@
1#include <linux/module.h>
2#include <linux/semaphore.h>
3#include <linux/pci.h>
4
5#include <litmus/sched_trace.h>
6#include <litmus/nvidia_info.h>
7#include <litmus/litmus.h>
8
9typedef unsigned char NvV8; /* "void": enumerated or multiple fields */
10typedef unsigned short NvV16; /* "void": enumerated or multiple fields */
11typedef unsigned char NvU8; /* 0 to 255 */
12typedef unsigned short NvU16; /* 0 to 65535 */
13typedef signed char NvS8; /* -128 to 127 */
14typedef signed short NvS16; /* -32768 to 32767 */
15typedef float NvF32; /* IEEE Single Precision (S1E8M23) */
16typedef double NvF64; /* IEEE Double Precision (S1E11M52) */
17typedef unsigned int NvV32; /* "void": enumerated or multiple fields */
18typedef unsigned int NvU32; /* 0 to 4294967295 */
19typedef unsigned long long NvU64; /* 0 to 18446744073709551615 */
20typedef union
21{
22 volatile NvV8 Reg008[1];
23 volatile NvV16 Reg016[1];
24 volatile NvV32 Reg032[1];
25} litmus_nv_hwreg_t, * litmus_nv_phwreg_t;
26
27typedef struct
28{
29 NvU64 address;
30 NvU64 size;
31 NvU32 offset;
32 NvU32 *map;
33 litmus_nv_phwreg_t map_u;
34} litmus_nv_aperture_t;
35
36typedef struct
37{
38 void *priv; /* private data */
39 void *os_state; /* os-specific device state */
40
41 int rmInitialized;
42 int flags;
43
44 /* PCI config info */
45 NvU32 domain;
46 NvU16 bus;
47 NvU16 slot;
48 NvU16 vendor_id;
49 NvU16 device_id;
50 NvU16 subsystem_id;
51 NvU32 gpu_id;
52 void *handle;
53
54 NvU32 pci_cfg_space[16];
55
56 /* physical characteristics */
57 litmus_nv_aperture_t bars[3];
58 litmus_nv_aperture_t *regs;
59 litmus_nv_aperture_t *fb, ud;
60 litmus_nv_aperture_t agp;
61
62 NvU32 interrupt_line;
63
64 NvU32 agp_config;
65 NvU32 agp_status;
66
67 NvU32 primary_vga;
68
69 NvU32 sim_env;
70
71 NvU32 rc_timer_enabled;
72
73 /* list of events allocated for this device */
74 void *event_list;
75
76 void *kern_mappings;
77
78} litmus_nv_state_t;
79
80typedef struct work_struct litmus_nv_task_t;
81
82typedef struct litmus_nv_work_s {
83 litmus_nv_task_t task;
84 void *data;
85} litmus_nv_work_t;
86
87typedef struct litmus_nv_linux_state_s {
88 litmus_nv_state_t nv_state;
89 atomic_t usage_count;
90
91 struct pci_dev *dev;
92 void *agp_bridge;
93 void *alloc_queue;
94
95 void *timer_sp;
96 void *isr_sp;
97 void *pci_cfgchk_sp;
98 void *isr_bh_sp;
99
100#ifdef CONFIG_CUDA_4_0
101 char registry_keys[512];
102#endif
103
104	/* keep track of any pending bottom halves */
105 struct tasklet_struct tasklet;
106 litmus_nv_work_t work;
107
108 /* get a timer callback every second */
109 struct timer_list rc_timer;
110
111 /* lock for linux-specific data, not used by core rm */
112 struct semaphore ldata_lock;
113
114 /* lock for linux-specific alloc queue */
115 struct semaphore at_lock;
116
117#if 0
118#if defined(NV_USER_MAP)
119 /* list of user mappings */
120 struct nv_usermap_s *usermap_list;
121
122 /* lock for VMware-specific mapping list */
123 struct semaphore mt_lock;
124#endif /* defined(NV_USER_MAP) */
125#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
126 void *apm_nv_dev;
127#endif
128#endif
129
130 NvU32 device_num;
131 struct litmus_nv_linux_state_s *next;
132} litmus_nv_linux_state_t;
133
134void dump_nvidia_info(const struct tasklet_struct *t)
135{
136 litmus_nv_state_t* nvstate = NULL;
137 litmus_nv_linux_state_t* linuxstate = NULL;
138 struct pci_dev* pci = NULL;
139
140 nvstate = (litmus_nv_state_t*)(t->data);
141
142 if(nvstate)
143 {
144 TRACE("NV State:\n"
145 "\ttasklet ptr = %p\n"
146 "\tstate ptr = %p\n"
147 "\tprivate data ptr = %p\n"
148 "\tos state ptr = %p\n"
149 "\tdomain = %u\n"
150 "\tbus = %u\n"
151 "\tslot = %u\n"
152			  "\tvendor_id     = %u\n"
153 "\tdevice_id = %u\n"
154 "\tsubsystem_id = %u\n"
155 "\tgpu_id = %u\n"
156 "\tinterrupt_line = %u\n",
157 t,
158 nvstate,
159 nvstate->priv,
160 nvstate->os_state,
161 nvstate->domain,
162 nvstate->bus,
163 nvstate->slot,
164 nvstate->vendor_id,
165 nvstate->device_id,
166 nvstate->subsystem_id,
167 nvstate->gpu_id,
168 nvstate->interrupt_line);
169
170 linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state);
171 }
172 else
173 {
174 TRACE("INVALID NVSTATE????\n");
175 }
176
177 if(linuxstate)
178 {
179 int ls_offset = (void*)(&(linuxstate->device_num)) - (void*)(linuxstate);
180 int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state));
181 int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate);
182
183
184 TRACE("LINUX NV State:\n"
185 "\tlinux nv state ptr: %p\n"
186 "\taddress of tasklet: %p\n"
187 "\taddress of work: %p\n"
188 "\tusage_count: %d\n"
189 "\tdevice_num: %u\n"
190 "\ttasklet addr == this tasklet: %d\n"
191 "\tpci: %p\n",
192 linuxstate,
193 &(linuxstate->tasklet),
194 &(linuxstate->work),
195 atomic_read(&(linuxstate->usage_count)),
196 linuxstate->device_num,
197 (t == &(linuxstate->tasklet)),
198 linuxstate->dev);
199
200 pci = linuxstate->dev;
201
202 TRACE("Offsets:\n"
203 "\tOffset from LinuxState: %d, %x\n"
204 "\tOffset from NVState: %d, %x\n"
205 "\tOffset from parameter: %d, %x\n"
206 "\tdevice_num: %u\n",
207 ls_offset, ls_offset,
208 ns_offset_raw, ns_offset_raw,
209 ns_offset_desired, ns_offset_desired,
210 *((u32*)((void*)nvstate + ns_offset_desired)));
211 }
212 else
213 {
214 TRACE("INVALID LINUXNVSTATE?????\n");
215 }
216
217#if 0
218 if(pci)
219 {
220 TRACE("PCI DEV Info:\n"
221 "pci device ptr: %p\n"
222 "\tdevfn = %d\n"
223 "\tvendor = %d\n"
224 "\tdevice = %d\n"
225 "\tsubsystem_vendor = %d\n"
226 "\tsubsystem_device = %d\n"
227 "\tslot # = %d\n",
228 pci,
229 pci->devfn,
230 pci->vendor,
231 pci->device,
232 pci->subsystem_vendor,
233 pci->subsystem_device,
234 pci->slot->number);
235 }
236 else
237 {
238 TRACE("INVALID PCIDEV PTR?????\n");
239 }
240#endif
241}
242
243static struct module* nvidia_mod = NULL;
244int init_nvidia_info(void)
245{
246 mutex_lock(&module_mutex);
247 nvidia_mod = find_module("nvidia");
248 mutex_unlock(&module_mutex);
249 if(nvidia_mod != NULL)
250 {
251 TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__,
252 (void*)(nvidia_mod->module_core),
253 (void*)(nvidia_mod->module_core) + nvidia_mod->core_size);
254 init_nv_device_reg();
255 return(0);
256 }
257 else
258 {
259 TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__);
260 return(-1);
261 }
262}
263
264
265/* works with pointers to static data inside the module too. */
266int is_nvidia_func(void* func_addr)
267{
268 int ret = 0;
269 if(nvidia_mod)
270 {
271 ret = within_module_core((long unsigned int)func_addr, nvidia_mod);
272 /*
273 if(ret)
274 {
275 TRACE("%s : %p is in NVIDIA module: %d\n",
276 __FUNCTION__, func_addr, ret);
277 }*/
278 }
279
280 return(ret);
281}
282
283u32 get_tasklet_nv_device_num(const struct tasklet_struct *t)
284{
285 // life is too short to use hard-coded offsets. update this later.
286 litmus_nv_state_t* nvstate = (litmus_nv_state_t*)(t->data);
287 litmus_nv_linux_state_t* linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state);
288
289 BUG_ON(linuxstate->device_num >= NV_DEVICE_NUM);
290
291 return(linuxstate->device_num);
292
293 //int DEVICE_NUM_OFFSET = (void*)(&(linuxstate->device_num)) - (void*)(nvstate);
294
295#if 0
296	// offset determined through observed behavior of the NV driver.
297 //const int DEVICE_NUM_OFFSET = 0x480; // CUDA 4.0 RC1
298 //const int DEVICE_NUM_OFFSET = 0x510; // CUDA 4.0 RC2
299
300 void* state = (void*)(t->data);
301 void* device_num_ptr = state + DEVICE_NUM_OFFSET;
302
303 //dump_nvidia_info(t);
304 return(*((u32*)device_num_ptr));
305#endif
306}
307
308u32 get_work_nv_device_num(const struct work_struct *t)
309{
310	// offset determined through observed behavior of the NV driver.
311 const int DEVICE_NUM_OFFSET = sizeof(struct work_struct);
312 void* state = (void*)(t);
313 void** device_num_ptr = state + DEVICE_NUM_OFFSET;
314 return(*((u32*)(*device_num_ptr)));
315}
316
317
318
319typedef struct {
320 raw_spinlock_t lock;
321 struct task_struct *device_owner;
322} nv_device_registry_t;
323
324static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM];
325
326int init_nv_device_reg(void)
327{
328 int i;
329
330 //memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG));
331
332 for(i = 0; i < NV_DEVICE_NUM; ++i)
333 {
334 raw_spin_lock_init(&NV_DEVICE_REG[i].lock);
335 NV_DEVICE_REG[i].device_owner = NULL;
336 }
337
338 return(1);
339}
340
341/* Used to get the nv_device_id for a given owner.
342   (Returns -1 if no associated device id can be found.) */
343/*
344int get_nv_device_id(struct task_struct* owner)
345{
346 int i;
347 if(!owner)
348 {
349 return(-1);
350 }
351 for(i = 0; i < NV_DEVICE_NUM; ++i)
352 {
353 if(NV_DEVICE_REG[i].device_owner == owner)
354 return(i);
355 }
356 return(-1);
357}
358*/
359
360
361
362static int __reg_nv_device(int reg_device_id)
363{
364 int ret = 0;
365 struct task_struct* old =
366 cmpxchg(&NV_DEVICE_REG[reg_device_id].device_owner,
367 NULL,
368 current);
369
370 mb();
371
372 if(likely(old == NULL))
373 {
374#ifdef CONFIG_LITMUS_SOFTIRQD
375 down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem);
376#endif
377 TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id);
378 }
379 else
380 {
381 TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id);
382 ret = -EBUSY;
383 }
384
385 return(ret);
386
387
388
389#if 0
390 //unsigned long flags;
391 //raw_spin_lock_irqsave(&NV_DEVICE_REG[reg_device_id].lock, flags);
392 //lock_nv_registry(reg_device_id, &flags);
393
394 if(likely(NV_DEVICE_REG[reg_device_id].device_owner == NULL))
395 {
396 NV_DEVICE_REG[reg_device_id].device_owner = current;
397 mb(); // needed?
398
399 // release spin lock before chance of going to sleep.
400 //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags);
401 //unlock_nv_registry(reg_device_id, &flags);
402
403 down_and_set_stat(current, HELD, &tsk_rt(current)->klitirqd_sem);
404 TRACE_CUR("%s: device %d registered.\n", __FUNCTION__, reg_device_id);
405 return(0);
406 }
407 else
408 {
409 //raw_spin_unlock_irqrestore(&NV_DEVICE_REG[reg_device_id].lock, flags);
410 //unlock_nv_registry(reg_device_id, &flags);
411
412 TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id);
413 return(-EBUSY);
414 }
415#endif
416}
417
418static int __clear_reg_nv_device(int de_reg_device_id)
419{
420 int ret = 0;
421 struct task_struct* old;
422
423#ifdef CONFIG_LITMUS_SOFTIRQD
424 unsigned long flags;
425 struct task_struct* klitirqd_th = get_klitirqd(de_reg_device_id);
426 lock_nv_registry(de_reg_device_id, &flags);
427#endif
428
429 old = cmpxchg(&NV_DEVICE_REG[de_reg_device_id].device_owner,
430 current,
431 NULL);
432
433 mb();
434
435#ifdef CONFIG_LITMUS_SOFTIRQD
436 if(likely(old == current))
437 {
438 flush_pending(klitirqd_th, current);
439 //unlock_nv_registry(de_reg_device_id, &flags);
440
441 up_and_set_stat(current, NOT_HELD, &tsk_rt(current)->klitirqd_sem);
442
443 unlock_nv_registry(de_reg_device_id, &flags);
444 ret = 0;
445
446 TRACE_CUR("%s: semaphore released.\n",__FUNCTION__);
447 }
448 else
449 {
450 unlock_nv_registry(de_reg_device_id, &flags);
451 ret = -EINVAL;
452
453 if(old)
454 TRACE_CUR("%s: device %d is not registered for this process's use! %s/%d is!\n",
455 __FUNCTION__, de_reg_device_id, old->comm, old->pid);
456 else
457 TRACE_CUR("%s: device %d is not registered for this process's use! No one is!\n",
458 __FUNCTION__, de_reg_device_id);
459 }
460#endif
461
462 return(ret);
463}
464
465
466int reg_nv_device(int reg_device_id, int reg_action)
467{
468 int ret;
469
470 if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0))
471 {
472 if(reg_action)
473 ret = __reg_nv_device(reg_device_id);
474 else
475 ret = __clear_reg_nv_device(reg_device_id);
476 }
477 else
478 {
479 ret = -ENODEV;
480 }
481
482 return(ret);
483}
484
485/* Used to get the owner of the given nv_device_id. */
486struct task_struct* get_nv_device_owner(u32 target_device_id)
487{
488 struct task_struct* owner;
489 BUG_ON(target_device_id >= NV_DEVICE_NUM);
490 owner = NV_DEVICE_REG[target_device_id].device_owner;
491 return(owner);
492}
493
494void lock_nv_registry(u32 target_device_id, unsigned long* flags)
495{
496 BUG_ON(target_device_id >= NV_DEVICE_NUM);
497
498 if(in_interrupt())
499 TRACE("Locking registry for %d.\n", target_device_id);
500 else
501 TRACE_CUR("Locking registry for %d.\n", target_device_id);
502
503 raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags);
504}
505
506void unlock_nv_registry(u32 target_device_id, unsigned long* flags)
507{
508 BUG_ON(target_device_id >= NV_DEVICE_NUM);
509
510 if(in_interrupt())
511 TRACE("Unlocking registry for %d.\n", target_device_id);
512 else
513 TRACE_CUR("Unlocking registry for %d.\n", target_device_id);
514
515 raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags);
516}
517
518
519void increment_nv_int_count(u32 device)
520{
521 unsigned long flags;
522 struct task_struct* owner;
523
524 lock_nv_registry(device, &flags);
525
526 owner = NV_DEVICE_REG[device].device_owner;
527 if(owner)
528 {
529 atomic_inc(&tsk_rt(owner)->nv_int_count);
530 }
531
532 unlock_nv_registry(device, &flags);
533}
534EXPORT_SYMBOL(increment_nv_int_count);
535
536
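/*
 * Standalone sketch (not part of the patch) of the ownership protocol that
 * __reg_nv_device()/__clear_reg_nv_device() above implement with cmpxchg:
 * a device slot is claimed only if it is currently unowned, and released
 * only by the task that claimed it.  GCC's __sync_val_compare_and_swap()
 * stands in for the kernel's cmpxchg(); names below are hypothetical.
 */
#include <stdio.h>

struct owner_slot {
	void *owner;	/* NULL when the device is free */
};

static int claim(struct owner_slot *slot, void *me)
{
	/* succeeds only if the slot was NULL; mirrors cmpxchg(owner, NULL, current) */
	void *old = __sync_val_compare_and_swap(&slot->owner, NULL, me);
	return (old == NULL) ? 0 : -1;	/* -1 plays the role of -EBUSY */
}

static int release(struct owner_slot *slot, void *me)
{
	/* succeeds only if we are the registered owner; mirrors cmpxchg(owner, current, NULL) */
	void *old = __sync_val_compare_and_swap(&slot->owner, me, NULL);
	return (old == me) ? 0 : -1;	/* -1 plays the role of -EINVAL */
}

int main(void)
{
	struct owner_slot gpu0 = { NULL };
	int a = 1, b = 2;

	printf("A claims: %d\n", claim(&gpu0, &a));	/*  0: success */
	printf("B claims: %d\n", claim(&gpu0, &b));	/* -1: already owned */
	printf("B frees:  %d\n", release(&gpu0, &b));	/* -1: not the owner */
	printf("A frees:  %d\n", release(&gpu0, &a));	/*  0: success */
	return 0;
}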
diff --git a/litmus/preempt.c b/litmus/preempt.c
index 5704d0bf4c0b..28368d5bc046 100644
--- a/litmus/preempt.c
+++ b/litmus/preempt.c
@@ -30,6 +30,7 @@ void sched_state_will_schedule(struct task_struct* tsk)
30 /* Litmus tasks should never be subject to a remote 30 /* Litmus tasks should never be subject to a remote
31 * set_tsk_need_resched(). */ 31 * set_tsk_need_resched(). */
32 BUG_ON(is_realtime(tsk)); 32 BUG_ON(is_realtime(tsk));
33
33#ifdef CONFIG_PREEMPT_STATE_TRACE 34#ifdef CONFIG_PREEMPT_STATE_TRACE
34 TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n", 35 TRACE_TASK(tsk, "set_tsk_need_resched() ret:%p\n",
35 __builtin_return_address(0)); 36 __builtin_return_address(0));
@@ -45,13 +46,17 @@ void sched_state_ipi(void)
45 /* Cause scheduler to be invoked. 46 /* Cause scheduler to be invoked.
46 * This will cause a transition to WILL_SCHEDULE. */ 47 * This will cause a transition to WILL_SCHEDULE. */
47 set_tsk_need_resched(current); 48 set_tsk_need_resched(current);
49 /*
48 TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n", 50 TRACE_STATE("IPI -> set_tsk_need_resched(%s/%d)\n",
49 current->comm, current->pid); 51 current->comm, current->pid);
52 */
50 } else { 53 } else {
51 /* ignore */ 54 /* ignore */
55 /*
52 TRACE_STATE("ignoring IPI in state %x (%s)\n", 56 TRACE_STATE("ignoring IPI in state %x (%s)\n",
53 get_sched_state(), 57 get_sched_state(),
54 sched_state_name(get_sched_state())); 58 sched_state_name(get_sched_state()));
59 */
55 } 60 }
56} 61}
57 62
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index 49653f1ea49d..a55fc894340d 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -29,6 +29,7 @@
29#include <linux/percpu.h> 29#include <linux/percpu.h>
30#include <linux/sched.h> 30#include <linux/sched.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/uaccess.h>
32 33
33#include <linux/module.h> 34#include <linux/module.h>
34 35
@@ -50,7 +51,23 @@
50 51
51/* to configure the cluster size */ 52/* to configure the cluster size */
52#include <litmus/litmus_proc.h> 53#include <litmus/litmus_proc.h>
53#include <linux/uaccess.h> 54
55#ifdef CONFIG_SCHED_CPU_AFFINITY
56#include <litmus/affinity.h>
57#endif
58
59#ifdef CONFIG_LITMUS_SOFTIRQD
60#include <litmus/litmus_softirq.h>
61#endif
62
63#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
64#include <linux/interrupt.h>
65#include <litmus/trace.h>
66#endif
67
68#ifdef CONFIG_LITMUS_NVIDIA
69#include <litmus/nvidia_info.h>
70#endif
54 71
55/* Reference configuration variable. Determines which cache level is used to 72/* Reference configuration variable. Determines which cache level is used to
56 * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that 73 * group CPUs into clusters. GLOBAL_CLUSTER, which is the default, means that
@@ -86,6 +103,15 @@ DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries);
86#define test_will_schedule(cpu) \ 103#define test_will_schedule(cpu) \
87 (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) 104 (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule))
88 105
106
107#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
108struct tasklet_head
109{
110 struct tasklet_struct *head;
111 struct tasklet_struct **tail;
112};
113#endif
114
89/* 115/*
90 * In C-EDF there is a cedf domain _per_ cluster 116 * In C-EDF there is a cedf domain _per_ cluster
91 * The number of clusters is dynamically determined accordingly to the 117 * The number of clusters is dynamically determined accordingly to the
@@ -102,6 +128,10 @@ typedef struct clusterdomain {
102 struct binheap_handle cpu_heap; 128 struct binheap_handle cpu_heap;
103 /* lock for this cluster */ 129 /* lock for this cluster */
104#define cluster_lock domain.ready_lock 130#define cluster_lock domain.ready_lock
131
132#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
133 struct tasklet_head pending_tasklets;
134#endif
105} cedf_domain_t; 135} cedf_domain_t;
106 136
107/* a cedf_domain per cluster; allocation is done at init/activation time */ 137/* a cedf_domain per cluster; allocation is done at init/activation time */
@@ -206,7 +236,7 @@ static noinline void link_task_to_cpu(struct task_struct* linked,
206} 236}
207 237
208/* unlink - Make sure a task is not linked any longer to an entry 238/* unlink - Make sure a task is not linked any longer to an entry
209 * where it was linked before. Must hold cedf_lock. 239 * where it was linked before. Must hold cluster_lock.
210 */ 240 */
211static noinline void unlink(struct task_struct* t) 241static noinline void unlink(struct task_struct* t)
212{ 242{
@@ -242,7 +272,7 @@ static void preempt(cpu_entry_t *entry)
242} 272}
243 273
244/* requeue - Put an unlinked task into gsn-edf domain. 274/* requeue - Put an unlinked task into gsn-edf domain.
245 * Caller must hold cedf_lock. 275 * Caller must hold cluster_lock.
246 */ 276 */
247static noinline void requeue(struct task_struct* task) 277static noinline void requeue(struct task_struct* task)
248{ 278{
@@ -337,13 +367,17 @@ static void cedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
337 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); 367 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
338} 368}
339 369
340/* caller holds cedf_lock */ 370/* caller holds cluster_lock */
341static noinline void job_completion(struct task_struct *t, int forced) 371static noinline void job_completion(struct task_struct *t, int forced)
342{ 372{
343 BUG_ON(!t); 373 BUG_ON(!t);
344 374
345 sched_trace_task_completion(t, forced); 375 sched_trace_task_completion(t, forced);
346 376
377#ifdef CONFIG_LITMUS_NVIDIA
378 atomic_set(&tsk_rt(t)->nv_int_count, 0);
379#endif
380
347 TRACE_TASK(t, "job_completion().\n"); 381 TRACE_TASK(t, "job_completion().\n");
348 382
349 /* set flags */ 383 /* set flags */
@@ -387,6 +421,288 @@ static void cedf_tick(struct task_struct* t)
387 } 421 }
388} 422}
389 423
424
425
426
427
428
429
430
431
432
433
434
435
436#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
437
438
439static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
440{
441 if (!atomic_read(&tasklet->count)) {
442 if(tasklet->owner) {
443 sched_trace_tasklet_begin(tasklet->owner);
444 }
445
446 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
447 {
448 BUG();
449 }
450 TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %d).\n",
451 __FUNCTION__,
452 (tasklet->owner) ? tasklet->owner->pid : -1,
453 (tasklet->owner) ? 0 : 1);
454 tasklet->func(tasklet->data);
455 tasklet_unlock(tasklet);
456
457 if(tasklet->owner) {
458 sched_trace_tasklet_end(tasklet->owner, flushed);
459 }
460 }
461 else {
462 BUG();
463 }
464}
465
466
467static void flush_tasklets(cedf_domain_t* cluster, struct task_struct* task)
468{
469 // lazy flushing.
470 // just change ownership to NULL and let an idle processor
471 // take care of it. :P
472
473 struct tasklet_struct* step;
474 unsigned long flags;
475
476 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
477
478 for(step = cluster->pending_tasklets.head; step != NULL; step = step->next) {
479 if(step->owner == task) {
480 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
481 step->owner = NULL;
482 }
483 }
484
485 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
486}
487
488
489static void do_lit_tasklets(cedf_domain_t* cluster, struct task_struct* sched_task)
490{
491 int work_to_do = 1;
492 struct tasklet_struct *tasklet = NULL;
493 unsigned long flags;
494
495 while(work_to_do) {
496
497 TS_NV_SCHED_BOTISR_START;
498
499 // remove tasklet at head of list if it has higher priority.
500 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
501
502 if(cluster->pending_tasklets.head != NULL) {
503 // remove tasklet at head.
504 tasklet = cluster->pending_tasklets.head;
505
506 if(edf_higher_prio(tasklet->owner, sched_task)) {
507
508 if(NULL == tasklet->next) {
509 // tasklet is at the head, list only has one element
510 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
511 cluster->pending_tasklets.tail = &(cluster->pending_tasklets.head);
512 }
513
514 // remove the tasklet from the queue
515 cluster->pending_tasklets.head = tasklet->next;
516
517 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1);
518 }
519 else {
520 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, (tasklet->owner) ? tasklet->owner->pid : -1, smp_processor_id());
521 tasklet = NULL;
522 }
523 }
524 else {
525 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
526 }
527
528 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
529
530 TS_NV_SCHED_BOTISR_END;
531
532 if(tasklet) {
533 __do_lit_tasklet(tasklet, 0ul);
534 tasklet = NULL;
535 }
536 else {
537 work_to_do = 0;
538 }
539 }
540}
541
542
543static void run_tasklets(struct task_struct* sched_task)
544{
545 cedf_domain_t* cluster;
546
547 preempt_disable();
548
549 cluster = (is_realtime(sched_task)) ?
550 task_cpu_cluster(sched_task) :
551 remote_cluster(smp_processor_id());
552
553 if(cluster && cluster->pending_tasklets.head != NULL) {
554 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
555 do_lit_tasklets(cluster, sched_task);
556 }
557
558 preempt_enable_no_resched();
559}
560
561
562static void __add_pai_tasklet(struct tasklet_struct* tasklet, cedf_domain_t* cluster)
563{
564 struct tasklet_struct* step;
565
566 tasklet->next = NULL; // make sure there are no old values floating around
567
568 step = cluster->pending_tasklets.head;
569 if(step == NULL) {
570 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
571 // insert at tail.
572 *(cluster->pending_tasklets.tail) = tasklet;
573 cluster->pending_tasklets.tail = &(tasklet->next);
574 }
575 else if((*(cluster->pending_tasklets.tail) != NULL) &&
576 edf_higher_prio((*(cluster->pending_tasklets.tail))->owner, tasklet->owner)) {
577 // insert at tail.
578 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
579
580 *(cluster->pending_tasklets.tail) = tasklet;
581 cluster->pending_tasklets.tail = &(tasklet->next);
582 }
583 else {
584
585 // insert the tasklet somewhere in the middle.
586
587 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
588
589 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
590 step = step->next;
591 }
592
593 // insert tasklet right before step->next.
594
595 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__,
596 tasklet->owner->pid,
597 (step->owner) ?
598 step->owner->pid :
599 -1,
600 (step->next) ?
601 ((step->next->owner) ?
602 step->next->owner->pid :
603 -1) :
604 -1);
605
606 tasklet->next = step->next;
607 step->next = tasklet;
608
609 // patch up the head if needed.
610 if(cluster->pending_tasklets.head == step)
611 {
612 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
613 cluster->pending_tasklets.head = tasklet;
614 }
615 }
616}
617
618static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
619{
620 cedf_domain_t *cluster = NULL;
621 cpu_entry_t *targetCPU = NULL;
622 int thisCPU;
623 int runLocal = 0;
624 int runNow = 0;
625 unsigned long flags;
626
627 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
628 {
629 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
630 return 0;
631 }
632
633 cluster = task_cpu_cluster(tasklet->owner);
634
635 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
636
637 thisCPU = smp_processor_id();
638
639#ifdef CONFIG_SCHED_CPU_AFFINITY
640 {
641 cpu_entry_t* affinity = NULL;
642
643 // use this CPU if it is in our cluster and isn't running any RT work.
644 if(cpu_isset(thisCPU, *cluster->cpu_map) && (__get_cpu_var(cedf_cpu_entries).linked == NULL)) {
645 affinity = &(__get_cpu_var(cedf_cpu_entries));
646 }
647 else {
648 // this CPU is busy or shouldn't run tasklet in this cluster.
649 // look for available near by CPUs.
650			// look for available nearby CPUs.
651 affinity =
652 cedf_get_nearest_available_cpu(cluster,
653 &per_cpu(cedf_cpu_entries, task_cpu(tasklet->owner)));
654 }
655
656 targetCPU = affinity;
657 }
658#endif
659
660 if (targetCPU == NULL) {
661 targetCPU = lowest_prio_cpu(cluster);
662 }
663
664 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
665 if (thisCPU == targetCPU->cpu) {
666 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
667 runLocal = 1;
668 runNow = 1;
669 }
670 else {
671 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
672 runLocal = 0;
673 runNow = 1;
674 }
675 }
676 else {
677 runLocal = 0;
678 runNow = 0;
679 }
680
681 if(!runLocal) {
682 // enqueue the tasklet
683 __add_pai_tasklet(tasklet, cluster);
684 }
685
686 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
687
688
689 if (runLocal /*&& runNow */) { // runNow == 1 is implied
690 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
691 __do_lit_tasklet(tasklet, 0ul);
692 }
693 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
694 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
695 preempt(targetCPU); // need to be protected by cluster_lock?
696 }
697 else {
698 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
699 }
700
701 return(1); // success
702}
703
704#endif // PAI
705
390/* Getting schedule() right is a bit tricky. schedule() may not make any 706/* Getting schedule() right is a bit tricky. schedule() may not make any
391 * assumptions on the state of the current task since it may be called for a 707 * assumptions on the state of the current task since it may be called for a
392 * number of reasons. The reasons include a scheduler_tick() determined that it 708 * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -512,7 +828,7 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
512 raw_spin_unlock(&cluster->cluster_lock); 828 raw_spin_unlock(&cluster->cluster_lock);
513 829
514#ifdef WANT_ALL_SCHED_EVENTS 830#ifdef WANT_ALL_SCHED_EVENTS
515 TRACE("cedf_lock released, next=0x%p\n", next); 831 TRACE("cluster_lock released, next=0x%p\n", next);
516 832
517 if (next) 833 if (next)
518 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock()); 834 TRACE_TASK(next, "scheduled at %llu\n", litmus_clock());
@@ -520,7 +836,6 @@ static struct task_struct* cedf_schedule(struct task_struct * prev)
520 TRACE("becomes idle at %llu.\n", litmus_clock()); 836 TRACE("becomes idle at %llu.\n", litmus_clock());
521#endif 837#endif
522 838
523
524 return next; 839 return next;
525} 840}
526 841
@@ -584,7 +899,7 @@ static void cedf_task_new(struct task_struct * t, int on_rq, int running)
584static void cedf_task_wake_up(struct task_struct *task) 899static void cedf_task_wake_up(struct task_struct *task)
585{ 900{
586 unsigned long flags; 901 unsigned long flags;
587 lt_t now; 902 //lt_t now;
588 cedf_domain_t *cluster; 903 cedf_domain_t *cluster;
589 904
590 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 905 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
@@ -592,6 +907,8 @@ static void cedf_task_wake_up(struct task_struct *task)
592 cluster = task_cpu_cluster(task); 907 cluster = task_cpu_cluster(task);
593 908
594 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 909 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
910
911#if 0 // sproadic task model
595 /* We need to take suspensions because of semaphores into 912 /* We need to take suspensions because of semaphores into
596 * account! If a job resumes after being suspended due to acquiring 913 * account! If a job resumes after being suspended due to acquiring
597 * a semaphore, it should never be treated as a new job release. 914 * a semaphore, it should never be treated as a new job release.
@@ -613,7 +930,13 @@ static void cedf_task_wake_up(struct task_struct *task)
613 } 930 }
614 } 931 }
615 } 932 }
616 cedf_job_arrival(task); 933#endif
934
935 set_rt_flags(task, RT_F_RUNNING); // periodic model
936
937 if(tsk_rt(task)->linked_on == NO_CPU)
938 cedf_job_arrival(task);
939
617 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); 940 raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags);
618} 941}
619 942
@@ -640,6 +963,10 @@ static void cedf_task_exit(struct task_struct * t)
640 unsigned long flags; 963 unsigned long flags;
641 cedf_domain_t *cluster = task_cpu_cluster(t); 964 cedf_domain_t *cluster = task_cpu_cluster(t);
642 965
966#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
967 flush_tasklets(cluster, t);
968#endif
969
643 /* unlink if necessary */ 970 /* unlink if necessary */
644 raw_spin_lock_irqsave(&cluster->cluster_lock, flags); 971 raw_spin_lock_irqsave(&cluster->cluster_lock, flags);
645 unlink(t); 972 unlink(t);
@@ -660,6 +987,711 @@ static long cedf_admit_task(struct task_struct* tsk)
660 return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL; 987 return task_cpu(tsk) == tsk->rt_param.task_params.cpu ? 0 : -EINVAL;
661} 988}
662 989
990
991
992#ifdef CONFIG_LITMUS_LOCKING
993
994#include <litmus/fdso.h>
995
996
997static void __set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
998{
999 int linked_on;
1000 int check_preempt = 0;
1001
1002 cedf_domain_t* cluster = task_cpu_cluster(t);
1003
1004 if(prio_inh != NULL)
1005 TRACE_TASK(t, "inherits priority from %s/%d\n", prio_inh->comm, prio_inh->pid);
1006 else
1007 TRACE_TASK(t, "inherits priority from %p\n", prio_inh);
1008
1009 sched_trace_eff_prio_change(t, prio_inh);
1010
1011 tsk_rt(t)->inh_task = prio_inh;
1012
1013 linked_on = tsk_rt(t)->linked_on;
1014
1015 /* If it is scheduled, then we need to reorder the CPU heap. */
1016 if (linked_on != NO_CPU) {
1017 TRACE_TASK(t, "%s: linked on %d\n",
1018 __FUNCTION__, linked_on);
1019 /* Holder is scheduled; need to re-order CPUs.
1020 * We can't use heap_decrease() here since
1021 * the cpu_heap is ordered in reverse direction, so
1022 * it is actually an increase. */
1023 bheap_delete(cpu_lower_prio, &cluster->cpu_heap,
1024 per_cpu(cedf_cpu_entries, linked_on).hn);
1025 bheap_insert(cpu_lower_prio, &cluster->cpu_heap,
1026 per_cpu(cedf_cpu_entries, linked_on).hn);
1027 } else {
1028 /* holder may be queued: first stop queue changes */
1029 raw_spin_lock(&cluster->domain.release_lock);
1030 if (is_queued(t)) {
1031 TRACE_TASK(t, "%s: is queued\n", __FUNCTION__);
1032
1033 /* We need to update the position of holder in some
1034 * heap. Note that this could be a release heap if we
1035 * budget enforcement is used and this job overran. */
1036 check_preempt = !bheap_decrease(edf_ready_order, tsk_rt(t)->heap_node);
1037
1038 } else {
1039 /* Nothing to do: if it is not queued and not linked
1040 * then it is either sleeping or currently being moved
1041 * by other code (e.g., a timer interrupt handler) that
1042 * will use the correct priority when enqueuing the
1043 * task. */
1044 TRACE_TASK(t, "%s: is NOT queued => Done.\n", __FUNCTION__);
1045 }
1046 raw_spin_unlock(&cluster->domain.release_lock);
1047
1048 /* If holder was enqueued in a release heap, then the following
1049 * preemption check is pointless, but we can't easily detect
1050 * that case. If you want to fix this, then consider that
1051 * simply adding a state flag requires O(n) time to update when
1052 * releasing n tasks, which conflicts with the goal to have
1053 * O(log n) merges. */
1054 if (check_preempt) {
1055 /* heap_decrease() hit the top level of the heap: make
1056 * sure preemption checks get the right task, not the
1057 * potentially stale cache. */
1058 bheap_uncache_min(edf_ready_order, &cluster->domain.ready_queue);
1059 check_for_preemptions(cluster);
1060 }
1061 }
1062}
1063
1064/* called with IRQs off */
1065static void set_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
1066{
1067 cedf_domain_t* cluster = task_cpu_cluster(t);
1068
1069 raw_spin_lock(&cluster->cluster_lock);
1070
1071 __set_priority_inheritance(t, prio_inh);
1072
1073#ifdef CONFIG_LITMUS_SOFTIRQD
1074 if(tsk_rt(t)->cur_klitirqd != NULL)
1075 {
1076 TRACE_TASK(t, "%s/%d inherits a new priority!\n",
1077 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1078
1079 __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
1080 }
1081#endif
1082
1083 raw_spin_unlock(&cluster->cluster_lock);
1084}
1085
1086
1087/* called with IRQs off */
1088static void __clear_priority_inheritance(struct task_struct* t)
1089{
1090 TRACE_TASK(t, "priority restored\n");
1091
1092 if(tsk_rt(t)->scheduled_on != NO_CPU)
1093 {
1094 sched_trace_eff_prio_change(t, NULL);
1095
1096 tsk_rt(t)->inh_task = NULL;
1097
1098 /* Check if rescheduling is necessary. We can't use heap_decrease()
1099 * since the priority was effectively lowered. */
1100 unlink(t);
1101 cedf_job_arrival(t);
1102 }
1103 else
1104 {
1105 __set_priority_inheritance(t, NULL);
1106 }
1107
1108#ifdef CONFIG_LITMUS_SOFTIRQD
1109 if(tsk_rt(t)->cur_klitirqd != NULL)
1110 {
1111 TRACE_TASK(t, "%s/%d inheritance set back to owner.\n",
1112 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1113
1114 if(tsk_rt(tsk_rt(t)->cur_klitirqd)->scheduled_on != NO_CPU)
1115 {
1116 sched_trace_eff_prio_change(tsk_rt(t)->cur_klitirqd, t);
1117
1118 tsk_rt(tsk_rt(t)->cur_klitirqd)->inh_task = t;
1119
1120 /* Check if rescheduling is necessary. We can't use heap_decrease()
1121 * since the priority was effectively lowered. */
1122 unlink(tsk_rt(t)->cur_klitirqd);
1123 cedf_job_arrival(tsk_rt(t)->cur_klitirqd);
1124 }
1125 else
1126 {
1127 __set_priority_inheritance(tsk_rt(t)->cur_klitirqd, t);
1128 }
1129 }
1130#endif
1131}
1132
1133/* called with IRQs off */
1134static void clear_priority_inheritance(struct task_struct* t)
1135{
1136 cedf_domain_t* cluster = task_cpu_cluster(t);
1137
1138 raw_spin_lock(&cluster->cluster_lock);
1139 __clear_priority_inheritance(t);
1140 raw_spin_unlock(&cluster->cluster_lock);
1141}
1142
1143
1144
1145#ifdef CONFIG_LITMUS_SOFTIRQD
1146/* called with IRQs off */
1147static void set_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1148 struct task_struct* old_owner,
1149 struct task_struct* new_owner)
1150{
1151 cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
1152
1153 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1154
1155 raw_spin_lock(&cluster->cluster_lock);
1156
1157 if(old_owner != new_owner)
1158 {
1159 if(old_owner)
1160 {
1161 // unreachable?
1162 tsk_rt(old_owner)->cur_klitirqd = NULL;
1163 }
1164
1165 TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
1166 new_owner->comm, new_owner->pid);
1167
1168 tsk_rt(new_owner)->cur_klitirqd = klitirqd;
1169 }
1170
1171 __set_priority_inheritance(klitirqd,
1172 (tsk_rt(new_owner)->inh_task == NULL) ?
1173 new_owner :
1174 tsk_rt(new_owner)->inh_task);
1175
1176 raw_spin_unlock(&cluster->cluster_lock);
1177}
1178
1179/* called with IRQs off */
1180static void clear_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1181 struct task_struct* old_owner)
1182{
1183 cedf_domain_t* cluster = task_cpu_cluster(klitirqd);
1184
1185 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1186
1187 raw_spin_lock(&cluster->cluster_lock);
1188
1189 TRACE_TASK(klitirqd, "priority restored\n");
1190
1191 if(tsk_rt(klitirqd)->scheduled_on != NO_CPU)
1192 {
1193 tsk_rt(klitirqd)->inh_task = NULL;
1194
1195 /* Check if rescheduling is necessary. We can't use heap_decrease()
1196 * since the priority was effectively lowered. */
1197 unlink(klitirqd);
1198 cedf_job_arrival(klitirqd);
1199 }
1200 else
1201 {
1202 __set_priority_inheritance(klitirqd, NULL);
1203 }
1204
1205 tsk_rt(old_owner)->cur_klitirqd = NULL;
1206
1207 raw_spin_unlock(&cluster->cluster_lock);
1208}
1209#endif // CONFIG_LITMUS_SOFTIRQD
1210
1211
1212/* ******************** KFMLP support ********************** */
1213
1214/* struct for semaphore with priority inheritance */
1215struct kfmlp_queue
1216{
1217 wait_queue_head_t wait;
1218 struct task_struct* owner;
1219 struct task_struct* hp_waiter;
1220 int count; /* number of waiters + holder */
1221};
1222
1223struct kfmlp_semaphore
1224{
1225 struct litmus_lock litmus_lock;
1226
1227 spinlock_t lock;
1228
1229 int num_resources; /* aka k */
1230 struct kfmlp_queue *queues; /* array */
1231 struct kfmlp_queue *shortest_queue; /* pointer to shortest queue */
1232};
1233
1234static inline struct kfmlp_semaphore* kfmlp_from_lock(struct litmus_lock* lock)
1235{
1236 return container_of(lock, struct kfmlp_semaphore, litmus_lock);
1237}
1238
1239static inline int kfmlp_get_idx(struct kfmlp_semaphore* sem,
1240 struct kfmlp_queue* queue)
1241{
1242 return (queue - &sem->queues[0]);
1243}
1244
1245static inline struct kfmlp_queue* kfmlp_get_queue(struct kfmlp_semaphore* sem,
1246 struct task_struct* holder)
1247{
1248 int i;
1249 for(i = 0; i < sem->num_resources; ++i)
1250 if(sem->queues[i].owner == holder)
1251 return(&sem->queues[i]);
1252 return(NULL);
1253}
1254
1255/* caller is responsible for locking */
1256static struct task_struct* kfmlp_find_hp_waiter(struct kfmlp_queue *kqueue,
1257 struct task_struct *skip)
1258{
1259 struct list_head *pos;
1260 struct task_struct *queued, *found = NULL;
1261
1262 list_for_each(pos, &kqueue->wait.task_list) {
1263 queued = (struct task_struct*) list_entry(pos, wait_queue_t,
1264 task_list)->private;
1265
1266 /* Compare task prios, find high prio task. */
1267 if (queued != skip && edf_higher_prio(queued, found))
1268 found = queued;
1269 }
1270 return found;
1271}
1272
1273static inline struct kfmlp_queue* kfmlp_find_shortest(
1274 struct kfmlp_semaphore* sem,
1275 struct kfmlp_queue* search_start)
1276{
1277 // we start our search at search_start instead of at the beginning of the
1278 // queue list to load-balance across all resources.
1279 struct kfmlp_queue* step = search_start;
1280 struct kfmlp_queue* shortest = sem->shortest_queue;
1281
1282 do
1283 {
1284 step = (step+1 != &sem->queues[sem->num_resources]) ?
1285 step+1 : &sem->queues[0];
1286 if(step->count < shortest->count)
1287 {
1288 shortest = step;
1289 if(step->count == 0)
1290 break; /* can't get any shorter */
1291 }
1292 }while(step != search_start);
1293
1294 return(shortest);
1295}
1296
1297static struct task_struct* kfmlp_remove_hp_waiter(struct kfmlp_semaphore* sem)
1298{
1299 /* must hold sem->lock */
1300
1301 struct kfmlp_queue *my_queue = NULL;
1302 struct task_struct *max_hp = NULL;
1303
1304
1305 struct list_head *pos;
1306 struct task_struct *queued;
1307 int i;
1308
1309 for(i = 0; i < sem->num_resources; ++i)
1310 {
1311 if( (sem->queues[i].count > 1) &&
1312 ((my_queue == NULL) ||
1313 (edf_higher_prio(sem->queues[i].hp_waiter, my_queue->hp_waiter))) )
1314 {
1315 my_queue = &sem->queues[i];
1316 }
1317 }
1318
1319 if(my_queue)
1320 {
1321 cedf_domain_t* cluster;
1322
1323 max_hp = my_queue->hp_waiter;
1324 BUG_ON(!max_hp);
1325
1326 TRACE_CUR("queue %d: stealing %s/%d from queue %d\n",
1327 kfmlp_get_idx(sem, my_queue),
1328 max_hp->comm, max_hp->pid,
1329 kfmlp_get_idx(sem, my_queue));
1330
1331 my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, max_hp);
1332
1333 /*
1334 if(my_queue->hp_waiter)
1335 TRACE_CUR("queue %d: new hp_waiter is %s/%d\n",
1336 kfmlp_get_idx(sem, my_queue),
1337 my_queue->hp_waiter->comm,
1338 my_queue->hp_waiter->pid);
1339 else
1340 TRACE_CUR("queue %d: new hp_waiter is %p\n",
1341 kfmlp_get_idx(sem, my_queue), NULL);
1342 */
1343
1344 cluster = task_cpu_cluster(max_hp);
1345
1346 raw_spin_lock(&cluster->cluster_lock);
1347
1348 /*
1349 if(my_queue->owner)
1350 TRACE_CUR("queue %d: owner is %s/%d\n",
1351 kfmlp_get_idx(sem, my_queue),
1352 my_queue->owner->comm,
1353 my_queue->owner->pid);
1354 else
1355 TRACE_CUR("queue %d: owner is %p\n",
1356 kfmlp_get_idx(sem, my_queue),
1357 NULL);
1358 */
1359
1360 if(tsk_rt(my_queue->owner)->inh_task == max_hp)
1361 {
1362 __clear_priority_inheritance(my_queue->owner);
1363 if(my_queue->hp_waiter != NULL)
1364 {
1365 __set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
1366 }
1367 }
1368 raw_spin_unlock(&cluster->cluster_lock);
1369
1370 list_for_each(pos, &my_queue->wait.task_list)
1371 {
1372 queued = (struct task_struct*) list_entry(pos, wait_queue_t,
1373 task_list)->private;
1374 /* Compare task prios, find high prio task. */
1375 if (queued == max_hp)
1376 {
1377 /*
1378 TRACE_CUR("queue %d: found entry in wait queue. REMOVING!\n",
1379 kfmlp_get_idx(sem, my_queue));
1380 */
1381 __remove_wait_queue(&my_queue->wait,
1382 list_entry(pos, wait_queue_t, task_list));
1383 break;
1384 }
1385 }
1386 --(my_queue->count);
1387 }
1388
1389 return(max_hp);
1390}
1391
1392int cedf_kfmlp_lock(struct litmus_lock* l)
1393{
1394 struct task_struct* t = current;
1395 struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
1396 struct kfmlp_queue* my_queue;
1397 wait_queue_t wait;
1398 unsigned long flags;
1399
1400 if (!is_realtime(t))
1401 return -EPERM;
1402
1403 spin_lock_irqsave(&sem->lock, flags);
1404
1405 my_queue = sem->shortest_queue;
1406
1407 if (my_queue->owner) {
1408 /* resource is not free => must suspend and wait */
1409 TRACE_CUR("queue %d: Resource is not free => must suspend and wait.\n",
1410 kfmlp_get_idx(sem, my_queue));
1411
1412 init_waitqueue_entry(&wait, t);
1413
1414 /* FIXME: interruptible would be nice some day */
1415 set_task_state(t, TASK_UNINTERRUPTIBLE);
1416
1417 __add_wait_queue_tail_exclusive(&my_queue->wait, &wait);
1418
1419 /* check if we need to activate priority inheritance */
1420 if (edf_higher_prio(t, my_queue->hp_waiter))
1421 {
1422 my_queue->hp_waiter = t;
1423 if (edf_higher_prio(t, my_queue->owner))
1424 {
1425 set_priority_inheritance(my_queue->owner, my_queue->hp_waiter);
1426 }
1427 }
1428
1429 ++(my_queue->count);
1430 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
1431
1432 /* release lock before sleeping */
1433 spin_unlock_irqrestore(&sem->lock, flags);
1434
1435 /* We depend on the FIFO order. Thus, we don't need to recheck
1436 * when we wake up; we are guaranteed to have the lock since
1437 * there is only one wake up per release (or steal).
1438 */
1439 schedule();
1440
1441
1442 if(my_queue->owner == t)
1443 {
1444 TRACE_CUR("queue %d: acquired through waiting\n",
1445 kfmlp_get_idx(sem, my_queue));
1446 }
1447 else
1448 {
1449 /* this case may happen if our wait entry was stolen
1450			   between queues. Record where we went. */
1451 my_queue = kfmlp_get_queue(sem, t);
1452 BUG_ON(!my_queue);
1453 TRACE_CUR("queue %d: acquired through stealing\n",
1454 kfmlp_get_idx(sem, my_queue));
1455 }
1456 }
1457 else
1458 {
1459 TRACE_CUR("queue %d: acquired immediately\n",
1460 kfmlp_get_idx(sem, my_queue));
1461
1462 my_queue->owner = t;
1463
1464 ++(my_queue->count);
1465 sem->shortest_queue = kfmlp_find_shortest(sem, my_queue);
1466
1467 spin_unlock_irqrestore(&sem->lock, flags);
1468 }
1469
1470 return kfmlp_get_idx(sem, my_queue);
1471}
1472
1473int cedf_kfmlp_unlock(struct litmus_lock* l)
1474{
1475 struct task_struct *t = current, *next;
1476 struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
1477 struct kfmlp_queue *my_queue;
1478 unsigned long flags;
1479 int err = 0;
1480
1481 spin_lock_irqsave(&sem->lock, flags);
1482
1483 my_queue = kfmlp_get_queue(sem, t);
1484
1485 if (!my_queue) {
1486 err = -EINVAL;
1487 goto out;
1488 }
1489
1490 /* check if there are jobs waiting for this resource */
1491 next = __waitqueue_remove_first(&my_queue->wait);
1492 if (next) {
1493 /*
1494 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - next\n",
1495 kfmlp_get_idx(sem, my_queue),
1496 next->comm, next->pid);
1497 */
1498		/* next becomes the resource holder */
1499 my_queue->owner = next;
1500
1501 --(my_queue->count);
1502 if(my_queue->count < sem->shortest_queue->count)
1503 {
1504 sem->shortest_queue = my_queue;
1505 }
1506
1507 TRACE_CUR("queue %d: lock ownership passed to %s/%d\n",
1508 kfmlp_get_idx(sem, my_queue), next->comm, next->pid);
1509
1510 /* determine new hp_waiter if necessary */
1511 if (next == my_queue->hp_waiter) {
1512 TRACE_TASK(next, "was highest-prio waiter\n");
1513 /* next has the highest priority --- it doesn't need to
1514 * inherit. However, we need to make sure that the
1515 * next-highest priority in the queue is reflected in
1516 * hp_waiter. */
1517 my_queue->hp_waiter = kfmlp_find_hp_waiter(my_queue, next);
1518 if (my_queue->hp_waiter)
1519 TRACE_TASK(my_queue->hp_waiter, "queue %d: is new highest-prio waiter\n", kfmlp_get_idx(sem, my_queue));
1520 else
1521 TRACE("queue %d: no further waiters\n", kfmlp_get_idx(sem, my_queue));
1522 } else {
1523 /* Well, if next is not the highest-priority waiter,
1524 * then it ought to inherit the highest-priority
1525 * waiter's priority. */
1526 set_priority_inheritance(next, my_queue->hp_waiter);
1527 }
1528
1529 /* wake up next */
1530 wake_up_process(next);
1531 }
1532 else
1533 {
1534 TRACE_CUR("queue %d: looking to steal someone...\n", kfmlp_get_idx(sem, my_queue));
1535
1536 next = kfmlp_remove_hp_waiter(sem); /* returns NULL if nothing to steal */
1537
1538 /*
1539 if(next)
1540 TRACE_CUR("queue %d: ASSIGNING %s/%d as owner - steal\n",
1541 kfmlp_get_idx(sem, my_queue),
1542 next->comm, next->pid);
1543 */
1544
1545 my_queue->owner = next;
1546
1547 if(next)
1548 {
1549 TRACE_CUR("queue %d: lock ownership passed to %s/%d (which was stolen)\n",
1550 kfmlp_get_idx(sem, my_queue),
1551 next->comm, next->pid);
1552
1553 /* wake up next */
1554 wake_up_process(next);
1555 }
1556 else
1557 {
1558 TRACE_CUR("queue %d: no one to steal.\n", kfmlp_get_idx(sem, my_queue));
1559
1560 --(my_queue->count);
1561 if(my_queue->count < sem->shortest_queue->count)
1562 {
1563 sem->shortest_queue = my_queue;
1564 }
1565 }
1566 }
1567
1568 /* we lose the benefit of priority inheritance (if any) */
1569 if (tsk_rt(t)->inh_task)
1570 clear_priority_inheritance(t);
1571
1572out:
1573 spin_unlock_irqrestore(&sem->lock, flags);
1574
1575 return err;
1576}
1577
1578int cedf_kfmlp_close(struct litmus_lock* l)
1579{
1580 struct task_struct *t = current;
1581 struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
1582 struct kfmlp_queue *my_queue;
1583 unsigned long flags;
1584
1585 int owner;
1586
1587 spin_lock_irqsave(&sem->lock, flags);
1588
1589 my_queue = kfmlp_get_queue(sem, t);
1590 owner = (my_queue) ? (my_queue->owner == t) : 0;
1591
1592 spin_unlock_irqrestore(&sem->lock, flags);
1593
1594 if (owner)
1595 cedf_kfmlp_unlock(l);
1596
1597 return 0;
1598}
1599
1600void cedf_kfmlp_free(struct litmus_lock* l)
1601{
1602 struct kfmlp_semaphore *sem = kfmlp_from_lock(l);
1603 kfree(sem->queues);
1604 kfree(sem);
1605}
1606
1607static struct litmus_lock_ops cedf_kfmlp_lock_ops = {
1608 .close = cedf_kfmlp_close,
1609 .lock = cedf_kfmlp_lock,
1610 .unlock = cedf_kfmlp_unlock,
1611 .deallocate = cedf_kfmlp_free,
1612};
1613
1614static struct litmus_lock* cedf_new_kfmlp(void* __user arg, int* ret_code)
1615{
1616 struct kfmlp_semaphore* sem;
1617 int num_resources = 0;
1618 int i;
1619
1620 if(!access_ok(VERIFY_READ, arg, sizeof(num_resources)))
1621 {
1622 *ret_code = -EINVAL;
1623 return(NULL);
1624 }
1625 if(__copy_from_user(&num_resources, arg, sizeof(num_resources)))
1626 {
1627 *ret_code = -EINVAL;
1628 return(NULL);
1629 }
1630 if(num_resources < 1)
1631 {
1632 *ret_code = -EINVAL;
1633 return(NULL);
1634 }
1635
1636 sem = kmalloc(sizeof(*sem), GFP_KERNEL);
1637 if(!sem)
1638 {
1639 *ret_code = -ENOMEM;
1640 return NULL;
1641 }
1642
1643 sem->queues = kmalloc(sizeof(struct kfmlp_queue)*num_resources, GFP_KERNEL);
1644 if(!sem->queues)
1645 {
1646 kfree(sem);
1647 *ret_code = -ENOMEM;
1648 return NULL;
1649 }
1650
1651 sem->litmus_lock.ops = &cedf_kfmlp_lock_ops;
1652 spin_lock_init(&sem->lock);
1653 sem->num_resources = num_resources;
1654
1655 for(i = 0; i < num_resources; ++i)
1656 {
1657 sem->queues[i].owner = NULL;
1658 sem->queues[i].hp_waiter = NULL;
1659 init_waitqueue_head(&sem->queues[i].wait);
1660 sem->queues[i].count = 0;
1661 }
1662
1663 sem->shortest_queue = &sem->queues[0];
1664
1665 *ret_code = 0;
1666 return &sem->litmus_lock;
1667}
1668
1669
1670/* **** lock constructor **** */
1671
1672static long cedf_allocate_lock(struct litmus_lock **lock, int type,
1673 void* __user arg)
1674{
1675 int err = -ENXIO;
1676
1677	/* C-EDF currently only supports the KFMLP for global resources
1678 WITHIN a given cluster. DO NOT USE CROSS-CLUSTER! */
1679 switch (type) {
1680 case KFMLP_SEM:
1681 *lock = cedf_new_kfmlp(arg, &err);
1682 break;
1683 };
1684
1685 return err;
1686}
1687
1688#endif // CONFIG_LITMUS_LOCKING
1689
1690
1691
1692
1693
1694
663/* total number of cluster */ 1695/* total number of cluster */
664static int num_clusters; 1696static int num_clusters;
665/* we do not support cluster of different sizes */ 1697/* we do not support cluster of different sizes */
@@ -749,6 +1781,13 @@ static long cedf_activate_plugin(void)
749 INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio); 1781 INIT_BINHEAP_HANDLE(&(cedf[i].cpu_heap), cpu_lower_prio);
750 edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs); 1782 edf_domain_init(&(cedf[i].domain), NULL, cedf_release_jobs);
751 1783
1784
1785#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1786 cedf[i].pending_tasklets.head = NULL;
1787 cedf[i].pending_tasklets.tail = &(cedf[i].pending_tasklets.head);
1788#endif
1789
1790
752 if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC)) 1791 if(!zalloc_cpumask_var(&cedf[i].cpu_map, GFP_ATOMIC))
753 return -ENOMEM; 1792 return -ENOMEM;
754#ifdef CONFIG_RELEASE_MASTER 1793#ifdef CONFIG_RELEASE_MASTER
@@ -807,6 +1846,40 @@ static long cedf_activate_plugin(void)
807 break; 1846 break;
808 } 1847 }
809 } 1848 }
1849
1850#ifdef CONFIG_LITMUS_SOFTIRQD
1851 {
1852 /* distribute the daemons evenly across the clusters. */
1853 int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC);
1854 int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters;
1855 int left_over = NR_LITMUS_SOFTIRQD % num_clusters;
1856
1857 int daemon = 0;
1858 for(i = 0; i < num_clusters; ++i)
1859 {
1860 int num_on_this_cluster = num_daemons_per_cluster;
1861 if(left_over)
1862 {
1863 ++num_on_this_cluster;
1864 --left_over;
1865 }
1866
1867 for(j = 0; j < num_on_this_cluster; ++j)
1868 {
1869 // first CPU of this cluster
1870 affinity[daemon++] = i*cluster_size;
1871 }
1872 }
1873
1874 spawn_klitirqd(affinity);
1875
1876 kfree(affinity);
1877 }
1878#endif
1879
1880#ifdef CONFIG_LITMUS_NVIDIA
1881 init_nvidia_info();
1882#endif
810 1883
811 free_cpumask_var(mask); 1884 free_cpumask_var(mask);
812 clusters_allocated = 1; 1885 clusters_allocated = 1;
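As a side note on the klitirqd affinity block in the hunk above: NR_LITMUS_SOFTIRQD daemons are split as evenly as possible across num_clusters clusters, with the first NR_LITMUS_SOFTIRQD % num_clusters clusters receiving one extra daemon, and every daemon pinned to the first CPU of its cluster. A standalone sketch of just that arithmetic (NR_DAEMONS, NUM_CLUSTERS and CLUSTER_SIZE are made-up stand-ins for NR_LITMUS_SOFTIRQD, num_clusters and cluster_size):

#include <stdio.h>

#define NR_DAEMONS   5
#define NUM_CLUSTERS 2
#define CLUSTER_SIZE 4

int main(void)
{
	int affinity[NR_DAEMONS];
	int per_cluster = NR_DAEMONS / NUM_CLUSTERS;
	int left_over   = NR_DAEMONS % NUM_CLUSTERS;
	int daemon = 0, i, j;

	for (i = 0; i < NUM_CLUSTERS; ++i) {
		/* the first 'left_over' clusters absorb one extra daemon each */
		int on_this_cluster = per_cluster + (left_over ? (--left_over, 1) : 0);
		for (j = 0; j < on_this_cluster; ++j)
			affinity[daemon++] = i * CLUSTER_SIZE;  /* first CPU of cluster i */
	}

	for (i = 0; i < NR_DAEMONS; ++i)
		printf("daemon %d -> CPU %d\n", i, affinity[i]);
	return 0;
}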
@@ -826,6 +1899,19 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = {
826 .task_block = cedf_task_block, 1899 .task_block = cedf_task_block,
827 .admit_task = cedf_admit_task, 1900 .admit_task = cedf_admit_task,
828 .activate_plugin = cedf_activate_plugin, 1901 .activate_plugin = cedf_activate_plugin,
1902#ifdef CONFIG_LITMUS_LOCKING
1903 .allocate_lock = cedf_allocate_lock,
1904 .set_prio_inh = set_priority_inheritance,
1905 .clear_prio_inh = clear_priority_inheritance,
1906#endif
1907#ifdef CONFIG_LITMUS_SOFTIRQD
1908 .set_prio_inh_klitirqd = set_priority_inheritance_klitirqd,
1909 .clear_prio_inh_klitirqd = clear_priority_inheritance_klitirqd,
1910#endif
1911#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1912 .enqueue_pai_tasklet = enqueue_pai_tasklet,
1913 .run_tasklets = run_tasklets,
1914#endif
829}; 1915};
830 1916
831static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL; 1917static struct proc_dir_entry *cluster_file = NULL, *cedf_dir = NULL;
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index 59236d007fd8..e009b7f34aca 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -12,6 +12,8 @@
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/uaccess.h>
16
15 17
16#ifdef CONFIG_LITMUS_NESTED_LOCKING 18#ifdef CONFIG_LITMUS_NESTED_LOCKING
17#include <linux/uaccess.h> 19#include <linux/uaccess.h>
@@ -29,6 +31,8 @@
29#include <litmus/bheap.h> 31#include <litmus/bheap.h>
30#include <litmus/binheap.h> 32#include <litmus/binheap.h>
31 33
34#include <litmus/kfmlp_lock.h>
35
32#ifdef CONFIG_LITMUS_NESTED_LOCKING 36#ifdef CONFIG_LITMUS_NESTED_LOCKING
33#include <litmus/rsm_lock.h> 37#include <litmus/rsm_lock.h>
34#include <litmus/ikglp_lock.h> 38#include <litmus/ikglp_lock.h>
@@ -40,6 +44,24 @@
40 44
41#include <linux/module.h> 45#include <linux/module.h>
42 46
47#ifdef CONFIG_SCHED_CPU_AFFINITY
48#include <litmus/affinity.h>
49#endif
50
51#ifdef CONFIG_LITMUS_SOFTIRQD
52#include <litmus/litmus_softirq.h>
53#endif
54
55#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
56#include <linux/interrupt.h>
57#include <litmus/trace.h>
58#endif
59
60#ifdef CONFIG_LITMUS_NVIDIA
61#include <litmus/nvidia_info.h>
62#endif
63
64
43/* Overview of GSN-EDF operations. 65/* Overview of GSN-EDF operations.
44 * 66 *
45 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This 67 * For a detailed explanation of GSN-EDF have a look at the FMLP paper. This
@@ -134,6 +156,17 @@ static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t)
134} 156}
135#endif 157#endif
136 158
159#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
160struct tasklet_head
161{
162 struct tasklet_struct *head;
163 struct tasklet_struct **tail;
164};
165
166struct tasklet_head gsnedf_pending_tasklets;
167#endif
168
169
137/* Uncomment this if you want to see all scheduling decisions in the 170/* Uncomment this if you want to see all scheduling decisions in the
138 * TRACE() log. 171 * TRACE() log.
139#define WANT_ALL_SCHED_EVENTS 172#define WANT_ALL_SCHED_EVENTS
@@ -330,7 +363,7 @@ static void check_for_preemptions(void)
330static noinline void gsnedf_job_arrival(struct task_struct* task) 363static noinline void gsnedf_job_arrival(struct task_struct* task)
331{ 364{
332 BUG_ON(!task); 365 BUG_ON(!task);
333 366
334 requeue(task); 367 requeue(task);
335 check_for_preemptions(); 368 check_for_preemptions();
336} 369}
@@ -351,9 +384,13 @@ static void gsnedf_release_jobs(rt_domain_t* rt, struct bheap* tasks)
351static noinline void job_completion(struct task_struct *t, int forced) 384static noinline void job_completion(struct task_struct *t, int forced)
352{ 385{
353 BUG_ON(!t); 386 BUG_ON(!t);
354 387
355 sched_trace_task_completion(t, forced); 388 sched_trace_task_completion(t, forced);
356 389
390#ifdef CONFIG_LITMUS_NVIDIA
391 atomic_set(&tsk_rt(t)->nv_int_count, 0);
392#endif
393
357 TRACE_TASK(t, "job_completion().\n"); 394 TRACE_TASK(t, "job_completion().\n");
358 395
359 /* set flags */ 396 /* set flags */
@@ -396,6 +433,268 @@ static void gsnedf_tick(struct task_struct* t)
396 } 433 }
397} 434}
398 435
436
437
438
439
440
441
442
443
444
445
446
447#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
448
449
450static void __do_lit_tasklet(struct tasklet_struct* tasklet, unsigned long flushed)
451{
452 if (!atomic_read(&tasklet->count)) {
453 sched_trace_tasklet_begin(tasklet->owner);
454
455 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state))
456 {
457 BUG();
458 }
459		TRACE("%s: Invoking tasklet with owner pid = %d (flushed = %lu).\n", __FUNCTION__, tasklet->owner->pid, flushed);
460 tasklet->func(tasklet->data);
461 tasklet_unlock(tasklet);
462
463 sched_trace_tasklet_end(tasklet->owner, flushed);
464 }
465 else {
466 BUG();
467 }
468}
469
470
471static void flush_tasklets(struct task_struct* task)
472{
473 // lazy flushing.
474	// just change ownership to NULL and let the idle processor
475 // take care of it. :P
476
477 struct tasklet_struct* step;
478 unsigned long flags;
479
480 raw_spin_lock_irqsave(&gsnedf_lock, flags);
481 for(step = gsnedf_pending_tasklets.head; step != NULL; step = step->next) {
482 if(step->owner == task) {
483 TRACE("%s: Found tasklet to flush: %d\n", __FUNCTION__, step->owner->pid);
484 step->owner = NULL;
485 }
486 }
487 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
488}
489
490
491static void do_lit_tasklets(struct task_struct* sched_task)
492{
493 int work_to_do = 1;
494 struct tasklet_struct *tasklet = NULL;
495 //struct tasklet_struct *step;
496 unsigned long flags;
497
498 while(work_to_do) {
499
500 TS_NV_SCHED_BOTISR_START;
501
502 // remove tasklet at head of list if it has higher priority.
503 raw_spin_lock_irqsave(&gsnedf_lock, flags);
504
505 if(gsnedf_pending_tasklets.head != NULL) {
506 // remove tasklet at head.
507 tasklet = gsnedf_pending_tasklets.head;
508
509 if(edf_higher_prio(tasklet->owner, sched_task)) {
510
511 if(NULL == tasklet->next) {
512 // tasklet is at the head, list only has one element
513 TRACE("%s: Tasklet for %d is the last element in tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
514 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
515 }
516
517 // remove the tasklet from the queue
518 gsnedf_pending_tasklets.head = tasklet->next;
519
520 TRACE("%s: Removed tasklet for %d from tasklet queue.\n", __FUNCTION__, tasklet->owner->pid);
521 }
522 else {
523 TRACE("%s: Pending tasklet (%d) does not have priority to run on this CPU (%d).\n", __FUNCTION__, tasklet->owner->pid, smp_processor_id());
524 tasklet = NULL;
525 }
526 }
527 else {
528 TRACE("%s: Tasklet queue is empty.\n", __FUNCTION__);
529 }
530
531 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
532
533 TS_NV_SCHED_BOTISR_END;
534
535 if(tasklet) {
536 __do_lit_tasklet(tasklet, 0ul);
537 tasklet = NULL;
538 }
539 else {
540 work_to_do = 0;
541 }
542 }
543
544 //TRACE("%s: exited.\n", __FUNCTION__);
545}
546
547
548static void run_tasklets(struct task_struct* sched_task)
549{
550 preempt_disable();
551
552 if(gsnedf_pending_tasklets.head != NULL) {
553 TRACE("%s: There are tasklets to process.\n", __FUNCTION__);
554 do_lit_tasklets(sched_task);
555 }
556
557 preempt_enable_no_resched();
558}
559
560
561static void __add_pai_tasklet(struct tasklet_struct* tasklet)
562{
563 struct tasklet_struct* step;
564
565 tasklet->next = NULL; // make sure there are no old values floating around
566
567 step = gsnedf_pending_tasklets.head;
568 if(step == NULL) {
569 TRACE("%s: tasklet queue empty. inserting tasklet for %d at head.\n", __FUNCTION__, tasklet->owner->pid);
570 // insert at tail.
571 *(gsnedf_pending_tasklets.tail) = tasklet;
572 gsnedf_pending_tasklets.tail = &(tasklet->next);
573 }
574 else if((*(gsnedf_pending_tasklets.tail) != NULL) &&
575 edf_higher_prio((*(gsnedf_pending_tasklets.tail))->owner, tasklet->owner)) {
576 // insert at tail.
577 TRACE("%s: tasklet belongs at end. inserting tasklet for %d at tail.\n", __FUNCTION__, tasklet->owner->pid);
578
579 *(gsnedf_pending_tasklets.tail) = tasklet;
580 gsnedf_pending_tasklets.tail = &(tasklet->next);
581 }
582 else {
583 // insert the tasklet somewhere in the middle.
584
585 TRACE("%s: tasklet belongs somewhere in the middle.\n", __FUNCTION__);
586
587 while(step->next && edf_higher_prio(step->next->owner, tasklet->owner)) {
588 step = step->next;
589 }
590
591 // insert tasklet right before step->next.
592
593 TRACE("%s: inserting tasklet for %d between %d and %d.\n", __FUNCTION__, tasklet->owner->pid, step->owner->pid, (step->next) ? step->next->owner->pid : -1);
594
595 tasklet->next = step->next;
596 step->next = tasklet;
597
598 // patch up the head if needed.
599 if(gsnedf_pending_tasklets.head == step)
600 {
601 TRACE("%s: %d is the new tasklet queue head.\n", __FUNCTION__, tasklet->owner->pid);
602 gsnedf_pending_tasklets.head = tasklet;
603 }
604 }
605}
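__add_pai_tasklet() keeps the pending list in EDF order with a head pointer plus a tail pointer-to-pointer, so appends never have to walk the list. The userspace sketch below captures the same ordering invariant with a simpler single-loop insert (struct node, an integer prio standing in for edf_higher_prio(), and the list names are illustrative only, not the kernel structure):

#include <stdio.h>
#include <stddef.h>

struct node {
	int prio;
	struct node *next;
};

static struct node *head = NULL;
static struct node **tail = &head;     /* lets an empty-list append stay O(1) */

static void ordered_insert(struct node *n)
{
	struct node **pos = &head;

	n->next = NULL;
	while (*pos && (*pos)->prio >= n->prio)   /* keep descending order; ties stay FIFO */
		pos = &(*pos)->next;
	n->next = *pos;
	*pos = n;
	if (n->next == NULL)                      /* n became the last element */
		tail = &n->next;
}

int main(void)
{
	struct node a = { 3, NULL }, b = { 5, NULL }, c = { 1, NULL };
	struct node *it;

	ordered_insert(&a);
	ordered_insert(&b);
	ordered_insert(&c);
	for (it = head; it; it = it->next)
		printf("%d ", it->prio);   /* prints: 5 3 1 */
	printf("\n");
	return 0;
}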
606
607static int enqueue_pai_tasklet(struct tasklet_struct* tasklet)
608{
609 cpu_entry_t *targetCPU = NULL;
610 int thisCPU;
611 int runLocal = 0;
612 int runNow = 0;
613 unsigned long flags;
614
615 if(unlikely((tasklet->owner == NULL) || !is_realtime(tasklet->owner)))
616 {
617 TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__);
618 return 0;
619 }
620
621
622 raw_spin_lock_irqsave(&gsnedf_lock, flags);
623
624 thisCPU = smp_processor_id();
625
626#ifdef CONFIG_SCHED_CPU_AFFINITY
627 {
628 cpu_entry_t* affinity = NULL;
629
630 // use this CPU if it is in our cluster and isn't running any RT work.
631 if(
632#ifdef CONFIG_RELEASE_MASTER
633 (thisCPU != gsnedf.release_master) &&
634#endif
635 (__get_cpu_var(gsnedf_cpu_entries).linked == NULL)) {
636 affinity = &(__get_cpu_var(gsnedf_cpu_entries));
637 }
638 else {
639 // this CPU is busy or shouldn't run tasklet in this cluster.
640 // look for available near by CPUs.
641 // NOTE: Affinity towards owner and not this CPU. Is this right?
642 affinity =
643 gsnedf_get_nearest_available_cpu(
644 &per_cpu(gsnedf_cpu_entries, task_cpu(tasklet->owner)));
645 }
646
647 targetCPU = affinity;
648 }
649#endif
650
651 if (targetCPU == NULL) {
652 targetCPU = lowest_prio_cpu();
653 }
654
655 if (edf_higher_prio(tasklet->owner, targetCPU->linked)) {
656 if (thisCPU == targetCPU->cpu) {
657 TRACE("%s: Run tasklet locally (and now).\n", __FUNCTION__);
658 runLocal = 1;
659 runNow = 1;
660 }
661 else {
662 TRACE("%s: Run tasklet remotely (and now).\n", __FUNCTION__);
663 runLocal = 0;
664 runNow = 1;
665 }
666 }
667 else {
668 runLocal = 0;
669 runNow = 0;
670 }
671
672 if(!runLocal) {
673 // enqueue the tasklet
674 __add_pai_tasklet(tasklet);
675 }
676
677 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
678
679
680 if (runLocal /*&& runNow */) { // runNow == 1 is implied
681 TRACE("%s: Running tasklet on CPU where it was received.\n", __FUNCTION__);
682 __do_lit_tasklet(tasklet, 0ul);
683 }
684 else if (runNow /*&& !runLocal */) { // runLocal == 0 is implied
685 TRACE("%s: Triggering CPU %d to run tasklet.\n", __FUNCTION__, targetCPU->cpu);
686 preempt(targetCPU); // need to be protected by cedf_lock?
687 }
688 else {
689 TRACE("%s: Scheduling of tasklet was deferred.\n", __FUNCTION__);
690 }
691
692 return(1); // success
693}
694
695#endif // end PAI
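To summarize the dispatch policy of enqueue_pai_tasklet(): a tasklet is executed immediately only if its owner outranks whatever is linked on the chosen CPU, and it is executed locally only if that CPU is the current one; otherwise it stays queued for run_tasklets(). A small decision-table sketch (decide(), the enum, and the boolean parameters are invented names standing in for edf_higher_prio(owner, targetCPU->linked) and thisCPU == targetCPU->cpu):

#include <stdio.h>

enum pai_action { PAI_DEFER, PAI_RUN_REMOTE, PAI_RUN_LOCAL };

static enum pai_action decide(int owner_outranks_target, int on_target_cpu)
{
	if (!owner_outranks_target)
		return PAI_DEFER;   /* queue it; a later scheduling pass picks it up */
	return on_target_cpu ? PAI_RUN_LOCAL : PAI_RUN_REMOTE;
}

int main(void)
{
	printf("%d %d %d\n",
	       decide(0, 0),    /* 0: deferred */
	       decide(1, 0),    /* 1: run now on the remote target CPU */
	       decide(1, 1));   /* 2: run now, locally */
	return 0;
}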
696
697
399/* Getting schedule() right is a bit tricky. schedule() may not make any 698/* Getting schedule() right is a bit tricky. schedule() may not make any
400 * assumptions on the state of the current task since it may be called for a 699 * assumptions on the state of the current task since it may be called for a
401 * number of reasons. The reasons include a scheduler_tick() determined that it 700 * number of reasons. The reasons include a scheduler_tick() determined that it
@@ -454,17 +753,19 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
454 TRACE_TASK(prev, "invoked gsnedf_schedule.\n"); 753 TRACE_TASK(prev, "invoked gsnedf_schedule.\n");
455#endif 754#endif
456 755
756 /*
457 if (exists) 757 if (exists)
458 TRACE_TASK(prev, 758 TRACE_TASK(prev,
459 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d " 759 "blocks:%d out_of_time:%d np:%d sleep:%d preempt:%d "
460 "state:%d sig:%d\n", 760 "state:%d sig:%d\n",
461 blocks, out_of_time, np, sleep, preempt, 761 blocks, out_of_time, np, sleep, preempt,
462 prev->state, signal_pending(prev)); 762 prev->state, signal_pending(prev));
763 */
764
463 if (entry->linked && preempt) 765 if (entry->linked && preempt)
464 TRACE_TASK(prev, "will be preempted by %s/%d\n", 766 TRACE_TASK(prev, "will be preempted by %s/%d\n",
465 entry->linked->comm, entry->linked->pid); 767 entry->linked->comm, entry->linked->pid);
466 768
467
468 /* If a task blocks we have no choice but to reschedule. 769 /* If a task blocks we have no choice but to reschedule.
469 */ 770 */
470 if (blocks) 771 if (blocks)
@@ -509,12 +810,15 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
509 entry->scheduled->rt_param.scheduled_on = NO_CPU; 810 entry->scheduled->rt_param.scheduled_on = NO_CPU;
510 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n"); 811 TRACE_TASK(entry->scheduled, "scheduled_on = NO_CPU\n");
511 } 812 }
512 } else 813 }
814 else
815 {
513 /* Only override Linux scheduler if we have a real-time task 816 /* Only override Linux scheduler if we have a real-time task
514 * scheduled that needs to continue. 817 * scheduled that needs to continue.
515 */ 818 */
516 if (exists) 819 if (exists)
517 next = prev; 820 next = prev;
821 }
518 822
519 sched_state_task_picked(); 823 sched_state_task_picked();
520 824
@@ -539,8 +843,9 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev)
539static void gsnedf_finish_switch(struct task_struct *prev) 843static void gsnedf_finish_switch(struct task_struct *prev)
540{ 844{
541 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries); 845 cpu_entry_t* entry = &__get_cpu_var(gsnedf_cpu_entries);
542 846
543 entry->scheduled = is_realtime(current) ? current : NULL; 847 entry->scheduled = is_realtime(current) ? current : NULL;
848
544#ifdef WANT_ALL_SCHED_EVENTS 849#ifdef WANT_ALL_SCHED_EVENTS
545 TRACE_TASK(prev, "switched away from\n"); 850 TRACE_TASK(prev, "switched away from\n");
546#endif 851#endif
@@ -589,11 +894,14 @@ static void gsnedf_task_new(struct task_struct * t, int on_rq, int running)
589static void gsnedf_task_wake_up(struct task_struct *task) 894static void gsnedf_task_wake_up(struct task_struct *task)
590{ 895{
591 unsigned long flags; 896 unsigned long flags;
592 lt_t now; 897 //lt_t now;
593 898
594 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock()); 899 TRACE_TASK(task, "wake_up at %llu\n", litmus_clock());
595 900
596 raw_spin_lock_irqsave(&gsnedf_lock, flags); 901 raw_spin_lock_irqsave(&gsnedf_lock, flags);
902
903
904#if 0 // sporadic task model
597 /* We need to take suspensions because of semaphores into 905 /* We need to take suspensions because of semaphores into
598 * account! If a job resumes after being suspended due to acquiring 906 * account! If a job resumes after being suspended due to acquiring
599 * a semaphore, it should never be treated as a new job release. 907 * a semaphore, it should never be treated as a new job release.
@@ -615,19 +923,26 @@ static void gsnedf_task_wake_up(struct task_struct *task)
615 } 923 }
616 } 924 }
617 } 925 }
926#else // periodic task model
927 set_rt_flags(task, RT_F_RUNNING);
928#endif
929
618 gsnedf_job_arrival(task); 930 gsnedf_job_arrival(task);
619 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 931 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
620} 932}
621 933
622static void gsnedf_task_block(struct task_struct *t) 934static void gsnedf_task_block(struct task_struct *t)
623{ 935{
936 // TODO: is this called on preemption??
624 unsigned long flags; 937 unsigned long flags;
625 938
626 TRACE_TASK(t, "block at %llu\n", litmus_clock()); 939 TRACE_TASK(t, "block at %llu\n", litmus_clock());
627 940
628 /* unlink if necessary */ 941 /* unlink if necessary */
629 raw_spin_lock_irqsave(&gsnedf_lock, flags); 942 raw_spin_lock_irqsave(&gsnedf_lock, flags);
943
630 unlink(t); 944 unlink(t);
945
631 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 946 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
632 947
633 BUG_ON(!is_realtime(t)); 948 BUG_ON(!is_realtime(t));
@@ -638,6 +953,10 @@ static void gsnedf_task_exit(struct task_struct * t)
638{ 953{
639 unsigned long flags; 954 unsigned long flags;
640 955
956#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
957 flush_tasklets(t);
958#endif
959
641 /* unlink if necessary */ 960 /* unlink if necessary */
642 raw_spin_lock_irqsave(&gsnedf_lock, flags); 961 raw_spin_lock_irqsave(&gsnedf_lock, flags);
643 unlink(t); 962 unlink(t);
@@ -646,7 +965,7 @@ static void gsnedf_task_exit(struct task_struct * t)
646 tsk_rt(t)->scheduled_on = NO_CPU; 965 tsk_rt(t)->scheduled_on = NO_CPU;
647 } 966 }
648 raw_spin_unlock_irqrestore(&gsnedf_lock, flags); 967 raw_spin_unlock_irqrestore(&gsnedf_lock, flags);
649 968
650 BUG_ON(!is_realtime(t)); 969 BUG_ON(!is_realtime(t));
651 TRACE_TASK(t, "RIP\n"); 970 TRACE_TASK(t, "RIP\n");
652} 971}
@@ -672,14 +991,12 @@ static long gsnedf_admit_task(struct task_struct* tsk)
672#include <litmus/fdso.h> 991#include <litmus/fdso.h>
673 992
674/* called with IRQs off */ 993/* called with IRQs off */
675static void increase_priority_inheritance(struct task_struct* t, 994static void __increase_priority_inheritance(struct task_struct* t,
676 struct task_struct* prio_inh) 995 struct task_struct* prio_inh)
677{ 996{
678 int linked_on; 997 int linked_on;
679 int check_preempt = 0; 998 int check_preempt = 0;
680 999
681 raw_spin_lock(&gsnedf_lock);
682
683#ifdef CONFIG_LITMUS_NESTED_LOCKING 1000#ifdef CONFIG_LITMUS_NESTED_LOCKING
684 /* this sanity check allows for weaker locking in protocols */ 1001 /* this sanity check allows for weaker locking in protocols */
685 if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { 1002 if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) {
@@ -750,16 +1067,33 @@ static void increase_priority_inheritance(struct task_struct* t,
750 prio_inh->comm, prio_inh->pid); 1067 prio_inh->comm, prio_inh->pid);
751 } 1068 }
752#endif 1069#endif
753
754 raw_spin_unlock(&gsnedf_lock);
755} 1070}
756 1071
757/* called with IRQs off */ 1072/* called with IRQs off */
758static void decrease_priority_inheritance(struct task_struct* t, 1073static void increase_priority_inheritance(struct task_struct* t, struct task_struct* prio_inh)
759 struct task_struct* prio_inh)
760{ 1074{
761 raw_spin_lock(&gsnedf_lock); 1075 raw_spin_lock(&gsnedf_lock);
762 1076
1077 __increase_priority_inheritance(t, prio_inh);
1078
1079#ifdef CONFIG_LITMUS_SOFTIRQD
1080 if(tsk_rt(t)->cur_klitirqd != NULL)
1081 {
1082 TRACE_TASK(t, "%s/%d inherits a new priority!\n",
1083 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1084
1085 __increase_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
1086 }
1087#endif
1088
1089 raw_spin_unlock(&gsnedf_lock);
1090}
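The split above between __increase_priority_inheritance() and its locking wrapper follows a common pattern: the double-underscore helper assumes gsnedf_lock is already held, while the public entry point takes the lock once and, under that same critical section, propagates the change to the task's klitirqd proxy thread. A small pthread-based sketch of the wrapper pattern (fake_task, raise_prio() and the mutex are invented for the illustration; this is not the scheduler's data model):

#include <pthread.h>
#include <stdio.h>

struct fake_task {
	int eff_prio;
	struct fake_task *proxy;   /* stands in for tsk_rt(t)->cur_klitirqd */
};

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold sched_lock, mirroring the __-prefixed helpers */
static void __raise_prio(struct fake_task *t, int prio)
{
	if (prio > t->eff_prio)
		t->eff_prio = prio;
}

static void raise_prio(struct fake_task *t, int prio)
{
	pthread_mutex_lock(&sched_lock);
	__raise_prio(t, prio);
	if (t->proxy)                      /* propagate to the proxy under the same lock hold */
		__raise_prio(t->proxy, prio);
	pthread_mutex_unlock(&sched_lock);
}

int main(void)
{
	struct fake_task irqd = { 1, NULL };
	struct fake_task task = { 2, &irqd };

	raise_prio(&task, 7);
	printf("task=%d proxy=%d\n", task.eff_prio, irqd.eff_prio);   /* task=7 proxy=7 */
	return 0;
}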
1091
1092
1093/* called with IRQs off */
1094static void __decrease_priority_inheritance(struct task_struct* t,
1095 struct task_struct* prio_inh)
1096{
763#ifdef CONFIG_LITMUS_NESTED_LOCKING 1097#ifdef CONFIG_LITMUS_NESTED_LOCKING
764 if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) { 1098 if(__edf_higher_prio(t, EFFECTIVE, prio_inh, BASE)) {
765#endif 1099#endif
@@ -808,9 +1142,81 @@ static void decrease_priority_inheritance(struct task_struct* t,
808 (prio_inh) ? prio_inh->pid : -1); 1142 (prio_inh) ? prio_inh->pid : -1);
809 } 1143 }
810#endif 1144#endif
1145}
1146
1147static void decrease_priority_inheritance(struct task_struct* t,
1148 struct task_struct* prio_inh)
1149{
1150 raw_spin_lock(&gsnedf_lock);
1151 __decrease_priority_inheritance(t, prio_inh);
1152
1153#ifdef CONFIG_LITMUS_SOFTIRQD
1154 if(tsk_rt(t)->cur_klitirqd != NULL)
1155 {
1156 TRACE_TASK(t, "%s/%d decreases in priority!\n",
1157 tsk_rt(t)->cur_klitirqd->comm, tsk_rt(t)->cur_klitirqd->pid);
1158
1159 __decrease_priority_inheritance(tsk_rt(t)->cur_klitirqd, prio_inh);
1160 }
1161#endif
1162
1163 raw_spin_unlock(&gsnedf_lock);
1164}
811 1165
1166
1167#ifdef CONFIG_LITMUS_SOFTIRQD
1168/* called with IRQs off */
1169static void increase_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1170 struct task_struct* old_owner,
1171 struct task_struct* new_owner)
1172{
1173 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1174
1175 raw_spin_lock(&gsnedf_lock);
1176
1177 if(old_owner != new_owner)
1178 {
1179 if(old_owner)
1180 {
1181 // unreachable?
1182 tsk_rt(old_owner)->cur_klitirqd = NULL;
1183 }
1184
1185 TRACE_TASK(klitirqd, "giving ownership to %s/%d.\n",
1186 new_owner->comm, new_owner->pid);
1187
1188 tsk_rt(new_owner)->cur_klitirqd = klitirqd;
1189 }
1190
1191 __decrease_priority_inheritance(klitirqd, NULL); // kludge to clear out cur prio.
1192
1193 __increase_priority_inheritance(klitirqd,
1194 (tsk_rt(new_owner)->inh_task == NULL) ?
1195 new_owner :
1196 tsk_rt(new_owner)->inh_task);
1197
1198 raw_spin_unlock(&gsnedf_lock);
1199}
1200
1201
1202/* called with IRQs off */
1203static void decrease_priority_inheritance_klitirqd(struct task_struct* klitirqd,
1204 struct task_struct* old_owner,
1205 struct task_struct* new_owner)
1206{
1207 BUG_ON(!(tsk_rt(klitirqd)->is_proxy_thread));
1208
1209 raw_spin_lock(&gsnedf_lock);
1210
1211 TRACE_TASK(klitirqd, "priority restored\n");
1212
1213 __decrease_priority_inheritance(klitirqd, new_owner);
1214
1215 tsk_rt(old_owner)->cur_klitirqd = NULL;
1216
812 raw_spin_unlock(&gsnedf_lock); 1217 raw_spin_unlock(&gsnedf_lock);
813} 1218}
1219#endif
814 1220
815 1221
816 1222
@@ -940,6 +1346,28 @@ static struct litmus_lock* gsnedf_new_ikglp(void* __user arg)
940#endif /* CONFIG_LITMUS_NESTED_LOCKING */ 1346#endif /* CONFIG_LITMUS_NESTED_LOCKING */
941 1347
942 1348
1349/* ******************** KFMLP support ********************** */
1350
1351static struct litmus_lock_ops gsnedf_kfmlp_lock_ops = {
1352 .lock = kfmlp_lock,
1353 .unlock = kfmlp_unlock,
1354 .close = kfmlp_close,
1355 .deallocate = kfmlp_free,
1356
1357 // kfmlp can only be an outer-most lock.
1358 .propagate_increase_inheritance = NULL,
1359 .propagate_decrease_inheritance = NULL,
1360};
1361
1362
1363static struct litmus_lock* gsnedf_new_kfmlp(void* __user arg)
1364{
1365 return kfmlp_new(&gsnedf_kfmlp_lock_ops, arg);
1366}
1367
1368
1369
1370
943/* ******************** FMLP support ********************** */ 1371/* ******************** FMLP support ********************** */
944 1372
945/* struct for semaphore with priority inheritance */ 1373/* struct for semaphore with priority inheritance */
@@ -1145,8 +1573,6 @@ static struct litmus_lock* gsnedf_new_fmlp(void)
1145 return &sem->litmus_lock; 1573 return &sem->litmus_lock;
1146} 1574}
1147 1575
1148/* **** lock constructor **** */
1149
1150 1576
1151static long gsnedf_allocate_lock(struct litmus_lock **lock, int type, 1577static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
1152 void* __user args) 1578 void* __user args)
@@ -1160,7 +1586,6 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
1160 /* Flexible Multiprocessor Locking Protocol */ 1586 /* Flexible Multiprocessor Locking Protocol */
1161 *lock = gsnedf_new_fmlp(); 1587 *lock = gsnedf_new_fmlp();
1162 break; 1588 break;
1163
1164#ifdef CONFIG_LITMUS_NESTED_LOCKING 1589#ifdef CONFIG_LITMUS_NESTED_LOCKING
1165 case RSM_MUTEX: 1590 case RSM_MUTEX:
1166 *lock = gsnedf_new_rsm_mutex(); 1591 *lock = gsnedf_new_rsm_mutex();
@@ -1170,7 +1595,9 @@ static long gsnedf_allocate_lock(struct litmus_lock **lock, int type,
1170 *lock = gsnedf_new_ikglp(args); 1595 *lock = gsnedf_new_ikglp(args);
1171 break; 1596 break;
1172#endif 1597#endif
1173 1598 case KFMLP_SEM:
1599 *lock = gsnedf_new_kfmlp(args);
1600 break;
1174 default: 1601 default:
1175 err = -ENXIO; 1602 err = -ENXIO;
1176 goto UNSUPPORTED_LOCK; 1603 goto UNSUPPORTED_LOCK;
@@ -1187,7 +1614,6 @@ UNSUPPORTED_LOCK:
1187 1614
1188#endif 1615#endif
1189 1616
1190
1191static long gsnedf_activate_plugin(void) 1617static long gsnedf_activate_plugin(void)
1192{ 1618{
1193 int cpu; 1619 int cpu;
@@ -1214,6 +1640,20 @@ static long gsnedf_activate_plugin(void)
1214 } 1640 }
1215#endif 1641#endif
1216 } 1642 }
1643
1644#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1645 gsnedf_pending_tasklets.head = NULL;
1646 gsnedf_pending_tasklets.tail = &(gsnedf_pending_tasklets.head);
1647#endif
1648
1649#ifdef CONFIG_LITMUS_SOFTIRQD
1650 spawn_klitirqd(NULL);
1651#endif
1652
1653#ifdef CONFIG_LITMUS_NVIDIA
1654 init_nvidia_info();
1655#endif
1656
1217 return 0; 1657 return 0;
1218} 1658}
1219 1659
@@ -1242,6 +1682,14 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = {
1242#ifdef CONFIG_LITMUS_DGL_SUPPORT 1682#ifdef CONFIG_LITMUS_DGL_SUPPORT
1243 .get_dgl_spinlock = gsnedf_get_dgl_spinlock, 1683 .get_dgl_spinlock = gsnedf_get_dgl_spinlock,
1244#endif 1684#endif
1685#ifdef CONFIG_LITMUS_SOFTIRQD
1686 .increase_prio_klitirqd = increase_priority_inheritance_klitirqd,
1687 .decrease_prio_klitirqd = decrease_priority_inheritance_klitirqd,
1688#endif
1689#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
1690 .enqueue_pai_tasklet = enqueue_pai_tasklet,
1691 .run_tasklets = run_tasklets,
1692#endif
1245}; 1693};
1246 1694
1247 1695
diff --git a/litmus/sched_litmus.c b/litmus/sched_litmus.c
index 5a15ce938984..9a6fe487718e 100644
--- a/litmus/sched_litmus.c
+++ b/litmus/sched_litmus.c
@@ -103,7 +103,9 @@ litmus_schedule(struct rq *rq, struct task_struct *prev)
103 } 103 }
104#ifdef __ARCH_WANT_UNLOCKED_CTXSW 104#ifdef __ARCH_WANT_UNLOCKED_CTXSW
105 if (next->oncpu) 105 if (next->oncpu)
106 {
106 TRACE_TASK(next, "waiting for !oncpu"); 107 TRACE_TASK(next, "waiting for !oncpu");
108 }
107 while (next->oncpu) { 109 while (next->oncpu) {
108 cpu_relax(); 110 cpu_relax();
109 mb(); 111 mb();
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c
index 20c67ff4fdce..75694350a9ad 100644
--- a/litmus/sched_plugin.c
+++ b/litmus/sched_plugin.c
@@ -126,6 +126,32 @@ static void litmus_dummy_decrease_prio(struct task_struct* t, struct task_struct
126} 126}
127#endif 127#endif
128 128
129#ifdef CONFIG_LITMUS_SOFTIRQD
130static void litmus_dummy_increase_prio_klitirqd(struct task_struct* klitirqd,
131 struct task_struct* old_owner,
132 struct task_struct* new_owner)
133{
134}
135
136static void litmus_dummy_decrease_prio_klitirqd(struct task_struct* klitirqd,
137 struct task_struct* old_owner)
138{
139}
140#endif
141
142#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
143static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t)
144{
145 TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
146 return(0); // failure.
147}
148
149static void litmus_dummy_run_tasklets(struct task_struct* t)
150{
151 //TRACE("%s: PAI Tasklet unsupported in this plugin!!!!!!\n", __FUNCTION__);
152}
153#endif
154
129#ifdef CONFIG_LITMUS_NESTED_LOCKING 155#ifdef CONFIG_LITMUS_NESTED_LOCKING
130static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh, 156static void litmus_dummy_nested_increase_prio(struct task_struct* t, struct task_struct* prio_inh,
131 raw_spinlock_t *to_unlock, unsigned long irqflags) 157 raw_spinlock_t *to_unlock, unsigned long irqflags)
@@ -141,7 +167,6 @@ static void litmus_dummy_nested_decrease_prio(struct task_struct* t, struct task
141#ifdef CONFIG_LITMUS_DGL_SUPPORT 167#ifdef CONFIG_LITMUS_DGL_SUPPORT
142static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t) 168static raw_spinlock_t* litmus_dummy_get_dgl_spinlock(struct task_struct *t)
143{ 169{
144 BUG();
145 return NULL; 170 return NULL;
146} 171}
147#endif 172#endif
@@ -172,6 +197,14 @@ struct sched_plugin linux_sched_plugin = {
172 .nested_increase_prio = litmus_dummy_nested_increase_prio, 197 .nested_increase_prio = litmus_dummy_nested_increase_prio,
173 .nested_decrease_prio = litmus_dummy_nested_decrease_prio, 198 .nested_decrease_prio = litmus_dummy_nested_decrease_prio,
174#endif 199#endif
200#ifdef CONFIG_LITMUS_SOFTIRQD
201 .increase_prio_klitirqd = litmus_dummy_increase_prio_klitirqd,
202 .decrease_prio_klitirqd = litmus_dummy_decrease_prio_klitirqd,
203#endif
204#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
205 .enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet,
206 .run_tasklets = litmus_dummy_run_tasklets,
207#endif
175#ifdef CONFIG_LITMUS_DGL_SUPPORT 208#ifdef CONFIG_LITMUS_DGL_SUPPORT
176 .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock, 209 .get_dgl_spinlock = litmus_dummy_get_dgl_spinlock,
177#endif 210#endif
@@ -219,6 +252,14 @@ int register_sched_plugin(struct sched_plugin* plugin)
219 CHECK(nested_increase_prio); 252 CHECK(nested_increase_prio);
220 CHECK(nested_decrease_prio); 253 CHECK(nested_decrease_prio);
221#endif 254#endif
255#ifdef CONFIG_LITMUS_SOFTIRQD
256 CHECK(increase_prio_klitirqd);
257 CHECK(decrease_prio_klitirqd);
258#endif
259#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
260 CHECK(enqueue_pai_tasklet);
261 CHECK(run_tasklets);
262#endif
222#ifdef CONFIG_LITMUS_DGL_SUPPORT 263#ifdef CONFIG_LITMUS_DGL_SUPPORT
223 CHECK(get_dgl_spinlock); 264 CHECK(get_dgl_spinlock);
224#endif 265#endif
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 5ef8d09ab41f..d079df2b292a 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -7,6 +7,7 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/percpu.h> 9#include <linux/percpu.h>
10#include <linux/hardirq.h>
10 11
11#include <litmus/ftdev.h> 12#include <litmus/ftdev.h>
12#include <litmus/litmus.h> 13#include <litmus/litmus.h>
@@ -16,13 +17,13 @@
16#include <litmus/ftdev.h> 17#include <litmus/ftdev.h>
17 18
18 19
19#define NO_EVENTS (1 << CONFIG_SCHED_TASK_TRACE_SHIFT) 20#define NUM_EVENTS (1 << (CONFIG_SCHED_TASK_TRACE_SHIFT+11))
20 21
21#define now() litmus_clock() 22#define now() litmus_clock()
22 23
23struct local_buffer { 24struct local_buffer {
24 struct st_event_record record[NO_EVENTS]; 25 struct st_event_record record[NUM_EVENTS];
25 char flag[NO_EVENTS]; 26 char flag[NUM_EVENTS];
26 struct ft_buffer ftbuf; 27 struct ft_buffer ftbuf;
27}; 28};
28 29
@@ -41,7 +42,7 @@ static int __init init_sched_task_trace(void)
41 int i, ok = 0, err; 42 int i, ok = 0, err;
42 printk("Allocated %u sched_trace_xxx() events per CPU " 43 printk("Allocated %u sched_trace_xxx() events per CPU "
43 "(buffer size: %d bytes)\n", 44 "(buffer size: %d bytes)\n",
44 NO_EVENTS, (int) sizeof(struct local_buffer)); 45 NUM_EVENTS, (int) sizeof(struct local_buffer));
45 46
46 err = ftdev_init(&st_dev, THIS_MODULE, 47 err = ftdev_init(&st_dev, THIS_MODULE,
47 num_online_cpus(), "sched_trace"); 48 num_online_cpus(), "sched_trace");
@@ -50,7 +51,7 @@ static int __init init_sched_task_trace(void)
50 51
51 for (i = 0; i < st_dev.minor_cnt; i++) { 52 for (i = 0; i < st_dev.minor_cnt; i++) {
52 buf = &per_cpu(st_event_buffer, i); 53 buf = &per_cpu(st_event_buffer, i);
53 ok += init_ft_buffer(&buf->ftbuf, NO_EVENTS, 54 ok += init_ft_buffer(&buf->ftbuf, NUM_EVENTS,
54 sizeof(struct st_event_record), 55 sizeof(struct st_event_record),
55 buf->flag, 56 buf->flag,
56 buf->record); 57 buf->record);
@@ -154,7 +155,8 @@ feather_callback void do_sched_trace_task_switch_to(unsigned long id,
154{ 155{
155 struct task_struct *t = (struct task_struct*) _task; 156 struct task_struct *t = (struct task_struct*) _task;
156 struct st_event_record* rec; 157 struct st_event_record* rec;
157 if (is_realtime(t)) { 158 //if (is_realtime(t)) /* comment out to trace EVERYTHING */
159 {
158 rec = get_record(ST_SWITCH_TO, t); 160 rec = get_record(ST_SWITCH_TO, t);
159 if (rec) { 161 if (rec) {
160 rec->data.switch_to.when = now(); 162 rec->data.switch_to.when = now();
@@ -169,7 +171,8 @@ feather_callback void do_sched_trace_task_switch_away(unsigned long id,
169{ 171{
170 struct task_struct *t = (struct task_struct*) _task; 172 struct task_struct *t = (struct task_struct*) _task;
171 struct st_event_record* rec; 173 struct st_event_record* rec;
172 if (is_realtime(t)) { 174 //if (is_realtime(t)) /* comment out to trace EVERYTHING */
175 {
173 rec = get_record(ST_SWITCH_AWAY, t); 176 rec = get_record(ST_SWITCH_AWAY, t);
174 if (rec) { 177 if (rec) {
175 rec->data.switch_away.when = now(); 178 rec->data.switch_away.when = now();
@@ -188,6 +191,9 @@ feather_callback void do_sched_trace_task_completion(unsigned long id,
188 if (rec) { 191 if (rec) {
189 rec->data.completion.when = now(); 192 rec->data.completion.when = now();
190 rec->data.completion.forced = forced; 193 rec->data.completion.forced = forced;
194#ifdef CONFIG_LITMUS_NVIDIA
195 rec->data.completion.nv_int_count = (u16)atomic_read(&tsk_rt(t)->nv_int_count);
196#endif
191 put_record(rec); 197 put_record(rec);
192 } 198 }
193} 199}
@@ -239,3 +245,215 @@ feather_callback void do_sched_trace_action(unsigned long id,
239 put_record(rec); 245 put_record(rec);
240 } 246 }
241} 247}
248
249
250feather_callback void do_sched_trace_tasklet_release(unsigned long id,
251 unsigned long _owner)
252{
253 struct task_struct *t = (struct task_struct*) _owner;
254 struct st_event_record *rec = get_record(ST_TASKLET_RELEASE, t);
255
256 if (rec) {
257 rec->data.tasklet_release.when = now();
258 put_record(rec);
259 }
260}
261
262
263feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
264 unsigned long _owner)
265{
266 struct task_struct *t = (struct task_struct*) _owner;
267 struct st_event_record *rec = get_record(ST_TASKLET_BEGIN, t);
268
269 if (rec) {
270 rec->data.tasklet_begin.when = now();
271
272 if(!in_interrupt())
273 rec->data.tasklet_begin.exe_pid = current->pid;
274 else
275 rec->data.tasklet_begin.exe_pid = 0;
276
277 put_record(rec);
278 }
279}
280EXPORT_SYMBOL(do_sched_trace_tasklet_begin);
281
282
283feather_callback void do_sched_trace_tasklet_end(unsigned long id,
284 unsigned long _owner,
285 unsigned long _flushed)
286{
287 struct task_struct *t = (struct task_struct*) _owner;
288 struct st_event_record *rec = get_record(ST_TASKLET_END, t);
289
290 if (rec) {
291 rec->data.tasklet_end.when = now();
292 rec->data.tasklet_end.flushed = _flushed;
293
294 if(!in_interrupt())
295 rec->data.tasklet_end.exe_pid = current->pid;
296 else
297 rec->data.tasklet_end.exe_pid = 0;
298
299 put_record(rec);
300 }
301}
302EXPORT_SYMBOL(do_sched_trace_tasklet_end);
303
304
305feather_callback void do_sched_trace_work_release(unsigned long id,
306 unsigned long _owner)
307{
308 struct task_struct *t = (struct task_struct*) _owner;
309 struct st_event_record *rec = get_record(ST_WORK_RELEASE, t);
310
311 if (rec) {
312 rec->data.work_release.when = now();
313 put_record(rec);
314 }
315}
316
317
318feather_callback void do_sched_trace_work_begin(unsigned long id,
319 unsigned long _owner,
320 unsigned long _exe)
321{
322 struct task_struct *t = (struct task_struct*) _owner;
323 struct st_event_record *rec = get_record(ST_WORK_BEGIN, t);
324
325 if (rec) {
326 struct task_struct *exe = (struct task_struct*) _exe;
327 rec->data.work_begin.exe_pid = exe->pid;
328 rec->data.work_begin.when = now();
329 put_record(rec);
330 }
331}
332EXPORT_SYMBOL(do_sched_trace_work_begin);
333
334
335feather_callback void do_sched_trace_work_end(unsigned long id,
336 unsigned long _owner,
337 unsigned long _exe,
338 unsigned long _flushed)
339{
340 struct task_struct *t = (struct task_struct*) _owner;
341 struct st_event_record *rec = get_record(ST_WORK_END, t);
342
343 if (rec) {
344 struct task_struct *exe = (struct task_struct*) _exe;
345 rec->data.work_end.exe_pid = exe->pid;
346 rec->data.work_end.flushed = _flushed;
347 rec->data.work_end.when = now();
348 put_record(rec);
349 }
350}
351EXPORT_SYMBOL(do_sched_trace_work_end);
352
353
354feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
355 unsigned long _task,
356 unsigned long _inh)
357{
358 struct task_struct *t = (struct task_struct*) _task;
359 struct st_event_record *rec = get_record(ST_EFF_PRIO_CHANGE, t);
360
361 if (rec) {
362 struct task_struct *inh = (struct task_struct*) _inh;
363 rec->data.effective_priority_change.when = now();
364 rec->data.effective_priority_change.inh_pid = (inh != NULL) ?
365 inh->pid :
366 0xffff;
367
368 put_record(rec);
369 }
370}
371
372/* pray for no nesting of nv interrupts on same CPU... */
373struct tracing_interrupt_map
374{
375 int active;
376 int count;
377 unsigned long data[128]; // assume nesting less than 128...
378 unsigned long serial[128];
379};
380DEFINE_PER_CPU(struct tracing_interrupt_map, active_interrupt_tracing);
381
382
383DEFINE_PER_CPU(u32, intCounter);
384
385feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
386 unsigned long _device)
387{
388 struct st_event_record *rec;
389 u32 serialNum;
390
391 {
392 u32* serial;
393 struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
394		if(int_map->active == 0xcafebabe)  /* already tracing: nested interrupt */
395 {
396 int_map->count++;
397 }
398 else
399 {
400 int_map->active = 0xcafebabe;
401 int_map->count = 1;
402 }
403 //int_map->data[int_map->count-1] = _device;
404
405 serial = &per_cpu(intCounter, smp_processor_id());
406 *serial += num_online_cpus();
407 serialNum = *serial;
408 int_map->serial[int_map->count-1] = serialNum;
409 }
410
411 rec = get_record(ST_NV_INTERRUPT_BEGIN, NULL);
412 if(rec) {
413 u32 device = _device;
414 rec->data.nv_interrupt_begin.when = now();
415 rec->data.nv_interrupt_begin.device = device;
416 rec->data.nv_interrupt_begin.serialNumber = serialNum;
417 put_record(rec);
418 }
419}
420EXPORT_SYMBOL(do_sched_trace_nv_interrupt_begin);
421
422/*
423int is_interrupt_tracing_active(void)
424{
425 struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
426 if(int_map->active == 0xcafebabe)
427 return 1;
428 return 0;
429}
430*/
431
432feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id, unsigned long _device)
433{
434 struct tracing_interrupt_map* int_map = &per_cpu(active_interrupt_tracing, smp_processor_id());
435 if(int_map->active == 0xcafebabe)
436 {
437 struct st_event_record *rec = get_record(ST_NV_INTERRUPT_END, NULL);
438
439 int_map->count--;
440 if(int_map->count == 0)
441 int_map->active = 0;
442
443 if(rec) {
444 u32 device = _device;
445 rec->data.nv_interrupt_end.when = now();
446 //rec->data.nv_interrupt_end.device = int_map->data[int_map->count];
447 rec->data.nv_interrupt_end.device = device;
448 rec->data.nv_interrupt_end.serialNumber = int_map->serial[int_map->count];
449 put_record(rec);
450 }
451 }
452}
453EXPORT_SYMBOL(do_sched_trace_nv_interrupt_end);
454
455
456
457
458
459
diff --git a/litmus/sched_trace_external.c b/litmus/sched_trace_external.c
new file mode 100644
index 000000000000..cf8e1d78aa77
--- /dev/null
+++ b/litmus/sched_trace_external.c
@@ -0,0 +1,64 @@
1#include <linux/module.h>
2
3#include <litmus/trace.h>
4#include <litmus/sched_trace.h>
5#include <litmus/litmus.h>
6
7void __sched_trace_tasklet_begin_external(struct task_struct* t)
8{
9 sched_trace_tasklet_begin(t);
10}
11EXPORT_SYMBOL(__sched_trace_tasklet_begin_external);
12
13void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed)
14{
15 sched_trace_tasklet_end(t, flushed);
16}
17EXPORT_SYMBOL(__sched_trace_tasklet_end_external);
18
19
20
21void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e)
22{
23 sched_trace_work_begin(t, e);
24}
25EXPORT_SYMBOL(__sched_trace_work_begin_external);
26
27void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f)
28{
29 sched_trace_work_end(t, e, f);
30}
31EXPORT_SYMBOL(__sched_trace_work_end_external);
32
33
34
35void __sched_trace_nv_interrupt_begin_external(u32 device)
36{
37 //unsigned long _device = device;
38 sched_trace_nv_interrupt_begin((unsigned long)device);
39}
40EXPORT_SYMBOL(__sched_trace_nv_interrupt_begin_external);
41
42void __sched_trace_nv_interrupt_end_external(u32 device)
43{
44 //unsigned long _device = device;
45 sched_trace_nv_interrupt_end((unsigned long)device);
46}
47EXPORT_SYMBOL(__sched_trace_nv_interrupt_end_external);
48
49
50#ifdef CONFIG_LITMUS_NVIDIA
51
52#define EXX_TS(evt) \
53void __##evt(void) { evt; } \
54EXPORT_SYMBOL(__##evt);
55
56EXX_TS(TS_NV_TOPISR_START)
57EXX_TS(TS_NV_TOPISR_END)
58EXX_TS(TS_NV_BOTISR_START)
59EXX_TS(TS_NV_BOTISR_END)
60EXX_TS(TS_NV_RELEASE_BOTISR_START)
61EXX_TS(TS_NV_RELEASE_BOTISR_END)
62
63#endif
64
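For reference, the EXX_TS() macro above only wraps each feather-trace timestamp macro in an exported function so that modules can record the timestamp without direct access to the macro. The first invocation, for example, expands to roughly the following (whitespace differs from the actual preprocessor output):

void __TS_NV_TOPISR_START(void) { TS_NV_TOPISR_START; }
EXPORT_SYMBOL(__TS_NV_TOPISR_START);

Callers outside this translation unit then invoke __TS_NV_TOPISR_START() instead of using the TS_NV_TOPISR_START macro directly.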