Diffstat (limited to 'include/litmus')
-rw-r--r--  include/litmus/fdso.h                  |   6
-rw-r--r--  include/litmus/fifo_common.h           |  25
-rw-r--r--  include/litmus/litmus.h                |   5
-rw-r--r--  include/litmus/litmus_softirq.h        | 199
-rw-r--r--  include/litmus/nvidia_info.h           |  38
-rw-r--r--  include/litmus/preempt.h               |   1
-rw-r--r--  include/litmus/rm_common.h             |  25
-rw-r--r--  include/litmus/rm_srt_common.h         |  25
-rw-r--r--  include/litmus/rt_param.h              |  44
-rw-r--r--  include/litmus/sched_plugin.h          |  33
-rw-r--r--  include/litmus/sched_trace.h           | 174
-rw-r--r--  include/litmus/sched_trace_external.h  |  78
-rw-r--r--  include/litmus/trace.h                 |  20
-rw-r--r--  include/litmus/unistd_32.h             |   3
-rw-r--r--  include/litmus/unistd_64.h             |   5
15 files changed, 655 insertions(+), 26 deletions(-)
diff --git a/include/litmus/fdso.h b/include/litmus/fdso.h
index caf2a1e6918c..c740e8fc3e88 100644
--- a/include/litmus/fdso.h
+++ b/include/litmus/fdso.h
@@ -18,9 +18,10 @@ typedef enum {
 	MIN_OBJ_TYPE 	= 0,
 
 	FMLP_SEM	= 0,
-	SRP_SEM		= 1,
+	KFMLP_SEM	= 1,
+	SRP_SEM		= 2,
 
-	MAX_OBJ_TYPE	= 1
+	MAX_OBJ_TYPE	= SRP_SEM
 } obj_type_t;
 
 struct inode_obj_id {
@@ -64,6 +65,7 @@ static inline void* od_lookup(int od, obj_type_t type)
 }
 
 #define lookup_fmlp_sem(od)((struct pi_semaphore*)  od_lookup(od, FMLP_SEM))
+#define lookup_kfmlp_sem(od)((struct pi_semaphore*) od_lookup(od, KFMLP_SEM))
 #define lookup_srp_sem(od)  ((struct srp_semaphore*) od_lookup(od, SRP_SEM))
 #define lookup_ics(od)      ((struct ics*)           od_lookup(od, ICS_ID))
 
diff --git a/include/litmus/fifo_common.h b/include/litmus/fifo_common.h
new file mode 100644
index 000000000000..12cfbfea41ee
--- /dev/null
+++ b/include/litmus/fifo_common.h
@@ -0,0 +1,25 @@
+/*
+ * FIFO common data structures and utility functions shared by all FIFO
+ * based scheduler plugins
+ */
+
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_FIFO_COMMON_H__
+#define __UNC_FIFO_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+void fifo_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		      release_jobs_t release);
+
+int fifo_higher_prio(struct task_struct* first,
+		     struct task_struct* second);
+
+int fifo_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+int fifo_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+#endif
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 0b071fd359f9..a2e564b885a7 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -26,6 +26,7 @@ static inline int in_list(struct list_head* list)
 		);
 }
 
+
 struct task_struct* __waitqueue_remove_first(wait_queue_head_t *wq);
 
 #define NO_CPU			0xffffffff
@@ -117,7 +118,9 @@ static inline lt_t litmus_clock(void)
 #define earlier_release(a, b)  (lt_before(\
 	(a)->rt_param.job_params.release,\
 	(b)->rt_param.job_params.release))
-
+#define shorter_period(a, b) (lt_before(\
+	(a)->rt_param.task_params.period,\
+	(b)->rt_param.task_params.period))
 void preempt_if_preemptable(struct task_struct* t, int on_cpu);
 
 #ifdef CONFIG_LITMUS_LOCKING
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
new file mode 100644
index 000000000000..34287f3cbb8d
--- /dev/null
+++ b/include/litmus/litmus_softirq.h
@@ -0,0 +1,199 @@
+#ifndef __LITMUS_SOFTIRQ_H
+#define __LITMUS_SOFTIRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+
+/*
+   Threaded tasklet handling for Litmus.  Tasklets
+   are scheduled with the priority of the tasklet's
+   owner---that is, the RT task on whose behalf the tasklet
+   runs.
+
+   Tasklets are currently scheduled in FIFO order with
+   NO priority inheritance for "blocked" tasklets.
+
+   klitirqd assumes the priority of the owner of the
+   tasklet when the tasklet is next to execute.
+
+   Currently, hi-tasklets are scheduled before
+   low-tasklets, regardless of the priority of low-tasklets.
+   Likewise, low-tasklets are scheduled before work
+   queue objects.  This priority inversion probably needs
+   to be fixed, though it is not an issue in our work with
+   GPUs, as GPUs (and their associated klitirqds) are owned for
+   exclusive time periods, so no inversions can
+   occur.
+ */
+
+
+
+#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
+
+/* Spawns NR_LITMUS_SOFTIRQD klitirqd daemons.
+   The actual launch of the threads is deferred to kworker's
+   workqueue, so the daemons will likely not be immediately
+   running when this function returns, though the required
+   data will be initialized.
+
+   @affinity_set: an array expressing the processor affinity
+    for each of the NR_LITMUS_SOFTIRQD daemons.  May be set
+    to NULL for global scheduling.
+
+	- Examples -
+	8-CPU system with two CPU clusters:
+		affinity[] = {0, 0, 0, 0, 3, 3, 3, 3}
+		NOTE: Daemons are not actually bound to the specified CPU, but rather
+		to the cluster in which the CPU resides.
+
+	8-CPU system, partitioned:
+		affinity[] = {0, 1, 2, 3, 4, 5, 6, 7}
+
+	FIXME: change array to a CPU topology or array of cpumasks
+
+ */
+void spawn_klitirqd(int* affinity);
+
+
+/* Raises a flag to tell klitirqds to terminate.
+   Termination is asynchronous, so some threads may still be running
+   after this function returns. */
+void kill_klitirqd(void);
+
+
+/* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqd daemons are ready
+   to handle tasklets; 0 otherwise. */
+int klitirqd_is_ready(void);
+
+/* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqd daemons are ready
+   to handle tasklets; 0 otherwise. */
+int klitirqd_is_dead(void);
+
+/* Flushes all pending work out to the OS for regular
+ * tasklet/work processing of the specified 'owner'
+ *
+ * PRECOND: klitirqd_thread must have a clear entry
+ * in the GPU registry, otherwise this call will become
+ * a no-op as work will loop back to the klitirqd_thread.
+ *
+ * Pass NULL for owner to flush ALL pending items.
+ */
+void flush_pending(struct task_struct* klitirqd_thread,
+		   struct task_struct* owner);
+
+struct task_struct* get_klitirqd(unsigned int k_id);
+
+
+extern int __litmus_tasklet_schedule(
+	struct tasklet_struct *t,
+	unsigned int k_id);
+
+/* schedule a tasklet on klitirqd #k_id */
+static inline int litmus_tasklet_schedule(
+	struct tasklet_struct *t,
+	unsigned int k_id)
+{
+	int ret = 0;
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		ret = __litmus_tasklet_schedule(t, k_id);
+	return(ret);
+}
+
+/* for use by __tasklet_schedule() */
+static inline int _litmus_tasklet_schedule(
+	struct tasklet_struct *t,
+	unsigned int k_id)
+{
+	return(__litmus_tasklet_schedule(t, k_id));
+}
+
+
+
+
+extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+					unsigned int k_id);
+
+/* schedule a hi tasklet on klitirqd #k_id */
+static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+					     unsigned int k_id)
+{
+	int ret = 0;
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		ret = __litmus_tasklet_hi_schedule(t, k_id);
+	return(ret);
+}
+
+/* for use by __tasklet_hi_schedule() */
+static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t,
+					      unsigned int k_id)
+{
+	return(__litmus_tasklet_hi_schedule(t, k_id));
+}
+
+
+
+
+
+extern int __litmus_tasklet_hi_schedule_first(
+	struct tasklet_struct *t,
+	unsigned int k_id);
+
+/* schedule a hi tasklet on klitirqd #k_id on next go-around */
+/* PRECONDITION: Interrupts must be disabled. */
+static inline int litmus_tasklet_hi_schedule_first(
+	struct tasklet_struct *t,
+	unsigned int k_id)
+{
+	int ret = 0;
+	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
+		ret = __litmus_tasklet_hi_schedule_first(t, k_id);
+	return(ret);
+}
+
+/* for use by __tasklet_hi_schedule_first() */
+static inline int _litmus_tasklet_hi_schedule_first(
+	struct tasklet_struct *t,
+	unsigned int k_id)
+{
+	return(__litmus_tasklet_hi_schedule_first(t, k_id));
+}
+
+
+
+//////////////
+
+extern int __litmus_schedule_work(
+	struct work_struct* w,
+	unsigned int k_id);
+
+static inline int litmus_schedule_work(
+	struct work_struct* w,
+	unsigned int k_id)
+{
+	return(__litmus_schedule_work(w, k_id));
+}
+
+
+
+///////////// mutex operations for client threads.
+
+void down_and_set_stat(struct task_struct* t,
+		       enum klitirqd_sem_status to_set,
+		       struct mutex* sem);
+
+void __down_and_reset_and_set_stat(struct task_struct* t,
+		       enum klitirqd_sem_status to_reset,
+		       enum klitirqd_sem_status to_set,
+		       struct mutex* sem);
+
+void up_and_set_stat(struct task_struct* t,
+		     enum klitirqd_sem_status to_set,
+		     struct mutex* sem);
+
+
+
+void release_klitirqd_lock(struct task_struct* t);
+
+int reacquire_klitirqd_lock(struct task_struct* t);
+
+#endif
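As a usage illustration (not part of the patch), a cluster-scheduled configuration might bring the daemons up and hand a driver tasklet to one of them roughly as sketched below; the setup function, the wait loop, and the choice of daemon index are assumptions, with the affinity values taken from the 2-cluster example in the header comment.

/* Hedged sketch of klitirqd setup and use, assuming CONFIG_NR_LITMUS_SOFTIRQD = 8. */
#include <litmus/litmus_softirq.h>

static void example_bring_up_klitirqd(void)
{
	/* daemons 0-3 serve cluster 0, daemons 4-7 serve cluster 3 */
	int affinity[NR_LITMUS_SOFTIRQD] = {0, 0, 0, 0, 3, 3, 3, 3};

	spawn_klitirqd(affinity);

	/* launch is deferred to a workqueue, so wait until the daemons report ready */
	while (!klitirqd_is_ready())
		cpu_relax();
}

static void example_defer_tasklet(struct tasklet_struct *t, unsigned int k_id)
{
	/* runs t in klitirqd #k_id at the priority of t's owner;
	 * returns 0 if the tasklet was already scheduled */
	if (!litmus_tasklet_schedule(t, k_id))
		pr_debug("tasklet was already pending\n");
}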
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
new file mode 100644
index 000000000000..9e07a27fdee3
--- /dev/null
+++ b/include/litmus/nvidia_info.h
@@ -0,0 +1,38 @@
+#ifndef __LITMUS_NVIDIA_H
+#define __LITMUS_NVIDIA_H
+
+#include <linux/interrupt.h>
+
+
+#include <litmus/litmus_softirq.h>
+
+
+//#define NV_DEVICE_NUM NR_LITMUS_SOFTIRQD
+#define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM
+
+int init_nvidia_info(void);
+
+int is_nvidia_func(void* func_addr);
+
+void dump_nvidia_info(const struct tasklet_struct *t);
+
+
+// Returns the Nvidia device # associated with the provided tasklet or work_struct.
+u32 get_tasklet_nv_device_num(const struct tasklet_struct *t);
+u32 get_work_nv_device_num(const struct work_struct *t);
+
+
+int init_nv_device_reg(void);
+//int get_nv_device_id(struct task_struct* owner);
+
+
+int reg_nv_device(int reg_device_id, int register_device);
+
+struct task_struct* get_nv_device_owner(u32 target_device_id);
+
+void lock_nv_registry(u32 reg_device_id, unsigned long* flags);
+void unlock_nv_registry(u32 reg_device_id, unsigned long* flags);
+
+void increment_nv_int_count(u32 device);
+
+#endif
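To illustrate how these hooks fit together (a hedged sketch, not code from the patch): an interrupt-handling path could map a tasklet to its GPU, look up the owning real-time task under the registry lock, and forward the tasklet to a klitirqd on the owner's behalf. The one-daemon-per-device selection policy and the fallback path are assumptions.

/* Hypothetical dispatch of an NVIDIA bottom half to a klitirqd daemon. */
#include <litmus/nvidia_info.h>
#include <litmus/litmus_softirq.h>

static void example_route_nv_tasklet(struct tasklet_struct *t)
{
	unsigned long flags;
	u32 device = get_tasklet_nv_device_num(t);
	struct task_struct *owner;

	lock_nv_registry(device, &flags);
	owner = get_nv_device_owner(device);
	if (owner) {
		/* count the top half against the owner's current job */
		increment_nv_int_count(device);
		/* assumption: one klitirqd daemon per registered device */
		litmus_tasklet_schedule(t, device % NR_LITMUS_SOFTIRQD);
	} else {
		/* no registered owner: fall back to the regular softirq path */
		tasklet_schedule(t);
	}
	unlock_nv_registry(device, &flags);
}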
diff --git a/include/litmus/preempt.h b/include/litmus/preempt.h
index 380b886d78ff..9f2a153ed236 100644
--- a/include/litmus/preempt.h
+++ b/include/litmus/preempt.h
@@ -26,6 +26,7 @@ const char* sched_state_name(int s);
 			    (x), #x, __FUNCTION__);		\
 	} while (0);
 
+//#define TRACE_SCHED_STATE_CHANGE(x, y, cpu) /* ignore */
 #define TRACE_SCHED_STATE_CHANGE(x, y, cpu)				\
 	TRACE_STATE("[P%d] 0x%x (%s) -> 0x%x (%s)\n",			\
 		    cpu,  (x), sched_state_name(x),			\
diff --git a/include/litmus/rm_common.h b/include/litmus/rm_common.h
new file mode 100644
index 000000000000..5991b0b4e758
--- /dev/null
+++ b/include/litmus/rm_common.h
@@ -0,0 +1,25 @@
+/*
+ * Rate-monotonic (RM) common data structures and utility functions shared
+ * by all RM based scheduler plugins
+ */
+
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_RM_COMMON_H__
+#define __UNC_RM_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+void rm_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+		    release_jobs_t release);
+
+int rm_higher_prio(struct task_struct* first,
+		   struct task_struct* second);
+
+int rm_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+int rm_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+#endif
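The shorter_period() helper added to litmus.h above is presumably what the RM comparison builds on; a minimal sketch of how rm_higher_prio() could be structured is shown below. This is not the patch's implementation, and the PID tie-break is an assumption made only to obtain a total order.

/* Hedged sketch of a rate-monotonic priority comparison. */
#include <litmus/litmus.h>

static int example_rm_higher_prio(struct task_struct *first,
				  struct task_struct *second)
{
	/* a real task always beats a missing one */
	if (!first || !second)
		return first && !second;

	/* rate-monotonic: the task with the shorter period wins */
	if (shorter_period(first, second))
		return 1;
	if (shorter_period(second, first))
		return 0;

	/* assumed tie-break: lower PID wins */
	return first->pid < second->pid;
}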
diff --git a/include/litmus/rm_srt_common.h b/include/litmus/rm_srt_common.h
new file mode 100644
index 000000000000..78aa287327a2
--- /dev/null
+++ b/include/litmus/rm_srt_common.h
@@ -0,0 +1,25 @@
+/*
+ * RM-SRT common data structures and utility functions shared by all
+ * RM-SRT based scheduler plugins
+ */
+
+/* CLEANUP: Add comments and make it less messy.
+ *
+ */
+
+#ifndef __UNC_RM_SRT_COMMON_H__
+#define __UNC_RM_SRT_COMMON_H__
+
+#include <litmus/rt_domain.h>
+
+void rm_srt_domain_init(rt_domain_t* rt, check_resched_needed_t resched,
+			release_jobs_t release);
+
+int rm_srt_higher_prio(struct task_struct* first,
+		       struct task_struct* second);
+
+int rm_srt_ready_order(struct bheap_node* a, struct bheap_node* b);
+
+int rm_srt_preemption_needed(rt_domain_t* rt, struct task_struct *t);
+
+#endif
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index d6d799174160..f50af3322c4b 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -75,6 +75,8 @@ struct control_page {
 /* don't export internal data structures to user space (liblitmus) */
 #ifdef __KERNEL__
 
+#include <linux/semaphore.h>
+
 struct _rt_domain;
 struct bheap_node;
 struct release_heap;
@@ -100,6 +102,14 @@ struct rt_job {
 
 struct pfair_param;
 
+enum klitirqd_sem_status
+{
+	NEED_TO_REACQUIRE,
+	REACQUIRING,
+	NOT_HELD,
+	HELD
+};
+
 /* RT task parameters for scheduling extensions
  * These parameters are inherited during clone and therefore must
  * be explicitly set up before the task set is launched.
@@ -114,6 +124,38 @@ struct rt_param {
 	/* is the task present? (true if it can be scheduled) */
 	unsigned int		present:1;
 
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	/* proxy threads have minimum priority by default */
+	unsigned int	is_proxy_thread:1;
+
+	/* pointer to the klitirqd currently working on this
+	   task_struct's behalf.  only set by the task pointed
+	   to by klitirqd.
+
+	   ptr only valid if is_proxy_thread == 0
+	 */
+	struct task_struct* cur_klitirqd;
+
+	/* Used to implement mutual execution exclusion between
+	 * job and klitirqd execution.  A job must always hold
+	 * its klitirqd_sem to execute.  A klitirqd instance
+	 * must hold the semaphore before executing on behalf
+	 * of a job.
+	 */
+	//struct semaphore	klitirqd_sem;
+	struct mutex		klitirqd_sem;
+
+	/* status of the held klitirqd_sem, even if the held klitirqd_sem is from
+	   another task (only proxy threads do this though).
+	 */
+	atomic_t		klitirqd_sem_stat;
+#endif
+
+#ifdef CONFIG_LITMUS_NVIDIA
+	/* number of top-half interrupts handled on behalf of current job */
+	atomic_t		nv_int_count;
+#endif
+
 #ifdef CONFIG_LITMUS_LOCKING
 	/* Is the task being priority-boosted by a locking protocol? */
 	unsigned int		priority_boosted:1;
@@ -134,7 +176,7 @@ struct rt_param {
 	 * an increased task priority.
 	 */
 	 struct task_struct*	inh_task;
-
+
 #ifdef CONFIG_NP_SECTION
 	/* For the FMLP under PSN-EDF, it is required to make the task
 	 * non-preemptive from kernel space. In order not to interfere with
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index 6e7cabdddae8..8fdf05dd7cd3 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -11,6 +11,8 @@
 #include <litmus/locking.h>
 #endif
 
+#include <linux/interrupt.h>
+
 /************************ setup/tear down ********************/
 
 typedef long (*activate_plugin_t) (void);
@@ -29,7 +31,6 @@ typedef struct task_struct* (*schedule_t)(struct task_struct * prev);
  */
 typedef void (*finish_switch_t)(struct task_struct *prev);
 
-
 /********************* task state changes ********************/
 
 /* Called to setup a new real-time task.
@@ -58,6 +59,21 @@ typedef void (*task_exit_t)    (struct task_struct *);
 typedef long (*allocate_lock_t) (struct litmus_lock **lock, int type,
 				 void* __user config);
 
+/* Called to change inheritance levels of given task */
+typedef void (*set_prio_inh_t)(struct task_struct* t,
+			       struct task_struct* prio_inh);
+typedef void (*clear_prio_inh_t)(struct task_struct* t);
+
+
+typedef void (*set_prio_inh_klitirq_t)(struct task_struct* klitirqd,
+				       struct task_struct* old_owner,
+				       struct task_struct* new_owner);
+typedef void (*clear_prio_inh_klitirqd_t)(struct task_struct* klitirqd,
+					  struct task_struct* old_owner);
+
+
+typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet);
+typedef void (*run_tasklets_t)(struct task_struct* next);
 
 /********************* sys call backends  ********************/
 /* This function causes the caller to sleep until the next release */
@@ -88,7 +104,7 @@ struct sched_plugin {
 	/*	task state changes 	*/
 	admit_task_t		admit_task;
 
-	task_new_t 		task_new;
+	task_new_t		task_new;
 	task_wake_up_t		task_wake_up;
 	task_block_t		task_block;
 	task_exit_t		task_exit;
@@ -96,6 +112,19 @@ struct sched_plugin {
 #ifdef CONFIG_LITMUS_LOCKING
 	/*	locking protocols	*/
 	allocate_lock_t		allocate_lock;
+
+	set_prio_inh_t		set_prio_inh;
+	clear_prio_inh_t	clear_prio_inh;
+#endif
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	set_prio_inh_klitirq_t		set_prio_inh_klitirqd;
+	clear_prio_inh_klitirqd_t	clear_prio_inh_klitirqd;
+#endif
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+	enqueue_pai_tasklet_t		enqueue_pai_tasklet;
+	run_tasklets_t			run_tasklets;
 #endif
 } __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
 
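For context, a plugin that supports the new hooks would extend its struct sched_plugin initializer roughly as sketched below. The plugin name and the callback functions on the right-hand side are hypothetical plugin-local symbols, not functions introduced by this patch.

/* Hedged sketch: wiring the new callbacks into a plugin definition. */
static struct sched_plugin example_plugin __cacheline_aligned_in_smp = {
	.plugin_name		= "EXAMPLE-GSN-EDF",
	.schedule		= example_schedule,
	.task_new		= example_task_new,
	.task_wake_up		= example_task_wake_up,
	.task_block		= example_task_block,
	.task_exit		= example_task_exit,
#ifdef CONFIG_LITMUS_LOCKING
	.allocate_lock		= example_allocate_lock,
	.set_prio_inh		= example_set_prio_inh,
	.clear_prio_inh		= example_clear_prio_inh,
#endif
#ifdef CONFIG_LITMUS_SOFTIRQD
	.set_prio_inh_klitirqd	= example_set_prio_inh_klitirqd,
	.clear_prio_inh_klitirqd = example_clear_prio_inh_klitirqd,
#endif
#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
	.enqueue_pai_tasklet	= example_enqueue_pai_tasklet,
	.run_tasklets		= example_run_tasklets,
#endif
};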
diff --git a/include/litmus/sched_trace.h b/include/litmus/sched_trace.h
index 7ca34cb13881..232c7588d103 100644
--- a/include/litmus/sched_trace.h
+++ b/include/litmus/sched_trace.h
@@ -11,12 +11,12 @@ struct st_trace_header {
 	u8	cpu;		/* On which CPU was it recorded? */
 	u16	pid;		/* PID of the task.              */
 	u32	job;		/* The job sequence number.      */
-};
+} __attribute__((packed));
 
 #define ST_NAME_LEN 16
 struct st_name_data {
 	char	cmd[ST_NAME_LEN];/* The name of the executable of this process. */
-};
+} __attribute__((packed));
 
 struct st_param_data {		/* regular params */
 	u32	wcet;
@@ -25,30 +25,29 @@ struct st_param_data {		/* regular params */
 	u8	partition;
 	u8	class;
 	u8	__unused[2];
-};
+} __attribute__((packed));
 
 struct st_release_data {	/* A job is was/is going to be released. */
 	u64	release;	/* What's the release time?              */
 	u64	deadline;	/* By when must it finish?               */
-};
+} __attribute__((packed));
 
 struct st_assigned_data {	/* A job was asigned to a CPU. 		 */
 	u64	when;
 	u8	target;		/* Where should it execute?	         */
 	u8	__unused[7];
-};
+} __attribute__((packed));
 
 struct st_switch_to_data {	/* A process was switched to on a given CPU.   */
 	u64	when;		/* When did this occur?                        */
 	u32	exec_time;	/* Time the current job has executed.          */
 	u8	__unused[4];
-
-};
+} __attribute__((packed));
 
 struct st_switch_away_data {	/* A process was switched away from on a given CPU. */
 	u64	when;
 	u64	exec_time;
-};
+} __attribute__((packed));
 
 struct st_completion_data {	/* A job completed. */
 	u64	when;
@@ -56,35 +55,92 @@ struct st_completion_data {	/* A job completed. */
 				 * next task automatically; set to 0 otherwise.
 				 */
 	u8	__uflags:7;
-	u8	__unused[7];
-};
+	u16 nv_int_count;
+	u8	__unused[5];
+} __attribute__((packed));
 
 struct st_block_data {		/* A task blocks. */
 	u64	when;
 	u64	__unused;
-};
+} __attribute__((packed));
 
 struct st_resume_data {		/* A task resumes. */
 	u64	when;
 	u64	__unused;
-};
+} __attribute__((packed));
 
 struct st_action_data {
 	u64	when;
 	u8	action;
 	u8	__unused[7];
-};
+} __attribute__((packed));
 
 struct st_sys_release_data {
 	u64	when;
 	u64	release;
-};
+} __attribute__((packed));
+
+
+struct st_tasklet_release_data {
+	u64 when;
+	u64 __unused;
+} __attribute__((packed));
+
+struct st_tasklet_begin_data {
+	u64 when;
+	u16 exe_pid;
+	u8  __unused[6];
+} __attribute__((packed));
+
+struct st_tasklet_end_data {
+	u64 when;
+	u16 exe_pid;
+	u8	flushed;
+	u8	__unused[5];
+} __attribute__((packed));
+
+
+struct st_work_release_data {
+	u64 when;
+	u64 __unused;
+} __attribute__((packed));
+
+struct st_work_begin_data {
+	u64 when;
+	u16 exe_pid;
+	u8	__unused[6];
+} __attribute__((packed));
+
+struct st_work_end_data {
+	u64 when;
+	u16 exe_pid;
+	u8	flushed;
+	u8	__unused[5];
+} __attribute__((packed));
+
+struct st_effective_priority_change_data {
+	u64 when;
+	u16 inh_pid;
+	u8	__unused[6];
+} __attribute__((packed));
+
+struct st_nv_interrupt_begin_data {
+	u64 when;
+	u32 device;
+	u32 serialNumber;
+} __attribute__((packed));
+
+struct st_nv_interrupt_end_data {
+	u64 when;
+	u32 device;
+	u32 serialNumber;
+} __attribute__((packed));
 
 #define DATA(x) struct st_ ## x ## _data x;
 
 typedef enum {
 	ST_NAME = 1,		/* Start at one, so that we can spot
 				 * uninitialized records. */
 	ST_PARAM,
 	ST_RELEASE,
 	ST_ASSIGNED,
@@ -94,7 +150,16 @@ typedef enum {
 	ST_BLOCK,
 	ST_RESUME,
 	ST_ACTION,
-	ST_SYS_RELEASE
+	ST_SYS_RELEASE,
+	ST_TASKLET_RELEASE,
+	ST_TASKLET_BEGIN,
+	ST_TASKLET_END,
+	ST_WORK_RELEASE,
+	ST_WORK_BEGIN,
+	ST_WORK_END,
+	ST_EFF_PRIO_CHANGE,
+	ST_NV_INTERRUPT_BEGIN,
+	ST_NV_INTERRUPT_END,
 } st_event_record_type_t;
 
 struct st_event_record {
@@ -113,8 +178,17 @@ struct st_event_record {
 		DATA(resume);
 		DATA(action);
 		DATA(sys_release);
+		DATA(tasklet_release);
+		DATA(tasklet_begin);
+		DATA(tasklet_end);
+		DATA(work_release);
+		DATA(work_begin);
+		DATA(work_end);
+		DATA(effective_priority_change);
+		DATA(nv_interrupt_begin);
+		DATA(nv_interrupt_end);
 	} data;
-};
+} __attribute__((packed));
 
 #undef DATA
 
@@ -129,6 +203,8 @@ struct st_event_record {
 	ft_event1(id, callback, task)
 #define SCHED_TRACE2(id, callback, task, xtra) \
 	ft_event2(id, callback, task, xtra)
+#define SCHED_TRACE3(id, callback, task, xtra1, xtra2) \
+	ft_event3(id, callback, task, xtra1, xtra2)
 
 /* provide prototypes; needed on sparc64 */
 #ifndef NO_TASK_TRACE_DECLS
@@ -155,12 +231,45 @@ feather_callback void do_sched_trace_action(unsigned long id,
 feather_callback void do_sched_trace_sys_release(unsigned long id,
 						 lt_t* start);
 
+
+feather_callback void do_sched_trace_tasklet_release(unsigned long id,
+						      struct task_struct* owner);
+feather_callback void do_sched_trace_tasklet_begin(unsigned long id,
+						    struct task_struct* owner);
+feather_callback void do_sched_trace_tasklet_end(unsigned long id,
+						  struct task_struct* owner,
+						  unsigned long flushed);
+
+feather_callback void do_sched_trace_work_release(unsigned long id,
+						   struct task_struct* owner);
+feather_callback void do_sched_trace_work_begin(unsigned long id,
+						 struct task_struct* owner,
+						 struct task_struct* exe);
+feather_callback void do_sched_trace_work_end(unsigned long id,
+					       struct task_struct* owner,
+					       struct task_struct* exe,
+					       unsigned long flushed);
+
+feather_callback void do_sched_trace_eff_prio_change(unsigned long id,
+						      struct task_struct* task,
+						      struct task_struct* inh);
+
+feather_callback void do_sched_trace_nv_interrupt_begin(unsigned long id,
+							 u32 device);
+feather_callback void do_sched_trace_nv_interrupt_end(unsigned long id,
+						       unsigned long unused);
+
+
+/* returns true if we're tracing an interrupt on current CPU */
+/* int is_interrupt_tracing_active(void); */
+
 #endif
 
 #else
 
 #define SCHED_TRACE(id, callback, task)        /* no tracing */
 #define SCHED_TRACE2(id, callback, task, xtra) /* no tracing */
+#define SCHED_TRACE3(id, callback, task, xtra1, xtra2)
 
 #endif
 
@@ -193,6 +302,35 @@ feather_callback void do_sched_trace_sys_release(unsigned long id,
 	SCHED_TRACE(SCHED_TRACE_BASE_ID + 10, do_sched_trace_sys_release, when)
 
 
+#define sched_trace_tasklet_release(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 11, do_sched_trace_tasklet_release, t)
+
+#define sched_trace_tasklet_begin(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 12, do_sched_trace_tasklet_begin, t)
+
+#define sched_trace_tasklet_end(t, flushed) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 13, do_sched_trace_tasklet_end, t, flushed)
+
+
+#define sched_trace_work_release(t) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 14, do_sched_trace_work_release, t)
+
+#define sched_trace_work_begin(t, e) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 15, do_sched_trace_work_begin, t, e)
+
+#define sched_trace_work_end(t, e, flushed) \
+	SCHED_TRACE3(SCHED_TRACE_BASE_ID + 16, do_sched_trace_work_end, t, e, flushed)
+
+
+#define sched_trace_eff_prio_change(t, inh) \
+	SCHED_TRACE2(SCHED_TRACE_BASE_ID + 17, do_sched_trace_eff_prio_change, t, inh)
+
+
+#define sched_trace_nv_interrupt_begin(d) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 18, do_sched_trace_nv_interrupt_begin, d)
+#define sched_trace_nv_interrupt_end(d) \
+	SCHED_TRACE(SCHED_TRACE_BASE_ID + 19, do_sched_trace_nv_interrupt_end, d)
+
 #define sched_trace_quantum_boundary() /* NOT IMPLEMENTED */
 
 #endif /* __KERNEL__ */
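The intended call pattern for the new events appears to be that klitirqd brackets each deferred-work item with begin/end records attributed to the owning task; a hedged illustration follows. The wrapper function itself is hypothetical, and the meaning given to the flushed argument here is an assumption based on the st_tasklet_end_data layout above.

/* Hedged sketch of emitting the new sched_trace events around a tasklet. */
#include <litmus/sched_trace.h>

static void example_run_owned_tasklet(struct tasklet_struct *t,
				      struct task_struct *owner)
{
	sched_trace_tasklet_begin(owner);
	t->func(t->data);			/* execute the deferred bottom half */
	sched_trace_tasklet_end(owner, 0ul);	/* assumed: 0 = not flushed back to Linux */
}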
diff --git a/include/litmus/sched_trace_external.h b/include/litmus/sched_trace_external.h
new file mode 100644
index 000000000000..e70e45e4cf51
--- /dev/null
+++ b/include/litmus/sched_trace_external.h
@@ -0,0 +1,78 @@
+/*
+ * sched_trace_external.h -- record scheduler events to a byte stream for offline analysis.
+ */
+#ifndef _LINUX_SCHED_TRACE_EXTERNAL_H_
+#define _LINUX_SCHED_TRACE_EXTERNAL_H_
+
+
+#ifdef CONFIG_SCHED_TASK_TRACE
+extern void __sched_trace_tasklet_begin_external(struct task_struct* t);
+static inline void sched_trace_tasklet_begin_external(struct task_struct* t)
+{
+	__sched_trace_tasklet_begin_external(t);
+}
+
+extern void __sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed);
+static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed)
+{
+	__sched_trace_tasklet_end_external(t, flushed);
+}
+
+extern void __sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e);
+static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e)
+{
+	__sched_trace_work_begin_external(t, e);
+}
+
+extern void __sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f);
+static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f)
+{
+	__sched_trace_work_end_external(t, e, f);
+}
+
+#ifdef CONFIG_LITMUS_NVIDIA
+extern void __sched_trace_nv_interrupt_begin_external(u32 device);
+static inline void sched_trace_nv_interrupt_begin_external(u32 device)
+{
+	__sched_trace_nv_interrupt_begin_external(device);
+}
+
+extern void __sched_trace_nv_interrupt_end_external(u32 device);
+static inline void sched_trace_nv_interrupt_end_external(u32 device)
+{
+	__sched_trace_nv_interrupt_end_external(device);
+}
+#endif
+
+#else
+
+// no tracing.
+static inline void sched_trace_tasklet_begin_external(struct task_struct* t){}
+static inline void sched_trace_tasklet_end_external(struct task_struct* t, unsigned long flushed){}
+static inline void sched_trace_work_begin_external(struct task_struct* t, struct task_struct* e){}
+static inline void sched_trace_work_end_external(struct task_struct* t, struct task_struct* e, unsigned long f){}
+
+#ifdef CONFIG_LITMUS_NVIDIA
+static inline void sched_trace_nv_interrupt_begin_external(u32 device){}
+static inline void sched_trace_nv_interrupt_end_external(u32 device){}
+#endif
+
+#endif
+
+
+#ifdef CONFIG_LITMUS_NVIDIA
+
+#define EX_TS(evt) \
+extern void __##evt(void); \
+static inline void EX_##evt(void) { __##evt(); }
+
+EX_TS(TS_NV_TOPISR_START)
+EX_TS(TS_NV_TOPISR_END)
+EX_TS(TS_NV_BOTISR_START)
+EX_TS(TS_NV_BOTISR_END)
+EX_TS(TS_NV_RELEASE_BOTISR_START)
+EX_TS(TS_NV_RELEASE_BOTISR_END)
+
+#endif
+
+#endif
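These wrappers exist so that code outside the core tracing infrastructure (for example, the NVIDIA interrupt glue) can record events without including the full sched_trace/feather-trace headers. A hedged sketch of a top-half wrapper is below; the ISR itself, the irq-to-device mapping, and the wrapped vendor handler are hypothetical names, not part of this patch.

/* Hypothetical ISR wrapper in the NVIDIA glue layer. */
#include <linux/interrupt.h>
#include <litmus/sched_trace_external.h>

static irqreturn_t example_nv_isr(int irq, void *dev)
{
	u32 device = example_irq_to_device(irq);	/* hypothetical mapping */
	irqreturn_t ret;

	sched_trace_nv_interrupt_begin_external(device);
	EX_TS_NV_TOPISR_START();	/* generated by EX_TS(); feather-trace ID 200 */
	ret = example_real_nv_isr(irq, dev);		/* the wrapped vendor handler */
	EX_TS_NV_TOPISR_END();		/* feather-trace ID 201 */
	sched_trace_nv_interrupt_end_external(device);

	return ret;
}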
diff --git a/include/litmus/trace.h b/include/litmus/trace.h
index e809376d6487..baa542d0135a 100644
--- a/include/litmus/trace.h
+++ b/include/litmus/trace.h
@@ -113,4 +113,24 @@ feather_callback void save_task_latency(unsigned long event, unsigned long when_
 
 #define TS_RELEASE_LATENCY(when)  LTIMESTAMP(208, &(when))
 
+
+#ifdef CONFIG_LITMUS_NVIDIA
+
+#define TS_NV_TOPISR_START		TIMESTAMP(200)
+#define TS_NV_TOPISR_END		TIMESTAMP(201)
+
+#define TS_NV_BOTISR_START		TIMESTAMP(202)
+#define TS_NV_BOTISR_END		TIMESTAMP(203)
+
+#define TS_NV_RELEASE_BOTISR_START	TIMESTAMP(204)
+#define TS_NV_RELEASE_BOTISR_END	TIMESTAMP(205)
+
+#endif
+
+#ifdef CONFIG_LITMUS_PAI_SOFTIRQD
+#define TS_NV_SCHED_BOTISR_START	TIMESTAMP(206)
+#define TS_NV_SCHED_BOTISR_END		TIMESTAMP(207)
+#endif
+
+
 #endif /* !_SYS_TRACE_H_ */
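A hedged sketch of where the PAI timestamps would presumably be taken: the plugin's run_tasklets() callback brackets its bottom-half pass so that feather-trace overhead measurements (IDs 206/207) cover exactly the in-scheduler tasklet work. The queue-drain helper is hypothetical.

/* Hypothetical PAI bottom-half pass inside a plugin's run_tasklets() callback,
 * assuming CONFIG_LITMUS_PAI_SOFTIRQD is enabled. */
#include <litmus/trace.h>

static void example_run_pai_tasklets(struct task_struct *next)
{
	TS_NV_SCHED_BOTISR_START;
	example_drain_pai_tasklet_queue(next);	/* hypothetical per-plugin queue drain */
	TS_NV_SCHED_BOTISR_END;
}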
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index 94264c27d9ac..c6efc4c40af2 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -17,5 +17,6 @@
 #define __NR_wait_for_ts_release __LSC(9)
 #define __NR_release_ts		__LSC(10)
 #define __NR_null_call		__LSC(11)
+#define __NR_register_nv_device	__LSC(12)
 
-#define NR_litmus_syscalls 12
+#define NR_litmus_syscalls 13
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index d5ced0d2642c..b44a7c33bdf8 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -29,5 +29,8 @@ __SYSCALL(__NR_wait_for_ts_release, sys_wait_for_ts_release)
 __SYSCALL(__NR_release_ts, sys_release_ts)
 #define __NR_null_call				__LSC(11)
 __SYSCALL(__NR_null_call, sys_null_call)
+#define __NR_register_nv_device		__LSC(12)
+__SYSCALL(__NR_register_nv_device, sys_register_nv_device)
 
-#define NR_litmus_syscalls 12
+
+#define NR_litmus_syscalls 13
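From user space, the new syscall would typically be reached through liblitmus or directly via syscall(2); a hedged sketch follows. The argument convention (device id, then a register/unregister flag) is an assumption that mirrors reg_nv_device() in nvidia_info.h, and note that __LSC(12) is an offset from the LITMUS syscall base, so the header providing __NR_register_nv_device must come from the patched kernel tree.

/* Hypothetical user-space registration for exclusive use of GPU 0. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <litmus/unistd_64.h>	/* for __NR_register_nv_device (patched tree) */

int main(void)
{
	int gpu = 0;

	/* assumed arguments: (device id, 1 = register / 0 = unregister) */
	if (syscall(__NR_register_nv_device, gpu, 1) < 0) {
		perror("register_nv_device");
		return 1;
	}

	/* ... submit GPU work; its interrupts are now handled on this task's behalf ... */

	syscall(__NR_register_nv_device, gpu, 0);
	return 0;
}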