author     Glenn Elliott <gelliott@cs.unc.edu>   2012-12-11 22:01:01 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-12-12 14:14:41 -0500
commit     c8483ef0959672310bf4ebb72e1a308b00543f74 (patch)
tree       9cb306009b01c5226178f69172738026431d37f2
parent     fbd9574e298157b54c38f82f536e5cea8f766dff (diff)
make klmirqd work like aux tasks. checkpoint.
this code is untested!
-rw-r--r-- | include/linux/interrupt.h | 6
-rw-r--r-- | include/litmus/aux_tasks.h | 10
-rw-r--r-- | include/litmus/litmus_softirq.h | 164
-rw-r--r-- | include/litmus/nvidia_info.h | 35
-rw-r--r-- | include/litmus/rt_param.h | 70
-rw-r--r-- | include/litmus/sched_plugin.h | 11
-rw-r--r-- | include/litmus/signal.h | 2
-rw-r--r-- | include/litmus/unistd_32.h | 5
-rw-r--r-- | include/litmus/unistd_64.h | 6
-rw-r--r-- | kernel/sched.c | 8
-rw-r--r-- | kernel/softirq.c | 14
-rw-r--r-- | litmus/Kconfig | 20
-rw-r--r-- | litmus/aux_tasks.c | 40
-rw-r--r-- | litmus/edf_common.c | 60
-rw-r--r-- | litmus/ikglp_lock.c | 12
-rw-r--r-- | litmus/jobs.c | 2
-rw-r--r-- | litmus/kfmlp_lock.c | 11
-rw-r--r-- | litmus/litmus.c | 48
-rw-r--r-- | litmus/litmus_softirq.c | 1460
-rw-r--r-- | litmus/locking.c | 56
-rw-r--r-- | litmus/nvidia_info.c | 743
-rw-r--r-- | litmus/sched_cedf.c | 165
-rw-r--r-- | litmus/sched_gsn_edf.c | 136
-rw-r--r-- | litmus/sched_plugin.c | 20
24 files changed, 1458 insertions, 1646 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 57a7bc8807be..9fc31289a1bb 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -507,6 +507,12 @@ struct tasklet_struct | |||
507 | #endif | 507 | #endif |
508 | }; | 508 | }; |
509 | 509 | ||
510 | struct tasklet_head | ||
511 | { | ||
512 | struct tasklet_struct *head; | ||
513 | struct tasklet_struct **tail; | ||
514 | }; | ||
515 | |||
510 | #define DECLARE_TASKLET(name, func, data) \ | 516 | #define DECLARE_TASKLET(name, func, data) \ |
511 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } | 517 | struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } |
512 | 518 | ||
diff --git a/include/litmus/aux_tasks.h b/include/litmus/aux_tasks.h
index 87745c1c0df0..255bbafcc6b7 100644
--- a/include/litmus/aux_tasks.h
+++ b/include/litmus/aux_tasks.h
@@ -3,9 +3,6 @@ | |||
3 | 3 | ||
4 | struct task_struct; | 4 | struct task_struct; |
5 | 5 | ||
6 | /* admit an aux task with default parameters */ | ||
7 | //int admit_aux_task(struct task_struct *t); | ||
8 | |||
9 | int make_aux_task_if_required(struct task_struct *t); | 6 | int make_aux_task_if_required(struct task_struct *t); |
10 | 7 | ||
11 | /* call on an aux task when it exits real-time */ | 8 | /* call on an aux task when it exits real-time */ |
@@ -17,13 +14,6 @@ long enable_aux_task_owner(struct task_struct *t); | |||
17 | /* call when an aux_owner exits real-time */ | 14 | /* call when an aux_owner exits real-time */ |
18 | long disable_aux_task_owner(struct task_struct *t); | 15 | long disable_aux_task_owner(struct task_struct *t); |
19 | 16 | ||
20 | |||
21 | /* collectivelly make all aux tasks in the process of leader inherit from hp */ | ||
22 | //int aux_tasks_increase_priority(struct task_struct *leader, struct task_struct *hp); | ||
23 | |||
24 | /* collectivelly make all aux tasks in the process of leader inherit from hp */ | ||
25 | //int aux_tasks_decrease_priority(struct task_struct *leader, struct task_struct *hp); | ||
26 | |||
27 | /* call when an aux_owner increases its priority */ | 17 | /* call when an aux_owner increases its priority */ |
28 | int aux_task_owner_increase_priority(struct task_struct *t); | 18 | int aux_task_owner_increase_priority(struct task_struct *t); |
29 | 19 | ||
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
index 46fe89148505..52e3f7e74ab1 100644
--- a/include/litmus/litmus_softirq.h
+++ b/include/litmus/litmus_softirq.h
@@ -5,129 +5,113 @@ | |||
5 | #include <linux/workqueue.h> | 5 | #include <linux/workqueue.h> |
6 | 6 | ||
7 | /* | 7 | /* |
8 | Threaded tasklet handling for Litmus. Tasklets | 8 | Threaded tasklet/workqueue handling for Litmus. |
9 | are scheduled with the priority of the tasklet's | 9 | Items are scheduled in the following order: hi-tasklet, |
10 | owner---that is, the RT task on behalf the tasklet | 10 | lo-tasklet, workqueue. Items are scheduled in FIFO order |
11 | runs. | 11 | within each of these classes. |
12 | |||
13 | Tasklets are current scheduled in FIFO order with | ||
14 | NO priority inheritance for "blocked" tasklets. | ||
15 | 12 | ||
16 | klmirqd assumes the priority of the owner of the | 13 | klmirqd assumes the priority of the owner of the |
17 | tasklet when the tasklet is next to execute. | 14 | tasklet when the tasklet is next to execute. |
18 | 15 | ||
19 | Currently, hi-tasklets are scheduled before | 16 | The base-priority of a klmirqd thread is below all regular |
20 | low-tasklets, regardless of priority of low-tasklets. | 17 | real-time tasks, but above all other Linux scheduling |
21 | And likewise, low-tasklets are scheduled before work | 18 | classes (klmirqd threads are within the SCHED_LITMUS class). |
22 | queue objects. This priority inversion probably needs | 19 | Regular real-time tasks may increase the priority of |
23 | to be fixed, though it is not an issue if our work with | 20 | a klmirqd thread, but klmirqd is unaware of this |
24 | GPUs as GPUs are owned (and associated klmirqds) for | 21 | (this was not the case in prior incarnations of klmirqd). |
25 | exclusive time periods, thus no inversions can | ||
26 | occur. | ||
27 | */ | 22 | */ |
28 | 23 | ||
29 | 24 | ||
30 | 25 | /* Initialize klmirqd */ | |
31 | #define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD | 26 | void init_klmirqd(void); |
32 | |||
33 | /* Spawns NR_LITMUS_SOFTIRQD klmirqd daemons. | ||
34 | Actual launch of threads is deffered to kworker's | ||
35 | workqueue, so daemons will likely not be immediately | ||
36 | running when this function returns, though the required | ||
37 | data will be initialized. | ||
38 | |||
39 | @affinity_set: an array expressing the processor affinity | ||
40 | for each of the NR_LITMUS_SOFTIRQD daemons. May be set | ||
41 | to NULL for global scheduling. | ||
42 | |||
43 | - Examples - | ||
44 | 8-CPU system with two CPU clusters: | ||
45 | affinity[] = {0, 0, 0, 0, 3, 3, 3, 3} | ||
46 | NOTE: Daemons not actually bound to specified CPU, but rather | ||
47 | cluster in which the CPU resides. | ||
48 | |||
49 | 8-CPU system, partitioned: | ||
50 | affinity[] = {0, 1, 2, 3, 4, 5, 6, 7} | ||
51 | |||
52 | FIXME: change array to a CPU topology or array of cpumasks | ||
53 | |||
54 | */ | ||
55 | void spawn_klmirqd(int* affinity); | ||
56 | |||
57 | 27 | ||
58 | /* Raises a flag to tell klmirqds to terminate. | 28 | /* Raises a flag to tell klmirqds to terminate. |
59 | Termination is async, so some threads may be running | 29 | Termination is async, so some threads may be running |
60 | after function return. */ | 30 | after function return. */ |
61 | void kill_klmirqd(void); | 31 | void kill_klmirqd(void); |
62 | 32 | ||
33 | void kill_klmirqd_thread(struct task_struct* klmirqd_thread); | ||
63 | 34 | ||
64 | /* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqs are ready | 35 | /* Returns 1 if all NR_LITMUS_SOFTIRQD klitirqs are ready |
65 | to handle tasklets. 0, otherwise.*/ | 36 | to handle tasklets. 0, otherwise.*/ |
66 | int klmirqd_is_ready(void); | 37 | int klmirqd_is_ready(void); |
67 | 38 | ||
68 | /* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqs are ready | 39 | /* Returns 1 if no NR_LITMUS_SOFTIRQD klitirqs are ready |
69 | to handle tasklets. 0, otherwise.*/ | 40 | to handle tasklets. 0, otherwise.*/ |
70 | int klmirqd_is_dead(void); | 41 | int klmirqd_is_dead(void); |
71 | 42 | ||
72 | /* Flushes all pending work out to the OS for regular | 43 | |
73 | * tasklet/work processing of the specified 'owner' | 44 | typedef int (*klmirqd_cb_t) (void *arg); |
74 | * | 45 | |
75 | * PRECOND: klmirqd_thread must have a clear entry | 46 | typedef struct |
76 | * in the GPU registry, otherwise this call will become | 47 | { |
77 | * a no-op as work will loop back to the klmirqd_thread. | 48 | klmirqd_cb_t func; |
78 | * | 49 | void* arg; |
79 | * Pass NULL for owner to flush ALL pending items. | 50 | } klmirqd_callback_t; |
51 | |||
52 | /* Launches a klmirqd thread with the provided affinity. | ||
53 | |||
54 | Actual launch of threads is deferred to kworker's | ||
55 | workqueue, so daemons will likely not be immediately | ||
56 | running when this function returns, though the required | ||
57 | data will be initialized. | ||
58 | |||
59 | cpu == -1 for no affinity | ||
80 | */ | 60 | */ |
81 | void flush_pending(struct task_struct* klmirqd_thread, | 61 | int launch_klmirqd_thread(int cpu, klmirqd_callback_t* cb); |
82 | struct task_struct* owner); | ||
83 | 62 | ||
84 | struct task_struct* get_klmirqd(unsigned int k_id); | ||
85 | 63 | ||
64 | /* Flushes all pending work out to the OS for regular | ||
65 | * tasklet/work processing. | ||
66 | */ | ||
67 | void flush_pending(struct task_struct* klmirqd_thread); | ||
86 | 68 | ||
87 | extern int __litmus_tasklet_schedule( | 69 | extern int __litmus_tasklet_schedule( |
88 | struct tasklet_struct *t, | 70 | struct tasklet_struct *t, |
89 | unsigned int k_id); | 71 | struct task_struct *klmirqd_thread); |
90 | 72 | ||
91 | /* schedule a tasklet on klmirqd #k_id */ | 73 | /* schedule a tasklet on klmirqd #k_id */ |
92 | static inline int litmus_tasklet_schedule( | 74 | static inline int litmus_tasklet_schedule( |
93 | struct tasklet_struct *t, | 75 | struct tasklet_struct *t, |
94 | unsigned int k_id) | 76 | struct task_struct *klmirqd_thread) |
95 | { | 77 | { |
96 | int ret = 0; | 78 | int ret = 0; |
97 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 79 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
98 | ret = __litmus_tasklet_schedule(t, k_id); | 80 | ret = __litmus_tasklet_schedule(t, klmirqd_thread); |
81 | } | ||
99 | return(ret); | 82 | return(ret); |
100 | } | 83 | } |
101 | 84 | ||
102 | /* for use by __tasklet_schedule() */ | 85 | /* for use by __tasklet_schedule() */ |
103 | static inline int _litmus_tasklet_schedule( | 86 | static inline int _litmus_tasklet_schedule( |
104 | struct tasklet_struct *t, | 87 | struct tasklet_struct *t, |
105 | unsigned int k_id) | 88 | struct task_struct *klmirqd_thread) |
106 | { | 89 | { |
107 | return(__litmus_tasklet_schedule(t, k_id)); | 90 | return(__litmus_tasklet_schedule(t, klmirqd_thread)); |
108 | } | 91 | } |
109 | 92 | ||
110 | 93 | ||
111 | 94 | ||
112 | 95 | ||
113 | extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, | 96 | extern int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, |
114 | unsigned int k_id); | 97 | struct task_struct *klmirqd_thread); |
115 | 98 | ||
116 | /* schedule a hi tasklet on klmirqd #k_id */ | 99 | /* schedule a hi tasklet on klmirqd #k_id */ |
117 | static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t, | 100 | static inline int litmus_tasklet_hi_schedule(struct tasklet_struct *t, |
118 | unsigned int k_id) | 101 | struct task_struct *klmirqd_thread) |
119 | { | 102 | { |
120 | int ret = 0; | 103 | int ret = 0; |
121 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 104 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
122 | ret = __litmus_tasklet_hi_schedule(t, k_id); | 105 | ret = __litmus_tasklet_hi_schedule(t, klmirqd_thread); |
106 | } | ||
123 | return(ret); | 107 | return(ret); |
124 | } | 108 | } |
125 | 109 | ||
126 | /* for use by __tasklet_hi_schedule() */ | 110 | /* for use by __tasklet_hi_schedule() */ |
127 | static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t, | 111 | static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t, |
128 | unsigned int k_id) | 112 | struct task_struct *klmirqd_thread) |
129 | { | 113 | { |
130 | return(__litmus_tasklet_hi_schedule(t, k_id)); | 114 | return(__litmus_tasklet_hi_schedule(t, klmirqd_thread)); |
131 | } | 115 | } |
132 | 116 | ||
133 | 117 | ||
@@ -136,26 +120,27 @@ static inline int _litmus_tasklet_hi_schedule(struct tasklet_struct *t, | |||
136 | 120 | ||
137 | extern int __litmus_tasklet_hi_schedule_first( | 121 | extern int __litmus_tasklet_hi_schedule_first( |
138 | struct tasklet_struct *t, | 122 | struct tasklet_struct *t, |
139 | unsigned int k_id); | 123 | struct task_struct *klmirqd_thread); |
140 | 124 | ||
141 | /* schedule a hi tasklet on klmirqd #k_id on next go-around */ | 125 | /* schedule a hi tasklet on klmirqd #k_id on next go-around */ |
142 | /* PRECONDITION: Interrupts must be disabled. */ | 126 | /* PRECONDITION: Interrupts must be disabled. */ |
143 | static inline int litmus_tasklet_hi_schedule_first( | 127 | static inline int litmus_tasklet_hi_schedule_first( |
144 | struct tasklet_struct *t, | 128 | struct tasklet_struct *t, |
145 | unsigned int k_id) | 129 | struct task_struct *klmirqd_thread) |
146 | { | 130 | { |
147 | int ret = 0; | 131 | int ret = 0; |
148 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 132 | if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { |
149 | ret = __litmus_tasklet_hi_schedule_first(t, k_id); | 133 | ret = __litmus_tasklet_hi_schedule_first(t, klmirqd_thread); |
134 | } | ||
150 | return(ret); | 135 | return(ret); |
151 | } | 136 | } |
152 | 137 | ||
153 | /* for use by __tasklet_hi_schedule_first() */ | 138 | /* for use by __tasklet_hi_schedule_first() */ |
154 | static inline int _litmus_tasklet_hi_schedule_first( | 139 | static inline int _litmus_tasklet_hi_schedule_first( |
155 | struct tasklet_struct *t, | 140 | struct tasklet_struct *t, |
156 | unsigned int k_id) | 141 | struct task_struct *klmirqd_thread) |
157 | { | 142 | { |
158 | return(__litmus_tasklet_hi_schedule_first(t, k_id)); | 143 | return(__litmus_tasklet_hi_schedule_first(t, klmirqd_thread)); |
159 | } | 144 | } |
160 | 145 | ||
161 | 146 | ||
@@ -164,36 +149,13 @@ static inline int _litmus_tasklet_hi_schedule_first( | |||
164 | 149 | ||
165 | extern int __litmus_schedule_work( | 150 | extern int __litmus_schedule_work( |
166 | struct work_struct* w, | 151 | struct work_struct* w, |
167 | unsigned int k_id); | 152 | struct task_struct *klmirqd_thread); |
168 | 153 | ||
169 | static inline int litmus_schedule_work( | 154 | static inline int litmus_schedule_work( |
170 | struct work_struct* w, | 155 | struct work_struct* w, |
171 | unsigned int k_id) | 156 | struct task_struct *klmirqd_thread) |
172 | { | 157 | { |
173 | return(__litmus_schedule_work(w, k_id)); | 158 | return(__litmus_schedule_work(w, klmirqd_thread)); |
174 | } | 159 | } |
175 | 160 | ||
176 | |||
177 | |||
178 | ///////////// mutex operations for client threads. | ||
179 | |||
180 | void down_and_set_stat(struct task_struct* t, | ||
181 | enum klmirqd_sem_status to_set, | ||
182 | struct mutex* sem); | ||
183 | |||
184 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
185 | enum klmirqd_sem_status to_reset, | ||
186 | enum klmirqd_sem_status to_set, | ||
187 | struct mutex* sem); | ||
188 | |||
189 | void up_and_set_stat(struct task_struct* t, | ||
190 | enum klmirqd_sem_status to_set, | ||
191 | struct mutex* sem); | ||
192 | |||
193 | |||
194 | |||
195 | void release_klmirqd_lock(struct task_struct* t); | ||
196 | |||
197 | int reacquire_klmirqd_lock(struct task_struct* t); | ||
198 | |||
199 | #endif | 161 | #endif |
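The interface above replaces the old fixed pool of NR_LITMUS_SOFTIRQD daemons with individually launched klmirqd threads. A minimal bring-up sketch follows; it assumes the klmirqd_callback_t callback runs once in the context of the newly launched thread, and the way the thread pointer is published is purely illustrative. Only init_klmirqd(), launch_klmirqd_thread(), and klmirqd_callback_t come from this header.

```c
#include <linux/sched.h>
#include <litmus/litmus_softirq.h>

static struct task_struct *my_irq_thread;	/* hypothetical storage for the launched thread */

/* Assumed to run in the context of the freshly launched klmirqd thread. */
static int my_klmirqd_ready(void *arg)
{
	*(struct task_struct **)arg = current;
	return 0;
}

static klmirqd_callback_t my_cb = {
	.func = my_klmirqd_ready,
	.arg  = &my_irq_thread,
};

static int my_subsystem_init(void)
{
	init_klmirqd();
	/* cpu == -1: no affinity; pass a CPU id to bind the thread to a CPU/cluster */
	return launch_klmirqd_thread(-1, &my_cb);
}
```

Tasklets and work items are then targeted at a thread pointer (litmus_tasklet_schedule(), litmus_schedule_work()) rather than at a numeric k_id as before.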
diff --git a/include/litmus/nvidia_info.h b/include/litmus/nvidia_info.h
index 97c9577141db..6f354c8b00ac 100644
--- a/include/litmus/nvidia_info.h
+++ b/include/litmus/nvidia_info.h
@@ -6,10 +6,9 @@ | |||
6 | 6 | ||
7 | #include <litmus/litmus_softirq.h> | 7 | #include <litmus/litmus_softirq.h> |
8 | 8 | ||
9 | |||
10 | //#define NV_DEVICE_NUM NR_LITMUS_SOFTIRQD | ||
11 | #define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM | 9 | #define NV_DEVICE_NUM CONFIG_NV_DEVICE_NUM |
12 | #define NV_MAX_SIMULT_USERS CONFIG_NV_MAX_SIMULT_USERS | 10 | |
11 | /* Functions used for decoding NVIDIA blobs. */ | ||
13 | 12 | ||
14 | int init_nvidia_info(void); | 13 | int init_nvidia_info(void); |
15 | void shutdown_nvidia_info(void); | 14 | void shutdown_nvidia_info(void); |
@@ -18,29 +17,33 @@ int is_nvidia_func(void* func_addr); | |||
18 | 17 | ||
19 | void dump_nvidia_info(const struct tasklet_struct *t); | 18 | void dump_nvidia_info(const struct tasklet_struct *t); |
20 | 19 | ||
21 | |||
22 | // Returns the Nvidia device # associated with provided tasklet and work_struct. | 20 | // Returns the Nvidia device # associated with provided tasklet and work_struct. |
23 | u32 get_tasklet_nv_device_num(const struct tasklet_struct *t); | 21 | u32 get_tasklet_nv_device_num(const struct tasklet_struct *t); |
24 | u32 get_work_nv_device_num(const struct work_struct *t); | 22 | u32 get_work_nv_device_num(const struct work_struct *t); |
25 | 23 | ||
26 | 24 | ||
27 | int init_nv_device_reg(void); | ||
28 | //int get_nv_device_id(struct task_struct* owner); | ||
29 | 25 | ||
30 | 26 | /* Functions for figuring out the priority of GPU-using tasks */ | |
31 | int reg_nv_device(int reg_device_id, int register_device, struct task_struct *t); | ||
32 | 27 | ||
33 | struct task_struct* get_nv_max_device_owner(u32 target_device_id); | 28 | struct task_struct* get_nv_max_device_owner(u32 target_device_id); |
34 | //int is_nv_device_owner(u32 target_device_id); | ||
35 | |||
36 | void lock_nv_registry(u32 reg_device_id, unsigned long* flags); | ||
37 | void unlock_nv_registry(u32 reg_device_id, unsigned long* flags); | ||
38 | 29 | ||
39 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 30 | #ifdef CONFIG_LITMUS_SOFTIRQD |
40 | void pai_check_priority_increase(struct task_struct *t, int reg_device_id); | 31 | struct task_struct* get_nv_klmirqd_thread(u32 target_device_id); |
41 | void pai_check_priority_decrease(struct task_struct *t, int reg_device_id); | ||
42 | #endif | 32 | #endif |
43 | 33 | ||
44 | //void increment_nv_int_count(u32 device); | 34 | /* call when the GPU-holding task, t, blocks */ |
35 | long enable_gpu_owner(struct task_struct *t); | ||
36 | |||
37 | /* call when the GPU-holding task, t, resumes */ | ||
38 | long disable_gpu_owner(struct task_struct *t); | ||
39 | |||
40 | /* call when the GPU-holding task, t, increases its priority */ | ||
41 | int gpu_owner_increase_priority(struct task_struct *t); | ||
42 | |||
43 | /* call when the GPU-holding task, t, decreases its priority */ | ||
44 | int gpu_owner_decrease_priority(struct task_struct *t); | ||
45 | |||
46 | |||
47 | int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t); | ||
45 | 48 | ||
46 | #endif | 49 | #endif |
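With get_nv_klmirqd_thread() available under CONFIG_LITMUS_SOFTIRQD, a dispatch path on the driver side might look like the sketch below. The function name and the fallback to the stock tasklet path are illustrative assumptions, not part of this patch.

```c
#include <linux/interrupt.h>
#include <litmus/nvidia_info.h>
#include <litmus/litmus_softirq.h>

/* Hand an NVIDIA bottom half to the klmirqd thread registered for its GPU. */
static void route_nv_tasklet(struct tasklet_struct *t)
{
	struct task_struct *thread = NULL;

#ifdef CONFIG_LITMUS_SOFTIRQD
	thread = get_nv_klmirqd_thread(get_tasklet_nv_device_num(t));
#endif

	if (thread)
		litmus_tasklet_schedule(t, thread);	/* threaded; runs at the GPU owner's priority */
	else
		__tasklet_schedule(t);			/* ordinary Linux softirq path */
}
```

Per the comments above, enable_gpu_owner()/disable_gpu_owner() and the gpu_owner_*_priority() calls are meant to be invoked from the plugin's block/resume and priority-change paths so that klmirqd threads can track the priority of the GPU-holding task.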
diff --git a/include/litmus/rt_param.h b/include/litmus/rt_param.h
index 47301c04d862..c8ee64569dbb 100644
--- a/include/litmus/rt_param.h
+++ b/include/litmus/rt_param.h
@@ -128,6 +128,10 @@ struct control_page { | |||
128 | #include <litmus/binheap.h> | 128 | #include <litmus/binheap.h> |
129 | #include <linux/semaphore.h> | 129 | #include <linux/semaphore.h> |
130 | 130 | ||
131 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
132 | #include <linux/interrupt.h> | ||
133 | #endif | ||
134 | |||
131 | struct _rt_domain; | 135 | struct _rt_domain; |
132 | struct bheap_node; | 136 | struct bheap_node; |
133 | struct release_heap; | 137 | struct release_heap; |
@@ -205,6 +209,38 @@ typedef struct avg_est{ | |||
205 | lt_t avg; | 209 | lt_t avg; |
206 | } avg_est_t; | 210 | } avg_est_t; |
207 | 211 | ||
212 | |||
213 | |||
214 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
215 | //struct tasklet_head | ||
216 | //{ | ||
217 | // struct tasklet_struct *head; | ||
218 | // struct tasklet_struct **tail; | ||
219 | //}; | ||
220 | |||
221 | struct klmirqd_info | ||
222 | { | ||
223 | struct task_struct* klmirqd; | ||
224 | struct task_struct* current_owner; | ||
225 | unsigned int terminating:1; | ||
226 | |||
227 | raw_spinlock_t lock; | ||
228 | |||
229 | u32 pending; | ||
230 | atomic_t num_hi_pending; | ||
231 | atomic_t num_low_pending; | ||
232 | atomic_t num_work_pending; | ||
233 | |||
234 | /* in order of priority */ | ||
235 | struct tasklet_head pending_tasklets_hi; | ||
236 | struct tasklet_head pending_tasklets; | ||
237 | struct list_head worklist; | ||
238 | |||
239 | struct list_head klmirqd_reg; | ||
240 | }; | ||
241 | #endif | ||
242 | |||
243 | |||
208 | /* RT task parameters for scheduling extensions | 244 | /* RT task parameters for scheduling extensions |
209 | * These parameters are inherited during clone and therefore must | 245 | * These parameters are inherited during clone and therefore must |
210 | * be explicitly set up before the task set is launched. | 246 | * be explicitly set up before the task set is launched. |
@@ -221,34 +257,21 @@ struct rt_param { | |||
221 | 257 | ||
222 | #ifdef CONFIG_LITMUS_SOFTIRQD | 258 | #ifdef CONFIG_LITMUS_SOFTIRQD |
223 | /* proxy threads have minimum priority by default */ | 259 | /* proxy threads have minimum priority by default */ |
224 | unsigned int is_proxy_thread:1; | 260 | unsigned int is_interrupt_thread:1; |
225 | |||
226 | /* pointer to klmirqd currently working on this | ||
227 | task_struct's behalf. only set by the task pointed | ||
228 | to by klmirqd. | ||
229 | 261 | ||
230 | ptr only valid if is_proxy_thread == 0 | 262 | /* pointer to data used by klmirqd thread. |
231 | */ | 263 | * |
232 | struct task_struct* cur_klmirqd; | 264 | * ptr only valid if is_interrupt_thread == 1 |
233 | |||
234 | /* Used to implement mutual execution exclusion between | ||
235 | * job and klmirqd execution. Job must always hold | ||
236 | * it's klmirqd_sem to execute. klmirqd instance | ||
237 | * must hold the semaphore before executing on behalf | ||
238 | * of a job. | ||
239 | */ | ||
240 | struct mutex klmirqd_sem; | ||
241 | |||
242 | /* status of held klmirqd_sem, even if the held klmirqd_sem is from | ||
243 | another task (only proxy threads do this though). | ||
244 | */ | 265 | */ |
245 | atomic_t klmirqd_sem_stat; | 266 | struct klmirqd_info* klmirqd_info; |
246 | #endif | 267 | #endif |
247 | 268 | ||
248 | #ifdef CONFIG_LITMUS_NVIDIA | 269 | #ifdef CONFIG_LITMUS_NVIDIA |
249 | /* number of top-half interrupts handled on behalf of current job */ | 270 | /* number of top-half interrupts handled on behalf of current job */ |
250 | atomic_t nv_int_count; | 271 | atomic_t nv_int_count; |
251 | long unsigned int held_gpus; // bitmap of held GPUs. | 272 | long unsigned int held_gpus; // bitmap of held GPUs. |
273 | struct binheap_node gpu_owner_node; // just one GPU for now... | ||
274 | unsigned int hide_from_gpu:1; | ||
252 | 275 | ||
253 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 276 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
254 | avg_est_t gpu_migration_est[MIG_LAST+1]; | 277 | avg_est_t gpu_migration_est[MIG_LAST+1]; |
@@ -370,6 +393,13 @@ struct rt_param { | |||
370 | struct control_page * ctrl_page; | 393 | struct control_page * ctrl_page; |
371 | }; | 394 | }; |
372 | 395 | ||
396 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
397 | //struct klmirqd_data | ||
398 | //{ | ||
399 | // struct binheap klmirqd_users; | ||
400 | //}; | ||
401 | //#endif | ||
402 | |||
373 | #ifdef CONFIG_REALTIME_AUX_TASKS | 403 | #ifdef CONFIG_REALTIME_AUX_TASKS |
374 | struct aux_data | 404 | struct aux_data |
375 | { | 405 | { |
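The new per-thread state added above is reached through the existing tsk_rt() accessor, and klmirqd_info is only valid on threads flagged as interrupt threads. A small sketch of that invariant (the helper name is illustrative):

```c
#include <litmus/litmus.h>	/* tsk_rt() */

#ifdef CONFIG_LITMUS_SOFTIRQD
/* klmirqd_info is only meaningful for klmirqd interrupt threads themselves. */
static inline struct klmirqd_info *get_klmirqd_info(struct task_struct *t)
{
	return tsk_rt(t)->is_interrupt_thread ? tsk_rt(t)->klmirqd_info : NULL;
}
#endif
```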
diff --git a/include/litmus/sched_plugin.h b/include/litmus/sched_plugin.h
index e8127f427d56..a13d1a2992fe 100644
--- a/include/litmus/sched_plugin.h
+++ b/include/litmus/sched_plugin.h
@@ -79,12 +79,6 @@ typedef void (*nested_increase_prio_t)(struct task_struct* t, struct task_struct | |||
79 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, | 79 | typedef void (*nested_decrease_prio_t)(struct task_struct* t, struct task_struct* prio_inh, |
80 | raw_spinlock_t *to_unlock, unsigned long irqflags); | 80 | raw_spinlock_t *to_unlock, unsigned long irqflags); |
81 | 81 | ||
82 | typedef void (*increase_prio_klitirq_t)(struct task_struct* klmirqd, | ||
83 | struct task_struct* old_owner, | ||
84 | struct task_struct* new_owner); | ||
85 | typedef void (*decrease_prio_klmirqd_t)(struct task_struct* klmirqd, | ||
86 | struct task_struct* old_owner); | ||
87 | |||
88 | 82 | ||
89 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); | 83 | typedef int (*enqueue_pai_tasklet_t)(struct tasklet_struct* tasklet); |
90 | typedef void (*change_prio_pai_tasklet_t)(struct task_struct *old_prio, | 84 | typedef void (*change_prio_pai_tasklet_t)(struct task_struct *old_prio, |
@@ -166,11 +160,6 @@ struct sched_plugin { | |||
166 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 160 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
167 | allocate_affinity_observer_t allocate_aff_obs; | 161 | allocate_affinity_observer_t allocate_aff_obs; |
168 | #endif | 162 | #endif |
169 | |||
170 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
171 | increase_prio_klitirq_t increase_prio_klmirqd; | ||
172 | decrease_prio_klmirqd_t decrease_prio_klmirqd; | ||
173 | #endif | ||
174 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 163 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
175 | enqueue_pai_tasklet_t enqueue_pai_tasklet; | 164 | enqueue_pai_tasklet_t enqueue_pai_tasklet; |
176 | change_prio_pai_tasklet_t change_prio_pai_tasklet; | 165 | change_prio_pai_tasklet_t change_prio_pai_tasklet; |
diff --git a/include/litmus/signal.h b/include/litmus/signal.h
index b3d82b294984..38c3207951e0 100644
--- a/include/litmus/signal.h
+++ b/include/litmus/signal.h
@@ -9,7 +9,7 @@ | |||
9 | 9 | ||
10 | /* Signals used by Litmus to asynchronously communicate events | 10 | /* Signals used by Litmus to asynchronously communicate events |
11 | * to real-time tasks. | 11 | * to real-time tasks. |
12 | * | 12 | * |
13 | * Signal values overlap with [SIGRTMIN, SIGRTMAX], so beware of | 13 | * Signal values overlap with [SIGRTMIN, SIGRTMAX], so beware of |
14 | * application-level conflicts when dealing with COTS user-level | 14 | * application-level conflicts when dealing with COTS user-level |
15 | * code. | 15 | * code. |
diff --git a/include/litmus/unistd_32.h b/include/litmus/unistd_32.h
index c86b743408ed..7265ffadf555 100644
--- a/include/litmus/unistd_32.h
+++ b/include/litmus/unistd_32.h
@@ -19,8 +19,7 @@ | |||
19 | #define __NR_null_call __LSC(11) | 19 | #define __NR_null_call __LSC(11) |
20 | #define __NR_litmus_dgl_lock __LSC(12) | 20 | #define __NR_litmus_dgl_lock __LSC(12) |
21 | #define __NR_litmus_dgl_unlock __LSC(13) | 21 | #define __NR_litmus_dgl_unlock __LSC(13) |
22 | #define __NR_register_nv_device __LSC(14) | ||
23 | 22 | ||
24 | #define __NR_set_aux_tasks _LSC(15) | 23 | #define __NR_set_aux_tasks _LSC(14) |
25 | 24 | ||
26 | #define NR_litmus_syscalls 16 | 25 | #define NR_litmus_syscalls 15 |
diff --git a/include/litmus/unistd_64.h b/include/litmus/unistd_64.h
index 3825bc129dbd..51e730124dde 100644
--- a/include/litmus/unistd_64.h
+++ b/include/litmus/unistd_64.h
@@ -33,10 +33,8 @@ __SYSCALL(__NR_null_call, sys_null_call) | |||
33 | __SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock) | 33 | __SYSCALL(__NR_litmus_dgl_lock, sys_litmus_dgl_lock) |
34 | #define __NR_litmus_dgl_unlock __LSC(13) | 34 | #define __NR_litmus_dgl_unlock __LSC(13) |
35 | __SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock) | 35 | __SYSCALL(__NR_litmus_dgl_unlock, sys_litmus_dgl_unlock) |
36 | #define __NR_register_nv_device __LSC(14) | ||
37 | __SYSCALL(__NR_register_nv_device, sys_register_nv_device) | ||
38 | 36 | ||
39 | #define __NR_set_aux_tasks __LSC(15) | 37 | #define __NR_set_aux_tasks __LSC(14) |
40 | __SYSCALL(__NR_set_aux_tasks, sys_set_aux_tasks) | 38 | __SYSCALL(__NR_set_aux_tasks, sys_set_aux_tasks) |
41 | 39 | ||
42 | #define NR_litmus_syscalls 16 | 40 | #define NR_litmus_syscalls 15 |
diff --git a/kernel/sched.c b/kernel/sched.c
index 251c89eaf24e..840f87bce097 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4340,10 +4340,6 @@ need_resched: | |||
4340 | rcu_note_context_switch(cpu); | 4340 | rcu_note_context_switch(cpu); |
4341 | prev = rq->curr; | 4341 | prev = rq->curr; |
4342 | 4342 | ||
4343 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
4344 | release_klitirqd_lock(prev); | ||
4345 | #endif | ||
4346 | |||
4347 | /* LITMUS^RT: quickly re-evaluate the scheduling decision | 4343 | /* LITMUS^RT: quickly re-evaluate the scheduling decision |
4348 | * if the previous one is no longer valid after CTX. | 4344 | * if the previous one is no longer valid after CTX. |
4349 | */ | 4345 | */ |
@@ -4444,10 +4440,6 @@ litmus_need_resched_nonpreemptible: | |||
4444 | if (need_resched()) | 4440 | if (need_resched()) |
4445 | goto need_resched; | 4441 | goto need_resched; |
4446 | 4442 | ||
4447 | #ifdef LITMUS_SOFTIRQD | ||
4448 | reacquire_klitirqd_lock(prev); | ||
4449 | #endif | ||
4450 | |||
4451 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 4443 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
4452 | litmus->run_tasklets(prev); | 4444 | litmus->run_tasklets(prev); |
4453 | #endif | 4445 | #endif |
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b013046e8c36..053aec196a50 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -403,11 +403,13 @@ void open_softirq(int nr, void (*action)(struct softirq_action *)) | |||
403 | /* | 403 | /* |
404 | * Tasklets | 404 | * Tasklets |
405 | */ | 405 | */ |
406 | /* | ||
406 | struct tasklet_head | 407 | struct tasklet_head |
407 | { | 408 | { |
408 | struct tasklet_struct *head; | 409 | struct tasklet_struct *head; |
409 | struct tasklet_struct **tail; | 410 | struct tasklet_struct **tail; |
410 | }; | 411 | }; |
412 | */ | ||
411 | 413 | ||
412 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); | 414 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); |
413 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); | 415 | static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); |
@@ -522,6 +524,11 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) | |||
522 | #ifdef CONFIG_LITMUS_NVIDIA | 524 | #ifdef CONFIG_LITMUS_NVIDIA |
523 | if(is_nvidia_func(t->func)) | 525 | if(is_nvidia_func(t->func)) |
524 | { | 526 | { |
527 | #if 1 | ||
528 | // do nvidia tasklets right away and return | ||
529 | if(__do_nv_now(t)) | ||
530 | return; | ||
531 | #else | ||
525 | u32 nvidia_device = get_tasklet_nv_device_num(t); | 532 | u32 nvidia_device = get_tasklet_nv_device_num(t); |
526 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", | 533 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", |
527 | // __FUNCTION__, nvidia_device,litmus_clock()); | 534 | // __FUNCTION__, nvidia_device,litmus_clock()); |
@@ -564,6 +571,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) | |||
564 | } | 571 | } |
565 | } | 572 | } |
566 | unlock_nv_registry(nvidia_device, &flags); | 573 | unlock_nv_registry(nvidia_device, &flags); |
574 | #endif | ||
567 | } | 575 | } |
568 | #endif | 576 | #endif |
569 | 577 | ||
@@ -590,6 +598,11 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) | |||
590 | #ifdef CONFIG_LITMUS_NVIDIA | 598 | #ifdef CONFIG_LITMUS_NVIDIA |
591 | if(is_nvidia_func(t->func)) | 599 | if(is_nvidia_func(t->func)) |
592 | { | 600 | { |
601 | #if 1 | ||
602 | // do nvidia tasklets right away and return | ||
603 | if(__do_nv_now(t)) | ||
604 | return; | ||
605 | #else | ||
593 | u32 nvidia_device = get_tasklet_nv_device_num(t); | 606 | u32 nvidia_device = get_tasklet_nv_device_num(t); |
594 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", | 607 | // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n", |
595 | // __FUNCTION__, nvidia_device,litmus_clock()); | 608 | // __FUNCTION__, nvidia_device,litmus_clock()); |
@@ -632,6 +645,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) | |||
632 | } | 645 | } |
633 | } | 646 | } |
634 | unlock_nv_registry(nvidia_device, &flags); | 647 | unlock_nv_registry(nvidia_device, &flags); |
648 | #endif | ||
635 | } | 649 | } |
636 | #endif | 650 | #endif |
637 | 651 | ||
diff --git a/litmus/Kconfig b/litmus/Kconfig
index f2434b87239b..9aeae659ae32 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -382,7 +382,7 @@ menu "Interrupt Handling" | |||
382 | choice | 382 | choice |
383 | prompt "Scheduling of interrupt bottom-halves in Litmus." | 383 | prompt "Scheduling of interrupt bottom-halves in Litmus." |
384 | default LITMUS_SOFTIRQD_NONE | 384 | default LITMUS_SOFTIRQD_NONE |
385 | depends on LITMUS_LOCKING && !LITMUS_THREAD_ALL_SOFTIRQ | 385 | depends on LITMUS_LOCKING |
386 | help | 386 | help |
387 | Schedule tasklets with known priorities in Litmus. | 387 | Schedule tasklets with known priorities in Litmus. |
388 | 388 | ||
@@ -398,7 +398,7 @@ config LITMUS_SOFTIRQD | |||
398 | specifically dispatched to these workers. (Softirqs for | 398 | specifically dispatched to these workers. (Softirqs for |
399 | Litmus tasks are not magically redirected to klmirqd.) | 399 | Litmus tasks are not magically redirected to klmirqd.) |
400 | 400 | ||
401 | G-EDF/RM, C-EDF/RM ONLY for now! | 401 | G-EDF, C-EDF ONLY for now! |
402 | 402 | ||
403 | 403 | ||
404 | config LITMUS_PAI_SOFTIRQD | 404 | config LITMUS_PAI_SOFTIRQD |
@@ -409,19 +409,11 @@ config LITMUS_PAI_SOFTIRQD | |||
409 | at the cost of non-preemptive durations of bottom half | 409 | at the cost of non-preemptive durations of bottom half |
410 | processing. | 410 | processing. |
411 | 411 | ||
412 | G-EDF/RM, C-EDF/RM ONLY for now! | 412 | G-EDF, C-EDF ONLY for now! |
413 | 413 | ||
414 | endchoice | 414 | endchoice |
415 | 415 | ||
416 | 416 | ||
417 | config NR_LITMUS_SOFTIRQD | ||
418 | int "Number of klmirqd." | ||
419 | depends on LITMUS_SOFTIRQD | ||
420 | range 1 4096 | ||
421 | default "1" | ||
422 | help | ||
423 | Should be <= to the number of CPUs in your system. | ||
424 | |||
425 | config LITMUS_NVIDIA | 417 | config LITMUS_NVIDIA |
426 | bool "Litmus handling of NVIDIA interrupts." | 418 | bool "Litmus handling of NVIDIA interrupts." |
427 | default n | 419 | default n |
@@ -445,7 +437,7 @@ config LITMUS_AFFINITY_AWARE_GPU_ASSINGMENT | |||
445 | config NV_DEVICE_NUM | 437 | config NV_DEVICE_NUM |
446 | int "Number of NVIDIA GPUs." | 438 | int "Number of NVIDIA GPUs." |
447 | depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD | 439 | depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD |
448 | range 1 4096 | 440 | range 1 16 |
449 | default "1" | 441 | default "1" |
450 | help | 442 | help |
451 | Should be (<= to the number of CPUs) and | 443 | Should be (<= to the number of CPUs) and |
@@ -453,11 +445,11 @@ config NV_DEVICE_NUM | |||
453 | 445 | ||
454 | config NV_MAX_SIMULT_USERS | 446 | config NV_MAX_SIMULT_USERS |
455 | int "Maximum number of threads sharing a GPU simultanously" | 447 | int "Maximum number of threads sharing a GPU simultanously" |
456 | depends on LITMUS_SOFTIRQD || LITMUS_PAI_SOFTIRQD | 448 | depends on LITMUS_NVIDIA |
457 | range 1 3 | 449 | range 1 3 |
458 | default "2" | 450 | default "2" |
459 | help | 451 | help |
460 | Should be equal to the #copy_engines + #execution_engines | 452 | Should be at least equal to the #copy_engines + #execution_engines |
461 | of the GPUs in your system. | 453 | of the GPUs in your system. |
462 | 454 | ||
463 | Scientific/Professional GPUs = 3 (ex. M2070, Quadro 6000?) | 455 | Scientific/Professional GPUs = 3 (ex. M2070, Quadro 6000?) |
diff --git a/litmus/aux_tasks.c b/litmus/aux_tasks.c
index 20f477f6e3bc..ef26bba3be77 100644
--- a/litmus/aux_tasks.c
+++ b/litmus/aux_tasks.c
@@ -54,7 +54,7 @@ int exit_aux_task(struct task_struct *t) | |||
54 | TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, t->group_leader->comm, t->group_leader->pid); | 54 | TRACE_CUR("Aux task %s/%d is exiting from %s/%d.\n", t->comm, t->pid, t->group_leader->comm, t->group_leader->pid); |
55 | 55 | ||
56 | tsk_rt(t)->is_aux_task = 0; | 56 | tsk_rt(t)->is_aux_task = 0; |
57 | 57 | ||
58 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE | 58 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE |
59 | list_del(&tsk_rt(t)->aux_task_node); | 59 | list_del(&tsk_rt(t)->aux_task_node); |
60 | if (tsk_rt(t)->inh_task) { | 60 | if (tsk_rt(t)->inh_task) { |
@@ -218,36 +218,36 @@ int make_aux_task_if_required(struct task_struct *t) | |||
218 | { | 218 | { |
219 | struct task_struct *leader; | 219 | struct task_struct *leader; |
220 | int retval = 0; | 220 | int retval = 0; |
221 | 221 | ||
222 | read_lock_irq(&tasklist_lock); | 222 | read_lock_irq(&tasklist_lock); |
223 | 223 | ||
224 | leader = t->group_leader; | 224 | leader = t->group_leader; |
225 | 225 | ||
226 | if(!tsk_aux(leader)->initialized || !tsk_aux(leader)->aux_future) { | 226 | if(!tsk_aux(leader)->initialized || !tsk_aux(leader)->aux_future) { |
227 | goto out; | 227 | goto out; |
228 | } | 228 | } |
229 | 229 | ||
230 | TRACE_CUR("Making %s/%d in %s/%d an aux thread.\n", t->comm, t->pid, leader->comm, leader->pid); | 230 | TRACE_CUR("Making %s/%d in %s/%d an aux thread.\n", t->comm, t->pid, leader->comm, leader->pid); |
231 | 231 | ||
232 | INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node); | 232 | INIT_LIST_HEAD(&tsk_rt(t)->aux_task_node); |
233 | INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node); | 233 | INIT_BINHEAP_NODE(&tsk_rt(t)->aux_task_owner_node); |
234 | 234 | ||
235 | retval = admit_aux_task(t); | 235 | retval = admit_aux_task(t); |
236 | if (retval == 0) { | 236 | if (retval == 0) { |
237 | tsk_rt(t)->is_aux_task = 1; | 237 | tsk_rt(t)->is_aux_task = 1; |
238 | 238 | ||
239 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE | 239 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE |
240 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); | 240 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); |
241 | 241 | ||
242 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { | 242 | if (!binheap_empty(&tsk_aux(leader)->aux_task_owners)) { |
243 | struct task_struct *hp = | 243 | struct task_struct *hp = |
244 | container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), | 244 | container_of(binheap_top_entry(&tsk_aux(leader)->aux_task_owners, struct rt_param, aux_task_owner_node), |
245 | struct task_struct, rt_param); | 245 | struct task_struct, rt_param); |
246 | 246 | ||
247 | TRACE_CUR("hp in group: %s/%d\n", hp->comm, hp->pid); | 247 | TRACE_CUR("hp in group: %s/%d\n", hp->comm, hp->pid); |
248 | 248 | ||
249 | retval = litmus->__increase_prio(t, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | 249 | retval = litmus->__increase_prio(t, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); |
250 | 250 | ||
251 | if (retval != 0) { | 251 | if (retval != 0) { |
252 | /* don't know how to recover from bugs with prio inheritance. better just crash. */ | 252 | /* don't know how to recover from bugs with prio inheritance. better just crash. */ |
253 | read_unlock_irq(&tasklist_lock); | 253 | read_unlock_irq(&tasklist_lock); |
@@ -256,7 +256,7 @@ int make_aux_task_if_required(struct task_struct *t) | |||
256 | } | 256 | } |
257 | #endif | 257 | #endif |
258 | } | 258 | } |
259 | 259 | ||
260 | out: | 260 | out: |
261 | read_unlock_irq(&tasklist_lock); | 261 | read_unlock_irq(&tasklist_lock); |
262 | 262 | ||
@@ -385,7 +385,7 @@ static long __do_enable_aux_tasks(int flags) | |||
385 | if (flags & AUX_FUTURE) { | 385 | if (flags & AUX_FUTURE) { |
386 | tsk_aux(leader)->aux_future = 1; | 386 | tsk_aux(leader)->aux_future = 1; |
387 | } | 387 | } |
388 | 388 | ||
389 | t = leader; | 389 | t = leader; |
390 | do { | 390 | do { |
391 | if (!tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->is_aux_task) { | 391 | if (!tsk_rt(t)->has_aux_tasks && !tsk_rt(t)->is_aux_task) { |
@@ -398,22 +398,22 @@ static long __do_enable_aux_tasks(int flags) | |||
398 | TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n", | 398 | TRACE_CUR("Checking task in %s/%d: %s/%d = (p = %llu):\n", |
399 | leader->comm, leader->pid, t->comm, t->pid, | 399 | leader->comm, leader->pid, t->comm, t->pid, |
400 | tsk_rt(t)->task_params.period); | 400 | tsk_rt(t)->task_params.period); |
401 | 401 | ||
402 | /* inspect period to see if it is an rt task */ | 402 | /* inspect period to see if it is an rt task */ |
403 | if (tsk_rt(t)->task_params.period == 0) { | 403 | if (tsk_rt(t)->task_params.period == 0) { |
404 | if (flags && AUX_CURRENT) { | 404 | if (flags && AUX_CURRENT) { |
405 | if (!tsk_rt(t)->is_aux_task) { | 405 | if (!tsk_rt(t)->is_aux_task) { |
406 | int admit_ret; | 406 | int admit_ret; |
407 | 407 | ||
408 | TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); | 408 | TRACE_CUR("AUX task in %s/%d: %s/%d:\n", leader->comm, leader->pid, t->comm, t->pid); |
409 | 409 | ||
410 | admit_ret = admit_aux_task(t); | 410 | admit_ret = admit_aux_task(t); |
411 | 411 | ||
412 | if (admit_ret == 0) { | 412 | if (admit_ret == 0) { |
413 | /* hasn't been aux_tasks_increase_priorityted into rt. make it a aux. */ | 413 | /* hasn't been aux_tasks_increase_priorityted into rt. make it a aux. */ |
414 | tsk_rt(t)->is_aux_task = 1; | 414 | tsk_rt(t)->is_aux_task = 1; |
415 | aux_tasks_added = 1; | 415 | aux_tasks_added = 1; |
416 | 416 | ||
417 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE | 417 | #ifdef CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE |
418 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); | 418 | list_add_tail(&tsk_rt(t)->aux_task_node, &tsk_aux(leader)->aux_tasks); |
419 | #endif | 419 | #endif |
@@ -464,7 +464,7 @@ static long __do_disable_aux_tasks(int flags) | |||
464 | if (flags & AUX_FUTURE) { | 464 | if (flags & AUX_FUTURE) { |
465 | tsk_aux(leader)->aux_future = 0; | 465 | tsk_aux(leader)->aux_future = 0; |
466 | } | 466 | } |
467 | 467 | ||
468 | if (flags & AUX_CURRENT) { | 468 | if (flags & AUX_CURRENT) { |
469 | t = leader; | 469 | t = leader; |
470 | do { | 470 | do { |
diff --git a/litmus/edf_common.c b/litmus/edf_common.c
index c279bf12a7f5..27b728a55669 100644
--- a/litmus/edf_common.c
+++ b/litmus/edf_common.c
@@ -73,6 +73,22 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
73 | return first && !second; | 73 | return first && !second; |
74 | } | 74 | } |
75 | 75 | ||
76 | /* There is some goofy stuff in this code here. There are three subclasses | ||
77 | * within the SCHED_LITMUS scheduling class: | ||
78 | * 1) Auxiliary tasks: COTS helper threads from the application level that | ||
79 | * are forced to be real-time. | ||
80 | * 2) klmirqd interrupt threads: Litmus threaded interrupt handlers. | ||
81 | * 3) Normal Litmus tasks. | ||
82 | * | ||
83 | * At their base priorities, #3 > #2 > #1. However, #1 and #2 threads might | ||
84 | * inherit a priority from a task of #3. | ||
85 | * | ||
86 | * The code proceeds in the following manner: | ||
87 | * 1) Make aux and klmirqd threads with base-priorities have low priorities. | ||
88 | * 2) Determine effective priorities. | ||
89 | * 3) Perform priority comparison. Favor #3 over #1 and #2 in case of tie. | ||
90 | */ | ||
91 | |||
76 | 92 | ||
77 | #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_BOOSTED) | 93 | #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_BOOSTED) |
78 | /* run aux tasks at max priority */ | 94 | /* run aux tasks at max priority */ |
@@ -109,7 +125,7 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
109 | return temp; | 125 | return temp; |
110 | } | 126 | } |
111 | } | 127 | } |
112 | 128 | ||
113 | if (first->rt_param.is_aux_task && second->rt_param.is_aux_task && | 129 | if (first->rt_param.is_aux_task && second->rt_param.is_aux_task && |
114 | first->rt_param.inh_task == second->rt_param.inh_task) { // inh_task is !NULL for both tasks since neither was a lo_aux task | 130 | first->rt_param.inh_task == second->rt_param.inh_task) { // inh_task is !NULL for both tasks since neither was a lo_aux task |
115 | // Both aux tasks inherit from the same task, so tie-break | 131 | // Both aux tasks inherit from the same task, so tie-break |
@@ -120,6 +136,36 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
120 | } | 136 | } |
121 | #endif | 137 | #endif |
122 | 138 | ||
139 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
140 | { | ||
141 | int first_lo_klmirqd = first->rt_param.is_interrupt_thread && !first->rt_param.inh_task; | ||
142 | int second_lo_klmirqd = second->rt_param.is_interrupt_thread && !second->rt_param.inh_task; | ||
143 | |||
144 | /* prioritize klmirqd threads without inheritance below real-time tasks */ | ||
145 | if (first_lo_klmirqd || second_lo_klmirqd) { | ||
146 | // one of these is an klmirqd thread without inheritance. | ||
147 | if(first_lo_klmirqd && second_lo_klmirqd) { | ||
148 | TRACE_CUR("klmirqd tie break!\n"); // tie-break by BASE priority of the aux tasks | ||
149 | goto klmirqd_tie_break; | ||
150 | } | ||
151 | else { | ||
152 | // make the klmirqd thread (second) lowest priority real-time task | ||
153 | int temp = (first_lo_klmirqd) ? !is_realtime(second) : !is_realtime(first); | ||
154 | TRACE_CUR("%s/%d >> %s/%d --- %d\n", first->comm, first->pid, second->comm, second->pid, temp); | ||
155 | return temp; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | if (first->rt_param.is_interrupt_thread && second->rt_param.is_interrupt_thread && | ||
160 | first->rt_param.inh_task == second->rt_param.inh_task) { // inh_task is !NULL for both tasks since neither was a lo_klmirqd task | ||
161 | // Both klmirqd tasks inherit from the same task, so tie-break | ||
162 | // by base priority of the klmirqd tasks. | ||
163 | TRACE_CUR("klmirqd tie break!\n"); | ||
164 | goto klmirqd_tie_break; | ||
165 | } | ||
166 | } | ||
167 | #endif | ||
168 | |||
123 | 169 | ||
124 | #ifdef CONFIG_LITMUS_LOCKING | 170 | #ifdef CONFIG_LITMUS_LOCKING |
125 | /* Check for EFFECTIVE priorities. Change task | 171 | /* Check for EFFECTIVE priorities. Change task |
@@ -161,7 +207,8 @@ int edf_higher_prio(struct task_struct* first, struct task_struct* second) | |||
161 | #endif | 207 | #endif |
162 | 208 | ||
163 | aux_tie_break: | 209 | aux_tie_break: |
164 | 210 | klmirqd_tie_break: | |
211 | |||
165 | if (!is_realtime(second_task)) { | 212 | if (!is_realtime(second_task)) { |
166 | return 1; | 213 | return 1; |
167 | } | 214 | } |
@@ -230,15 +277,13 @@ aux_tie_break: | |||
230 | } | 277 | } |
231 | else if (first_task->pid == second_task->pid) { | 278 | else if (first_task->pid == second_task->pid) { |
232 | #ifdef CONFIG_LITMUS_SOFTIRQD | 279 | #ifdef CONFIG_LITMUS_SOFTIRQD |
233 | if (first_task->rt_param.is_proxy_thread < | 280 | if (first_task->rt_param.is_interrupt_thread < second_task->rt_param.is_interrupt_thread) { |
234 | second_task->rt_param.is_proxy_thread) { | ||
235 | return 1; | 281 | return 1; |
236 | } | 282 | } |
237 | else if (first_task->rt_param.is_proxy_thread == second_task->rt_param.is_proxy_thread) { | 283 | else if (first_task->rt_param.is_interrupt_thread == second_task->rt_param.is_interrupt_thread) { |
238 | #endif | 284 | #endif |
239 | 285 | ||
240 | #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE) | 286 | #if defined(CONFIG_REALTIME_AUX_TASK_PRIORITY_INHERITANCE) |
241 | /* is this dead code? */ | ||
242 | if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) { | 287 | if (tsk_rt(first)->is_aux_task < tsk_rt(second)->is_aux_task) { |
243 | return 1; | 288 | return 1; |
244 | } | 289 | } |
@@ -246,8 +291,7 @@ aux_tie_break: | |||
246 | #endif | 291 | #endif |
247 | 292 | ||
248 | /* Something could be wrong if you get this far. */ | 293 | /* Something could be wrong if you get this far. */ |
249 | if (unlikely(first->rt_param.inh_task == | 294 | if (unlikely(first->rt_param.inh_task == second->rt_param.inh_task)) { |
250 | second->rt_param.inh_task)) { | ||
251 | /* Both tasks have the same inherited priority. | 295 | /* Both tasks have the same inherited priority. |
252 | * Likely in a bug-condition. | 296 | * Likely in a bug-condition. |
253 | */ | 297 | */ |
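The comparison order described in the block comment added to edf_higher_prio() can be summarized with a standalone sketch. This is an illustration of the three steps, not the kernel function itself; the type and function names are made up for the example.

```c
/* Subclasses within SCHED_LITMUS, per the comment in edf_higher_prio(). */
enum litmus_subclass { AUX_TASK, KLMIRQD_THREAD, NORMAL_RT };

struct prio_view {
	enum litmus_subclass subclass;
	int inherits_prio;	/* inh_task != NULL in the real code */
	long long deadline;	/* effective deadline after inheritance */
};

/* Returns nonzero if a has higher priority than b. */
static int sketch_higher_prio(const struct prio_view *a, const struct prio_view *b)
{
	/* Step 1: aux/klmirqd threads at base priority rank below all
	 * real-time tasks; if only one side is such a thread, it loses. */
	int a_low = (a->subclass != NORMAL_RT) && !a->inherits_prio;
	int b_low = (b->subclass != NORMAL_RT) && !b->inherits_prio;
	if (a_low != b_low)
		return b_low;

	/* Steps 2-3: compare effective (possibly inherited) deadlines;
	 * favor normal tasks over aux/klmirqd threads on a tie. */
	if (a->deadline != b->deadline)
		return a->deadline < b->deadline;
	return (a->subclass == NORMAL_RT) && (b->subclass != NORMAL_RT);
}
```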
diff --git a/litmus/ikglp_lock.c b/litmus/ikglp_lock.c
index b29828344dd1..a4ae74331782 100644
--- a/litmus/ikglp_lock.c
+++ b/litmus/ikglp_lock.c
@@ -1960,11 +1960,11 @@ static struct affinity_observer* ikglp_aff_obs_new(struct affinity_observer_ops* | |||
1960 | return(NULL); | 1960 | return(NULL); |
1961 | } | 1961 | } |
1962 | 1962 | ||
1963 | if(aff_args.nr_simult_users > NV_MAX_SIMULT_USERS) { | 1963 | // if(aff_args.nr_simult_users > NV_MAX_SIMULT_USERS) { |
1964 | TRACE_CUR("System does not support #simult_users > %d. %d requested.\n", | 1964 | // TRACE_CUR("System does not support #simult_users > %d. %d requested.\n", |
1965 | NV_MAX_SIMULT_USERS, aff_args.nr_simult_users); | 1965 | // NV_MAX_SIMULT_USERS, aff_args.nr_simult_users); |
1966 | // return(NULL); | 1966 | //// return(NULL); |
1967 | } | 1967 | // } |
1968 | 1968 | ||
1969 | ikglp_aff = kmalloc(sizeof(*ikglp_aff), GFP_KERNEL); | 1969 | ikglp_aff = kmalloc(sizeof(*ikglp_aff), GFP_KERNEL); |
1970 | if(!ikglp_aff) { | 1970 | if(!ikglp_aff) { |
@@ -2124,7 +2124,7 @@ struct fifo_queue* gpu_ikglp_advise_enqueue(struct ikglp_affinity* aff, struct t | |||
2124 | if(aff->q_info[i].q->count < max_fifo_len) { | 2124 | if(aff->q_info[i].q->count < max_fifo_len) { |
2125 | int want = 0; | 2125 | int want = 0; |
2126 | 2126 | ||
2127 | lt_t migration = | 2127 | lt_t migration = |
2128 | get_gpu_estimate(t, | 2128 | get_gpu_estimate(t, |
2129 | gpu_migration_distance(tsk_rt(t)->last_gpu, | 2129 | gpu_migration_distance(tsk_rt(t)->last_gpu, |
2130 | replica_to_gpu(aff, i))); | 2130 | replica_to_gpu(aff, i))); |
diff --git a/litmus/jobs.c b/litmus/jobs.c
index 9fe4eb1fa168..8593a8d2f107 100644
--- a/litmus/jobs.c
+++ b/litmus/jobs.c
@@ -30,7 +30,7 @@ void prepare_for_next_period(struct task_struct *t) | |||
30 | * release and deadline. Lateness may be negative. | 30 | * release and deadline. Lateness may be negative. |
31 | */ | 31 | */ |
32 | t->rt_param.job_params.lateness = | 32 | t->rt_param.job_params.lateness = |
33 | (long long)litmus_clock() - | 33 | (long long)litmus_clock() - |
34 | (long long)t->rt_param.job_params.deadline; | 34 | (long long)t->rt_param.job_params.deadline; |
35 | 35 | ||
36 | setup_release(t, get_release(t) + get_rt_period(t)); | 36 | setup_release(t, get_release(t) + get_rt_period(t)); |
diff --git a/litmus/kfmlp_lock.c b/litmus/kfmlp_lock.c
index ab472330095d..785a095275e6 100644
--- a/litmus/kfmlp_lock.c
+++ b/litmus/kfmlp_lock.c
@@ -587,11 +587,11 @@ static struct affinity_observer* kfmlp_aff_obs_new(struct affinity_observer_ops* | |||
587 | return(NULL); | 587 | return(NULL); |
588 | } | 588 | } |
589 | 589 | ||
590 | if(aff_args.nr_simult_users > NV_MAX_SIMULT_USERS) { | 590 | // if(aff_args.nr_simult_users > NV_MAX_SIMULT_USERS) { |
591 | TRACE_CUR("System does not support #simult_users > %d. %d requested.\n", | 591 | // TRACE_CUR("System does not support #simult_users > %d. %d requested.\n", |
592 | NV_MAX_SIMULT_USERS, aff_args.nr_simult_users); | 592 | // NV_MAX_SIMULT_USERS, aff_args.nr_simult_users); |
593 | // return(NULL); | 593 | //// return(NULL); |
594 | } | 594 | // } |
595 | 595 | ||
596 | kfmlp_aff = kmalloc(sizeof(*kfmlp_aff), GFP_KERNEL); | 596 | kfmlp_aff = kmalloc(sizeof(*kfmlp_aff), GFP_KERNEL); |
597 | if(!kfmlp_aff) { | 597 | if(!kfmlp_aff) { |
@@ -829,6 +829,7 @@ void gpu_kfmlp_notify_acquired(struct kfmlp_affinity* aff, struct kfmlp_queue* f | |||
829 | 829 | ||
830 | reg_nv_device(gpu, 1, t); // register | 830 | reg_nv_device(gpu, 1, t); // register |
831 | 831 | ||
832 | |||
832 | tsk_rt(t)->suspend_gpu_tracker_on_block = 0; | 833 | tsk_rt(t)->suspend_gpu_tracker_on_block = 0; |
833 | reset_gpu_tracker(t); | 834 | reset_gpu_tracker(t); |
834 | start_gpu_tracker(t); | 835 | start_gpu_tracker(t); |
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3b8017397e80..fa244ba53e22 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -60,28 +60,6 @@ void bheap_node_free(struct bheap_node* hn) | |||
60 | struct release_heap* release_heap_alloc(int gfp_flags); | 60 | struct release_heap* release_heap_alloc(int gfp_flags); |
61 | void release_heap_free(struct release_heap* rh); | 61 | void release_heap_free(struct release_heap* rh); |
62 | 62 | ||
63 | #ifdef CONFIG_LITMUS_NVIDIA | ||
64 | /* | ||
65 | * sys_register_nv_device | ||
66 | * @nv_device_id: The Nvidia device id that the task want to register | ||
67 | * @reg_action: set to '1' to register the specified device. zero otherwise. | ||
68 | * Syscall for register task's designated nvidia device into NV_DEVICE_REG array | ||
69 | * Returns EFAULT if nv_device_id is out of range. | ||
70 | * 0 if success | ||
71 | */ | ||
72 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | ||
73 | { | ||
74 | /* register the device to caller (aka 'current') */ | ||
75 | return(reg_nv_device(nv_device_id, reg_action, current)); | ||
76 | } | ||
77 | #else | ||
78 | asmlinkage long sys_register_nv_device(int nv_device_id, int reg_action) | ||
79 | { | ||
80 | return(-EINVAL); | ||
81 | } | ||
82 | #endif | ||
83 | |||
84 | |||
85 | /* | 63 | /* |
86 | * sys_set_task_rt_param | 64 | * sys_set_task_rt_param |
87 | * @pid: Pid of the task which scheduling parameters must be changed | 65 | * @pid: Pid of the task which scheduling parameters must be changed |
@@ -393,22 +371,11 @@ static void reinit_litmus_state(struct task_struct* p, int restore) | |||
393 | // WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks)); | 371 | // WARN_ON(!binheap_empty(&p->rt_param.hp_blocked_tasks)); |
394 | #endif | 372 | #endif |
395 | 373 | ||
396 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
397 | /* We probably should not have any tasklets executing for | ||
398 | * us at this time. | ||
399 | */ | ||
400 | WARN_ON(p->rt_param.cur_klmirqd); | ||
401 | WARN_ON(atomic_read(&p->rt_param.klmirqd_sem_stat) == HELD); | ||
402 | |||
403 | if(p->rt_param.cur_klmirqd) | ||
404 | flush_pending(p->rt_param.cur_klmirqd, p); | ||
405 | |||
406 | if(atomic_read(&p->rt_param.klmirqd_sem_stat) == HELD) | ||
407 | up_and_set_stat(p, NOT_HELD, &p->rt_param.klmirqd_sem); | ||
408 | #endif | ||
409 | 374 | ||
410 | #ifdef CONFIG_LITMUS_NVIDIA | 375 | #ifdef CONFIG_LITMUS_NVIDIA |
411 | WARN_ON(p->rt_param.held_gpus != 0); | 376 | WARN_ON(p->rt_param.held_gpus != 0); |
377 | |||
378 | INIT_BINHEAP_NODE(&p->rt_param.gpu_owner_node); | ||
412 | #endif | 379 | #endif |
413 | 380 | ||
414 | /* Cleanup everything else. */ | 381 | /* Cleanup everything else. */ |
@@ -477,11 +444,9 @@ long __litmus_admit_task(struct task_struct* tsk) | |||
477 | //INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, prio_order); // done by scheduler | 444 | //INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, prio_order); // done by scheduler |
478 | #endif | 445 | #endif |
479 | #ifdef CONFIG_LITMUS_SOFTIRQD | 446 | #ifdef CONFIG_LITMUS_SOFTIRQD |
480 | /* proxy thread off by default */ | 447 | /* not an interrupt thread by default */ |
481 | tsk_rt(tsk)is_proxy_thread = 0; | 448 | tsk_rt(tsk)->is_interrupt_thread = 0; |
482 | tsk_rt(tsk)cur_klmirqd = NULL; | 449 | tsk_rt(tsk)->klmirqd_info = NULL; |
483 | mutex_init(&tsk_rt(tsk)->klmirqd_sem); | ||
484 | atomic_set(&tsk_rt(tsk)->klmirqd_sem_stat, NOT_HELD); | ||
485 | #endif | 450 | #endif |
486 | 451 | ||
487 | retval = litmus->admit_task(tsk); | 452 | retval = litmus->admit_task(tsk); |
@@ -580,8 +545,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
580 | cpu_relax(); | 545 | cpu_relax(); |
581 | 546 | ||
582 | #ifdef CONFIG_LITMUS_SOFTIRQD | 547 | #ifdef CONFIG_LITMUS_SOFTIRQD |
583 | if(!klmirqd_is_dead()) | 548 | if (!klmirqd_is_dead()) { |
584 | { | ||
585 | kill_klmirqd(); | 549 | kill_klmirqd(); |
586 | } | 550 | } |
587 | #endif | 551 | #endif |
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index 73a3053e662b..44e2d38ad982 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -18,10 +18,6 @@ | |||
18 | 18 | ||
19 | /* TODO: Remove unneeded mb() and other barriers. */ | 19 | /* TODO: Remove unneeded mb() and other barriers. */ |
20 | 20 | ||
21 | |||
22 | /* counts number of daemons ready to handle litmus irqs. */ | ||
23 | static atomic_t num_ready_klmirqds = ATOMIC_INIT(0); | ||
24 | |||
25 | enum pending_flags | 21 | enum pending_flags |
26 | { | 22 | { |
27 | LIT_TASKLET_LOW = 0x1, | 23 | LIT_TASKLET_LOW = 0x1, |
@@ -29,35 +25,313 @@ enum pending_flags | |||
29 | LIT_WORK = LIT_TASKLET_HI<<1 | 25 | LIT_WORK = LIT_TASKLET_HI<<1 |
30 | }; | 26 | }; |
31 | 27 | ||
32 | /* only support tasklet processing for now. */ | 28 | struct klmirqd_registration |
33 | struct tasklet_head | ||
34 | { | 29 | { |
35 | struct tasklet_struct *head; | 30 | raw_spinlock_t lock; |
36 | struct tasklet_struct **tail; | 31 | u32 nr_threads; |
32 | unsigned int initialized:1; | ||
33 | unsigned int shuttingdown:1; | ||
34 | struct list_head threads; | ||
37 | }; | 35 | }; |
38 | 36 | ||
39 | struct klmirqd_info | 37 | static atomic_t klmirqd_id_gen = ATOMIC_INIT(0); |
38 | |||
39 | static struct klmirqd_registration klmirqd_state; | ||
40 | |||
41 | |||
42 | |||
43 | void init_klmirqd(void) | ||
44 | { | ||
45 | raw_spin_lock_init(&klmirqd_state.lock); | ||
46 | |||
47 | klmirqd_state.nr_threads = 0; | ||
48 | klmirqd_state.initialized = 1; | ||
49 | klmirqd_state.shuttingdown = 0; | ||
50 | INIT_LIST_HEAD(&klmirqd_state.threads); | ||
51 | } | ||
52 | |||
53 | static int __klmirqd_is_ready(void) | ||
54 | { | ||
55 | return (klmirqd_state.initialized == 1 && klmirqd_state.shuttingdown == 0); | ||
56 | } | ||
57 | |||
58 | int klmirqd_is_ready(void) | ||
59 | { | ||
60 | unsigned long flags; | ||
61 | int ret; | ||
62 | |||
63 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
64 | ret = __klmirqd_is_ready(); | ||
65 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | int klmirqd_is_dead(void) | ||
71 | { | ||
72 | return(!klmirqd_is_ready()); | ||
73 | } | ||
74 | |||
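The registry defined above must be brought up exactly once before any klmirqd threads are launched, and it stays ready until kill_klmirqd() tears it down. A minimal usage sketch, assuming the call is made from a scheduler plugin's activation path (the caller name is illustrative, not part of this patch):

    #include <litmus/litmus_softirq.h>

    /* hypothetical caller: bring the registry up once, then confirm readiness */
    static void example_plugin_activation(void)
    {
            init_klmirqd();               /* initialized = 1, shuttingdown = 0 */
            WARN_ON(!klmirqd_is_ready()); /* remains ready until kill_klmirqd() runs */
    }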
75 | |||
76 | void kill_klmirqd(void) | ||
77 | { | ||
78 | if(!klmirqd_is_dead()) | ||
79 | { | ||
80 | unsigned long flags; | ||
81 | struct list_head *pos; | ||
82 | |||
83 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
84 | |||
85 | TRACE("%s: Killing all klmirqd threads! (%d of them)\n", __FUNCTION__, klmirqd_state.nr_threads); | ||
86 | |||
87 | klmirqd_state.shuttingdown = 1; | ||
88 | |||
89 | list_for_each(pos, &klmirqd_state.threads) { | ||
90 | struct klmirqd_info* info = list_entry(pos, struct klmirqd_info, klmirqd_reg); | ||
91 | |||
92 | if(info->terminating != 1) | ||
93 | { | ||
94 | info->terminating = 1; | ||
95 | mb(); /* just to be sure? */ | ||
96 | flush_pending(info->klmirqd); | ||
97 | |||
98 | /* signal termination */ | ||
99 | kthread_stop(info->klmirqd); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | |||
108 | |||
109 | void kill_klmirqd_thread(struct task_struct* klmirqd_thread) | ||
40 | { | 110 | { |
41 | struct task_struct* klmirqd; | 111 | unsigned long flags; |
42 | struct task_struct* current_owner; | 112 | struct klmirqd_info* info; |
43 | int terminating; | ||
44 | 113 | ||
114 | if (!tsk_rt(klmirqd_thread)->is_interrupt_thread) { | ||
115 | TRACE("%s/%d is not a klmirqd thread\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
116 | return; | ||
117 | } | ||
118 | |||
119 | TRACE("%s: Killing klmirqd thread %s/%d\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
45 | 120 | ||
46 | raw_spinlock_t lock; | 121 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); |
47 | 122 | ||
48 | u32 pending; | 123 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
49 | atomic_t num_hi_pending; | 124 | |
50 | atomic_t num_low_pending; | 125 | if(info->terminating != 1) { |
51 | atomic_t num_work_pending; | 126 | info->terminating = 1; |
127 | mb(); | ||
128 | |||
129 | flush_pending(klmirqd_thread); | ||
130 | kthread_stop(klmirqd_thread); | ||
131 | } | ||
132 | |||
133 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
134 | } | ||
52 | 135 | ||
53 | /* in order of priority */ | 136 | |
54 | struct tasklet_head pending_tasklets_hi; | 137 | |
55 | struct tasklet_head pending_tasklets; | 138 | struct klmirqd_launch_data |
56 | struct list_head worklist; | 139 | { |
140 | int cpu_affinity; | ||
141 | klmirqd_callback_t* cb; | ||
142 | struct work_struct work; | ||
57 | }; | 143 | }; |
58 | 144 | ||
59 | /* one list for each klmirqd */ | 145 | static int run_klmirqd(void* callback); |
60 | static struct klmirqd_info klmirqds[NR_LITMUS_SOFTIRQD]; | 146 | |
147 | |||
148 | /* executed by a kworker from workqueues */ | ||
149 | static void __launch_klmirqd_thread(struct work_struct *work) | ||
150 | { | ||
151 | int id; | ||
152 | struct task_struct* thread = NULL; | ||
153 | struct klmirqd_launch_data* launch_data = | ||
154 | container_of(work, struct klmirqd_launch_data, work); | ||
155 | |||
156 | TRACE("%s: Creating klmirqd thread\n", __FUNCTION__); | ||
157 | |||
158 | id = atomic_inc_return(&klmirqd_id_gen); | ||
159 | |||
160 | if (launch_data->cpu_affinity != -1) { | ||
161 | thread = kthread_create( | ||
162 | run_klmirqd, | ||
163 | /* pass the callback through as the thread's argument */ | ||
164 | (void*)launch_data->cb, | ||
165 | "klmirqd_th%d/%d", | ||
166 | id, | ||
167 | launch_data->cpu_affinity); | ||
168 | |||
169 | /* litmus will put us in the right cluster. */ | ||
170 | kthread_bind(thread, launch_data->cpu_affinity); | ||
171 | |||
172 | TRACE("%s: Launching klmirqd_th%d/%d\n", __FUNCTION__, id, launch_data->cpu_affinity); | ||
173 | } | ||
174 | else { | ||
175 | thread = kthread_create( | ||
176 | run_klmirqd, | ||
177 | /* pass the callback through as the thread's argument */ | ||
178 | (void*)launch_data->cb, | ||
179 | "klmirqd_th%d", | ||
180 | id); | ||
181 | |||
182 | TRACE("%s: Launching klmirqd_th%d\n", __FUNCTION__, id); | ||
183 | } | ||
184 | |||
185 | if (thread) { | ||
186 | wake_up_process(thread); | ||
187 | } | ||
188 | else { | ||
189 | TRACE("Could not create klmirqd/%d thread!\n", id); | ||
190 | } | ||
191 | |||
192 | kfree(launch_data); | ||
193 | } | ||
194 | |||
195 | |||
196 | int launch_klmirqd_thread(int cpu, klmirqd_callback_t* cb) | ||
197 | { | ||
198 | struct klmirqd_launch_data* delayed_launch; | ||
199 | |||
200 | if (!klmirqd_is_ready()) { | ||
201 | TRACE("klmirqd is not ready. Check that it was initialized!\n"); | ||
202 | return -1; | ||
203 | } | ||
204 | |||
205 | /* tell a work queue to launch the thread. we can't make scheduling | ||
206 | calls since we're in an atomic state. */ | ||
207 | delayed_launch = kmalloc(sizeof(struct klmirqd_launch_data), GFP_ATOMIC); | ||
208 | delayed_launch->cpu_affinity = cpu; | ||
209 | delayed_launch->cb = cb; | ||
210 | INIT_WORK(&delayed_launch->work, __launch_klmirqd_thread); | ||
211 | schedule_work(&delayed_launch->work); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
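A usage sketch for launch_klmirqd_thread(): the callback is invoked inside the newly created thread by run_klmirqd() once the thread has registered itself, so per-thread setup belongs in it. The helper names are illustrative, and the sketch assumes klmirqd_callback_t exposes the func/arg pair that run_klmirqd() dereferences below:

    #include <litmus/litmus_softirq.h>

    /* illustrative per-thread setup; a non-zero return aborts the new thread */
    static int my_thread_setup(void *arg)
    {
            return 0;
    }

    static klmirqd_callback_t my_cb = {
            .func = my_thread_setup,
            .arg  = NULL,
    };

    /* request one interrupt-handling thread pinned to CPU 0;
       passing -1 as the CPU would skip the kthread_bind() call */
    static void example_launch(void)
    {
            if (launch_klmirqd_thread(0, &my_cb) != 0)
                    TRACE("klmirqd launch request was refused\n");
    }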
216 | |||
217 | |||
218 | |||
219 | #define KLMIRQD_SLICE_NR_JIFFIES 1 | ||
220 | #define KLMIRQD_SLICE_NS ((NSEC_PER_SEC / HZ) * KLMIRQD_SLICE_NR_JIFFIES) | ||
221 | |||
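With KLMIRQD_SLICE_NR_JIFFIES set to 1, the slice works out to one timer tick: for example, at HZ = 250 it is (10^9 / 250) * 1 = 4,000,000 ns (4 ms), and at HZ = 1000 it is 1,000,000 ns (1 ms). These HZ values are only examples; the real value comes from the kernel configuration.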
222 | static int set_litmus_daemon_sched(struct task_struct* tsk) | ||
223 | { | ||
224 | int ret = 0; | ||
225 | |||
226 | struct rt_task tp = { | ||
227 | .period = KLMIRQD_SLICE_NS, /* dummy period: one jiffy */ | ||
228 | .relative_deadline = KLMIRQD_SLICE_NS, | ||
229 | .exec_cost = KLMIRQD_SLICE_NS, | ||
230 | .phase = 0, | ||
231 | .cpu = task_cpu(current), | ||
232 | .budget_policy = NO_ENFORCEMENT, | ||
233 | .budget_signal_policy = NO_SIGNALS, | ||
234 | .cls = RT_CLASS_BEST_EFFORT | ||
235 | }; | ||
236 | |||
237 | struct sched_param param = { .sched_priority = 0}; | ||
238 | |||
239 | TRACE_CUR("Setting %s/%d as daemon thread.\n", tsk->comm, tsk->pid); | ||
240 | |||
241 | /* set task params */ | ||
242 | tsk_rt(tsk)->task_params = tp; | ||
243 | tsk_rt(tsk)->is_interrupt_thread = 1; | ||
244 | |||
245 | /* inform the OS we're SCHED_LITMUS -- | ||
246 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
247 | sched_setscheduler_nocheck(tsk, SCHED_LITMUS, ¶m); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | static int register_klmirqd(struct task_struct* tsk) | ||
253 | { | ||
254 | int retval = 0; | ||
255 | unsigned long flags; | ||
256 | struct klmirqd_info *info = NULL; | ||
257 | |||
258 | if (!tsk_rt(tsk)->is_interrupt_thread) { | ||
259 | TRACE("Only proxy threads already running in Litmus may become klmirqd threads!\n"); | ||
260 | WARN_ON(1); | ||
261 | retval = -1; | ||
262 | goto out; | ||
263 | } | ||
264 | |||
265 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
266 | |||
267 | if (!__klmirqd_is_ready()) { | ||
268 | TRACE("klmirqd is not ready! Did you forget to initialize it?\n"); | ||
269 | WARN_ON(1); | ||
270 | retval = -1; | ||
271 | goto out_unlock; | ||
272 | } | ||
273 | |||
274 | /* allocate and initialize klmirqd data for the thread */ | ||
275 | info = kmalloc(sizeof(struct klmirqd_info), GFP_KERNEL); | ||
276 | if (!info) { | ||
277 | TRACE("Failed to allocate klmirqd_info struct!\n"); | ||
278 | retval = -1; /* todo: pick better code */ | ||
279 | goto out_unlock; | ||
280 | } | ||
281 | memset(info, 0, sizeof(struct klmirqd_info)); | ||
282 | info->klmirqd = tsk; | ||
283 | info->pending_tasklets_hi.tail = &info->pending_tasklets_hi.head; | ||
284 | info->pending_tasklets.tail = &info->pending_tasklets.head; | ||
285 | INIT_LIST_HEAD(&info->worklist); | ||
286 | INIT_LIST_HEAD(&info->klmirqd_reg); | ||
287 | raw_spin_lock_init(&info->lock); | ||
288 | |||
289 | |||
290 | /* now register with klmirqd */ | ||
291 | list_add_tail(&info->klmirqd_reg, &klmirqd_state.threads); | ||
292 | ++klmirqd_state.nr_threads; | ||
293 | |||
294 | /* update the task struct to point to klmirqd info */ | ||
295 | tsk_rt(tsk)->klmirqd_info = info; | ||
296 | |||
297 | out_unlock: | ||
298 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
299 | |||
300 | out: | ||
301 | return retval; | ||
302 | } | ||
303 | |||
304 | static int unregister_klmirqd(struct task_struct* tsk) | ||
305 | { | ||
306 | int retval = 0; | ||
307 | unsigned long flags; | ||
308 | struct klmirqd_info *info = tsk_rt(tsk)->klmirqd_info; | ||
309 | |||
310 | if (!tsk_rt(tsk)->is_interrupt_thread || !info) { | ||
311 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); | ||
312 | WARN_ON(1); | ||
313 | retval = -1; | ||
314 | goto out; | ||
315 | } | ||
316 | |||
317 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
318 | |||
319 | /* remove the entry in the klmirqd thread list */ | ||
320 | list_del(&info->klmirqd_reg); | ||
321 | --klmirqd_state.nr_threads; | ||
322 | |||
323 | /* remove link to klmirqd info from thread */ | ||
324 | tsk_rt(tsk)->klmirqd_info = NULL; | ||
325 | |||
326 | /* clean up memory */ | ||
327 | kfree(info); | ||
328 | |||
329 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
330 | |||
331 | out: | ||
332 | return retval; | ||
333 | } | ||
334 | |||
61 | 335 | ||
62 | 336 | ||
63 | 337 | ||
@@ -67,35 +341,50 @@ int proc_read_klmirqd_stats(char *page, char **start, | |||
67 | off_t off, int count, | 341 | off_t off, int count, |
68 | int *eof, void *data) | 342 | int *eof, void *data) |
69 | { | 343 | { |
70 | int len = snprintf(page, PAGE_SIZE, | 344 | unsigned long flags; |
71 | "num ready klmirqds: %d\n\n", | 345 | int len; |
72 | atomic_read(&num_ready_klmirqds)); | 346 | |
73 | 347 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | |
74 | if(klmirqd_is_ready()) | 348 | |
75 | { | 349 | if (klmirqd_state.initialized) { |
76 | int i; | 350 | if (!klmirqd_state.shuttingdown) { |
77 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | 351 | struct list_head *pos; |
78 | { | 352 | |
79 | len += | 353 | len = snprintf(page, PAGE_SIZE, |
80 | snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ | 354 | "num ready klmirqds: %d\n\n", |
81 | "klmirqd_th%d: %s/%d\n" | 355 | klmirqd_state.nr_threads); |
82 | "\tcurrent_owner: %s/%d\n" | 356 | |
83 | "\tpending: %x\n" | 357 | list_for_each(pos, &klmirqd_state.threads) { |
84 | "\tnum hi: %d\n" | 358 | struct klmirqd_info* info = list_entry(pos, struct klmirqd_info, klmirqd_reg); |
85 | "\tnum low: %d\n" | 359 | |
86 | "\tnum work: %d\n\n", | 360 | len += |
87 | i, | 361 | snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ |
88 | klmirqds[i].klmirqd->comm, klmirqds[i].klmirqd->pid, | 362 | "klmirqd_thread: %s/%d\n" |
89 | (klmirqds[i].current_owner != NULL) ? | 363 | "\tcurrent_owner: %s/%d\n" |
90 | klmirqds[i].current_owner->comm : "(null)", | 364 | "\tpending: %x\n" |
91 | (klmirqds[i].current_owner != NULL) ? | 365 | "\tnum hi: %d\n" |
92 | klmirqds[i].current_owner->pid : 0, | 366 | "\tnum low: %d\n" |
93 | klmirqds[i].pending, | 367 | "\tnum work: %d\n\n", |
94 | atomic_read(&klmirqds[i].num_hi_pending), | 368 | info->klmirqd->comm, info->klmirqd->pid, |
95 | atomic_read(&klmirqds[i].num_low_pending), | 369 | (info->current_owner != NULL) ? |
96 | atomic_read(&klmirqds[i].num_work_pending)); | 370 | info->current_owner->comm : "(null)", |
371 | (info->current_owner != NULL) ? | ||
372 | info->current_owner->pid : 0, | ||
373 | info->pending, | ||
374 | atomic_read(&info->num_hi_pending), | ||
375 | atomic_read(&info->num_low_pending), | ||
376 | atomic_read(&info->num_work_pending)); | ||
377 | } | ||
378 | } | ||
379 | else { | ||
380 | len = snprintf(page, PAGE_SIZE, "klmirqd is shutting down\n"); | ||
97 | } | 381 | } |
98 | } | 382 | } |
383 | else { | ||
384 | len = snprintf(page, PAGE_SIZE, "klmirqd is not initialized!\n"); | ||
385 | } | ||
386 | |||
387 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
99 | 388 | ||
100 | return(len); | 389 | return(len); |
101 | } | 390 | } |
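For reference, with a single registered thread and nothing pending, the handler above produces output along these lines (the thread name and PIDs are illustrative):

    num ready klmirqds: 1

    klmirqd_thread: klmirqd_th1/2345
            current_owner: (null)/0
            pending: 0
            num hi: 0
            num low: 0
            num work: 0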
@@ -162,6 +451,15 @@ static void dump_state(struct klmirqd_info* which, const char* caller) | |||
162 | #endif | 451 | #endif |
163 | 452 | ||
164 | 453 | ||
454 | |||
455 | |||
456 | |||
457 | |||
458 | |||
459 | |||
460 | |||
461 | |||
462 | |||
165 | /* forward declarations */ | 463 | /* forward declarations */ |
166 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | 464 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, |
167 | struct klmirqd_info *which, | 465 | struct klmirqd_info *which, |
@@ -174,24 +472,6 @@ static void ___litmus_schedule_work(struct work_struct *w, | |||
174 | int wakeup); | 472 | int wakeup); |
175 | 473 | ||
176 | 474 | ||
177 | |||
178 | inline unsigned int klmirqd_id(struct task_struct* tsk) | ||
179 | { | ||
180 | int i; | ||
181 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
182 | { | ||
183 | if(klmirqds[i].klmirqd == tsk) | ||
184 | { | ||
185 | return i; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | BUG(); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | |||
195 | inline static u32 litirq_pending_hi_irqoff(struct klmirqd_info* which) | 475 | inline static u32 litirq_pending_hi_irqoff(struct klmirqd_info* which) |
196 | { | 476 | { |
197 | return (which->pending & LIT_TASKLET_HI); | 477 | return (which->pending & LIT_TASKLET_HI); |
@@ -225,200 +505,11 @@ inline static u32 litirq_pending(struct klmirqd_info* which) | |||
225 | return pending; | 505 | return pending; |
226 | }; | 506 | }; |
227 | 507 | ||
228 | inline static u32 litirq_pending_with_owner(struct klmirqd_info* which, struct task_struct* owner) | ||
229 | { | ||
230 | unsigned long flags; | ||
231 | u32 pending; | ||
232 | |||
233 | raw_spin_lock_irqsave(&which->lock, flags); | ||
234 | pending = litirq_pending_irqoff(which); | ||
235 | if(pending) | ||
236 | { | ||
237 | if(which->current_owner != owner) | ||
238 | { | ||
239 | pending = 0; // owner switch! | ||
240 | } | ||
241 | } | ||
242 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
243 | |||
244 | return pending; | ||
245 | } | ||
246 | |||
247 | |||
248 | inline static u32 litirq_pending_and_sem_and_owner(struct klmirqd_info* which, | ||
249 | struct mutex** sem, | ||
250 | struct task_struct** t) | ||
251 | { | ||
252 | unsigned long flags; | ||
253 | u32 pending; | ||
254 | |||
255 | /* init values */ | ||
256 | *sem = NULL; | ||
257 | *t = NULL; | ||
258 | |||
259 | raw_spin_lock_irqsave(&which->lock, flags); | ||
260 | |||
261 | pending = litirq_pending_irqoff(which); | ||
262 | if(pending) | ||
263 | { | ||
264 | if(which->current_owner != NULL) | ||
265 | { | ||
266 | *t = which->current_owner; | ||
267 | *sem = &tsk_rt(which->current_owner)->klmirqd_sem; | ||
268 | } | ||
269 | else | ||
270 | { | ||
271 | BUG(); | ||
272 | } | ||
273 | } | ||
274 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
275 | |||
276 | if(likely(*sem)) | ||
277 | { | ||
278 | return pending; | ||
279 | } | ||
280 | else | ||
281 | { | ||
282 | return 0; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | /* returns true if the next piece of work to do is from a different owner. | ||
287 | */ | ||
288 | static int tasklet_ownership_change( | ||
289 | struct klmirqd_info* which, | ||
290 | enum pending_flags taskletQ) | ||
291 | { | ||
292 | /* this function doesn't have to look at work objects since they have | ||
293 | priority below tasklets. */ | ||
294 | |||
295 | unsigned long flags; | ||
296 | int ret = 0; | ||
297 | |||
298 | raw_spin_lock_irqsave(&which->lock, flags); | ||
299 | |||
300 | switch(taskletQ) | ||
301 | { | ||
302 | case LIT_TASKLET_HI: | ||
303 | if(litirq_pending_hi_irqoff(which)) | ||
304 | { | ||
305 | ret = (which->pending_tasklets_hi.head->owner != | ||
306 | which->current_owner); | ||
307 | } | ||
308 | break; | ||
309 | case LIT_TASKLET_LOW: | ||
310 | if(litirq_pending_low_irqoff(which)) | ||
311 | { | ||
312 | ret = (which->pending_tasklets.head->owner != | ||
313 | which->current_owner); | ||
314 | } | ||
315 | break; | ||
316 | default: | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
321 | |||
322 | TRACE_TASK(which->klmirqd, "ownership change needed: %d\n", ret); | ||
323 | |||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | |||
328 | static void __reeval_prio(struct klmirqd_info* which) | ||
329 | { | ||
330 | struct task_struct* next_owner = NULL; | ||
331 | struct task_struct* klmirqd = which->klmirqd; | ||
332 | |||
333 | /* Check in prio-order */ | ||
334 | u32 pending = litirq_pending_irqoff(which); | ||
335 | |||
336 | //__dump_state(which, "__reeval_prio: before"); | ||
337 | |||
338 | if(pending) | ||
339 | { | ||
340 | if(pending & LIT_TASKLET_HI) | ||
341 | { | ||
342 | next_owner = which->pending_tasklets_hi.head->owner; | ||
343 | } | ||
344 | else if(pending & LIT_TASKLET_LOW) | ||
345 | { | ||
346 | next_owner = which->pending_tasklets.head->owner; | ||
347 | } | ||
348 | else if(pending & LIT_WORK) | ||
349 | { | ||
350 | struct work_struct* work = | ||
351 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
352 | next_owner = work->owner; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | if(next_owner != which->current_owner) | ||
357 | { | ||
358 | struct task_struct* old_owner = which->current_owner; | ||
359 | |||
360 | /* bind the next owner. */ | ||
361 | which->current_owner = next_owner; | ||
362 | mb(); | ||
363 | |||
364 | if(next_owner != NULL) | ||
365 | { | ||
366 | if(!in_interrupt()) | ||
367 | { | ||
368 | TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
369 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->comm, | ||
370 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->pid, | ||
371 | next_owner->comm, next_owner->pid); | ||
372 | } | ||
373 | else | ||
374 | { | ||
375 | TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
376 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->comm, | ||
377 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->pid, | ||
378 | next_owner->comm, next_owner->pid); | ||
379 | } | ||
380 | |||
381 | litmus->increase_prio_inheritance_klmirqd(klmirqd, old_owner, next_owner); | ||
382 | } | ||
383 | else | ||
384 | { | ||
385 | if(likely(!in_interrupt())) | ||
386 | { | ||
387 | TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
388 | __FUNCTION__, klmirqd->comm, klmirqd->pid); | ||
389 | } | ||
390 | else | ||
391 | { | ||
392 | // is this a bug? | ||
393 | TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
394 | __FUNCTION__, klmirqd->comm, klmirqd->pid); | ||
395 | } | ||
396 | |||
397 | BUG_ON(pending != 0); | ||
398 | litmus->decrease_prio_inheritance_klmirqd(klmirqd, old_owner, NULL); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | //__dump_state(which, "__reeval_prio: after"); | ||
403 | } | ||
404 | |||
405 | static void reeval_prio(struct klmirqd_info* which) | ||
406 | { | ||
407 | unsigned long flags; | ||
408 | |||
409 | raw_spin_lock_irqsave(&which->lock, flags); | ||
410 | __reeval_prio(which); | ||
411 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
412 | } | ||
413 | |||
414 | |||
415 | static void wakeup_litirqd_locked(struct klmirqd_info* which) | 508 | static void wakeup_litirqd_locked(struct klmirqd_info* which) |
416 | { | 509 | { |
417 | /* Interrupts are disabled: no need to stop preemption */ | 510 | /* Interrupts are disabled: no need to stop preemption */ |
418 | if (which && which->klmirqd) | 511 | if (which && which->klmirqd) |
419 | { | 512 | { |
420 | __reeval_prio(which); /* configure the proper priority */ | ||
421 | |||
422 | if(which->klmirqd->state != TASK_RUNNING) | 513 | if(which->klmirqd->state != TASK_RUNNING) |
423 | { | 514 | { |
424 | TRACE("%s: Waking up klmirqd: %s/%d\n", __FUNCTION__, | 515 | TRACE("%s: Waking up klmirqd: %s/%d\n", __FUNCTION__, |
@@ -468,7 +559,7 @@ static void do_lit_tasklet(struct klmirqd_info* which, | |||
468 | list = list->next; | 559 | list = list->next; |
469 | 560 | ||
470 | /* execute tasklet if it has my priority and is free */ | 561 | /* execute tasklet if it has my priority and is free */ |
471 | if ((t->owner == which->current_owner) && tasklet_trylock(t)) { | 562 | if (tasklet_trylock(t)) { |
472 | if (!atomic_read(&t->count)) { | 563 | if (!atomic_read(&t->count)) { |
473 | 564 | ||
474 | sched_trace_tasklet_begin(t->owner); | 565 | sched_trace_tasklet_begin(t->owner); |
@@ -503,15 +594,14 @@ static void do_lit_tasklet(struct klmirqd_info* which, | |||
503 | 594 | ||
504 | // returns 1 if priorities need to be changed to continue processing | 595 | // returns 1 if priorities need to be changed to continue processing |
505 | // pending tasklets. | 596 | // pending tasklets. |
506 | static int do_litirq(struct klmirqd_info* which) | 597 | static void do_litirq(struct klmirqd_info* which) |
507 | { | 598 | { |
508 | u32 pending; | 599 | u32 pending; |
509 | int resched = 0; | ||
510 | 600 | ||
511 | if(in_interrupt()) | 601 | if(in_interrupt()) |
512 | { | 602 | { |
513 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); | 603 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); |
514 | return(0); | 604 | return; |
515 | } | 605 | } |
516 | 606 | ||
517 | if(which->klmirqd != current) | 607 | if(which->klmirqd != current) |
@@ -519,59 +609,40 @@ static int do_litirq(struct klmirqd_info* which) | |||
519 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", | 609 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", |
520 | __FUNCTION__, current->comm, current->pid, | 610 | __FUNCTION__, current->comm, current->pid, |
521 | which->klmirqd->comm, which->klmirqd->pid); | 611 | which->klmirqd->comm, which->klmirqd->pid); |
522 | return(0); | 612 | return; |
523 | } | 613 | } |
524 | 614 | ||
525 | if(!is_realtime(current)) | 615 | if(!is_realtime(current)) |
526 | { | 616 | { |
527 | TRACE_CUR("%s: exiting early: klmirqd is not real-time. Sched Policy = %d\n", | 617 | TRACE_CUR("%s: exiting early: klmirqd is not real-time. Sched Policy = %d\n", |
528 | __FUNCTION__, current->policy); | 618 | __FUNCTION__, current->policy); |
529 | return(0); | 619 | return; |
530 | } | 620 | } |
531 | 621 | ||
532 | 622 | ||
533 | /* We only handle tasklets & work objects, no need for RCU triggers? */ | 623 | /* We only handle tasklets & work objects, no need for RCU triggers? */ |
534 | 624 | ||
535 | pending = litirq_pending(which); | 625 | pending = litirq_pending(which); |
536 | if(pending) | 626 | if(pending) { |
537 | { | ||
538 | /* extract the work to do and do it! */ | 627 | /* extract the work to do and do it! */ |
539 | if(pending & LIT_TASKLET_HI) | 628 | if(pending & LIT_TASKLET_HI) { |
540 | { | ||
541 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); | 629 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); |
542 | do_lit_tasklet(which, &which->pending_tasklets_hi); | 630 | do_lit_tasklet(which, &which->pending_tasklets_hi); |
543 | resched = tasklet_ownership_change(which, LIT_TASKLET_HI); | ||
544 | |||
545 | if(resched) | ||
546 | { | ||
547 | TRACE_CUR("%s: HI tasklets of another owner remain. " | ||
548 | "Skipping any LOW tasklets.\n", __FUNCTION__); | ||
549 | } | ||
550 | } | 631 | } |
551 | 632 | ||
552 | if(!resched && (pending & LIT_TASKLET_LOW)) | 633 | if(pending & LIT_TASKLET_LOW) { |
553 | { | ||
554 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); | 634 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); |
555 | do_lit_tasklet(which, &which->pending_tasklets); | 635 | do_lit_tasklet(which, &which->pending_tasklets); |
556 | resched = tasklet_ownership_change(which, LIT_TASKLET_LOW); | ||
557 | |||
558 | if(resched) | ||
559 | { | ||
560 | TRACE_CUR("%s: LOW tasklets of another owner remain. " | ||
561 | "Skipping any work objects.\n", __FUNCTION__); | ||
562 | } | ||
563 | } | 636 | } |
564 | } | 637 | } |
565 | |||
566 | return(resched); | ||
567 | } | 638 | } |
568 | 639 | ||
569 | 640 | ||
570 | static void do_work(struct klmirqd_info* which) | 641 | static void do_work(struct klmirqd_info* which) |
571 | { | 642 | { |
572 | unsigned long flags; | 643 | unsigned long flags; |
573 | work_func_t f; | ||
574 | struct work_struct* work; | 644 | struct work_struct* work; |
645 | work_func_t f; | ||
575 | 646 | ||
576 | // only execute one work-queue item to yield to tasklets. | 647 | // only execute one work-queue item to yield to tasklets. |
577 | // ...is this a good idea, or should we just batch them? | 648 | // ...is this a good idea, or should we just batch them? |
@@ -594,125 +665,58 @@ static void do_work(struct klmirqd_info* which) | |||
594 | raw_spin_unlock_irqrestore(&which->lock, flags); | 665 | raw_spin_unlock_irqrestore(&which->lock, flags); |
595 | 666 | ||
596 | 667 | ||
668 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
669 | // do the work! | ||
670 | work_clear_pending(work); | ||
671 | f = work->func; | ||
672 | f(work); /* can't touch 'work' after this point, | ||
673 | the user may have freed it. */ | ||
597 | 674 | ||
598 | /* safe to read current_owner outside of lock since only this thread | 675 | atomic_dec(&which->num_work_pending); |
599 | may write to the pointer. */ | ||
600 | if(work->owner == which->current_owner) | ||
601 | { | ||
602 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
603 | // do the work! | ||
604 | work_clear_pending(work); | ||
605 | f = work->func; | ||
606 | f(work); /* can't touch 'work' after this point, | ||
607 | the user may have freed it. */ | ||
608 | |||
609 | atomic_dec(&which->num_work_pending); | ||
610 | } | ||
611 | else | ||
612 | { | ||
613 | TRACE_CUR("%s: Could not invoke work object. Requeuing.\n", | ||
614 | __FUNCTION__); | ||
615 | ___litmus_schedule_work(work, which, 0); | ||
616 | } | ||
617 | 676 | ||
618 | no_work: | 677 | no_work: |
619 | return; | 678 | return; |
620 | } | 679 | } |
621 | 680 | ||
622 | 681 | ||
623 | static int set_litmus_daemon_sched(void) | ||
624 | { | ||
625 | /* set up a daemon job that will never complete. | ||
626 | it should only ever run on behalf of another | ||
627 | real-time task. | ||
628 | |||
629 | TODO: Transition to a new job whenever a | ||
630 | new tasklet is handled */ | ||
631 | |||
632 | int ret = 0; | ||
633 | |||
634 | struct rt_task tp = { | ||
635 | .exec_cost = 0, | ||
636 | .period = 1000000000, /* dummy 1 second period */ | ||
637 | .phase = 0, | ||
638 | .cpu = task_cpu(current), | ||
639 | .budget_policy = NO_ENFORCEMENT, | ||
640 | .cls = RT_CLASS_BEST_EFFORT | ||
641 | }; | ||
642 | |||
643 | struct sched_param param = { .sched_priority = 0}; | ||
644 | |||
645 | |||
646 | /* set task params, mark as proxy thread, and init other data */ | ||
647 | tsk_rt(current)->task_params = tp; | ||
648 | tsk_rt(current)->is_proxy_thread = 1; | ||
649 | tsk_rt(current)->cur_klmirqd = NULL; | ||
650 | mutex_init(&tsk_rt(current)->klmirqd_sem); | ||
651 | atomic_set(&tsk_rt(current)->klmirqd_sem_stat, NOT_HELD); | ||
652 | |||
653 | /* inform the OS we're SCHED_LITMUS -- | ||
654 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
655 | sched_setscheduler_nocheck(current, SCHED_LITMUS, ¶m); | ||
656 | |||
657 | return ret; | ||
658 | } | ||
659 | |||
660 | static void enter_execution_phase(struct klmirqd_info* which, | ||
661 | struct mutex* sem, | ||
662 | struct task_struct* t) | ||
663 | { | ||
664 | TRACE_CUR("%s: Trying to enter execution phase. " | ||
665 | "Acquiring semaphore of %s/%d\n", __FUNCTION__, | ||
666 | t->comm, t->pid); | ||
667 | down_and_set_stat(current, HELD, sem); | ||
668 | TRACE_CUR("%s: Execution phase entered! " | ||
669 | "Acquired semaphore of %s/%d\n", __FUNCTION__, | ||
670 | t->comm, t->pid); | ||
671 | } | ||
672 | |||
673 | static void exit_execution_phase(struct klmirqd_info* which, | ||
674 | struct mutex* sem, | ||
675 | struct task_struct* t) | ||
676 | { | ||
677 | TRACE_CUR("%s: Exiting execution phase. " | ||
678 | "Releasing semaphore of %s/%d\n", __FUNCTION__, | ||
679 | t->comm, t->pid); | ||
680 | if(atomic_read(&tsk_rt(current)->klmirqd_sem_stat) == HELD) | ||
681 | { | ||
682 | up_and_set_stat(current, NOT_HELD, sem); | ||
683 | TRACE_CUR("%s: Execution phase exited! " | ||
684 | "Released semaphore of %s/%d\n", __FUNCTION__, | ||
685 | t->comm, t->pid); | ||
686 | } | ||
687 | else | ||
688 | { | ||
689 | TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__); | ||
690 | } | ||
691 | } | ||
692 | 682 | ||
693 | /* main loop for klitsoftirqd */ | 683 | /* main loop for klitsoftirqd */ |
694 | static int run_klmirqd(void* unused) | 684 | static int run_klmirqd(void* callback) |
695 | { | 685 | { |
696 | struct klmirqd_info* which = &klmirqds[klmirqd_id(current)]; | 686 | int retval = 0; |
697 | struct mutex* sem; | 687 | struct klmirqd_info* info = NULL; |
698 | struct task_struct* owner; | 688 | klmirqd_callback_t* cb = (klmirqd_callback_t*)(callback); |
699 | 689 | ||
700 | int rt_status = set_litmus_daemon_sched(); | 690 | retval = set_litmus_daemon_sched(current); |
701 | 691 | if (retval != 0) { | |
702 | if(rt_status != 0) | ||
703 | { | ||
704 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); | 692 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); |
705 | goto rt_failed; | 693 | goto failed; |
706 | } | 694 | } |
707 | 695 | ||
708 | atomic_inc(&num_ready_klmirqds); | 696 | retval = register_klmirqd(current); |
697 | if (retval != 0) { | ||
698 | TRACE_CUR("%s: Failed to become a klmirqd thread.\n", __FUNCTION__); | ||
699 | goto failed; | ||
700 | } | ||
701 | |||
702 | if (cb && cb->func) { | ||
703 | retval = cb->func(cb->arg); | ||
704 | if (retval != 0) { | ||
705 | TRACE_CUR("%s: klmirqd callback reported failure. retval = %d\n", __FUNCTION__, retval); | ||
706 | goto failed_unregister; | ||
707 | } | ||
708 | } | ||
709 | |||
710 | /* enter the interrupt handling workloop */ | ||
711 | |||
712 | info = tsk_rt(current)->klmirqd_info; | ||
709 | 713 | ||
710 | set_current_state(TASK_INTERRUPTIBLE); | 714 | set_current_state(TASK_INTERRUPTIBLE); |
711 | 715 | ||
712 | while (!kthread_should_stop()) | 716 | while (!kthread_should_stop()) |
713 | { | 717 | { |
714 | preempt_disable(); | 718 | preempt_disable(); |
715 | if (!litirq_pending(which)) | 719 | if (!litirq_pending(info)) |
716 | { | 720 | { |
717 | /* sleep for work */ | 721 | /* sleep for work */ |
718 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", | 722 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", |
@@ -731,17 +735,10 @@ static int run_klmirqd(void* unused) | |||
731 | 735 | ||
732 | __set_current_state(TASK_RUNNING); | 736 | __set_current_state(TASK_RUNNING); |
733 | 737 | ||
734 | while (litirq_pending_and_sem_and_owner(which, &sem, &owner)) | 738 | while (litirq_pending(info)) |
735 | { | 739 | { |
736 | int needs_resched = 0; | ||
737 | |||
738 | preempt_enable_no_resched(); | 740 | preempt_enable_no_resched(); |
739 | 741 | ||
740 | BUG_ON(sem == NULL); | ||
741 | |||
742 | // wait to enter execution phase; wait for 'current_owner' to block. | ||
743 | enter_execution_phase(which, sem, owner); | ||
744 | |||
745 | if(kthread_should_stop()) | 742 | if(kthread_should_stop()) |
746 | { | 743 | { |
747 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | 744 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); |
@@ -753,36 +750,23 @@ static int run_klmirqd(void* unused) | |||
753 | /* Double check that there's still pending work and the owner hasn't | 750 | /* Double check that there's still pending work and the owner hasn't |
754 | * changed. Pending items may have been flushed while we were sleeping. | 751 | * changed. Pending items may have been flushed while we were sleeping. |
755 | */ | 752 | */ |
756 | if(litirq_pending_with_owner(which, owner)) | 753 | if(litirq_pending(info)) |
757 | { | 754 | { |
758 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", | 755 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", |
759 | __FUNCTION__); | 756 | __FUNCTION__); |
760 | 757 | ||
761 | needs_resched = do_litirq(which); | 758 | do_litirq(info); |
762 | 759 | ||
763 | preempt_enable_no_resched(); | 760 | preempt_enable_no_resched(); |
764 | 761 | ||
765 | // work objects are preemptible. | 762 | // work objects are preemptible. |
766 | if(!needs_resched) | 763 | do_work(info); |
767 | { | ||
768 | do_work(which); | ||
769 | } | ||
770 | |||
771 | // exit execution phase. | ||
772 | exit_execution_phase(which, sem, owner); | ||
773 | |||
774 | TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__); | ||
775 | reeval_prio(which); /* check if we need to change priority here */ | ||
776 | } | 764 | } |
777 | else | 765 | else |
778 | { | 766 | { |
779 | TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n", | 767 | TRACE_CUR("%s: Pending work was flushed!\n", __FUNCTION__); |
780 | __FUNCTION__, | ||
781 | owner->comm, owner->pid); | ||
782 | preempt_enable_no_resched(); | ||
783 | 768 | ||
784 | // exit execution phase. | 769 | preempt_enable_no_resched(); |
785 | exit_execution_phase(which, sem, owner); | ||
786 | } | 770 | } |
787 | 771 | ||
788 | cond_resched(); | 772 | cond_resched(); |
@@ -793,183 +777,39 @@ static int run_klmirqd(void* unused) | |||
793 | } | 777 | } |
794 | __set_current_state(TASK_RUNNING); | 778 | __set_current_state(TASK_RUNNING); |
795 | 779 | ||
796 | atomic_dec(&num_ready_klmirqds); | 780 | failed_unregister: |
781 | /* remove our registration from klmirqd */ | ||
782 | unregister_klmirqd(current); | ||
797 | 783 | ||
798 | rt_failed: | 784 | failed: |
799 | litmus_exit_task(current); | 785 | litmus_exit_task(current); |
800 | 786 | ||
801 | return rt_status; | 787 | return retval; |
802 | } | 788 | } |
803 | 789 | ||
804 | 790 | ||
805 | struct klmirqd_launch_data | 791 | void flush_pending(struct task_struct* tsk) |
806 | { | ||
807 | int* cpu_affinity; | ||
808 | struct work_struct work; | ||
809 | }; | ||
810 | |||
811 | /* executed by a kworker from workqueues */ | ||
812 | static void launch_klmirqd(struct work_struct *work) | ||
813 | { | 792 | { |
814 | int i; | 793 | unsigned long flags; |
815 | 794 | struct tasklet_struct *list; | |
816 | struct klmirqd_launch_data* launch_data = | 795 | u32 work_flushed = 0; |
817 | container_of(work, struct klmirqd_launch_data, work); | ||
818 | |||
819 | TRACE("%s: Creating %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
820 | |||
821 | /* create the daemon threads */ | ||
822 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
823 | { | ||
824 | if(launch_data->cpu_affinity) | ||
825 | { | ||
826 | klmirqds[i].klmirqd = | ||
827 | kthread_create( | ||
828 | run_klmirqd, | ||
829 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
830 | (void*)(long long)launch_data->cpu_affinity[i], | ||
831 | "klmirqd_th%d/%d", | ||
832 | i, | ||
833 | launch_data->cpu_affinity[i]); | ||
834 | |||
835 | /* litmus will put us in the right cluster. */ | ||
836 | kthread_bind(klmirqds[i].klmirqd, launch_data->cpu_affinity[i]); | ||
837 | } | ||
838 | else | ||
839 | { | ||
840 | klmirqds[i].klmirqd = | ||
841 | kthread_create( | ||
842 | run_klmirqd, | ||
843 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
844 | (void*)(long long)(-1), | ||
845 | "klmirqd_th%d", | ||
846 | i); | ||
847 | } | ||
848 | } | ||
849 | |||
850 | TRACE("%s: Launching %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
851 | |||
852 | /* unleash the daemons */ | ||
853 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
854 | { | ||
855 | wake_up_process(klmirqds[i].klmirqd); | ||
856 | } | ||
857 | |||
858 | if(launch_data->cpu_affinity) | ||
859 | kfree(launch_data->cpu_affinity); | ||
860 | kfree(launch_data); | ||
861 | } | ||
862 | 796 | ||
797 | struct klmirqd_info *which; | ||
863 | 798 | ||
864 | void spawn_klmirqd(int* affinity) | 799 | if (!tsk_rt(tsk)->is_interrupt_thread) { |
865 | { | 800 | TRACE("%s/%d is not a proxy thread\n", tsk->comm, tsk->pid); |
866 | int i; | 801 | WARN_ON(1); |
867 | struct klmirqd_launch_data* delayed_launch; | ||
868 | |||
869 | if(atomic_read(&num_ready_klmirqds) != 0) | ||
870 | { | ||
871 | TRACE("%s: At least one klmirqd is already running! Need to call kill_klmirqd()?\n"); | ||
872 | return; | 802 | return; |
873 | } | 803 | } |
874 | 804 | ||
875 | /* init the tasklet & work queues */ | 805 | which = tsk_rt(tsk)->klmirqd_info; |
876 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | 806 | if (!which) { |
877 | { | 807 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); |
878 | klmirqds[i].terminating = 0; | 808 | WARN_ON(1); |
879 | klmirqds[i].pending = 0; | 809 | return; |
880 | |||
881 | klmirqds[i].num_hi_pending.counter = 0; | ||
882 | klmirqds[i].num_low_pending.counter = 0; | ||
883 | klmirqds[i].num_work_pending.counter = 0; | ||
884 | |||
885 | klmirqds[i].pending_tasklets_hi.head = NULL; | ||
886 | klmirqds[i].pending_tasklets_hi.tail = &klmirqds[i].pending_tasklets_hi.head; | ||
887 | |||
888 | klmirqds[i].pending_tasklets.head = NULL; | ||
889 | klmirqds[i].pending_tasklets.tail = &klmirqds[i].pending_tasklets.head; | ||
890 | |||
891 | INIT_LIST_HEAD(&klmirqds[i].worklist); | ||
892 | |||
893 | raw_spin_lock_init(&klmirqds[i].lock); | ||
894 | } | ||
895 | |||
896 | /* wait to flush the initializations to memory since other threads | ||
897 | will access it. */ | ||
898 | mb(); | ||
899 | |||
900 | /* tell a work queue to launch the threads. we can't make scheduling | ||
901 | calls since we're in an atomic state. */ | ||
902 | TRACE("%s: Setting callback up to launch klmirqds\n", __FUNCTION__); | ||
903 | delayed_launch = kmalloc(sizeof(struct klmirqd_launch_data), GFP_ATOMIC); | ||
904 | if(affinity) | ||
905 | { | ||
906 | delayed_launch->cpu_affinity = | ||
907 | kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC); | ||
908 | |||
909 | memcpy(delayed_launch->cpu_affinity, affinity, | ||
910 | sizeof(int)*NR_LITMUS_SOFTIRQD); | ||
911 | } | ||
912 | else | ||
913 | { | ||
914 | delayed_launch->cpu_affinity = NULL; | ||
915 | } | ||
916 | INIT_WORK(&delayed_launch->work, launch_klmirqd); | ||
917 | schedule_work(&delayed_launch->work); | ||
918 | } | ||
919 | |||
920 | |||
921 | void kill_klmirqd(void) | ||
922 | { | ||
923 | if(!klmirqd_is_dead()) | ||
924 | { | ||
925 | int i; | ||
926 | |||
927 | TRACE("%s: Killing %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
928 | |||
929 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
930 | { | ||
931 | if(klmirqds[i].terminating != 1) | ||
932 | { | ||
933 | klmirqds[i].terminating = 1; | ||
934 | mb(); /* just to be sure? */ | ||
935 | flush_pending(klmirqds[i].klmirqd, NULL); | ||
936 | |||
937 | /* signal termination */ | ||
938 | kthread_stop(klmirqds[i].klmirqd); | ||
939 | } | ||
940 | } | ||
941 | } | 810 | } |
942 | } | ||
943 | 811 | ||
944 | 812 | ||
945 | int klmirqd_is_ready(void) | ||
946 | { | ||
947 | return(atomic_read(&num_ready_klmirqds) == NR_LITMUS_SOFTIRQD); | ||
948 | } | ||
949 | |||
950 | int klmirqd_is_dead(void) | ||
951 | { | ||
952 | return(atomic_read(&num_ready_klmirqds) == 0); | ||
953 | } | ||
954 | |||
955 | |||
956 | struct task_struct* get_klmirqd(unsigned int k_id) | ||
957 | { | ||
958 | return(klmirqds[k_id].klmirqd); | ||
959 | } | ||
960 | |||
961 | |||
962 | void flush_pending(struct task_struct* klmirqd_thread, | ||
963 | struct task_struct* owner) | ||
964 | { | ||
965 | unsigned int k_id = klmirqd_id(klmirqd_thread); | ||
966 | struct klmirqd_info *which = &klmirqds[k_id]; | ||
967 | |||
968 | unsigned long flags; | ||
969 | struct tasklet_struct *list; | ||
970 | |||
971 | u32 work_flushed = 0; | ||
972 | |||
973 | raw_spin_lock_irqsave(&which->lock, flags); | 813 | raw_spin_lock_irqsave(&which->lock, flags); |
974 | 814 | ||
975 | //__dump_state(which, "flush_pending: before"); | 815 | //__dump_state(which, "flush_pending: before"); |
@@ -990,35 +830,27 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
990 | struct tasklet_struct *t = list; | 830 | struct tasklet_struct *t = list; |
991 | list = list->next; | 831 | list = list->next; |
992 | 832 | ||
993 | if(likely((t->owner == owner) || (owner == NULL))) | 833 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) |
994 | { | 834 | { |
995 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | 835 | BUG(); |
996 | { | 836 | } |
997 | BUG(); | ||
998 | } | ||
999 | 837 | ||
1000 | work_flushed |= LIT_TASKLET_HI; | 838 | work_flushed |= LIT_TASKLET_HI; |
1001 | 839 | ||
1002 | t->owner = NULL; | 840 | t->owner = NULL; |
1003 | 841 | ||
1004 | // WTF? | 842 | // WTF? |
1005 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 843 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
1006 | { | 844 | { |
1007 | atomic_dec(&which->num_hi_pending); | 845 | atomic_dec(&which->num_hi_pending); |
1008 | ___tasklet_hi_schedule(t); | 846 | ___tasklet_hi_schedule(t); |
1009 | } | ||
1010 | else | ||
1011 | { | ||
1012 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); | ||
1013 | BUG(); | ||
1014 | } | ||
1015 | } | 847 | } |
1016 | else | 848 | else |
1017 | { | 849 | { |
1018 | TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__); | 850 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); |
1019 | // put back on queue. | 851 | BUG(); |
1020 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
1021 | } | 852 | } |
853 | |||
1022 | } | 854 | } |
1023 | } | 855 | } |
1024 | 856 | ||
@@ -1038,34 +870,25 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
1038 | struct tasklet_struct *t = list; | 870 | struct tasklet_struct *t = list; |
1039 | list = list->next; | 871 | list = list->next; |
1040 | 872 | ||
1041 | if(likely((t->owner == owner) || (owner == NULL))) | 873 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) |
1042 | { | 874 | { |
1043 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | 875 | BUG(); |
1044 | { | 876 | } |
1045 | BUG(); | ||
1046 | } | ||
1047 | 877 | ||
1048 | work_flushed |= LIT_TASKLET_LOW; | 878 | work_flushed |= LIT_TASKLET_LOW; |
1049 | 879 | ||
1050 | t->owner = NULL; | 880 | t->owner = NULL; |
1051 | sched_trace_tasklet_end(owner, 1ul); | 881 | // sched_trace_tasklet_end(owner, 1ul); |
1052 | 882 | ||
1053 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 883 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
1054 | { | 884 | { |
1055 | atomic_dec(&which->num_low_pending); | 885 | atomic_dec(&which->num_low_pending); |
1056 | ___tasklet_schedule(t); | 886 | ___tasklet_schedule(t); |
1057 | } | ||
1058 | else | ||
1059 | { | ||
1060 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); | ||
1061 | BUG(); | ||
1062 | } | ||
1063 | } | 887 | } |
1064 | else | 888 | else |
1065 | { | 889 | { |
1066 | TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__); | 890 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); |
1067 | // put back on queue | 891 | BUG(); |
1068 | ___litmus_tasklet_schedule(t, which, 0); | ||
1069 | } | 892 | } |
1070 | } | 893 | } |
1071 | } | 894 | } |
@@ -1083,21 +906,12 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
1083 | list_first_entry(&which->worklist, struct work_struct, entry); | 906 | list_first_entry(&which->worklist, struct work_struct, entry); |
1084 | list_del_init(&work->entry); | 907 | list_del_init(&work->entry); |
1085 | 908 | ||
1086 | if(likely((work->owner == owner) || (owner == NULL))) | 909 | work_flushed |= LIT_WORK; |
1087 | { | 910 | atomic_dec(&which->num_work_pending); |
1088 | work_flushed |= LIT_WORK; | ||
1089 | atomic_dec(&which->num_work_pending); | ||
1090 | 911 | ||
1091 | work->owner = NULL; | 912 | work->owner = NULL; |
1092 | sched_trace_work_end(owner, current, 1ul); | 913 | // sched_trace_work_end(owner, current, 1ul); |
1093 | __schedule_work(work); | 914 | __schedule_work(work); |
1094 | } | ||
1095 | else | ||
1096 | { | ||
1097 | TRACE("%s: Could not flush a work object.\n", __FUNCTION__); | ||
1098 | // put back on queue | ||
1099 | ___litmus_schedule_work(work, which, 0); | ||
1100 | } | ||
1101 | } | 915 | } |
1102 | } | 916 | } |
1103 | 917 | ||
@@ -1106,22 +920,6 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
1106 | 920 | ||
1107 | mb(); /* commit changes to pending flags */ | 921 | mb(); /* commit changes to pending flags */ |
1108 | 922 | ||
1109 | /* reset the scheduling priority */ | ||
1110 | if(work_flushed) | ||
1111 | { | ||
1112 | __reeval_prio(which); | ||
1113 | |||
1114 | /* Try to offload flushed tasklets to Linux's ksoftirqd. */ | ||
1115 | if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI)) | ||
1116 | { | ||
1117 | wakeup_softirqd(); | ||
1118 | } | ||
1119 | } | ||
1120 | else | ||
1121 | { | ||
1122 | TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__); | ||
1123 | } | ||
1124 | |||
1125 | raw_spin_unlock_irqrestore(&which->lock, flags); | 923 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1126 | } | 924 | } |
1127 | 925 | ||
@@ -1161,39 +959,27 @@ static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | |||
1161 | raw_spin_unlock_irqrestore(&which->lock, flags); | 959 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1162 | } | 960 | } |
1163 | 961 | ||
1164 | int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id) | 962 | |
963 | int __litmus_tasklet_schedule(struct tasklet_struct *t, struct task_struct* klmirqd_thread) | ||
1165 | { | 964 | { |
1166 | int ret = 0; /* assume failure */ | 965 | int ret = 0; /* assume failure */ |
1167 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | 966 | struct klmirqd_info* info; |
1168 | { | ||
1169 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1170 | BUG(); | ||
1171 | } | ||
1172 | 967 | ||
1173 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | 968 | if (unlikely(!is_realtime(klmirqd_thread) || |
1174 | { | 969 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1175 | TRACE("%s: No klmirqd_th%d!\n", __FUNCTION__, k_id); | 970 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1176 | BUG(); | 971 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1177 | } | 972 | return ret; |
973 | } | ||
1178 | 974 | ||
1179 | if(likely(!klmirqds[k_id].terminating)) | 975 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1180 | { | ||
1181 | /* Can't accept tasklets while we're processing a workqueue | ||
1182 | because they're handled by the same thread. This case is | ||
1183 | very RARE. | ||
1184 | 976 | ||
1185 | TODO: Use a separate thread for work objects!!!!!! | 977 | if (likely(!info->terminating)) { |
1186 | */ | 978 | ret = 1; |
1187 | if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0)) | 979 | ___litmus_tasklet_schedule(t, info, 1); |
1188 | { | 980 | } |
1189 | ret = 1; | 981 | else { |
1190 | ___litmus_tasklet_schedule(t, &klmirqds[k_id], 1); | 982 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1191 | } | ||
1192 | else | ||
1193 | { | ||
1194 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1195 | __FUNCTION__); | ||
1196 | } | ||
1197 | } | 983 | } |
1198 | return(ret); | 984 | return(ret); |
1199 | } | 985 | } |
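A sketch of the intended call site for the function above: a driver that has resolved which klmirqd thread should service its deferred work sets the tasklet's owner and hands the tasklet over, falling back to the regular Linux softirq path when the request is refused (the dispatch helper and the way the caller tracks its thread are hypothetical):

    #include <linux/interrupt.h>
    #include <litmus/litmus_softirq.h>

    /* hypothetical dispatch helper built on __litmus_tasklet_schedule() */
    static void example_dispatch(struct tasklet_struct *t,
                                 struct task_struct *owner,
                                 struct task_struct *klmirqd_thread)
    {
            t->owner = owner;       /* task the handler runs on behalf of */

            /* returns 0 if the thread cannot accept work (e.g. it is terminating) */
            if (!__litmus_tasklet_schedule(t, klmirqd_thread))
                    tasklet_schedule(t);    /* regular, non-LITMUS softirq path */
    }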
@@ -1230,100 +1016,77 @@ static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | |||
1230 | raw_spin_unlock_irqrestore(&which->lock, flags); | 1016 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1231 | } | 1017 | } |
1232 | 1018 | ||
1233 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id) | 1019 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, struct task_struct* klmirqd_thread) |
1234 | { | 1020 | { |
1235 | int ret = 0; /* assume failure */ | 1021 | int ret = 0; /* assume failure */ |
1236 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | 1022 | struct klmirqd_info* info; |
1237 | { | ||
1238 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1239 | BUG(); | ||
1240 | } | ||
1241 | 1023 | ||
1242 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | 1024 | if (unlikely(!is_realtime(klmirqd_thread) || |
1243 | { | 1025 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1244 | TRACE("%s: No klmirqd_th%d!\n", __FUNCTION__, k_id); | 1026 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1245 | BUG(); | 1027 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1246 | } | 1028 | return ret; |
1029 | } | ||
1247 | 1030 | ||
1248 | if(unlikely(!klmirqd_is_ready())) | 1031 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1249 | { | ||
1250 | TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id); | ||
1251 | BUG(); | ||
1252 | } | ||
1253 | 1032 | ||
1254 | if(likely(!klmirqds[k_id].terminating)) | 1033 | if (likely(!info->terminating)) { |
1255 | { | 1034 | ret = 1; |
1256 | if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0)) | 1035 | ___litmus_tasklet_hi_schedule(t, info, 1); |
1257 | { | ||
1258 | ret = 1; | ||
1259 | ___litmus_tasklet_hi_schedule(t, &klmirqds[k_id], 1); | ||
1260 | } | ||
1261 | else | ||
1262 | { | ||
1263 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1264 | __FUNCTION__); | ||
1265 | } | ||
1266 | } | 1036 | } |
1037 | else { | ||
1038 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
1039 | } | ||
1040 | |||
1267 | return(ret); | 1041 | return(ret); |
1268 | } | 1042 | } |
1269 | 1043 | ||
1270 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); | 1044 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); |
1271 | 1045 | ||
1272 | 1046 | ||
1273 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id) | 1047 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, struct task_struct* klmirqd_thread) |
1274 | { | 1048 | { |
1275 | int ret = 0; /* assume failure */ | 1049 | int ret = 0; /* assume failure */ |
1276 | u32 old_pending; | 1050 | u32 old_pending; |
1051 | struct klmirqd_info* info; | ||
1277 | 1052 | ||
1278 | BUG_ON(!irqs_disabled()); | 1053 | BUG_ON(!irqs_disabled()); |
1279 | 1054 | ||
1280 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | 1055 | if (unlikely(!is_realtime(klmirqd_thread) || |
1281 | { | 1056 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1282 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | 1057 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1283 | BUG(); | 1058 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1284 | } | 1059 | return ret; |
1060 | } | ||
1285 | 1061 | ||
1286 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | 1062 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1287 | { | ||
1288 | TRACE("%s: No klmirqd_th%u!\n", __FUNCTION__, k_id); | ||
1289 | BUG(); | ||
1290 | } | ||
1291 | 1063 | ||
1292 | if(unlikely(!klmirqd_is_ready())) | 1064 | if (likely(!info->terminating)) { |
1293 | { | ||
1294 | TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id); | ||
1295 | BUG(); | ||
1296 | } | ||
1297 | 1065 | ||
1298 | if(likely(!klmirqds[k_id].terminating)) | 1066 | raw_spin_lock(&info->lock); |
1299 | { | ||
1300 | raw_spin_lock(&klmirqds[k_id].lock); | ||
1301 | 1067 | ||
1302 | if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0)) | 1068 | ret = 1; // success! |
1303 | { | ||
1304 | ret = 1; // success! | ||
1305 | 1069 | ||
1306 | t->next = klmirqds[k_id].pending_tasklets_hi.head; | 1070 | t->next = info->pending_tasklets_hi.head; |
1307 | klmirqds[k_id].pending_tasklets_hi.head = t; | 1071 | info->pending_tasklets_hi.head = t; |
1308 | 1072 | ||
1309 | old_pending = klmirqds[k_id].pending; | 1073 | old_pending = info->pending; |
1310 | klmirqds[k_id].pending |= LIT_TASKLET_HI; | 1074 | info->pending |= LIT_TASKLET_HI; |
1311 | 1075 | ||
1312 | atomic_inc(&klmirqds[k_id].num_hi_pending); | 1076 | atomic_inc(&info->num_hi_pending); |
1313 | 1077 | ||
1314 | mb(); | 1078 | mb(); |
1315 | 1079 | ||
1316 | if(!old_pending) | 1080 | if(!old_pending) { |
1317 | wakeup_litirqd_locked(&klmirqds[k_id]); /* wake up the klmirqd */ | 1081 | wakeup_litirqd_locked(info); /* wake up the klmirqd */ |
1318 | } | ||
1319 | else | ||
1320 | { | ||
1321 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1322 | __FUNCTION__); | ||
1323 | } | 1082 | } |
1324 | 1083 | ||
1325 | raw_spin_unlock(&klmirqds[k_id].lock); | 1084 | raw_spin_unlock(&info->lock); |
1326 | } | 1085 | } |
1086 | else { | ||
1087 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
1088 | } | ||
1089 | |||
1327 | return(ret); | 1090 | return(ret); |
1328 | } | 1091 | } |
1329 | 1092 | ||
@@ -1358,225 +1121,30 @@ static void ___litmus_schedule_work(struct work_struct *w, | |||
1358 | raw_spin_unlock_irqrestore(&which->lock, flags); | 1121 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1359 | } | 1122 | } |
1360 | 1123 | ||
1361 | int __litmus_schedule_work(struct work_struct *w, unsigned int k_id) | 1124 | int __litmus_schedule_work(struct work_struct *w, struct task_struct* klmirqd_thread) |
1362 | { | 1125 | { |
1363 | int ret = 1; /* assume success */ | 1126 | int ret = 1; /* assume success */ |
1364 | if(unlikely(w->owner == NULL) || !is_realtime(w->owner)) | 1127 | struct klmirqd_info* info; |
1365 | { | ||
1366 | TRACE("%s: No owner associated with this work object!\n", __FUNCTION__); | ||
1367 | BUG(); | ||
1368 | } | ||
1369 | |||
1370 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1371 | { | ||
1372 | TRACE("%s: No klmirqd_th%u!\n", k_id); | ||
1373 | BUG(); | ||
1374 | } | ||
1375 | |||
1376 | if(unlikely(!klmirqd_is_ready())) | ||
1377 | { | ||
1378 | TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id); | ||
1379 | BUG(); | ||
1380 | } | ||
1381 | |||
1382 | if(likely(!klmirqds[k_id].terminating)) | ||
1383 | ___litmus_schedule_work(w, &klmirqds[k_id], 1); | ||
1384 | else | ||
1385 | ret = 0; | ||
1386 | return(ret); | ||
1387 | } | ||
1388 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
1389 | |||
1390 | |||
1391 | static int set_klmirqd_sem_status(unsigned long stat) | ||
1392 | { | ||
1393 | TRACE_CUR("SETTING STATUS FROM %d TO %d\n", | ||
1394 | atomic_read(&tsk_rt(current)->klmirqd_sem_stat), | ||
1395 | stat); | ||
1396 | atomic_set(&tsk_rt(current)->klmirqd_sem_stat, stat); | ||
1397 | //mb(); | ||
1398 | |||
1399 | return(0); | ||
1400 | } | ||
1401 | |||
1402 | static int set_klmirqd_sem_status_if_not_held(unsigned long stat) | ||
1403 | { | ||
1404 | if(atomic_read(&tsk_rt(current)->klmirqd_sem_stat) != HELD) | ||
1405 | { | ||
1406 | return(set_klmirqd_sem_status(stat)); | ||
1407 | } | ||
1408 | return(-1); | ||
1409 | } | ||
1410 | |||
1411 | |||
1412 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
1413 | enum klmirqd_sem_status to_reset, | ||
1414 | enum klmirqd_sem_status to_set, | ||
1415 | struct mutex* sem) | ||
1416 | { | ||
1417 | #if 0 | ||
1418 | struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem); | ||
1419 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1420 | |||
1421 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1422 | __FUNCTION__, task->comm, task->pid); | ||
1423 | #endif | ||
1424 | |||
1425 | mutex_lock_sfx(sem, | ||
1426 | set_klmirqd_sem_status_if_not_held, to_reset, | ||
1427 | set_klmirqd_sem_status, to_set); | ||
1428 | #if 0 | ||
1429 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1430 | __FUNCTION__, task->comm, task->pid); | ||
1431 | #endif | ||
1432 | } | ||
1433 | |||
1434 | void down_and_set_stat(struct task_struct* t, | ||
1435 | enum klmirqd_sem_status to_set, | ||
1436 | struct mutex* sem) | ||
1437 | { | ||
1438 | #if 0 | ||
1439 | struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem); | ||
1440 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1441 | |||
1442 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1443 | __FUNCTION__, task->comm, task->pid); | ||
1444 | #endif | ||
1445 | |||
1446 | mutex_lock_sfx(sem, | ||
1447 | NULL, 0, | ||
1448 | set_klmirqd_sem_status, to_set); | ||
1449 | |||
1450 | #if 0 | ||
1451 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1452 | __FUNCTION__, task->comm, task->pid); | ||
1453 | #endif | ||
1454 | } | ||
1455 | |||
1456 | |||
1457 | void up_and_set_stat(struct task_struct* t, | ||
1458 | enum klmirqd_sem_status to_set, | ||
1459 | struct mutex* sem) | ||
1460 | { | ||
1461 | #if 0 | ||
1462 | struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem); | ||
1463 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1464 | |||
1465 | TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n", | ||
1466 | __FUNCTION__, | ||
1467 | task->comm, task->pid); | ||
1468 | #endif | ||
1469 | |||
1470 | mutex_unlock_sfx(sem, NULL, 0, | ||
1471 | set_klmirqd_sem_status, to_set); | ||
1472 | |||
1473 | #if 0 | ||
1474 | TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n", | ||
1475 | __FUNCTION__, | ||
1476 | task->comm, task->pid); | ||
1477 | #endif | ||
1478 | } | ||
1479 | |||
1480 | |||
1481 | |||
1482 | void release_klmirqd_lock(struct task_struct* t) | ||
1483 | { | ||
1484 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klmirqd_sem_stat) == HELD)) | ||
1485 | { | ||
1486 | struct mutex* sem; | ||
1487 | struct task_struct* owner = t; | ||
1488 | |||
1489 | if(t->state == TASK_RUNNING) | ||
1490 | { | ||
1491 | TRACE_TASK(t, "NOT giving up klmirqd_sem because we're not blocked!\n"); | ||
1492 | return; | ||
1493 | } | ||
1494 | 1128 | ||
1495 | if(likely(!tsk_rt(t)->is_proxy_thread)) | 1129 | if (unlikely(!is_realtime(klmirqd_thread) || |
1496 | { | 1130 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1497 | sem = &tsk_rt(t)->klmirqd_sem; | 1131 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1498 | } | 1132 | TRACE("%s: %s/%d can't handle work items\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); |
1499 | else | 1133 | return ret; |
1500 | { | ||
1501 | unsigned int k_id = klmirqd_id(t); | ||
1502 | owner = klmirqds[k_id].current_owner; | ||
1503 | |||
1504 | BUG_ON(t != klmirqds[k_id].klmirqd); | ||
1505 | |||
1506 | if(likely(owner)) | ||
1507 | { | ||
1508 | sem = &tsk_rt(owner)->klmirqd_sem; | ||
1509 | } | ||
1510 | else | ||
1511 | { | ||
1512 | BUG(); | ||
1513 | |||
1514 | // We had the rug pulled out from under us. Abort attempt | ||
1515 | // to reacquire the lock since our client no longer needs us. | ||
1516 | TRACE_CUR("HUH?! How did this happen?\n"); | ||
1517 | atomic_set(&tsk_rt(t)->klmirqd_sem_stat, NOT_HELD); | ||
1518 | return; | ||
1519 | } | ||
1520 | } | ||
1521 | |||
1522 | //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid); | ||
1523 | up_and_set_stat(t, NEED_TO_REACQUIRE, sem); | ||
1524 | //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid); | ||
1525 | } | ||
1526 | /* | ||
1527 | else if(is_realtime(t)) | ||
1528 | { | ||
1529 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klmirqd_sem_stat); | ||
1530 | } | 1134 | } |
1531 | */ | ||
1532 | } | ||
1533 | 1135 | ||
1534 | int reacquire_klmirqd_lock(struct task_struct* t) | 1136 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1535 | { | ||
1536 | int ret = 0; | ||
1537 | |||
1538 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klmirqd_sem_stat) == NEED_TO_REACQUIRE)) | ||
1539 | { | ||
1540 | struct mutex* sem; | ||
1541 | struct task_struct* owner = t; | ||
1542 | |||
1543 | if(likely(!tsk_rt(t)->is_proxy_thread)) | ||
1544 | { | ||
1545 | sem = &tsk_rt(t)->klmirqd_sem; | ||
1546 | } | ||
1547 | else | ||
1548 | { | ||
1549 | unsigned int k_id = klmirqd_id(t); | ||
1550 | //struct task_struct* owner = klmirqds[k_id].current_owner; | ||
1551 | owner = klmirqds[k_id].current_owner; | ||
1552 | |||
1553 | BUG_ON(t != klmirqds[k_id].klmirqd); | ||
1554 | 1137 | ||
1555 | if(likely(owner)) | ||
1556 | { | ||
1557 | sem = &tsk_rt(owner)->klmirqd_sem; | ||
1558 | } | ||
1559 | else | ||
1560 | { | ||
1561 | // We had the rug pulled out from under us. Abort attempt | ||
1562 | // to reacquire the lock since our client no longer needs us. | ||
1563 | TRACE_CUR("No longer needs to reacquire klmirqd_sem!\n"); | ||
1564 | atomic_set(&tsk_rt(t)->klmirqd_sem_stat, NOT_HELD); | ||
1565 | return(0); | ||
1566 | } | ||
1567 | } | ||
1568 | 1138 | ||
1569 | //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid); | 1139 | if (likely(!info->terminating)) { |
1570 | __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem); | 1140 | ___litmus_schedule_work(w, info, 1); |
1571 | //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid); | ||
1572 | } | 1141 | } |
1573 | /* | 1142 | else { |
1574 | else if(is_realtime(t)) | 1143 | TRACE("%s: Work rejected because %s/%d is terminating\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); |
1575 | { | 1144 | ret = 0; |
1576 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klmirqd_sem_stat); | ||
1577 | } | 1145 | } |
1578 | */ | ||
1579 | 1146 | ||
1580 | return(ret); | 1147 | return(ret); |
1581 | } | 1148 | } |
1149 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
1582 | 1150 | ||
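A minimal caller sketch of the new interface (the queue_gpu_work() helper is hypothetical and not part of this patch; it only combines interfaces introduced here), showing that work items are now addressed to a klmirqd thread by task_struct rather than by daemon index:

/* Hypothetical helper: route a work item to the klmirqd thread that
 * services a given GPU, falling back to a normal workqueue otherwise. */
static void queue_gpu_work(struct work_struct *w, u32 gpu)
{
	struct task_struct *klmirqd = get_nv_klmirqd_thread(gpu);

	if (klmirqd)
		__litmus_schedule_work(w, klmirqd);
	else
		schedule_work(w);
}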
diff --git a/litmus/locking.c b/litmus/locking.c index 22f46df4308a..7af1dd69a079 100644 --- a/litmus/locking.c +++ b/litmus/locking.c | |||
@@ -543,32 +543,54 @@ out: | |||
543 | 543 | ||
544 | void suspend_for_lock(void) | 544 | void suspend_for_lock(void) |
545 | { | 545 | { |
546 | #ifdef CONFIG_REALTIME_AUX_TASKS | 546 | #if defined(CONFIG_REALTIME_AUX_TASKS) || defined(CONFIG_LITMUS_NVIDIA) |
547 | #if 0 | ||
548 | unsigned int restore = 0; | ||
549 | struct task_struct *t = current; | 547 | struct task_struct *t = current; |
550 | unsigned int hide; | ||
551 | |||
552 | if (tsk_rt(t)->has_aux_tasks) { | ||
553 | /* hide from aux tasks so they can't inherit our priority when we block | ||
554 | * for a litmus lock. inheritance is already going to a litmus lock | ||
555 | * holder. */ | ||
556 | hide = tsk_rt(t)->hide_from_aux_tasks; | ||
557 | restore = 1; | ||
558 | tsk_rt(t)->hide_from_aux_tasks = 1; | ||
559 | } | ||
560 | #endif | 548 | #endif |
549 | |||
550 | #ifdef CONFIG_REALTIME_AUX_TASKS | ||
551 | unsigned int aux_restore = 0; | ||
552 | unsigned int aux_hide; | ||
553 | #endif | ||
554 | |||
555 | #ifdef CONFIG_LITMUS_NVIDIA | ||
556 | unsigned int gpu_restore = 0; | ||
557 | unsigned int gpu_hide; | ||
558 | #endif | ||
559 | |||
560 | |||
561 | //#ifdef CONFIG_REALTIME_AUX_TASKS | ||
562 | // if (tsk_rt(t)->has_aux_tasks) { | ||
563 | // /* hide from aux tasks so they can't inherit our priority when we block | ||
564 | // * for a litmus lock. inheritance is already going to a litmus lock | ||
565 | // * holder. */ | ||
566 | // aux_hide = tsk_rt(t)->hide_from_aux_tasks; | ||
567 | // aux_restore = 1; | ||
568 | // tsk_rt(t)->hide_from_aux_tasks = 1; | ||
569 | // } | ||
570 | //#endif | ||
571 | |||
572 | #ifdef CONFIG_LITMUS_NVIDIA | ||
573 | if (tsk_rt(t)->held_gpus) { | ||
574 | gpu_hide = tsk_rt(t)->hide_from_gpu; | ||
575 | gpu_restore = 1; | ||
576 | tsk_rt(t)->hide_from_gpu = 1; | ||
577 | } | ||
561 | #endif | 578 | #endif |
562 | 579 | ||
563 | schedule(); | 580 | schedule(); |
564 | 581 | ||
565 | #ifdef CONFIG_REALTIME_AUX_TASKS | 582 | #ifdef CONFIG_LITMUS_NVIDIA |
566 | #if 0 | 583 | if (gpu_restore) { |
567 | if (restore) { | ||
568 | /* restore our state */ | 584 | /* restore our state */ |
569 | tsk_rt(t)->hide_from_aux_tasks = hide; | 585 | tsk_rt(t)->hide_from_gpu = gpu_hide; |
570 | } | 586 | } |
571 | #endif | 587 | #endif |
588 | |||
589 | #ifdef CONFIG_REALTIME_AUX_TASKS | ||
590 | if (aux_restore) { | ||
591 | /* restore our state */ | ||
592 | tsk_rt(t)->hide_from_aux_tasks = aux_hide; | ||
593 | } | ||
572 | #endif | 594 | #endif |
573 | } | 595 | } |
574 | 596 | ||
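The hide_from_gpu flag saved and set above is consumed by the plugin block/wake hooks added later in this patch; a condensed sketch of that consumer side (illustrative only, mirroring the sched_cedf.c and sched_gsn_edf.c hunks below):

/* Sketch: a GPU holder that blocks donates its priority to the GPU's
 * klmirqd thread, unless it hid itself while suspending for a litmus lock. */
if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
	enable_gpu_owner(t);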
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c index 22586cde8255..b29f4d3f0dac 100644 --- a/litmus/nvidia_info.c +++ b/litmus/nvidia_info.c | |||
@@ -10,6 +10,10 @@ | |||
10 | 10 | ||
11 | #include <litmus/binheap.h> | 11 | #include <litmus/binheap.h> |
12 | 12 | ||
13 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
14 | #include <litmus/litmus_softirq.h> | ||
15 | #endif | ||
16 | |||
13 | typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ | 17 | typedef unsigned char NvV8; /* "void": enumerated or multiple fields */ |
14 | typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ | 18 | typedef unsigned short NvV16; /* "void": enumerated or multiple fields */ |
15 | typedef unsigned char NvU8; /* 0 to 255 */ | 19 | typedef unsigned char NvU8; /* 0 to 255 */ |
@@ -296,9 +300,14 @@ static struct notifier_block nvidia_going = { | |||
296 | }; | 300 | }; |
297 | #endif | 301 | #endif |
298 | 302 | ||
303 | |||
304 | |||
305 | static int init_nv_device_reg(void); | ||
306 | static int shutdown_nv_device_reg(void); | ||
307 | |||
308 | |||
299 | int init_nvidia_info(void) | 309 | int init_nvidia_info(void) |
300 | { | 310 | { |
301 | #if 1 | ||
302 | mutex_lock(&module_mutex); | 311 | mutex_lock(&module_mutex); |
303 | nvidia_mod = find_module("nvidia"); | 312 | nvidia_mod = find_module("nvidia"); |
304 | mutex_unlock(&module_mutex); | 313 | mutex_unlock(&module_mutex); |
@@ -315,13 +324,14 @@ int init_nvidia_info(void) | |||
315 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); | 324 | TRACE("%s : Could not find NVIDIA module! Loaded?\n", __FUNCTION__); |
316 | return(-1); | 325 | return(-1); |
317 | } | 326 | } |
318 | #endif | ||
319 | } | 327 | } |
320 | 328 | ||
321 | void shutdown_nvidia_info(void) | 329 | void shutdown_nvidia_info(void) |
322 | { | 330 | { |
323 | nvidia_mod = NULL; | 331 | nvidia_mod = NULL; |
324 | mb(); | 332 | mb(); |
333 | |||
334 | shutdown_nv_device_reg(); | ||
325 | } | 335 | } |
326 | 336 | ||
327 | /* works with pointers to static data inside the module too. */ | 337 | /* works with pointers to static data inside the module too. */ |
@@ -351,20 +361,6 @@ u32 get_tasklet_nv_device_num(const struct tasklet_struct *t) | |||
351 | BUG_ON(linuxstate->device_num >= NV_DEVICE_NUM); | 361 | BUG_ON(linuxstate->device_num >= NV_DEVICE_NUM); |
352 | 362 | ||
353 | return(linuxstate->device_num); | 363 | return(linuxstate->device_num); |
354 | |||
355 | //int DEVICE_NUM_OFFSET = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); | ||
356 | |||
357 | #if 0 | ||
358 | // offset determined though observed behavior of the NV driver. | ||
359 | //const int DEVICE_NUM_OFFSET = 0x480; // CUDA 4.0 RC1 | ||
360 | //const int DEVICE_NUM_OFFSET = 0x510; // CUDA 4.0 RC2 | ||
361 | |||
362 | void* state = (void*)(t->data); | ||
363 | void* device_num_ptr = state + DEVICE_NUM_OFFSET; | ||
364 | |||
365 | //dump_nvidia_info(t); | ||
366 | return(*((u32*)device_num_ptr)); | ||
367 | #endif | ||
368 | } | 364 | } |
369 | 365 | ||
370 | u32 get_work_nv_device_num(const struct work_struct *t) | 366 | u32 get_work_nv_device_num(const struct work_struct *t) |
@@ -377,203 +373,452 @@ u32 get_work_nv_device_num(const struct work_struct *t) | |||
377 | } | 373 | } |
378 | 374 | ||
379 | 375 | ||
376 | /////////////////////////////////////////////////////////////////////////////// | ||
377 | /////////////////////////////////////////////////////////////////////////////// | ||
378 | /////////////////////////////////////////////////////////////////////////////// | ||
379 | |||
380 | |||
380 | typedef struct { | 381 | typedef struct { |
381 | raw_spinlock_t lock; | 382 | raw_spinlock_t lock; /* not needed if GPU not shared between scheduling domains */ |
382 | int nr_owners; | 383 | struct binheap owners; |
383 | struct task_struct* max_prio_owner; | 384 | |
384 | struct task_struct* owners[NV_MAX_SIMULT_USERS]; | 385 | #ifdef CONFIG_LITMUS_SOFTIRQD |
386 | klmirqd_callback_t callback; | ||
387 | struct task_struct* thread; | ||
388 | unsigned int ready:1; /* todo: make threads check for the ready flag */ ||
389 | #endif | ||
385 | }nv_device_registry_t; | 390 | }nv_device_registry_t; |
386 | 391 | ||
392 | |||
387 | static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM]; | 393 | static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM]; |
388 | 394 | ||
389 | int init_nv_device_reg(void) | 395 | |
396 | |||
397 | |||
398 | |||
399 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
400 | static int nvidia_klmirqd_cb(void *arg) | ||
390 | { | 401 | { |
391 | int i; | 402 | unsigned long flags; |
403 | int reg_device_id = (int)(long long)(arg); | ||
404 | nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | ||
392 | 405 | ||
393 | memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); | 406 | TRACE("nv klmirqd callback for GPU %d\n", reg_device_id); |
394 | 407 | ||
395 | for(i = 0; i < NV_DEVICE_NUM; ++i) | 408 | raw_spin_lock_irqsave(®->lock, flags); |
396 | { | 409 | reg->thread = current; |
397 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); | 410 | reg->ready = 1; |
398 | } | 411 | raw_spin_unlock_irqrestore(®->lock, flags); |
399 | 412 | ||
400 | return(1); | 413 | return 0; |
401 | } | 414 | } |
415 | #endif | ||
402 | 416 | ||
403 | /* use to get nv_device_id by given owner. | 417 | |
404 | (if return -1, can't get the assocaite device id)*/ | 418 | static int gpu_owner_max_priority_order(struct binheap_node *a, |
405 | /* | 419 | struct binheap_node *b) |
406 | int get_nv_device_id(struct task_struct* owner) | ||
407 | { | 420 | { |
408 | int i; | 421 | struct task_struct *d_a = container_of(binheap_entry(a, struct rt_param, gpu_owner_node), |
409 | if(!owner) | 422 | struct task_struct, rt_param); |
410 | { | 423 | struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, gpu_owner_node), |
411 | return(-1); | 424 | struct task_struct, rt_param); |
412 | } | 425 | |
413 | for(i = 0; i < NV_DEVICE_NUM; ++i) | 426 | BUG_ON(!d_a); |
414 | { | 427 | BUG_ON(!d_b); |
415 | if(NV_DEVICE_REG[i].device_owner == owner) | 428 | |
416 | return(i); | 429 | return litmus->compare(d_a, d_b); |
417 | } | ||
418 | return(-1); | ||
419 | } | 430 | } |
420 | */ | ||
421 | 431 | ||
422 | static struct task_struct* find_hp_owner(nv_device_registry_t *reg, struct task_struct *skip) { | 432 | static int init_nv_device_reg(void) |
433 | { | ||
423 | int i; | 434 | int i; |
424 | struct task_struct *found = NULL; | 435 | |
425 | for(i = 0; i < reg->nr_owners; ++i) { | 436 | #ifdef CONFIG_LITMUS_SOFTIRQD |
426 | if(reg->owners[i] && reg->owners[i] != skip && litmus->compare(reg->owners[i], found)) { | 437 | if (!klmirqd_is_ready()) { |
427 | found = reg->owners[i]; | 438 | TRACE("klmirqd is not ready!\n"); |
439 | return 0; | ||
440 | } | ||
441 | #endif | ||
442 | |||
443 | memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); | ||
444 | mb(); | ||
445 | |||
446 | |||
447 | for(i = 0; i < NV_DEVICE_NUM; ++i) { | ||
448 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); | ||
449 | INIT_BINHEAP_HANDLE(&NV_DEVICE_REG[i].owners, gpu_owner_max_priority_order); | ||
450 | |||
451 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
452 | // TODO: Make the spawning of this thread a litmus plugin call. ||
453 | NV_DEVICE_REG[i].callback.func = nvidia_klmirqd_cb; | ||
454 | NV_DEVICE_REG[i].callback.arg = (void*)(long long)(i); | ||
455 | mb(); | ||
456 | |||
457 | if(launch_klmirqd_thread(0, &NV_DEVICE_REG[i].callback) != 0) { | ||
458 | TRACE("Failed to create klmirqd thread for GPU %d\n", i); | ||
428 | } | 459 | } |
460 | #endif | ||
429 | } | 461 | } |
430 | return found; | 462 | |
463 | return(1); | ||
431 | } | 464 | } |
432 | 465 | ||
433 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 466 | |
434 | void pai_check_priority_increase(struct task_struct *t, int reg_device_id) | 467 | /* The following code is full of nasty race conditions... */ |
468 | /* spawning of klmirqd threads can race with init_nv_device_reg()!!!! */ ||
469 | static int shutdown_nv_device_reg(void) | ||
435 | { | 470 | { |
436 | unsigned long flags; | 471 | TRACE("Shutting down nv device registration.\n"); |
437 | nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | 472 | |
473 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
474 | { | ||
475 | int i; | ||
476 | nv_device_registry_t *reg; | ||
438 | 477 | ||
439 | if(reg->max_prio_owner != t) { | 478 | for (i = 0; i < NV_DEVICE_NUM; ++i) { |
440 | 479 | ||
441 | raw_spin_lock_irqsave(®->lock, flags); | 480 | TRACE("Shutting down GPU %d.\n", i); |
442 | 481 | ||
443 | if(reg->max_prio_owner != t) { | 482 | reg = &NV_DEVICE_REG[i]; |
444 | if(litmus->compare(t, reg->max_prio_owner)) { | 483 | |
445 | litmus->change_prio_pai_tasklet(reg->max_prio_owner, t); | 484 | if (reg->thread && reg->ready) { |
446 | reg->max_prio_owner = t; | 485 | kill_klmirqd_thread(reg->thread); |
486 | |||
487 | /* assume that all goes according to plan... */ | ||
488 | reg->thread = NULL; | ||
489 | reg->ready = 0; | ||
447 | } | 490 | } |
448 | } | ||
449 | 491 | ||
450 | raw_spin_unlock_irqrestore(®->lock, flags); | 492 | while (!binheap_empty(®->owners)) { |
493 | binheap_delete_root(®->owners, struct rt_param, gpu_owner_node); | ||
494 | } | ||
495 | } | ||
451 | } | 496 | } |
497 | #endif | ||
498 | |||
499 | return(1); | ||
452 | } | 500 | } |
453 | 501 | ||
454 | 502 | ||
455 | void pai_check_priority_decrease(struct task_struct *t, int reg_device_id) | 503 | /* use to get the owner of nv_device_id. */ |
504 | struct task_struct* get_nv_max_device_owner(u32 target_device_id) | ||
456 | { | 505 | { |
457 | unsigned long flags; | 506 | struct task_struct *owner = NULL; |
458 | nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | 507 | nv_device_registry_t *reg; |
459 | 508 | ||
460 | if(reg->max_prio_owner == t) { | 509 | BUG_ON(target_device_id >= NV_DEVICE_NUM); |
461 | 510 | ||
462 | raw_spin_lock_irqsave(®->lock, flags); | 511 | reg = &NV_DEVICE_REG[target_device_id]; |
463 | 512 | ||
464 | if(reg->max_prio_owner == t) { | 513 | if (!binheap_empty(®->owners)) { |
465 | reg->max_prio_owner = find_hp_owner(reg, NULL); | 514 | struct task_struct *hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), |
466 | if(reg->max_prio_owner != t) { | 515 | struct task_struct, rt_param); |
467 | litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); | 516 | TRACE_CUR("hp: %s/%d\n", hp->comm, hp->pid); owner = hp; |
468 | } | 517 | } |
469 | } | ||
470 | 518 | ||
471 | raw_spin_unlock_irqrestore(®->lock, flags); | 519 | return(owner); |
520 | } | ||
521 | |||
522 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
523 | struct task_struct* get_nv_klmirqd_thread(u32 target_device_id) | ||
524 | { | ||
525 | struct task_struct *klmirqd = NULL; | ||
526 | nv_device_registry_t *reg; | ||
527 | |||
528 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
529 | |||
530 | reg = &NV_DEVICE_REG[target_device_id]; | ||
531 | |||
532 | if(likely(reg->ready)) { | ||
533 | klmirqd = reg->thread; | ||
472 | } | 534 | } |
535 | |||
536 | return klmirqd; | ||
473 | } | 537 | } |
474 | #endif | 538 | #endif |
475 | 539 | ||
476 | static int __reg_nv_device(int reg_device_id, struct task_struct *t) | 540 | |
541 | |||
542 | |||
543 | |||
544 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
545 | static int gpu_klmirqd_increase_priority(struct task_struct *klmirqd, struct task_struct *hp) | ||
477 | { | 546 | { |
478 | int ret = 0; | 547 | int retval = 0; |
479 | int i; | ||
480 | struct task_struct *old_max = NULL; | ||
481 | unsigned long flags; | ||
482 | nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | ||
483 | 548 | ||
484 | if(test_bit(reg_device_id, &tsk_rt(t)->held_gpus)) { | 549 | TRACE_CUR("Increasing priority of nv klmirqd: %s/%d.\n", klmirqd->comm, klmirqd->pid); |
485 | // TODO: check if taks is already registered. | ||
486 | return ret; // assume already registered. | ||
487 | } | ||
488 | 550 | ||
551 | /* the klmirqd thread should never attempt to hold a litmus-level real-time lock, ||
552 | * so nested inheritance support is not required */ ||
553 | retval = litmus->__increase_prio(klmirqd, hp); | ||
489 | 554 | ||
490 | raw_spin_lock_irqsave(®->lock, flags); | 555 | return retval; |
556 | } | ||
557 | |||
558 | static int gpu_klmirqd_decrease_priority(struct task_struct *klmirqd, struct task_struct *hp) | ||
559 | { | ||
560 | int retval = 0; | ||
491 | 561 | ||
492 | if(reg->nr_owners < NV_MAX_SIMULT_USERS) { | 562 | TRACE_CUR("Decreasing priority of nv klmirqd: %s/%d.\n", klmirqd->comm, klmirqd->pid); |
493 | TRACE_TASK(t, "registers GPU %d\n", reg_device_id); | ||
494 | for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { | ||
495 | if(reg->owners[i] == NULL) { | ||
496 | reg->owners[i] = t; | ||
497 | 563 | ||
498 | //if(edf_higher_prio(t, reg->max_prio_owner)) { | 564 | /* the klmirqd thread should never attempt to hold a litmus-level real-time lock, |
499 | if(litmus->compare(t, reg->max_prio_owner)) { | 565 | * so nested inheritance support is not required */ |
500 | old_max = reg->max_prio_owner; | 566 | retval = litmus->__decrease_prio(klmirqd, hp); |
501 | reg->max_prio_owner = t; | ||
502 | 567 | ||
503 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 568 | return retval; |
504 | litmus->change_prio_pai_tasklet(old_max, t); | 569 | } |
505 | #endif | 570 | #endif |
506 | } | 571 | |
572 | |||
573 | |||
574 | |||
575 | /* call when an aux_owner becomes real-time */ | ||
576 | long enable_gpu_owner(struct task_struct *t) | ||
577 | { | ||
578 | long retval = 0; | ||
579 | // unsigned long flags; | ||
580 | int gpu; | ||
581 | nv_device_registry_t *reg; | ||
507 | 582 | ||
508 | #ifdef CONFIG_LITMUS_SOFTIRQD | 583 | #ifdef CONFIG_LITMUS_SOFTIRQD |
509 | down_and_set_stat(t, HELD, &tsk_rt(t)->klmirqd_sem); | 584 | struct task_struct *hp; |
510 | #endif | 585 | #endif |
511 | ++(reg->nr_owners); | ||
512 | 586 | ||
513 | break; | 587 | if (!tsk_rt(t)->held_gpus) { |
514 | } | 588 | TRACE_CUR("task %s/%d does not hold any GPUs\n", t->comm, t->pid); |
515 | } | 589 | return -1; |
516 | } | 590 | } |
517 | else | 591 | |
518 | { | 592 | BUG_ON(!is_realtime(t)); |
519 | TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); | 593 | |
520 | //ret = -EBUSY; | 594 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus) * BITS_PER_BYTE); |
595 | |||
596 | if (binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
597 | TRACE_CUR("task %s/%d is already active on GPU %d\n", t->comm, t->pid, gpu); | ||
598 | goto out; | ||
521 | } | 599 | } |
522 | 600 | ||
523 | raw_spin_unlock_irqrestore(®->lock, flags); | 601 | /* update the registration (and maybe klmirqd) */ |
602 | reg = &NV_DEVICE_REG[gpu]; | ||
524 | 603 | ||
525 | __set_bit(reg_device_id, &tsk_rt(t)->held_gpus); | 604 | // raw_spin_lock_irqsave(®->lock, flags); |
526 | 605 | ||
527 | return(ret); | 606 | binheap_add(&tsk_rt(t)->gpu_owner_node, ®->owners, |
607 | struct rt_param, gpu_owner_node); | ||
608 | |||
609 | |||
610 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
611 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
612 | struct task_struct, rt_param); | ||
613 | |||
614 | if (hp == t) { | ||
615 | /* we're the new hp */ | ||
616 | TRACE_CUR("%s/%d is new hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
617 | |||
618 | retval = gpu_klmirqd_increase_priority(reg->thread, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | ||
619 | } | ||
620 | #endif | ||
621 | |||
622 | // raw_spin_unlock_irqsave(®->lock, flags); | ||
623 | |||
624 | out: | ||
625 | return retval; | ||
528 | } | 626 | } |
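enable_gpu_owner() only touches klmirqd inheritance when the new owner lands at the root of the per-GPU heap. The underlying rule, as a compact sketch (the refresh_gpu_klmirqd_prio() helper is hypothetical and ignores the registry lock):

/* Hypothetical helper: make the GPU's klmirqd thread track the effective
 * priority of whichever task currently tops the owner heap. */
static void refresh_gpu_klmirqd_prio(nv_device_registry_t *reg)
{
	if (!binheap_empty(&reg->owners)) {
		struct task_struct *hp = container_of(
			binheap_top_entry(&reg->owners, struct rt_param, gpu_owner_node),
			struct task_struct, rt_param);
		gpu_klmirqd_increase_priority(reg->thread,
			(tsk_rt(hp)->inh_task) ? tsk_rt(hp)->inh_task : hp);
	}
	else {
		gpu_klmirqd_decrease_priority(reg->thread, NULL);
	}
}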
529 | 627 | ||
530 | static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t) | 628 | /* call when an aux_owner exits real-time */ |
629 | long disable_gpu_owner(struct task_struct *t) | ||
531 | { | 630 | { |
532 | int ret = 0; | 631 | long retval = 0; |
533 | int i; | 632 | // unsigned long flags; |
534 | unsigned long flags; | 633 | int gpu; |
535 | nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; | 634 | nv_device_registry_t *reg; |
536 | 635 | ||
537 | #ifdef CONFIG_LITMUS_SOFTIRQD | 636 | #ifdef CONFIG_LITMUS_SOFTIRQD |
538 | struct task_struct* klmirqd_th = get_klmirqd(de_reg_device_id); | 637 | struct task_struct *hp; |
638 | struct task_struct *new_hp = NULL; | ||
539 | #endif | 639 | #endif |
540 | 640 | ||
541 | if(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)) { | 641 | if (!tsk_rt(t)->held_gpus) { |
542 | return ret; | 642 | TRACE_CUR("task %s/%d does not hold any GPUs\n", t->comm, t->pid); |
643 | return -1; | ||
543 | } | 644 | } |
544 | 645 | ||
545 | raw_spin_lock_irqsave(®->lock, flags); | 646 | BUG_ON(!is_realtime(t)); |
546 | 647 | ||
547 | TRACE_TASK(t, "unregisters GPU %d\n", de_reg_device_id); | 648 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); |
649 | |||
650 | if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
651 | TRACE_CUR("task %s/%d is not active on GPU %d\n", t->comm, t->pid, gpu); | ||
652 | goto out; | ||
653 | } | ||
654 | |||
655 | TRACE_CUR("task %s/%d exiting from GPU %d.\n", t->comm, t->pid, gpu); | ||
656 | |||
657 | |||
658 | reg = &NV_DEVICE_REG[gpu]; | ||
659 | |||
660 | // raw_spin_lock_irqsave(®->lock, flags); | ||
548 | 661 | ||
549 | for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { | ||
550 | if(reg->owners[i] == t) { | ||
551 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
552 | flush_pending(klmirqd_th, t); | ||
553 | #endif | ||
554 | if(reg->max_prio_owner == t) { | ||
555 | reg->max_prio_owner = find_hp_owner(reg, t); | ||
556 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
557 | litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); | ||
558 | #endif | ||
559 | } | ||
560 | 662 | ||
561 | #ifdef CONFIG_LITMUS_SOFTIRQD | 663 | #ifdef CONFIG_LITMUS_SOFTIRQD |
562 | up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klmirqd_sem); | 664 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), |
665 | struct task_struct, rt_param); | ||
666 | |||
667 | binheap_delete(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
668 | |||
669 | |||
670 | if (!binheap_empty(®->owners)) { | ||
671 | new_hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
672 | struct task_struct, rt_param); | ||
673 | } | ||
674 | |||
675 | if (hp == t && new_hp != t) { | ||
676 | struct task_struct *to_inh = NULL; | ||
677 | |||
678 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
679 | |||
680 | if (new_hp) { | ||
681 | to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp; | ||
682 | } | ||
683 | |||
684 | retval = gpu_klmirqd_decrease_priority(reg->thread, to_inh); | ||
685 | } | ||
686 | #else | ||
687 | binheap_delete(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
563 | #endif | 688 | #endif |
564 | 689 | ||
565 | reg->owners[i] = NULL; | 690 | // raw_spin_unlock_irqsave(®->lock, flags); |
566 | --(reg->nr_owners); | 691 | |
692 | |||
693 | out: | ||
694 | return retval; | ||
695 | } | ||
696 | |||
697 | |||
698 | |||
699 | |||
700 | |||
701 | |||
702 | |||
703 | |||
704 | |||
705 | |||
706 | int gpu_owner_increase_priority(struct task_struct *t) | ||
707 | { | ||
708 | int retval = 0; | ||
709 | int gpu; | ||
710 | nv_device_registry_t *reg; | ||
711 | |||
712 | struct task_struct *hp = NULL; | ||
713 | struct task_struct *hp_eff = NULL; | ||
714 | |||
715 | BUG_ON(!is_realtime(t)); | ||
716 | BUG_ON(!tsk_rt(t)->held_gpus); | ||
717 | |||
718 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus) * BITS_PER_BYTE); ||
719 | |||
720 | if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
721 | WARN_ON(!is_running(t)); | ||
722 | TRACE_CUR("gpu klmirqd may not inherit from %s/%d on GPU %d\n", | ||
723 | t->comm, t->pid, gpu); | ||
724 | goto out; | ||
725 | } | ||
726 | |||
727 | |||
728 | |||
729 | |||
730 | TRACE_CUR("task %s/%d on GPU %d increasing priority.\n", t->comm, t->pid, gpu); | ||
731 | reg = &NV_DEVICE_REG[gpu]; | ||
732 | |||
733 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
734 | struct task_struct, rt_param); | ||
735 | hp_eff = effective_priority(hp); | ||
736 | |||
737 | if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */ | ||
738 | binheap_decrease(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
739 | } | ||
740 | |||
741 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
742 | struct task_struct, rt_param); | ||
743 | |||
744 | if (effective_priority(hp) != hp_eff) { /* the eff. prio. of hp has changed */ | ||
745 | hp_eff = effective_priority(hp); | ||
746 | TRACE_CUR("%s/%d is new hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
747 | |||
748 | retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff); | ||
749 | } | ||
750 | |||
751 | out: | ||
752 | return retval; | ||
753 | } | ||
754 | |||
755 | |||
756 | int gpu_owner_decrease_priority(struct task_struct *t) | ||
757 | { | ||
758 | int retval = 0; | ||
759 | int gpu; | ||
760 | nv_device_registry_t *reg; | ||
761 | |||
762 | struct task_struct *hp = NULL; | ||
763 | struct task_struct *hp_eff = NULL; | ||
567 | 764 | ||
568 | break; | 765 | BUG_ON(!is_realtime(t)); |
766 | BUG_ON(!tsk_rt(t)->held_gpus); | ||
767 | |||
768 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus) * BITS_PER_BYTE); ||
769 | |||
770 | if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { ||
771 | WARN_ON(!is_running(t)); | ||
772 | TRACE_CUR("aux tasks may not inherit from %s/%d on GPU %d\n", | ||
773 | t->comm, t->pid, gpu); | ||
774 | goto out; | ||
775 | } | ||
776 | |||
777 | TRACE_CUR("task %s/%d on GPU %d decresing priority.\n", t->comm, t->pid, gpu); | ||
778 | reg = &NV_DEVICE_REG[gpu]; | ||
779 | |||
780 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
781 | struct task_struct, rt_param); | ||
782 | hp_eff = effective_priority(hp); | ||
783 | binheap_delete(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
784 | binheap_add(&tsk_rt(t)->gpu_owner_node, ®->owners, | ||
785 | struct rt_param, gpu_owner_node); | ||
786 | |||
787 | if (hp == t) { /* t was originally the hp */ | ||
788 | struct task_struct *new_hp = | ||
789 | container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
790 | struct task_struct, rt_param); | ||
791 | if (effective_priority(new_hp) != hp_eff) { /* eff prio. of hp has changed */ | ||
792 | hp_eff = effective_priority(new_hp); | ||
793 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
794 | retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff); | ||
569 | } | 795 | } |
570 | } | 796 | } |
571 | 797 | ||
572 | raw_spin_unlock_irqrestore(®->lock, flags); | 798 | out: |
799 | return retval; | ||
800 | } | ||
801 | |||
802 | |||
803 | |||
804 | |||
805 | |||
806 | |||
807 | |||
808 | |||
809 | |||
810 | static int __reg_nv_device(int reg_device_id, struct task_struct *t) | ||
811 | { | ||
812 | __set_bit(reg_device_id, &tsk_rt(t)->held_gpus); | ||
813 | |||
814 | return(0); | ||
815 | } | ||
573 | 816 | ||
817 | static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t) | ||
818 | { | ||
574 | __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus); | 819 | __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus); |
575 | 820 | ||
576 | return(ret); | 821 | return(0); |
577 | } | 822 | } |
578 | 823 | ||
579 | 824 | ||
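The two priority-change paths above reposition an owner differently: an increased priority can only move the node toward the root of the max-heap, so an in-place binheap_decrease() (a sift toward the root) suffices, while a decreased priority may push the node arbitrarily far down and therefore requires delete-and-reinsert. A minimal sketch of that rule (reposition_gpu_owner() is a hypothetical wrapper, shown without locking):

/* Hypothetical wrapper summarizing the repositioning rule used by
 * gpu_owner_increase_priority() / gpu_owner_decrease_priority(). */
static void reposition_gpu_owner(struct task_struct *t,
				 nv_device_registry_t *reg,
				 int priority_raised)
{
	if (priority_raised) {
		/* node can only move toward the root: sift it up in place */
		binheap_decrease(&tsk_rt(t)->gpu_owner_node, &reg->owners);
	}
	else {
		/* node may sink: remove and reinsert to restore heap order */
		binheap_delete(&tsk_rt(t)->gpu_owner_node, &reg->owners);
		binheap_add(&tsk_rt(t)->gpu_owner_node, &reg->owners,
			    struct rt_param, gpu_owner_node);
	}
}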
@@ -596,55 +841,213 @@ int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t) | |||
596 | return(ret); | 841 | return(ret); |
597 | } | 842 | } |
598 | 843 | ||
599 | /* use to get the owner of nv_device_id. */ | ||
600 | struct task_struct* get_nv_max_device_owner(u32 target_device_id) | ||
601 | { | ||
602 | struct task_struct *owner = NULL; | ||
603 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
604 | owner = NV_DEVICE_REG[target_device_id].max_prio_owner; | ||
605 | return(owner); | ||
606 | } | ||
607 | 844 | ||
608 | void lock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
609 | { | ||
610 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
611 | 845 | ||
612 | if(in_interrupt()) | ||
613 | TRACE("Locking registry for %d.\n", target_device_id); | ||
614 | else | ||
615 | TRACE_CUR("Locking registry for %d.\n", target_device_id); | ||
616 | 846 | ||
617 | raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
618 | } | ||
619 | 847 | ||
620 | void unlock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
621 | { | ||
622 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
623 | 848 | ||
624 | if(in_interrupt()) | ||
625 | TRACE("Unlocking registry for %d.\n", target_device_id); | ||
626 | else | ||
627 | TRACE_CUR("Unlocking registry for %d.\n", target_device_id); | ||
628 | 849 | ||
629 | raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
630 | } | ||
631 | 850 | ||
632 | 851 | ||
633 | //void increment_nv_int_count(u32 device) | 852 | |
853 | |||
854 | |||
855 | |||
856 | |||
857 | |||
858 | |||
859 | |||
860 | |||
861 | |||
862 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
863 | //void pai_check_priority_increase(struct task_struct *t, int reg_device_id) | ||
864 | //{ | ||
865 | // unsigned long flags; | ||
866 | // nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | ||
867 | // | ||
868 | // | ||
869 | // | ||
870 | // if(reg->max_prio_owner != t) { | ||
871 | // | ||
872 | // raw_spin_lock_irqsave(®->lock, flags); | ||
873 | // | ||
874 | // if(reg->max_prio_owner != t) { | ||
875 | // if(litmus->compare(t, reg->max_prio_owner)) { | ||
876 | // litmus->change_prio_pai_tasklet(reg->max_prio_owner, t); | ||
877 | // reg->max_prio_owner = t; | ||
878 | // } | ||
879 | // } | ||
880 | // | ||
881 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
882 | // } | ||
883 | //} | ||
884 | // | ||
885 | // | ||
886 | //void pai_check_priority_decrease(struct task_struct *t, int reg_device_id) | ||
634 | //{ | 887 | //{ |
635 | // unsigned long flags; | 888 | // unsigned long flags; |
636 | // struct task_struct* owner; | 889 | // nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; |
637 | // | 890 | // |
638 | // lock_nv_registry(device, &flags); | 891 | // if(reg->max_prio_owner == t) { |
639 | // | 892 | // |
640 | // owner = NV_DEVICE_REG[device].device_owner; | 893 | // raw_spin_lock_irqsave(®->lock, flags); |
641 | // if(owner) | 894 | // |
895 | // if(reg->max_prio_owner == t) { | ||
896 | // reg->max_prio_owner = find_hp_owner(reg, NULL); | ||
897 | // if(reg->max_prio_owner != t) { | ||
898 | // litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); | ||
899 | // } | ||
900 | // } | ||
901 | // | ||
902 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
903 | // } | ||
904 | //} | ||
905 | #endif | ||
906 | |||
907 | |||
908 | |||
909 | |||
910 | |||
911 | //static int __reg_nv_device(int reg_device_id, struct task_struct *t) | ||
912 | //{ | ||
913 | // int ret = 0; | ||
914 | // int i; | ||
915 | // struct task_struct *old_max = NULL; | ||
916 | // | ||
917 | // | ||
918 | // raw_spin_lock_irqsave(®->lock, flags); | ||
919 | // | ||
920 | // if(reg->nr_owners < NV_MAX_SIMULT_USERS) { | ||
921 | // TRACE_TASK(t, "registers GPU %d\n", reg_device_id); | ||
922 | // for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { | ||
923 | // if(reg->owners[i] == NULL) { | ||
924 | // reg->owners[i] = t; | ||
925 | // | ||
926 | // //if(edf_higher_prio(t, reg->max_prio_owner)) { | ||
927 | // if(litmus->compare(t, reg->max_prio_owner)) { | ||
928 | // old_max = reg->max_prio_owner; | ||
929 | // reg->max_prio_owner = t; | ||
930 | // | ||
931 | //#ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
932 | // litmus->change_prio_pai_tasklet(old_max, t); | ||
933 | //#endif | ||
934 | // } | ||
935 | // | ||
936 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
937 | // down_and_set_stat(t, HELD, &tsk_rt(t)->klmirqd_sem); | ||
938 | //#endif | ||
939 | // ++(reg->nr_owners); | ||
940 | // | ||
941 | // break; | ||
942 | // } | ||
943 | // } | ||
944 | // } | ||
945 | // else | ||
642 | // { | 946 | // { |
643 | // atomic_inc(&tsk_rt(owner)->nv_int_count); | 947 | // TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); |
948 | // //ret = -EBUSY; | ||
644 | // } | 949 | // } |
645 | // | 950 | // |
646 | // unlock_nv_registry(device, &flags); | 951 | // raw_spin_unlock_irqrestore(®->lock, flags); |
952 | // | ||
953 | // __set_bit(reg_device_id, &tsk_rt(t)->held_gpus); | ||
954 | // | ||
955 | // return(ret); | ||
956 | //} | ||
957 | // | ||
958 | //static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t) | ||
959 | //{ | ||
960 | // int ret = 0; | ||
961 | // int i; | ||
962 | // unsigned long flags; | ||
963 | // nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; | ||
964 | // | ||
965 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
966 | // struct task_struct* klmirqd_th = get_klmirqd(de_reg_device_id); | ||
967 | //#endif | ||
968 | // | ||
969 | // if(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)) { | ||
970 | // return ret; | ||
971 | // } | ||
972 | // | ||
973 | // raw_spin_lock_irqsave(®->lock, flags); | ||
974 | // | ||
975 | // TRACE_TASK(t, "unregisters GPU %d\n", de_reg_device_id); | ||
976 | // | ||
977 | // for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { | ||
978 | // if(reg->owners[i] == t) { | ||
979 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
980 | // flush_pending(klmirqd_th, t); | ||
981 | //#endif | ||
982 | // if(reg->max_prio_owner == t) { | ||
983 | // reg->max_prio_owner = find_hp_owner(reg, t); | ||
984 | //#ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
985 | // litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); | ||
986 | //#endif | ||
987 | // } | ||
988 | // | ||
989 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
990 | // up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klmirqd_sem); | ||
991 | //#endif | ||
992 | // | ||
993 | // reg->owners[i] = NULL; | ||
994 | // --(reg->nr_owners); | ||
995 | // | ||
996 | // break; | ||
997 | // } | ||
998 | // } | ||
999 | // | ||
1000 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
1001 | // | ||
1002 | // __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus); | ||
1003 | // | ||
1004 | // return(ret); | ||
1005 | //} | ||
1006 | // | ||
1007 | // | ||
1008 | //int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t) | ||
1009 | //{ | ||
1010 | // int ret; | ||
1011 | // | ||
1012 | // if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0)) | ||
1013 | // { | ||
1014 | // if(reg_action) | ||
1015 | // ret = __reg_nv_device(reg_device_id, t); | ||
1016 | // else | ||
1017 | // ret = __clear_reg_nv_device(reg_device_id, t); | ||
1018 | // } | ||
1019 | // else | ||
1020 | // { | ||
1021 | // ret = -ENODEV; | ||
1022 | // } | ||
1023 | // | ||
1024 | // return(ret); | ||
1025 | //} | ||
1026 | |||
1027 | |||
1028 | |||
1029 | //void lock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
1030 | //{ | ||
1031 | // BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
1032 | // | ||
1033 | // if(in_interrupt()) | ||
1034 | // TRACE("Locking registry for %d.\n", target_device_id); | ||
1035 | // else | ||
1036 | // TRACE_CUR("Locking registry for %d.\n", target_device_id); | ||
1037 | // | ||
1038 | // raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
1039 | //} | ||
1040 | // | ||
1041 | //void unlock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
1042 | //{ | ||
1043 | // BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
1044 | // | ||
1045 | // if(in_interrupt()) | ||
1046 | // TRACE("Unlocking registry for %d.\n", target_device_id); | ||
1047 | // else | ||
1048 | // TRACE_CUR("Unlocking registry for %d.\n", target_device_id); | ||
1049 | // | ||
1050 | // raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
647 | //} | 1051 | //} |
648 | //EXPORT_SYMBOL(increment_nv_int_count); | ||
649 | 1052 | ||
650 | 1053 | ||
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c index 44c8336c5061..84aafca78cde 100644 --- a/litmus/sched_cedf.c +++ b/litmus/sched_cedf.c | |||
@@ -44,6 +44,7 @@ | |||
44 | 44 | ||
45 | #include <litmus/bheap.h> | 45 | #include <litmus/bheap.h> |
46 | #include <litmus/binheap.h> | 46 | #include <litmus/binheap.h> |
47 | #include <litmus/trace.h> | ||
47 | 48 | ||
48 | #ifdef CONFIG_LITMUS_LOCKING | 49 | #ifdef CONFIG_LITMUS_LOCKING |
49 | #include <litmus/kfmlp_lock.h> | 50 | #include <litmus/kfmlp_lock.h> |
@@ -75,7 +76,6 @@ | |||
75 | 76 | ||
76 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 77 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
77 | #include <linux/interrupt.h> | 78 | #include <linux/interrupt.h> |
78 | #include <litmus/trace.h> | ||
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | #ifdef CONFIG_LITMUS_NVIDIA | 81 | #ifdef CONFIG_LITMUS_NVIDIA |
@@ -118,14 +118,6 @@ DEFINE_PER_CPU(cpu_entry_t, cedf_cpu_entries); | |||
118 | #define test_will_schedule(cpu) \ | 118 | #define test_will_schedule(cpu) \ |
119 | (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) | 119 | (atomic_read(&per_cpu(cedf_cpu_entries, cpu).will_schedule)) |
120 | 120 | ||
121 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
122 | struct tasklet_head | ||
123 | { | ||
124 | struct tasklet_struct *head; | ||
125 | struct tasklet_struct **tail; | ||
126 | }; | ||
127 | #endif | ||
128 | |||
129 | /* | 121 | /* |
130 | * In C-EDF there is a cedf domain _per_ cluster | 122 | * In C-EDF there is a cedf domain _per_ cluster |
131 | * The number of clusters is dynamically determined accordingly to the | 123 | * The number of clusters is dynamically determined accordingly to the |
@@ -1038,6 +1030,13 @@ static void cedf_task_wake_up(struct task_struct *task) | |||
1038 | } | 1030 | } |
1039 | #endif | 1031 | #endif |
1040 | 1032 | ||
1033 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1034 | if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) { | ||
1035 | TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid); | ||
1036 | disable_gpu_owner(task); | ||
1037 | } | ||
1038 | #endif | ||
1039 | |||
1041 | cedf_job_arrival(task); | 1040 | cedf_job_arrival(task); |
1042 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 1041 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
1043 | } | 1042 | } |
@@ -1064,6 +1063,14 @@ static void cedf_task_block(struct task_struct *t) | |||
1064 | } | 1063 | } |
1065 | #endif | 1064 | #endif |
1066 | 1065 | ||
1066 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1067 | if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) { | ||
1068 | |||
1069 | TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); | ||
1070 | enable_gpu_owner(t); | ||
1071 | } | ||
1072 | #endif | ||
1073 | |||
1067 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); | 1074 | raw_spin_unlock_irqrestore(&cluster->cluster_lock, flags); |
1068 | 1075 | ||
1069 | BUG_ON(!is_realtime(t)); | 1076 | BUG_ON(!is_realtime(t)); |
@@ -1092,6 +1099,13 @@ static void cedf_task_exit(struct task_struct * t) | |||
1092 | } | 1099 | } |
1093 | #endif | 1100 | #endif |
1094 | 1101 | ||
1102 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1103 | /* make sure we clean up on our way out */ | ||
1104 | if(tsk_rt(t)->held_gpus) { | ||
1105 | disable_gpu_owner(t); | ||
1106 | } | ||
1107 | #endif | ||
1108 | |||
1095 | unlink(t); | 1109 | unlink(t); |
1096 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | 1110 | if (tsk_rt(t)->scheduled_on != NO_CPU) { |
1097 | cpu_entry_t *cpu; | 1111 | cpu_entry_t *cpu; |
@@ -1208,6 +1222,13 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1208 | aux_task_owner_increase_priority(t); | 1222 | aux_task_owner_increase_priority(t); |
1209 | } | 1223 | } |
1210 | #endif | 1224 | #endif |
1225 | |||
1226 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1227 | /* propagate to gpu klmirqd */ | ||
1228 | if (tsk_rt(t)->held_gpus) { | ||
1229 | gpu_owner_increase_priority(t); | ||
1230 | } | ||
1231 | #endif | ||
1211 | } | 1232 | } |
1212 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1233 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1213 | } | 1234 | } |
@@ -1237,16 +1258,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1237 | 1258 | ||
1238 | __increase_priority_inheritance(t, prio_inh); | 1259 | __increase_priority_inheritance(t, prio_inh); |
1239 | 1260 | ||
1240 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1241 | if(tsk_rt(t)->cur_klmirqd != NULL) | ||
1242 | { | ||
1243 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
1244 | tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid); | ||
1245 | |||
1246 | __increase_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh); | ||
1247 | } | ||
1248 | #endif | ||
1249 | |||
1250 | raw_spin_unlock(&cluster->cluster_lock); | 1261 | raw_spin_unlock(&cluster->cluster_lock); |
1251 | 1262 | ||
1252 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1263 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
@@ -1320,6 +1331,13 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1320 | } | 1331 | } |
1321 | #endif | 1332 | #endif |
1322 | 1333 | ||
1334 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1335 | /* propagate to gpu */ | ||
1336 | if (tsk_rt(t)->held_gpus) { | ||
1337 | gpu_owner_decrease_priority(t); | ||
1338 | } | ||
1339 | #endif | ||
1340 | |||
1323 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1341 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1324 | } | 1342 | } |
1325 | else { | 1343 | else { |
@@ -1346,16 +1364,6 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1346 | raw_spin_lock(&cluster->cluster_lock); | 1364 | raw_spin_lock(&cluster->cluster_lock); |
1347 | __decrease_priority_inheritance(t, prio_inh); | 1365 | __decrease_priority_inheritance(t, prio_inh); |
1348 | 1366 | ||
1349 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1350 | if(tsk_rt(t)->cur_klmirqd != NULL) | ||
1351 | { | ||
1352 | TRACE_TASK(t, "%s/%d decreases in priority!\n", | ||
1353 | tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid); | ||
1354 | |||
1355 | __decrease_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh); | ||
1356 | } | ||
1357 | #endif | ||
1358 | |||
1359 | raw_spin_unlock(&cluster->cluster_lock); | 1367 | raw_spin_unlock(&cluster->cluster_lock); |
1360 | 1368 | ||
1361 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1369 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
@@ -1371,73 +1379,6 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1371 | } | 1379 | } |
1372 | 1380 | ||
1373 | 1381 | ||
1374 | |||
1375 | |||
1376 | |||
1377 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1378 | /* called with IRQs off */ | ||
1379 | static void increase_priority_inheritance_klmirqd(struct task_struct* klmirqd, | ||
1380 | struct task_struct* old_owner, | ||
1381 | struct task_struct* new_owner) | ||
1382 | { | ||
1383 | cedf_domain_t* cluster = task_cpu_cluster(klmirqd); | ||
1384 | |||
1385 | BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread)); | ||
1386 | |||
1387 | raw_spin_lock(&cluster->cluster_lock); | ||
1388 | |||
1389 | if(old_owner != new_owner) | ||
1390 | { | ||
1391 | if(old_owner) | ||
1392 | { | ||
1393 | // unreachable? | ||
1394 | tsk_rt(old_owner)->cur_klmirqd = NULL; | ||
1395 | } | ||
1396 | |||
1397 | TRACE_TASK(klmirqd, "giving ownership to %s/%d.\n", | ||
1398 | new_owner->comm, new_owner->pid); | ||
1399 | |||
1400 | tsk_rt(new_owner)->cur_klmirqd = klmirqd; | ||
1401 | } | ||
1402 | |||
1403 | __decrease_priority_inheritance(klmirqd, NULL); // kludge to clear out cur prio. | ||
1404 | |||
1405 | __increase_priority_inheritance(klmirqd, | ||
1406 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1407 | new_owner : | ||
1408 | tsk_rt(new_owner)->inh_task); | ||
1409 | |||
1410 | raw_spin_unlock(&cluster->cluster_lock); | ||
1411 | } | ||
1412 | |||
1413 | |||
1414 | /* called with IRQs off */ | ||
1415 | static void decrease_priority_inheritance_klmirqd(struct task_struct* klmirqd, | ||
1416 | struct task_struct* old_owner, | ||
1417 | struct task_struct* new_owner) | ||
1418 | { | ||
1419 | cedf_domain_t* cluster = task_cpu_cluster(klmirqd); | ||
1420 | |||
1421 | BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread)); | ||
1422 | |||
1423 | raw_spin_lock(&cluster->cluster_lock); | ||
1424 | |||
1425 | TRACE_TASK(klmirqd, "priority restored\n"); | ||
1426 | |||
1427 | __decrease_priority_inheritance(klmirqd, new_owner); | ||
1428 | |||
1429 | tsk_rt(old_owner)->cur_klmirqd = NULL; | ||
1430 | |||
1431 | raw_spin_unlock(&cluster->cluster_lock); | ||
1432 | } | ||
1433 | #endif // CONFIG_LITMUS_SOFTIRQD | ||
1434 | |||
1435 | |||
1436 | |||
1437 | |||
1438 | |||
1439 | |||
1440 | |||
1441 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1382 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1442 | 1383 | ||
1443 | /* called with IRQs off */ | 1384 | /* called with IRQs off */ |
@@ -1836,33 +1777,7 @@ static long cedf_activate_plugin(void) | |||
1836 | } | 1777 | } |
1837 | 1778 | ||
1838 | #ifdef CONFIG_LITMUS_SOFTIRQD | 1779 | #ifdef CONFIG_LITMUS_SOFTIRQD |
1839 | { | 1780 | init_klmirqd(); |
1840 | /* distribute the daemons evenly across the clusters. */ | ||
1841 | int* affinity = kmalloc(NR_LITMUS_SOFTIRQD * sizeof(int), GFP_ATOMIC); | ||
1842 | int num_daemons_per_cluster = NR_LITMUS_SOFTIRQD / num_clusters; | ||
1843 | int left_over = NR_LITMUS_SOFTIRQD % num_clusters; | ||
1844 | |||
1845 | int daemon = 0; | ||
1846 | for(i = 0; i < num_clusters; ++i) | ||
1847 | { | ||
1848 | int num_on_this_cluster = num_daemons_per_cluster; | ||
1849 | if(left_over) | ||
1850 | { | ||
1851 | ++num_on_this_cluster; | ||
1852 | --left_over; | ||
1853 | } | ||
1854 | |||
1855 | for(j = 0; j < num_on_this_cluster; ++j) | ||
1856 | { | ||
1857 | // first CPU of this cluster | ||
1858 | affinity[daemon++] = i*cluster_size; | ||
1859 | } | ||
1860 | } | ||
1861 | |||
1862 | spawn_klmirqd(affinity); | ||
1863 | |||
1864 | kfree(affinity); | ||
1865 | } | ||
1866 | #endif | 1781 | #endif |
1867 | 1782 | ||
1868 | #ifdef CONFIG_LITMUS_NVIDIA | 1783 | #ifdef CONFIG_LITMUS_NVIDIA |
@@ -1906,10 +1821,6 @@ static struct sched_plugin cedf_plugin __cacheline_aligned_in_smp = { | |||
1906 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 1821 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
1907 | .allocate_aff_obs = cedf_allocate_affinity_observer, | 1822 | .allocate_aff_obs = cedf_allocate_affinity_observer, |
1908 | #endif | 1823 | #endif |
1909 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1910 | .increase_prio_klmirqd = increase_priority_inheritance_klmirqd, | ||
1911 | .decrease_prio_klmirqd = decrease_priority_inheritance_klmirqd, | ||
1912 | #endif | ||
1913 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 1824 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
1914 | .enqueue_pai_tasklet = cedf_enqueue_pai_tasklet, | 1825 | .enqueue_pai_tasklet = cedf_enqueue_pai_tasklet, |
1915 | .change_prio_pai_tasklet = cedf_change_prio_pai_tasklet, | 1826 | .change_prio_pai_tasklet = cedf_change_prio_pai_tasklet, |
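Taken together, the C-EDF hunks above (mirrored for GSN-EDF below) attach the GPU-owner registry to four task-lifecycle events; a condensed sketch of the pattern (the plugin_gpu_* wrappers are hypothetical, locking and tracing omitted):

/* block:  a suspended GPU holder may donate priority to its klmirqd thread
 * wake:   a runnable GPU holder withdraws that donation
 * exit:   always clean up the registration
 * prio:   propagate inheritance changes into the per-GPU owner heap */
static void plugin_gpu_block(struct task_struct *t)
{
	if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
		enable_gpu_owner(t);
}

static void plugin_gpu_wake(struct task_struct *t)
{
	if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu)
		disable_gpu_owner(t);
}

static void plugin_gpu_exit(struct task_struct *t)
{
	if (tsk_rt(t)->held_gpus)
		disable_gpu_owner(t);
}

static void plugin_gpu_prio_change(struct task_struct *t, int raised)
{
	if (tsk_rt(t)->held_gpus) {
		if (raised)
			gpu_owner_increase_priority(t);
		else
			gpu_owner_decrease_priority(t);
	}
}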
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c index d52be9325044..f27c104ea027 100644 --- a/litmus/sched_gsn_edf.c +++ b/litmus/sched_gsn_edf.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <litmus/bheap.h> | 27 | #include <litmus/bheap.h> |
28 | #include <litmus/binheap.h> | 28 | #include <litmus/binheap.h> |
29 | #include <litmus/trace.h> | ||
29 | 30 | ||
30 | #ifdef CONFIG_LITMUS_LOCKING | 31 | #ifdef CONFIG_LITMUS_LOCKING |
31 | #include <litmus/kfmlp_lock.h> | 32 | #include <litmus/kfmlp_lock.h> |
@@ -50,7 +51,6 @@ | |||
50 | 51 | ||
51 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 52 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
52 | #include <linux/interrupt.h> | 53 | #include <linux/interrupt.h> |
53 | #include <litmus/trace.h> | ||
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_LITMUS_NVIDIA | 56 | #ifdef CONFIG_LITMUS_NVIDIA |
@@ -156,12 +156,6 @@ static raw_spinlock_t* gsnedf_get_dgl_spinlock(struct task_struct *t) | |||
156 | #endif | 156 | #endif |
157 | 157 | ||
158 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 158 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
159 | struct tasklet_head | ||
160 | { | ||
161 | struct tasklet_struct *head; | ||
162 | struct tasklet_struct **tail; | ||
163 | }; | ||
164 | |||
165 | struct tasklet_head gsnedf_pending_tasklets; | 159 | struct tasklet_head gsnedf_pending_tasklets; |
166 | #endif | 160 | #endif |
167 | 161 | ||
@@ -938,13 +932,6 @@ static struct task_struct* gsnedf_schedule(struct task_struct * prev) | |||
938 | 932 | ||
939 | sched_state_task_picked(); | 933 | sched_state_task_picked(); |
940 | 934 | ||
941 | #if 0 | ||
942 | if (next && is_realtime(next) && tsk_rt(next)->is_aux_task && !tsk_rt(next)->inh_task) { | ||
943 | TRACE_TASK(next, "is aux with no inheritance. preventing it from actually running.\n"); | ||
944 | next = NULL; | ||
945 | } | ||
946 | #endif | ||
947 | |||
948 | raw_spin_unlock(&gsnedf_lock); | 935 | raw_spin_unlock(&gsnedf_lock); |
949 | 936 | ||
950 | #ifdef WANT_ALL_SCHED_EVENTS | 937 | #ifdef WANT_ALL_SCHED_EVENTS |
@@ -1056,6 +1043,13 @@ static void gsnedf_task_wake_up(struct task_struct *task) | |||
1056 | } | 1043 | } |
1057 | #endif | 1044 | #endif |
1058 | 1045 | ||
1046 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1047 | if (tsk_rt(task)->held_gpus && !tsk_rt(task)->hide_from_gpu) { | ||
1048 | TRACE_CUR("%s/%d is ready so gpu klmirqd tasks may not inherit.\n", task->comm, task->pid); | ||
1049 | disable_gpu_owner(task); | ||
1050 | } | ||
1051 | #endif | ||
1052 | |||
1059 | gsnedf_job_arrival(task); | 1053 | gsnedf_job_arrival(task); |
1060 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1054 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1061 | } | 1055 | } |
@@ -1079,6 +1073,14 @@ static void gsnedf_task_block(struct task_struct *t) | |||
1079 | } | 1073 | } |
1080 | #endif | 1074 | #endif |
1081 | 1075 | ||
1076 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1077 | if (tsk_rt(t)->held_gpus && !tsk_rt(t)->hide_from_gpu) { | ||
1078 | |||
1079 | TRACE_CUR("%s/%d is blocked so aux tasks may inherit.\n", t->comm, t->pid); | ||
1080 | enable_gpu_owner(t); | ||
1081 | } | ||
1082 | #endif | ||
1083 | |||
1082 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); | 1084 | raw_spin_unlock_irqrestore(&gsnedf_lock, flags); |
1083 | 1085 | ||
1084 | BUG_ON(!is_realtime(t)); | 1086 | BUG_ON(!is_realtime(t)); |
@@ -1106,6 +1108,13 @@ static void gsnedf_task_exit(struct task_struct * t) | |||
1106 | } | 1108 | } |
1107 | #endif | 1109 | #endif |
1108 | 1110 | ||
1111 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1112 | /* make sure we clean up on our way out */ | ||
1113 | if(tsk_rt(t)->held_gpus) { | ||
1114 | disable_gpu_owner(t); | ||
1115 | } | ||
1116 | #endif | ||
1117 | |||
1109 | unlink(t); | 1118 | unlink(t); |
1110 | if (tsk_rt(t)->scheduled_on != NO_CPU) { | 1119 | if (tsk_rt(t)->scheduled_on != NO_CPU) { |
1111 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; | 1120 | gsnedf_cpus[tsk_rt(t)->scheduled_on]->scheduled = NULL; |
@@ -1154,7 +1163,6 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1154 | 1163 | ||
1155 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1164 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1156 | /* this sanity check allows for weaker locking in protocols */ | 1165 | /* this sanity check allows for weaker locking in protocols */ |
1157 | /* TODO (klmirqd): Skip this check if 't' is a proxy thread (???) */ | ||
1158 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { | 1166 | if(__edf_higher_prio(prio_inh, BASE, t, EFFECTIVE)) { |
1159 | #endif | 1167 | #endif |
1160 | TRACE_TASK(t, "inherits priority from %s/%d\n", | 1168 | TRACE_TASK(t, "inherits priority from %s/%d\n", |
@@ -1218,6 +1226,14 @@ static int __increase_priority_inheritance(struct task_struct* t, | |||
1218 | aux_task_owner_increase_priority(t); | 1226 | aux_task_owner_increase_priority(t); |
1219 | } | 1227 | } |
1220 | #endif | 1228 | #endif |
1229 | |||
1230 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1231 | /* propagate to gpu klmirqd */ | ||
1232 | if (tsk_rt(t)->held_gpus) { | ||
1233 | gpu_owner_increase_priority(t); | ||
1234 | } | ||
1235 | #endif | ||
1236 | |||
1221 | } | 1237 | } |
1222 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1238 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1223 | } | 1239 | } |
@@ -1247,16 +1263,6 @@ static void increase_priority_inheritance(struct task_struct* t, struct task_str | |||
1247 | 1263 | ||
1248 | success = __increase_priority_inheritance(t, prio_inh); | 1264 | success = __increase_priority_inheritance(t, prio_inh); |
1249 | 1265 | ||
1250 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1251 | if(tsk_rt(t)->cur_klmirqd != NULL) | ||
1252 | { | ||
1253 | TRACE_TASK(t, "%s/%d inherits a new priority!\n", | ||
1254 | tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid); | ||
1255 | |||
1256 | __increase_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh); | ||
1257 | } | ||
1258 | #endif | ||
1259 | |||
1260 | raw_spin_unlock(&gsnedf_lock); | 1266 | raw_spin_unlock(&gsnedf_lock); |
1261 | 1267 | ||
1262 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1268 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
@@ -1330,6 +1336,14 @@ static int __decrease_priority_inheritance(struct task_struct* t, | |||
1330 | } | 1336 | } |
1331 | #endif | 1337 | #endif |
1332 | 1338 | ||
1339 | #ifdef CONFIG_LITMUS_NVIDIA | ||
1340 | /* propagate to gpu klmirqd */ | ||
1341 | if (tsk_rt(t)->held_gpus) { | ||
1342 | gpu_owner_decrease_priority(t); | ||
1343 | } | ||
1344 | #endif | ||
1345 | |||
1346 | |||
1333 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1347 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1334 | } | 1348 | } |
1335 | else { | 1349 | else { |
@@ -1357,16 +1371,6 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1357 | 1371 | ||
1358 | success = __decrease_priority_inheritance(t, prio_inh); | 1372 | success = __decrease_priority_inheritance(t, prio_inh); |
1359 | 1373 | ||
1360 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1361 | if(tsk_rt(t)->cur_klmirqd != NULL) | ||
1362 | { | ||
1363 | TRACE_TASK(t, "%s/%d decreases in priority!\n", | ||
1364 | tsk_rt(t)->cur_klmirqd->comm, tsk_rt(t)->cur_klmirqd->pid); | ||
1365 | |||
1366 | __decrease_priority_inheritance(tsk_rt(t)->cur_klmirqd, prio_inh); | ||
1367 | } | ||
1368 | #endif | ||
1369 | |||
1370 | raw_spin_unlock(&gsnedf_lock); | 1374 | raw_spin_unlock(&gsnedf_lock); |
1371 | 1375 | ||
1372 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) | 1376 | #if defined(CONFIG_LITMUS_PAI_SOFTIRQD) && defined(CONFIG_LITMUS_NVIDIA) |
@@ -1382,62 +1386,6 @@ static void decrease_priority_inheritance(struct task_struct* t, | |||
1382 | } | 1386 | } |
1383 | 1387 | ||
1384 | 1388 | ||
1385 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1386 | /* called with IRQs off */ | ||
1387 | static void increase_priority_inheritance_klmirqd(struct task_struct* klmirqd, | ||
1388 | struct task_struct* old_owner, | ||
1389 | struct task_struct* new_owner) | ||
1390 | { | ||
1391 | BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread)); | ||
1392 | |||
1393 | raw_spin_lock(&gsnedf_lock); | ||
1394 | |||
1395 | if(old_owner != new_owner) | ||
1396 | { | ||
1397 | if(old_owner) | ||
1398 | { | ||
1399 | // unreachable? | ||
1400 | tsk_rt(old_owner)->cur_klmirqd = NULL; | ||
1401 | } | ||
1402 | |||
1403 | TRACE_TASK(klmirqd, "giving ownership to %s/%d.\n", | ||
1404 | new_owner->comm, new_owner->pid); | ||
1405 | |||
1406 | tsk_rt(new_owner)->cur_klmirqd = klmirqd; | ||
1407 | } | ||
1408 | |||
1409 | __decrease_priority_inheritance(klmirqd, NULL); // kludge to clear out cur prio. | ||
1410 | |||
1411 | __increase_priority_inheritance(klmirqd, | ||
1412 | (tsk_rt(new_owner)->inh_task == NULL) ? | ||
1413 | new_owner : | ||
1414 | tsk_rt(new_owner)->inh_task); | ||
1415 | |||
1416 | raw_spin_unlock(&gsnedf_lock); | ||
1417 | } | ||
1418 | |||
1419 | |||
1420 | /* called with IRQs off */ | ||
1421 | static void decrease_priority_inheritance_klmirqd(struct task_struct* klmirqd, | ||
1422 | struct task_struct* old_owner, | ||
1423 | struct task_struct* new_owner) | ||
1424 | { | ||
1425 | BUG_ON(!(tsk_rt(klmirqd)->is_proxy_thread)); | ||
1426 | |||
1427 | raw_spin_lock(&gsnedf_lock); | ||
1428 | |||
1429 | TRACE_TASK(klmirqd, "priority restored\n"); | ||
1430 | |||
1431 | __decrease_priority_inheritance(klmirqd, new_owner); | ||
1432 | |||
1433 | tsk_rt(old_owner)->cur_klmirqd = NULL; | ||
1434 | |||
1435 | raw_spin_unlock(&gsnedf_lock); | ||
1436 | } | ||
1437 | #endif | ||
1438 | |||
1439 | |||
1440 | |||
1441 | 1389 | ||
1442 | #ifdef CONFIG_LITMUS_NESTED_LOCKING | 1390 | #ifdef CONFIG_LITMUS_NESTED_LOCKING |
1443 | 1391 | ||
@@ -1923,7 +1871,7 @@ static long gsnedf_activate_plugin(void) | |||
1923 | #endif | 1871 | #endif |
1924 | 1872 | ||
1925 | #ifdef CONFIG_LITMUS_SOFTIRQD | 1873 | #ifdef CONFIG_LITMUS_SOFTIRQD |
1926 | spawn_klmirqd(NULL); | 1874 | init_klmirqd(); |
1927 | #endif | 1875 | #endif |
1928 | 1876 | ||
1929 | #ifdef CONFIG_LITMUS_NVIDIA | 1877 | #ifdef CONFIG_LITMUS_NVIDIA |
@@ -1965,10 +1913,6 @@ static struct sched_plugin gsn_edf_plugin __cacheline_aligned_in_smp = { | |||
1965 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING | 1913 | #ifdef CONFIG_LITMUS_AFFINITY_LOCKING |
1966 | .allocate_aff_obs = gsnedf_allocate_affinity_observer, | 1914 | .allocate_aff_obs = gsnedf_allocate_affinity_observer, |
1967 | #endif | 1915 | #endif |
1968 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
1969 | .increase_prio_klmirqd = increase_priority_inheritance_klmirqd, | ||
1970 | .decrease_prio_klmirqd = decrease_priority_inheritance_klmirqd, | ||
1971 | #endif | ||
1972 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 1916 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
1973 | .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet, | 1917 | .enqueue_pai_tasklet = gsnedf_enqueue_pai_tasklet, |
1974 | .change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet, | 1918 | .change_prio_pai_tasklet = gsnedf_change_prio_pai_tasklet, |
diff --git a/litmus/sched_plugin.c b/litmus/sched_plugin.c index cda67e0f6bc8..30c216fd6fdc 100644 --- a/litmus/sched_plugin.c +++ b/litmus/sched_plugin.c | |||
@@ -167,18 +167,6 @@ static int litmus_dummy___decrease_prio(struct task_struct* t, struct task_struc | |||
167 | } | 167 | } |
168 | #endif | 168 | #endif |
169 | 169 | ||
170 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
171 | static void litmus_dummy_increase_prio_klmirqd(struct task_struct* klmirqd, | ||
172 | struct task_struct* old_owner, | ||
173 | struct task_struct* new_owner) | ||
174 | { | ||
175 | } | ||
176 | |||
177 | static void litmus_dummy_decrease_prio_klmirqd(struct task_struct* klmirqd, | ||
178 | struct task_struct* old_owner) | ||
179 | { | ||
180 | } | ||
181 | #endif | ||
182 | 170 | ||
183 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 171 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
184 | static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t) | 172 | static int litmus_dummy_enqueue_pai_tasklet(struct tasklet_struct* t) |
@@ -263,10 +251,6 @@ struct sched_plugin linux_sched_plugin = { | |||
263 | .nested_decrease_prio = litmus_dummy_nested_decrease_prio, | 251 | .nested_decrease_prio = litmus_dummy_nested_decrease_prio, |
264 | .__compare = litmus_dummy___compare, | 252 | .__compare = litmus_dummy___compare, |
265 | #endif | 253 | #endif |
266 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
267 | .increase_prio_klmirqd = litmus_dummy_increase_prio_klmirqd, | ||
268 | .decrease_prio_klmirqd = litmus_dummy_decrease_prio_klmirqd, | ||
269 | #endif | ||
270 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 254 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
271 | .enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet, | 255 | .enqueue_pai_tasklet = litmus_dummy_enqueue_pai_tasklet, |
272 | .change_prio_pai_tasklet = litmus_dummy_change_prio_pai_tasklet, | 256 | .change_prio_pai_tasklet = litmus_dummy_change_prio_pai_tasklet, |
@@ -327,10 +311,6 @@ int register_sched_plugin(struct sched_plugin* plugin) | |||
327 | CHECK(nested_decrease_prio); | 311 | CHECK(nested_decrease_prio); |
328 | CHECK(__compare); | 312 | CHECK(__compare); |
329 | #endif | 313 | #endif |
330 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
331 | CHECK(increase_prio_klmirqd); | ||
332 | CHECK(decrease_prio_klmirqd); | ||
333 | #endif | ||
334 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | 314 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD |
335 | CHECK(enqueue_pai_tasklet); | 315 | CHECK(enqueue_pai_tasklet); |
336 | CHECK(change_prio_pai_tasklet); | 316 | CHECK(change_prio_pai_tasklet); |