author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-12-11 22:01:01 -0500 |
---|---|---|
committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-12-12 14:14:41 -0500 |
commit | c8483ef0959672310bf4ebb72e1a308b00543f74 (patch) | |
tree | 9cb306009b01c5226178f69172738026431d37f2 /litmus/litmus_softirq.c | |
parent | fbd9574e298157b54c38f82f536e5cea8f766dff (diff) |
make klmirqd work like aux tasks. checkpoint.
this code is untested!
Diffstat (limited to 'litmus/litmus_softirq.c')
-rw-r--r-- | litmus/litmus_softirq.c | 1460 |
1 files changed, 514 insertions, 946 deletions
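For orientation (editorial note, not part of the commit): this patch drops the fixed array of `NR_LITMUS_SOFTIRQD` daemons and the owner-inheritance machinery, and instead lets clients launch individual klmirqd threads that register themselves in a global list and are addressed by their `task_struct`. The sketch below is an assumed usage pattern inferred from `launch_klmirqd_thread()`, `run_klmirqd()`, `__litmus_tasklet_schedule()`, and `kill_klmirqd_thread()` in the diff; the member names of `klmirqd_callback_t` (`func`, `arg`) and everything prefixed `my_` are hypothetical.

```c
/* Hypothetical client-side sketch of the per-thread klmirqd API added by
 * this patch.  Names prefixed my_ are illustrative only; the callback
 * layout (func + arg) is inferred from the cb->func(cb->arg) call in
 * run_klmirqd(). */

struct my_irq_state {
	struct task_struct *klmirqd_thread;   /* filled in by the callback */
};

/* Runs inside the freshly launched klmirqd thread, after it has been
 * admitted to LITMUS^RT and registered (see run_klmirqd() below).  It
 * records the thread so the owner can later route tasklets to it. */
static int my_klmirqd_init(void *arg)
{
	struct my_irq_state *state = (struct my_irq_state *)arg;
	state->klmirqd_thread = current;
	return 0;   /* non-zero makes run_klmirqd() unregister and exit */
}

static struct my_irq_state my_state;
static klmirqd_callback_t my_cb = {
	.func = my_klmirqd_init,   /* assumed member names */
	.arg  = &my_state,
};

static void my_setup(void)
{
	init_klmirqd();                    /* once, at plugin init (caller not
	                                      visible in this file) */
	launch_klmirqd_thread(2, &my_cb);  /* pin the daemon to CPU 2; -1 for
	                                      no affinity */
}

static void my_defer_bottom_half(struct tasklet_struct *t)
{
	/* t->owner is assumed to already name the real-time task this
	 * tasklet serves.  Route it to our daemon instead of ksoftirqd. */
	if (!__litmus_tasklet_schedule(t, my_state.klmirqd_thread))
		tasklet_schedule(t);           /* fall back to Linux softirqs */
}

static void my_teardown(void)
{
	kill_klmirqd_thread(my_state.klmirqd_thread);
}
```

Because `launch_klmirqd_thread()` only queues the thread creation on a workqueue, the daemon does not exist when the call returns; a real caller would have to wait for the callback to run before routing tasklets to it.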
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index 73a3053e662b..44e2d38ad982 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -18,10 +18,6 @@ | |||
18 | 18 | ||
19 | /* TODO: Remove unneeded mb() and other barriers. */ | 19 | /* TODO: Remove unneeded mb() and other barriers. */ |
20 | 20 | ||
21 | |||
22 | /* counts number of daemons ready to handle litmus irqs. */ | ||
23 | static atomic_t num_ready_klmirqds = ATOMIC_INIT(0); | ||
24 | |||
25 | enum pending_flags | 21 | enum pending_flags |
26 | { | 22 | { |
27 | LIT_TASKLET_LOW = 0x1, | 23 | LIT_TASKLET_LOW = 0x1, |
@@ -29,35 +25,313 @@ enum pending_flags | |||
29 | LIT_WORK = LIT_TASKLET_HI<<1 | 25 | LIT_WORK = LIT_TASKLET_HI<<1 |
30 | }; | 26 | }; |
31 | 27 | ||
32 | /* only support tasklet processing for now. */ | 28 | struct klmirqd_registration |
33 | struct tasklet_head | ||
34 | { | 29 | { |
35 | struct tasklet_struct *head; | 30 | raw_spinlock_t lock; |
36 | struct tasklet_struct **tail; | 31 | u32 nr_threads; |
32 | unsigned int initialized:1; | ||
33 | unsigned int shuttingdown:1; | ||
34 | struct list_head threads; | ||
37 | }; | 35 | }; |
38 | 36 | ||
39 | struct klmirqd_info | 37 | static atomic_t klmirqd_id_gen = ATOMIC_INIT(0); |
38 | |||
39 | static struct klmirqd_registration klmirqd_state; | ||
40 | |||
41 | |||
42 | |||
43 | void init_klmirqd(void) | ||
44 | { | ||
45 | raw_spin_lock_init(&klmirqd_state.lock); | ||
46 | |||
47 | klmirqd_state.nr_threads = 0; | ||
48 | klmirqd_state.initialized = 1; | ||
49 | klmirqd_state.shuttingdown = 0; | ||
50 | INIT_LIST_HEAD(&klmirqd_state.threads); | ||
51 | } | ||
52 | |||
53 | static int __klmirqd_is_ready(void) | ||
54 | { | ||
55 | return (klmirqd_state.initialized == 1 && klmirqd_state.shuttingdown == 0); | ||
56 | } | ||
57 | |||
58 | int klmirqd_is_ready(void) | ||
59 | { | ||
60 | unsigned long flags; | ||
61 | int ret; | ||
62 | |||
63 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
64 | ret = __klmirqd_is_ready(); | ||
65 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | int klmirqd_is_dead(void) | ||
71 | { | ||
72 | return(!klmirqd_is_ready()); | ||
73 | } | ||
74 | |||
75 | |||
76 | void kill_klmirqd(void) | ||
77 | { | ||
78 | if(!klmirqd_is_dead()) | ||
79 | { | ||
80 | unsigned long flags; | ||
81 | struct list_head *pos; | ||
82 | |||
83 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
84 | |||
85 | TRACE("%s: Killing all klmirqd threads! (%d of them)\n", __FUNCTION__, klmirqd_state.nr_threads); | ||
86 | |||
87 | klmirqd_state.shuttingdown = 1; | ||
88 | |||
89 | list_for_each(pos, &klmirqd_state.threads) { | ||
90 | struct klmirqd_info* info = list_entry(pos, struct klmirqd_info, klmirqd_reg); | ||
91 | |||
92 | if(info->terminating != 1) | ||
93 | { | ||
94 | info->terminating = 1; | ||
95 | mb(); /* just to be sure? */ | ||
96 | flush_pending(info->klmirqd); | ||
97 | |||
98 | /* signal termination */ | ||
99 | kthread_stop(info->klmirqd); | ||
100 | } | ||
101 | } | ||
102 | |||
103 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | |||
108 | |||
109 | void kill_klmirqd_thread(struct task_struct* klmirqd_thread) | ||
40 | { | 110 | { |
41 | struct task_struct* klmirqd; | 111 | unsigned long flags; |
42 | struct task_struct* current_owner; | 112 | struct klmirqd_info* info; |
43 | int terminating; | ||
44 | 113 | ||
114 | if (!tsk_rt(klmirqd_thread)->is_interrupt_thread) { | ||
115 | TRACE("%s/%d is not a klmirqd thread\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
116 | return; | ||
117 | } | ||
118 | |||
119 | TRACE("%s: Killing klmirqd thread %s/%d\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
45 | 120 | ||
46 | raw_spinlock_t lock; | 121 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); |
47 | 122 | ||
48 | u32 pending; | 123 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
49 | atomic_t num_hi_pending; | 124 | |
50 | atomic_t num_low_pending; | 125 | if(info->terminating != 1) { |
51 | atomic_t num_work_pending; | 126 | info->terminating = 1; |
127 | mb(); | ||
128 | |||
129 | flush_pending(klmirqd_thread); | ||
130 | kthread_stop(klmirqd_thread); | ||
131 | } | ||
132 | |||
133 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
134 | } | ||
52 | 135 | ||
53 | /* in order of priority */ | 136 | |
54 | struct tasklet_head pending_tasklets_hi; | 137 | |
55 | struct tasklet_head pending_tasklets; | 138 | struct klmirqd_launch_data |
56 | struct list_head worklist; | 139 | { |
140 | int cpu_affinity; | ||
141 | klmirqd_callback_t* cb; | ||
142 | struct work_struct work; | ||
57 | }; | 143 | }; |
58 | 144 | ||
59 | /* one list for each klmirqd */ | 145 | static int run_klmirqd(void* callback); |
60 | static struct klmirqd_info klmirqds[NR_LITMUS_SOFTIRQD]; | 146 | |
147 | |||
148 | /* executed by a kworker from workqueues */ | ||
149 | static void __launch_klmirqd_thread(struct work_struct *work) | ||
150 | { | ||
151 | int id; | ||
152 | struct task_struct* thread = NULL; | ||
153 | struct klmirqd_launch_data* launch_data = | ||
154 | container_of(work, struct klmirqd_launch_data, work); | ||
155 | |||
156 | TRACE("%s: Creating klmirqd thread\n", __FUNCTION__); | ||
157 | |||
158 | id = atomic_inc_return(&klmirqd_id_gen); | ||
159 | |||
160 | if (launch_data->cpu_affinity != -1) { | ||
161 | thread = kthread_create( | ||
162 | run_klmirqd, | ||
163 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
164 | (void*)launch_data->cb, | ||
165 | "klmirqd_th%d/%d", | ||
166 | id, | ||
167 | launch_data->cpu_affinity); | ||
168 | |||
169 | /* litmus will put is in the right cluster. */ | ||
170 | kthread_bind(thread, launch_data->cpu_affinity); | ||
171 | |||
172 | TRACE("%s: Launching klmirqd_th%d/%d\n", __FUNCTION__, id, launch_data->cpu_affinity); | ||
173 | } | ||
174 | else { | ||
175 | thread = kthread_create( | ||
176 | run_klmirqd, | ||
177 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
178 | (void*)launch_data->cb, | ||
179 | "klmirqd_th%d", | ||
180 | id); | ||
181 | |||
182 | TRACE("%s: Launching klmirqd_th%d\n", __FUNCTION__, id); | ||
183 | } | ||
184 | |||
185 | if (thread) { | ||
186 | wake_up_process(thread); | ||
187 | } | ||
188 | else { | ||
189 | TRACE("Could not create klmirqd/%d thread!\n", id); | ||
190 | } | ||
191 | |||
192 | kfree(launch_data); | ||
193 | } | ||
194 | |||
195 | |||
196 | int launch_klmirqd_thread(int cpu, klmirqd_callback_t* cb) | ||
197 | { | ||
198 | struct klmirqd_launch_data* delayed_launch; | ||
199 | |||
200 | if (!klmirqd_is_ready()) { | ||
201 | TRACE("klmirqd is not ready. Check that it was initialized!\n"); | ||
202 | return -1; | ||
203 | } | ||
204 | |||
205 | /* tell a work queue to launch the threads. we can't make scheduling | ||
206 | calls since we're in an atomic state. */ | ||
207 | delayed_launch = kmalloc(sizeof(struct klmirqd_launch_data), GFP_ATOMIC); | ||
208 | delayed_launch->cpu_affinity = cpu; | ||
209 | delayed_launch->cb = cb; | ||
210 | INIT_WORK(&delayed_launch->work, __launch_klmirqd_thread); | ||
211 | schedule_work(&delayed_launch->work); | ||
212 | |||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | |||
217 | |||
218 | |||
219 | #define KLMIRQD_SLICE_NR_JIFFIES 1 | ||
220 | #define KLMIRQD_SLICE_NS ((NSEC_PER_SEC / HZ) * KLMIRQD_SLICE_NR_JIFFIES) | ||
221 | |||
222 | static int set_litmus_daemon_sched(struct task_struct* tsk) | ||
223 | { | ||
224 | int ret = 0; | ||
225 | |||
226 | struct rt_task tp = { | ||
227 | .period = KLMIRQD_SLICE_NS, /* dummy 1 second period */ | ||
228 | .relative_deadline = KLMIRQD_SLICE_NS, | ||
229 | .exec_cost = KLMIRQD_SLICE_NS, | ||
230 | .phase = 0, | ||
231 | .cpu = task_cpu(current), | ||
232 | .budget_policy = NO_ENFORCEMENT, | ||
233 | .budget_signal_policy = NO_SIGNALS, | ||
234 | .cls = RT_CLASS_BEST_EFFORT | ||
235 | }; | ||
236 | |||
237 | struct sched_param param = { .sched_priority = 0}; | ||
238 | |||
239 | TRACE_CUR("Setting %s/%d as daemon thread.\n", tsk->comm, tsk->pid); | ||
240 | |||
241 | /* set task params */ | ||
242 | tsk_rt(tsk)->task_params = tp; | ||
243 | tsk_rt(tsk)->is_interrupt_thread = 1; | ||
244 | |||
245 | /* inform the OS we're SCHED_LITMUS -- | ||
246 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
247 | sched_setscheduler_nocheck(tsk, SCHED_LITMUS, ¶m); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | static int register_klmirqd(struct task_struct* tsk) | ||
253 | { | ||
254 | int retval = 0; | ||
255 | unsigned long flags; | ||
256 | struct klmirqd_info *info = NULL; | ||
257 | |||
258 | if (!tsk_rt(tsk)->is_interrupt_thread) { | ||
259 | TRACE("Only proxy threads already running in Litmus may become klmirqd threads!\n"); | ||
260 | WARN_ON(1); | ||
261 | retval = -1; | ||
262 | goto out; | ||
263 | } | ||
264 | |||
265 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
266 | |||
267 | if (!__klmirqd_is_ready()) { | ||
268 | TRACE("klmirqd is not ready! Did you forget to initialize it?\n"); | ||
269 | WARN_ON(1); | ||
270 | retval = -1; | ||
271 | goto out_unlock; | ||
272 | } | ||
273 | |||
274 | /* allocate and initialize klmirqd data for the thread */ | ||
275 | info = kmalloc(sizeof(struct klmirqd_info), GFP_KERNEL); | ||
276 | if (!info) { | ||
277 | TRACE("Failed to allocate klmirqd_info struct!\n"); | ||
278 | retval = -1; /* todo: pick better code */ | ||
279 | goto out_unlock; | ||
280 | } | ||
281 | memset(info, 0, sizeof(struct klmirqd_info)); | ||
282 | info->klmirqd = tsk; | ||
283 | info->pending_tasklets_hi.tail = &info->pending_tasklets_hi.head; | ||
284 | info->pending_tasklets.tail = &info->pending_tasklets.head; | ||
285 | INIT_LIST_HEAD(&info->worklist); | ||
286 | INIT_LIST_HEAD(&info->klmirqd_reg); | ||
287 | raw_spin_lock_init(&info->lock); | ||
288 | |||
289 | |||
290 | /* now register with klmirqd */ | ||
291 | list_add_tail(&info->klmirqd_reg, &klmirqd_state.threads); | ||
292 | ++klmirqd_state.nr_threads; | ||
293 | |||
294 | /* update the task struct to point to klmirqd info */ | ||
295 | tsk_rt(tsk)->klmirqd_info = info; | ||
296 | |||
297 | out_unlock: | ||
298 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
299 | |||
300 | out: | ||
301 | return retval; | ||
302 | } | ||
303 | |||
304 | static int unregister_klmirqd(struct task_struct* tsk) | ||
305 | { | ||
306 | int retval = 0; | ||
307 | unsigned long flags; | ||
308 | struct klmirqd_info *info = tsk_rt(tsk)->klmirqd_info; | ||
309 | |||
310 | if (!tsk_rt(tsk)->is_interrupt_thread || !info) { | ||
311 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); | ||
312 | WARN_ON(1); | ||
313 | retval = -1; | ||
314 | goto out; | ||
315 | } | ||
316 | |||
317 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
318 | |||
319 | /* remove the entry in the klmirqd thread list */ | ||
320 | list_del(&info->klmirqd_reg); | ||
321 | --klmirqd_state.nr_threads; | ||
322 | |||
323 | /* remove link to klmirqd info from thread */ | ||
324 | tsk_rt(tsk)->klmirqd_info = NULL; | ||
325 | |||
326 | /* clean up memory */ | ||
327 | kfree(info); | ||
328 | |||
329 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
330 | |||
331 | out: | ||
332 | return retval; | ||
333 | } | ||
334 | |||
61 | 335 | ||
62 | 336 | ||
63 | 337 | ||
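A small aside on the daemon parameters introduced in the hunk above (editorial note, not part of the commit): the slice is now defined in jiffies, so the carried-over "dummy 1 second period" comment no longer matches the value actually used. A worked example, under an assumed but common configuration:

```c
/* Worked example (assumption: HZ = 250):
 *
 *   KLMIRQD_SLICE_NS = (NSEC_PER_SEC / HZ) * KLMIRQD_SLICE_NR_JIFFIES
 *                    = (1000000000 / 250) * 1
 *                    = 4000000 ns  (4 ms)
 *
 * i.e. the daemon's period, relative_deadline, and exec_cost are one
 * scheduler tick, not the one-second dummy period the old code used.
 */
```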
@@ -67,35 +341,50 @@ int proc_read_klmirqd_stats(char *page, char **start, | |||
67 | off_t off, int count, | 341 | off_t off, int count, |
68 | int *eof, void *data) | 342 | int *eof, void *data) |
69 | { | 343 | { |
70 | int len = snprintf(page, PAGE_SIZE, | 344 | unsigned long flags; |
71 | "num ready klmirqds: %d\n\n", | 345 | int len; |
72 | atomic_read(&num_ready_klmirqds)); | 346 | |
73 | 347 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | |
74 | if(klmirqd_is_ready()) | 348 | |
75 | { | 349 | if (klmirqd_state.initialized) { |
76 | int i; | 350 | if (!klmirqd_state.shuttingdown) { |
77 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | 351 | struct list_head *pos; |
78 | { | 352 | |
79 | len += | 353 | len = snprintf(page, PAGE_SIZE, |
80 | snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ | 354 | "num ready klmirqds: %d\n\n", |
81 | "klmirqd_th%d: %s/%d\n" | 355 | klmirqd_state.nr_threads); |
82 | "\tcurrent_owner: %s/%d\n" | 356 | |
83 | "\tpending: %x\n" | 357 | list_for_each(pos, &klmirqd_state.threads) { |
84 | "\tnum hi: %d\n" | 358 | struct klmirqd_info* info = list_entry(pos, struct klmirqd_info, klmirqd_reg); |
85 | "\tnum low: %d\n" | 359 | |
86 | "\tnum work: %d\n\n", | 360 | len += |
87 | i, | 361 | snprintf(page + len - 1, PAGE_SIZE, /* -1 to strip off \0 */ |
88 | klmirqds[i].klmirqd->comm, klmirqds[i].klmirqd->pid, | 362 | "klmirqd_thread: %s/%d\n" |
89 | (klmirqds[i].current_owner != NULL) ? | 363 | "\tcurrent_owner: %s/%d\n" |
90 | klmirqds[i].current_owner->comm : "(null)", | 364 | "\tpending: %x\n" |
91 | (klmirqds[i].current_owner != NULL) ? | 365 | "\tnum hi: %d\n" |
92 | klmirqds[i].current_owner->pid : 0, | 366 | "\tnum low: %d\n" |
93 | klmirqds[i].pending, | 367 | "\tnum work: %d\n\n", |
94 | atomic_read(&klmirqds[i].num_hi_pending), | 368 | info->klmirqd->comm, info->klmirqd->pid, |
95 | atomic_read(&klmirqds[i].num_low_pending), | 369 | (info->current_owner != NULL) ? |
96 | atomic_read(&klmirqds[i].num_work_pending)); | 370 | info->current_owner->comm : "(null)", |
371 | (info->current_owner != NULL) ? | ||
372 | info->current_owner->pid : 0, | ||
373 | info->pending, | ||
374 | atomic_read(&info->num_hi_pending), | ||
375 | atomic_read(&info->num_low_pending), | ||
376 | atomic_read(&info->num_work_pending)); | ||
377 | } | ||
378 | } | ||
379 | else { | ||
380 | len = snprintf(page, PAGE_SIZE, "klmirqd is shutting down\n"); | ||
97 | } | 381 | } |
98 | } | 382 | } |
383 | else { | ||
384 | len = snprintf(page, PAGE_SIZE, "klmirqd is not initialized!\n"); | ||
385 | } | ||
386 | |||
387 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
99 | 388 | ||
100 | return(len); | 389 | return(len); |
101 | } | 390 | } |
@@ -162,6 +451,15 @@ static void dump_state(struct klmirqd_info* which, const char* caller) | |||
162 | #endif | 451 | #endif |
163 | 452 | ||
164 | 453 | ||
454 | |||
455 | |||
456 | |||
457 | |||
458 | |||
459 | |||
460 | |||
461 | |||
462 | |||
165 | /* forward declarations */ | 463 | /* forward declarations */ |
166 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | 464 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, |
167 | struct klmirqd_info *which, | 465 | struct klmirqd_info *which, |
@@ -174,24 +472,6 @@ static void ___litmus_schedule_work(struct work_struct *w, | |||
174 | int wakeup); | 472 | int wakeup); |
175 | 473 | ||
176 | 474 | ||
177 | |||
178 | inline unsigned int klmirqd_id(struct task_struct* tsk) | ||
179 | { | ||
180 | int i; | ||
181 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
182 | { | ||
183 | if(klmirqds[i].klmirqd == tsk) | ||
184 | { | ||
185 | return i; | ||
186 | } | ||
187 | } | ||
188 | |||
189 | BUG(); | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | |||
195 | inline static u32 litirq_pending_hi_irqoff(struct klmirqd_info* which) | 475 | inline static u32 litirq_pending_hi_irqoff(struct klmirqd_info* which) |
196 | { | 476 | { |
197 | return (which->pending & LIT_TASKLET_HI); | 477 | return (which->pending & LIT_TASKLET_HI); |
@@ -225,200 +505,11 @@ inline static u32 litirq_pending(struct klmirqd_info* which) | |||
225 | return pending; | 505 | return pending; |
226 | }; | 506 | }; |
227 | 507 | ||
228 | inline static u32 litirq_pending_with_owner(struct klmirqd_info* which, struct task_struct* owner) | ||
229 | { | ||
230 | unsigned long flags; | ||
231 | u32 pending; | ||
232 | |||
233 | raw_spin_lock_irqsave(&which->lock, flags); | ||
234 | pending = litirq_pending_irqoff(which); | ||
235 | if(pending) | ||
236 | { | ||
237 | if(which->current_owner != owner) | ||
238 | { | ||
239 | pending = 0; // owner switch! | ||
240 | } | ||
241 | } | ||
242 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
243 | |||
244 | return pending; | ||
245 | } | ||
246 | |||
247 | |||
248 | inline static u32 litirq_pending_and_sem_and_owner(struct klmirqd_info* which, | ||
249 | struct mutex** sem, | ||
250 | struct task_struct** t) | ||
251 | { | ||
252 | unsigned long flags; | ||
253 | u32 pending; | ||
254 | |||
255 | /* init values */ | ||
256 | *sem = NULL; | ||
257 | *t = NULL; | ||
258 | |||
259 | raw_spin_lock_irqsave(&which->lock, flags); | ||
260 | |||
261 | pending = litirq_pending_irqoff(which); | ||
262 | if(pending) | ||
263 | { | ||
264 | if(which->current_owner != NULL) | ||
265 | { | ||
266 | *t = which->current_owner; | ||
267 | *sem = &tsk_rt(which->current_owner)->klmirqd_sem; | ||
268 | } | ||
269 | else | ||
270 | { | ||
271 | BUG(); | ||
272 | } | ||
273 | } | ||
274 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
275 | |||
276 | if(likely(*sem)) | ||
277 | { | ||
278 | return pending; | ||
279 | } | ||
280 | else | ||
281 | { | ||
282 | return 0; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | /* returns true if the next piece of work to do is from a different owner. | ||
287 | */ | ||
288 | static int tasklet_ownership_change( | ||
289 | struct klmirqd_info* which, | ||
290 | enum pending_flags taskletQ) | ||
291 | { | ||
292 | /* this function doesn't have to look at work objects since they have | ||
293 | priority below tasklets. */ | ||
294 | |||
295 | unsigned long flags; | ||
296 | int ret = 0; | ||
297 | |||
298 | raw_spin_lock_irqsave(&which->lock, flags); | ||
299 | |||
300 | switch(taskletQ) | ||
301 | { | ||
302 | case LIT_TASKLET_HI: | ||
303 | if(litirq_pending_hi_irqoff(which)) | ||
304 | { | ||
305 | ret = (which->pending_tasklets_hi.head->owner != | ||
306 | which->current_owner); | ||
307 | } | ||
308 | break; | ||
309 | case LIT_TASKLET_LOW: | ||
310 | if(litirq_pending_low_irqoff(which)) | ||
311 | { | ||
312 | ret = (which->pending_tasklets.head->owner != | ||
313 | which->current_owner); | ||
314 | } | ||
315 | break; | ||
316 | default: | ||
317 | break; | ||
318 | } | ||
319 | |||
320 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
321 | |||
322 | TRACE_TASK(which->klmirqd, "ownership change needed: %d\n", ret); | ||
323 | |||
324 | return ret; | ||
325 | } | ||
326 | |||
327 | |||
328 | static void __reeval_prio(struct klmirqd_info* which) | ||
329 | { | ||
330 | struct task_struct* next_owner = NULL; | ||
331 | struct task_struct* klmirqd = which->klmirqd; | ||
332 | |||
333 | /* Check in prio-order */ | ||
334 | u32 pending = litirq_pending_irqoff(which); | ||
335 | |||
336 | //__dump_state(which, "__reeval_prio: before"); | ||
337 | |||
338 | if(pending) | ||
339 | { | ||
340 | if(pending & LIT_TASKLET_HI) | ||
341 | { | ||
342 | next_owner = which->pending_tasklets_hi.head->owner; | ||
343 | } | ||
344 | else if(pending & LIT_TASKLET_LOW) | ||
345 | { | ||
346 | next_owner = which->pending_tasklets.head->owner; | ||
347 | } | ||
348 | else if(pending & LIT_WORK) | ||
349 | { | ||
350 | struct work_struct* work = | ||
351 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
352 | next_owner = work->owner; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | if(next_owner != which->current_owner) | ||
357 | { | ||
358 | struct task_struct* old_owner = which->current_owner; | ||
359 | |||
360 | /* bind the next owner. */ | ||
361 | which->current_owner = next_owner; | ||
362 | mb(); | ||
363 | |||
364 | if(next_owner != NULL) | ||
365 | { | ||
366 | if(!in_interrupt()) | ||
367 | { | ||
368 | TRACE_CUR("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
369 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->comm, | ||
370 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->pid, | ||
371 | next_owner->comm, next_owner->pid); | ||
372 | } | ||
373 | else | ||
374 | { | ||
375 | TRACE("%s: Ownership change: %s/%d to %s/%d\n", __FUNCTION__, | ||
376 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->comm, | ||
377 | ((tsk_rt(klmirqd)->inh_task) ? tsk_rt(klmirqd)->inh_task : klmirqd)->pid, | ||
378 | next_owner->comm, next_owner->pid); | ||
379 | } | ||
380 | |||
381 | litmus->increase_prio_inheritance_klmirqd(klmirqd, old_owner, next_owner); | ||
382 | } | ||
383 | else | ||
384 | { | ||
385 | if(likely(!in_interrupt())) | ||
386 | { | ||
387 | TRACE_CUR("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
388 | __FUNCTION__, klmirqd->comm, klmirqd->pid); | ||
389 | } | ||
390 | else | ||
391 | { | ||
392 | // is this a bug? | ||
393 | TRACE("%s: Ownership change: %s/%d to NULL (reverting)\n", | ||
394 | __FUNCTION__, klmirqd->comm, klmirqd->pid); | ||
395 | } | ||
396 | |||
397 | BUG_ON(pending != 0); | ||
398 | litmus->decrease_prio_inheritance_klmirqd(klmirqd, old_owner, NULL); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | //__dump_state(which, "__reeval_prio: after"); | ||
403 | } | ||
404 | |||
405 | static void reeval_prio(struct klmirqd_info* which) | ||
406 | { | ||
407 | unsigned long flags; | ||
408 | |||
409 | raw_spin_lock_irqsave(&which->lock, flags); | ||
410 | __reeval_prio(which); | ||
411 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
412 | } | ||
413 | |||
414 | |||
415 | static void wakeup_litirqd_locked(struct klmirqd_info* which) | 508 | static void wakeup_litirqd_locked(struct klmirqd_info* which) |
416 | { | 509 | { |
417 | /* Interrupts are disabled: no need to stop preemption */ | 510 | /* Interrupts are disabled: no need to stop preemption */ |
418 | if (which && which->klmirqd) | 511 | if (which && which->klmirqd) |
419 | { | 512 | { |
420 | __reeval_prio(which); /* configure the proper priority */ | ||
421 | |||
422 | if(which->klmirqd->state != TASK_RUNNING) | 513 | if(which->klmirqd->state != TASK_RUNNING) |
423 | { | 514 | { |
424 | TRACE("%s: Waking up klmirqd: %s/%d\n", __FUNCTION__, | 515 | TRACE("%s: Waking up klmirqd: %s/%d\n", __FUNCTION__, |
@@ -468,7 +559,7 @@ static void do_lit_tasklet(struct klmirqd_info* which, | |||
468 | list = list->next; | 559 | list = list->next; |
469 | 560 | ||
470 | /* execute tasklet if it has my priority and is free */ | 561 | /* execute tasklet if it has my priority and is free */ |
471 | if ((t->owner == which->current_owner) && tasklet_trylock(t)) { | 562 | if (tasklet_trylock(t)) { |
472 | if (!atomic_read(&t->count)) { | 563 | if (!atomic_read(&t->count)) { |
473 | 564 | ||
474 | sched_trace_tasklet_begin(t->owner); | 565 | sched_trace_tasklet_begin(t->owner); |
@@ -503,15 +594,14 @@ static void do_lit_tasklet(struct klmirqd_info* which, | |||
503 | 594 | ||
504 | // returns 1 if priorities need to be changed to continue processing | 595 | // returns 1 if priorities need to be changed to continue processing |
505 | // pending tasklets. | 596 | // pending tasklets. |
506 | static int do_litirq(struct klmirqd_info* which) | 597 | static void do_litirq(struct klmirqd_info* which) |
507 | { | 598 | { |
508 | u32 pending; | 599 | u32 pending; |
509 | int resched = 0; | ||
510 | 600 | ||
511 | if(in_interrupt()) | 601 | if(in_interrupt()) |
512 | { | 602 | { |
513 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); | 603 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); |
514 | return(0); | 604 | return; |
515 | } | 605 | } |
516 | 606 | ||
517 | if(which->klmirqd != current) | 607 | if(which->klmirqd != current) |
@@ -519,59 +609,40 @@ static int do_litirq(struct klmirqd_info* which) | |||
519 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", | 609 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", |
520 | __FUNCTION__, current->comm, current->pid, | 610 | __FUNCTION__, current->comm, current->pid, |
521 | which->klmirqd->comm, which->klmirqd->pid); | 611 | which->klmirqd->comm, which->klmirqd->pid); |
522 | return(0); | 612 | return; |
523 | } | 613 | } |
524 | 614 | ||
525 | if(!is_realtime(current)) | 615 | if(!is_realtime(current)) |
526 | { | 616 | { |
527 | TRACE_CUR("%s: exiting early: klmirqd is not real-time. Sched Policy = %d\n", | 617 | TRACE_CUR("%s: exiting early: klmirqd is not real-time. Sched Policy = %d\n", |
528 | __FUNCTION__, current->policy); | 618 | __FUNCTION__, current->policy); |
529 | return(0); | 619 | return; |
530 | } | 620 | } |
531 | 621 | ||
532 | 622 | ||
533 | /* We only handle tasklets & work objects, no need for RCU triggers? */ | 623 | /* We only handle tasklets & work objects, no need for RCU triggers? */ |
534 | 624 | ||
535 | pending = litirq_pending(which); | 625 | pending = litirq_pending(which); |
536 | if(pending) | 626 | if(pending) { |
537 | { | ||
538 | /* extract the work to do and do it! */ | 627 | /* extract the work to do and do it! */ |
539 | if(pending & LIT_TASKLET_HI) | 628 | if(pending & LIT_TASKLET_HI) { |
540 | { | ||
541 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); | 629 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); |
542 | do_lit_tasklet(which, &which->pending_tasklets_hi); | 630 | do_lit_tasklet(which, &which->pending_tasklets_hi); |
543 | resched = tasklet_ownership_change(which, LIT_TASKLET_HI); | ||
544 | |||
545 | if(resched) | ||
546 | { | ||
547 | TRACE_CUR("%s: HI tasklets of another owner remain. " | ||
548 | "Skipping any LOW tasklets.\n", __FUNCTION__); | ||
549 | } | ||
550 | } | 631 | } |
551 | 632 | ||
552 | if(!resched && (pending & LIT_TASKLET_LOW)) | 633 | if(pending & LIT_TASKLET_LOW) { |
553 | { | ||
554 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); | 634 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); |
555 | do_lit_tasklet(which, &which->pending_tasklets); | 635 | do_lit_tasklet(which, &which->pending_tasklets); |
556 | resched = tasklet_ownership_change(which, LIT_TASKLET_LOW); | ||
557 | |||
558 | if(resched) | ||
559 | { | ||
560 | TRACE_CUR("%s: LOW tasklets of another owner remain. " | ||
561 | "Skipping any work objects.\n", __FUNCTION__); | ||
562 | } | ||
563 | } | 636 | } |
564 | } | 637 | } |
565 | |||
566 | return(resched); | ||
567 | } | 638 | } |
568 | 639 | ||
569 | 640 | ||
570 | static void do_work(struct klmirqd_info* which) | 641 | static void do_work(struct klmirqd_info* which) |
571 | { | 642 | { |
572 | unsigned long flags; | 643 | unsigned long flags; |
573 | work_func_t f; | ||
574 | struct work_struct* work; | 644 | struct work_struct* work; |
645 | work_func_t f; | ||
575 | 646 | ||
576 | // only execute one work-queue item to yield to tasklets. | 647 | // only execute one work-queue item to yield to tasklets. |
577 | // ...is this a good idea, or should we just batch them? | 648 | // ...is this a good idea, or should we just batch them? |
@@ -594,125 +665,58 @@ static void do_work(struct klmirqd_info* which) | |||
594 | raw_spin_unlock_irqrestore(&which->lock, flags); | 665 | raw_spin_unlock_irqrestore(&which->lock, flags); |
595 | 666 | ||
596 | 667 | ||
668 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
669 | // do the work! | ||
670 | work_clear_pending(work); | ||
671 | f = work->func; | ||
672 | f(work); /* can't touch 'work' after this point, | ||
673 | the user may have freed it. */ | ||
597 | 674 | ||
598 | /* safe to read current_owner outside of lock since only this thread | 675 | atomic_dec(&which->num_work_pending); |
599 | may write to the pointer. */ | ||
600 | if(work->owner == which->current_owner) | ||
601 | { | ||
602 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
603 | // do the work! | ||
604 | work_clear_pending(work); | ||
605 | f = work->func; | ||
606 | f(work); /* can't touch 'work' after this point, | ||
607 | the user may have freed it. */ | ||
608 | |||
609 | atomic_dec(&which->num_work_pending); | ||
610 | } | ||
611 | else | ||
612 | { | ||
613 | TRACE_CUR("%s: Could not invoke work object. Requeuing.\n", | ||
614 | __FUNCTION__); | ||
615 | ___litmus_schedule_work(work, which, 0); | ||
616 | } | ||
617 | 676 | ||
618 | no_work: | 677 | no_work: |
619 | return; | 678 | return; |
620 | } | 679 | } |
621 | 680 | ||
622 | 681 | ||
623 | static int set_litmus_daemon_sched(void) | ||
624 | { | ||
625 | /* set up a daemon job that will never complete. | ||
626 | it should only ever run on behalf of another | ||
627 | real-time task. | ||
628 | |||
629 | TODO: Transition to a new job whenever a | ||
630 | new tasklet is handled */ | ||
631 | |||
632 | int ret = 0; | ||
633 | |||
634 | struct rt_task tp = { | ||
635 | .exec_cost = 0, | ||
636 | .period = 1000000000, /* dummy 1 second period */ | ||
637 | .phase = 0, | ||
638 | .cpu = task_cpu(current), | ||
639 | .budget_policy = NO_ENFORCEMENT, | ||
640 | .cls = RT_CLASS_BEST_EFFORT | ||
641 | }; | ||
642 | |||
643 | struct sched_param param = { .sched_priority = 0}; | ||
644 | |||
645 | |||
646 | /* set task params, mark as proxy thread, and init other data */ | ||
647 | tsk_rt(current)->task_params = tp; | ||
648 | tsk_rt(current)->is_proxy_thread = 1; | ||
649 | tsk_rt(current)->cur_klmirqd = NULL; | ||
650 | mutex_init(&tsk_rt(current)->klmirqd_sem); | ||
651 | atomic_set(&tsk_rt(current)->klmirqd_sem_stat, NOT_HELD); | ||
652 | |||
653 | /* inform the OS we're SCHED_LITMUS -- | ||
654 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
655 | sched_setscheduler_nocheck(current, SCHED_LITMUS, ¶m); | ||
656 | |||
657 | return ret; | ||
658 | } | ||
659 | |||
660 | static void enter_execution_phase(struct klmirqd_info* which, | ||
661 | struct mutex* sem, | ||
662 | struct task_struct* t) | ||
663 | { | ||
664 | TRACE_CUR("%s: Trying to enter execution phase. " | ||
665 | "Acquiring semaphore of %s/%d\n", __FUNCTION__, | ||
666 | t->comm, t->pid); | ||
667 | down_and_set_stat(current, HELD, sem); | ||
668 | TRACE_CUR("%s: Execution phase entered! " | ||
669 | "Acquired semaphore of %s/%d\n", __FUNCTION__, | ||
670 | t->comm, t->pid); | ||
671 | } | ||
672 | |||
673 | static void exit_execution_phase(struct klmirqd_info* which, | ||
674 | struct mutex* sem, | ||
675 | struct task_struct* t) | ||
676 | { | ||
677 | TRACE_CUR("%s: Exiting execution phase. " | ||
678 | "Releasing semaphore of %s/%d\n", __FUNCTION__, | ||
679 | t->comm, t->pid); | ||
680 | if(atomic_read(&tsk_rt(current)->klmirqd_sem_stat) == HELD) | ||
681 | { | ||
682 | up_and_set_stat(current, NOT_HELD, sem); | ||
683 | TRACE_CUR("%s: Execution phase exited! " | ||
684 | "Released semaphore of %s/%d\n", __FUNCTION__, | ||
685 | t->comm, t->pid); | ||
686 | } | ||
687 | else | ||
688 | { | ||
689 | TRACE_CUR("%s: COULDN'T RELEASE SEMAPHORE BECAUSE ONE IS NOT HELD!\n", __FUNCTION__); | ||
690 | } | ||
691 | } | ||
692 | 682 | ||
693 | /* main loop for klitsoftirqd */ | 683 | /* main loop for klitsoftirqd */ |
694 | static int run_klmirqd(void* unused) | 684 | static int run_klmirqd(void* callback) |
695 | { | 685 | { |
696 | struct klmirqd_info* which = &klmirqds[klmirqd_id(current)]; | 686 | int retval = 0; |
697 | struct mutex* sem; | 687 | struct klmirqd_info* info = NULL; |
698 | struct task_struct* owner; | 688 | klmirqd_callback_t* cb = (klmirqd_callback_t*)(callback); |
699 | 689 | ||
700 | int rt_status = set_litmus_daemon_sched(); | 690 | retval = set_litmus_daemon_sched(current); |
701 | 691 | if (retval != 0) { | |
702 | if(rt_status != 0) | ||
703 | { | ||
704 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); | 692 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); |
705 | goto rt_failed; | 693 | goto failed; |
706 | } | 694 | } |
707 | 695 | ||
708 | atomic_inc(&num_ready_klmirqds); | 696 | retval = register_klmirqd(current); |
697 | if (retval != 0) { | ||
698 | TRACE_CUR("%s: Failed to become a klmirqd thread.\n", __FUNCTION__); | ||
699 | goto failed; | ||
700 | } | ||
701 | |||
702 | if (cb && cb->func) { | ||
703 | retval = cb->func(cb->arg); | ||
704 | if (retval != 0) { | ||
705 | TRACE_CUR("%s: klmirqd callback reported failure. retval = %d\n", __FUNCTION__, retval); | ||
706 | goto failed_unregister; | ||
707 | } | ||
708 | } | ||
709 | |||
710 | /* enter the interrupt handling workloop */ | ||
711 | |||
712 | info = tsk_rt(current)->klmirqd_info; | ||
709 | 713 | ||
710 | set_current_state(TASK_INTERRUPTIBLE); | 714 | set_current_state(TASK_INTERRUPTIBLE); |
711 | 715 | ||
712 | while (!kthread_should_stop()) | 716 | while (!kthread_should_stop()) |
713 | { | 717 | { |
714 | preempt_disable(); | 718 | preempt_disable(); |
715 | if (!litirq_pending(which)) | 719 | if (!litirq_pending(info)) |
716 | { | 720 | { |
717 | /* sleep for work */ | 721 | /* sleep for work */ |
718 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", | 722 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", |
@@ -731,17 +735,10 @@ static int run_klmirqd(void* unused) | |||
731 | 735 | ||
732 | __set_current_state(TASK_RUNNING); | 736 | __set_current_state(TASK_RUNNING); |
733 | 737 | ||
734 | while (litirq_pending_and_sem_and_owner(which, &sem, &owner)) | 738 | while (litirq_pending(info)) |
735 | { | 739 | { |
736 | int needs_resched = 0; | ||
737 | |||
738 | preempt_enable_no_resched(); | 740 | preempt_enable_no_resched(); |
739 | 741 | ||
740 | BUG_ON(sem == NULL); | ||
741 | |||
742 | // wait to enter execution phase; wait for 'current_owner' to block. | ||
743 | enter_execution_phase(which, sem, owner); | ||
744 | |||
745 | if(kthread_should_stop()) | 742 | if(kthread_should_stop()) |
746 | { | 743 | { |
747 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | 744 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); |
@@ -753,36 +750,23 @@ static int run_klmirqd(void* unused) | |||
753 | /* Double check that there's still pending work and the owner hasn't | 750 | /* Double check that there's still pending work and the owner hasn't |
754 | * changed. Pending items may have been flushed while we were sleeping. | 751 | * changed. Pending items may have been flushed while we were sleeping. |
755 | */ | 752 | */ |
756 | if(litirq_pending_with_owner(which, owner)) | 753 | if(litirq_pending(info)) |
757 | { | 754 | { |
758 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", | 755 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", |
759 | __FUNCTION__); | 756 | __FUNCTION__); |
760 | 757 | ||
761 | needs_resched = do_litirq(which); | 758 | do_litirq(info); |
762 | 759 | ||
763 | preempt_enable_no_resched(); | 760 | preempt_enable_no_resched(); |
764 | 761 | ||
765 | // work objects are preemptible. | 762 | // work objects are preemptible. |
766 | if(!needs_resched) | 763 | do_work(info); |
767 | { | ||
768 | do_work(which); | ||
769 | } | ||
770 | |||
771 | // exit execution phase. | ||
772 | exit_execution_phase(which, sem, owner); | ||
773 | |||
774 | TRACE_CUR("%s: Setting up next priority.\n", __FUNCTION__); | ||
775 | reeval_prio(which); /* check if we need to change priority here */ | ||
776 | } | 764 | } |
777 | else | 765 | else |
778 | { | 766 | { |
779 | TRACE_CUR("%s: Pending work was flushed! Prev owner was %s/%d\n", | 767 | TRACE_CUR("%s: Pending work was flushed!\n", __FUNCTION__); |
780 | __FUNCTION__, | ||
781 | owner->comm, owner->pid); | ||
782 | preempt_enable_no_resched(); | ||
783 | 768 | ||
784 | // exit execution phase. | 769 | preempt_enable_no_resched(); |
785 | exit_execution_phase(which, sem, owner); | ||
786 | } | 770 | } |
787 | 771 | ||
788 | cond_resched(); | 772 | cond_resched(); |
@@ -793,183 +777,39 @@ static int run_klmirqd(void* unused) | |||
793 | } | 777 | } |
794 | __set_current_state(TASK_RUNNING); | 778 | __set_current_state(TASK_RUNNING); |
795 | 779 | ||
796 | atomic_dec(&num_ready_klmirqds); | 780 | failed_unregister: |
781 | /* remove our registration from klmirqd */ | ||
782 | unregister_klmirqd(current); | ||
797 | 783 | ||
798 | rt_failed: | 784 | failed: |
799 | litmus_exit_task(current); | 785 | litmus_exit_task(current); |
800 | 786 | ||
801 | return rt_status; | 787 | return retval; |
802 | } | 788 | } |
803 | 789 | ||
804 | 790 | ||
805 | struct klmirqd_launch_data | 791 | void flush_pending(struct task_struct* tsk) |
806 | { | ||
807 | int* cpu_affinity; | ||
808 | struct work_struct work; | ||
809 | }; | ||
810 | |||
811 | /* executed by a kworker from workqueues */ | ||
812 | static void launch_klmirqd(struct work_struct *work) | ||
813 | { | 792 | { |
814 | int i; | 793 | unsigned long flags; |
815 | 794 | struct tasklet_struct *list; | |
816 | struct klmirqd_launch_data* launch_data = | 795 | u32 work_flushed = 0; |
817 | container_of(work, struct klmirqd_launch_data, work); | ||
818 | |||
819 | TRACE("%s: Creating %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
820 | |||
821 | /* create the daemon threads */ | ||
822 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
823 | { | ||
824 | if(launch_data->cpu_affinity) | ||
825 | { | ||
826 | klmirqds[i].klmirqd = | ||
827 | kthread_create( | ||
828 | run_klmirqd, | ||
829 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
830 | (void*)(long long)launch_data->cpu_affinity[i], | ||
831 | "klmirqd_th%d/%d", | ||
832 | i, | ||
833 | launch_data->cpu_affinity[i]); | ||
834 | |||
835 | /* litmus will put is in the right cluster. */ | ||
836 | kthread_bind(klmirqds[i].klmirqd, launch_data->cpu_affinity[i]); | ||
837 | } | ||
838 | else | ||
839 | { | ||
840 | klmirqds[i].klmirqd = | ||
841 | kthread_create( | ||
842 | run_klmirqd, | ||
843 | /* treat the affinity as a pointer, we'll cast it back later */ | ||
844 | (void*)(long long)(-1), | ||
845 | "klmirqd_th%d", | ||
846 | i); | ||
847 | } | ||
848 | } | ||
849 | |||
850 | TRACE("%s: Launching %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
851 | |||
852 | /* unleash the daemons */ | ||
853 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
854 | { | ||
855 | wake_up_process(klmirqds[i].klmirqd); | ||
856 | } | ||
857 | |||
858 | if(launch_data->cpu_affinity) | ||
859 | kfree(launch_data->cpu_affinity); | ||
860 | kfree(launch_data); | ||
861 | } | ||
862 | 796 | ||
797 | struct klmirqd_info *which; | ||
863 | 798 | ||
864 | void spawn_klmirqd(int* affinity) | 799 | if (!tsk_rt(tsk)->is_interrupt_thread) { |
865 | { | 800 | TRACE("%s/%d is not a proxy thread\n", tsk->comm, tsk->pid); |
866 | int i; | 801 | WARN_ON(1); |
867 | struct klmirqd_launch_data* delayed_launch; | ||
868 | |||
869 | if(atomic_read(&num_ready_klmirqds) != 0) | ||
870 | { | ||
871 | TRACE("%s: At least one klmirqd is already running! Need to call kill_klmirqd()?\n"); | ||
872 | return; | 802 | return; |
873 | } | 803 | } |
874 | 804 | ||
875 | /* init the tasklet & work queues */ | 805 | which = tsk_rt(tsk)->klmirqd_info; |
876 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | 806 | if (!which) { |
877 | { | 807 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); |
878 | klmirqds[i].terminating = 0; | 808 | WARN_ON(1); |
879 | klmirqds[i].pending = 0; | 809 | return; |
880 | |||
881 | klmirqds[i].num_hi_pending.counter = 0; | ||
882 | klmirqds[i].num_low_pending.counter = 0; | ||
883 | klmirqds[i].num_work_pending.counter = 0; | ||
884 | |||
885 | klmirqds[i].pending_tasklets_hi.head = NULL; | ||
886 | klmirqds[i].pending_tasklets_hi.tail = &klmirqds[i].pending_tasklets_hi.head; | ||
887 | |||
888 | klmirqds[i].pending_tasklets.head = NULL; | ||
889 | klmirqds[i].pending_tasklets.tail = &klmirqds[i].pending_tasklets.head; | ||
890 | |||
891 | INIT_LIST_HEAD(&klmirqds[i].worklist); | ||
892 | |||
893 | raw_spin_lock_init(&klmirqds[i].lock); | ||
894 | } | ||
895 | |||
896 | /* wait to flush the initializations to memory since other threads | ||
897 | will access it. */ | ||
898 | mb(); | ||
899 | |||
900 | /* tell a work queue to launch the threads. we can't make scheduling | ||
901 | calls since we're in an atomic state. */ | ||
902 | TRACE("%s: Setting callback up to launch klmirqds\n", __FUNCTION__); | ||
903 | delayed_launch = kmalloc(sizeof(struct klmirqd_launch_data), GFP_ATOMIC); | ||
904 | if(affinity) | ||
905 | { | ||
906 | delayed_launch->cpu_affinity = | ||
907 | kmalloc(sizeof(int)*NR_LITMUS_SOFTIRQD, GFP_ATOMIC); | ||
908 | |||
909 | memcpy(delayed_launch->cpu_affinity, affinity, | ||
910 | sizeof(int)*NR_LITMUS_SOFTIRQD); | ||
911 | } | ||
912 | else | ||
913 | { | ||
914 | delayed_launch->cpu_affinity = NULL; | ||
915 | } | ||
916 | INIT_WORK(&delayed_launch->work, launch_klmirqd); | ||
917 | schedule_work(&delayed_launch->work); | ||
918 | } | ||
919 | |||
920 | |||
921 | void kill_klmirqd(void) | ||
922 | { | ||
923 | if(!klmirqd_is_dead()) | ||
924 | { | ||
925 | int i; | ||
926 | |||
927 | TRACE("%s: Killing %d klmirqds\n", __FUNCTION__, NR_LITMUS_SOFTIRQD); | ||
928 | |||
929 | for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i) | ||
930 | { | ||
931 | if(klmirqds[i].terminating != 1) | ||
932 | { | ||
933 | klmirqds[i].terminating = 1; | ||
934 | mb(); /* just to be sure? */ | ||
935 | flush_pending(klmirqds[i].klmirqd, NULL); | ||
936 | |||
937 | /* signal termination */ | ||
938 | kthread_stop(klmirqds[i].klmirqd); | ||
939 | } | ||
940 | } | ||
941 | } | 810 | } |
942 | } | ||
943 | 811 | ||
944 | 812 | ||
945 | int klmirqd_is_ready(void) | ||
946 | { | ||
947 | return(atomic_read(&num_ready_klmirqds) == NR_LITMUS_SOFTIRQD); | ||
948 | } | ||
949 | |||
950 | int klmirqd_is_dead(void) | ||
951 | { | ||
952 | return(atomic_read(&num_ready_klmirqds) == 0); | ||
953 | } | ||
954 | |||
955 | |||
956 | struct task_struct* get_klmirqd(unsigned int k_id) | ||
957 | { | ||
958 | return(klmirqds[k_id].klmirqd); | ||
959 | } | ||
960 | |||
961 | |||
962 | void flush_pending(struct task_struct* klmirqd_thread, | ||
963 | struct task_struct* owner) | ||
964 | { | ||
965 | unsigned int k_id = klmirqd_id(klmirqd_thread); | ||
966 | struct klmirqd_info *which = &klmirqds[k_id]; | ||
967 | |||
968 | unsigned long flags; | ||
969 | struct tasklet_struct *list; | ||
970 | |||
971 | u32 work_flushed = 0; | ||
972 | |||
973 | raw_spin_lock_irqsave(&which->lock, flags); | 813 | raw_spin_lock_irqsave(&which->lock, flags); |
974 | 814 | ||
975 | //__dump_state(which, "flush_pending: before"); | 815 | //__dump_state(which, "flush_pending: before"); |
@@ -990,35 +830,27 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
990 | struct tasklet_struct *t = list; | 830 | struct tasklet_struct *t = list; |
991 | list = list->next; | 831 | list = list->next; |
992 | 832 | ||
993 | if(likely((t->owner == owner) || (owner == NULL))) | 833 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) |
994 | { | 834 | { |
995 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | 835 | BUG(); |
996 | { | 836 | } |
997 | BUG(); | ||
998 | } | ||
999 | 837 | ||
1000 | work_flushed |= LIT_TASKLET_HI; | 838 | work_flushed |= LIT_TASKLET_HI; |
1001 | 839 | ||
1002 | t->owner = NULL; | 840 | t->owner = NULL; |
1003 | 841 | ||
1004 | // WTF? | 842 | // WTF? |
1005 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 843 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
1006 | { | 844 | { |
1007 | atomic_dec(&which->num_hi_pending); | 845 | atomic_dec(&which->num_hi_pending); |
1008 | ___tasklet_hi_schedule(t); | 846 | ___tasklet_hi_schedule(t); |
1009 | } | ||
1010 | else | ||
1011 | { | ||
1012 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); | ||
1013 | BUG(); | ||
1014 | } | ||
1015 | } | 847 | } |
1016 | else | 848 | else |
1017 | { | 849 | { |
1018 | TRACE("%s: Could not flush a HI tasklet.\n", __FUNCTION__); | 850 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); |
1019 | // put back on queue. | 851 | BUG(); |
1020 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
1021 | } | 852 | } |
853 | |||
1022 | } | 854 | } |
1023 | } | 855 | } |
1024 | 856 | ||
@@ -1038,34 +870,25 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
1038 | struct tasklet_struct *t = list; | 870 | struct tasklet_struct *t = list; |
1039 | list = list->next; | 871 | list = list->next; |
1040 | 872 | ||
1041 | if(likely((t->owner == owner) || (owner == NULL))) | 873 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) |
1042 | { | 874 | { |
1043 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | 875 | BUG(); |
1044 | { | 876 | } |
1045 | BUG(); | ||
1046 | } | ||
1047 | 877 | ||
1048 | work_flushed |= LIT_TASKLET_LOW; | 878 | work_flushed |= LIT_TASKLET_LOW; |
1049 | 879 | ||
1050 | t->owner = NULL; | 880 | t->owner = NULL; |
1051 | sched_trace_tasklet_end(owner, 1ul); | 881 | // sched_trace_tasklet_end(owner, 1ul); |
1052 | 882 | ||
1053 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | 883 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) |
1054 | { | 884 | { |
1055 | atomic_dec(&which->num_low_pending); | 885 | atomic_dec(&which->num_low_pending); |
1056 | ___tasklet_schedule(t); | 886 | ___tasklet_schedule(t); |
1057 | } | ||
1058 | else | ||
1059 | { | ||
1060 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); | ||
1061 | BUG(); | ||
1062 | } | ||
1063 | } | 887 | } |
1064 | else | 888 | else |
1065 | { | 889 | { |
1066 | TRACE("%s: Could not flush a LOW tasklet.\n", __FUNCTION__); | 890 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); |
1067 | // put back on queue | 891 | BUG(); |
1068 | ___litmus_tasklet_schedule(t, which, 0); | ||
1069 | } | 892 | } |
1070 | } | 893 | } |
1071 | } | 894 | } |
@@ -1083,21 +906,12 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
1083 | list_first_entry(&which->worklist, struct work_struct, entry); | 906 | list_first_entry(&which->worklist, struct work_struct, entry); |
1084 | list_del_init(&work->entry); | 907 | list_del_init(&work->entry); |
1085 | 908 | ||
1086 | if(likely((work->owner == owner) || (owner == NULL))) | 909 | work_flushed |= LIT_WORK; |
1087 | { | 910 | atomic_dec(&which->num_work_pending); |
1088 | work_flushed |= LIT_WORK; | ||
1089 | atomic_dec(&which->num_work_pending); | ||
1090 | 911 | ||
1091 | work->owner = NULL; | 912 | work->owner = NULL; |
1092 | sched_trace_work_end(owner, current, 1ul); | 913 | // sched_trace_work_end(owner, current, 1ul); |
1093 | __schedule_work(work); | 914 | __schedule_work(work); |
1094 | } | ||
1095 | else | ||
1096 | { | ||
1097 | TRACE("%s: Could not flush a work object.\n", __FUNCTION__); | ||
1098 | // put back on queue | ||
1099 | ___litmus_schedule_work(work, which, 0); | ||
1100 | } | ||
1101 | } | 915 | } |
1102 | } | 916 | } |
1103 | 917 | ||
@@ -1106,22 +920,6 @@ void flush_pending(struct task_struct* klmirqd_thread, | |||
1106 | 920 | ||
1107 | mb(); /* commit changes to pending flags */ | 921 | mb(); /* commit changes to pending flags */ |
1108 | 922 | ||
1109 | /* reset the scheduling priority */ | ||
1110 | if(work_flushed) | ||
1111 | { | ||
1112 | __reeval_prio(which); | ||
1113 | |||
1114 | /* Try to offload flushed tasklets to Linux's ksoftirqd. */ | ||
1115 | if(work_flushed & (LIT_TASKLET_LOW | LIT_TASKLET_HI)) | ||
1116 | { | ||
1117 | wakeup_softirqd(); | ||
1118 | } | ||
1119 | } | ||
1120 | else | ||
1121 | { | ||
1122 | TRACE_CUR("%s: no work flushed, so __reeval_prio() skipped\n", __FUNCTION__); | ||
1123 | } | ||
1124 | |||
1125 | raw_spin_unlock_irqrestore(&which->lock, flags); | 923 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1126 | } | 924 | } |
1127 | 925 | ||
@@ -1161,39 +959,27 @@ static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | |||
1161 | raw_spin_unlock_irqrestore(&which->lock, flags); | 959 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1162 | } | 960 | } |
1163 | 961 | ||
1164 | int __litmus_tasklet_schedule(struct tasklet_struct *t, unsigned int k_id) | 962 | |
963 | int __litmus_tasklet_schedule(struct tasklet_struct *t, struct task_struct* klmirqd_thread) | ||
1165 | { | 964 | { |
1166 | int ret = 0; /* assume failure */ | 965 | int ret = 0; /* assume failure */ |
1167 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | 966 | struct klmirqd_info* info; |
1168 | { | ||
1169 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1170 | BUG(); | ||
1171 | } | ||
1172 | 967 | ||
1173 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | 968 | if (unlikely(!is_realtime(klmirqd_thread) || |
1174 | { | 969 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1175 | TRACE("%s: No klmirqd_th%d!\n", __FUNCTION__, k_id); | 970 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1176 | BUG(); | 971 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1177 | } | 972 | return ret; |
973 | } | ||
1178 | 974 | ||
1179 | if(likely(!klmirqds[k_id].terminating)) | 975 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1180 | { | ||
1181 | /* Can't accept tasklets while we're processing a workqueue | ||
1182 | because they're handled by the same thread. This case is | ||
1183 | very RARE. | ||
1184 | 976 | ||
1185 | TODO: Use a separate thread for work objects!!!!!! | 977 | if (likely(!info->terminating)) { |
1186 | */ | 978 | ret = 1; |
1187 | if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0)) | 979 | ___litmus_tasklet_schedule(t, info, 1); |
1188 | { | 980 | } |
1189 | ret = 1; | 981 | else { |
1190 | ___litmus_tasklet_schedule(t, &klmirqds[k_id], 1); | 982 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1191 | } | ||
1192 | else | ||
1193 | { | ||
1194 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1195 | __FUNCTION__); | ||
1196 | } | ||
1197 | } | 983 | } |
1198 | return(ret); | 984 | return(ret); |
1199 | } | 985 | } |
@@ -1230,100 +1016,77 @@ static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | |||
1230 | raw_spin_unlock_irqrestore(&which->lock, flags); | 1016 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1231 | } | 1017 | } |
1232 | 1018 | ||
1233 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, unsigned int k_id) | 1019 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, struct task_struct* klmirqd_thread) |
1234 | { | 1020 | { |
1235 | int ret = 0; /* assume failure */ | 1021 | int ret = 0; /* assume failure */ |
1236 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | 1022 | struct klmirqd_info* info; |
1237 | { | ||
1238 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | ||
1239 | BUG(); | ||
1240 | } | ||
1241 | 1023 | ||
1242 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | 1024 | if (unlikely(!is_realtime(klmirqd_thread) || |
1243 | { | 1025 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1244 | TRACE("%s: No klmirqd_th%d!\n", __FUNCTION__, k_id); | 1026 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1245 | BUG(); | 1027 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1246 | } | 1028 | return ret; |
1029 | } | ||
1247 | 1030 | ||
1248 | if(unlikely(!klmirqd_is_ready())) | 1031 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1249 | { | ||
1250 | TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id); | ||
1251 | BUG(); | ||
1252 | } | ||
1253 | 1032 | ||
1254 | if(likely(!klmirqds[k_id].terminating)) | 1033 | if (likely(!info->terminating)) { |
1255 | { | 1034 | ret = 1; |
1256 | if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0)) | 1035 | ___litmus_tasklet_hi_schedule(t, info, 1); |
1257 | { | ||
1258 | ret = 1; | ||
1259 | ___litmus_tasklet_hi_schedule(t, &klmirqds[k_id], 1); | ||
1260 | } | ||
1261 | else | ||
1262 | { | ||
1263 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1264 | __FUNCTION__); | ||
1265 | } | ||
1266 | } | 1036 | } |
1037 | else { | ||
1038 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
1039 | } | ||
1040 | |||
1267 | return(ret); | 1041 | return(ret); |
1268 | } | 1042 | } |
1269 | 1043 | ||
1270 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); | 1044 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); |
1271 | 1045 | ||
1272 | 1046 | ||
1273 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, unsigned int k_id) | 1047 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, struct task_struct* klmirqd_thread) |
1274 | { | 1048 | { |
1275 | int ret = 0; /* assume failure */ | 1049 | int ret = 0; /* assume failure */ |
1276 | u32 old_pending; | 1050 | u32 old_pending; |
1051 | struct klmirqd_info* info; | ||
1277 | 1052 | ||
1278 | BUG_ON(!irqs_disabled()); | 1053 | BUG_ON(!irqs_disabled()); |
1279 | 1054 | ||
1280 | if(unlikely((t->owner == NULL) || !is_realtime(t->owner))) | 1055 | if (unlikely(!is_realtime(klmirqd_thread) || |
1281 | { | 1056 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1282 | TRACE("%s: No owner associated with this tasklet!\n", __FUNCTION__); | 1057 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1283 | BUG(); | 1058 | TRACE("%s: %s/%d can't handle tasklets\n", klmirqd_thread->comm, klmirqd_thread->pid); |
1284 | } | 1059 | return ret; |
1060 | } | ||
1285 | 1061 | ||
1286 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | 1062 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1287 | { | ||
1288 | TRACE("%s: No klmirqd_th%u!\n", __FUNCTION__, k_id); | ||
1289 | BUG(); | ||
1290 | } | ||
1291 | 1063 | ||
1292 | if(unlikely(!klmirqd_is_ready())) | 1064 | if (likely(!info->terminating)) { |
1293 | { | ||
1294 | TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id); | ||
1295 | BUG(); | ||
1296 | } | ||
1297 | 1065 | ||
1298 | if(likely(!klmirqds[k_id].terminating)) | 1066 | raw_spin_lock(&info->lock); |
1299 | { | ||
1300 | raw_spin_lock(&klmirqds[k_id].lock); | ||
1301 | 1067 | ||
1302 | if(likely(atomic_read(&klmirqds[k_id].num_work_pending) == 0)) | 1068 | ret = 1; // success! |
1303 | { | ||
1304 | ret = 1; // success! | ||
1305 | 1069 | ||
1306 | t->next = klmirqds[k_id].pending_tasklets_hi.head; | 1070 | t->next = info->pending_tasklets_hi.head; |
1307 | klmirqds[k_id].pending_tasklets_hi.head = t; | 1071 | info->pending_tasklets_hi.head = t; |
1308 | 1072 | ||
1309 | old_pending = klmirqds[k_id].pending; | 1073 | old_pending = info->pending; |
1310 | klmirqds[k_id].pending |= LIT_TASKLET_HI; | 1074 | info->pending |= LIT_TASKLET_HI; |
1311 | 1075 | ||
1312 | atomic_inc(&klmirqds[k_id].num_hi_pending); | 1076 | atomic_inc(&info->num_hi_pending); |
1313 | 1077 | ||
1314 | mb(); | 1078 | mb(); |
1315 | 1079 | ||
1316 | if(!old_pending) | 1080 | if(!old_pending) { |
1317 | wakeup_litirqd_locked(&klmirqds[k_id]); /* wake up the klmirqd */ | 1081 | wakeup_litirqd_locked(info); /* wake up the klmirqd */ |
1318 | } | ||
1319 | else | ||
1320 | { | ||
1321 | TRACE("%s: rejected tasklet because of pending work.\n", | ||
1322 | __FUNCTION__); | ||
1323 | } | 1082 | } |
1324 | 1083 | ||
1325 | raw_spin_unlock(&klmirqds[k_id].lock); | 1084 | raw_spin_unlock(&info->lock); |
1326 | } | 1085 | } |
1086 | else { | ||
1087 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
1088 | } | ||
1089 | |||
1327 | return(ret); | 1090 | return(ret); |
1328 | } | 1091 | } |
1329 | 1092 | ||
@@ -1358,225 +1121,30 @@ static void ___litmus_schedule_work(struct work_struct *w, | |||
1358 | raw_spin_unlock_irqrestore(&which->lock, flags); | 1121 | raw_spin_unlock_irqrestore(&which->lock, flags); |
1359 | } | 1122 | } |
1360 | 1123 | ||
1361 | int __litmus_schedule_work(struct work_struct *w, unsigned int k_id) | 1124 | int __litmus_schedule_work(struct work_struct *w, struct task_struct* klmirqd_thread) |
1362 | { | 1125 | { |
1363 | int ret = 1; /* assume success */ | 1126 | int ret = 1; /* assume success */ |
1364 | if(unlikely(w->owner == NULL) || !is_realtime(w->owner)) | 1127 | struct klmirqd_info* info; |
1365 | { | ||
1366 | TRACE("%s: No owner associated with this work object!\n", __FUNCTION__); | ||
1367 | BUG(); | ||
1368 | } | ||
1369 | |||
1370 | if(unlikely(k_id >= NR_LITMUS_SOFTIRQD)) | ||
1371 | { | ||
1372 | TRACE("%s: No klmirqd_th%u!\n", k_id); | ||
1373 | BUG(); | ||
1374 | } | ||
1375 | |||
1376 | if(unlikely(!klmirqd_is_ready())) | ||
1377 | { | ||
1378 | TRACE("%s: klmirqd is not ready!\n", __FUNCTION__, k_id); | ||
1379 | BUG(); | ||
1380 | } | ||
1381 | |||
1382 | if(likely(!klmirqds[k_id].terminating)) | ||
1383 | ___litmus_schedule_work(w, &klmirqds[k_id], 1); | ||
1384 | else | ||
1385 | ret = 0; | ||
1386 | return(ret); | ||
1387 | } | ||
1388 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
1389 | |||
1390 | |||
1391 | static int set_klmirqd_sem_status(unsigned long stat) | ||
1392 | { | ||
1393 | TRACE_CUR("SETTING STATUS FROM %d TO %d\n", | ||
1394 | atomic_read(&tsk_rt(current)->klmirqd_sem_stat), | ||
1395 | stat); | ||
1396 | atomic_set(&tsk_rt(current)->klmirqd_sem_stat, stat); | ||
1397 | //mb(); | ||
1398 | |||
1399 | return(0); | ||
1400 | } | ||
1401 | |||
1402 | static int set_klmirqd_sem_status_if_not_held(unsigned long stat) | ||
1403 | { | ||
1404 | if(atomic_read(&tsk_rt(current)->klmirqd_sem_stat) != HELD) | ||
1405 | { | ||
1406 | return(set_klmirqd_sem_status(stat)); | ||
1407 | } | ||
1408 | return(-1); | ||
1409 | } | ||
1410 | |||
1411 | |||
1412 | void __down_and_reset_and_set_stat(struct task_struct* t, | ||
1413 | enum klmirqd_sem_status to_reset, | ||
1414 | enum klmirqd_sem_status to_set, | ||
1415 | struct mutex* sem) | ||
1416 | { | ||
1417 | #if 0 | ||
1418 | struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem); | ||
1419 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1420 | |||
1421 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1422 | __FUNCTION__, task->comm, task->pid); | ||
1423 | #endif | ||
1424 | |||
1425 | mutex_lock_sfx(sem, | ||
1426 | set_klmirqd_sem_status_if_not_held, to_reset, | ||
1427 | set_klmirqd_sem_status, to_set); | ||
1428 | #if 0 | ||
1429 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1430 | __FUNCTION__, task->comm, task->pid); | ||
1431 | #endif | ||
1432 | } | ||
1433 | |||
1434 | void down_and_set_stat(struct task_struct* t, | ||
1435 | enum klmirqd_sem_status to_set, | ||
1436 | struct mutex* sem) | ||
1437 | { | ||
1438 | #if 0 | ||
1439 | struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem); | ||
1440 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1441 | |||
1442 | TRACE_CUR("%s: entered. Locking semaphore of %s/%d\n", | ||
1443 | __FUNCTION__, task->comm, task->pid); | ||
1444 | #endif | ||
1445 | |||
1446 | mutex_lock_sfx(sem, | ||
1447 | NULL, 0, | ||
1448 | set_klmirqd_sem_status, to_set); | ||
1449 | |||
1450 | #if 0 | ||
1451 | TRACE_CUR("%s: exiting. Have semaphore of %s/%d\n", | ||
1452 | __FUNCTION__, task->comm, task->pid); | ||
1453 | #endif | ||
1454 | } | ||
1455 | |||
1456 | |||
1457 | void up_and_set_stat(struct task_struct* t, | ||
1458 | enum klmirqd_sem_status to_set, | ||
1459 | struct mutex* sem) | ||
1460 | { | ||
1461 | #if 0 | ||
1462 | struct rt_param* param = container_of(sem, struct rt_param, klmirqd_sem); | ||
1463 | struct task_struct* task = container_of(param, struct task_struct, rt_param); | ||
1464 | |||
1465 | TRACE_CUR("%s: entered. Unlocking semaphore of %s/%d\n", | ||
1466 | __FUNCTION__, | ||
1467 | task->comm, task->pid); | ||
1468 | #endif | ||
1469 | |||
1470 | mutex_unlock_sfx(sem, NULL, 0, | ||
1471 | set_klmirqd_sem_status, to_set); | ||
1472 | |||
1473 | #if 0 | ||
1474 | TRACE_CUR("%s: exiting. Unlocked semaphore of %s/%d\n", | ||
1475 | __FUNCTION__, | ||
1476 | task->comm, task->pid); | ||
1477 | #endif | ||
1478 | } | ||
1479 | |||
1480 | |||
1481 | |||
1482 | void release_klmirqd_lock(struct task_struct* t) | ||
1483 | { | ||
1484 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klmirqd_sem_stat) == HELD)) | ||
1485 | { | ||
1486 | struct mutex* sem; | ||
1487 | struct task_struct* owner = t; | ||
1488 | |||
1489 | if(t->state == TASK_RUNNING) | ||
1490 | { | ||
1491 | TRACE_TASK(t, "NOT giving up klmirqd_sem because we're not blocked!\n"); | ||
1492 | return; | ||
1493 | } | ||
1494 | 1128 | ||
1495 | if(likely(!tsk_rt(t)->is_proxy_thread)) | 1129 | if (unlikely(!is_realtime(klmirqd_thread) || |
1496 | { | 1130 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || |
1497 | sem = &tsk_rt(t)->klmirqd_sem; | 1131 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { |
1498 | } | 1132 | TRACE("%s: %s/%d can't handle work items\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid);
1499 | else | 1133 | return ret; |
1500 | { | ||
1501 | unsigned int k_id = klmirqd_id(t); | ||
1502 | owner = klmirqds[k_id].current_owner; | ||
1503 | |||
1504 | BUG_ON(t != klmirqds[k_id].klmirqd); | ||
1505 | |||
1506 | if(likely(owner)) | ||
1507 | { | ||
1508 | sem = &tsk_rt(owner)->klmirqd_sem; | ||
1509 | } | ||
1510 | else | ||
1511 | { | ||
1512 | BUG(); | ||
1513 | |||
1514 | // We had the rug pulled out from under us. Abort attempt | ||
1515 | // to reacquire the lock since our client no longer needs us. | ||
1516 | TRACE_CUR("HUH?! How did this happen?\n"); | ||
1517 | atomic_set(&tsk_rt(t)->klmirqd_sem_stat, NOT_HELD); | ||
1518 | return; | ||
1519 | } | ||
1520 | } | ||
1521 | |||
1522 | //TRACE_CUR("Releasing semaphore of %s/%d...\n", owner->comm, owner->pid); | ||
1523 | up_and_set_stat(t, NEED_TO_REACQUIRE, sem); | ||
1524 | //TRACE_CUR("Semaphore of %s/%d released!\n", owner->comm, owner->pid); | ||
1525 | } | ||
1526 | /* | ||
1527 | else if(is_realtime(t)) | ||
1528 | { | ||
1529 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klmirqd_sem_stat); | ||
1530 | } | 1134 | } |
1531 | */ | ||
1532 | } | ||
1533 | 1135 | ||
1534 | int reacquire_klmirqd_lock(struct task_struct* t) | 1136 | info = tsk_rt(klmirqd_thread)->klmirqd_info; |
1535 | { | ||
1536 | int ret = 0; | ||
1537 | |||
1538 | if(is_realtime(t) && (atomic_read(&tsk_rt(t)->klmirqd_sem_stat) == NEED_TO_REACQUIRE)) | ||
1539 | { | ||
1540 | struct mutex* sem; | ||
1541 | struct task_struct* owner = t; | ||
1542 | |||
1543 | if(likely(!tsk_rt(t)->is_proxy_thread)) | ||
1544 | { | ||
1545 | sem = &tsk_rt(t)->klmirqd_sem; | ||
1546 | } | ||
1547 | else | ||
1548 | { | ||
1549 | unsigned int k_id = klmirqd_id(t); | ||
1550 | //struct task_struct* owner = klmirqds[k_id].current_owner; | ||
1551 | owner = klmirqds[k_id].current_owner; | ||
1552 | |||
1553 | BUG_ON(t != klmirqds[k_id].klmirqd); | ||
1554 | 1137 | ||
1555 | if(likely(owner)) | ||
1556 | { | ||
1557 | sem = &tsk_rt(owner)->klmirqd_sem; | ||
1558 | } | ||
1559 | else | ||
1560 | { | ||
1561 | // We had the rug pulled out from under us. Abort attempt | ||
1562 | // to reacquire the lock since our client no longer needs us. | ||
1563 | TRACE_CUR("No longer needs to reacquire klmirqd_sem!\n"); | ||
1564 | atomic_set(&tsk_rt(t)->klmirqd_sem_stat, NOT_HELD); | ||
1565 | return(0); | ||
1566 | } | ||
1567 | } | ||
1568 | 1138 | ||
1569 | //TRACE_CUR("Trying to reacquire semaphore of %s/%d\n", owner->comm, owner->pid); | 1139 | if (likely(!info->terminating)) { |
1570 | __down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem); | 1140 | ___litmus_schedule_work(w, info, 1); |
1571 | //TRACE_CUR("Reacquired semaphore %s/%d\n", owner->comm, owner->pid); | ||
1572 | } | 1141 | } |
1573 | /* | 1142 | else { |
1574 | else if(is_realtime(t)) | 1143 | TRACE("%s: Work rejected because %s/%d is terminating\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid);
1575 | { | 1144 | ret = 0; |
1576 | TRACE_CUR("%s: Nothing to do. Stat = %d\n", __FUNCTION__, tsk_rt(t)->klmirqd_sem_stat); | ||
1577 | } | 1145 | } |
1578 | */ | ||
1579 | 1146 | ||
1580 | return(ret); | 1147 | return(ret); |
1581 | } | 1148 | } |
1149 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
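With this change, callers pass the target klmirqd thread's task_struct to __litmus_schedule_work() directly instead of an index into a fixed array of daemons. A hedged usage sketch follows; how the caller obtains the klmirqd pointer is outside this patch, and the fallback to schedule_work() is an assumption, not something this file mandates.

/* Hypothetical caller (not part of this patch). */
static void example_defer_work(struct work_struct *w,
                               struct task_struct *klmirqd)
{
	/* nonzero return means the klmirqd thread accepted the work item */
	if (!__litmus_schedule_work(w, klmirqd)) {
		/* daemon is terminating: fall back to the normal workqueue */
		schedule_work(w);
	}
}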
1582 | 1150 | ||
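For reference, the helpers deleted in this hunk implemented the klmirqd_sem hand-off that the per-thread klmirqd_info registration replaces: a real-time task that blocks while holding the semaphore gives it up and marks itself NEED_TO_REACQUIRE, and on wake-up it re-takes the semaphore through the REACQUIRING state before returning to HELD. A condensed sketch of those two transitions, using only the calls visible in the deleted code above (it will not compile against the new tree, since both helpers are removed):

/* Sketch of the removed hand-off; illustration only. */
static void on_block_while_held(struct task_struct *t, struct mutex *sem)
{
	/* HELD -> NEED_TO_REACQUIRE: release the semaphore while blocked */
	up_and_set_stat(t, NEED_TO_REACQUIRE, sem);
}

static void on_wake_up(struct task_struct *t, struct mutex *sem)
{
	/* NEED_TO_REACQUIRE -> REACQUIRING -> HELD: take the semaphore back */
	__down_and_reset_and_set_stat(t, REACQUIRING, HELD, sem);
}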