diff options
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r-- | kernel/softirq.c | 150 |
1 files changed, 130 insertions, 20 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 266cea2b9721..6f977ebc2770 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,10 @@ | |||
29 | #include <trace/events/irq.h> | 29 | #include <trace/events/irq.h> |
30 | 30 | ||
31 | #include <asm/irq.h> | 31 | #include <asm/irq.h> |
32 | |||
33 | #include <litmus/sched_trace.h> | ||
34 | #include <litmus/nvidia_info.h> | ||
35 | |||
32 | /* | 36 | /* |
33 | - No shared variables, all the data are CPU local. | 37 | - No shared variables, all the data are CPU local. |
34 | - If a softirq needs serialization, let it serialize itself | 38 | - If a softirq needs serialization, let it serialize itself |
@@ -54,7 +58,7 @@ EXPORT_SYMBOL(irq_stat); | |||
54 | 58 | ||
55 | static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; | 59 | static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; |
56 | 60 | ||
57 | static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); | 61 | static DEFINE_PER_CPU(struct task_struct *, ksoftirqd) = NULL; |
58 | 62 | ||
59 | char *softirq_to_name[NR_SOFTIRQS] = { | 63 | char *softirq_to_name[NR_SOFTIRQS] = { |
60 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", | 64 | "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", |
@@ -177,6 +181,7 @@ void local_bh_enable_ip(unsigned long ip) | |||
177 | } | 181 | } |
178 | EXPORT_SYMBOL(local_bh_enable_ip); | 182 | EXPORT_SYMBOL(local_bh_enable_ip); |
179 | 183 | ||
184 | |||
180 | /* | 185 | /* |
181 | * We restart softirq processing MAX_SOFTIRQ_RESTART times, | 186 | * We restart softirq processing MAX_SOFTIRQ_RESTART times, |
182 | * and we fall back to softirqd after that. | 187 | * and we fall back to softirqd after that. |
@@ -186,35 +191,36 @@ EXPORT_SYMBOL(local_bh_enable_ip); | |||
186 | * we want to handle softirqs as soon as possible, but they | 191 | * we want to handle softirqs as soon as possible, but they |
187 | * should not be able to lock up the box. | 192 | * should not be able to lock up the box. |
188 | */ | 193 | */ |
194 | #ifndef LITMUS_THREAD_ALL_SOFTIRQ | ||
189 | #define MAX_SOFTIRQ_RESTART 10 | 195 | #define MAX_SOFTIRQ_RESTART 10 |
196 | #else | ||
197 | #define MAX_SOFTIRQ_RESTART 20 | ||
198 | #endif | ||
190 | 199 | ||
191 | asmlinkage void __do_softirq(void) | 200 | static void ____do_softirq(void) |
192 | { | 201 | { |
193 | struct softirq_action *h; | ||
194 | __u32 pending; | 202 | __u32 pending; |
195 | int max_restart = MAX_SOFTIRQ_RESTART; | 203 | |
204 | struct softirq_action *h; | ||
196 | int cpu; | 205 | int cpu; |
197 | 206 | ||
198 | pending = local_softirq_pending(); | 207 | pending = local_softirq_pending(); |
208 | |||
199 | account_system_vtime(current); | 209 | account_system_vtime(current); |
200 | 210 | ||
201 | __local_bh_disable((unsigned long)__builtin_return_address(0)); | ||
202 | lockdep_softirq_enter(); | ||
203 | |||
204 | cpu = smp_processor_id(); | 211 | cpu = smp_processor_id(); |
205 | restart: | ||
206 | /* Reset the pending bitmask before enabling irqs */ | ||
207 | set_softirq_pending(0); | ||
208 | 212 | ||
213 | set_softirq_pending(0); | ||
214 | |||
209 | local_irq_enable(); | 215 | local_irq_enable(); |
210 | 216 | ||
211 | h = softirq_vec; | 217 | h = softirq_vec; |
212 | 218 | ||
213 | do { | 219 | do { |
214 | if (pending & 1) { | 220 | if (pending & 1) { |
215 | int prev_count = preempt_count(); | 221 | int prev_count = preempt_count(); |
216 | kstat_incr_softirqs_this_cpu(h - softirq_vec); | 222 | kstat_incr_softirqs_this_cpu(h - softirq_vec); |
217 | 223 | ||
218 | trace_softirq_entry(h, softirq_vec); | 224 | trace_softirq_entry(h, softirq_vec); |
219 | h->action(h); | 225 | h->action(h); |
220 | trace_softirq_exit(h, softirq_vec); | 226 | trace_softirq_exit(h, softirq_vec); |
@@ -226,26 +232,68 @@ restart: | |||
226 | h->action, prev_count, preempt_count()); | 232 | h->action, prev_count, preempt_count()); |
227 | preempt_count() = prev_count; | 233 | preempt_count() = prev_count; |
228 | } | 234 | } |
229 | 235 | ||
230 | rcu_bh_qs(cpu); | 236 | rcu_bh_qs(cpu); |
231 | } | 237 | } |
232 | h++; | 238 | h++; |
233 | pending >>= 1; | 239 | pending >>= 1; |
234 | } while (pending); | 240 | } while (pending); |
235 | 241 | ||
236 | local_irq_disable(); | 242 | local_irq_disable(); |
243 | } | ||
244 | |||
245 | static void ___do_softirq(void) | ||
246 | { | ||
247 | __u32 pending; | ||
248 | |||
249 | //struct softirq_action *h; | ||
250 | int max_restart = MAX_SOFTIRQ_RESTART; | ||
251 | //int cpu; | ||
252 | |||
253 | pending = local_softirq_pending(); | ||
254 | |||
255 | restart: | ||
256 | ____do_softirq(); | ||
237 | 257 | ||
238 | pending = local_softirq_pending(); | 258 | pending = local_softirq_pending(); |
239 | if (pending && --max_restart) | 259 | if (pending && --max_restart) |
240 | goto restart; | 260 | goto restart; |
241 | 261 | ||
242 | if (pending) | 262 | if (pending) |
263 | { | ||
243 | wakeup_softirqd(); | 264 | wakeup_softirqd(); |
265 | } | ||
266 | } | ||
244 | 267 | ||
268 | asmlinkage void __do_softirq(void) | ||
269 | { | ||
270 | #ifdef LITMUS_THREAD_ALL_SOFTIRQ | ||
271 | /* Skip straight to wakeup_softirqd() if we're using | ||
272 | LITMUS_THREAD_ALL_SOFTIRQ. This will cause ALL softirqs | ||
273 | and tasklets be threaded, either on Litmus klitirqd | ||
274 | threads or Linux ksoftirqd threads. */ | ||
275 | struct task_struct *tsk = __get_cpu_var(ksoftirqd); | ||
276 | |||
277 | if(tsk) | ||
278 | { | ||
279 | if(local_softirq_pending()) | ||
280 | wakeup_softirqd(); | ||
281 | return; | ||
282 | } | ||
283 | #endif | ||
284 | |||
285 | /* | ||
286 | * 'immediate' softirq execution: | ||
287 | */ | ||
288 | __local_bh_disable((unsigned long)__builtin_return_address(0)); | ||
289 | lockdep_softirq_enter(); | ||
290 | |||
291 | ___do_softirq(); | ||
292 | |||
245 | lockdep_softirq_exit(); | 293 | lockdep_softirq_exit(); |
246 | 294 | ||
247 | account_system_vtime(current); | 295 | account_system_vtime(current); |
248 | _local_bh_enable(); | 296 | _local_bh_enable(); |
249 | } | 297 | } |
250 | 298 | ||
251 | #ifndef __ARCH_HAS_DO_SOFTIRQ | 299 | #ifndef __ARCH_HAS_DO_SOFTIRQ |
@@ -361,6 +409,26 @@ void __tasklet_schedule(struct tasklet_struct *t) | |||
361 | { | 409 | { |
362 | unsigned long flags; | 410 | unsigned long flags; |
363 | 411 | ||
412 | if(is_nvidia_func(t->func)) | ||
413 | { | ||
414 | u32 nvidiaDevice = get_nv_device_num(t); | ||
415 | |||
416 | TRACE("%s: Handling Nvidia tasklet for device %u\n", | ||
417 | __FUNCTION__, nvidiaDevice); | ||
418 | |||
419 | BUG_ON(nvidiaDevice > 7); /* for Bonham. remove for general support. */ | ||
420 | |||
421 | /* | ||
422 | TODO: | ||
423 | 1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.) | ||
424 | 2) If there is an owner, set t->owner to the owner's task struct. | ||
425 | 3) Call litmus_tasklet_schedule() and return (don't execute the rest | ||
426 | of __tasklet_schedule()). | ||
427 | 4) If there is no owner, then continue on to the regular | ||
428 | tasklet processing below. | ||
429 | */ | ||
430 | } | ||
431 | |||
364 | local_irq_save(flags); | 432 | local_irq_save(flags); |
365 | t->next = NULL; | 433 | t->next = NULL; |
366 | *__get_cpu_var(tasklet_vec).tail = t; | 434 | *__get_cpu_var(tasklet_vec).tail = t; |
@@ -375,6 +443,24 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) | |||
375 | { | 443 | { |
376 | unsigned long flags; | 444 | unsigned long flags; |
377 | 445 | ||
446 | if(is_nvidia_func(t->func)) | ||
447 | { | ||
448 | u32 nvidiaDevice = get_nv_device_num(t); | ||
449 | |||
450 | TRACE("%s: Handling Nvidia tasklet for device %u\n", | ||
451 | __FUNCTION__, nvidiaDevice); | ||
452 | |||
453 | /* | ||
454 | TODO: | ||
455 | 1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.) | ||
456 | 2) If there is an owner, set t->owner to the owner's task struct. | ||
457 | 3) Call litmus_tasklet_hi_schedule() and return (don't execute the rest | ||
458 | of __tasklet_hi_schedule()). | ||
459 | 4) If there is no owner, then continue on to the regular | ||
460 | tasklet processing below. | ||
461 | */ | ||
462 | } | ||
463 | |||
378 | local_irq_save(flags); | 464 | local_irq_save(flags); |
379 | t->next = NULL; | 465 | t->next = NULL; |
380 | *__get_cpu_var(tasklet_hi_vec).tail = t; | 466 | *__get_cpu_var(tasklet_hi_vec).tail = t; |
@@ -389,6 +475,24 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) | |||
389 | { | 475 | { |
390 | BUG_ON(!irqs_disabled()); | 476 | BUG_ON(!irqs_disabled()); |
391 | 477 | ||
478 | if(is_nvidia_func(t->func)) | ||
479 | { | ||
480 | u32 nvidiaDevice = get_nv_device_num(t); | ||
481 | |||
482 | TRACE("%s: Handling Nvidia tasklet for device %u\n", | ||
483 | __FUNCTION__, nvidiaDevice); | ||
484 | |||
485 | /* | ||
486 | TODO: | ||
487 | 1) Ask Litmus which task owns GPU <nvidiaDevice>. (API to be defined.) | ||
488 | 2) If there is an owner, set t->owner to the owner's task struct. | ||
489 | 3) Call litmus_tasklet_hi_schedule_first() and return | ||
490 | (don't execute the rest of __tasklet_hi_schedule_first()). | ||
491 | 4) If there is no owner, then continue on to the regular | ||
492 | tasklet processing below. | ||
493 | */ | ||
494 | } | ||
495 | |||
392 | t->next = __get_cpu_var(tasklet_hi_vec).head; | 496 | t->next = __get_cpu_var(tasklet_hi_vec).head; |
393 | __get_cpu_var(tasklet_hi_vec).head = t; | 497 | __get_cpu_var(tasklet_hi_vec).head = t; |
394 | __raise_softirq_irqoff(HI_SOFTIRQ); | 498 | __raise_softirq_irqoff(HI_SOFTIRQ); |
@@ -698,6 +802,8 @@ void __init softirq_init(void) | |||
698 | 802 | ||
699 | static int run_ksoftirqd(void * __bind_cpu) | 803 | static int run_ksoftirqd(void * __bind_cpu) |
700 | { | 804 | { |
805 | unsigned long flags; | ||
806 | |||
701 | set_current_state(TASK_INTERRUPTIBLE); | 807 | set_current_state(TASK_INTERRUPTIBLE); |
702 | 808 | ||
703 | while (!kthread_should_stop()) { | 809 | while (!kthread_should_stop()) { |
@@ -716,7 +822,11 @@ static int run_ksoftirqd(void * __bind_cpu) | |||
716 | don't process */ | 822 | don't process */ |
717 | if (cpu_is_offline((long)__bind_cpu)) | 823 | if (cpu_is_offline((long)__bind_cpu)) |
718 | goto wait_to_die; | 824 | goto wait_to_die; |
719 | do_softirq(); | 825 | |
826 | local_irq_save(flags); | ||
827 | ____do_softirq(); | ||
828 | local_irq_restore(flags); | ||
829 | |||
720 | preempt_enable_no_resched(); | 830 | preempt_enable_no_resched(); |
721 | cond_resched(); | 831 | cond_resched(); |
722 | preempt_disable(); | 832 | preempt_disable(); |