Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--	kernel/softirq.c	322
1 file changed, 269 insertions, 53 deletions
diff --git a/kernel/softirq.c b/kernel/softirq.c
index fca82c32042b..5ce271675662 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,15 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#include <litmus/trace.h>
+#endif
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
 	/* Interrupts are disabled: no need to stop preemption */
 	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
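Making wakeup_softirqd() non-static lets the LITMUS^RT code that defers softirq work wake ksoftirqd directly. The matching declaration is not part of this diff, so callers presumably pick it up from a header elsewhere in the patch series; a minimal sketch, assuming a shared kernel header such as include/linux/interrupt.h (hypothetical placement):

/* Hypothetical declaration to accompany the un-static'd definition
 * above; this diff only shows kernel/softirq.c. */
extern void wakeup_softirqd(void);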
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -206,65 +216,65 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 
 asmlinkage void __do_softirq(void)
 {
 	struct softirq_action *h;
 	__u32 pending;
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	int cpu;
 
 	pending = local_softirq_pending();
 	account_system_vtime(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
 	lockdep_softirq_enter();
 
 	cpu = smp_processor_id();
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
 
 	local_irq_enable();
 
 	h = softirq_vec;
-
-	do {
-		if (pending & 1) {
-			unsigned int vec_nr = h - softirq_vec;
-			int prev_count = preempt_count();
-
-			kstat_incr_softirqs_this_cpu(vec_nr);
-
-			trace_softirq_entry(vec_nr);
-			h->action(h);
-			trace_softirq_exit(vec_nr);
-			if (unlikely(prev_count != preempt_count())) {
-				printk(KERN_ERR "huh, entered softirq %u %s %p"
-				       "with preempt_count %08x,"
-				       " exited with %08x?\n", vec_nr,
-				       softirq_to_name[vec_nr], h->action,
-				       prev_count, preempt_count());
-				preempt_count() = prev_count;
-			}
-
-			rcu_bh_qs(cpu);
-		}
-		h++;
-		pending >>= 1;
-	} while (pending);
-
-	local_irq_disable();
-
-	pending = local_softirq_pending();
-	if (pending && --max_restart)
-		goto restart;
-
-	if (pending)
-		wakeup_softirqd();
-
-	lockdep_softirq_exit();
-
-	account_system_vtime(current);
-	__local_bh_enable(SOFTIRQ_OFFSET);
+
+	do {
+		if (pending & 1) {
+			unsigned int vec_nr = h - softirq_vec;
+			int prev_count = preempt_count();
+
+			kstat_incr_softirqs_this_cpu(vec_nr);
+
+			trace_softirq_entry(vec_nr);
+			h->action(h);
+			trace_softirq_exit(vec_nr);
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %u %s %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", vec_nr,
+				       softirq_to_name[vec_nr], h->action,
+				       prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
+			rcu_bh_qs(cpu);
+		}
+		h++;
+		pending >>= 1;
+	} while (pending);
+
+	local_irq_disable();
+
+	pending = local_softirq_pending();
+	if (pending && --max_restart)
+		goto restart;
+
+	if (pending)
+		wakeup_softirqd();
+
+	lockdep_softirq_exit();
+
+	account_system_vtime(current);
+	__local_bh_enable(SOFTIRQ_OFFSET);
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
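The removed and re-added lines in this hunk are textually identical, so the change appears to be whitespace-only; the dispatch logic is unchanged. For reference, the loop pairs bit i of the pending snapshot with handler softirq_vec[i], walking the mask one bit at a time and restarting the whole pass up to MAX_SOFTIRQ_RESTART times before deferring to ksoftirqd. A standalone sketch of the same bit-walk (illustrative only, not kernel code):

#include <stdio.h>

/* Walk a pending bitmask the way __do_softirq does: visit handler
 * slot i for each set bit i, lowest bit first. */
static void walk_pending(unsigned int pending)
{
	unsigned int slot = 0;

	do {
		if (pending & 1)
			printf("run handler %u\n", slot);
		slot++;
		pending >>= 1;
	} while (pending);
}

int main(void)
{
	walk_pending(0x2a);	/* bits 1, 3, 5 -> handlers 1, 3, 5 */
	return 0;
}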
@@ -402,8 +412,99 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+#ifdef CONFIG_LITMUS_NVIDIA
+static int __do_nv_now(struct tasklet_struct* tasklet)
+{
+	int success = 1;
+
+	if(tasklet_trylock(tasklet)) {
+		if (!atomic_read(&tasklet->count)) {
+			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) {
+				BUG();
+			}
+			tasklet->func(tasklet->data);
+			tasklet_unlock(tasklet);
+		}
+		else {
+			success = 0;
+		}
+
+		tasklet_unlock(tasklet);
+	}
+	else {
+		success = 0;
+	}
+
+	return success;
+}
+#endif
+
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+#if 0
+		// do nvidia tasklets right away and return
+		if(__do_nv_now(t))
+			return;
+#else
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					  __FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					  __FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+
+				if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+#endif
+	}
+
+#endif
+
+	___tasklet_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_schedule);
+
+
+void ___tasklet_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
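One thing to flag in __do_nv_now() above: on the path where the tasklet actually runs, tasklet_unlock() is called twice, once right after tasklet->func() and again after the inner if/else. tasklet_unlock() just clears TASKLET_STATE_RUN, so the duplicate is usually benign, but if another CPU manages to tasklet_trylock() the tasklet in the window between the two calls, the second clear releases a lock this CPU no longer holds. (The function is only reachable from the disabled `#if 0` branch of __tasklet_schedule(), so the bug is latent rather than live.) A corrected sketch with a single unlock on the locked path:

#ifdef CONFIG_LITMUS_NVIDIA
/* Sketch of __do_nv_now() with the duplicate unlock removed: the
 * locked path unlocks exactly once, whether or not the tasklet ran. */
static int __do_nv_now(struct tasklet_struct *tasklet)
{
	int success = 1;

	if (tasklet_trylock(tasklet)) {
		if (!atomic_read(&tasklet->count)) {
			if (!test_and_clear_bit(TASKLET_STATE_SCHED,
						&tasklet->state))
				BUG();
			tasklet->func(tasklet->data);
		} else {
			success = 0;	/* disabled; leave for the normal path */
		}
		tasklet_unlock(tasklet);
	} else {
		success = 0;	/* already running elsewhere */
	}

	return success;
}
#endif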
@@ -413,11 +514,65 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if( is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
+					  __FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					  __FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct* t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
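Note that drivers never call these out-of-line functions directly: tasklet_schedule() and tasklet_hi_schedule() are inline wrappers in include/linux/interrupt.h that set TASKLET_STATE_SCHED and then call __tasklet_schedule()/__tasklet_hi_schedule(), so hooking the out-of-line entry points intercepts every tasklet in the system without touching drivers. For reference, the mainline wrapper (not part of this diff) is roughly:

/* include/linux/interrupt.h (mainline form): every tasklet_schedule()
 * call funnels through __tasklet_schedule() once the SCHED bit is won. */
static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}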
@@ -427,19 +582,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_max_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					  __FUNCTION__, nvidia_device,litmus_clock());
+
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					  __FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct* t)
+{
+	BUG_ON(!irqs_disabled());
 
 	t->next = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, t);
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
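At this point the NVIDIA-dispatch block has been copied nearly verbatim into __tasklet_schedule(), __tasklet_hi_schedule(), and __tasklet_hi_schedule_first(); the three copies differ only in which _litmus_tasklet_*_schedule() routine they try. A hypothetical consolidation (not in the patch; it assumes the LITMUS dispatch routines share the signature int (*)(struct tasklet_struct *, u32), as their use under likely() suggests):

#ifdef CONFIG_LITMUS_NVIDIA
/* Hypothetical helper folding the three copies above into one.
 * Returns 1 if LITMUS accepted the tasklet, 0 to fall through to
 * the normal scheduling path. */
static int nv_dispatch_tasklet(struct tasklet_struct *t,
			       int (*litmus_sched)(struct tasklet_struct *, u32))
{
	u32 nvidia_device;
	unsigned long flags;
	struct task_struct *device_owner;
	int handled = 0;

	if (!is_nvidia_func(t->func))
		return 0;

	nvidia_device = get_tasklet_nv_device_num(t);
	lock_nv_registry(nvidia_device, &flags);

	device_owner = get_nv_max_device_owner(nvidia_device);
	t->owner = NULL;

	if (device_owner && is_realtime(device_owner)) {
		t->owner = device_owner;
		sched_trace_tasklet_release(t->owner);
		if (likely(litmus_sched(t, nvidia_device)))
			handled = 1;
		else
			t->owner = NULL;	/* fall through to normal scheduling */
	}

	unlock_nv_registry(nvidia_device, &flags);
	return handled;
}
#endif

Each wrapper would then reduce to a guard plus its fast path, e.g. `if (nv_dispatch_tasklet(t, _litmus_tasklet_schedule)) return;` followed by the ___tasklet_schedule(t) call.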
@@ -495,6 +703,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 		if (!atomic_read(&t->count)) {
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 				BUG();
+
 			t->func(t->data);
 			tasklet_unlock(t);
 			continue;
@@ -518,8 +727,13 @@ void tasklet_init(struct tasklet_struct *t,
 	t->next = NULL;
 	t->state = 0;
 	atomic_set(&t->count, 0);
+
 	t->func = func;
 	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
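The t->owner = NULL initialization here, together with the assignments in the NVIDIA paths above, assumes struct tasklet_struct has grown an owner field; that header change is outside this diff. Note the guard here is CONFIG_LITMUS_SOFTIRQD while the writers above compile under CONFIG_LITMUS_NVIDIA, so the field is presumably declared whenever either option is enabled. A sketch of the presumed layout (only the owner field is new relative to mainline):

/* Presumed extension of struct tasklet_struct in
 * include/linux/interrupt.h; the actual header change is not shown. */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;

#ifdef CONFIG_LITMUS_SOFTIRQD
	struct task_struct *owner;	/* real-time owner, set by NVIDIA paths */
#endif
};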
@@ -534,6 +748,7 @@ void tasklet_kill(struct tasklet_struct *t)
 			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
+
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -808,6 +1023,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
 		if (*i == t) {
 			*i = t->next;
+
 			/* If this was the tail element, move the tail ptr */
 			if (*i == NULL)
 				per_cpu(tasklet_vec, cpu).tail = i;