Diffstat (limited to 'kernel/softirq.c')
 kernel/softirq.c | 335 ++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 279 insertions(+), 56 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2f2df08df395..ea438a8635d0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,15 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#include <litmus/trace.h>
+#endif
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -67,7 +76,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
  * to the pending events, so lets the scheduler to balance
  * the softirq load for us.
  */
-static void wakeup_softirqd(void)
+void wakeup_softirqd(void)
 {
         /* Interrupts are disabled: no need to stop preemption */
         struct task_struct *tsk = __this_cpu_read(ksoftirqd);
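
This hunk only drops the static qualifier, making wakeup_softirqd() callable from other compilation units; the matching extern declaration would live in a header outside this file's diff. A minimal sketch of an external caller, assuming LITMUS code wants to kick the per-CPU ksoftirqd thread (the wrapper name is hypothetical):

        /* hypothetical LITMUS-side caller; wakeup_softirqd() is assumed
         * to be declared in a shared header after this change */
        extern void wakeup_softirqd(void);

        static void kick_ksoftirqd(void)
        {
                unsigned long flags;

                /* wakeup_softirqd() relies on interrupts being disabled */
                local_irq_save(flags);
                if (local_softirq_pending())
                        wakeup_softirqd();
                local_irq_restore(flags);
        }
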
@@ -193,6 +202,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -206,10 +216,10 @@ EXPORT_SYMBOL(local_bh_enable_ip);
 
 asmlinkage void __do_softirq(void)
 {
         struct softirq_action *h;
         __u32 pending;
         int max_restart = MAX_SOFTIRQ_RESTART;
         int cpu;
 
         /* Mark Feather-Trace samples as "disturbed". */
         ft_irq_fired();
@@ -217,57 +227,57 @@ asmlinkage void __do_softirq(void)
         pending = local_softirq_pending();
         account_system_vtime(current);
 
         __local_bh_disable((unsigned long)__builtin_return_address(0),
                                 SOFTIRQ_OFFSET);
         lockdep_softirq_enter();
 
         cpu = smp_processor_id();
 restart:
         /* Reset the pending bitmask before enabling irqs */
         set_softirq_pending(0);
 
         local_irq_enable();
 
         h = softirq_vec;
-
-        do {
-                if (pending & 1) {
-                        unsigned int vec_nr = h - softirq_vec;
-                        int prev_count = preempt_count();
-
-                        kstat_incr_softirqs_this_cpu(vec_nr);
-
-                        trace_softirq_entry(vec_nr);
-                        h->action(h);
-                        trace_softirq_exit(vec_nr);
-                        if (unlikely(prev_count != preempt_count())) {
-                                printk(KERN_ERR "huh, entered softirq %u %s %p"
-                                       "with preempt_count %08x,"
-                                       " exited with %08x?\n", vec_nr,
-                                       softirq_to_name[vec_nr], h->action,
-                                       prev_count, preempt_count());
-                                preempt_count() = prev_count;
-                        }
-
-                        rcu_bh_qs(cpu);
-                }
-                h++;
-                pending >>= 1;
-        } while (pending);
-
-        local_irq_disable();
-
-        pending = local_softirq_pending();
-        if (pending && --max_restart)
-                goto restart;
-
-        if (pending)
-                wakeup_softirqd();
-
-        lockdep_softirq_exit();
-
-        account_system_vtime(current);
-        __local_bh_enable(SOFTIRQ_OFFSET);
+
+        do {
+                if (pending & 1) {
+                        unsigned int vec_nr = h - softirq_vec;
+                        int prev_count = preempt_count();
+
+                        kstat_incr_softirqs_this_cpu(vec_nr);
+
+                        trace_softirq_entry(vec_nr);
+                        h->action(h);
+                        trace_softirq_exit(vec_nr);
+                        if (unlikely(prev_count != preempt_count())) {
+                                printk(KERN_ERR "huh, entered softirq %u %s %p"
+                                       "with preempt_count %08x,"
+                                       " exited with %08x?\n", vec_nr,
+                                       softirq_to_name[vec_nr], h->action,
+                                       prev_count, preempt_count());
+                                preempt_count() = prev_count;
+                        }
+
+                        rcu_bh_qs(cpu);
+                }
+                h++;
+                pending >>= 1;
+        } while (pending);
+
+        local_irq_disable();
+
+        pending = local_softirq_pending();
+        if (pending && --max_restart)
+                goto restart;
+
+        if (pending)
+                wakeup_softirqd();
+
+        lockdep_softirq_exit();
+
+        account_system_vtime(current);
+        __local_bh_enable(SOFTIRQ_OFFSET);
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
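
The comment above __do_softirq() describes the policy this hunk preserves: pending softirqs are serviced in a loop at most MAX_SOFTIRQ_RESTART times, after which any remaining work is deferred to ksoftirqd so a softirq storm cannot monopolize the CPU. A self-contained sketch of that bounded-restart pattern, with illustrative names (pending_mask, run_handlers, and wake_worker_thread are stand-ins, not kernel APIs):

        static unsigned int pending_mask;       /* stands in for local_softirq_pending() */

        static void run_handlers(unsigned int pending); /* illustrative stubs */
        static void wake_worker_thread(void);

        #define MAX_RESTART 10

        static void bounded_service(void)
        {
                int max_restart = MAX_RESTART;
                unsigned int pending = pending_mask;

        restart:
                pending_mask = 0;               /* accept new arrivals while we work */
                run_handlers(pending);

                pending = pending_mask;         /* anything raised while handlers ran? */
                if (pending && --max_restart)
                        goto restart;

                if (pending)
                        wake_worker_thread();   /* budget exhausted: defer to a thread */
        }
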
@@ -396,17 +406,103 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
 /*
  * Tasklets
  */
-struct tasklet_head
-{
-        struct tasklet_struct *head;
-        struct tasklet_struct **tail;
-};
 
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+#ifdef CONFIG_LITMUS_NVIDIA
+static int __do_nv_now(struct tasklet_struct* tasklet)
+{
+        int success = 1;
+
+        if(tasklet_trylock(tasklet)) {
+                if (!atomic_read(&tasklet->count)) {
+                        if (!test_and_clear_bit(TASKLET_STATE_SCHED, &tasklet->state)) {
+                                BUG();
+                        }
+                        tasklet->func(tasklet->data);
+                        tasklet_unlock(tasklet);
+                }
+                else {
+                        success = 0;
+                }
+
+                tasklet_unlock(tasklet);
+        }
+        else {
+                success = 0;
+        }
+
+        return success;
+}
+#endif
+
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+        if(is_nvidia_func(t->func))
+        {
+#if 1
+                // do nvidia tasklets right away and return
+                if(__do_nv_now(t))
+                        return;
+#else
+                u32 nvidia_device = get_tasklet_nv_device_num(t);
+                // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+                //       __FUNCTION__, nvidia_device,litmus_clock());
+
+                unsigned long flags;
+                struct task_struct* device_owner;
+
+                lock_nv_registry(nvidia_device, &flags);
+
+                device_owner = get_nv_max_device_owner(nvidia_device);
+
+                if(device_owner==NULL)
+                {
+                        t->owner = NULL;
+                }
+                else
+                {
+                        if(is_realtime(device_owner))
+                        {
+                                TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+                                      __FUNCTION__, nvidia_device,litmus_clock());
+                                TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+                                      __FUNCTION__,device_owner->pid,nvidia_device);
+
+                                t->owner = device_owner;
+                                sched_trace_tasklet_release(t->owner);
+
+                                if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
+                                {
+                                        unlock_nv_registry(nvidia_device, &flags);
+                                        return;
+                                }
+                                else
+                                {
+                                        t->owner = NULL; /* fall through to normal scheduling */
+                                }
+                        }
+                        else
+                        {
+                                t->owner = NULL;
+                        }
+                }
+                unlock_nv_registry(nvidia_device, &flags);
+#endif
+        }
+
+#endif
+
+        ___tasklet_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_schedule);
+
+
+void ___tasklet_schedule(struct tasklet_struct *t)
+{
         unsigned long flags;
 
         local_irq_save(flags);
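
The new __do_nv_now() helper runs an NVIDIA tasklet immediately in the caller's context; the disabled #else branch is an older path that instead tagged the tasklet with the device owner's real-time task and handed it to the LITMUS scheduler via _litmus_tasklet_schedule(). The immediate path follows the standard tasklet execution protocol, the same checks tasklet_action() performs; a condensed restatement for reference (the function name is illustrative):

        static int run_tasklet_now(struct tasklet_struct *t)
        {
                if (!tasklet_trylock(t))
                        return 0;               /* already running on another CPU */

                if (atomic_read(&t->count)) {
                        tasklet_unlock(t);
                        return 0;               /* disabled via tasklet_disable() */
                }

                /* SCHED must be set, and must be cleared before the handler
                 * runs so the handler may legally reschedule itself */
                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                        BUG();

                t->func(t->data);
                tasklet_unlock(t);              /* exactly one unlock per successful trylock */
                return 1;
        }
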
@@ -416,11 +512,71 @@ void __tasklet_schedule(struct tasklet_struct *t)
         raise_softirq_irqoff(TASKLET_SOFTIRQ);
         local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+        if(is_nvidia_func(t->func))
+        {
+#if 1
+                // do nvidia tasklets right away and return
+                if(__do_nv_now(t))
+                        return;
+#else
+                u32 nvidia_device = get_tasklet_nv_device_num(t);
+                // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+                //       __FUNCTION__, nvidia_device,litmus_clock());
+
+                unsigned long flags;
+                struct task_struct* device_owner;
+
+                lock_nv_registry(nvidia_device, &flags);
+
+                device_owner = get_nv_max_device_owner(nvidia_device);
+
+                if(device_owner==NULL)
+                {
+                        t->owner = NULL;
+                }
+                else
+                {
+                        if( is_realtime(device_owner))
+                        {
+                                TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
+                                      __FUNCTION__, nvidia_device,litmus_clock());
+                                TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+                                      __FUNCTION__,device_owner->pid,nvidia_device);
+
+                                t->owner = device_owner;
+                                sched_trace_tasklet_release(t->owner);
+                                if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device)))
+                                {
+                                        unlock_nv_registry(nvidia_device, &flags);
+                                        return;
+                                }
+                                else
+                                {
+                                        t->owner = NULL; /* fall through to normal scheduling */
+                                }
+                        }
+                        else
+                        {
+                                t->owner = NULL;
+                        }
+                }
+                unlock_nv_registry(nvidia_device, &flags);
+#endif
+        }
+#endif
+
+        ___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct* t)
+{
         unsigned long flags;
 
         local_irq_save(flags);
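
The same refactoring recurs for each entry point: the original body moves into a triple-underscore worker (___tasklet_schedule(), ___tasklet_hi_schedule(), and below ___tasklet_hi_schedule_first()) that performs the plain per-CPU enqueue, while the double-underscore name becomes a wrapper that first screens for NVIDIA callback functions. Drivers are unaffected, since tasklet_schedule() and tasklet_hi_schedule() still resolve to the wrappers, and both symbol sets are exported so LITMUS-internal code can pick either path. For example (tasklet name illustrative):

        /* ordinary driver path: goes through the NVIDIA screen */
        tasklet_hi_schedule(&my_tasklet);

        /* direct enqueue that bypasses the screen, e.g. for a LITMUS requeue */
        ___tasklet_hi_schedule(&my_tasklet);
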
@@ -430,19 +586,78 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
         raise_softirq_irqoff(HI_SOFTIRQ);
         local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
         BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+        if(is_nvidia_func(t->func))
+        {
+#if 1
+                // do nvidia tasklets right away and return
+                if(__do_nv_now(t))
+                        return;
+#else
+                u32 nvidia_device = get_tasklet_nv_device_num(t);
+                // TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+                //       __FUNCTION__, nvidia_device,litmus_clock());
+                unsigned long flags;
+                struct task_struct* device_owner;
+
+                lock_nv_registry(nvidia_device, &flags);
+
+                device_owner = get_nv_max_device_owner(nvidia_device);
+
+                if(device_owner==NULL)
+                {
+                        t->owner = NULL;
+                }
+                else
+                {
+                        if(is_realtime(device_owner))
+                        {
+                                TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+                                      __FUNCTION__, nvidia_device,litmus_clock());
+
+                                TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+                                      __FUNCTION__,device_owner->pid,nvidia_device);
+
+                                t->owner = device_owner;
+                                sched_trace_tasklet_release(t->owner);
+                                if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device)))
+                                {
+                                        unlock_nv_registry(nvidia_device, &flags);
+                                        return;
+                                }
+                                else
+                                {
+                                        t->owner = NULL; /* fall through to normal scheduling */
+                                }
+                        }
+                        else
+                        {
+                                t->owner = NULL;
+                        }
+                }
+                unlock_nv_registry(nvidia_device, &flags);
+#endif
+        }
+#endif
+
+        ___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct* t)
+{
+        BUG_ON(!irqs_disabled());
 
         t->next = __this_cpu_read(tasklet_hi_vec.head);
         __this_cpu_write(tasklet_hi_vec.head, t);
         __raise_softirq_irqoff(HI_SOFTIRQ);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
@@ -498,6 +713,7 @@ static void tasklet_hi_action(struct softirq_action *a)
                 if (!atomic_read(&t->count)) {
                         if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                 BUG();
+
                         t->func(t->data);
                         tasklet_unlock(t);
                         continue;
@@ -521,8 +737,13 @@ void tasklet_init(struct tasklet_struct *t,
         t->next = NULL;
         t->state = 0;
         atomic_set(&t->count, 0);
+
         t->func = func;
         t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+        t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
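
tasklet_init() now also clears the LITMUS-specific ->owner field when CONFIG_LITMUS_SOFTIRQD is enabled, so a freshly initialized tasklet carries no real-time owner until one is assigned. The call signature is unchanged; typical usage for reference (names illustrative):

        static void my_bottom_half(unsigned long data)
        {
                /* deferred work runs here in softirq context */
        }

        static struct tasklet_struct my_tasklet;

        tasklet_init(&my_tasklet, my_bottom_half, 0);
        tasklet_schedule(&my_tasklet);  /* ->owner remains NULL unless LITMUS assigns one */
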
@@ -537,6 +758,7 @@ void tasklet_kill(struct tasklet_struct *t)
                         yield();
                 } while (test_bit(TASKLET_STATE_SCHED, &t->state));
         }
+
         tasklet_unlock_wait(t);
         clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -811,6 +1033,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
         for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                 if (*i == t) {
                         *i = t->next;
+
                         /* If this was the tail element, move the tail ptr */
                         if (*i == NULL)
                                 per_cpu(tasklet_vec, cpu).tail = i;