author    Glenn Elliott <gelliott@cs.unc.edu>  2011-06-02 16:06:05 -0400
committer Glenn Elliott <gelliott@cs.unc.edu>  2011-06-02 16:06:05 -0400
commit    3d5537c160c1484e8d562b9828baf679cc53f67a (patch)
tree      b595364f1b0f94ac2426c8315bc5967debc7bbb0 /kernel/softirq.c
parent    7d754596756240fa918b94cd0c3011c77a638987 (diff)
Full patch for klitirqd with Nvidia GPU support.
Diffstat (limited to 'kernel/softirq.c')
-rw-r--r--  kernel/softirq.c | 278
1 file changed, 252 insertions(+), 26 deletions(-)
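In outline, the patch restructures softirq processing in three ways: it splits __do_softirq() into a single-pass worker (____do_softirq()), a restart loop (___do_softirq()), and the entry point; under LITMUS_THREAD_ALL_SOFTIRQ it defers all but the most urgent softirqs to the ksoftirqd/klitirqd daemon threads; and it reroutes NVIDIA driver tasklets through LITMUS^RT so they run at the priority of the real-time task that owns the GPU. A minimal sketch of the deferral test, using the mask that appears in the patch below (the helper name is hypothetical; the patch inlines this check in __do_softirq()):

	/* Sketch: may the pending softirqs be handed off to a daemon
	 * thread instead of running inline? HI, TIMER and HRTIMER
	 * softirqs are treated as too urgent to defer. */
	static inline int softirqs_deferrable(__u32 pending)
	{
		const __u32 high_prio_softirq = (1 << HI_SOFTIRQ) |
			(1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ);

		return pending && !(pending & high_prio_softirq);
	}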
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 07b4f1b1a73a..be4b8fab3637 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -29,6 +29,14 @@
 #include <trace/events/irq.h>
 
 #include <asm/irq.h>
+
+#include <litmus/litmus.h>
+#include <litmus/sched_trace.h>
+
+#ifdef CONFIG_LITMUS_NVIDIA
+#include <litmus/nvidia_info.h>
+#endif
+
 /*
    - No shared variables, all the data are CPU local.
    - If a softirq needs serialization, let it serialize itself
@@ -54,7 +62,7 @@ EXPORT_SYMBOL(irq_stat);
 
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
-static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+static DEFINE_PER_CPU(struct task_struct *, ksoftirqd) = NULL;
 
 char *softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -177,6 +185,7 @@ void local_bh_enable_ip(unsigned long ip)
 }
 EXPORT_SYMBOL(local_bh_enable_ip);
 
+
 /*
  * We restart softirq processing MAX_SOFTIRQ_RESTART times,
  * and we fall back to softirqd after that.
@@ -187,34 +196,30 @@ EXPORT_SYMBOL(local_bh_enable_ip);
  * should not be able to lock up the box.
  */
 #define MAX_SOFTIRQ_RESTART 10
-
-asmlinkage void __do_softirq(void)
+static void ____do_softirq(void)
 {
-	struct softirq_action *h;
 	__u32 pending;
-	int max_restart = MAX_SOFTIRQ_RESTART;
+
+	struct softirq_action *h;
 	int cpu;
 
 	pending = local_softirq_pending();
+
 	account_system_vtime(current);
 
-	__local_bh_disable((unsigned long)__builtin_return_address(0));
-	lockdep_softirq_enter();
-
 	cpu = smp_processor_id();
-restart:
-	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
 
+	set_softirq_pending(0);
+
 	local_irq_enable();
 
 	h = softirq_vec;
 
 	do {
 		if (pending & 1) {
 			int prev_count = preempt_count();
 			kstat_incr_softirqs_this_cpu(h - softirq_vec);
 
 			trace_softirq_entry(h, softirq_vec);
 			h->action(h);
 			trace_softirq_exit(h, softirq_vec);
@@ -226,26 +231,70 @@ restart:
 				       h->action, prev_count, preempt_count());
 				preempt_count() = prev_count;
 			}
 
 			rcu_bh_qs(cpu);
 		}
 		h++;
 		pending >>= 1;
 	} while (pending);
 
 	local_irq_disable();
+}
+
+static void ___do_softirq(void)
+{
+	__u32 pending;
+
+	//struct softirq_action *h;
+	int max_restart = MAX_SOFTIRQ_RESTART;
+	//int cpu;
+
+	pending = local_softirq_pending();
+
+restart:
+	____do_softirq();
 
 	pending = local_softirq_pending();
 	if (pending && --max_restart)
 		goto restart;
 
 	if (pending)
+	{
 		wakeup_softirqd();
+	}
+}
 
+asmlinkage void __do_softirq(void)
+{
+#ifdef LITMUS_THREAD_ALL_SOFTIRQ
+	/* Skip straight to wakeup_softirqd() if we're using LITMUS_THREAD_ALL_SOFTIRQ
+	   (unless genuinely high-priority work is waiting). */
+	struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+
+	if(tsk)
+	{
+		__u32 pending = local_softirq_pending();
+		const __u32 high_prio_softirq = (1<<HI_SOFTIRQ) | (1<<TIMER_SOFTIRQ) | (1<<HRTIMER_SOFTIRQ);
+		if(pending && !(pending & high_prio_softirq))
+		{
+			wakeup_softirqd();
+			return;
+		}
+	}
+#endif
+
+	/*
+	 * 'immediate' softirq execution:
+	 */
+	__local_bh_disable((unsigned long)__builtin_return_address(0));
+	lockdep_softirq_enter();
+
+	___do_softirq();
+
 	lockdep_softirq_exit();
 
 	account_system_vtime(current);
 	_local_bh_enable();
 }
 
 #ifndef __ARCH_HAS_DO_SOFTIRQ
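After this hunk, the call structure is: ____do_softirq() makes one pass over the pending bitmask (entered and left with interrupts disabled), ___do_softirq() wraps it in the MAX_SOFTIRQ_RESTART retry loop and punts leftovers to the daemon, and __do_softirq() adds the optional klitirqd hand-off plus the bh/lockdep bracketing. As an illustration of the inner walk (not taken from the patch), bit i of the pending mask selects softirq_vec[i], and the loop visits set bits from lowest to highest:

	/* Illustration: walking a softirq pending mask. With this value,
	 * the TIMER handler runs first, then the TASKLET handler. */
	__u32 pending = (1 << TIMER_SOFTIRQ) | (1 << TASKLET_SOFTIRQ);
	struct softirq_action *h = softirq_vec;

	do {
		if (pending & 1)
			h->action(h);	/* run this softirq's handler */
		h++;			/* advance to the next vector slot */
		pending >>= 1;		/* drop the bit just examined */
	} while (pending);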
@@ -357,8 +406,64 @@ struct tasklet_head
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
 static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
 
+
 void __tasklet_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					  __FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					  __FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_schedule);
+
+
+void ___tasklet_schedule(struct tasklet_struct *t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -368,11 +473,65 @@ void __tasklet_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(___tasklet_schedule);
 
-EXPORT_SYMBOL(__tasklet_schedule);
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
 {
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if( is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u\tat %llu\n",
+					  __FUNCTION__, nvidia_device,litmus_clock());
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					  __FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+void ___tasklet_hi_schedule(struct tasklet_struct* t)
+{
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -382,19 +541,72 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 	raise_softirq_irqoff(HI_SOFTIRQ);
 	local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(__tasklet_hi_schedule);
+EXPORT_SYMBOL(___tasklet_hi_schedule);
 
 void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
 	BUG_ON(!irqs_disabled());
+#ifdef CONFIG_LITMUS_NVIDIA
+	if(is_nvidia_func(t->func))
+	{
+		u32 nvidia_device = get_tasklet_nv_device_num(t);
+		// TRACE("%s: Handling NVIDIA tasklet for device\t%u\tat\t%llu\n",
+		//	 __FUNCTION__, nvidia_device,litmus_clock());
+		unsigned long flags;
+		struct task_struct* device_owner;
+
+		lock_nv_registry(nvidia_device, &flags);
+
+		device_owner = get_nv_device_owner(nvidia_device);
+
+		if(device_owner==NULL)
+		{
+			t->owner = NULL;
+		}
+		else
+		{
+			if(is_realtime(device_owner))
+			{
+				TRACE("%s: Handling NVIDIA tasklet for device %u at %llu\n",
+					  __FUNCTION__, nvidia_device,litmus_clock());
+
+				TRACE("%s: the owner task %d of NVIDIA Device %u is RT-task\n",
+					  __FUNCTION__,device_owner->pid,nvidia_device);
+
+				t->owner = device_owner;
+				sched_trace_tasklet_release(t->owner);
+				if(likely(_litmus_tasklet_hi_schedule_first(t,nvidia_device)))
+				{
+					unlock_nv_registry(nvidia_device, &flags);
+					return;
+				}
+				else
+				{
+					t->owner = NULL; /* fall through to normal scheduling */
+				}
+			}
+			else
+			{
+				t->owner = NULL;
+			}
+		}
+		unlock_nv_registry(nvidia_device, &flags);
+	}
+#endif
+
+	___tasklet_hi_schedule_first(t);
+}
+EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+void ___tasklet_hi_schedule_first(struct tasklet_struct* t)
+{
+	BUG_ON(!irqs_disabled());
 
 	t->next = __get_cpu_var(tasklet_hi_vec).head;
 	__get_cpu_var(tasklet_hi_vec).head = t;
 	__raise_softirq_irqoff(HI_SOFTIRQ);
 }
+EXPORT_SYMBOL(___tasklet_hi_schedule_first);
 
-EXPORT_SYMBOL(__tasklet_hi_schedule_first);
 
 static void tasklet_action(struct softirq_action *a)
 {
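The same routing pattern appears in all three scheduling paths above; distilled, it reads as the sketch below. The helper and its function-pointer parameter are hypothetical — the patch inlines the logic in each function, dispatching to _litmus_tasklet_schedule(), _litmus_tasklet_hi_schedule(), or _litmus_tasklet_hi_schedule_first() respectively, and the int return type of those calls is assumed from their use under likely():

	/* Hypothetical distillation of the NVIDIA-routing pattern. Returns
	 * nonzero if the tasklet was handed to LITMUS^RT; zero means the
	 * caller should fall through to the normal scheduling path. */
	static int nv_route_tasklet(struct tasklet_struct *t,
				    int (*litmus_dispatch)(struct tasklet_struct *, u32))
	{
		unsigned long flags;
		struct task_struct *device_owner;
		u32 nvidia_device;
		int routed = 0;

		if (!is_nvidia_func(t->func))
			return 0;

		nvidia_device = get_tasklet_nv_device_num(t);
		lock_nv_registry(nvidia_device, &flags);

		device_owner = get_nv_device_owner(nvidia_device);
		if (device_owner != NULL && is_realtime(device_owner)) {
			/* Let the tasklet inherit the GPU owner's identity. */
			t->owner = device_owner;
			sched_trace_tasklet_release(t->owner);
			routed = litmus_dispatch(t, nvidia_device);
		}
		if (!routed)
			t->owner = NULL;	/* fall through to normal scheduling */

		unlock_nv_registry(nvidia_device, &flags);
		return routed;
	}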
@@ -450,6 +662,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 		if (!atomic_read(&t->count)) {
 			if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
 				BUG();
+
 			t->func(t->data);
 			tasklet_unlock(t);
 			continue;
@@ -473,8 +686,13 @@ void tasklet_init(struct tasklet_struct *t,
 	t->next = NULL;
 	t->state = 0;
 	atomic_set(&t->count, 0);
+
 	t->func = func;
 	t->data = data;
+
+#ifdef CONFIG_LITMUS_SOFTIRQD
+	t->owner = NULL;
+#endif
 }
 
 EXPORT_SYMBOL(tasklet_init);
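The t->owner field written above is a LITMUS^RT addition to struct tasklet_struct, declared elsewhere in the full klitirqd patch. A sketch of the assumed layout — the five standard fields match mainline 2.6.x, and the owner field is the patch's extension:

	/* Assumed shape of the extended tasklet descriptor. */
	struct tasklet_struct
	{
		struct tasklet_struct *next;	/* singly-linked per-CPU list */
		unsigned long state;
		atomic_t count;
		void (*func)(unsigned long);
		unsigned long data;
	#ifdef CONFIG_LITMUS_SOFTIRQD
		struct task_struct *owner;	/* RT task the tasklet runs on behalf of */
	#endif
	};

Initializing owner to NULL in tasklet_init() means a fresh tasklet takes the normal scheduling path until one of the NVIDIA routing functions assigns it a real-time owner.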
@@ -489,6 +707,7 @@ void tasklet_kill(struct tasklet_struct *t)
 			yield();
 		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
 	}
+
 	tasklet_unlock_wait(t);
 	clear_bit(TASKLET_STATE_SCHED, &t->state);
 }
@@ -694,6 +913,8 @@ void __init softirq_init(void)
 
 static int run_ksoftirqd(void * __bind_cpu)
 {
+	unsigned long flags;
+
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	while (!kthread_should_stop()) {
@@ -712,7 +933,11 @@ static int run_ksoftirqd(void * __bind_cpu)
 			   don't process */
 			if (cpu_is_offline((long)__bind_cpu))
 				goto wait_to_die;
-			do_softirq();
+
+			local_irq_save(flags);
+			____do_softirq();
+			local_irq_restore(flags);
+
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
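Note that the daemon now invokes the single-pass worker directly rather than do_softirq(): ____do_softirq() expects to be entered and left with interrupts disabled, so the call is bracketed with local_irq_save()/local_irq_restore(), and the MAX_SOFTIRQ_RESTART retry loop is skipped because the enclosing loop in run_ksoftirqd() already re-tests local_softirq_pending(). A condensed view of the loop body after this change (hotplug and error paths omitted):

	/* Sketch of the per-iteration body of run_ksoftirqd() after this
	 * patch; the enclosing loop re-tests local_softirq_pending(). */
	unsigned long flags;

	while (local_softirq_pending()) {
		local_irq_save(flags);	/* ____do_softirq() expects irqs off */
		____do_softirq();	/* one pass; no MAX_SOFTIRQ_RESTART loop */
		local_irq_restore(flags);

		preempt_enable_no_resched();
		cond_resched();
		preempt_disable();
	}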
@@ -760,6 +985,7 @@ void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
 	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
 		if (*i == t) {
 			*i = t->next;
+
 			/* If this was the tail element, move the tail ptr */
 			if (*i == NULL)
 				per_cpu(tasklet_vec, cpu).tail = i;