aboutsummaryrefslogtreecommitdiffstats
path: root/litmus
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2013-02-06 20:30:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2013-02-06 20:30:13 -0500
commit9aacc135e0abe206b7d778af937babaaa7f3c199 (patch)
treeeaab1b47ff8b90cc5f733b6a345878fd1ba03f27 /litmus
parentdd4c9d77928d67e3afa916b6f1a14e20f02ee67f (diff)
re-enable klmirqd for workqueues, and graceful reboot
Diffstat (limited to 'litmus')
-rw-r--r--litmus/Kconfig37
-rw-r--r--litmus/litmus.c2
-rw-r--r--litmus/litmus_softirq.c10
-rw-r--r--litmus/nvidia_info.c306
-rw-r--r--litmus/sched_cedf.c4
-rw-r--r--litmus/sched_task_trace.c4
6 files changed, 307 insertions, 56 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig
index bf802b760e1f..fad4220ae49f 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -400,7 +400,7 @@ config LITMUS_SOFTIRQD_NONE
400 Don't schedule tasklets in Litmus. Default. 400 Don't schedule tasklets in Litmus. Default.
401 401
402config LITMUS_SOFTIRQD 402config LITMUS_SOFTIRQD
403 bool "Spawn klmirqd interrupt handling threads." 403 bool "Enable klmirqd interrupt (and workqueue) handling threads."
404 help 404 help
405 Create klmirqd interrupt handling threads. Work must be 405 Create klmirqd interrupt handling threads. Work must be
406 specifically dispatched to these workers. (Softirqs for 406 specifically dispatched to these workers. (Softirqs for
@@ -423,11 +423,10 @@ endchoice
423 423
424 424
425config LITMUS_NVIDIA 425config LITMUS_NVIDIA
426 bool "Litmus handling of NVIDIA interrupts." 426 bool "Litmus handling of NVIDIA driver."
427 default n 427 default n
428 help 428 help
429 Direct tasklets from NVIDIA devices to Litmus's klmirqd 429 Enable Litmus control of NVIDIA driver tasklet/workqueues.
430 or PAI interrupt handling routines.
431 430
432 If unsure, say No. 431 If unsure, say No.
433 432
@@ -444,6 +443,36 @@ config LITMUS_NVIDIA_NONSPLIT_INTERRUPTS
444 443
445 If unsure, say No. 444 If unsure, say No.
446 445
446choice
447 prompt "Litmus handling of NVIDIA workqueues."
448 depends on LITMUS_NVIDIA
449 default LITMUS_NVIDIA_WORKQ_OFF
450 help
451 Select method for handling NVIDIA workqueues.
452
453config LITMUS_NVIDIA_WORKQ_OFF
454 bool "Use Linux's default work queues."
455 help
456 Let Linux process all NVIDIA work queue items.
457
458config LITMUS_NVIDIA_WORKQ_ON
459 bool "Schedule work with interrupt thread."
460 depends on LITMUS_SOFTIRQD
461 help
462	  Direct work queue items from NVIDIA devices to Litmus's
463 klmirqd handling routines. Use the same thread
464 as interrupt handling.
465
466config LITMUS_NVIDIA_WORKQ_ON_DEDICATED
467	bool "Schedule work in dedicated threads."
468 depends on LITMUS_SOFTIRQD
469 help
470 Direct work queue items from NVIDIA devices to Litmus's
471 klmirqd handling routines. Use dedicated thread for
472	  work (separate thread from interrupt handling).
473
474endchoice
475
447config LITMUS_AFFINITY_AWARE_GPU_ASSINGMENT 476config LITMUS_AFFINITY_AWARE_GPU_ASSINGMENT
448 bool "Enable affinity-aware heuristics to improve GPU assignment." 477 bool "Enable affinity-aware heuristics to improve GPU assignment."
449 depends on LITMUS_NVIDIA && LITMUS_AFFINITY_LOCKING 478 depends on LITMUS_NVIDIA && LITMUS_AFFINITY_LOCKING
diff --git a/litmus/litmus.c b/litmus/litmus.c
index f0d01c34110c..740b5e57ecc1 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -736,7 +736,7 @@ static int litmus_shutdown_nb(struct notifier_block *unused1,
736 if (litmus != &linux_sched_plugin) { 736 if (litmus != &linux_sched_plugin) {
737 int ret = switch_sched_plugin(&linux_sched_plugin); 737 int ret = switch_sched_plugin(&linux_sched_plugin);
738 if (ret) { 738 if (ret) {
739 printk("Auto-shutdown of active Litmus plugin failed.\n"); 739 printk(KERN_EMERG "Auto-shutdown of active Litmus plugin failed.\n");
740 } 740 }
741 } 741 }
742 return NOTIFY_DONE; 742 return NOTIFY_DONE;
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
index a5d61afe7952..aa83b363be7c 100644
--- a/litmus/litmus_softirq.c
+++ b/litmus/litmus_softirq.c
@@ -883,9 +883,6 @@ void flush_pending(struct task_struct* tsk)
883 883
884 work_flushed |= LIT_TASKLET_HI; 884 work_flushed |= LIT_TASKLET_HI;
885 885
886// t->owner = NULL;
887
888 // WTF?
889 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) 886 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
890 { 887 {
891 atomic_dec(&which->num_hi_pending); 888 atomic_dec(&which->num_hi_pending);
@@ -923,9 +920,6 @@ void flush_pending(struct task_struct* tsk)
923 920
924 work_flushed |= LIT_TASKLET_LOW; 921 work_flushed |= LIT_TASKLET_LOW;
925 922
926// t->owner = NULL;
927// sched_trace_tasklet_end(owner, 1ul);
928
929 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) 923 if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
930 { 924 {
931 atomic_dec(&which->num_low_pending); 925 atomic_dec(&which->num_low_pending);
@@ -956,14 +950,10 @@ void flush_pending(struct task_struct* tsk)
956 atomic_dec(&which->num_work_pending); 950 atomic_dec(&which->num_work_pending);
957 951
958 work->owner = NULL; 952 work->owner = NULL;
959// sched_trace_work_end(owner, current, 1ul);
960 __schedule_work(work); 953 __schedule_work(work);
961 } 954 }
962 } 955 }
963 956
964 //__dump_state(which, "flush_pending: after (before reeval prio)");
965
966
967 mb(); /* commit changes to pending flags */ 957 mb(); /* commit changes to pending flags */
968 958
969 raw_spin_unlock_irqrestore(&which->lock, flags); 959 raw_spin_unlock_irqrestore(&which->lock, flags);
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index 29031f741fcf..7f1dc18624e1 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -141,7 +141,7 @@ typedef struct litmus_nv_linux_state_s {
141} litmus_nv_linux_state_t; 141} litmus_nv_linux_state_t;
142 142
143 143
144 144#ifdef CONFIG_SCHED_DEBUG_TRACE
145static void __attribute__((unused)) 145static void __attribute__((unused))
146dump_nvidia_info(const struct tasklet_struct *t) 146dump_nvidia_info(const struct tasklet_struct *t)
147{ 147{
@@ -192,7 +192,6 @@ dump_nvidia_info(const struct tasklet_struct *t)
192 int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state)); 192 int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state));
193 int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate); 193 int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate);
194 194
195
196 TRACE("LINUX NV State:\n" 195 TRACE("LINUX NV State:\n"
197 "\tlinux nv state ptr: %p\n" 196 "\tlinux nv state ptr: %p\n"
198 "\taddress of tasklet: %p\n" 197 "\taddress of tasklet: %p\n"
@@ -226,14 +225,39 @@ dump_nvidia_info(const struct tasklet_struct *t)
226 TRACE("INVALID LINUXNVSTATE?????\n"); 225 TRACE("INVALID LINUXNVSTATE?????\n");
227 } 226 }
228} 227}
229 228#endif
230 229
231 230
232static struct module* nvidia_mod = NULL; 231static struct module* nvidia_mod = NULL;
233 232
234static int init_nv_device_reg(void); 233static int init_nv_device_reg(void);
235static int shutdown_nv_device_reg(void); 234static int shutdown_nv_device_reg(void);
235void shutdown_nvidia_info(void);
236 236
237static int nvidia_going_module_notify(struct notifier_block *self,
238 unsigned long val, void *data)
239{
240 struct module *mod = data;
241
242 if (nvidia_mod && (mod == nvidia_mod)) {
243 switch (val) {
244 case MODULE_STATE_GOING:
245 /* just set our mod reference to null to avoid crash */
246 nvidia_mod = NULL;
247 mb();
248 break;
249 default:
250 break;
251 }
252 }
253
254 return 0;
255}
256
257static struct notifier_block nvidia_going = {
258 .notifier_call = nvidia_going_module_notify,
259 .priority = 1,
260};
237 261
238int init_nvidia_info(void) 262int init_nvidia_info(void)
239{ 263{
@@ -246,6 +270,9 @@ int init_nvidia_info(void)
246 (void*)(nvidia_mod->module_core), 270 (void*)(nvidia_mod->module_core),
247 (void*)(nvidia_mod->module_core) + nvidia_mod->core_size); 271 (void*)(nvidia_mod->module_core) + nvidia_mod->core_size);
248 init_nv_device_reg(); 272 init_nv_device_reg();
273
274 register_module_notifier(&nvidia_going);
275
249 return(0); 276 return(0);
250 } 277 }
251 else 278 else
@@ -262,6 +289,7 @@ void shutdown_nvidia_info(void)
262 nvidia_mod = NULL; 289 nvidia_mod = NULL;
263 mb(); 290 mb();
264 291
292 unregister_module_notifier(&nvidia_going);
265 shutdown_nv_device_reg(); 293 shutdown_nv_device_reg();
266} 294}
267 295
@@ -314,9 +342,15 @@ typedef struct {
314 struct binheap owners; 342 struct binheap owners;
315 343
316#ifdef CONFIG_LITMUS_SOFTIRQD 344#ifdef CONFIG_LITMUS_SOFTIRQD
317 klmirqd_callback_t callback; 345 klmirqd_callback_t interrupt_callback;
318 struct task_struct* thread; 346 struct task_struct* interrupt_thread;
319 int ready:1; /* todo: make threads check for the ready flag */ 347 int interrupt_ready:1; /* todo: make threads check for the ready flag */
348
349#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
350 klmirqd_callback_t workq_callback;
351 struct task_struct* workq_thread;
352 int workq_ready:1;
353#endif
320#endif 354#endif
321 355
322#ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG 356#ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG
@@ -330,22 +364,40 @@ static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM];
330 364
331 365
332#ifdef CONFIG_LITMUS_SOFTIRQD 366#ifdef CONFIG_LITMUS_SOFTIRQD
333static int nvidia_klmirqd_cb(void *arg) 367static int nvidia_launch_interrupt_cb(void *arg)
368{
369 unsigned long flags;
370 int reg_device_id = (int)(long long)(arg);
371 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
372
373 TRACE("nvklmirqd callback for GPU %d\n", reg_device_id);
374
375 raw_spin_lock_irqsave(&reg->lock, flags);
376 reg->interrupt_thread = current;
377 reg->interrupt_ready = 1;
378 raw_spin_unlock_irqrestore(&reg->lock, flags);
379
380 return 0;
381}
382
383#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
384static int nvidia_launch_workq_cb(void *arg)
334{ 385{
335 unsigned long flags; 386 unsigned long flags;
336 int reg_device_id = (int)(long long)(arg); 387 int reg_device_id = (int)(long long)(arg);
337 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; 388 nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
338 389
339 TRACE("nv klmirqd callback for GPU %d\n", reg_device_id); 390 TRACE("nvklmworkerd callback for GPU %d\n", reg_device_id);
340 391
341 raw_spin_lock_irqsave(&reg->lock, flags); 392 raw_spin_lock_irqsave(&reg->lock, flags);
342 reg->thread = current; 393 reg->workq_thread = current;
343 reg->ready = 1; 394 reg->workq_ready = 1;
344 raw_spin_unlock_irqrestore(&reg->lock, flags); 395 raw_spin_unlock_irqrestore(&reg->lock, flags);
345 396
346 return 0; 397 return 0;
347} 398}
348#endif 399#endif
400#endif
349 401
350#ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG 402#ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG
351struct nv_klmirqd_dbg_timer_struct 403struct nv_klmirqd_dbg_timer_struct
@@ -391,9 +443,9 @@ static enum hrtimer_restart nvklmirqd_timer_func(struct hrtimer *timer)
391 443
392 reg = &NV_DEVICE_REG[gpu]; 444 reg = &NV_DEVICE_REG[gpu];
393 445
394 if (reg->thread && reg->ready) { 446 if (reg->interrupt_thread && reg->interrupt_ready) {
395 TRACE("Adding a tasklet for GPU %d\n", gpu); 447 TRACE("Adding a tasklet for GPU %d\n", gpu);
396 litmus_tasklet_schedule(&reg->nv_klmirqd_dbg_tasklet, reg->thread); 448 litmus_tasklet_schedule(&reg->nv_klmirqd_dbg_tasklet, reg->interrupt_thread);
397 } 449 }
398 else { 450 else {
399 TRACE("nv klmirqd is not ready!\n"); 451 TRACE("nv klmirqd is not ready!\n");
@@ -448,15 +500,25 @@ static int init_nv_device_reg(void)
448 { 500 {
449 int default_cpu = litmus->map_gpu_to_cpu(i); 501 int default_cpu = litmus->map_gpu_to_cpu(i);
450 502
503 /* spawn the interrupt thread */
451 snprintf(name, MAX_KLMIRQD_NAME_LEN, "nvklmirqd%d", i); 504 snprintf(name, MAX_KLMIRQD_NAME_LEN, "nvklmirqd%d", i);
452 505 NV_DEVICE_REG[i].interrupt_callback.func = nvidia_launch_interrupt_cb;
453 NV_DEVICE_REG[i].callback.func = nvidia_klmirqd_cb; 506 NV_DEVICE_REG[i].interrupt_callback.arg = (void*)(long long)(i);
454 NV_DEVICE_REG[i].callback.arg = (void*)(long long)(i);
455 mb(); 507 mb();
508 if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].interrupt_callback) != 0) {
509 TRACE("Failed to create nvklmirqd thread for GPU %d\n", i);
510 }
456 511
457 if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].callback) != 0) { 512#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
458 TRACE("Failed to create klmirqd thread for GPU %d\n", i); 513 /* spawn the workqueue thread */
514 snprintf(name, MAX_KLMIRQD_NAME_LEN, "nvklmworker%d", i);
515 NV_DEVICE_REG[i].workq_callback.func = nvidia_launch_workq_cb;
516 NV_DEVICE_REG[i].workq_callback.arg = (void*)(long long)(i);
517 mb();
518 if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].workq_callback) != 0) {
519 TRACE("Failed to create nvklmworkqd thread for GPU %d\n", i);
459 } 520 }
521#endif
460 } 522 }
461#endif 523#endif
462 } 524 }
@@ -479,6 +541,7 @@ static int shutdown_nv_device_reg(void)
479 541
480#ifdef CONFIG_LITMUS_SOFTIRQD 542#ifdef CONFIG_LITMUS_SOFTIRQD
481 { 543 {
544 unsigned long flags;
482 int i; 545 int i;
483 nv_device_registry_t *reg; 546 nv_device_registry_t *reg;
484 547
@@ -488,12 +551,36 @@ static int shutdown_nv_device_reg(void)
488 551
489 reg = &NV_DEVICE_REG[i]; 552 reg = &NV_DEVICE_REG[i];
490 553
491 if (reg->thread && reg->ready) { 554 if ((reg->interrupt_thread && reg->interrupt_ready)
492 kill_klmirqd_thread(reg->thread); 555#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
493 556 || (reg->workq_thread && reg->workq_ready)
494 /* assume that all goes according to plan... */ 557#endif
495 reg->thread = NULL; 558 )
496 reg->ready = 0; 559 {
560 raw_spin_lock_irqsave(&reg->lock, flags);
561
562 if (reg->interrupt_thread && reg->interrupt_ready) {
563 struct task_struct* th = reg->interrupt_thread;
564 reg->interrupt_thread = NULL;
565 mb();
566 reg->interrupt_ready = 0;
567 mb();
568
569 kill_klmirqd_thread(th);
570 }
571
572#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
573 if (reg->workq_thread && reg->workq_ready) {
574 struct task_struct* th = reg->workq_thread;
575 reg->workq_thread = NULL;
576 mb();
577 reg->workq_ready = 0;
578 mb();
579
580 kill_klmirqd_thread(th);
581 }
582#endif
583 raw_spin_unlock_irqrestore(&reg->lock, flags);
497 } 584 }
498 585
499 while (!binheap_empty(&reg->owners)) { 586 while (!binheap_empty(&reg->owners)) {
@@ -528,29 +615,118 @@ struct task_struct* get_nv_max_device_owner(u32 target_device_id)
528 return(owner); 615 return(owner);
529} 616}
530 617
618
531#ifdef CONFIG_LITMUS_SOFTIRQD 619#ifdef CONFIG_LITMUS_SOFTIRQD
532struct task_struct* get_nv_klmirqd_thread(u32 target_device_id) 620
621typedef enum {
622 INTERRUPT_TH,
623 WORKQ_TH
624} nvklmtype_t;
625
626static struct task_struct* __get_klm_thread(nv_device_registry_t* reg, nvklmtype_t type)
533{ 627{
534 struct task_struct *klmirqd = NULL; 628 struct task_struct *klmirqd = NULL;
535 nv_device_registry_t *reg; 629
630 switch(type)
631 {
632 case INTERRUPT_TH:
633#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON
634 case WORKQ_TH:
635#endif
636 if(likely(reg->interrupt_ready))
637 klmirqd = reg->interrupt_thread;
638 break;
639#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
640 case WORKQ_TH:
641 if(likely(reg->workq_ready))
642 klmirqd = reg->workq_thread;
643 break;
644#endif
645 }
536 646
537 BUG_ON(target_device_id >= NV_DEVICE_NUM); 647 return klmirqd;
648}
538 649
539 reg = &NV_DEVICE_REG[target_device_id]; 650static struct task_struct* __get_and_lock_klm_thread(nv_device_registry_t* reg, unsigned long* flags, nvklmtype_t type)
651{
652 struct task_struct *klmirqd;
653
654 raw_spin_lock_irqsave(&reg->lock, *flags);
655 klmirqd = __get_klm_thread(reg, type);
540 656
541 if(likely(reg->ready)) { 657 if (!klmirqd) {
542 klmirqd = reg->thread; 658 /* unlock if thread does not exist or is not ready */
659 raw_spin_unlock_irqrestore(&reg->lock, *flags);
543 } 660 }
544 661
545 return klmirqd; 662 return klmirqd;
546} 663}
547#endif
548 664
665static void __unlock_klm_thread(nv_device_registry_t* reg, unsigned long* flags, nvklmtype_t type)
666{
667 /* workq and interrupts share a lock per GPU */
668 raw_spin_unlock_irqrestore(&reg->lock, *flags);
669}
670
671struct task_struct* get_and_lock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags)
672{
673 nv_device_registry_t *reg;
674 BUG_ON(target_device_id >= NV_DEVICE_NUM);
675 reg = &NV_DEVICE_REG[target_device_id];
676 return __get_and_lock_klm_thread(reg, flags, INTERRUPT_TH);
677}
678
679void unlock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags)
680{
681 nv_device_registry_t *reg;
682 BUG_ON(target_device_id >= NV_DEVICE_NUM);
683 reg = &NV_DEVICE_REG[target_device_id];
684 __unlock_klm_thread(reg, flags, INTERRUPT_TH);
685}
549 686
687struct task_struct* get_nvklmirqd_thread(u32 target_device_id)
688{
689 /* should this function be allowed? who will use klmirqd thread without thread safety? */
690 unsigned long flags;
691 struct task_struct *klmirqd;
692 klmirqd = get_and_lock_nvklmirqd_thread(target_device_id, &flags);
693 if(klmirqd)
694 unlock_nvklmirqd_thread(target_device_id, &flags);
695 return klmirqd;
696}
550 697
698#if defined(CONFIG_LITMUS_NVIDIA_WORKQ_ON) || defined(CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED)
699
700struct task_struct* get_and_lock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags)
701{
702 nv_device_registry_t *reg;
703 BUG_ON(target_device_id >= NV_DEVICE_NUM);
704 reg = &NV_DEVICE_REG[target_device_id];
705 return __get_and_lock_klm_thread(reg, flags, WORKQ_TH);
706}
707
708void unlock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags)
709{
710 nv_device_registry_t *reg;
711 BUG_ON(target_device_id >= NV_DEVICE_NUM);
712 reg = &NV_DEVICE_REG[target_device_id];
713 __unlock_klm_thread(reg, flags, WORKQ_TH);
714}
715
716
717struct task_struct* get_nvklmworkqd_thread(u32 target_device_id)
718{
719 /* should this function be allowed? who will use klmirqd thread without thread safety? */
720 unsigned long flags;
721 struct task_struct *klmirqd;
722 klmirqd = get_and_lock_nvklmworkqd_thread(target_device_id, &flags);
723 if(klmirqd)
724 unlock_nvklmworkqd_thread(target_device_id, &flags);
725 return klmirqd;
726}
727#endif // end WORKQs
551 728
552 729
553#ifdef CONFIG_LITMUS_SOFTIRQD
554static int gpu_klmirqd_increase_priority(struct task_struct *klmirqd, struct task_struct *hp) 730static int gpu_klmirqd_increase_priority(struct task_struct *klmirqd, struct task_struct *hp)
555{ 731{
556 int retval = 0; 732 int retval = 0;
@@ -582,7 +758,7 @@ static int gpu_klmirqd_decrease_priority(struct task_struct *klmirqd, struct tas
582 758
583 return retval; 759 return retval;
584} 760}
585#endif 761#endif // end CONFIG_LITMUS_SOFTIRQD
586 762
587 763
588 764
@@ -624,13 +800,27 @@ long enable_gpu_owner(struct task_struct *t)
624 struct task_struct, rt_param); 800 struct task_struct, rt_param);
625 801
626 if (hp == t) { 802 if (hp == t) {
803 int interrupt_success;
804
627 /* we're the new hp */ 805 /* we're the new hp */
628 TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n", 806 TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n",
629 t->comm, t->pid, 807 t->comm, t->pid,
630 effective_priority(t)->comm, effective_priority(t)->pid, 808 effective_priority(t)->comm, effective_priority(t)->pid,
631 gpu); 809 gpu);
632 810
633 retval = gpu_klmirqd_increase_priority(reg->thread, effective_priority(t)); 811 interrupt_success = gpu_klmirqd_increase_priority(reg->interrupt_thread, effective_priority(t));
812
813#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
814 {
815 int workq_success = gpu_klmirqd_increase_priority(reg->workq_thread, effective_priority(t));
816 if(interrupt_success != 1 || workq_success != 1)
817 retval = (interrupt_success != 1) ? interrupt_success : workq_success;
818 else
819 retval = 1;
820 }
821#else
822 retval = interrupt_success;
823#endif
634 } 824 }
635#endif 825#endif
636 826
@@ -682,6 +872,9 @@ long disable_gpu_owner(struct task_struct *t)
682 } 872 }
683 873
684 if (hp == t && new_hp != t) { 874 if (hp == t && new_hp != t) {
875 int interrupt_success;
876#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
877#endif
685 struct task_struct *to_inh = (new_hp) ? effective_priority(new_hp) : NULL; 878 struct task_struct *to_inh = (new_hp) ? effective_priority(new_hp) : NULL;
686 879
687 TRACE_CUR("%s/%d is no longer hp on GPU %d; new hp = %s/%d (eff_prio = %s/%d).\n", 880 TRACE_CUR("%s/%d is no longer hp on GPU %d; new hp = %s/%d (eff_prio = %s/%d).\n",
@@ -692,7 +885,19 @@ long disable_gpu_owner(struct task_struct *t)
692 (to_inh) ? to_inh->comm : "null", 885 (to_inh) ? to_inh->comm : "null",
693 (to_inh) ? to_inh->pid : 0); 886 (to_inh) ? to_inh->pid : 0);
694 887
695 retval = gpu_klmirqd_decrease_priority(reg->thread, to_inh); 888 interrupt_success = gpu_klmirqd_decrease_priority(reg->interrupt_thread, to_inh);
889
890#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
891 {
892 int workq_success = gpu_klmirqd_decrease_priority(reg->workq_thread, to_inh);
893 if(interrupt_success != 1 || workq_success != 1)
894 retval = (interrupt_success != 1) ? interrupt_success : workq_success;
895 else
896 retval = 1;
897 }
898#else
899 retval = interrupt_success;
900#endif
696 } 901 }
697#else 902#else
698 binheap_delete(&tsk_rt(t)->gpu_owner_node, &reg->owners); 903 binheap_delete(&tsk_rt(t)->gpu_owner_node, &reg->owners);
@@ -758,13 +963,28 @@ int gpu_owner_increase_priority(struct task_struct *t)
758 963
759 /* check if the eff. prio. of hp has changed */ 964 /* check if the eff. prio. of hp has changed */
760 if (increase_klmirqd || (effective_priority(hp) != hp_eff)) { 965 if (increase_klmirqd || (effective_priority(hp) != hp_eff)) {
966 int interrupt_success;
967
761 hp_eff = effective_priority(hp); 968 hp_eff = effective_priority(hp);
762 TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n", 969 TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n",
763 t->comm, t->pid, 970 t->comm, t->pid,
764 hp_eff->comm, hp_eff->pid, 971 hp_eff->comm, hp_eff->pid,
765 gpu); 972 gpu);
766 973
767 retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff); 974 interrupt_success = gpu_klmirqd_increase_priority(reg->interrupt_thread, hp_eff);
975
976#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
977 {
978 int workq_success = gpu_klmirqd_increase_priority(reg->workq_thread, hp_eff);
979 if(interrupt_success != 1 || workq_success != 1)
980 retval = (interrupt_success != 1) ? interrupt_success : workq_success;
981 else
982 retval = 1;
983 }
984#else
985 retval = interrupt_success;
986#endif
987
768 } 988 }
769#endif 989#endif
770 990
@@ -810,9 +1030,23 @@ int gpu_owner_decrease_priority(struct task_struct *t)
810 struct task_struct, rt_param); 1030 struct task_struct, rt_param);
811 /* if the new_hp is still t, or if the effective priority has changed */ 1031 /* if the new_hp is still t, or if the effective priority has changed */
812 if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) { 1032 if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) {
1033 int interrupt_success;
1034
813 hp_eff = effective_priority(new_hp); 1035 hp_eff = effective_priority(new_hp);
814 TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); 1036 TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu);
815 retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff); 1037 interrupt_success = gpu_klmirqd_decrease_priority(reg->interrupt_thread, hp_eff);
1038
1039#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
1040 {
1041 int workq_success = gpu_klmirqd_decrease_priority(reg->workq_thread, hp_eff);
1042 if(interrupt_success != 1 || workq_success != 1)
1043 retval = (interrupt_success != 1) ? interrupt_success : workq_success;
1044 else
1045 retval = 1;
1046 }
1047#else
1048 retval = interrupt_success;
1049#endif
816 } 1050 }
817 } 1051 }
818#endif 1052#endif
diff --git a/litmus/sched_cedf.c b/litmus/sched_cedf.c
index df931de13e16..e2737bafa9b8 100644
--- a/litmus/sched_cedf.c
+++ b/litmus/sched_cedf.c
@@ -1701,10 +1701,6 @@ static void cleanup_cedf(void)
1701{ 1701{
1702 int i; 1702 int i;
1703 1703
1704#ifdef CONFIG_LITMUS_NVIDIA
1705 shutdown_nvidia_info();
1706#endif
1707
1708 if (clusters_allocated) { 1704 if (clusters_allocated) {
1709 for (i = 0; i < num_clusters; i++) { 1705 for (i = 0; i < num_clusters; i++) {
1710 kfree(cedf[i].cpus); 1706 kfree(cedf[i].cpus);
diff --git a/litmus/sched_task_trace.c b/litmus/sched_task_trace.c
index 2583ee2bb437..d1df0127cfa4 100644
--- a/litmus/sched_task_trace.c
+++ b/litmus/sched_task_trace.c
@@ -349,13 +349,15 @@ EXPORT_SYMBOL(do_sched_trace_tasklet_end);
349 349
350 350
351feather_callback void do_sched_trace_work_release(unsigned long id, 351feather_callback void do_sched_trace_work_release(unsigned long id,
352 unsigned long _owner) 352 unsigned long _owner,
353 unsigned long _device)
353{ 354{
354 struct task_struct *t = (struct task_struct*) _owner; 355 struct task_struct *t = (struct task_struct*) _owner;
355 struct st_event_record *rec = get_record(ST_WORK_RELEASE, t); 356 struct st_event_record *rec = get_record(ST_WORK_RELEASE, t);
356 357
357 if (rec) { 358 if (rec) {
358 rec->data.work_release.when = now(); 359 rec->data.work_release.when = now();
360 rec->data.work_release.device = _device;
359 put_record(rec); 361 put_record(rec);
360 } 362 }
361} 363}