author     Glenn Elliott <gelliott@cs.unc.edu>  2013-02-06 20:30:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>  2013-02-06 20:30:13 -0500
commit     9aacc135e0abe206b7d778af937babaaa7f3c199 (patch)
tree       eaab1b47ff8b90cc5f733b6a345878fd1ba03f27 /litmus/nvidia_info.c
parent     dd4c9d77928d67e3afa916b6f1a14e20f02ee67f (diff)
re-enable klmirqd for workqueues, and gracefully handle reboot
Diffstat (limited to 'litmus/nvidia_info.c')
 -rw-r--r--  litmus/nvidia_info.c | 306
 1 file changed, 270 insertions(+), 36 deletions(-)
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c
index 29031f741fcf..7f1dc18624e1 100644
--- a/litmus/nvidia_info.c
+++ b/litmus/nvidia_info.c
@@ -141,7 +141,7 @@ typedef struct litmus_nv_linux_state_s {
 } litmus_nv_linux_state_t;
 
 
-
+#ifdef CONFIG_SCHED_DEBUG_TRACE
 static void __attribute__((unused))
 dump_nvidia_info(const struct tasklet_struct *t)
 {
@@ -192,7 +192,6 @@ dump_nvidia_info(const struct tasklet_struct *t)
 	int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state));
 	int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate);
 
-
 	TRACE("LINUX NV State:\n"
 		"\tlinux nv state ptr: %p\n"
 		"\taddress of tasklet: %p\n"
@@ -226,14 +225,39 @@ dump_nvidia_info(const struct tasklet_struct *t)
 		TRACE("INVALID LINUXNVSTATE?????\n");
 	}
 }
-
+#endif
 
 
 static struct module* nvidia_mod = NULL;
 
 static int init_nv_device_reg(void);
 static int shutdown_nv_device_reg(void);
+void shutdown_nvidia_info(void);
 
+static int nvidia_going_module_notify(struct notifier_block *self,
+				unsigned long val, void *data)
+{
+	struct module *mod = data;
+
+	if (nvidia_mod && (mod == nvidia_mod)) {
+		switch (val) {
+		case MODULE_STATE_GOING:
+			/* just set our mod reference to null to avoid crash */
+			nvidia_mod = NULL;
+			mb();
+			break;
+		default:
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static struct notifier_block nvidia_going = {
+	.notifier_call = nvidia_going_module_notify,
+	.priority = 1,
+};
 
 int init_nvidia_info(void)
 {
@@ -246,6 +270,9 @@ int init_nvidia_info(void)
 			(void*)(nvidia_mod->module_core),
 			(void*)(nvidia_mod->module_core) + nvidia_mod->core_size);
 		init_nv_device_reg();
+
+		register_module_notifier(&nvidia_going);
+
 		return(0);
 	}
 	else
@@ -262,6 +289,7 @@ void shutdown_nvidia_info(void)
 	nvidia_mod = NULL;
 	mb();
 
+	unregister_module_notifier(&nvidia_going);
 	shutdown_nv_device_reg();
 }
 
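(Reviewer note: the two hunks above are the heart of the graceful-reboot fix. init_nvidia_info() caches a pointer to the binary nvidia module, and the new notifier clears that cache when the module transitions to MODULE_STATE_GOING, e.g. during shutdown. A self-contained sketch of the same kernel notifier API; the names watched_mod and watcher are illustrative, not from the commit:)

#include <linux/module.h>
#include <linux/notifier.h>

static struct module *watched_mod;	/* cached elsewhere, as nvidia_mod is */

static int watcher_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	struct module *mod = data;

	if (val == MODULE_STATE_GOING && mod == watched_mod) {
		watched_mod = NULL;	/* drop the reference before it dangles */
		mb();
	}
	return 0;
}

static struct notifier_block watcher = {
	.notifier_call = watcher_notify,
};

/* arm with register_module_notifier(&watcher);
 * disarm with unregister_module_notifier(&watcher) before the watcher
 * itself goes away */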
@@ -314,9 +342,15 @@ typedef struct {
 	struct binheap owners;
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-	klmirqd_callback_t callback;
-	struct task_struct* thread;
-	int ready:1; /* todo: make threads check for the ready flag */
+	klmirqd_callback_t interrupt_callback;
+	struct task_struct* interrupt_thread;
+	int interrupt_ready:1; /* todo: make threads check for the ready flag */
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+	klmirqd_callback_t workq_callback;
+	struct task_struct* workq_thread;
+	int workq_ready:1;
+#endif
 #endif
 
 #ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG
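(Reviewer note: the ready flags are declared as plain `int` bitfields. A signed 1-bit field can only represent 0 and -1, so with GCC the assignment `reg->interrupt_ready = 1` actually stores -1. The code is safe because it only ever tests the flags for truthiness, but `unsigned int` would store 1 as written. A userspace sketch of the difference:)

#include <stdio.h>

struct flags {
	int signed_ready:1;		/* representable values: -1, 0 */
	unsigned int unsigned_ready:1;	/* representable values: 0, 1 */
};

int main(void)
{
	struct flags f = { .signed_ready = 1, .unsigned_ready = 1 };
	/* prints "-1 1" with GCC: both fields are truthy, but only the
	 * unsigned one compares equal to 1 */
	printf("%d %u\n", f.signed_ready, f.unsigned_ready);
	return 0;
}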
@@ -330,22 +364,40 @@ static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM];
 
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
-static int nvidia_klmirqd_cb(void *arg)
+static int nvidia_launch_interrupt_cb(void *arg)
+{
+	unsigned long flags;
+	int reg_device_id = (int)(long long)(arg);
+	nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
+
+	TRACE("nvklmirqd callback for GPU %d\n", reg_device_id);
+
+	raw_spin_lock_irqsave(&reg->lock, flags);
+	reg->interrupt_thread = current;
+	reg->interrupt_ready = 1;
+	raw_spin_unlock_irqrestore(&reg->lock, flags);
+
+	return 0;
+}
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+static int nvidia_launch_workq_cb(void *arg)
 {
 	unsigned long flags;
 	int reg_device_id = (int)(long long)(arg);
 	nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];
 
-	TRACE("nv klmirqd callback for GPU %d\n", reg_device_id);
+	TRACE("nvklmworkerd callback for GPU %d\n", reg_device_id);
 
 	raw_spin_lock_irqsave(&reg->lock, flags);
-	reg->thread = current;
-	reg->ready = 1;
+	reg->workq_thread = current;
+	reg->workq_ready = 1;
 	raw_spin_unlock_irqrestore(&reg->lock, flags);
 
 	return 0;
 }
 #endif
+#endif
 
 #ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG
 struct nv_klmirqd_dbg_timer_struct
@@ -391,9 +443,9 @@ static enum hrtimer_restart nvklmirqd_timer_func(struct hrtimer *timer)
 
 	reg = &NV_DEVICE_REG[gpu];
 
-	if (reg->thread && reg->ready) {
+	if (reg->interrupt_thread && reg->interrupt_ready) {
 		TRACE("Adding a tasklet for GPU %d\n", gpu);
-		litmus_tasklet_schedule(&reg->nv_klmirqd_dbg_tasklet, reg->thread);
+		litmus_tasklet_schedule(&reg->nv_klmirqd_dbg_tasklet, reg->interrupt_thread);
 	}
 	else {
 		TRACE("nv klmirqd is not ready!\n");
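(Reviewer note: nvklmirqd_timer_func() is the CONFIG_LITMUS_NV_KLMIRQD_DEBUG path that periodically injects a debug tasklet into the interrupt thread. For readers unfamiliar with self-rearming hrtimers, a generic sketch of that shape; the interval and names are illustrative, not the commit's:)

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer dbg_timer;

static enum hrtimer_restart dbg_timer_func(struct hrtimer *timer)
{
	/* ... periodic debug work goes here ... */

	/* push the expiry forward and ask the hrtimer core to re-arm us */
	hrtimer_forward_now(timer, ktime_set(0, 100 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

static void dbg_timer_start(void)
{
	hrtimer_init(&dbg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	dbg_timer.function = dbg_timer_func;
	hrtimer_start(&dbg_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
			HRTIMER_MODE_REL);
}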
@@ -448,15 +500,25 @@ static int init_nv_device_reg(void)
 		{
 			int default_cpu = litmus->map_gpu_to_cpu(i);
 
+			/* spawn the interrupt thread */
 			snprintf(name, MAX_KLMIRQD_NAME_LEN, "nvklmirqd%d", i);
-
-			NV_DEVICE_REG[i].callback.func = nvidia_klmirqd_cb;
-			NV_DEVICE_REG[i].callback.arg = (void*)(long long)(i);
+			NV_DEVICE_REG[i].interrupt_callback.func = nvidia_launch_interrupt_cb;
+			NV_DEVICE_REG[i].interrupt_callback.arg = (void*)(long long)(i);
 			mb();
+			if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].interrupt_callback) != 0) {
+				TRACE("Failed to create nvklmirqd thread for GPU %d\n", i);
+			}
 
-			if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].callback) != 0) {
-				TRACE("Failed to create klmirqd thread for GPU %d\n", i);
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+			/* spawn the workqueue thread */
+			snprintf(name, MAX_KLMIRQD_NAME_LEN, "nvklmworker%d", i);
+			NV_DEVICE_REG[i].workq_callback.func = nvidia_launch_workq_cb;
+			NV_DEVICE_REG[i].workq_callback.arg = (void*)(long long)(i);
+			mb();
+			if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].workq_callback) != 0) {
+				TRACE("Failed to create nvklmworkqd thread for GPU %d\n", i);
 			}
+#endif
 		}
 #endif
 	}
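(Reviewer note: both spawn sites pack the GPU index into the callback's void* argument via `(void*)(long long)(i)`, and the launch callbacks recover it with the inverse cast. This smuggles a small integer through a pointer so no per-GPU context needs to be allocated. The idiom in isolation, with illustrative names:)

static int example_cb(void *arg)
{
	int gpu = (int)(long long)arg;	/* recover the GPU index */
	/* ... per-GPU setup runs in the spawned thread ... */
	return 0;
}

static void example_spawn(int gpu)
{
	void *arg = (void *)(long long)gpu;	/* pack the index */
	/* hand example_cb and arg to the thread-spawning API in use,
	 * as launch_klmirqd_thread() is used above */
	(void)arg;
}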
@@ -479,6 +541,7 @@ static int shutdown_nv_device_reg(void)
 
 #ifdef CONFIG_LITMUS_SOFTIRQD
 	{
+		unsigned long flags;
 		int i;
 		nv_device_registry_t *reg;
 
@@ -488,12 +551,36 @@ static int shutdown_nv_device_reg(void)
 
 			reg = &NV_DEVICE_REG[i];
 
-			if (reg->thread && reg->ready) {
-				kill_klmirqd_thread(reg->thread);
-
-				/* assume that all goes according to plan... */
-				reg->thread = NULL;
-				reg->ready = 0;
+			if ((reg->interrupt_thread && reg->interrupt_ready)
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+				|| (reg->workq_thread && reg->workq_ready)
+#endif
+				)
+			{
+				raw_spin_lock_irqsave(&reg->lock, flags);
+
+				if (reg->interrupt_thread && reg->interrupt_ready) {
+					struct task_struct* th = reg->interrupt_thread;
+					reg->interrupt_thread = NULL;
+					mb();
+					reg->interrupt_ready = 0;
+					mb();
+
+					kill_klmirqd_thread(th);
+				}
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+				if (reg->workq_thread && reg->workq_ready) {
+					struct task_struct* th = reg->workq_thread;
+					reg->workq_thread = NULL;
+					mb();
+					reg->workq_ready = 0;
+					mb();
+
+					kill_klmirqd_thread(th);
+				}
+#endif
+				raw_spin_unlock_irqrestore(&reg->lock, flags);
 			}
 
 			while (!binheap_empty(&reg->owners)) {
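(Reviewer note: the rewritten teardown unpublishes each thread before reaping it: take the per-GPU lock, NULL the thread pointer and clear the ready flag with barriers, and only then call kill_klmirqd_thread(). That ordering keeps a concurrent lookup from handing out a dying task. The idiom as a hypothetical helper; it uses plain int flags since C cannot take the address of the registry's bitfields, and it reaps outside the lock, a variation on the commit, which reaps while still holding it:)

static void teardown_klm_thread(struct task_struct **slot, int *ready,
				raw_spinlock_t *lock)
{
	unsigned long flags;
	struct task_struct *th;

	raw_spin_lock_irqsave(lock, flags);
	th = *slot;
	*slot = NULL;	/* unpublish first... */
	mb();
	*ready = 0;
	mb();
	raw_spin_unlock_irqrestore(lock, flags);

	if (th)
		kill_klmirqd_thread(th);	/* ...then reap (LITMUS^RT API) */
}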
@@ -528,29 +615,118 @@ struct task_struct* get_nv_max_device_owner(u32 target_device_id)
 	return(owner);
 }
 
+
 #ifdef CONFIG_LITMUS_SOFTIRQD
-struct task_struct* get_nv_klmirqd_thread(u32 target_device_id)
+
+typedef enum {
+	INTERRUPT_TH,
+	WORKQ_TH
+} nvklmtype_t;
+
+static struct task_struct* __get_klm_thread(nv_device_registry_t* reg, nvklmtype_t type)
 {
 	struct task_struct *klmirqd = NULL;
-	nv_device_registry_t *reg;
+
+	switch(type)
+	{
+		case INTERRUPT_TH:
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON
+		case WORKQ_TH:
+#endif
+			if(likely(reg->interrupt_ready))
+				klmirqd = reg->interrupt_thread;
+			break;
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+		case WORKQ_TH:
+			if(likely(reg->workq_ready))
+				klmirqd = reg->workq_thread;
+			break;
+#endif
+	}
 
-	BUG_ON(target_device_id >= NV_DEVICE_NUM);
+	return klmirqd;
+}
 
-	reg = &NV_DEVICE_REG[target_device_id];
+static struct task_struct* __get_and_lock_klm_thread(nv_device_registry_t* reg, unsigned long* flags, nvklmtype_t type)
+{
+	struct task_struct *klmirqd;
+
+	raw_spin_lock_irqsave(&reg->lock, *flags);
+	klmirqd = __get_klm_thread(reg, type);
 
-	if(likely(reg->ready)) {
-		klmirqd = reg->thread;
+	if (!klmirqd) {
+		/* unlock if thread does not exist or is not ready */
+		raw_spin_unlock_irqrestore(&reg->lock, *flags);
 	}
 
 	return klmirqd;
 }
-#endif
 
+static void __unlock_klm_thread(nv_device_registry_t* reg, unsigned long* flags, nvklmtype_t type)
+{
+	/* workq and interrupts share a lock per GPU */
+	raw_spin_unlock_irqrestore(&reg->lock, *flags);
+}
+
+struct task_struct* get_and_lock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags)
+{
+	nv_device_registry_t *reg;
+	BUG_ON(target_device_id >= NV_DEVICE_NUM);
+	reg = &NV_DEVICE_REG[target_device_id];
+	return __get_and_lock_klm_thread(reg, flags, INTERRUPT_TH);
+}
+
+void unlock_nvklmirqd_thread(u32 target_device_id, unsigned long* flags)
+{
+	nv_device_registry_t *reg;
+	BUG_ON(target_device_id >= NV_DEVICE_NUM);
+	reg = &NV_DEVICE_REG[target_device_id];
+	__unlock_klm_thread(reg, flags, INTERRUPT_TH);
+}
 
+struct task_struct* get_nvklmirqd_thread(u32 target_device_id)
+{
+	/* should this function be allowed? who will use klmirqd thread without thread safety? */
+	unsigned long flags;
+	struct task_struct *klmirqd;
+	klmirqd = get_and_lock_nvklmirqd_thread(target_device_id, &flags);
+	if(klmirqd)
+		unlock_nvklmirqd_thread(target_device_id, &flags);
+	return klmirqd;
+}
 
+#if defined(CONFIG_LITMUS_NVIDIA_WORKQ_ON) || defined(CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED)
+
+struct task_struct* get_and_lock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags)
+{
+	nv_device_registry_t *reg;
+	BUG_ON(target_device_id >= NV_DEVICE_NUM);
+	reg = &NV_DEVICE_REG[target_device_id];
+	return __get_and_lock_klm_thread(reg, flags, WORKQ_TH);
+}
+
+void unlock_nvklmworkqd_thread(u32 target_device_id, unsigned long* flags)
+{
+	nv_device_registry_t *reg;
+	BUG_ON(target_device_id >= NV_DEVICE_NUM);
+	reg = &NV_DEVICE_REG[target_device_id];
+	__unlock_klm_thread(reg, flags, WORKQ_TH);
+}
+
+
+struct task_struct* get_nvklmworkqd_thread(u32 target_device_id)
+{
+	/* should this function be allowed? who will use klmirqd thread without thread safety? */
+	unsigned long flags;
+	struct task_struct *klmirqd;
+	klmirqd = get_and_lock_nvklmworkqd_thread(target_device_id, &flags);
+	if(klmirqd)
+		unlock_nvklmworkqd_thread(target_device_id, &flags);
+	return klmirqd;
+}
+#endif // end WORKQs
 
 
-#ifdef CONFIG_LITMUS_SOFTIRQD
 static int gpu_klmirqd_increase_priority(struct task_struct *klmirqd, struct task_struct *hp)
 {
 	int retval = 0;
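(Reviewer note: the new get-and-lock API returns with the per-GPU lock held if and only if a ready thread was found, since __get_and_lock_klm_thread() unlocks on failure. Callers therefore unlock conditionally, exactly as get_nvklmirqd_thread() does above. A hypothetical caller using only functions introduced in this hunk:)

static void poke_nvklmirqd(u32 gpu)
{
	unsigned long flags;
	struct task_struct *klmirqd;

	klmirqd = get_and_lock_nvklmirqd_thread(gpu, &flags);
	if (klmirqd) {
		/* klmirqd cannot be torn down while we hold the lock */
		unlock_nvklmirqd_thread(gpu, &flags);
	}
}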
@@ -582,7 +758,7 @@ static int gpu_klmirqd_decrease_priority(struct task_struct *klmirqd, struct tas
 
 	return retval;
 }
-#endif
+#endif // end CONFIG_LITMUS_SOFTIRQD
 
 
 
@@ -624,13 +800,27 @@ long enable_gpu_owner(struct task_struct *t)
 				struct task_struct, rt_param);
 
 	if (hp == t) {
+		int interrupt_success;
+
 		/* we're the new hp */
 		TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n",
 			t->comm, t->pid,
 			effective_priority(t)->comm, effective_priority(t)->pid,
 			gpu);
 
-		retval = gpu_klmirqd_increase_priority(reg->thread, effective_priority(t));
+		interrupt_success = gpu_klmirqd_increase_priority(reg->interrupt_thread, effective_priority(t));
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+		{
+			int workq_success = gpu_klmirqd_increase_priority(reg->workq_thread, effective_priority(t));
+			if(interrupt_success != 1 || workq_success != 1)
+				retval = (interrupt_success != 1) ? interrupt_success : workq_success;
+			else
+				retval = 1;
+		}
+#else
+		retval = interrupt_success;
+#endif
 	}
 #endif
 
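(Reviewer note: this interrupt/workqueue status-combining block reappears verbatim in disable_gpu_owner(), gpu_owner_increase_priority(), and gpu_owner_decrease_priority() below; 1 means success and the first failure wins. A hypothetical helper that would fold the four copies into one:)

static inline int combine_klm_status(int interrupt_success, int workq_success)
{
	if (interrupt_success != 1)
		return interrupt_success;
	if (workq_success != 1)
		return workq_success;
	return 1;
}

/* e.g.: retval = combine_klm_status(interrupt_success, workq_success); */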
@@ -682,6 +872,9 @@ long disable_gpu_owner(struct task_struct *t)
 	}
 
 	if (hp == t && new_hp != t) {
+		int interrupt_success;
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+#endif
 		struct task_struct *to_inh = (new_hp) ? effective_priority(new_hp) : NULL;
 
 		TRACE_CUR("%s/%d is no longer hp on GPU %d; new hp = %s/%d (eff_prio = %s/%d).\n",
@@ -692,7 +885,19 @@ long disable_gpu_owner(struct task_struct *t)
 			(to_inh) ? to_inh->comm : "null",
 			(to_inh) ? to_inh->pid : 0);
 
-		retval = gpu_klmirqd_decrease_priority(reg->thread, to_inh);
+		interrupt_success = gpu_klmirqd_decrease_priority(reg->interrupt_thread, to_inh);
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+		{
+			int workq_success = gpu_klmirqd_decrease_priority(reg->workq_thread, to_inh);
+			if(interrupt_success != 1 || workq_success != 1)
+				retval = (interrupt_success != 1) ? interrupt_success : workq_success;
+			else
+				retval = 1;
+		}
+#else
+		retval = interrupt_success;
+#endif
 	}
 #else
 	binheap_delete(&tsk_rt(t)->gpu_owner_node, &reg->owners);
@@ -758,13 +963,28 @@ int gpu_owner_increase_priority(struct task_struct *t)
 
 	/* check if the eff. prio. of hp has changed */
 	if (increase_klmirqd || (effective_priority(hp) != hp_eff)) {
+		int interrupt_success;
+
 		hp_eff = effective_priority(hp);
 		TRACE_CUR("%s/%d (eff_prio = %s/%d) is new hp on GPU %d.\n",
 			t->comm, t->pid,
 			hp_eff->comm, hp_eff->pid,
 			gpu);
 
-		retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff);
+		interrupt_success = gpu_klmirqd_increase_priority(reg->interrupt_thread, hp_eff);
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+		{
+			int workq_success = gpu_klmirqd_increase_priority(reg->workq_thread, hp_eff);
+			if(interrupt_success != 1 || workq_success != 1)
+				retval = (interrupt_success != 1) ? interrupt_success : workq_success;
+			else
+				retval = 1;
+		}
+#else
+		retval = interrupt_success;
+#endif
+
 	}
 #endif
 
@@ -810,9 +1030,23 @@ int gpu_owner_decrease_priority(struct task_struct *t)
 					struct task_struct, rt_param);
 		/* if the new_hp is still t, or if the effective priority has changed */
 		if ((new_hp == t) || (effective_priority(new_hp) != hp_eff)) {
+			int interrupt_success;
+
 			hp_eff = effective_priority(new_hp);
 			TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu);
-			retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff);
+			interrupt_success = gpu_klmirqd_decrease_priority(reg->interrupt_thread, hp_eff);
+
+#ifdef CONFIG_LITMUS_NVIDIA_WORKQ_ON_DEDICATED
+			{
+				int workq_success = gpu_klmirqd_decrease_priority(reg->workq_thread, hp_eff);
+				if(interrupt_success != 1 || workq_success != 1)
+					retval = (interrupt_success != 1) ? interrupt_success : workq_success;
+				else
+					retval = 1;
+			}
+#else
+			retval = interrupt_success;
+#endif
 		}
 	}
 #endif