author:    Konsta Holtta <kholtta@nvidia.com>  2017-11-22 08:57:11 -0500
committer: mobile promotions <svcmobile_promotions@nvidia.com>  2017-11-22 20:18:56 -0500
commit:    ba2e59dc41f593bb011e0ec58c969337a35f4cf1
tree:      12a4fcec144516a0e903e5495898e9072d7db27c
parent:    b498f4c5c0a0a5f1a59a9210a1ee4d1d8ae98be7
gpu: nvgpu: use submit callback only in linux code
Move the implementation for channel job update callbacks, which is based
on Linux-specific work_struct usage, to Linux-specific code. This requires
a bit of extra work for allocating OS-specific priv data for channels,
which is also done in this patch. The priv data will be used more as more
OS-specific features are moved.

Jira NVGPU-259

Change-Id: I24bc0148a827f375b56a1c96044685affc2d1e8c
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1589321
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
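The hook table itself is not declared in this file. From the g->os_channel
call sites in the diff below, it must look roughly like the following
sketch; only the four hook names and their channel-pointer argument are
confirmed by the diff, while the struct name and placement are assumptions:

/* Sketch only: hook table implied by the g->os_channel call sites in
 * this patch. The real declaration lives elsewhere in nvgpu and may
 * differ; the signatures are inferred from how the hooks are invoked
 * (each takes just the channel pointer and returns nothing). */
struct channel_gk20a;

struct nvgpu_os_channel {
        void (*open)(struct channel_gk20a *ch);
        void (*close)(struct channel_gk20a *ch);
        void (*work_completion_signal)(struct channel_gk20a *ch);
        void (*work_completion_cancel_sync)(struct channel_gk20a *ch);
};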
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  57
1 file changed, 9 insertions(+), 48 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 4be232f1..e01d6cdb 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -444,6 +444,9 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
         trace_gk20a_free_channel(ch->chid);
 
+        if (g->os_channel.close)
+                g->os_channel.close(ch);
+
         /*
          * Disable channel/TSG and unbind here. This should not be executed if
          * HW access is not available during shutdown/removal path as it will
@@ -561,12 +564,6 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
          */
         nvgpu_vm_put(ch_vm);
 
-        nvgpu_spinlock_acquire(&ch->update_fn_lock);
-        ch->update_fn = NULL;
-        ch->update_fn_data = NULL;
-        nvgpu_spinlock_release(&ch->update_fn_lock);
-        cancel_work_sync(&ch->update_fn_work);
-
         /* make sure we don't have deferred interrupts pending that
          * could still touch the channel */
         nvgpu_wait_for_deferred_interrupts(g);
@@ -756,40 +753,6 @@ void __gk20a_channel_kill(struct channel_gk20a *ch)
         gk20a_free_channel(ch, true);
 }
 
-static void gk20a_channel_update_runcb_fn(struct work_struct *work)
-{
-        struct channel_gk20a *ch =
-                container_of(work, struct channel_gk20a, update_fn_work);
-        void (*update_fn)(struct channel_gk20a *, void *);
-        void *update_fn_data;
-
-        nvgpu_spinlock_acquire(&ch->update_fn_lock);
-        update_fn = ch->update_fn;
-        update_fn_data = ch->update_fn_data;
-        nvgpu_spinlock_release(&ch->update_fn_lock);
-
-        if (update_fn)
-                update_fn(ch, update_fn_data);
-}
-
-struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
-                void (*update_fn)(struct channel_gk20a *, void *),
-                void *update_fn_data,
-                int runlist_id,
-                bool is_privileged_channel)
-{
-        struct channel_gk20a *ch = gk20a_open_new_channel(g, runlist_id, is_privileged_channel);
-
-        if (ch) {
-                nvgpu_spinlock_acquire(&ch->update_fn_lock);
-                ch->update_fn = update_fn;
-                ch->update_fn_data = update_fn_data;
-                nvgpu_spinlock_release(&ch->update_fn_lock);
-        }
-
-        return ch;
-}
-
 struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
                 s32 runlist_id,
                 bool is_privileged_channel)
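The hunk above deletes the work_struct-based callback plumbing from common
code; per the commit message it reappears in Linux-specific code, keyed off
newly allocated per-channel priv data. A minimal sketch of what that priv
data could hold, reusing the fields removed above (the struct name and the
ch->os_priv linkage are assumptions, not confirmed by this diff):

#include <linux/workqueue.h>

/* Hypothetical Linux-only priv data attached to each channel; the
 * struct name and the ch->os_priv linkage are assumptions. The fields
 * are exactly the ones deleted from struct channel_gk20a by this
 * patch. */
struct nvgpu_channel_linux {
        struct channel_gk20a *ch;       /* back-pointer to common state */

        void (*update_fn)(struct channel_gk20a *, void *);
        void *update_fn_data;
        struct nvgpu_spinlock update_fn_lock;
        struct work_struct update_fn_work;
};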
@@ -872,10 +835,8 @@ struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g,
         nvgpu_cond_init(&ch->notifier_wq);
         nvgpu_cond_init(&ch->semaphore_wq);
 
-        ch->update_fn = NULL;
-        ch->update_fn_data = NULL;
-        nvgpu_spinlock_init(&ch->update_fn_lock);
-        INIT_WORK(&ch->update_fn_work, gk20a_channel_update_runcb_fn);
+        if (g->os_channel.open)
+                g->os_channel.open(ch);
 
         /* Mark the channel alive, get-able, with 1 initial use
          * references. The initial reference will be decreased in
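Here the new open hook replaces the inline work_struct initialization. A
sketch of a matching Linux-side implementation, assuming the priv object
from the previous sketch has already been allocated by the Linux layer:

/* Sketch of a Linux open hook; every name except the os_channel.open
 * slot is an assumption. It redoes the initialization removed above,
 * against the priv struct instead of channel_gk20a. The worker
 * nvgpu_channel_update_runcb_fn would be the relocated
 * gk20a_channel_update_runcb_fn, with container_of() retargeted at
 * struct nvgpu_channel_linux. */
static void nvgpu_channel_open_linux(struct channel_gk20a *ch)
{
        struct nvgpu_channel_linux *priv = ch->os_priv;

        priv->update_fn = NULL;
        priv->update_fn_data = NULL;
        nvgpu_spinlock_init(&priv->update_fn_lock);
        INIT_WORK(&priv->update_fn_work, nvgpu_channel_update_runcb_fn);
}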
@@ -2120,8 +2081,8 @@ void gk20a_channel_clean_up_jobs(struct channel_gk20a *c,
 
         nvgpu_mutex_release(&c->joblist.cleanup_lock);
 
-        if (job_finished && c->update_fn)
-                schedule_work(&c->update_fn_work);
+        if (job_finished && g->os_channel.work_completion_signal)
+                g->os_channel.work_completion_signal(c);
 
         gk20a_channel_put(c);
 }
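The common code now only checks that the hook is present; the Linux
implementation would re-queue the worker as the removed schedule_work()
call did. A sketch under the same assumed names as above:

/* Sketch: the update_fn NULL check that used to guard schedule_work()
 * can live in the worker itself (it already re-checks under the lock),
 * so the hook reduces to re-queuing the work item. Assumed names. */
static void nvgpu_channel_work_completion_signal(struct channel_gk20a *ch)
{
        struct nvgpu_channel_linux *priv = ch->os_priv;

        schedule_work(&priv->update_fn_work);
}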
@@ -2322,8 +2283,8 @@ int gk20a_channel_suspend(struct gk20a *g)
                 /* preempt the channel */
                 gk20a_fifo_preempt(g, ch);
                 /* wait for channel update notifiers */
-                if (ch->update_fn)
-                        cancel_work_sync(&ch->update_fn_work);
+                if (g->os_channel.work_completion_cancel_sync)
+                        g->os_channel.work_completion_cancel_sync(ch);
 
                 channels_in_use = true;
 
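Likewise for suspend: the synchronous cancel moves behind a hook. A
sketch, again with assumed names:

/* Sketch: mirrors the removed cancel_work_sync() call; every name
 * except the os_channel.work_completion_cancel_sync slot is assumed. */
static void nvgpu_channel_work_completion_cancel_sync(struct channel_gk20a *ch)
{
        struct nvgpu_channel_linux *priv = ch->os_priv;

        cancel_work_sync(&priv->update_fn_work);
}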