diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2016-04-18 06:16:10 -0400 |
---|---|---|
committer | Terje Bergstrom <tbergstrom@nvidia.com> | 2016-04-19 11:16:13 -0400 |
commit | e0c9da1fe9d8862fc89773208aa170b7c73d093b (patch) | |
tree | f9f0f9edbe7ae1c2f44285b0ce89385d18dc826a /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | |
parent | 1c96bc6942cdae7f4e90563687da7d068aea90bc (diff) |
gpu: nvgpu: implement sync refcounting
We currently free the sync object when we find the job list empty.
If aggressive_sync is set to true, we also try to free the
sync during the channel unbind() call.
But we rarely free the sync from the channel_unbind() call,
since freeing it when the job list is empty is
aggressive enough.
Hence, remove the sync-free code from channel_unbind().
Implement refcounting for sync:
- get a refcount while submitting a job (and
allocate sync if it is not allocated already)
- put a refcount while freeing the job
- if refcount==0 and if aggressive_sync_destroy is
set, free the sync
- if aggressive_sync_destroy is not set, we will
free the sync during channel close time
Bug 200187553
Change-Id: I74e24adb15dc26a375ebca1fdd017b3ad6d57b61
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1120410
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | 7 |
1 file changed, 7 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c index 025b000e..b47c1010 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c | |||
@@ -356,6 +356,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c) | |||
356 | 356 | ||
357 | nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id); | 357 | nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id); |
358 | 358 | ||
359 | atomic_set(&sp->ops.refcount, 0); | ||
359 | sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt; | 360 | sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt; |
360 | sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd; | 361 | sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd; |
361 | sp->ops.incr = gk20a_channel_syncpt_incr; | 362 | sp->ops.incr = gk20a_channel_syncpt_incr; |
@@ -711,6 +712,7 @@ gk20a_channel_semaphore_create(struct channel_gk20a *c) | |||
711 | if (!sema->timeline) | 712 | if (!sema->timeline) |
712 | goto clean_up; | 713 | goto clean_up; |
713 | #endif | 714 | #endif |
715 | atomic_set(&sema->ops.refcount, 0); | ||
714 | sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt; | 716 | sema->ops.wait_syncpt = gk20a_channel_semaphore_wait_syncpt; |
715 | sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd; | 717 | sema->ops.wait_fd = gk20a_channel_semaphore_wait_fd; |
716 | sema->ops.incr = gk20a_channel_semaphore_incr; | 718 | sema->ops.incr = gk20a_channel_semaphore_incr; |
@@ -727,6 +729,11 @@ clean_up: | |||
727 | return NULL; | 729 | return NULL; |
728 | } | 730 | } |
729 | 731 | ||
732 | void gk20a_channel_sync_destroy(struct gk20a_channel_sync *sync) | ||
733 | { | ||
734 | sync->destroy(sync); | ||
735 | } | ||
736 | |||
730 | struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c) | 737 | struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c) |
731 | { | 738 | { |
732 | #ifdef CONFIG_TEGRA_GK20A | 739 | #ifdef CONFIG_TEGRA_GK20A |