diff options
author | Deepak Nibade <dnibade@nvidia.com> | 2018-03-20 07:51:23 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-03-23 11:20:35 -0400 |
commit | b5b4353ca6cc9b6457ddccc00bf87538291870fc (patch) | |
tree | 1e2334728031345a3cb042bcc934bb0d9e3b0f82 /drivers/gpu/nvgpu/common | |
parent | 2aead38194fb6f3166a9ccb501467f7b0662f6c1 (diff) |
gpu: nvgpu: set safe state for user managed syncpoints
The MAX/threshold value of a user-managed syncpoint is not tracked by nvgpu,
so if a channel is reset by nvgpu there could be waiters still waiting on some
user syncpoint fence.
Fix this by setting a large safe value on the user-managed syncpoint when aborting
the channel and when closing the channel.
We currently increment the current value by 0x10000, which should be sufficient
to release any pending waiters.
Bug 200326065
Jira NVGPU-179
Change-Id: Ie6432369bb4c21bd922c14b8d5a74c1477116f0b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1678768
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/nvhost.c | 19 |
1 file changed, 19 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/nvhost.c b/drivers/gpu/nvgpu/common/linux/nvhost.c index a76953e3..fa169cf0 100644 --- a/drivers/gpu/nvgpu/common/linux/nvhost.c +++ b/drivers/gpu/nvgpu/common/linux/nvhost.c | |||
@@ -166,6 +166,25 @@ u32 nvgpu_nvhost_syncpt_read_maxval( | |||
166 | return nvhost_syncpt_read_maxval(nvhost_dev->host1x_pdev, id); | 166 | return nvhost_syncpt_read_maxval(nvhost_dev->host1x_pdev, id); |
167 | } | 167 | } |
168 | 168 | ||
169 | void nvgpu_nvhost_syncpt_set_safe_state( | ||
170 | struct nvgpu_nvhost_dev *nvhost_dev, u32 id) | ||
171 | { | ||
172 | u32 val; | ||
173 | |||
174 | /* | ||
175 | * Add large number of increments to current value | ||
176 | * so that all waiters on this syncpoint are released | ||
177 | * | ||
178 | * We don't expect any case where more than 0x10000 increments | ||
179 | * are pending | ||
180 | */ | ||
181 | val = nvhost_syncpt_read_minval(nvhost_dev->host1x_pdev, id); | ||
182 | val += 0x10000; | ||
183 | |||
184 | nvhost_syncpt_set_minval(nvhost_dev->host1x_pdev, id, val); | ||
185 | nvhost_syncpt_set_maxval(nvhost_dev->host1x_pdev, id, val); | ||
186 | } | ||
187 | |||
169 | int nvgpu_nvhost_create_symlink(struct gk20a *g) | 188 | int nvgpu_nvhost_create_symlink(struct gk20a *g) |
170 | { | 189 | { |
171 | struct device *dev = dev_from_gk20a(g); | 190 | struct device *dev = dev_from_gk20a(g); |