path: root/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
author	Deepak Nibade <dnibade@nvidia.com>	2017-05-25 08:11:38 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-08 09:37:15 -0400
commit	0ad7f1d9aa18d959abf3cba6ca4e532fc9246a31 (patch)
tree	781bd0ef8ffa6d11d45b9f595bfcbc966e5733e7 /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
parent	2c9713e21c9ab93e7af45a4b9e76c1f378aab024 (diff)
gpu: nvgpu: use nvgpu specific nvhost APIs
Remove use of the Linux-specific header files <linux/nvhost.h> and
<linux/nvhost_ioctl.h> and use the nvgpu-specific header file
<nvgpu/nvhost.h> instead. This is needed to remove all Linux
dependencies from the nvgpu driver.

Replace all nvhost_*() calls by nvgpu_nvhost_*() calls from the new
nvgpu library.

Remove the platform device pointer host1x_dev from struct gk20a and
add a struct nvgpu_nvhost_dev instead.

Jira NVGPU-29

Change-Id: Ia7af70602cfc16f9ccc380752538c05a9cbb8a67
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1489726
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
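For readers unfamiliar with the pattern, the change routes every host1x call through a small nvgpu-owned abstraction. Below is a minimal sketch of what the Linux backend of such a wrapper could look like; the struct layout and field name are assumptions for illustration (only the two function names appear in this diff), not the actual nvgpu implementation:

/*
 * Hypothetical Linux backend of <nvgpu/nvhost.h>: the host1x
 * platform_device is hidden inside struct nvgpu_nvhost_dev, so common
 * code never includes <linux/nvhost.h> directly. The field name below
 * is illustrative.
 */
#include <linux/nvhost.h>

struct nvgpu_nvhost_dev {
	struct platform_device *host1x_pdev;
};

bool nvgpu_nvhost_syncpt_is_expired_ext(struct nvgpu_nvhost_dev *nvhost_dev,
		u32 id, u32 thresh)
{
	/* Forward to the Linux nvhost call the common code used to make. */
	return nvhost_syncpt_is_expired_ext(nvhost_dev->host1x_pdev,
			id, thresh);
}

With a backend like this in place, callers such as channel_sync_gk20a.c compile against <nvgpu/nvhost.h> alone, and only the one backend file keeps a Linux dependency.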
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c  44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 07157dc9..3d313ce8 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -23,6 +23,7 @@
 #include <nvgpu/atomic.h>
 #include <nvgpu/bug.h>
 #include <nvgpu/list.h>
+#include <nvgpu/nvhost.h>
 
 #include "channel_sync_gk20a.h"
 #include "gk20a.h"
@@ -35,12 +36,11 @@
 #endif
 
 #ifdef CONFIG_TEGRA_GK20A_NVHOST
-#include <linux/nvhost.h>
 
 struct gk20a_channel_syncpt {
 	struct gk20a_channel_sync ops;
 	struct channel_gk20a *c;
-	struct platform_device *host1x_pdev;
+	struct nvgpu_nvhost_dev *nvhost_dev;
 	u32 id;
 	struct nvgpu_mem syncpt_buf;
 };
@@ -54,12 +54,12 @@ static int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s,
 	struct channel_gk20a *c = sp->c;
 	int err = 0;
 
-	if (!nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev, id)) {
+	if (!nvgpu_nvhost_syncpt_is_valid_pt_ext(sp->nvhost_dev, id)) {
 		nvgpu_warn(c->g, "invalid wait id in gpfifo submit, elided");
 		return 0;
 	}
 
-	if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh))
+	if (nvgpu_nvhost_syncpt_is_expired_ext(sp->nvhost_dev, id, thresh))
 		return 0;
 
 	err = gk20a_channel_alloc_priv_cmdbuf(c,
@@ -94,7 +94,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	int err = 0;
 	u32 wait_cmd_size = 0;
 
-	sync_fence = nvhost_sync_fdget(fd);
+	sync_fence = nvgpu_nvhost_sync_fdget(fd);
 	if (!sync_fence)
 		return -EINVAL;
 
@@ -105,9 +105,9 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	for (i = 0; i < sync_fence->num_fences; i++) {
 		pt = sync_pt_from_fence(sync_fence->cbs[i].sync_pt);
 #endif
-		wait_id = nvhost_sync_pt_id(pt);
-		if (!wait_id || !nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev,
-				wait_id)) {
+		wait_id = nvgpu_nvhost_sync_pt_id(pt);
+		if (!wait_id || !nvgpu_nvhost_syncpt_is_valid_pt_ext(
+				sp->nvhost_dev, wait_id)) {
 			sync_fence_put(sync_fence);
 			return -EINVAL;
 		}
@@ -117,7 +117,7 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	}
 #endif
 
-	num_wait_cmds = nvhost_sync_num_pts(sync_fence);
+	num_wait_cmds = nvgpu_nvhost_sync_num_pts(sync_fence);
 	if (num_wait_cmds == 0) {
 		sync_fence_put(sync_fence);
 		return 0;
@@ -141,10 +141,10 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 		struct fence *f = sync_fence->cbs[i].sync_pt;
 		struct sync_pt *pt = sync_pt_from_fence(f);
 #endif
-		u32 wait_id = nvhost_sync_pt_id(pt);
-		u32 wait_value = nvhost_sync_pt_thresh(pt);
+		u32 wait_id = nvgpu_nvhost_sync_pt_id(pt);
+		u32 wait_value = nvgpu_nvhost_sync_pt_thresh(pt);
 
-		if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
+		if (nvgpu_nvhost_syncpt_is_expired_ext(sp->nvhost_dev,
 				wait_id, wait_value)) {
 			nvgpu_memset(c->g, wait_cmd->mem,
 				(wait_cmd->off + i * wait_cmd_size) * sizeof(u32),
@@ -206,7 +206,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 	c->g->ops.fifo.add_syncpt_incr_cmd(c->g, wfi_cmd,
 			incr_cmd, sp->id, sp->syncpt_buf.gpu_va);
 
-	thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 2);
+	thresh = nvgpu_nvhost_syncpt_incr_max_ext(sp->nvhost_dev, sp->id, 2);
 
 	if (register_irq) {
 		struct channel_gk20a *referenced = gk20a_channel_get(c);
@@ -217,8 +217,8 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 		/* note: channel_put() is in
 		 * gk20a_channel_syncpt_update() */
 
-		err = nvhost_intr_register_notifier(
-				sp->host1x_pdev,
+		err = nvgpu_nvhost_intr_register_notifier(
+				sp->nvhost_dev,
 				sp->id, thresh,
 				gk20a_channel_syncpt_update, c);
 		if (err)
@@ -234,7 +234,7 @@ static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
 		}
 	}
 
-	err = gk20a_fence_from_syncpt(fence, sp->host1x_pdev, sp->id, thresh,
+	err = gk20a_fence_from_syncpt(fence, sp->nvhost_dev, sp->id, thresh,
 		wfi_cmd, need_sync_fence);
 	if (err)
 		goto clean_up_priv_cmd;
@@ -290,7 +290,7 @@ static void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
 {
 	struct gk20a_channel_syncpt *sp =
 		container_of(s, struct gk20a_channel_syncpt, ops);
-	nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
+	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
 }
 
 static void gk20a_channel_syncpt_signal_timeline(
@@ -314,8 +314,8 @@ static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
 
 	sp->c->g->ops.fifo.free_syncpt_buf(sp->c, &sp->syncpt_buf);
 
-	nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
-	nvhost_syncpt_put_ref_ext(sp->host1x_pdev, sp->id);
+	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
+	nvgpu_nvhost_syncpt_put_ref_ext(sp->nvhost_dev, sp->id);
 	nvgpu_kfree(sp->c->g, sp);
 }
 
@@ -330,12 +330,12 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
 		return NULL;
 
 	sp->c = c;
-	sp->host1x_pdev = c->g->host1x_dev;
+	sp->nvhost_dev = c->g->nvhost_dev;
 
 	snprintf(syncpt_name, sizeof(syncpt_name),
 		"%s_%d", c->g->name, c->hw_chid);
 
-	sp->id = nvhost_get_syncpt_host_managed(sp->host1x_pdev,
+	sp->id = nvgpu_nvhost_get_syncpt_host_managed(sp->nvhost_dev,
 			c->hw_chid, syncpt_name);
 	if (!sp->id) {
 		nvgpu_kfree(c->g, sp);
@@ -346,7 +346,7 @@ gk20a_channel_syncpt_create(struct channel_gk20a *c)
 	sp->c->g->ops.fifo.alloc_syncpt_buf(sp->c, sp->id,
 			&sp->syncpt_buf);
 
-	nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
+	nvgpu_nvhost_syncpt_set_min_eq_max_ext(sp->nvhost_dev, sp->id);
 
 	atomic_set(&sp->ops.refcount, 0);
 	sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt;