path: root/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
author	Deepak Nibade <dnibade@nvidia.com>	2016-04-06 06:33:44 -0400
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2016-04-15 11:11:14 -0400
commit	b6dc4315a4c7fa817334797cc2a4b9fb3fbfd55f (patch)
tree	661626690667ed1d2bb9ef72ec7062added33235 /drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
parent	d369dca4ac742fed024b54d766bb1723916b7d87 (diff)
gpu: nvgpu: support kernel-3.10 version
Make the changes necessary to support nvgpu on kernel-3.10. This includes the below changes:
- PROBE_PREFER_ASYNCHRONOUS is defined only for K3.10
- Fence handling and struct sync_fence are different between K3.10 and K3.18
- variable status in struct sync_fence is atomic on K3.18 whereas it is int on K3.10
- if SOC == T132, set soc_name = "tegra13x"
- ioremap_cache() is not defined on K3.10 ARM versions, hence use ioremap_cached()

Bug 200188753

Change-Id: I18d77eb1404e15054e8510d67c9a61c0f1883e2b
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1121092
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
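Differences like the int vs. atomic status field and the ioremap_cached()/ioremap_cache() naming are typically absorbed behind small LINUX_VERSION_CODE guards. The sketch below is illustrative only and is not part of this patch; the wrapper names are hypothetical, while the field and function differences are taken from the commit message above. It assumes the header that defines struct sync_fence is already pulled in by the including file.

#include <linux/version.h>
#include <linux/io.h>
#include <linux/atomic.h>

/*
 * Hypothetical compatibility helpers (sketch, not from the patch):
 * read a fence's status and map memory cached on both K3.10 and K3.18.
 */
static inline int nvgpu_sync_fence_status(struct sync_fence *fence)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
	return fence->status;			/* plain int on K3.10 */
#else
	return atomic_read(&fence->status);	/* atomic_t on K3.18 */
#endif
}

static inline void __iomem *nvgpu_ioremap_cache(phys_addr_t addr,
						unsigned long size)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
	return ioremap_cached(addr, size);	/* K3.10 ARM spelling */
#else
	return ioremap_cache(addr, size);
#endif
}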
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c	18
1 files changed, 16 insertions, 2 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
index 30bb6efe..025b000e 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_sync_gk20a.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/gk20a.h>
+#include <linux/version.h>
 
 #include "channel_sync_gk20a.h"
 #include "gk20a.h"
@@ -93,10 +94,12 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	int i;
 	int num_wait_cmds;
 	struct sync_fence *sync_fence;
+	struct sync_pt *pt;
 	struct priv_cmd_entry *wait_cmd = NULL;
 	struct gk20a_channel_syncpt *sp =
 		container_of(s, struct gk20a_channel_syncpt, ops);
 	struct channel_gk20a *c = sp->c;
+	u32 wait_id;
 	int err = 0;
 
 	sync_fence = nvhost_sync_fdget(fd);
@@ -104,9 +107,13 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 		return -EINVAL;
 
 	/* validate syncpt ids */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
+	list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
+#else
 	for (i = 0; i < sync_fence->num_fences; i++) {
-		struct sync_pt *pt = sync_pt_from_fence(sync_fence->cbs[i].sync_pt);
-		u32 wait_id = nvhost_sync_pt_id(pt);
+		pt = sync_pt_from_fence(sync_fence->cbs[i].sync_pt);
+#endif
+		wait_id = nvhost_sync_pt_id(pt);
 		if (!wait_id || !nvhost_syncpt_is_valid_pt_ext(sp->host1x_pdev,
 				wait_id)) {
 			sync_fence_put(sync_fence);
@@ -129,9 +136,13 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 	}
 
 	i = 0;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
+	list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
+#else
 	for (i = 0; i < sync_fence->num_fences; i++) {
 		struct fence *f = sync_fence->cbs[i].sync_pt;
 		struct sync_pt *pt = sync_pt_from_fence(f);
+#endif
 		u32 wait_id = nvhost_sync_pt_id(pt);
 		u32 wait_value = nvhost_sync_pt_thresh(pt);
 
@@ -144,6 +155,9 @@ static int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
 		} else
 			add_wait_cmd(&wait_cmd->ptr[i * 4], wait_id,
 					wait_value);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0)
+		i++;
+#endif
 	}
 	WARN_ON(i != num_wait_cmds);
 	sync_fence_put(sync_fence);
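For reference, the #if/#else blocks in the hunks above all implement the same idea: before kernel 3.18 a sync_fence keeps its sync_pts on pt_list_head and they are walked with list_for_each_entry(), while from 3.18 on they sit behind the cbs[] array and are recovered with sync_pt_from_fence(). A condensed sketch of that pattern follows; the helper and its callback are hypothetical, but the field and function names are exactly those used in the diff.

/*
 * Hypothetical helper (sketch, not part of the patch): walk every
 * sync_pt in a fence and hand its syncpoint id/threshold to a callback,
 * on both K3.10 and K3.18.
 */
static void nvgpu_for_each_nvhost_pt(struct sync_fence *sync_fence,
				     void (*fn)(u32 id, u32 thresh))
{
	struct sync_pt *pt;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
	/* K3.10: the fence owns a plain list of sync_pts. */
	list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list)
		fn(nvhost_sync_pt_id(pt), nvhost_sync_pt_thresh(pt));
#else
	/* K3.18: the fence carries an array of per-fence callbacks. */
	int i;

	for (i = 0; i < sync_fence->num_fences; i++) {
		pt = sync_pt_from_fence(sync_fence->cbs[i].sync_pt);
		fn(nvhost_sync_pt_id(pt), nvhost_sync_pt_thresh(pt));
	}
#endif
}

The extra i++ added at the end of the last hunk exists because the list-based walk has no loop counter of its own, yet the code after the loop still checks i against num_wait_cmds in the WARN_ON.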