summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
diff options
context:
space:
mode:
authorLauri Peltonen <lpeltonen@nvidia.com>2014-07-17 19:21:34 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:10:40 -0400
commitbcf60a22c3e8671468517d34aa37548272455c1f (patch)
treec3544f6714c291e611e33a8d0e39c5cb2c795821 /drivers/gpu/nvgpu/gk20a/fence_gk20a.c
parent55295c6087ed975be12e92f9be799269aef94678 (diff)
gpu: nvgpu: Add gk20a_fence type
When moving compression state tracking and compbit management ops to kernel, we need to attach a fence to dma-buf metadata, along with the compbit state. To make in-kernel fence management easier, introduce a new gk20a_fence abstraction. A gk20a_fence may be backed by a semaphore or a syncpoint (id, value) pair. If the kernel is configured with CONFIG_SYNC, it will also contain a sync_fence. The gk20a_fence can easily be converted back to a syncpoint (id, value) pair or sync FD when we need to return it to user space. Change gk20a_submit_channel_gpfifo to return a gk20a_fence instead of nvhost_fence. This is to facilitate work submission initiated from kernel. Bug 1509620 Change-Id: I6154764a279dba83f5e91ba9e0cb5e227ca08e1b Signed-off-by: Lauri Peltonen <lpeltonen@nvidia.com> Reviewed-on: http://git-master/r/439846 Reviewed-by: Automatic_Commit_Validation_User Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fence_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/fence_gk20a.c229
1 files changed, 229 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
new file mode 100644
index 00000000..1a28e660
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c
@@ -0,0 +1,229 @@
1/*
2 * drivers/video/tegra/host/gk20a/fence_gk20a.c
3 *
4 * GK20A Fences
5 *
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 */
17
18#include "fence_gk20a.h"
19
20#include <linux/gk20a.h>
21#include <linux/file.h>
22
23#include "gk20a.h"
24#include "semaphore_gk20a.h"
25#include "channel_gk20a.h"
26#include "sync_gk20a.h"
27
28#ifdef CONFIG_SYNC
29#include "../../../staging/android/sync.h"
30#endif
31
32#ifdef CONFIG_TEGRA_GK20A
33#include <linux/nvhost.h>
34#endif
35
36struct gk20a_fence_ops {
37 int (*wait)(struct gk20a_fence *, int timeout);
38 bool (*is_expired)(struct gk20a_fence *);
39 void *(*free)(struct kref *);
40};
41
42static void gk20a_fence_free(struct kref *ref)
43{
44 struct gk20a_fence *f =
45 container_of(ref, struct gk20a_fence, ref);
46#ifdef CONFIG_SYNC
47 if (f->sync_fence)
48 sync_fence_put(f->sync_fence);
49#endif
50 if (f->semaphore)
51 gk20a_semaphore_put(f->semaphore);
52 kfree(f);
53}
54
55void gk20a_fence_put(struct gk20a_fence *f)
56{
57 if (f)
58 kref_put(&f->ref, gk20a_fence_free);
59}
60
61struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f)
62{
63 if (f)
64 kref_get(&f->ref);
65 return f;
66}
67
/*
 * Block until the fence signals, dispatching to the backend's wait op.
 * The interpretation of @timeout is backend-specific (jiffies for the
 * semaphore backend; passed through to nvhost for the syncpoint
 * backend).  Returns 0 once signalled or a negative error such as
 * -ETIMEDOUT.  @f must be non-NULL; no NULL check is performed here.
 */
int gk20a_fence_wait(struct gk20a_fence *f, int timeout)
{
	return f->ops->wait(f, timeout);
}
72
/*
 * Non-blocking poll: true if the fence has already signalled.
 * Dispatches to the backend's is_expired op; @f must be non-NULL.
 */
bool gk20a_fence_is_expired(struct gk20a_fence *f)
{
	return f->ops->is_expired(f);
}
77
/*
 * Export the fence's Android sync_fence as a new file descriptor for
 * user space.  The installed fd owns its own sync_fence reference (taken
 * below), so the caller's gk20a_fence reference is unaffected.
 *
 * Returns the fd on success, -EINVAL if the fence carries no sync_fence,
 * a negative errno from get_unused_fd(), or -ENODEV when the kernel was
 * built without CONFIG_SYNC.
 *
 * NOTE(review): get_unused_fd() does not set O_CLOEXEC, so the fd is
 * inherited across exec -- confirm whether that is intended.
 */
int gk20a_fence_install_fd(struct gk20a_fence *f)
{
#ifdef CONFIG_SYNC
	int fd;

	if (!f->sync_fence)
		return -EINVAL;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	/* The fd holds its own reference to the sync_fence. */
	sync_fence_get(f->sync_fence);
	sync_fence_install(f->sync_fence, fd);
	return fd;
#else
	return -ENODEV;
#endif
}
97
98static struct gk20a_fence *alloc_fence(const struct gk20a_fence_ops *ops,
99 struct sync_fence *sync_fence, bool wfi)
100{
101 struct gk20a_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
102 if (!f)
103 return NULL;
104 kref_init(&f->ref);
105 f->ops = ops;
106 f->sync_fence = sync_fence;
107 f->wfi = wfi;
108 f->syncpt_id = -1;
109 return f;
110}
111
112/* Fences that are backed by GPU semaphores: */
113
/*
 * Wait (interruptibly) for a semaphore-backed fence to be released.
 * @timeout is in jiffies, per wait_event_interruptible_timeout().
 * Returns 0 on success, -ETIMEDOUT on timeout, or a negative error
 * (-ERESTARTSYS) if interrupted by a signal.
 */
static int gk20a_semaphore_fence_wait(struct gk20a_fence *f, int timeout)
{
	int remain;

	/* Fast path: already released, nothing to wait for. */
	if (!gk20a_semaphore_is_acquired(f->semaphore))
		return 0;

	remain = wait_event_interruptible_timeout(
		*f->semaphore_wq,
		!gk20a_semaphore_is_acquired(f->semaphore),
		timeout);
	/*
	 * remain == 0 means the timeout elapsed; re-check the semaphore so
	 * that a release racing with the timeout is still reported as
	 * success rather than -ETIMEDOUT.
	 */
	if (remain == 0 && gk20a_semaphore_is_acquired(f->semaphore))
		return -ETIMEDOUT;
	else if (remain < 0)
		return remain;	/* interrupted by a signal */
	return 0;
}
131
/* A semaphore-backed fence is expired once the semaphore is released. */
static bool gk20a_semaphore_fence_is_expired(struct gk20a_fence *f)
{
	return !gk20a_semaphore_is_acquired(f->semaphore);
}
136
/* Backend ops for semaphore-backed fences; .free is left unset. */
static const struct gk20a_fence_ops gk20a_semaphore_fence_ops = {
	.wait = &gk20a_semaphore_fence_wait,
	.is_expired = &gk20a_semaphore_fence_is_expired,
};
141
/*
 * Build a fence backed by a GPU semaphore.
 *
 * Takes its own reference on @semaphore (dropped in gk20a_fence_free()).
 * @semaphore_wq is the wait queue woken when the semaphore is released;
 * the fence's wait op sleeps on it.  @timeline and @dependency are used
 * only when CONFIG_SYNC is enabled, to create the Android sync_fence.
 * @wfi is stored on the fence; its meaning is defined by callers.
 *
 * Returns the new fence, or NULL on allocation or sync-fence failure.
 */
struct gk20a_fence *gk20a_fence_from_semaphore(
		struct sync_timeline *timeline,
		struct gk20a_semaphore *semaphore,
		wait_queue_head_t *semaphore_wq,
		struct sync_fence *dependency,
		bool wfi)
{
	struct gk20a_fence *f;
	struct sync_fence *sync_fence = NULL;

#ifdef CONFIG_SYNC
	sync_fence = gk20a_sync_fence_create(timeline, semaphore,
					     dependency, "fence");
	if (!sync_fence)
		return NULL;
#endif

	f = alloc_fence(&gk20a_semaphore_fence_ops, sync_fence, wfi);
	if (!f) {
#ifdef CONFIG_SYNC
		/* Undo the sync_fence created above. */
		sync_fence_put(sync_fence);
#endif
		return NULL;
	}
	gk20a_semaphore_get(semaphore);
	f->semaphore = semaphore;
	f->semaphore_wq = semaphore_wq;
	return f;
}
171
172#ifdef CONFIG_TEGRA_GK20A
173/* Fences that are backed by host1x syncpoints: */
174
/*
 * Wait on a host1x syncpoint-backed fence via nvhost.  @timeout units
 * are defined by nvhost_syncpt_wait_timeout_ext().
 */
static int gk20a_syncpt_fence_wait(struct gk20a_fence *f, int timeout)
{
	return nvhost_syncpt_wait_timeout_ext(
		f->host1x_pdev, f->syncpt_id, f->syncpt_value,
		timeout, NULL, NULL);
}
181
/* Ask nvhost whether the syncpoint has reached the fence's threshold. */
static bool gk20a_syncpt_fence_is_expired(struct gk20a_fence *f)
{
	return nvhost_syncpt_is_expired_ext(f->host1x_pdev, f->syncpt_id,
					    f->syncpt_value);
}
187
/* Backend ops for host1x syncpoint-backed fences; .free is left unset. */
static const struct gk20a_fence_ops gk20a_syncpt_fence_ops = {
	.wait = &gk20a_syncpt_fence_wait,
	.is_expired = &gk20a_syncpt_fence_is_expired,
};
192
/*
 * Build a fence backed by a host1x syncpoint (@id, @value) pair.
 *
 * When CONFIG_SYNC is enabled, an Android sync_fence wrapping the same
 * (id, threshold) pair is created as well, so the fence can later be
 * exported to user space via gk20a_fence_install_fd().  @wfi is stored
 * on the fence; its meaning is defined by callers.
 *
 * Returns the new fence, or NULL on allocation or sync-fence failure.
 */
struct gk20a_fence *gk20a_fence_from_syncpt(struct platform_device *host1x_pdev,
					    u32 id, u32 value, bool wfi)
{
	struct gk20a_fence *f;
	struct sync_fence *sync_fence = NULL;

#ifdef CONFIG_SYNC
	struct nvhost_ctrl_sync_fence_info pt = {
		.id = id,
		.thresh = value
	};

	sync_fence = nvhost_sync_create_fence(host1x_pdev, &pt, 1,
					      "fence");
	if (!sync_fence)
		return NULL;
#endif

	f = alloc_fence(&gk20a_syncpt_fence_ops, sync_fence, wfi);
	if (!f) {
#ifdef CONFIG_SYNC
		/* Undo the sync_fence created above. */
		sync_fence_put(sync_fence);
#endif
		return NULL;
	}
	f->host1x_pdev = host1x_pdev;
	f->syncpt_id = id;
	f->syncpt_value = value;
	return f;
}
223#else
/*
 * Stub for kernels built without CONFIG_TEGRA_GK20A: there is no host1x
 * backend, so syncpoint-backed fences cannot be created.  Always returns
 * NULL; callers must treat NULL as failure.
 */
struct gk20a_fence *gk20a_fence_from_syncpt(struct platform_device *host1x_pdev,
					    u32 id, u32 value, bool wfi)
{
	return NULL;
}
229#endif