path: root/drivers/video/tegra/host/nvhost_syncpt.c
author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit     fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree       a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/video/tegra/host/nvhost_syncpt.c
parent     8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.  (HEAD, master)
Diffstat (limited to 'drivers/video/tegra/host/nvhost_syncpt.c')
-rw-r--r--  drivers/video/tegra/host/nvhost_syncpt.c  319
1 file changed, 319 insertions(+), 0 deletions(-)
diff --git a/drivers/video/tegra/host/nvhost_syncpt.c b/drivers/video/tegra/host/nvhost_syncpt.c
new file mode 100644
index 00000000000..eb5176ea1bf
--- /dev/null
+++ b/drivers/video/tegra/host/nvhost_syncpt.c
@@ -0,0 +1,319 @@
/*
 * drivers/video/tegra/host/nvhost_syncpt.c
 *
 * Tegra Graphics Host Syncpoints
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/nvhost_ioctl.h>
#include <linux/platform_device.h>
#include "nvhost_syncpt.h"
#include "dev.h"

#define MAX_STUCK_CHECK_COUNT 15

/**
 * Reset the hardware syncpoint and waitbase registers to the
 * values held in the sw shadows
 */
void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
{
        u32 i;
        BUG_ON(!(syncpt_op(sp).reset && syncpt_op(sp).reset_wait_base));

        for (i = 0; i < sp->nb_pts; i++)
                syncpt_op(sp).reset(sp, i);
        for (i = 0; i < sp->nb_bases; i++)
                syncpt_op(sp).reset_wait_base(sp, i);
        wmb();
}

/**
 * Updates sw shadow state for client managed registers
 */
void nvhost_syncpt_save(struct nvhost_syncpt *sp)
{
        u32 i;
        BUG_ON(!(syncpt_op(sp).update_min && syncpt_op(sp).read_wait_base));

        for (i = 0; i < sp->nb_pts; i++) {
                if (client_managed(i))
                        syncpt_op(sp).update_min(sp, i);
                else
                        BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
        }

        for (i = 0; i < sp->nb_bases; i++)
                syncpt_op(sp).read_wait_base(sp, i);
}

/**
 * Updates the last value read from hardware.
 */
u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
        BUG_ON(!syncpt_op(sp).update_min);

        return syncpt_op(sp).update_min(sp, id);
}

/**
 * Get the current syncpoint value
 */
u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;
        BUG_ON(!syncpt_op(sp).update_min);
        nvhost_module_busy(syncpt_to_dev(sp)->dev);
        val = syncpt_op(sp).update_min(sp, id);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return val;
}

/**
 * Get the current syncpoint base
 */
u32 nvhost_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
        u32 val;
        BUG_ON(!syncpt_op(sp).read_wait_base);
        nvhost_module_busy(syncpt_to_dev(sp)->dev);
        syncpt_op(sp).read_wait_base(sp, id);
        val = sp->base_val[id];
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return val;
}

/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
        BUG_ON(!syncpt_op(sp).cpu_incr);
        syncpt_op(sp).cpu_incr(sp, id);
}

/**
 * Increment syncpoint value from cpu, updating cache
 */
void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
{
        if (client_managed(id))
                nvhost_syncpt_incr_max(sp, id, 1);
        nvhost_module_busy(syncpt_to_dev(sp)->dev);
        nvhost_syncpt_cpu_incr(sp, id);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
}

/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
                        u32 thresh, u32 timeout, u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        void *waiter;
        int err = 0, check_count = 0, low_timeout = 0;
        u32 val;

        if (value)
                *value = 0;

        /* first check cache */
        if (nvhost_syncpt_is_expired(sp, id, thresh)) {
                if (value)
                        *value = nvhost_syncpt_read_min(sp, id);
                return 0;
        }

        /* keep host alive */
        nvhost_module_busy(syncpt_to_dev(sp)->dev);

        /* try to read from register */
        val = syncpt_op(sp).update_min(sp, id);
        if (nvhost_syncpt_is_expired(sp, id, thresh)) {
                if (value)
                        *value = val;
                goto done;
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        waiter = nvhost_intr_alloc_waiter();
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
                                NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq,
                                waiter,
                                &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* Caller-specified timeout may be impractically low */
        if (timeout < SYNCPT_CHECK_PERIOD)
                low_timeout = timeout;

        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
                int remain = wait_event_interruptible_timeout(wq,
                                nvhost_syncpt_is_expired(sp, id, thresh),
                                check);
                if (remain > 0) {
                        if (value)
                                *value = nvhost_syncpt_read_min(sp, id);
                        err = 0;
                        break;
                }
                if (remain < 0) {
                        err = remain;
                        break;
                }
                if (timeout != NVHOST_NO_TIMEOUT)
                        timeout -= check;
                if (timeout) {
                        dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
                                current->comm, id, syncpt_op(sp).name(sp, id),
                                thresh, timeout);
                        syncpt_op(sp).debug(sp);
                        if (check_count > MAX_STUCK_CHECK_COUNT) {
                                if (low_timeout) {
                                        dev_warn(&syncpt_to_dev(sp)->dev->dev,
                                                "is timeout %d too low?\n",
                                                low_timeout);
                                }
                                nvhost_debug_dump(syncpt_to_dev(sp));
                                BUG();
                        }
                        check_count++;
                }
        }
        nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);

done:
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        return err;
}
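
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * a typical caller of nvhost_syncpt_wait_timeout() above. The function name
 * and the 100 ms budget are hypothetical; note that the timeout argument is
 * consumed in jiffies by the wait loop, hence msecs_to_jiffies().
 */
static int __maybe_unused example_wait_for_syncpt(struct nvhost_syncpt *sp,
                                                  u32 id, u32 thresh)
{
        u32 completed = 0;
        int err = nvhost_syncpt_wait_timeout(sp, id, thresh,
                                             msecs_to_jiffies(100),
                                             &completed);

        if (err == -EAGAIN)
                return err;     /* timed out before reaching thresh */
        if (err)
                return err;     /* interrupted, or waiter allocation failed */

        /* On success, 'completed' holds the syncpoint value that
         * satisfied the wait. */
        return 0;
}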

/**
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool nvhost_syncpt_is_expired(
        struct nvhost_syncpt *sp,
        u32 id,
        u32 thresh)
{
        u32 current_val;
        u32 future_val;
        smp_rmb();
        current_val = (u32)atomic_read(&sp->min_val[id]);
        future_val = (u32)atomic_read(&sp->max_val[id]);

        /* Note the use of unsigned arithmetic here (mod 1<<32).
         *
         * c = current_val = min_val = the current value of the syncpoint.
         * t = thresh = the value we are checking
         * f = future_val = max_val = the value c will reach when all
         *     outstanding increments have completed.
         *
         * Note that c always chases f until it reaches f.
         *
         * Dtf = (f - t)
         * Dtc = (c - t)
         *
         * Consider all cases:
         *
         *      A) .....c..t..f.....    Dtf < Dtc       need to wait
         *      B) .....c.....f..t..    Dtf > Dtc       expired
         *      C) ..t..c.....f.....    Dtf > Dtc       expired (Dct very large)
         *
         * Any case where f==c: always expired (for any t).     Dtf == Dtc
         * Any case where t==c: always expired (for any f).     Dtf >= Dtc (because Dtc==0)
         * Any case where t==f!=c: always wait.  Dtf < Dtc (because Dtf==0,
         *      Dtc!=0)
         *
         * Other cases:
         *
         *      D) .....t..f..c.....    Dtf < Dtc       need to wait
         *      E) .....f..c..t.....    Dtf < Dtc       need to wait
         *      F) .....f..t..c.....    Dtf > Dtc       expired
         *
         * So:
         *      Dtf >= Dtc implies EXPIRED      (return true)
         *      Dtf < Dtc implies WAIT  (return false)
         *
         * Note: If t is expired then we *cannot* wait on it. We would wait
         * forever (hang the system).
         *
         * Note: do NOT get clever and remove the -thresh from both sides. It
         * is NOT the same.
         *
         * If the future value is zero, we have a client managed sync point.
         * In that case we do a direct comparison.
         */
        if (!client_managed(id))
                return future_val - thresh >= current_val - thresh;
        else
                return (s32)(current_val - thresh) >= 0;
}
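
/*
 * Worked example (editor's addition, not part of the original driver):
 * why the wrapping comparison above cannot be replaced by a plain
 * 'current_val >= thresh' check. Suppose the threshold has wrapped past
 * zero while the counter has not:
 *
 *      c = current_val = 0xfffffffe
 *      t = thresh      = 0x00000001
 *      f = future_val  = 0x00000003
 *
 * In unsigned mod-2^32 arithmetic:
 *
 *      Dtf = f - t = 0x00000002
 *      Dtc = c - t = 0xfffffffd
 *
 * Dtf < Dtc, so the syncpoint is NOT expired and the caller must wait;
 * the naive check (c >= t) would wrongly report it expired. Conversely,
 * once c has wrapped past t, e.g. c = 0x00000002, t = 0xffffffff,
 * f = 0x00000005:
 *
 *      Dtf = f - t = 0x00000006
 *      Dtc = c - t = 0x00000003
 *
 * Dtf >= Dtc, so the wait is expired, while the naive check would make
 * the caller wait forever across the wrap.
 */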

void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
{
        syncpt_op(sp).debug(sp);
}

int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx)
{
        struct nvhost_master *host = syncpt_to_dev(sp);
        u32 reg;

        nvhost_module_busy(host->dev);
        reg = syncpt_op(sp).mutex_try_lock(sp, idx);
        if (reg) {
                nvhost_module_idle(host->dev);
                return -EBUSY;
        }
        atomic_inc(&sp->lock_counts[idx]);
        return 0;
}

void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx)
{
        syncpt_op(sp).mutex_unlock(sp, idx);
        nvhost_module_idle(syncpt_to_dev(sp)->dev);
        atomic_dec(&sp->lock_counts[idx]);
}
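
/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * nvhost_mutex_try_lock() above fails fast with -EBUSY instead of blocking,
 * so a hypothetical caller retries with a bounded backoff. The retry count
 * and sleep range here are arbitrary.
 */
static int __maybe_unused example_locked_hw_access(struct nvhost_syncpt *sp,
                                                   int idx)
{
        int tries = 100;

        /* Sleep briefly between attempts rather than spinning hard. */
        while (nvhost_mutex_try_lock(sp, idx) == -EBUSY) {
                if (--tries == 0)
                        return -EBUSY;
                usleep_range(10, 20);
        }

        /* ... program the hardware protected by mutex 'idx' ... */

        nvhost_mutex_unlock(sp, idx);   /* also drops the module ref */
        return 0;
}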

/* check for old WAITs to be removed (avoiding a wrap) */
int nvhost_syncpt_wait_check(struct nvhost_syncpt *sp,
                             struct nvmap_client *nvmap,
                             u32 waitchk_mask,
                             struct nvhost_waitchk *wait,
                             int num_waitchk)
{
        return syncpt_op(sp).wait_check(sp, nvmap,
                        waitchk_mask, wait, num_waitchk);
}