Diffstat (limited to 'drivers/gpu/host1x/syncpt.c')
-rw-r--r--  drivers/gpu/host1x/syncpt.c  387
1 file changed, 387 insertions, 0 deletions
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
new file mode 100644
index 000000000000..4b493453e805
--- /dev/null
+++ b/drivers/gpu/host1x/syncpt.c
@@ -0,0 +1,387 @@
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host,
						  struct device *dev,
						  int client_managed)
{
	int i;
	struct host1x_syncpt *sp = host->syncpt;
	char *name;

	for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
		;
	if (i >= host->info->nb_pts)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id,
			 dev ? dev_name(dev) : NULL);
	if (!name)
		return NULL;

	sp->dev = dev;
	sp->name = name;
	sp->client_managed = client_managed;

	return sp;
}

u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
	return sp->id;
}

/*
 * Updates the cached maximum value, i.e. the value the syncpoint will reach
 * once all requested increments have been sent to hardware.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
	return (u32)atomic_add_return(incrs, &sp->max_val);
}

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	u32 i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
		host1x_hw_syncpt_restore(host, sp_base + i);
	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
	wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
	struct host1x_syncpt *sp_base = host->syncpt;
	u32 i;

	for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
		if (host1x_syncpt_client_managed(sp_base + i))
			host1x_hw_syncpt_load(host, sp_base + i);
		else
			WARN_ON(!host1x_syncpt_idle(sp_base + i));
	}

	for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
		host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Updates the cached syncpoint value by reading a new value from the hardware
 * register
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
	u32 val;
	val = host1x_hw_syncpt_load(sp->host, sp);
	trace_host1x_syncpt_load_min(sp->id, val);

	return val;
}

/*
 * Get the current syncpoint base
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
	u32 val;
	host1x_hw_syncpt_load_wait_base(sp->host, sp);
	val = sp->base_val;
	return val;
}

/*
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 */
void host1x_syncpt_cpu_incr(struct host1x_syncpt *sp)
{
	host1x_hw_syncpt_cpu_incr(sp->host, sp);
}

/*
 * Increment syncpoint value from cpu, updating cache
 */
void host1x_syncpt_incr(struct host1x_syncpt *sp)
{
	if (host1x_syncpt_client_managed(sp))
		host1x_syncpt_incr_max(sp, 1);
	host1x_syncpt_cpu_incr(sp);
}

/*
 * Updates the sync point from hardware, and returns true if the syncpoint is
 * expired, false if we may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	host1x_hw_syncpt_load(sp->host, sp);
	return host1x_syncpt_is_expired(sp, thresh);
}

/*
 * Main entrypoint for syncpoint value waits.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	struct host1x_waitlist *waiter;
	int err = 0, check_count = 0;
	u32 val;

	if (value)
		*value = 0;

	/* first check cache */
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = host1x_syncpt_load(sp);
		return 0;
	}

	/* try to read from register */
	val = host1x_hw_syncpt_load(sp->host, sp);
	if (host1x_syncpt_is_expired(sp, thresh)) {
		if (value)
			*value = val;
		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	/* allocate a waiter */
	waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	/* schedule a wakeup when the syncpoint value is reached */
	err = host1x_intr_add_action(sp->host, sp->id, thresh,
				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
				     &wq, waiter, &ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < 0)
		timeout = LONG_MAX;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
		int remain = wait_event_interruptible_timeout(wq,
				syncpt_load_min_is_expired(sp, thresh),
				check);
		if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
			if (value)
				*value = host1x_syncpt_load(sp);
			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			dev_warn(sp->host->dev,
				 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n",
				 current->comm, sp->id, sp->name,
				 thresh, timeout);

			host1x_debug_dump_syncpts(sp->host);
			if (check_count == MAX_STUCK_CHECK_COUNT)
				host1x_debug_dump(sp->host);
			check_count++;
		}
	}
	host1x_intr_put_ref(sp->host, sp->id, ref);

done:
	return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
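
/*
 * Illustrative sketch, not part of the original patch: how a client driver
 * could call the wait entrypoint exported above. The syncpoint handle and
 * the fence value are assumed to come from an earlier job submission; the
 * helper name and the two-second timeout are made up for the example.
 */
static int __maybe_unused example_wait_for_job(struct host1x_syncpt *sp,
					       u32 fence)
{
	u32 value;
	int err;

	/* Block for up to two seconds; -EAGAIN means the wait timed out. */
	err = host1x_syncpt_wait(sp, fence, 2 * HZ, &value);
	if (err < 0)
		return err;

	/* value holds the syncpoint value observed when the wait completed. */
	return 0;
}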

/*
 * Returns true if syncpoint is expired, false if we may need to wait
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
	u32 current_val;
	u32 future_val;
	smp_rmb();
	current_val = (u32)atomic_read(&sp->min_val);
	future_val = (u32)atomic_read(&sp->max_val);

	/* Note the use of unsigned arithmetic here (mod 1<<32).
	 *
	 * c = current_val = min_val	= the current value of the syncpoint.
	 * t = thresh			= the value we are checking
	 * f = future_val = max_val	= the value c will reach when all
	 *				  outstanding increments have completed.
	 *
	 * Note that c always chases f until it reaches f.
	 *
	 * Dtf = (f - t)
	 * Dtc = (c - t)
	 *
	 * Consider all cases:
	 *
	 *	A) .....c..t..f.....	Dtf < Dtc	need to wait
	 *	B) .....c.....f..t..	Dtf > Dtc	expired
	 *	C) ..t..c.....f.....	Dtf > Dtc	expired (Dct very large)
	 *
	 * Any case where f==c: always expired (for any t).	Dtf == Dcf
	 * Any case where t==c: always expired (for any f).	Dtf >= Dtc (because Dtc==0)
	 * Any case where t==f!=c: always wait.			Dtf < Dtc (because Dtf==0,
	 *							Dtc!=0)
	 *
	 * Other cases:
	 *
	 *	A) .....t..f..c.....	Dtf < Dtc	need to wait
	 *	A) .....f..c..t.....	Dtf < Dtc	need to wait
	 *	A) .....f..t..c.....	Dtf > Dtc	expired
	 *
	 * So:
	 *	Dtf >= Dtc implies EXPIRED	(return true)
	 *	Dtf < Dtc implies WAIT		(return false)
	 *
	 * Note: If t is expired then we *cannot* wait on it. We would wait
	 * forever (hang the system).
	 *
	 * Note: do NOT get clever and remove the -thresh from both sides. It
	 * is NOT the same.
	 *
	 * If the future value is zero, we have a client managed sync point. In
	 * that case we do a direct comparison.
	 */
	if (!host1x_syncpt_client_managed(sp))
		return future_val - thresh >= current_val - thresh;
	else
		return (s32)(current_val - thresh) >= 0;
}
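
/*
 * Worked example, not part of the original patch: the unsigned comparison in
 * host1x_syncpt_is_expired() stays correct across 32-bit wrap-around. With
 * values picked purely for illustration:
 *
 *	t = thresh      = 0xfffffffe
 *	c = current_val = 0x00000002	(the syncpoint has wrapped past t)
 *	f = future_val  = 0x00000005
 *
 *	Dtf = f - t = 7 (mod 1<<32)
 *	Dtc = c - t = 4 (mod 1<<32)
 *
 * Dtf >= Dtc, so the threshold is reported as expired, even though a plain
 * comparison of c against t would claim the wait is still pending.
 */
static bool __maybe_unused example_expired(u32 current_val, u32 future_val,
					   u32 thresh)
{
	/* Same wrap-safe test as the host-managed path above. */
	return future_val - thresh >= current_val - thresh;
}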

/* remove a wait pointed to by patch_addr */
int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
{
	return host1x_hw_syncpt_patch_wait(sp->host, sp, patch_addr);
}

int host1x_syncpt_init(struct host1x *host)
{
	struct host1x_syncpt *syncpt;
	int i;

	syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts,
			      GFP_KERNEL);
	if (!syncpt)
		return -ENOMEM;

	for (i = 0; i < host->info->nb_pts; ++i) {
		syncpt[i].id = i;
		syncpt[i].host = host;
	}

	host->syncpt = syncpt;

	host1x_syncpt_restore(host);

	/* Allocate sync point to use for clearing waits for expired fences */
	host->nop_sp = _host1x_syncpt_alloc(host, NULL, 0);
	if (!host->nop_sp)
		return -ENOMEM;

	return 0;
}

struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
					    int client_managed)
{
	struct host1x *host = dev_get_drvdata(dev->parent);
	return _host1x_syncpt_alloc(host, dev, client_managed);
}
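
/*
 * Illustrative sketch, not part of the original patch: a host1x client
 * (a device whose parent holds the struct host1x drvdata, as assumed by
 * host1x_syncpt_request() above) acquiring and later releasing a
 * host-managed syncpoint. The helper name is hypothetical.
 */
static int __maybe_unused example_client_setup(struct device *dev)
{
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_request(dev, 0);
	if (!sp)
		return -ENOMEM;

	/* ... submit work and wait on sp here ... */

	host1x_syncpt_free(sp);
	return 0;
}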

void host1x_syncpt_free(struct host1x_syncpt *sp)
{
	if (!sp)
		return;

	kfree(sp->name);
	sp->dev = NULL;
	sp->name = NULL;
	sp->client_managed = 0;
}

void host1x_syncpt_deinit(struct host1x *host)
{
	int i;
	struct host1x_syncpt *sp = host->syncpt;
	for (i = 0; i < host->info->nb_pts; i++, sp++)
		kfree(sp->name);
}

int host1x_syncpt_nb_pts(struct host1x *host)
{
	return host->info->nb_pts;
}

int host1x_syncpt_nb_bases(struct host1x *host)
{
	return host->info->nb_bases;
}

int host1x_syncpt_nb_mlocks(struct host1x *host)
{
	return host->info->nb_mlocks;
}

struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id)
{
	if (id >= host->info->nb_pts)
		return NULL;
	return host->syncpt + id;
}