aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/video/tegra/host/host1x
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/video/tegra/host/host1x')
-rw-r--r--drivers/video/tegra/host/host1x/Makefile12
-rw-r--r--drivers/video/tegra/host/host1x/host1x_cdma.c665
-rw-r--r--drivers/video/tegra/host/host1x/host1x_cdma.h41
-rw-r--r--drivers/video/tegra/host/host1x/host1x_channel.c627
-rw-r--r--drivers/video/tegra/host/host1x/host1x_channel.h45
-rw-r--r--drivers/video/tegra/host/host1x/host1x_debug.c404
-rw-r--r--drivers/video/tegra/host/host1x/host1x_hardware.h274
-rw-r--r--drivers/video/tegra/host/host1x/host1x_hwctx.h65
-rw-r--r--drivers/video/tegra/host/host1x/host1x_intr.c218
-rw-r--r--drivers/video/tegra/host/host1x/host1x_syncpt.c248
-rw-r--r--drivers/video/tegra/host/host1x/host1x_syncpt.h77
11 files changed, 2676 insertions, 0 deletions
diff --git a/drivers/video/tegra/host/host1x/Makefile b/drivers/video/tegra/host/host1x/Makefile
new file mode 100644
index 00000000000..c3214ffe147
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/Makefile
@@ -0,0 +1,12 @@
1GCOV_PROFILE := y
2
3EXTRA_CFLAGS += -Idrivers/video/tegra/host
4
5nvhost-host1x-objs = \
6 host1x_syncpt.o \
7 host1x_channel.o \
8 host1x_intr.o \
9 host1x_cdma.o \
10 host1x_debug.o
11
12obj-$(CONFIG_TEGRA_GRHOST) += nvhost-host1x.o
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.c b/drivers/video/tegra/host/host1x/host1x_cdma.c
new file mode 100644
index 00000000000..cdd6026718b
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cdma.c
@@ -0,0 +1,665 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_cdma.c
3 *
4 * Tegra Graphics Host Command DMA
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/slab.h>
22#include "nvhost_cdma.h"
23#include "dev.h"
24
25#include "host1x_hardware.h"
26#include "host1x_syncpt.h"
27#include "host1x_cdma.h"
28#include "host1x_hwctx.h"
29
30static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get)
31{
32 return HOST1X_CREATE(CHANNEL_DMACTRL, DMASTOP, stop)
33 | HOST1X_CREATE(CHANNEL_DMACTRL, DMAGETRST, get_rst)
34 | HOST1X_CREATE(CHANNEL_DMACTRL, DMAINITGET, init_get);
35}
36
37static void cdma_timeout_handler(struct work_struct *work);
38
39/*
40 * push_buffer
41 *
42 * The push buffer is a circular array of words to be fetched by command DMA.
43 * Note that it works slightly differently to the sync queue; fence == cur
44 * means that the push buffer is full, not empty.
45 */
46
47
48/**
49 * Reset to empty push buffer
50 */
51static void push_buffer_reset(struct push_buffer *pb)
52{
53 pb->fence = PUSH_BUFFER_SIZE - 8;
54 pb->cur = 0;
55}
56
57/**
58 * Init push buffer resources
59 */
60static int push_buffer_init(struct push_buffer *pb)
61{
62 struct nvhost_cdma *cdma = pb_to_cdma(pb);
63 struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
64 pb->mem = NULL;
65 pb->mapped = NULL;
66 pb->phys = 0;
67 pb->nvmap = NULL;
68
69 BUG_ON(!cdma_pb_op(cdma).reset);
70 cdma_pb_op(cdma).reset(pb);
71
72 /* allocate and map pushbuffer memory */
73 pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
74 NVMAP_HANDLE_WRITE_COMBINE, 0);
75 if (IS_ERR_OR_NULL(pb->mem)) {
76 pb->mem = NULL;
77 goto fail;
78 }
79 pb->mapped = nvmap_mmap(pb->mem);
80 if (pb->mapped == NULL)
81 goto fail;
82
83 /* pin pushbuffer and get physical address */
84 pb->phys = nvmap_pin(nvmap, pb->mem);
85 if (pb->phys >= 0xfffff000) {
86 pb->phys = 0;
87 goto fail;
88 }
89
90 /* memory for storing nvmap client and handles for each opcode pair */
91 pb->nvmap = kzalloc(NVHOST_GATHER_QUEUE_SIZE *
92 sizeof(struct nvmap_client_handle),
93 GFP_KERNEL);
94 if (!pb->nvmap)
95 goto fail;
96
97 /* put the restart at the end of pushbuffer memory */
98 *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) =
99 nvhost_opcode_restart(pb->phys);
100
101 return 0;
102
103fail:
104 cdma_pb_op(cdma).destroy(pb);
105 return -ENOMEM;
106}
107
108/**
109 * Clean up push buffer resources
110 */
111static void push_buffer_destroy(struct push_buffer *pb)
112{
113 struct nvhost_cdma *cdma = pb_to_cdma(pb);
114 struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
115 if (pb->mapped)
116 nvmap_munmap(pb->mem, pb->mapped);
117
118 if (pb->phys != 0)
119 nvmap_unpin(nvmap, pb->mem);
120
121 if (pb->mem)
122 nvmap_free(nvmap, pb->mem);
123
124 kfree(pb->nvmap);
125
126 pb->mem = NULL;
127 pb->mapped = NULL;
128 pb->phys = 0;
129 pb->nvmap = 0;
130}
131
132/**
133 * Push two words to the push buffer
134 * Caller must ensure push buffer is not full
135 */
136static void push_buffer_push_to(struct push_buffer *pb,
137 struct nvmap_client *client,
138 struct nvmap_handle *handle, u32 op1, u32 op2)
139{
140 u32 cur = pb->cur;
141 u32 *p = (u32 *)((u32)pb->mapped + cur);
142 u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1);
143 BUG_ON(cur == pb->fence);
144 *(p++) = op1;
145 *(p++) = op2;
146 pb->nvmap[cur_nvmap].client = client;
147 pb->nvmap[cur_nvmap].handle = handle;
148 pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
149}
150
151/**
152 * Pop a number of two word slots from the push buffer
153 * Caller must ensure push buffer is not empty
154 */
155static void push_buffer_pop_from(struct push_buffer *pb,
156 unsigned int slots)
157{
158 /* Clear the nvmap references for old items from pb */
159 unsigned int i;
160 u32 fence_nvmap = pb->fence/8;
161 for (i = 0; i < slots; i++) {
162 int cur_fence_nvmap = (fence_nvmap+i)
163 & (NVHOST_GATHER_QUEUE_SIZE - 1);
164 struct nvmap_client_handle *h =
165 &pb->nvmap[cur_fence_nvmap];
166 h->client = NULL;
167 h->handle = NULL;
168 }
169 /* Advance the next write position */
170 pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
171}
172
173/**
174 * Return the number of two word slots free in the push buffer
175 */
176static u32 push_buffer_space(struct push_buffer *pb)
177{
178 return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
179}
180
181static u32 push_buffer_putptr(struct push_buffer *pb)
182{
183 return pb->phys + pb->cur;
184}
185
186/*
187 * The syncpt incr buffer is filled with methods to increment syncpts, which
188 * is later GATHER-ed into the mainline PB. It's used when a timed out context
189 * is interleaved with other work, so needs to inline the syncpt increments
190 * to maintain the count (but otherwise does no work).
191 */
192
193/**
194 * Init timeout and syncpt incr buffer resources
195 */
196static int cdma_timeout_init(struct nvhost_cdma *cdma,
197 u32 syncpt_id)
198{
199 struct nvhost_master *dev = cdma_to_dev(cdma);
200 struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
201 struct syncpt_buffer *sb = &cdma->syncpt_buffer;
202 struct nvhost_channel *ch = cdma_to_channel(cdma);
203 u32 i = 0;
204
205 if (syncpt_id == NVSYNCPT_INVALID)
206 return -EINVAL;
207
208 /* allocate and map syncpt incr memory */
209 sb->mem = nvmap_alloc(nvmap,
210 (SYNCPT_INCR_BUFFER_SIZE_WORDS * sizeof(u32)), 32,
211 NVMAP_HANDLE_WRITE_COMBINE, 0);
212 if (IS_ERR_OR_NULL(sb->mem)) {
213 sb->mem = NULL;
214 goto fail;
215 }
216 sb->mapped = nvmap_mmap(sb->mem);
217 if (sb->mapped == NULL)
218 goto fail;
219
220 /* pin syncpt buffer and get physical address */
221 sb->phys = nvmap_pin(nvmap, sb->mem);
222 if (sb->phys >= 0xfffff000) {
223 sb->phys = 0;
224 goto fail;
225 }
226
227 dev_dbg(&dev->dev->dev, "%s: SYNCPT_INCR buffer at 0x%x\n",
228 __func__, sb->phys);
229
230 sb->words_per_incr = (syncpt_id == NVSYNCPT_3D) ? 5 : 3;
231 sb->incr_per_buffer = (SYNCPT_INCR_BUFFER_SIZE_WORDS /
232 sb->words_per_incr);
233
234 /* init buffer with SETCL and INCR_SYNCPT methods */
235 while (i < sb->incr_per_buffer) {
236 sb->mapped[i++] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
237 0, 0);
238 sb->mapped[i++] = nvhost_opcode_imm_incr_syncpt(
239 NV_SYNCPT_IMMEDIATE,
240 syncpt_id);
241 if (syncpt_id == NVSYNCPT_3D) {
242 /* also contains base increments */
243 sb->mapped[i++] = nvhost_opcode_nonincr(
244 NV_CLASS_HOST_INCR_SYNCPT_BASE,
245 1);
246 sb->mapped[i++] = nvhost_class_host_incr_syncpt_base(
247 NVWAITBASE_3D, 1);
248 }
249 sb->mapped[i++] = nvhost_opcode_setclass(ch->dev->class,
250 0, 0);
251 }
252 wmb();
253
254 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
255 cdma->timeout.initialized = true;
256
257 return 0;
258fail:
259 cdma_op(cdma).timeout_destroy(cdma);
260 return -ENOMEM;
261}
262
263/**
264 * Clean up timeout syncpt buffer resources
265 */
266static void cdma_timeout_destroy(struct nvhost_cdma *cdma)
267{
268 struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
269 struct syncpt_buffer *sb = &cdma->syncpt_buffer;
270
271 if (sb->mapped)
272 nvmap_munmap(sb->mem, sb->mapped);
273
274 if (sb->phys != 0)
275 nvmap_unpin(nvmap, sb->mem);
276
277 if (sb->mem)
278 nvmap_free(nvmap, sb->mem);
279
280 sb->mem = NULL;
281 sb->mapped = NULL;
282 sb->phys = 0;
283
284 if (cdma->timeout.initialized)
285 cancel_delayed_work(&cdma->timeout.wq);
286 cdma->timeout.initialized = false;
287}
288
289/**
290 * Increment timedout buffer's syncpt via CPU.
291 */
292static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr,
293 u32 syncpt_incrs, u32 syncval, u32 nr_slots)
294{
295 struct nvhost_master *dev = cdma_to_dev(cdma);
296 struct push_buffer *pb = &cdma->push_buffer;
297 u32 i, getidx;
298
299 for (i = 0; i < syncpt_incrs; i++)
300 nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id);
301
302 /* after CPU incr, ensure shadow is up to date */
303 nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id);
304
305 /* update WAITBASE_3D by same number of incrs */
306 if (cdma->timeout.syncpt_id == NVSYNCPT_3D) {
307 void __iomem *p;
308 p = dev->sync_aperture + HOST1X_SYNC_SYNCPT_BASE_0 +
309 (NVWAITBASE_3D * sizeof(u32));
310 writel(syncval, p);
311 }
312
313 /* NOP all the PB slots */
314 getidx = getptr - pb->phys;
315 while (nr_slots--) {
316 u32 *p = (u32 *)((u32)pb->mapped + getidx);
317 *(p++) = NVHOST_OPCODE_NOOP;
318 *(p++) = NVHOST_OPCODE_NOOP;
319 dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n",
320 __func__, pb->phys + getidx);
321 getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
322 }
323 wmb();
324}
325
326/**
327 * This routine is called at the point we transition back into a timed
328 * ctx. The syncpts are incremented via pushbuffer with a flag indicating
329 * whether there's a CTXSAVE that should be still executed (for the
330 * preceding HW ctx).
331 */
332static void cdma_timeout_pb_incr(struct nvhost_cdma *cdma, u32 getptr,
333 u32 syncpt_incrs, u32 nr_slots,
334 bool exec_ctxsave)
335{
336 struct nvhost_master *dev = cdma_to_dev(cdma);
337 struct syncpt_buffer *sb = &cdma->syncpt_buffer;
338 struct push_buffer *pb = &cdma->push_buffer;
339 struct host1x_hwctx *hwctx = to_host1x_hwctx(cdma->timeout.ctx);
340 u32 getidx, *p;
341
342 /* should have enough slots to incr to desired count */
343 BUG_ON(syncpt_incrs > (nr_slots * sb->incr_per_buffer));
344
345 getidx = getptr - pb->phys;
346 if (exec_ctxsave) {
347 /* don't disrupt the CTXSAVE of a good/non-timed out ctx */
348 nr_slots -= hwctx->save_slots;
349 syncpt_incrs -= hwctx->save_incrs;
350
351 getidx += (hwctx->save_slots * 8);
352 getidx &= (PUSH_BUFFER_SIZE - 1);
353
354 dev_dbg(&dev->dev->dev,
355 "%s: exec CTXSAVE of prev ctx (slots %d, incrs %d)\n",
356 __func__, nr_slots, syncpt_incrs);
357 }
358
359 while (syncpt_incrs) {
360 u32 incrs, count;
361
362 /* GATHER count are incrs * number of DWORDs per incr */
363 incrs = min(syncpt_incrs, sb->incr_per_buffer);
364 count = incrs * sb->words_per_incr;
365
366 p = (u32 *)((u32)pb->mapped + getidx);
367 *(p++) = nvhost_opcode_gather(count);
368 *(p++) = sb->phys;
369
370 dev_dbg(&dev->dev->dev,
371 "%s: GATHER at 0x%x, from 0x%x, dcount = %d\n",
372 __func__,
373 pb->phys + getidx, sb->phys,
374 (incrs * sb->words_per_incr));
375
376 syncpt_incrs -= incrs;
377 getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
378 nr_slots--;
379 }
380
381 /* NOP remaining slots */
382 while (nr_slots--) {
383 p = (u32 *)((u32)pb->mapped + getidx);
384 *(p++) = NVHOST_OPCODE_NOOP;
385 *(p++) = NVHOST_OPCODE_NOOP;
386 dev_dbg(&dev->dev->dev, "%s: NOP at 0x%x\n",
387 __func__, pb->phys + getidx);
388 getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
389 }
390 wmb();
391}
392
393/**
394 * Start channel DMA
395 */
396static void cdma_start(struct nvhost_cdma *cdma)
397{
398 void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
399
400 if (cdma->running)
401 return;
402
403 BUG_ON(!cdma_pb_op(cdma).putptr);
404 cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
405
406 writel(host1x_channel_dmactrl(true, false, false),
407 chan_regs + HOST1X_CHANNEL_DMACTRL);
408
409 /* set base, put, end pointer (all of memory) */
410 writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
411 writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
412 writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
413
414 /* reset GET */
415 writel(host1x_channel_dmactrl(true, true, true),
416 chan_regs + HOST1X_CHANNEL_DMACTRL);
417
418 /* start the command DMA */
419 writel(host1x_channel_dmactrl(false, false, false),
420 chan_regs + HOST1X_CHANNEL_DMACTRL);
421
422 cdma->running = true;
423}
424
425/**
426 * Similar to cdma_start(), but rather than starting from an idle
427 * state (where DMA GET is set to DMA PUT), on a timeout we restore
428 * DMA GET from an explicit value (so DMA may again be pending).
429 */
430static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr)
431{
432 struct nvhost_master *dev = cdma_to_dev(cdma);
433 void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
434
435 if (cdma->running)
436 return;
437
438 BUG_ON(!cdma_pb_op(cdma).putptr);
439 cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
440
441 writel(host1x_channel_dmactrl(true, false, false),
442 chan_regs + HOST1X_CHANNEL_DMACTRL);
443
444 /* set base, end pointer (all of memory) */
445 writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
446 writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
447
448 /* set GET, by loading the value in PUT (then reset GET) */
449 writel(getptr, chan_regs + HOST1X_CHANNEL_DMAPUT);
450 writel(host1x_channel_dmactrl(true, true, true),
451 chan_regs + HOST1X_CHANNEL_DMACTRL);
452
453 dev_dbg(&dev->dev->dev,
454 "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
455 __func__,
456 readl(chan_regs + HOST1X_CHANNEL_DMAGET),
457 readl(chan_regs + HOST1X_CHANNEL_DMAPUT),
458 cdma->last_put);
459
460 /* deassert GET reset and set PUT */
461 writel(host1x_channel_dmactrl(true, false, false),
462 chan_regs + HOST1X_CHANNEL_DMACTRL);
463 writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
464
465 /* start the command DMA */
466 writel(host1x_channel_dmactrl(false, false, false),
467 chan_regs + HOST1X_CHANNEL_DMACTRL);
468
469 cdma->running = true;
470}
471
472/**
473 * Kick channel DMA into action by writing its PUT offset (if it has changed)
474 */
475static void cdma_kick(struct nvhost_cdma *cdma)
476{
477 u32 put;
478 BUG_ON(!cdma_pb_op(cdma).putptr);
479
480 put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);
481
482 if (put != cdma->last_put) {
483 void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
484 wmb();
485 writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
486 cdma->last_put = put;
487 }
488}
489
490static void cdma_stop(struct nvhost_cdma *cdma)
491{
492 void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
493
494 mutex_lock(&cdma->lock);
495 if (cdma->running) {
496 nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
497 writel(host1x_channel_dmactrl(true, false, false),
498 chan_regs + HOST1X_CHANNEL_DMACTRL);
499 cdma->running = false;
500 }
501 mutex_unlock(&cdma->lock);
502}
503
504/**
505 * Retrieve the op pair at a slot offset from a DMA address
506 */
507void cdma_peek(struct nvhost_cdma *cdma,
508 u32 dmaget, int slot, u32 *out)
509{
510 u32 offset = dmaget - cdma->push_buffer.phys;
511 u32 *p = cdma->push_buffer.mapped;
512
513 offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2;
514 out[0] = p[offset];
515 out[1] = p[offset + 1];
516}
517
518/**
519 * Stops both channel's command processor and CDMA immediately.
520 * Also, tears down the channel and resets corresponding module.
521 */
522void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma)
523{
524 struct nvhost_master *dev = cdma_to_dev(cdma);
525 struct nvhost_channel *ch = cdma_to_channel(cdma);
526 u32 cmdproc_stop;
527
528 BUG_ON(cdma->torndown);
529
530 dev_dbg(&dev->dev->dev,
531 "begin channel teardown (channel id %d)\n", ch->chid);
532
533 cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
534 cmdproc_stop |= BIT(ch->chid);
535 writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
536
537 dev_dbg(&dev->dev->dev,
538 "%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
539 __func__,
540 readl(ch->aperture + HOST1X_CHANNEL_DMAGET),
541 readl(ch->aperture + HOST1X_CHANNEL_DMAPUT),
542 cdma->last_put);
543
544 writel(host1x_channel_dmactrl(true, false, false),
545 ch->aperture + HOST1X_CHANNEL_DMACTRL);
546
547 writel(BIT(ch->chid), dev->sync_aperture + HOST1X_SYNC_CH_TEARDOWN);
548 nvhost_module_reset(ch->dev);
549
550 cdma->running = false;
551 cdma->torndown = true;
552}
553
554void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr)
555{
556 struct nvhost_master *dev = cdma_to_dev(cdma);
557 struct nvhost_channel *ch = cdma_to_channel(cdma);
558 u32 cmdproc_stop;
559
560 BUG_ON(!cdma->torndown || cdma->running);
561
562 dev_dbg(&dev->dev->dev,
563 "end channel teardown (id %d, DMAGET restart = 0x%x)\n",
564 ch->chid, getptr);
565
566 cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
567 cmdproc_stop &= ~(BIT(ch->chid));
568 writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
569
570 cdma->torndown = false;
571 cdma_timeout_restart(cdma, getptr);
572}
573
574/**
575 * If this timeout fires, it indicates the current sync_queue entry has
576 * exceeded its TTL and the userctx should be timed out and remaining
577 * submits already issued cleaned up (future submits return an error).
578 */
579static void cdma_timeout_handler(struct work_struct *work)
580{
581 struct nvhost_cdma *cdma;
582 struct nvhost_master *dev;
583 struct nvhost_syncpt *sp;
584 struct nvhost_channel *ch;
585
586 u32 syncpt_val;
587
588 u32 prev_cmdproc, cmdproc_stop;
589
590 cdma = container_of(to_delayed_work(work), struct nvhost_cdma,
591 timeout.wq);
592 dev = cdma_to_dev(cdma);
593 sp = &dev->syncpt;
594 ch = cdma_to_channel(cdma);
595
596 mutex_lock(&cdma->lock);
597
598 if (!cdma->timeout.clientid) {
599 dev_dbg(&dev->dev->dev,
600 "cdma_timeout: expired, but has no clientid\n");
601 mutex_unlock(&cdma->lock);
602 return;
603 }
604
605 /* stop processing to get a clean snapshot */
606 prev_cmdproc = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
607 cmdproc_stop = prev_cmdproc | BIT(ch->chid);
608 writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
609
610 dev_dbg(&dev->dev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
611 prev_cmdproc, cmdproc_stop);
612
613 syncpt_val = nvhost_syncpt_update_min(&dev->syncpt,
614 cdma->timeout.syncpt_id);
615
616 /* has buffer actually completed? */
617 if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
618 dev_dbg(&dev->dev->dev,
619 "cdma_timeout: expired, but buffer had completed\n");
620 /* restore */
621 cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid));
622 writel(cmdproc_stop,
623 dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
624 mutex_unlock(&cdma->lock);
625 return;
626 }
627
628 dev_warn(&dev->dev->dev,
629 "%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n",
630 __func__,
631 cdma->timeout.syncpt_id,
632 syncpt_op(sp).name(sp, cdma->timeout.syncpt_id),
633 cdma->timeout.ctx,
634 syncpt_val, cdma->timeout.syncpt_val);
635
636 /* stop HW, resetting channel/module */
637 cdma_op(cdma).timeout_teardown_begin(cdma);
638
639 nvhost_cdma_update_sync_queue(cdma, sp, &dev->dev->dev);
640 mutex_unlock(&cdma->lock);
641}
642
643int host1x_init_cdma_support(struct nvhost_master *host)
644{
645 host->op.cdma.start = cdma_start;
646 host->op.cdma.stop = cdma_stop;
647 host->op.cdma.kick = cdma_kick;
648
649 host->op.cdma.timeout_init = cdma_timeout_init;
650 host->op.cdma.timeout_destroy = cdma_timeout_destroy;
651 host->op.cdma.timeout_teardown_begin = cdma_timeout_teardown_begin;
652 host->op.cdma.timeout_teardown_end = cdma_timeout_teardown_end;
653 host->op.cdma.timeout_cpu_incr = cdma_timeout_cpu_incr;
654 host->op.cdma.timeout_pb_incr = cdma_timeout_pb_incr;
655
656 host->op.push_buffer.reset = push_buffer_reset;
657 host->op.push_buffer.init = push_buffer_init;
658 host->op.push_buffer.destroy = push_buffer_destroy;
659 host->op.push_buffer.push_to = push_buffer_push_to;
660 host->op.push_buffer.pop_from = push_buffer_pop_from;
661 host->op.push_buffer.space = push_buffer_space;
662 host->op.push_buffer.putptr = push_buffer_putptr;
663
664 return 0;
665}
diff --git a/drivers/video/tegra/host/host1x/host1x_cdma.h b/drivers/video/tegra/host/host1x/host1x_cdma.h
new file mode 100644
index 00000000000..60909236a7c
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_cdma.h
@@ -0,0 +1,41 @@
/*
 * drivers/video/tegra/host/host1x/host1x_cdma.h
 *
 * Tegra Graphics Host Channel
 *
 * Copyright (c) 2011-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __NVHOST_HOST1X_HOST1X_CDMA_H
#define __NVHOST_HOST1X_HOST1X_CDMA_H

/* Size of the sync queue. Too small limits how many command buffers can
 * be queued up; too large wastes memory. */
#define NVHOST_SYNC_QUEUE_SIZE 512

/* Number of gathers queueable per channel. Must be a power of two.
 * Currently sized so the pushbuffer is 4KB (512 * 8B). */
#define NVHOST_GATHER_QUEUE_SIZE 512

/* 8 bytes per slot. (This number does not include the final RESTART.) */
#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)

/* 4K page of GATHERed methods that increment channel syncpts, used in
 * place of a timed-out context's original GATHER slots */
#define SYNCPT_INCR_BUFFER_SIZE_WORDS (4096 / sizeof(u32))

int host1x_init_cdma_support(struct nvhost_master *);

#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.c b/drivers/video/tegra/host/host1x/host1x_channel.c
new file mode 100644
index 00000000000..b16a34f416a
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_channel.c
@@ -0,0 +1,627 @@
1/*
2 * drivers/video/tegra/host/host1x/channel_host1x.c
3 *
4 * Tegra Graphics Host Channel
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include "nvhost_channel.h"
22#include "dev.h"
23#include "nvhost_hwctx.h"
24#include <trace/events/nvhost.h>
25#include <linux/slab.h>
26
27#include "host1x_syncpt.h"
28#include "host1x_channel.h"
29#include "host1x_hardware.h"
30#include "host1x_hwctx.h"
31#include "nvhost_intr.h"
32
33#define NV_FIFO_READ_TIMEOUT 200000
34
35static void sync_waitbases(struct nvhost_channel *ch, u32 syncpt_val)
36{
37 unsigned long waitbase;
38 unsigned long int waitbase_mask = ch->dev->waitbases;
39 if (ch->dev->waitbasesync) {
40 waitbase = find_first_bit(&waitbase_mask, BITS_PER_LONG);
41 nvhost_cdma_push(&ch->cdma,
42 nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
43 NV_CLASS_HOST_LOAD_SYNCPT_BASE,
44 1),
45 nvhost_class_host_load_syncpt_base(waitbase,
46 syncpt_val));
47 }
48}
49
50static void *pre_submit_ctxsave(struct nvhost_job *job,
51 struct nvhost_hwctx *cur_ctx)
52{
53 struct nvhost_channel *ch = job->ch;
54 void *ctxsave_waiter = NULL;
55
56 /* Is a save needed? */
57 if (!cur_ctx || ch->cur_ctx == job->hwctx)
58 return NULL;
59
60 if (cur_ctx->has_timedout) {
61 dev_dbg(&ch->dev->dev,
62 "%s: skip save of timed out context (0x%p)\n",
63 __func__, ch->cur_ctx);
64
65 return NULL;
66 }
67
68 /* Allocate save waiter if needed */
69 if (ch->ctxhandler->save_service) {
70 ctxsave_waiter = nvhost_intr_alloc_waiter();
71 if (!ctxsave_waiter)
72 return ERR_PTR(-ENOMEM);
73 }
74
75 return ctxsave_waiter;
76}
77
78static void submit_ctxsave(struct nvhost_job *job, void *ctxsave_waiter,
79 struct nvhost_hwctx *cur_ctx)
80{
81 struct nvhost_master *host = nvhost_get_host(job->ch->dev);
82 struct nvhost_channel *ch = job->ch;
83 u32 syncval;
84 int err;
85 u32 save_thresh = 0;
86
87 /* Is a save needed? */
88 if (!cur_ctx || cur_ctx == job->hwctx || cur_ctx->has_timedout)
89 return;
90
91 /* Retrieve save threshold if we have a waiter */
92 if (ctxsave_waiter)
93 save_thresh =
94 nvhost_syncpt_read_max(&host->syncpt, job->syncpt_id)
95 + to_host1x_hwctx(cur_ctx)->save_thresh;
96
97 /* Adjust the syncpoint max */
98 job->syncpt_incrs += to_host1x_hwctx(cur_ctx)->save_incrs;
99 syncval = nvhost_syncpt_incr_max(&host->syncpt,
100 job->syncpt_id,
101 to_host1x_hwctx(cur_ctx)->save_incrs);
102
103 /* Send the save to channel */
104 cur_ctx->valid = true;
105 ch->ctxhandler->save_push(cur_ctx, &ch->cdma);
106 nvhost_job_get_hwctx(job, cur_ctx);
107
108 /* Notify save service */
109 if (ctxsave_waiter) {
110 err = nvhost_intr_add_action(&host->intr,
111 job->syncpt_id,
112 save_thresh,
113 NVHOST_INTR_ACTION_CTXSAVE, cur_ctx,
114 ctxsave_waiter,
115 NULL);
116 ctxsave_waiter = NULL;
117 WARN(err, "Failed to set ctx save interrupt");
118 }
119
120 trace_nvhost_channel_context_save(ch->dev->name, cur_ctx);
121}
122
123static void submit_ctxrestore(struct nvhost_job *job)
124{
125 struct nvhost_master *host = nvhost_get_host(job->ch->dev);
126 struct nvhost_channel *ch = job->ch;
127 u32 syncval;
128 struct host1x_hwctx *ctx =
129 job->hwctx ? to_host1x_hwctx(job->hwctx) : NULL;
130
131 /* First check if we have a valid context to restore */
132 if(ch->cur_ctx == job->hwctx || !job->hwctx || !job->hwctx->valid)
133 return;
134
135 /* Increment syncpt max */
136 job->syncpt_incrs += ctx->restore_incrs;
137 syncval = nvhost_syncpt_incr_max(&host->syncpt,
138 job->syncpt_id,
139 ctx->restore_incrs);
140
141 /* Send restore buffer to channel */
142 nvhost_cdma_push_gather(&ch->cdma,
143 host->nvmap,
144 nvmap_ref_to_handle(ctx->restore),
145 nvhost_opcode_gather(ctx->restore_size),
146 ctx->restore_phys);
147
148 trace_nvhost_channel_context_restore(ch->dev->name, &ctx->hwctx);
149}
150
151void submit_nullkickoff(struct nvhost_job *job, int user_syncpt_incrs)
152{
153 struct nvhost_channel *ch = job->ch;
154 int incr;
155 u32 op_incr;
156
157 /* push increments that correspond to nulled out commands */
158 op_incr = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
159 job->syncpt_id);
160 for (incr = 0; incr < (user_syncpt_incrs >> 1); incr++)
161 nvhost_cdma_push(&ch->cdma, op_incr, op_incr);
162 if (user_syncpt_incrs & 1)
163 nvhost_cdma_push(&ch->cdma, op_incr, NVHOST_OPCODE_NOOP);
164
165 /* for 3d, waitbase needs to be incremented after each submit */
166 if (ch->dev->class == NV_GRAPHICS_3D_CLASS_ID) {
167 u32 waitbase = to_host1x_hwctx_handler(job->hwctx->h)->waitbase;
168 nvhost_cdma_push(&ch->cdma,
169 nvhost_opcode_setclass(
170 NV_HOST1X_CLASS_ID,
171 NV_CLASS_HOST_INCR_SYNCPT_BASE,
172 1),
173 nvhost_class_host_incr_syncpt_base(
174 waitbase,
175 user_syncpt_incrs));
176 }
177}
178
179void submit_gathers(struct nvhost_job *job)
180{
181 /* push user gathers */
182 int i = 0;
183 for ( ; i < job->num_gathers; i++) {
184 u32 op1 = nvhost_opcode_gather(job->gathers[i].words);
185 u32 op2 = job->gathers[i].mem;
186 nvhost_cdma_push_gather(&job->ch->cdma,
187 job->nvmap, job->unpins[i/2],
188 op1, op2);
189 }
190}
191
192int host1x_channel_submit(struct nvhost_job *job)
193{
194 struct nvhost_channel *ch = job->ch;
195 struct nvhost_syncpt *sp = &nvhost_get_host(job->ch->dev)->syncpt;
196 u32 user_syncpt_incrs = job->syncpt_incrs;
197 u32 prev_max = 0;
198 u32 syncval;
199 int err;
200 void *completed_waiter = NULL, *ctxsave_waiter = NULL;
201
202 /* Bail out on timed out contexts */
203 if (job->hwctx && job->hwctx->has_timedout)
204 return -ETIMEDOUT;
205
206 /* Turn on the client module and host1x */
207 nvhost_module_busy(ch->dev);
208 if (ch->dev->busy)
209 ch->dev->busy(ch->dev);
210
211 /* before error checks, return current max */
212 prev_max = job->syncpt_end =
213 nvhost_syncpt_read_max(sp, job->syncpt_id);
214
215 /* get submit lock */
216 err = mutex_lock_interruptible(&ch->submitlock);
217 if (err) {
218 nvhost_module_idle(ch->dev);
219 goto error;
220 }
221
222 /* Do the needed allocations */
223 ctxsave_waiter = pre_submit_ctxsave(job, ch->cur_ctx);
224 if (IS_ERR(ctxsave_waiter)) {
225 err = PTR_ERR(ctxsave_waiter);
226 nvhost_module_idle(ch->dev);
227 mutex_unlock(&ch->submitlock);
228 goto error;
229 }
230
231 completed_waiter = nvhost_intr_alloc_waiter();
232 if (!completed_waiter) {
233 nvhost_module_idle(ch->dev);
234 mutex_unlock(&ch->submitlock);
235 err = -ENOMEM;
236 goto error;
237 }
238
239 /* remove stale waits */
240 if (job->num_waitchk) {
241 err = nvhost_syncpt_wait_check(sp,
242 job->nvmap,
243 job->waitchk_mask,
244 job->waitchk,
245 job->num_waitchk);
246 if (err) {
247 dev_warn(&ch->dev->dev,
248 "nvhost_syncpt_wait_check failed: %d\n", err);
249 mutex_unlock(&ch->submitlock);
250 nvhost_module_idle(ch->dev);
251 goto error;
252 }
253 }
254
255 /* begin a CDMA submit */
256 err = nvhost_cdma_begin(&ch->cdma, job);
257 if (err) {
258 mutex_unlock(&ch->submitlock);
259 nvhost_module_idle(ch->dev);
260 goto error;
261 }
262
263 submit_ctxsave(job, ctxsave_waiter, ch->cur_ctx);
264 submit_ctxrestore(job);
265 ch->cur_ctx = job->hwctx;
266
267 syncval = nvhost_syncpt_incr_max(sp,
268 job->syncpt_id, user_syncpt_incrs);
269
270 job->syncpt_end = syncval;
271
272 /* add a setclass for modules that require it */
273 if (ch->dev->class)
274 nvhost_cdma_push(&ch->cdma,
275 nvhost_opcode_setclass(ch->dev->class, 0, 0),
276 NVHOST_OPCODE_NOOP);
277
278 if (job->null_kickoff)
279 submit_nullkickoff(job, user_syncpt_incrs);
280 else
281 submit_gathers(job);
282
283 sync_waitbases(ch, job->syncpt_end);
284
285 /* end CDMA submit & stash pinned hMems into sync queue */
286 nvhost_cdma_end(&ch->cdma, job);
287
288 trace_nvhost_channel_submitted(ch->dev->name,
289 prev_max, syncval);
290
291 /* schedule a submit complete interrupt */
292 err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
293 job->syncpt_id, syncval,
294 NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ch,
295 completed_waiter,
296 NULL);
297 completed_waiter = NULL;
298 WARN(err, "Failed to set submit complete interrupt");
299
300 mutex_unlock(&ch->submitlock);
301
302 return 0;
303
304error:
305 kfree(ctxsave_waiter);
306 kfree(completed_waiter);
307 return err;
308}
309
310int host1x_channel_read_3d_reg(
311 struct nvhost_channel *channel,
312 struct nvhost_hwctx *hwctx,
313 u32 offset,
314 u32 *value)
315{
316 struct host1x_hwctx *hwctx_to_save = NULL;
317 struct nvhost_hwctx_handler *h = hwctx->h;
318 struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
319 bool need_restore = false;
320 u32 syncpt_incrs = 4;
321 unsigned int pending = 0;
322 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
323 void *ref;
324 void *ctx_waiter, *read_waiter, *completed_waiter;
325 struct nvhost_job *job;
326 u32 syncval;
327 int err;
328
329 if (hwctx && hwctx->has_timedout)
330 return -ETIMEDOUT;
331
332 ctx_waiter = nvhost_intr_alloc_waiter();
333 read_waiter = nvhost_intr_alloc_waiter();
334 completed_waiter = nvhost_intr_alloc_waiter();
335 if (!ctx_waiter || !read_waiter || !completed_waiter) {
336 err = -ENOMEM;
337 goto done;
338 }
339
340 job = nvhost_job_alloc(channel, hwctx,
341 NULL,
342 nvhost_get_host(channel->dev)->nvmap, 0, 0);
343 if (!job) {
344 err = -ENOMEM;
345 goto done;
346 }
347
348 /* keep module powered */
349 nvhost_module_busy(channel->dev);
350
351 /* get submit lock */
352 err = mutex_lock_interruptible(&channel->submitlock);
353 if (err) {
354 nvhost_module_idle(channel->dev);
355 return err;
356 }
357
358 /* context switch */
359 if (channel->cur_ctx != hwctx) {
360 hwctx_to_save = channel->cur_ctx ?
361 to_host1x_hwctx(channel->cur_ctx) : NULL;
362 if (hwctx_to_save) {
363 syncpt_incrs += hwctx_to_save->save_incrs;
364 hwctx_to_save->hwctx.valid = true;
365 channel->ctxhandler->get(&hwctx_to_save->hwctx);
366 }
367 channel->cur_ctx = hwctx;
368 if (channel->cur_ctx && channel->cur_ctx->valid) {
369 need_restore = true;
370 syncpt_incrs += to_host1x_hwctx(channel->cur_ctx)
371 ->restore_incrs;
372 }
373 }
374
375 syncval = nvhost_syncpt_incr_max(&nvhost_get_host(channel->dev)->syncpt,
376 p->syncpt, syncpt_incrs);
377
378 job->syncpt_id = p->syncpt;
379 job->syncpt_incrs = syncpt_incrs;
380 job->syncpt_end = syncval;
381
382 /* begin a CDMA submit */
383 nvhost_cdma_begin(&channel->cdma, job);
384
385 /* push save buffer (pre-gather setup depends on unit) */
386 if (hwctx_to_save)
387 h->save_push(&hwctx_to_save->hwctx, &channel->cdma);
388
389 /* gather restore buffer */
390 if (need_restore)
391 nvhost_cdma_push(&channel->cdma,
392 nvhost_opcode_gather(to_host1x_hwctx(channel->cur_ctx)
393 ->restore_size),
394 to_host1x_hwctx(channel->cur_ctx)->restore_phys);
395
396 /* Switch to 3D - wait for it to complete what it was doing */
397 nvhost_cdma_push(&channel->cdma,
398 nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
399 nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
400 p->syncpt));
401 nvhost_cdma_push(&channel->cdma,
402 nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
403 NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
404 nvhost_class_host_wait_syncpt_base(p->syncpt,
405 p->waitbase, 1));
406 /* Tell 3D to send register value to FIFO */
407 nvhost_cdma_push(&channel->cdma,
408 nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1),
409 nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
410 offset, false));
411 nvhost_cdma_push(&channel->cdma,
412 nvhost_opcode_imm(NV_CLASS_HOST_INDDATA, 0),
413 NVHOST_OPCODE_NOOP);
414 /* Increment syncpt to indicate that FIFO can be read */
415 nvhost_cdma_push(&channel->cdma,
416 nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
417 p->syncpt),
418 NVHOST_OPCODE_NOOP);
419 /* Wait for value to be read from FIFO */
420 nvhost_cdma_push(&channel->cdma,
421 nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1),
422 nvhost_class_host_wait_syncpt_base(p->syncpt,
423 p->waitbase, 3));
424 /* Indicate submit complete */
425 nvhost_cdma_push(&channel->cdma,
426 nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1),
427 nvhost_class_host_incr_syncpt_base(p->waitbase, 4));
428 nvhost_cdma_push(&channel->cdma,
429 NVHOST_OPCODE_NOOP,
430 nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
431 p->syncpt));
432
433 /* end CDMA submit */
434 nvhost_cdma_end(&channel->cdma, job);
435 nvhost_job_put(job);
436 job = NULL;
437
438 /*
439 * schedule a context save interrupt (to drain the host FIFO
440 * if necessary, and to release the restore buffer)
441 */
442 if (hwctx_to_save) {
443 err = nvhost_intr_add_action(
444 &nvhost_get_host(channel->dev)->intr,
445 p->syncpt,
446 syncval - syncpt_incrs
447 + hwctx_to_save->save_incrs
448 - 1,
449 NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
450 ctx_waiter,
451 NULL);
452 ctx_waiter = NULL;
453 WARN(err, "Failed to set context save interrupt");
454 }
455
456 /* Wait for FIFO to be ready */
457 err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
458 p->syncpt, syncval - 2,
459 NVHOST_INTR_ACTION_WAKEUP, &wq,
460 read_waiter,
461 &ref);
462 read_waiter = NULL;
463 WARN(err, "Failed to set wakeup interrupt");
464 wait_event(wq,
465 nvhost_syncpt_is_expired(&nvhost_get_host(channel->dev)->syncpt,
466 p->syncpt, syncval - 2));
467 nvhost_intr_put_ref(&nvhost_get_host(channel->dev)->intr, ref);
468
469 /* Read the register value from FIFO */
470 err = host1x_drain_read_fifo(channel->aperture,
471 value, 1, &pending);
472
473 /* Indicate we've read the value */
474 nvhost_syncpt_cpu_incr(&nvhost_get_host(channel->dev)->syncpt,
475 p->syncpt);
476
477 /* Schedule a submit complete interrupt */
478 err = nvhost_intr_add_action(&nvhost_get_host(channel->dev)->intr,
479 p->syncpt, syncval,
480 NVHOST_INTR_ACTION_SUBMIT_COMPLETE, channel,
481 completed_waiter, NULL);
482 completed_waiter = NULL;
483 WARN(err, "Failed to set submit complete interrupt");
484
485 mutex_unlock(&channel->submitlock);
486
487done:
488 kfree(ctx_waiter);
489 kfree(read_waiter);
490 kfree(completed_waiter);
491 return err;
492}
493
494
495int host1x_drain_read_fifo(void __iomem *chan_regs,
496 u32 *ptr, unsigned int count, unsigned int *pending)
497{
498 unsigned int entries = *pending;
499 unsigned long timeout = jiffies + NV_FIFO_READ_TIMEOUT;
500 while (count) {
501 unsigned int num;
502
503 while (!entries && time_before(jiffies, timeout)) {
504 /* query host for number of entries in fifo */
505 entries = HOST1X_VAL(CHANNEL_FIFOSTAT, OUTFENTRIES,
506 readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
507 if (!entries)
508 cpu_relax();
509 }
510
511 /* timeout -> return error */
512 if (!entries)
513 return -EIO;
514
515 num = min(entries, count);
516 entries -= num;
517 count -= num;
518
519 while (num & ~0x3) {
520 u32 arr[4];
521 arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
522 arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
523 arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
524 arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
525 memcpy(ptr, arr, 4*sizeof(u32));
526 ptr += 4;
527 num -= 4;
528 }
529 while (num--)
530 *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
531 }
532 *pending = entries;
533
534 return 0;
535}
536
537int host1x_save_context(struct nvhost_device *dev, u32 syncpt_id)
538{
539 struct nvhost_channel *ch = dev->channel;
540 struct nvhost_hwctx *hwctx_to_save;
541 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
542 u32 syncpt_incrs, syncpt_val;
543 int err = 0;
544 void *ref;
545 void *ctx_waiter = NULL, *wakeup_waiter = NULL;
546 struct nvhost_job *job;
547
548 ctx_waiter = nvhost_intr_alloc_waiter();
549 wakeup_waiter = nvhost_intr_alloc_waiter();
550 if (!ctx_waiter || !wakeup_waiter) {
551 err = -ENOMEM;
552 goto done;
553 }
554
555 if (dev->busy)
556 dev->busy(dev);
557
558 mutex_lock(&ch->submitlock);
559 hwctx_to_save = ch->cur_ctx;
560 if (!hwctx_to_save) {
561 mutex_unlock(&ch->submitlock);
562 goto done;
563 }
564
565 job = nvhost_job_alloc(ch, hwctx_to_save,
566 NULL,
567 nvhost_get_host(ch->dev)->nvmap, 0, 0);
568 if (IS_ERR_OR_NULL(job)) {
569 err = PTR_ERR(job);
570 mutex_unlock(&ch->submitlock);
571 goto done;
572 }
573
574 hwctx_to_save->valid = true;
575 ch->ctxhandler->get(hwctx_to_save);
576 ch->cur_ctx = NULL;
577
578 syncpt_incrs = to_host1x_hwctx(hwctx_to_save)->save_incrs;
579 syncpt_val = nvhost_syncpt_incr_max(&nvhost_get_host(ch->dev)->syncpt,
580 syncpt_id, syncpt_incrs);
581
582 job->syncpt_id = syncpt_id;
583 job->syncpt_incrs = syncpt_incrs;
584 job->syncpt_end = syncpt_val;
585
586 err = nvhost_cdma_begin(&ch->cdma, job);
587 if (err) {
588 mutex_unlock(&ch->submitlock);
589 goto done;
590 }
591
592 ch->ctxhandler->save_push(hwctx_to_save, &ch->cdma);
593 nvhost_cdma_end(&ch->cdma, job);
594 nvhost_job_put(job);
595 job = NULL;
596
597 err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr, syncpt_id,
598 syncpt_val - syncpt_incrs +
599 to_host1x_hwctx(hwctx_to_save)->save_thresh,
600 NVHOST_INTR_ACTION_CTXSAVE, hwctx_to_save,
601 ctx_waiter,
602 NULL);
603 ctx_waiter = NULL;
604 WARN(err, "Failed to set context save interrupt");
605
606 err = nvhost_intr_add_action(&nvhost_get_host(ch->dev)->intr,
607 syncpt_id, syncpt_val,
608 NVHOST_INTR_ACTION_WAKEUP, &wq,
609 wakeup_waiter,
610 &ref);
611 wakeup_waiter = NULL;
612 WARN(err, "Failed to set wakeup interrupt");
613 wait_event(wq,
614 nvhost_syncpt_is_expired(&nvhost_get_host(ch->dev)->syncpt,
615 syncpt_id, syncpt_val));
616
617 nvhost_intr_put_ref(&nvhost_get_host(ch->dev)->intr, ref);
618
619 nvhost_cdma_update(&ch->cdma);
620
621 mutex_unlock(&ch->submitlock);
622
623done:
624 kfree(ctx_waiter);
625 kfree(wakeup_waiter);
626 return err;
627}
diff --git a/drivers/video/tegra/host/host1x/host1x_channel.h b/drivers/video/tegra/host/host1x/host1x_channel.h
new file mode 100644
index 00000000000..4113dbcada2
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_channel.h
@@ -0,0 +1,45 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_channel.h
3 *
4 * Tegra Graphics Host Channel
5 *
6 * Copyright (c) 2011-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __NVHOST_HOST1X_CHANNEL_H
22#define __NVHOST_HOST1X_CHANNEL_H
23
24struct nvhost_job;
25struct nvhost_channel;
26struct nvhost_hwctx;
27struct nvhost_device;
28
29/* Submit job to a host1x client */
30int host1x_channel_submit(struct nvhost_job *job);
31
32/* Read 3d register via FIFO */
33int host1x_channel_read_3d_reg(
34 struct nvhost_channel *channel,
35 struct nvhost_hwctx *hwctx,
36 u32 offset,
37 u32 *value);
38
39/* Reads words from FIFO */
40int host1x_drain_read_fifo(void __iomem *chan_regs,
41 u32 *ptr, unsigned int count, unsigned int *pending);
42
43int host1x_save_context(struct nvhost_device *dev, u32 syncpt_id);
44
45#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_debug.c b/drivers/video/tegra/host/host1x/host1x_debug.c
new file mode 100644
index 00000000000..1a1d764bbd6
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_debug.c
@@ -0,0 +1,404 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_debug.c
3 *
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Erik Gilling <konkers@android.com>
6 *
7 * Copyright (C) 2011 NVIDIA Corporation
8 *
9 * This software is licensed under the terms of the GNU General Public
10 * License version 2, as published by the Free Software Foundation, and
11 * may be copied, distributed, and modified under those terms.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
22#include <linux/mm.h>
23
24#include <linux/io.h>
25
26#include "dev.h"
27#include "debug.h"
28#include "nvhost_cdma.h"
29#include "../../nvmap/nvmap.h"
30
31#include "host1x_hardware.h"
32#include "host1x_cdma.h"
33
34#define NVHOST_DEBUG_MAX_PAGE_OFFSET 102400
35
36enum {
37 NVHOST_DBG_STATE_CMD = 0,
38 NVHOST_DBG_STATE_DATA = 1,
39 NVHOST_DBG_STATE_GATHER = 2
40};
41
42static int show_channel_command(struct output *o, u32 addr, u32 val, int *count)
43{
44 unsigned mask;
45 unsigned subop;
46
47 switch (val >> 28) {
48 case 0x0:
49 mask = val & 0x3f;
50 if (mask) {
51 nvhost_debug_output(o,
52 "SETCL(class=%03x, offset=%03x, mask=%02x, [",
53 val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
54 *count = hweight8(mask);
55 return NVHOST_DBG_STATE_DATA;
56 } else {
57 nvhost_debug_output(o, "SETCL(class=%03x)\n",
58 val >> 6 & 0x3ff);
59 return NVHOST_DBG_STATE_CMD;
60 }
61
62 case 0x1:
63 nvhost_debug_output(o, "INCR(offset=%03x, [",
64 val >> 16 & 0xfff);
65 *count = val & 0xffff;
66 return NVHOST_DBG_STATE_DATA;
67
68 case 0x2:
69 nvhost_debug_output(o, "NONINCR(offset=%03x, [",
70 val >> 16 & 0xfff);
71 *count = val & 0xffff;
72 return NVHOST_DBG_STATE_DATA;
73
74 case 0x3:
75 mask = val & 0xffff;
76 nvhost_debug_output(o, "MASK(offset=%03x, mask=%03x, [",
77 val >> 16 & 0xfff, mask);
78 *count = hweight16(mask);
79 return NVHOST_DBG_STATE_DATA;
80
81 case 0x4:
82 nvhost_debug_output(o, "IMM(offset=%03x, data=%03x)\n",
83 val >> 16 & 0xfff, val & 0xffff);
84 return NVHOST_DBG_STATE_CMD;
85
86 case 0x5:
87 nvhost_debug_output(o, "RESTART(offset=%08x)\n", val << 4);
88 return NVHOST_DBG_STATE_CMD;
89
90 case 0x6:
91 nvhost_debug_output(o, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
92 val >> 16 & 0xfff, val >> 15 & 0x1, val >> 14 & 0x1,
93 val & 0x3fff);
94 *count = val & 0x3fff; /* TODO: insert */
95 return NVHOST_DBG_STATE_GATHER;
96
97 case 0xe:
98 subop = val >> 24 & 0xf;
99 if (subop == 0)
100 nvhost_debug_output(o, "ACQUIRE_MLOCK(index=%d)\n",
101 val & 0xff);
102 else if (subop == 1)
103 nvhost_debug_output(o, "RELEASE_MLOCK(index=%d)\n",
104 val & 0xff);
105 else
106 nvhost_debug_output(o, "EXTEND_UNKNOWN(%08x)\n", val);
107 return NVHOST_DBG_STATE_CMD;
108
109 default:
110 return NVHOST_DBG_STATE_CMD;
111 }
112}
113
114static void show_channel_gather(struct output *o, u32 addr,
115 phys_addr_t phys_addr, u32 words, struct nvhost_cdma *cdma);
116
/*
 * Decode and print one 32-bit push-buffer word.
 *
 * Small state machine driven by *state/*count: a word is either a new
 * command opcode (STATE_CMD), payload data for the previous command
 * (STATE_DATA), or the address argument of a GATHER (STATE_GATHER).
 * When @cdma is non-NULL a gather's target buffer is dumped as well.
 *
 * NOTE(review): start_count/dont_print are function-static, so output
 * from concurrent dumps would interleave state — presumably dumps are
 * serialized by the caller; verify.
 */
static void show_channel_word(struct output *o, int *state, int *count,
		u32 addr, u32 val, struct nvhost_cdma *cdma)
{
	static int start_count, dont_print;

	switch (*state) {
	case NVHOST_DBG_STATE_CMD:
		/* addr == 0 means the word's bus address is unknown
		 * (e.g. when peeking the command FIFO) */
		if (addr)
			nvhost_debug_output(o, "%08x: %08x:", addr, val);
		else
			nvhost_debug_output(o, "%08x:", val);

		*state = show_channel_command(o, addr, val, count);
		dont_print = 0;
		start_count = *count;
		/* data command with zero payload: close the bracket now */
		if (*state == NVHOST_DBG_STATE_DATA && *count == 0) {
			*state = NVHOST_DBG_STATE_CMD;
			nvhost_debug_output(o, "])\n");
		}
		break;

	case NVHOST_DBG_STATE_DATA:
		(*count)--;
		/* print at most 64 payload words, then truncate once */
		if (start_count - *count < 64)
			nvhost_debug_output(o, "%08x%s",
				val, *count > 0 ? ", " : "])\n");
		else if (!dont_print && (*count > 0)) {
			nvhost_debug_output(o, "[truncated; %d more words]\n",
				*count);
			dont_print = 1;
		}
		if (*count == 0)
			*state = NVHOST_DBG_STATE_CMD;
		break;

	case NVHOST_DBG_STATE_GATHER:
		*state = NVHOST_DBG_STATE_CMD;
		nvhost_debug_output(o, "%08x]):\n", val);
		/* val is the gather's physical address; dump its contents */
		if (cdma) {
			show_channel_gather(o, addr, val,
					*count, cdma);
		}
		break;
	}
}
162
/*
 * Dump the contents of a GATHER's target buffer.
 *
 * @addr is the push-buffer bus address of the gather opcode (used to
 * look up the nvmap handle recorded for that slot), @phys_addr the
 * gather's target bus address, @words its length.  Compiles to nothing
 * when nvmap is not the memory manager.
 */
static void show_channel_gather(struct output *o, u32 addr,
		phys_addr_t phys_addr,
		u32 words, struct nvhost_cdma *cdma)
{
#if defined(CONFIG_TEGRA_NVMAP)
	/* Map dmaget cursor to corresponding nvmap_handle */
	struct push_buffer *pb = &cdma->push_buffer;
	u32 cur = addr - pb->phys;
	/* one client/handle pair per 8-byte opcode pair in the push buffer */
	struct nvmap_client_handle *nvmap = &pb->nvmap[cur/8];
	struct nvmap_handle_ref ref;
	u32 *map_addr, offset;
	phys_addr_t pin_addr;
	int state, count, i;

	/* sentinel handle marks a context-save push, not a real buffer */
	if ((u32)nvmap->handle == NVHOST_CDMA_PUSH_GATHER_CTXSAVE) {
		nvhost_debug_output(o, "[context save]\n");
		return;
	}

	if (!nvmap->handle || !nvmap->client
			|| atomic_read(&nvmap->handle->ref) < 1) {
		nvhost_debug_output(o, "[already deallocated]\n");
		return;
	}

	/* Create a fake nvmap_handle_ref - nvmap requires it
	 * but accesses only the first field - nvmap_handle */
	ref.handle = nvmap->handle;

	map_addr = nvmap_mmap(&ref);
	if (!map_addr) {
		nvhost_debug_output(o, "[could not mmap]\n");
		return;
	}

	/* Get base address from nvmap */
	pin_addr = nvmap_pin(nvmap->client, &ref);
	if (IS_ERR_VALUE(pin_addr)) {
		nvhost_debug_output(o, "[couldn't pin]\n");
		nvmap_munmap(&ref, map_addr);
		return;
	}

	offset = phys_addr - pin_addr;
	/*
	 * Sometimes we're given different hardware address to the same
	 * page - in these cases the offset will get an invalid number and
	 * we just have to bail out.
	 */
	if (offset > NVHOST_DEBUG_MAX_PAGE_OFFSET) {
		nvhost_debug_output(o, "[address mismatch]\n");
	} else {
		/* GATHER buffer starts always with commands */
		state = NVHOST_DBG_STATE_CMD;
		for (i = 0; i < words; i++)
			show_channel_word(o, &state, &count,
					phys_addr + i * 4,
					*(map_addr + offset/4 + i),
					cdma);
	}
	nvmap_unpin(nvmap->client, &ref);
	nvmap_munmap(&ref, map_addr);
#endif
}
227
228static void show_channel_pair(struct output *o, u32 addr,
229 u32 w0, u32 w1, struct nvhost_cdma *cdma)
230{
231 int state = NVHOST_DBG_STATE_CMD;
232 int count;
233
234 show_channel_word(o, &state, &count, addr, w0, cdma);
235 show_channel_word(o, &state, &count, addr+4, w1, cdma);
236}
237
238/**
239 * Retrieve the op pair at a slot offset from a DMA address
240 */
241static void cdma_peek(struct nvhost_cdma *cdma,
242 u32 dmaget, int slot, u32 *out)
243{
244 u32 offset = dmaget - cdma->push_buffer.phys;
245 u32 *p = cdma->push_buffer.mapped;
246
247 offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2;
248 out[0] = p[offset];
249 out[1] = p[offset + 1];
250}
251
252u32 previous_oppair(struct nvhost_cdma *cdma, u32 cur)
253{
254 u32 pb = cdma->push_buffer.phys;
255 u32 prev = cur-8;
256 if (prev < pb)
257 prev += PUSH_BUFFER_SIZE;
258 return prev;
259}
260
/*
 * Dump one channel's CDMA state: DMA pointers, the command the channel
 * is currently blocked on (decoded from CBSTAT/CBREAD), and the opcode
 * pair at the current dmaget cursor.
 */
static void t20_debug_show_channel_cdma(struct nvhost_master *m,
	struct nvhost_channel *ch, struct output *o, int chid)
{
	struct nvhost_channel *channel = ch;
	struct nvhost_cdma *cdma = &channel->cdma;
	u32 dmaput, dmaget, dmactrl;
	u32 cbstat, cbread;
	u32 val, base, baseval;
	u32 pbw[2];	/* opcode pair at the dmaget cursor */

	dmaput = readl(channel->aperture + HOST1X_CHANNEL_DMAPUT);
	dmaget = readl(channel->aperture + HOST1X_CHANNEL_DMAGET);
	dmactrl = readl(channel->aperture + HOST1X_CHANNEL_DMACTRL);
	cbread = readl(m->sync_aperture + HOST1X_SYNC_CBREAD_x(chid));
	cbstat = readl(m->sync_aperture + HOST1X_SYNC_CBSTAT_x(chid));

	nvhost_debug_output(o, "%d-%s (%d): ", chid,
			channel->dev->name,
			channel->dev->refcount);

	/* DMA stopped or push buffer not set up: nothing to decode */
	if (HOST1X_VAL(CHANNEL_DMACTRL, DMASTOP, dmactrl)
		|| !channel->cdma.push_buffer.mapped) {
		nvhost_debug_output(o, "inactive\n\n");
		return;
	}

	/* cbstat is (class << 16) | method; class 0x1 is host1x */
	switch (cbstat) {
	case 0x00010008:
		/* host1x class, WAIT_SYNCPT method (0x8) */
		nvhost_debug_output(o, "waiting on syncpt %d val %d\n",
			cbread >> 24, cbread & 0xffffff);
		break;

	case 0x00010009:
		/* host1x class, WAIT_SYNCPT_BASE method (0x9) */
		base = (cbread >> 16) & 0xff;
		val = readl(m->sync_aperture +
				HOST1X_SYNC_SYNCPT_BASE_x(base));
		baseval = HOST1X_VAL(SYNC_SYNCPT_BASE_0, BASE, val);
		val = cbread & 0xffff;
		nvhost_debug_output(o, "waiting on syncpt %d val %d "
			"(base %d = %d; offset = %d)\n",
			cbread >> 24, baseval + val,
			base, baseval, val);
		break;

	default:
		nvhost_debug_output(o,
			"active class %02x, offset %04x, val %08x\n",
			HOST1X_VAL(SYNC_CBSTAT_0, CBCLASS0, cbstat),
			HOST1X_VAL(SYNC_CBSTAT_0, CBOFFSET0, cbstat),
			cbread);
		break;
	}

	nvhost_debug_output(o, "DMAPUT %08x, DMAGET %08x, DMACTL %08x\n",
		dmaput, dmaget, dmactrl);
	nvhost_debug_output(o, "CBREAD %08x, CBSTAT %08x\n", cbread, cbstat);

	/* show the most recently fetched opcode pair */
	cdma_peek(cdma, dmaget, -1, pbw);
	show_channel_pair(o, previous_oppair(cdma, dmaget),
			pbw[0], pbw[1], &channel->cdma);
	nvhost_debug_output(o, "\n");
}
323
/*
 * Dump the channel's command FIFO contents through the CFPEEK debug
 * interface, decoding the words as opcodes.
 */
void t20_debug_show_channel_fifo(struct nvhost_master *m,
	struct nvhost_channel *ch, struct output *o, int chid)
{
	u32 val, rd_ptr, wr_ptr, start, end;
	struct nvhost_channel *channel = ch;
	int state, count;

	nvhost_debug_output(o, "%d: fifo:\n", chid);

	val = readl(channel->aperture + HOST1X_CHANNEL_FIFOSTAT);
	nvhost_debug_output(o, "FIFOSTAT %08x\n", val);
	if (HOST1X_VAL(CHANNEL_FIFOSTAT, CFEMPTY, val)) {
		nvhost_debug_output(o, "[empty]\n");
		return;
	}

	/* enable FIFO peek for this channel and fetch its rd/wr cursors */
	writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
	writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1)
			| HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid),
		m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);

	val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_PTRS);
	rd_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_RD_PTR, val);
	wr_ptr = HOST1X_VAL(SYNC_CFPEEK_PTRS, CF_WR_PTR, val);

	/* this channel's slice of the FIFO wraps within [start, end] */
	val = readl(m->sync_aperture + HOST1X_SYNC_CFx_SETUP(chid));
	start = HOST1X_VAL(SYNC_CF0_SETUP, BASE, val);
	end = HOST1X_VAL(SYNC_CF0_SETUP, LIMIT, val);

	state = NVHOST_DBG_STATE_CMD;

	do {
		/* point CFPEEK at rd_ptr and read one word */
		writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
		writel(HOST1X_CREATE(SYNC_CFPEEK_CTRL, ENA, 1)
				| HOST1X_CREATE(SYNC_CFPEEK_CTRL, CHANNR, chid)
				| HOST1X_CREATE(SYNC_CFPEEK_CTRL, ADDR, rd_ptr),
			m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
		val = readl(m->sync_aperture + HOST1X_SYNC_CFPEEK_READ);

		/* addr=0: FIFO words have no known bus address */
		show_channel_word(o, &state, &count, 0, val, NULL);

		if (rd_ptr == end)
			rd_ptr = start;
		else
			rd_ptr++;
	} while (rd_ptr != wr_ptr);

	/* close a payload list the FIFO ended in the middle of */
	if (state == NVHOST_DBG_STATE_DATA)
		nvhost_debug_output(o, ", ...])\n");
	nvhost_debug_output(o, "\n");

	writel(0x0, m->sync_aperture + HOST1X_SYNC_CFPEEK_CTRL);
}
377
378static void t20_debug_show_mlocks(struct nvhost_master *m, struct output *o)
379{
380 u32 __iomem *mlo_regs = m->sync_aperture + HOST1X_SYNC_MLOCK_OWNER_0;
381 int i;
382
383 nvhost_debug_output(o, "---- mlocks ----\n");
384 for (i = 0; i < NV_HOST1X_NB_MLOCKS; i++) {
385 u32 owner = readl(mlo_regs + i);
386 if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CH_OWNS, owner))
387 nvhost_debug_output(o, "%d: locked by channel %d\n",
388 i, HOST1X_VAL(SYNC_MLOCK_OWNER_0, CHID, owner));
389 else if (HOST1X_VAL(SYNC_MLOCK_OWNER_0, CPU_OWNS, owner))
390 nvhost_debug_output(o, "%d: locked by cpu\n", i);
391 else
392 nvhost_debug_output(o, "%d: unlocked\n", i);
393 }
394 nvhost_debug_output(o, "\n");
395}
396
397int nvhost_init_t20_debug_support(struct nvhost_master *host)
398{
399 host->op.debug.show_channel_cdma = t20_debug_show_channel_cdma;
400 host->op.debug.show_channel_fifo = t20_debug_show_channel_fifo;
401 host->op.debug.show_mlocks = t20_debug_show_mlocks;
402
403 return 0;
404}
diff --git a/drivers/video/tegra/host/host1x/host1x_hardware.h b/drivers/video/tegra/host/host1x/host1x_hardware.h
new file mode 100644
index 00000000000..d13d5752364
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_hardware.h
@@ -0,0 +1,274 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_hardware.h
3 *
4 * Tegra Graphics Host Register Offsets
5 *
6 * Copyright (c) 2010-2012 NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __NVHOST_HOST1X_HOST1X_HARDWARE_H
22#define __NVHOST_HOST1X_HOST1X_HARDWARE_H
23
24#include <linux/types.h>
25#include <linux/bitops.h>
26
27/* class ids */
28enum {
29 NV_HOST1X_CLASS_ID = 0x1,
30 NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
31 NV_GRAPHICS_3D_CLASS_ID = 0x60
32};
33
34
35/* channel registers */
36#define NV_HOST1X_CHANNELS 8
37#define NV_HOST1X_CHANNEL0_BASE 0
38#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
39#define NV_HOST1X_SYNC_MLOCK_NUM 16
40
41#define HOST1X_VAL(reg, field, regdata) \
42 ((regdata >> HOST1X_##reg##_##field##_SHIFT) \
43 & HOST1X_##reg##_##field##_MASK)
44#define HOST1X_CREATE(reg, field, data) \
45 ((data & HOST1X_##reg##_##field##_MASK) \
46 << HOST1X_##reg##_##field##_SHIFT) \
47
48#define HOST1X_CHANNEL_FIFOSTAT 0x00
49#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_SHIFT 10
50#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_MASK 0x1
51#define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_SHIFT 24
52#define HOST1X_CHANNEL_FIFOSTAT_OUTFENTRIES_MASK 0x1f
53#define HOST1X_CHANNEL_INDDATA 0x0c
54#define HOST1X_CHANNEL_DMASTART 0x14
55#define HOST1X_CHANNEL_DMAPUT 0x18
56#define HOST1X_CHANNEL_DMAGET 0x1c
57#define HOST1X_CHANNEL_DMAEND 0x20
58#define HOST1X_CHANNEL_DMACTRL 0x24
59#define HOST1X_CHANNEL_DMACTRL_DMASTOP_SHIFT 0
60#define HOST1X_CHANNEL_DMACTRL_DMASTOP_MASK 0x1
61#define HOST1X_CHANNEL_DMACTRL_DMAGETRST_SHIFT 1
62#define HOST1X_CHANNEL_DMACTRL_DMAGETRST_MASK 0x1
63#define HOST1X_CHANNEL_DMACTRL_DMAINITGET_SHIFT 2
64#define HOST1X_CHANNEL_DMACTRL_DMAINITGET_MASK 0x1
65
66#define HOST1X_CHANNEL_SYNC_REG_BASE 0x3000
67
68#define HOST1X_SYNC_INTMASK 0x4
69#define HOST1X_SYNC_INTC0MASK 0x8
70#define HOST1X_SYNC_HINTSTATUS 0x20
71#define HOST1X_SYNC_HINTMASK 0x24
72#define HOST1X_SYNC_HINTSTATUS_EXT 0x28
73#define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_SHIFT 30
74#define HOST1X_SYNC_HINTSTATUS_EXT_IP_READ_INT_MASK 0x1
75#define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_SHIFT 31
76#define HOST1X_SYNC_HINTSTATUS_EXT_IP_WRITE_INT_MASK 0x1
77#define HOST1X_SYNC_HINTMASK_EXT 0x2c
78#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS 0x40
79#define HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS 0x48
80#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE 0x60
81#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 0x68
82#define HOST1X_SYNC_CF0_SETUP 0x80
83#define HOST1X_SYNC_CF0_SETUP_BASE_SHIFT 0
84#define HOST1X_SYNC_CF0_SETUP_BASE_MASK 0x1ff
85#define HOST1X_SYNC_CF0_SETUP_LIMIT_SHIFT 16
86#define HOST1X_SYNC_CF0_SETUP_LIMIT_MASK 0x1ff
87#define HOST1X_SYNC_CFx_SETUP(x) (HOST1X_SYNC_CF0_SETUP + (4 * (x)))
88
89#define HOST1X_SYNC_CMDPROC_STOP 0xac
90#define HOST1X_SYNC_CH_TEARDOWN 0xb0
91#define HOST1X_SYNC_USEC_CLK 0x1a4
92#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG 0x1a8
93#define HOST1X_SYNC_IP_BUSY_TIMEOUT 0x1bc
94#define HOST1X_SYNC_IP_READ_TIMEOUT_ADDR 0x1c0
95#define HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR 0x1c4
96#define HOST1X_SYNC_MLOCK_0 0x2c0
97#define HOST1X_SYNC_MLOCK_OWNER_0 0x340
98#define HOST1X_SYNC_MLOCK_OWNER_0_CHID_SHIFT 8
99#define HOST1X_SYNC_MLOCK_OWNER_0_CHID_MASK 0xf
100#define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_SHIFT 1
101#define HOST1X_SYNC_MLOCK_OWNER_0_CPU_OWNS_MASK 0x1
102#define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_SHIFT 0
103#define HOST1X_SYNC_MLOCK_OWNER_0_CH_OWNS_MASK 0x1
104#define HOST1X_SYNC_SYNCPT_0 0x400
105#define HOST1X_SYNC_SYNCPT_INT_THRESH_0 0x500
106
107#define HOST1X_SYNC_SYNCPT_BASE_0 0x600
108#define HOST1X_SYNC_SYNCPT_BASE_0_BASE_SHIFT 0
109#define HOST1X_SYNC_SYNCPT_BASE_0_BASE_MASK 0xffff
110#define HOST1X_SYNC_SYNCPT_BASE_x(x) (HOST1X_SYNC_SYNCPT_BASE_0 + (4 * (x)))
111
112#define HOST1X_SYNC_SYNCPT_CPU_INCR 0x700
113
114#define HOST1X_SYNC_CBREAD_0 0x720
115#define HOST1X_SYNC_CBREAD_x(x) (HOST1X_SYNC_CBREAD_0 + (4 * (x)))
116#define HOST1X_SYNC_CFPEEK_CTRL 0x74c
117#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_SHIFT 0
118#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_MASK 0x1ff
119#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_SHIFT 16
120#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_MASK 0x7
121#define HOST1X_SYNC_CFPEEK_CTRL_ENA_SHIFT 31
122#define HOST1X_SYNC_CFPEEK_CTRL_ENA_MASK 0x1
123#define HOST1X_SYNC_CFPEEK_READ 0x750
124#define HOST1X_SYNC_CFPEEK_PTRS 0x754
125#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_SHIFT 0
126#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_MASK 0x1ff
127#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_SHIFT 16
128#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_MASK 0x1ff
129#define HOST1X_SYNC_CBSTAT_0 0x758
130#define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_SHIFT 0
131#define HOST1X_SYNC_CBSTAT_0_CBOFFSET0_MASK 0xffff
132#define HOST1X_SYNC_CBSTAT_0_CBCLASS0_SHIFT 16
133#define HOST1X_SYNC_CBSTAT_0_CBCLASS0_MASK 0xffff
134#define HOST1X_SYNC_CBSTAT_x(x) (HOST1X_SYNC_CBSTAT_0 + (4 * (x)))
135
136/* sync registers */
137#define NV_HOST1X_SYNCPT_NB_PTS 32
138#define NV_HOST1X_SYNCPT_NB_BASES 8
139#define NV_HOST1X_NB_MLOCKS 16
140
141/* host class methods */
142enum {
143 NV_CLASS_HOST_INCR_SYNCPT = 0x0,
144 NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
145 NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
146 NV_CLASS_HOST_LOAD_SYNCPT_BASE = 0xb,
147 NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
148 NV_CLASS_HOST_INDOFF = 0x2d,
149 NV_CLASS_HOST_INDDATA = 0x2e
150};
151/* sync point conditionals */
152enum {
153 NV_SYNCPT_IMMEDIATE = 0x0,
154 NV_SYNCPT_OP_DONE = 0x1,
155 NV_SYNCPT_RD_DONE = 0x2,
156 NV_SYNCPT_REG_WR_SAFE = 0x3,
157};
158
/* WAIT_SYNCPT method payload: index in [31:24], threshold in [23:0]. */
static inline u32 nvhost_class_host_wait_syncpt(
	unsigned indx, unsigned threshold)
{
	u32 thresh_field = threshold & 0xffffff;

	return ((u32)indx << 24) | thresh_field;
}
164
/* LOAD_SYNCPT_BASE payload: base index in [31:24], value in [23:0]. */
static inline u32 nvhost_class_host_load_syncpt_base(
	unsigned indx, unsigned threshold)
{
	u32 value_field = threshold & 0xffffff;

	return value_field | ((u32)indx << 24);
}
170
/* WAIT_SYNCPT_BASE payload: syncpt in [31:24], base in [23:16],
 * offset in the low bits. */
static inline u32 nvhost_class_host_wait_syncpt_base(
	unsigned indx, unsigned base_indx, unsigned offset)
{
	u32 v = offset;

	v |= (u32)base_indx << 16;
	v |= (u32)indx << 24;
	return v;
}
176
/* INCR_SYNCPT_BASE payload: base index in [31:24], increment amount
 * in the low bits. */
static inline u32 nvhost_class_host_incr_syncpt_base(
	unsigned base_indx, unsigned offset)
{
	return offset | ((u32)base_indx << 24);
}
182
/* INCR_SYNCPT payload: condition in [15:8], syncpt index in [7:0]. */
static inline u32 nvhost_class_host_incr_syncpt(
	unsigned cond, unsigned indx)
{
	return indx | ((u32)cond << 8);
}
188
189enum {
190 NV_HOST_MODULE_HOST1X = 0,
191 NV_HOST_MODULE_MPE = 1,
192 NV_HOST_MODULE_GR3D = 6
193};
194
/* INDOFF payload for an indirect register write: write marker in the
 * top nibble, module in [23:18], word offset in [17:2], optional
 * auto-increment in bit 27. */
static inline u32 nvhost_class_host_indoff_reg_write(
	unsigned mod_id, unsigned offset, bool auto_inc)
{
	u32 indoff = (0xf << 28) | (mod_id << 18) | (offset << 2);

	if (auto_inc)
		indoff |= BIT(27);
	return indoff;
}
203
/* INDOFF payload for an indirect register read: module in [23:18],
 * word offset in [17:2], read flag in bit 0, optional auto-increment
 * in bit 27. */
static inline u32 nvhost_class_host_indoff_reg_read(
	unsigned mod_id, unsigned offset, bool auto_inc)
{
	u32 indoff = 1 | (offset << 2) | (mod_id << 18);

	if (auto_inc)
		indoff |= BIT(27);
	return indoff;
}
212
213
214/* cdma opcodes */
/* SETCL opcode (0): class in [15:6], method offset in [27:16],
 * write mask in [5:0]. */
static inline u32 nvhost_opcode_setclass(
	unsigned class_id, unsigned offset, unsigned mask)
{
	u32 op = (u32)offset << 16;

	op |= (u32)class_id << 6;
	op |= mask;
	return op | (0 << 28);
}
220
/* INCR opcode (1): start offset in [27:16], word count in [15:0]. */
static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
{
	u32 op = (u32)1 << 28;

	return op | ((u32)offset << 16) | count;
}
225
/* NONINCR opcode (2): fixed offset in [27:16], word count in [15:0]. */
static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
{
	u32 op = (u32)2 << 28;

	return op | ((u32)offset << 16) | count;
}
230
/* MASK opcode (3): base offset in [27:16], write mask in [15:0]. */
static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
{
	u32 op = (u32)3 << 28;

	return op | ((u32)offset << 16) | mask;
}
235
/* IMM opcode (4): offset in [27:16], immediate data in [15:0]. */
static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
{
	u32 op = (u32)4 << 28;

	return op | ((u32)offset << 16) | value;
}
240
241static inline u32 nvhost_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
242{
243 return nvhost_opcode_imm(NV_CLASS_HOST_INCR_SYNCPT,
244 nvhost_class_host_incr_syncpt(cond, indx));
245}
246
/* RESTART opcode (5): target address stored in 16-byte units. */
static inline u32 nvhost_opcode_restart(unsigned address)
{
	u32 addr_field = address >> 4;

	return ((u32)5 << 28) | addr_field;
}
251
/* GATHER opcode (6) with only a word count, no offset/insert bits. */
static inline u32 nvhost_opcode_gather(unsigned count)
{
	return ((u32)6 << 28) | count;
}
256
/* GATHER opcode (6) with insert enabled (bit 15) in non-incrementing
 * mode; offset in [27:16], count in the low bits. */
static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
{
	u32 op = ((u32)6 << 28) | ((u32)offset << 16);

	return op | BIT(15) | count;
}
261
/* GATHER opcode (6) with insert (bit 15) in incrementing mode
 * (bit 14); offset in [27:16], count in the low bits. */
static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
{
	u32 op = ((u32)6 << 28) | ((u32)offset << 16);

	return op | BIT(15) | BIT(14) | count;
}
266
267#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
268
/* Two-bit write mask: bit 0 plus the bit at distance (y - x). */
static inline u32 nvhost_mask2(unsigned x, unsigned y)
{
	return (1u << (y - x)) | 1u;
}
273
274#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_hwctx.h b/drivers/video/tegra/host/host1x/host1x_hwctx.h
new file mode 100644
index 00000000000..7587642d0e1
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_hwctx.h
@@ -0,0 +1,65 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_hwctx.h
3 *
4 * Tegra Graphics Host HOST1X Hardware Context Interface
5 *
6 * Copyright (c) 2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 */
22
23#ifndef __NVHOST_HOST1X_HWCTX_H
24#define __NVHOST_HOST1X_HWCTX_H
25
26#include <linux/kref.h>
27
28struct nvhost_hwctx_handler;
29struct nvhost_channel;
30
31#define to_host1x_hwctx_handler(handler) \
32 container_of((handler), struct host1x_hwctx_handler, h)
33#define to_host1x_hwctx(h) container_of((h), struct host1x_hwctx, hwctx)
34#define host1x_hwctx_handler(_hwctx) to_host1x_hwctx_handler((_hwctx)->hwctx.h)
35
/*
 * Per-context hardware state wrapper around the generic nvhost_hwctx.
 * Recover this wrapper from the embedded member with to_host1x_hwctx().
 */
struct host1x_hwctx {
	struct nvhost_hwctx hwctx;	/* generic context; must stay first-class embedded */

	/* context-save bookkeeping -- presumably filled in by the save
	 * path of the hwctx handler; confirm against the channel code */
	u32 save_incrs;		/* syncpt increments the save sequence performs */
	u32 save_thresh;	/* threshold to wait on for save completion */
	u32 save_slots;		/* pushbuffer slots the save sequence occupies */

	/* per-context restore command buffer */
	struct nvmap_handle_ref *restore;	/* nvmap handle of the restore buffer */
	u32 *restore_virt;	/* kernel mapping of the restore buffer */
	phys_addr_t restore_phys;	/* physical/DMA address of the restore buffer */
	u32 restore_size;	/* restore buffer size -- units (bytes vs words) not visible here */
	u32 restore_incrs;	/* syncpt increments the restore sequence performs */
};
49
/*
 * Per-unit context handler: embeds the generic nvhost_hwctx_handler and
 * adds the shared save buffer plus the syncpt/waitbase the handler uses.
 * Recover with to_host1x_hwctx_handler().
 */
struct host1x_hwctx_handler {
	struct nvhost_hwctx_handler h;	/* generic handler interface */

	u32 syncpt;		/* syncpt id used for context operations */
	u32 waitbase;		/* waitbase id used for context operations */
	u32 restore_size;	/* template restore buffer size for new contexts */
	u32 restore_incrs;	/* template restore increment count for new contexts */
	/* save command buffer, shared by all contexts of this handler */
	struct nvmap_handle_ref *save_buf;
	u32 save_incrs;		/* syncpt increments the save sequence performs */
	u32 save_thresh;	/* threshold offset for save completion */
	u32 save_slots;		/* pushbuffer slots the save sequence occupies */
	phys_addr_t save_phys;	/* physical/DMA address of save_buf */
	u32 save_size;		/* save buffer size -- units not visible here */
};
64
65#endif
diff --git a/drivers/video/tegra/host/host1x/host1x_intr.c b/drivers/video/tegra/host/host1x/host1x_intr.c
new file mode 100644
index 00000000000..47e984e2943
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_intr.c
@@ -0,0 +1,218 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_intr.c
3 *
4 * Tegra Graphics Host Interrupt Management
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23
24#include "nvhost_intr.h"
25#include "dev.h"
26#include "host1x_hardware.h"
27
28
29/*** HW host sync management ***/
30
/*
 * One-time host sync hardware setup: disable the ip_busy timeout and
 * max out the context-switch auto-ack timeout.
 */
static void t20_intr_init_host_sync(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* disable the ip_busy_timeout. this prevents write drops, etc.
	 * there's no real way to recover from a hung client anyway.
	 */
	writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/* increase the auto-ack timeout to the maximum value. 2d will hang
	 * otherwise on ap20.
	 */
	writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
}
45
/*
 * Program the host's microsecond clock register with @cpm (clocks per
 * microsecond), so hardware timeouts are expressed in wall time.
 */
static void t20_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* write microsecond clock register */
	writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK);
}
53
/*
 * Set the hardware interrupt threshold for syncpt @id to @thresh.
 * Only the low 16 bits are programmed -- the INT_THRESH register is
 * 16 bits wide, so the value is explicitly truncated here.
 */
static void t20_intr_set_syncpt_threshold(struct nvhost_intr *intr,
	u32 id, u32 thresh)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	thresh &= 0xffff;
	writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
}
62
/*
 * Unmask the threshold interrupt for syncpt @id on CPU0.
 * The ENABLE register is one bit per syncpt, hence BIT(id).
 */
static void t20_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}
69
/*
 * Mask every syncpt threshold interrupt and clear any pending status,
 * for both CPUs, leaving the syncpt interrupt hardware quiescent.
 */
static void t20_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	/* disable interrupts for both cpu's */
	writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);

	/* clear status for both cpu's */
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
	writel(0xffffffffu, sync_regs +
		HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
}
83
/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 *
 * Masks further threshold interrupts for this syncpt and acks the
 * pending bit, then defers the actual completion handling to the
 * threaded handler by returning IRQ_WAKE_THREAD.
 */
irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);

	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

	/* mask this syncpt's threshold interrupt so it can't refire
	 * before the threaded handler runs... */
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
	/* ...and ack the pending bit on CPU0 */
	writel(BIT(id),
		sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

	return IRQ_WAKE_THREAD;
}
103
/**
 * Host general interrupt service function
 * Handles read / write failures
 *
 * Reports the faulting address for IP read/write timeouts, then acks
 * all latched status bits so the interrupt line is deasserted.
 */
static irqreturn_t t20_intr_host1x_isr(int irq, void *dev_id)
{
	struct nvhost_intr *intr = dev_id;
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	u32 stat;
	u32 ext_stat;
	u32 addr;

	stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
	ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_READ_INT, ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
		pr_err("Host read timeout at address %x\n", addr);
	}

	if (HOST1X_VAL(SYNC_HINTSTATUS_EXT, IP_WRITE_INT, ext_stat)) {
		addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
		pr_err("Host write timeout at address %x\n", addr);
	}

	/* ack only the bits we observed; write-back of the read value
	 * clears the latched status */
	writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);

	return IRQ_HANDLED;
}
/*
 * Install the host general (non-syncpt) interrupt handler.
 *
 * The ordering is deliberate: mask everything and clear stale status
 * BEFORE request_irq() so no spurious interrupt fires into a
 * half-initialized handler, then enable the individual sources and
 * finally the master enable. Idempotent: returns 0 if already requested.
 */
static int t20_intr_request_host_general_irq(struct nvhost_intr *intr)
{
	void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
	int err;

	if (intr->host_general_irq_requested)
		return 0;

	/* master disable for general (not syncpt) host interrupts */
	writel(0, sync_regs + HOST1X_SYNC_INTMASK);

	/* clear status & extstatus */
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
	writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);

	err = request_irq(intr->host_general_irq, t20_intr_host1x_isr, 0,
			"host_status", intr);
	if (err)
		return err;

	/* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
	writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);

	/* enable extra interrupt sources */
	writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

	/* enable host module interrupt to CPU0 */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

	/* master enable for general (not syncpt) host interrupts */
	writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

	intr->host_general_irq_requested = true;

	return err;
}
170
/*
 * Tear down the host general interrupt: mask at the master level before
 * free_irq() so nothing fires while the handler is being removed.
 * No-op if the irq was never requested.
 */
static void t20_intr_free_host_general_irq(struct nvhost_intr *intr)
{
	if (intr->host_general_irq_requested) {
		void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

		/* master disable for general (not syncpt) host interrupts */
		writel(0, sync_regs + HOST1X_SYNC_INTMASK);

		free_irq(intr->host_general_irq, intr);
		intr->host_general_irq_requested = false;
	}
}
183
/*
 * Request the threaded interrupt for one syncpt: the hard-IRQ half
 * (t20_intr_syncpt_thresh_isr) masks/acks in interrupt context and the
 * threaded half (nvhost_syncpt_thresh_fn) does the completion work.
 * Idempotent: returns 0 if already requested, else the request_irq error.
 */
static int t20_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	int err;
	if (syncpt->irq_requested)
		return 0;

	err = request_threaded_irq(syncpt->irq,
				t20_intr_syncpt_thresh_isr,
				nvhost_syncpt_thresh_fn,
				0, syncpt->thresh_irq_name, syncpt);
	if (err)
		return err;

	syncpt->irq_requested = 1;
	return 0;
}
200
/*
 * Fill in the Tegra2 host1x implementations of the interrupt ops table.
 * Always succeeds (returns 0).
 */
int nvhost_init_t20_intr_support(struct nvhost_master *host)
{
	host->op.intr.init_host_sync = t20_intr_init_host_sync;
	host->op.intr.set_host_clocks_per_usec =
		t20_intr_set_host_clocks_per_usec;
	host->op.intr.set_syncpt_threshold = t20_intr_set_syncpt_threshold;
	host->op.intr.enable_syncpt_intr = t20_intr_enable_syncpt_intr;
	host->op.intr.disable_all_syncpt_intrs =
		t20_intr_disable_all_syncpt_intrs;
	host->op.intr.request_host_general_irq =
		t20_intr_request_host_general_irq;
	host->op.intr.free_host_general_irq =
		t20_intr_free_host_general_irq;
	host->op.intr.request_syncpt_irq =
		t20_request_syncpt_irq;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.c b/drivers/video/tegra/host/host1x/host1x_syncpt.c
new file mode 100644
index 00000000000..b0fd9970aaa
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.c
@@ -0,0 +1,248 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_syncpt.c
3 *
4 * Tegra Graphics Host Syncpoints for HOST1X
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/nvhost_ioctl.h>
22#include "nvhost_syncpt.h"
23#include "dev.h"
24#include "host1x_syncpt.h"
25#include "host1x_hardware.h"
26
27/**
28 * Write the current syncpoint value back to hw.
29 */
30static void t20_syncpt_reset(struct nvhost_syncpt *sp, u32 id)
31{
32 struct nvhost_master *dev = syncpt_to_dev(sp);
33 int min = nvhost_syncpt_read_min(sp, id);
34 writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
35}
36
/**
 * Write the current waitbase value back to hw.
 * Restores the cached base_val[id] into the hardware base register,
 * e.g. after the module has been powered down.
 */
static void t20_syncpt_reset_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	writel(sp->base_val[id],
		dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
}
46
/**
 * Read waitbase value from hw.
 * Refreshes the cached base_val[id] from the hardware base register.
 */
static void t20_syncpt_read_wait_base(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	sp->base_val[id] = readl(dev->sync_aperture +
			(HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
}
56
/**
 * Updates the last value read from hardware.
 * (was nvhost_syncpt_update_min)
 *
 * Returns the live hardware value. Logs an error if the live value has
 * run past the software max (a sign of a miscounted or runaway syncpt).
 */
static u32 t20_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	void __iomem *sync_regs = dev->sync_aperture;
	u32 old, live;

	/* retry until the cached min is advanced from exactly the value we
	 * read to the live hardware value -- a failed cmpxchg means another
	 * thread updated it concurrently, so re-read and try again */
	do {
		old = nvhost_syncpt_read_min(sp, id);
		live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
	} while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);

	if (!nvhost_syncpt_check_max(sp, id, live))
		dev_err(&syncpt_to_dev(sp)->dev->dev,
				"%s failed: id=%u\n",
				__func__,
				id);

	return live;
}
80
/**
 * Write a cpu syncpoint increment to the hardware, without touching
 * the cache. Caller is responsible for host being powered.
 *
 * For host-managed syncpts, refuses (with an error and a debug dump) to
 * increment past the software max, since that would corrupt the
 * min/max accounting.
 */
static void t20_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
{
	struct nvhost_master *dev = syncpt_to_dev(sp);
	BUG_ON(!nvhost_module_powered(dev->dev));
	if (!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id)) {
		dev_err(&syncpt_to_dev(sp)->dev->dev,
			"Trying to increment syncpoint id %d beyond max\n",
			id);
		nvhost_debug_dump(syncpt_to_dev(sp));
		return;
	}
	/* CPU_INCR is one bit per syncpt; the increment takes effect on
	 * write, wmb() orders it before any subsequent accesses */
	writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
	wmb();
}
99
/**
 * t20_syncpt_wait_check - patch out already-satisfied WAIT_SYNCPT methods
 * @sp:		syncpoint state
 * @nvmap:	nvmap client used to patch the command stream buffers
 * @waitchk_mask: bitmask of syncpt ids referenced by the wait checks
 * @wait:	array of wait checks to examine
 * @num_waitchk: number of entries in @wait
 *
 * Check for old WAITs to be removed (avoiding a wrap): refresh the
 * cached min of every syncpt in @waitchk_mask, then NULL each WAIT whose
 * threshold has already expired by patching its argument word in place.
 *
 * Returns 0 on success, or the first error from nvmap_patch_word().
 */
static int t20_syncpt_wait_check(struct nvhost_syncpt *sp,
				struct nvmap_client *nvmap,
				u32 waitchk_mask,
				struct nvhost_waitchk *wait,
				int num_waitchk)
{
	u32 idx;
	int err = 0;

	/* get current syncpt values */
	for (idx = 0; idx < NV_HOST1X_SYNCPT_NB_PTS; idx++) {
		if (BIT(idx) & waitchk_mask)
			nvhost_syncpt_update_min(sp, idx);
	}

	BUG_ON(!wait && !num_waitchk);

	/* compare syncpt vs wait threshold */
	while (num_waitchk) {
		u32 override;

		BUG_ON(wait->syncpt_id >= NV_HOST1X_SYNCPT_NB_PTS);
		if (nvhost_syncpt_is_expired(sp,
					wait->syncpt_id, wait->thresh)) {
			/*
			 * NULL an already satisfied WAIT_SYNCPT host method,
			 * by patching its args in the command stream. The
			 * method data is changed to reference a reserved
			 * (never given out or incr) NVSYNCPT_GRAPHICS_HOST
			 * syncpt with a matching threshold value of 0, so
			 * is guaranteed to be popped by the host HW.
			 */
			dev_dbg(&syncpt_to_dev(sp)->dev->dev,
				"drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id,
				syncpt_op(sp).name(sp, wait->syncpt_id),
				wait->thresh,
				nvhost_syncpt_read_min(sp, wait->syncpt_id));

			/* patch the wait */
			override = nvhost_class_host_wait_syncpt(
					NVSYNCPT_GRAPHICS_HOST, 0);
			err = nvmap_patch_word(nvmap,
					(struct nvmap_handle *)wait->mem,
					wait->offset, override);
			if (err)
				break;
		}

		wait++;
		num_waitchk--;
	}
	return err;
}
155
156
/* Human-readable syncpt names indexed by syncpt id (matches the
 * NVSYNCPT_* ids in host1x_syncpt.h); empty strings are unused ids. */
static const char *s_syncpt_names[32] = {
	"gfx_host",
	"", "", "", "", "", "", "",
	"disp0_a", "disp1_a", "avp_0",
	"csi_vi_0", "csi_vi_1",
	"vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4",
	"2d_0", "2d_1",
	"disp0_b", "disp1_b",
	"3d",
	"mpe",
	"disp0_c", "disp1_c",
	"vblank0", "vblank1",
	"mpe_ebm_eof", "mpe_wr_safe",
	"2d_tinyblt",
	"dsi"
};
173
174static const char *t20_syncpt_name(struct nvhost_syncpt *s, u32 id)
175{
176 BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
177 return s_syncpt_names[id];
178}
179
/*
 * Dump syncpt state: log (dev_info) the min/max of every syncpt that is
 * not idle at zero, and every non-zero waitbase. Reads the live hardware
 * values, so the host must be powered.
 */
static void t20_syncpt_debug(struct nvhost_syncpt *sp)
{
	u32 i;
	for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
		u32 max = nvhost_syncpt_read_max(sp, i);
		u32 min = nvhost_syncpt_update_min(sp, i);
		/* skip syncpts that have never been used */
		if (!max && !min)
			continue;
		dev_info(&syncpt_to_dev(sp)->dev->dev,
			"id %d (%s) min %d max %d\n",
			i, syncpt_op(sp).name(sp, i),
			min, max);

	}

	for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++) {
		u32 base_val;
		t20_syncpt_read_wait_base(sp, i);
		base_val = sp->base_val[i];
		if (base_val)
			dev_info(&syncpt_to_dev(sp)->dev->dev,
				"waitbase id %d val %d\n",
				i, base_val);

	}
}
206
/*
 * Try to take hardware mutex @idx. Returns 0 if the lock was acquired
 * by this read, non-zero if it is already held.
 */
static int syncpt_mutex_try_lock(struct nvhost_syncpt *sp,
		unsigned int idx)
{
	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;
	/* mlock registers returns 0 when the lock is acquired.
	 * writing 0 clears the lock. */
	return !!readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
}
215
/*
 * Release hardware mutex @idx (writing 0 clears the mlock -- see
 * syncpt_mutex_try_lock). Caller must hold the lock.
 */
static void syncpt_mutex_unlock(struct nvhost_syncpt *sp,
		unsigned int idx)
{
	void __iomem *sync_regs = syncpt_to_dev(sp)->sync_aperture;

	writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
}
223
/*
 * Initialize host1x syncpt support: locate the sync register aperture
 * (at channel 0 base plus the sync register offset), install the Tegra2
 * syncpt op implementations, and set the syncpt/waitbase/mlock limits.
 * Always succeeds (returns 0).
 */
int host1x_init_syncpt_support(struct nvhost_master *host)
{

	host->sync_aperture = host->aperture +
		(NV_HOST1X_CHANNEL0_BASE +
			HOST1X_CHANNEL_SYNC_REG_BASE);

	host->op.syncpt.reset = t20_syncpt_reset;
	host->op.syncpt.reset_wait_base = t20_syncpt_reset_wait_base;
	host->op.syncpt.read_wait_base = t20_syncpt_read_wait_base;
	host->op.syncpt.update_min = t20_syncpt_update_min;
	host->op.syncpt.cpu_incr = t20_syncpt_cpu_incr;
	host->op.syncpt.wait_check = t20_syncpt_wait_check;
	host->op.syncpt.debug = t20_syncpt_debug;
	host->op.syncpt.name = t20_syncpt_name;
	host->op.syncpt.mutex_try_lock = syncpt_mutex_try_lock;
	host->op.syncpt.mutex_unlock = syncpt_mutex_unlock;

	host->syncpt.nb_pts = NV_HOST1X_SYNCPT_NB_PTS;
	host->syncpt.nb_bases = NV_HOST1X_SYNCPT_NB_BASES;
	host->syncpt.client_managed = NVSYNCPTS_CLIENT_MANAGED;
	host->syncpt.nb_mlocks = NV_HOST1X_SYNC_MLOCK_NUM;

	return 0;
}
diff --git a/drivers/video/tegra/host/host1x/host1x_syncpt.h b/drivers/video/tegra/host/host1x/host1x_syncpt.h
new file mode 100644
index 00000000000..0d263dc92ed
--- /dev/null
+++ b/drivers/video/tegra/host/host1x/host1x_syncpt.h
@@ -0,0 +1,77 @@
1/*
2 * drivers/video/tegra/host/host1x/host1x_syncpt.h
3 *
4 * Tegra Graphics Host Syncpoints for HOST1X
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#ifndef __NVHOST_HOST1X_HOST1X_SYNCPT_H
22#define __NVHOST_HOST1X_HOST1X_SYNCPT_H
23
24#define NVSYNCPT_DISP0_A (8)
25#define NVSYNCPT_DISP1_A (9)
26#define NVSYNCPT_AVP_0 (10)
27#define NVSYNCPT_CSI_VI_0 (11)
28#define NVSYNCPT_CSI_VI_1 (12)
29#define NVSYNCPT_VI_ISP_0 (13)
30#define NVSYNCPT_VI_ISP_1 (14)
31#define NVSYNCPT_VI_ISP_2 (15)
32#define NVSYNCPT_VI_ISP_3 (16)
33#define NVSYNCPT_VI_ISP_4 (17)
34#define NVSYNCPT_2D_0 (18)
35#define NVSYNCPT_2D_1 (19)
36#define NVSYNCPT_DISP0_B (20)
37#define NVSYNCPT_DISP1_B (21)
38#define NVSYNCPT_3D (22)
39#define NVSYNCPT_MPE (23)
40#define NVSYNCPT_DISP0_C (24)
41#define NVSYNCPT_DISP1_C (25)
42#define NVSYNCPT_VBLANK0 (26)
43#define NVSYNCPT_VBLANK1 (27)
44#define NVSYNCPT_MPE_EBM_EOF (28)
45#define NVSYNCPT_MPE_WR_SAFE (29)
46#define NVSYNCPT_DSI (31)
47
48
49/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
50/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
51/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
52/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
53
54/* sync points that are wholly managed by the client */
55#define NVSYNCPTS_CLIENT_MANAGED ( \
56 BIT(NVSYNCPT_DISP0_A) | BIT(NVSYNCPT_DISP1_A) | \
57 BIT(NVSYNCPT_DISP0_B) | BIT(NVSYNCPT_DISP1_B) | \
58 BIT(NVSYNCPT_DISP0_C) | BIT(NVSYNCPT_DISP1_C) | \
59 BIT(NVSYNCPT_DSI) | \
60 BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1) | \
61 BIT(NVSYNCPT_CSI_VI_0) | BIT(NVSYNCPT_CSI_VI_1) | \
62 BIT(NVSYNCPT_VI_ISP_1) | BIT(NVSYNCPT_VI_ISP_2) | \
63 BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | \
64 BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
65 BIT(NVSYNCPT_2D_1) | BIT(NVSYNCPT_AVP_0))
66
67
68#define NVWAITBASE_2D_0 (1)
69#define NVWAITBASE_2D_1 (2)
70#define NVWAITBASE_3D (3)
71#define NVWAITBASE_MPE (4)
72
73struct nvhost_master;
74int host1x_init_syncpt(struct nvhost_master *host);
75int host1x_init_syncpt_support(struct nvhost_master *host);
76
77#endif