aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/video/tegra/host/gr3d/gr3d_t20.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/video/tegra/host/gr3d/gr3d_t20.c')
-rw-r--r--drivers/video/tegra/host/gr3d/gr3d_t20.c395
1 file changed, 395 insertions, 0 deletions
diff --git a/drivers/video/tegra/host/gr3d/gr3d_t20.c b/drivers/video/tegra/host/gr3d/gr3d_t20.c
new file mode 100644
index 00000000000..3604142aaf2
--- /dev/null
+++ b/drivers/video/tegra/host/gr3d/gr3d_t20.c
@@ -0,0 +1,395 @@
1/*
2 * drivers/video/tegra/host/gr3d/gr3d_t20.c
3 *
4 * Tegra Graphics Host 3D for Tegra2
5 *
6 * Copyright (c) 2010-2012, NVIDIA Corporation.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include "nvhost_hwctx.h"
22#include "dev.h"
23#include "host1x/host1x_channel.h"
24#include "host1x/host1x_hardware.h"
25#include "host1x/host1x_syncpt.h"
26#include "gr3d.h"
27
28#include <linux/slab.h>
29
/*
 * Table of 3D engine register ranges saved and restored on a context
 * switch.  Each entry gives the first register offset, the number of
 * words, and the access method (DIRECT, INDIRECT, or INDIRECT_4X; see
 * the HWCTX_REGINFO macro).  NOTE(review): offsets and counts are
 * Tegra2 hardware-defined — verify against the TRM before editing.
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	HWCTX_REGINFO(0xe00, 4, DIRECT),
	HWCTX_REGINFO(0xe05, 30, DIRECT),
	HWCTX_REGINFO(0xe25, 2, DIRECT),
	HWCTX_REGINFO(0xe28, 2, DIRECT),
	HWCTX_REGINFO(0x001, 2, DIRECT),
	HWCTX_REGINFO(0x00c, 10, DIRECT),
	HWCTX_REGINFO(0x100, 34, DIRECT),
	HWCTX_REGINFO(0x124, 2, DIRECT),
	HWCTX_REGINFO(0x200, 5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1024, INDIRECT),
	HWCTX_REGINFO(0x209, 1, DIRECT),
	HWCTX_REGINFO(0x300, 64, DIRECT),
	HWCTX_REGINFO(0x343, 25, DIRECT),
	HWCTX_REGINFO(0x363, 2, DIRECT),
	HWCTX_REGINFO(0x400, 16, DIRECT),
	HWCTX_REGINFO(0x411, 1, DIRECT),
	HWCTX_REGINFO(0x500, 4, DIRECT),
	HWCTX_REGINFO(0x520, 32, DIRECT),
	HWCTX_REGINFO(0x540, 64, INDIRECT),
	HWCTX_REGINFO(0x600, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x603, 128, INDIRECT),
	HWCTX_REGINFO(0x608, 4, DIRECT),
	HWCTX_REGINFO(0x60e, 1, DIRECT),
	HWCTX_REGINFO(0x700, 64, INDIRECT),
	HWCTX_REGINFO(0x710, 50, DIRECT),
	HWCTX_REGINFO(0x800, 16, INDIRECT_4X),
	HWCTX_REGINFO(0x803, 512, INDIRECT),
	HWCTX_REGINFO(0x805, 64, INDIRECT),
	HWCTX_REGINFO(0x820, 32, DIRECT),
	HWCTX_REGINFO(0x900, 64, INDIRECT),
	HWCTX_REGINFO(0x902, 2, DIRECT),
	HWCTX_REGINFO(0xa02, 10, DIRECT),
	HWCTX_REGINFO(0xe04, 1, DIRECT),
	HWCTX_REGINFO(0xe2a, 1, DIRECT),
};
67
/* the same context save command sequence is used for all contexts. */
#define SAVE_BEGIN_V0_SIZE 5		/* words emitted by save_begin_v0() */
#define SAVE_DIRECT_V0_SIZE 3		/* words per save_direct_v0() emission */
#define SAVE_INDIRECT_V0_SIZE 5		/* words per save_indirect_v0() emission */
#define SAVE_END_V0_SIZE 5		/* words emitted by save_end_v0() */
#define SAVE_INCRS 3			/* syncpt increments in a full save */
#define SAVE_THRESH_OFFSET 1		/* save threshold = save_incrs - this */
#define RESTORE_BEGIN_SIZE 4		/* words emitted by nvhost_3dctx_restore_begin() */
#define RESTORE_DIRECT_SIZE 1		/* words per direct restore opcode */
#define RESTORE_INDIRECT_SIZE 2		/* words per indirect restore opcode pair */
#define RESTORE_END_SIZE 1		/* words emitted by nvhost_3dctx_restore_end() */
79
/*
 * Bookkeeping used while building the context-save command sequence.
 * When 'ptr' is NULL, setup_save()/setup_save_regs() only accumulate
 * the counts (sizing pass); otherwise opcodes are emitted at 'ptr'.
 */
struct save_info {
	u32 *ptr;			/* next emission position, or NULL for sizing */
	unsigned int save_count;	/* words in the save command sequence */
	unsigned int restore_count;	/* words in the restore command sequence */
	unsigned int save_incrs;	/* syncpt increments during save */
	unsigned int restore_incrs;	/* syncpt increments during restore */
};
87
88static u32 *setup_restore_regs_v0(u32 *ptr,
89 const struct hwctx_reginfo *regs,
90 unsigned int nr_regs)
91{
92 const struct hwctx_reginfo *rend = regs + nr_regs;
93
94 for ( ; regs != rend; ++regs) {
95 u32 offset = regs->offset;
96 u32 count = regs->count;
97 u32 indoff = offset + 1;
98 switch (regs->type) {
99 case HWCTX_REGINFO_DIRECT:
100 nvhost_3dctx_restore_direct(ptr, offset, count);
101 ptr += RESTORE_DIRECT_SIZE;
102 break;
103 case HWCTX_REGINFO_INDIRECT_4X:
104 ++indoff;
105 /* fall through */
106 case HWCTX_REGINFO_INDIRECT:
107 nvhost_3dctx_restore_indirect(ptr,
108 offset, 0, indoff, count);
109 ptr += RESTORE_INDIRECT_SIZE;
110 break;
111 }
112 ptr += count;
113 }
114 return ptr;
115}
116
117static void setup_restore_v0(struct host1x_hwctx_handler *h, u32 *ptr)
118{
119 nvhost_3dctx_restore_begin(h, ptr);
120 ptr += RESTORE_BEGIN_SIZE;
121
122 ptr = setup_restore_regs_v0(ptr,
123 ctxsave_regs_3d_global,
124 ARRAY_SIZE(ctxsave_regs_3d_global));
125
126 nvhost_3dctx_restore_end(h, ptr);
127
128 wmb();
129}
130
131/*** v0 saver ***/
132
133static void save_push_v0(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
134{
135 struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
136 struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);
137
138 nvhost_cdma_push_gather(cdma,
139 (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
140 (void *)NVHOST_CDMA_PUSH_GATHER_CTXSAVE,
141 nvhost_opcode_gather(p->save_size),
142 p->save_phys);
143}
144
145static void __init save_begin_v0(struct host1x_hwctx_handler *h, u32 *ptr)
146{
147 /* 3d: when done, increment syncpt to base+1 */
148 ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
149 ptr[1] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_OP_DONE,
150 h->syncpt); /* incr 1 */
151 /* host: wait for syncpt base+1 */
152 ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
153 NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
154 ptr[3] = nvhost_class_host_wait_syncpt_base(h->syncpt,
155 h->waitbase, 1);
156 /* host: signal context read thread to start reading */
157 ptr[4] = nvhost_opcode_imm_incr_syncpt(NV_SYNCPT_IMMEDIATE,
158 h->syncpt); /* incr 2 */
159}
160
161static void __init save_direct_v0(u32 *ptr, u32 start_reg, u32 count)
162{
163 ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
164 ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
165 start_reg, true);
166 ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
167}
168
169static void __init save_indirect_v0(u32 *ptr, u32 offset_reg, u32 offset,
170 u32 data_reg, u32 count)
171{
172 ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
173 offset_reg, 1);
174 ptr[1] = offset;
175 ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
176 NV_CLASS_HOST_INDOFF, 1);
177 ptr[3] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
178 data_reg, false);
179 ptr[4] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
180}
181
182static void __init save_end_v0(struct host1x_hwctx_handler *h, u32 *ptr)
183{
184 /* Wait for context read service to finish (cpu incr 3) */
185 ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
186 ptr[1] = nvhost_class_host_wait_syncpt_base(h->syncpt,
187 h->waitbase, h->save_incrs);
188 /* Advance syncpoint base */
189 ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
190 ptr[3] = nvhost_class_host_incr_syncpt_base(NVWAITBASE_3D,
191 h->save_incrs);
192 /* set class back to the unit */
193 ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
194}
195
/*
 * CPU side of the save: for each table entry, skip the opcode slot(s)
 * that setup_restore_regs_v0() wrote into the restore buffer, then
 * drain 'count' register values from the channel FIFO into the payload
 * space that follows.  Returns the pointer past the last word written.
 * NOTE(review): a FIFO drain failure is fatal (BUG_ON) — presumably
 * the save sequence guarantees the data is available; confirm.
 */
static u32 *save_regs_v0(u32 *ptr, unsigned int *pending,
			void __iomem *chan_regs,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	int drain_result = 0;

	for ( ; regs != rend; ++regs) {
		u32 count = regs->count;
		switch (regs->type) {
		case HWCTX_REGINFO_DIRECT:
			/* skip the direct restore opcode slot */
			ptr += RESTORE_DIRECT_SIZE;
			break;
		case HWCTX_REGINFO_INDIRECT:
		case HWCTX_REGINFO_INDIRECT_4X:
			/* skip the indirect restore opcode pair */
			ptr += RESTORE_INDIRECT_SIZE;
			break;
		}
		/* read the saved register values into the restore buffer */
		drain_result = host1x_drain_read_fifo(chan_regs,
					ptr, count, pending);
		BUG_ON(drain_result < 0);
		ptr += count;
	}
	return ptr;
}
222
223/*** save ***/
224
/*
 * For every entry of a register table, emit the save opcodes (when
 * info->ptr is non-NULL) and accumulate the save/restore word counts.
 * Called once with ptr == NULL to size the buffers and once again to
 * actually emit; both passes must account words identically.
 */
static void __init setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		u32 indoff = offset + 1;
		switch (regs->type) {
		case HWCTX_REGINFO_DIRECT:
			if (ptr) {
				save_direct_v0(ptr, offset, count);
				ptr += SAVE_DIRECT_V0_SIZE;
			}
			save_count += SAVE_DIRECT_V0_SIZE;
			restore_count += RESTORE_DIRECT_SIZE;
			break;
		case HWCTX_REGINFO_INDIRECT_4X:
			/* 4x-indirect data port is one register further */
			++indoff;
			/* fall through */
		case HWCTX_REGINFO_INDIRECT:
			if (ptr) {
				save_indirect_v0(ptr, offset, 0,
						indoff, count);
				ptr += SAVE_INDIRECT_V0_SIZE;
			}
			save_count += SAVE_INDIRECT_V0_SIZE;
			restore_count += RESTORE_INDIRECT_SIZE;
			break;
		}
		if (ptr) {
			/* SAVE cases only: reserve room for incoming data */
			u32 k = 0;
			/*
			 * Create a signature pattern for indirect data (which
			 * will be overwritten by true incoming data) for
			 * better deducing where we are in a long command
			 * sequence, when given only a FIFO snapshot for debug
			 * purposes.
			 */
			for (k = 0; k < count; k++)
				*(ptr + k) = 0xd000d000 | (offset << 16) | k;
			ptr += count;
		}
		/* payload words occupy space in both sequences */
		save_count += count;
		restore_count += count;
	}

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
282
283static void __init setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
284{
285 struct save_info info = {
286 ptr,
287 SAVE_BEGIN_V0_SIZE,
288 RESTORE_BEGIN_SIZE,
289 SAVE_INCRS,
290 1
291 };
292
293 if (info.ptr) {
294 save_begin_v0(h, info.ptr);
295 info.ptr += SAVE_BEGIN_V0_SIZE;
296 }
297
298 /* save regs */
299 setup_save_regs(&info,
300 ctxsave_regs_3d_global,
301 ARRAY_SIZE(ctxsave_regs_3d_global));
302
303 if (info.ptr) {
304 save_end_v0(h, info.ptr);
305 info.ptr += SAVE_END_V0_SIZE;
306 }
307
308 wmb();
309
310 h->save_size = info.save_count + SAVE_END_V0_SIZE;
311 h->restore_size = info.restore_count + RESTORE_END_SIZE;
312 h->save_incrs = info.save_incrs;
313 h->save_thresh = h->save_incrs - SAVE_THRESH_OFFSET;
314 h->restore_incrs = info.restore_incrs;
315}
316
317
318
319/*** ctx3d ***/
320
321static struct nvhost_hwctx *ctx3d_alloc_v0(struct nvhost_hwctx_handler *h,
322 struct nvhost_channel *ch)
323{
324 struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
325 struct host1x_hwctx *ctx =
326 nvhost_3dctx_alloc_common(p, ch, true);
327 if (ctx) {
328 setup_restore_v0(p, ctx->restore_virt);
329 return &ctx->hwctx;
330 } else
331 return NULL;
332}
333
334static void ctx3d_save_service(struct nvhost_hwctx *nctx)
335{
336 struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
337
338 u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
339 unsigned int pending = 0;
340
341 ptr = save_regs_v0(ptr, &pending, nctx->channel->aperture,
342 ctxsave_regs_3d_global,
343 ARRAY_SIZE(ctxsave_regs_3d_global));
344
345 wmb();
346 nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
347 host1x_hwctx_handler(ctx)->syncpt);
348}
349
350struct nvhost_hwctx_handler * __init nvhost_gr3d_t20_ctxhandler_init(
351 u32 syncpt, u32 waitbase,
352 struct nvhost_channel *ch)
353{
354 struct nvmap_client *nvmap;
355 u32 *save_ptr;
356 struct host1x_hwctx_handler *p;
357
358 p = kmalloc(sizeof(*p), GFP_KERNEL);
359 if (!p)
360 return NULL;
361 nvmap = nvhost_get_host(ch->dev)->nvmap;
362
363 p->syncpt = syncpt;
364 p->waitbase = waitbase;
365
366 setup_save(p, NULL);
367
368 p->save_buf = nvmap_alloc(nvmap, p->save_size * sizeof(u32), 32,
369 NVMAP_HANDLE_WRITE_COMBINE, 0);
370 if (IS_ERR(p->save_buf)) {
371 p->save_buf = NULL;
372 return NULL;
373 }
374
375 p->save_slots = 1;
376
377 save_ptr = nvmap_mmap(p->save_buf);
378 if (!save_ptr) {
379 nvmap_free(nvmap, p->save_buf);
380 p->save_buf = NULL;
381 return NULL;
382 }
383
384 p->save_phys = nvmap_pin(nvmap, p->save_buf);
385
386 setup_save(p, save_ptr);
387
388 p->h.alloc = ctx3d_alloc_v0;
389 p->h.save_push = save_push_v0;
390 p->h.save_service = ctx3d_save_service;
391 p->h.get = nvhost_3dctx_get;
392 p->h.put = nvhost_3dctx_put;
393
394 return &p->h;
395}