author      Ben Skeggs <bskeggs@redhat.com>    2011-03-31 21:33:21 -0400
committer   Ben Skeggs <bskeggs@redhat.com>    2011-05-15 20:48:14 -0400
commit      39c8d368273bca9b5f309f9feadfc8575c9fd993 (patch)
tree        104e1d8be0189d3c3f21206d7c97ab3d9ca93f54 /drivers/gpu/drm
parent      7a45cd19c95a383d81a7b2f5297958c0c16b5a08 (diff)
drm/nv40/gr: move to exec engine interfaces
Like nv50, this needs a good cleanup.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
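
The exec-engine interface replaces the per-card engine->graph vtable in drm_nouveau_private with a standalone engine object that carries its own hooks and owns its per-channel context. The structure below is a sketch of that hook table, inferred purely from the assignments nv40_graph_create() makes in this patch; the authoritative definition lives in nouveau_drv.h and may carry additional members.

        /* Sketch of the exec-engine hook table; the member set, order and
         * any extra fields are inferred from this patch, not copied from
         * nouveau_drv.h. */
        struct nouveau_exec_engine {
                void (*destroy)(struct drm_device *, int engine);
                int  (*init)(struct drm_device *, int engine);
                int  (*fini)(struct drm_device *, int engine);
                int  (*context_new)(struct nouveau_channel *, int engine);
                void (*context_del)(struct nouveau_channel *, int engine);
                int  (*object_new)(struct nouveau_channel *, int engine,
                                   u32 handle, u16 class);
        };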
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_drv.h      9
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_state.c   25
-rw-r--r--   drivers/gpu/drm/nouveau/nv40_graph.c     295
3 files changed, 158 insertions(+), 171 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 139864f6aabb..a20e49d36209 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1177,14 +1177,7 @@ extern int nv30_graph_init(struct drm_device *);
 extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
-extern int nv40_graph_init(struct drm_device *);
-extern void nv40_graph_takedown(struct drm_device *);
-extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
-extern int nv40_graph_create_context(struct nouveau_channel *);
-extern void nv40_graph_destroy_context(struct nouveau_channel *);
-extern int nv40_graph_load_context(struct nouveau_channel *);
-extern int nv40_graph_unload_context(struct drm_device *);
-extern int nv40_graph_object_new(struct nouveau_channel *, u32, u16);
+extern int nv40_graph_create(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
 extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
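With this change the header keeps a single nv40 graph entry point, nv40_graph_create(); everything else on the old list becomes static to nv40_graph.c and is reached through the engine object's hooks. A rough lifecycle follows, with function names taken from this patch but the calling sequence assumed, since the dispatcher that walks the engine table is outside this diff:

        /* Assumed calling sequence; only the names come from this patch. */
        nv40_graph_create(dev);                     /* alloc engine, add classes, hook IRQ 12 */
        pgraph->base.init(dev, NVOBJ_ENGINE_GR);    /* ctxprog upload, PGRAPH setup */
        /* ... runtime: context_new/context_del/object_new per channel ... */
        pgraph->base.fini(dev, NVOBJ_ENGINE_GR);    /* unload the current grctx */
        pgraph->base.destroy(dev, NVOBJ_ENGINE_GR); /* IRQ unhook, NVOBJ_ENGINE_DEL, kfree */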
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 958f312a497c..d8852edc60cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -299,15 +299,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
                engine->fb.init_tile_region = nv30_fb_init_tile_region;
                engine->fb.set_tile_region = nv40_fb_set_tile_region;
                engine->fb.free_tile_region = nv30_fb_free_tile_region;
-               engine->graph.init = nv40_graph_init;
-               engine->graph.takedown = nv40_graph_takedown;
-               engine->graph.fifo_access = nv04_graph_fifo_access;
-               engine->graph.channel = nv40_graph_channel;
-               engine->graph.create_context = nv40_graph_create_context;
-               engine->graph.destroy_context = nv40_graph_destroy_context;
-               engine->graph.load_context = nv40_graph_load_context;
-               engine->graph.unload_context = nv40_graph_unload_context;
-               engine->graph.object_new = nv40_graph_object_new;
+               engine->graph.init = nouveau_stub_init;
+               engine->graph.takedown = nouveau_stub_takedown;
+               engine->graph.fifo_access = nvc0_graph_fifo_access;
+               engine->graph.channel = nvc0_graph_channel;
                engine->graph.set_tile_region = nv40_graph_set_tile_region;
                engine->fifo.channels = 32;
                engine->fifo.init = nv40_fifo_init;
@@ -618,11 +613,17 @@ nouveau_card_init(struct drm_device *dev)
        if (ret)
                goto out_timer;
 
-       if (dev_priv->card_type == NV_50)
+       switch (dev_priv->card_type) {
+       case NV_40:
+               nv40_graph_create(dev);
+               break;
+       case NV_50:
                nv50_graph_create(dev);
-       else
-       if (dev_priv->card_type == NV_C0)
+               break;
+       case NV_C0:
                nvc0_graph_create(dev);
+               break;
+       }
 
        switch (dev_priv->chipset) {
        case 0x84:
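Note that the legacy NV40 engine->graph hooks now point at stubs (and at the nvc0 helpers, which serve the same placeholder role on that path); the real work happens through the exec-engine object created above, and the old vtable survives only until every chipset is converted. A minimal sketch of what such stubs amount to; the actual nouveau_stub_* helpers live elsewhere in the driver, and the bodies shown here are assumptions:

        /* Assumed shape of the stub hooks that keep the old vtable populated
         * while the exec-engine object owns context management. */
        static int  nouveau_stub_init(struct drm_device *dev) { return 0; }
        static void nouveau_stub_takedown(struct drm_device *dev) { }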
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 2952daf43a3d..f0c6a64275aa 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -30,13 +30,16 @@
 #include "nouveau_grctx.h"
 #include "nouveau_ramht.h"
 
-static int nv40_graph_register(struct drm_device *);
-static void nv40_graph_isr(struct drm_device *);
+struct nv40_graph_engine {
+       struct nouveau_exec_engine base;
+       u32 grctx_size;
+};
 
-struct nouveau_channel *
+static struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpuobj *grctx;
        uint32_t inst;
        int i;
 
@@ -46,74 +49,17 @@ nv40_graph_channel(struct drm_device *dev)
        inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-               struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+               if (!dev_priv->channels.ptr[i])
+                       continue;
 
-               if (chan && chan->ramin_grctx &&
-                   chan->ramin_grctx->pinst == inst)
-                       return chan;
+               grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
+               if (grctx && grctx->pinst == inst)
+                       return dev_priv->channels.ptr[i];
        }
 
        return NULL;
 }
 
-int
-nv40_graph_create_context(struct nouveau_channel *chan)
-{
-       struct drm_device *dev = chan->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-       struct nouveau_grctx ctx = {};
-       unsigned long flags;
-       int ret;
-
-       ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
-                                NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin_grctx);
-       if (ret)
-               return ret;
-
-       /* Initialise default context values */
-       ctx.dev = chan->dev;
-       ctx.mode = NOUVEAU_GRCTX_VALS;
-       ctx.data = chan->ramin_grctx;
-       nv40_grctx_init(&ctx);
-
-       nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
-
-       /* init grctx pointer in ramfc, and on PFIFO if channel is
-        * already active there
-        */
-       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-       nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
-       nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
-       if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
-               nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
-       nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
-       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-       return 0;
-}
-
-void
-nv40_graph_destroy_context(struct nouveau_channel *chan)
-{
-       struct drm_device *dev = chan->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-       pgraph->fifo_access(dev, false);
-
-       /* Unload the context if it's the currently active one */
-       if (pgraph->channel(dev) == chan)
-               pgraph->unload_context(dev);
-
-       pgraph->fifo_access(dev, true);
-       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-       /* Free the context resources */
-       nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
-}
-
 static int
 nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
 {
@@ -155,58 +101,88 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
        return 0;
 }
 
-/* Restore the context for a specific channel into PGRAPH */
-int
-nv40_graph_load_context(struct nouveau_channel *chan)
+static int
+nv40_graph_unload_context(struct drm_device *dev)
 {
-       struct drm_device *dev = chan->dev;
        uint32_t inst;
        int ret;
 
-       if (!chan->ramin_grctx)
-               return -EINVAL;
-       inst = chan->ramin_grctx->pinst >> 4;
+       inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
+       if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
+               return 0;
+       inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+
+       ret = nv40_graph_transfer_context(dev, inst, 1);
+
+       nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
+       return ret;
+}
 
-       ret = nv40_graph_transfer_context(dev, inst, 0);
+static int
+nv40_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+       struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
+       struct drm_device *dev = chan->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpuobj *grctx = NULL;
+       struct nouveau_grctx ctx = {};
+       unsigned long flags;
+       int ret;
+
+       ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+                                NVOBJ_FLAG_ZERO_ALLOC, &grctx);
        if (ret)
                return ret;
 
-       /* 0x40032C, no idea of it's exact function.  Could simply be a
-        * record of the currently active PGRAPH context.  It's currently
-        * unknown as to what bit 24 does.  The nv ddx has it set, so we will
-        * set it here too.
-        */
-       nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-       nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
-               (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
-               NV40_PGRAPH_CTXCTL_CUR_LOADED);
-       /* 0x32E0 records the instance address of the active FIFO's PGRAPH
-        * context.  If at any time this doesn't match 0x40032C, you will
-        * receive PGRAPH_INTR_CONTEXT_SWITCH
+       /* Initialise default context values */
+       ctx.dev = chan->dev;
+       ctx.mode = NOUVEAU_GRCTX_VALS;
+       ctx.data = grctx;
+       nv40_grctx_init(&ctx);
+
+       nv_wo32(grctx, 0, grctx->vinst);
+
+       /* init grctx pointer in ramfc, and on PFIFO if channel is
+        * already active there
        */
-       nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+       nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
+       nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+       if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+               nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
+       nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+       chan->engctx[engine] = grctx;
        return 0;
 }
 
-int
-nv40_graph_unload_context(struct drm_device *dev)
+static void
+nv40_graph_context_del(struct nouveau_channel *chan, int engine)
 {
-       uint32_t inst;
-       int ret;
+       struct nouveau_gpuobj *grctx = chan->engctx[engine];
+       struct drm_device *dev = chan->dev;
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       unsigned long flags;
 
-       inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
-       if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
-               return 0;
-       inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+       nv04_graph_fifo_access(dev, false);
 
-       ret = nv40_graph_transfer_context(dev, inst, 1);
+       /* Unload the context if it's the currently active one */
+       if (nv40_graph_channel(dev) == chan)
+               nv40_graph_unload_context(dev);
 
-       nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
-       return ret;
+       nv04_graph_fifo_access(dev, true);
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+       /* Free the context resources */
+       nouveau_gpuobj_ref(NULL, &grctx);
+       chan->engctx[engine] = NULL;
 }
 
 int
-nv40_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+nv40_graph_object_new(struct nouveau_channel *chan, int engine,
+                     u32 handle, u16 class)
 {
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *obj = NULL;
@@ -284,14 +260,14 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
  * C51 0x4e
  */
 int
-nv40_graph_init(struct drm_device *dev)
+nv40_graph_init(struct drm_device *dev, int engine)
 {
-       struct drm_nouveau_private *dev_priv =
-               (struct drm_nouveau_private *)dev->dev_private;
+       struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+       struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_grctx ctx = {};
        uint32_t vramsz, *cp;
-       int ret, i, j;
+       int i, j;
 
        nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
                        ~NV_PMC_ENABLE_PGRAPH);
@@ -307,7 +283,7 @@ nv40_graph_init(struct drm_device *dev)
        ctx.data = cp;
        ctx.ctxprog_max = 256;
        nv40_grctx_init(&ctx);
-       dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+       pgraph->grctx_size = ctx.ctxvals_pos * 4;
 
        nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
        for (i = 0; i < ctx.ctxprog_len; i++)
@@ -315,14 +291,9 @@
 
        kfree(cp);
 
-       ret = nv40_graph_register(dev);
-       if (ret)
-               return ret;
-
        /* No context present currently */
        nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
-       nouveau_irq_register(dev, 12, nv40_graph_isr);
        nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
        nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -455,47 +426,10 @@ nv40_graph_init(struct drm_device *dev)
        return 0;
 }
 
-void nv40_graph_takedown(struct drm_device *dev)
-{
-       nouveau_irq_unregister(dev, 12);
-}
-
 static int
-nv40_graph_register(struct drm_device *dev)
+nv40_graph_fini(struct drm_device *dev, int engine)
 {
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-       if (dev_priv->engine.graph.registered)
-               return 0;
-
-       NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
-       NVOBJ_CLASS(dev, 0x0030, GR); /* null */
-       NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
-       NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
-       NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
-       NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
-       NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
-       NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
-       NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
-       NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
-       NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
-       NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
-       NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
-       NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
-       NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
-       NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
-       /* curie */
-       if (nv44_graph_class(dev))
-               NVOBJ_CLASS(dev, 0x4497, GR);
-       else
-               NVOBJ_CLASS(dev, 0x4097, GR);
-
-       /* nvsw */
-       NVOBJ_CLASS(dev, 0x506e, SW);
-       NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
-
-       dev_priv->engine.graph.registered = true;
+       nv40_graph_unload_context(dev);
        return 0;
 }
 
@@ -503,17 +437,17 @@ static int
 nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_channel *chan;
+       struct nouveau_gpuobj *grctx;
        unsigned long flags;
        int i;
 
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-               chan = dev_priv->channels.ptr[i];
-               if (!chan || !chan->ramin_grctx)
+               if (!dev_priv->channels.ptr[i])
                        continue;
+               grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
 
-               if (inst == chan->ramin_grctx->pinst)
+               if (grctx && grctx->pinst == inst)
                        break;
        }
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
@@ -564,3 +498,62 @@ nv40_graph_isr(struct drm_device *dev)
                }
        }
 }
+
+static void
+nv40_graph_destroy(struct drm_device *dev, int engine)
+{
+       struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+
+       nouveau_irq_unregister(dev, 12);
+
+       NVOBJ_ENGINE_DEL(dev, GR);
+       kfree(pgraph);
+}
+
+int
+nv40_graph_create(struct drm_device *dev)
+{
+       struct nv40_graph_engine *pgraph;
+
+       pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+       if (!pgraph)
+               return -ENOMEM;
+
+       pgraph->base.destroy = nv40_graph_destroy;
+       pgraph->base.init = nv40_graph_init;
+       pgraph->base.fini = nv40_graph_fini;
+       pgraph->base.context_new = nv40_graph_context_new;
+       pgraph->base.context_del = nv40_graph_context_del;
+       pgraph->base.object_new = nv40_graph_object_new;
+
+       NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+       nouveau_irq_register(dev, 12, nv40_graph_isr);
+
+       NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+       NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+       NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+       NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+       NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+       NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+       NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+       NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+       NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+       NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+       NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+       NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+       NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+       NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+       NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+       NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+       /* curie */
+       if (nv44_graph_class(dev))
+               NVOBJ_CLASS(dev, 0x4497, GR);
+       else
+               NVOBJ_CLASS(dev, 0x4097, GR);
+
+       /* nvsw */
+       NVOBJ_CLASS(dev, 0x506e, SW);
+       NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+       return 0;
+}
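
Two conventions in the new nv40_graph.c are worth spelling out. First, per-channel context moves from the dedicated chan->ramin_grctx field into the generic chan->engctx[] array, indexed by engine id (NVOBJ_ENGINE_GR above), which is what lets nv40_graph_channel() and the ISR walk channels without graph-specific fields. Second, nv_engine() recovers the wrapper structure from whatever NVOBJ_ENGINE_ADD stored: because `base` is the first member of struct nv40_graph_engine, the registered nouveau_exec_engine pointer and the wrapper share an address, so a plain cast suffices. The helper below is a minimal sketch under that assumption; the real helper and the engine-table field name (`eng[]` here) are assumptions, not copied from nouveau_drv.h:

        /* Sketch of the downcast nv_engine() performs; dev_priv->eng[] is an
         * assumed name for the per-device table NVOBJ_ENGINE_ADD fills in. */
        static inline void *
        nv_engine(struct drm_device *dev, int engine)
        {
                struct drm_nouveau_private *dev_priv = dev->dev_private;
                return (void *)dev_priv->eng[engine];
        }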