 drivers/gpu/drm/nouveau/nv40_graph.c | 110 ++++------------------
 1 file changed, 19 insertions(+), 91 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 5beb01b8ace1..c7885e990937 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -35,89 +35,6 @@ struct nv40_graph_engine {
 	u32 grctx_size;
 };
 
-static struct nouveau_channel *
-nv40_graph_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *grctx;
-	uint32_t inst;
-	int i;
-
-	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
-		return NULL;
-	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
-
-	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (!dev_priv->channels.ptr[i])
-			continue;
-
-		grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
-		if (grctx && grctx->pinst == inst)
-			return dev_priv->channels.ptr[i];
-	}
-
-	return NULL;
-}
-
-static int
-nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
-{
-	uint32_t old_cp, tv = 1000, tmp;
-	int i;
-
-	old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
-
-	tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
-	tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
-		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
-
-	tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
-	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
-
-	nouveau_wait_for_idle(dev);
-
-	for (i = 0; i < tv; i++) {
-		if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
-			break;
-	}
-
-	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
-
-	if (i == tv) {
-		uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
-		NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
-		NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
-			 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
-			 ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
-		NV_ERROR(dev, "0x40030C = 0x%08x\n",
-			 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int
-nv40_graph_unload_context(struct drm_device *dev)
-{
-	uint32_t inst;
-	int ret;
-
-	inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
-	if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
-		return 0;
-	inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
-
-	ret = nv40_graph_transfer_context(dev, inst, 1);
-
-	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
-	return ret;
-}
-
 static int
 nv40_graph_context_new(struct nouveau_channel *chan, int engine)
 {
@@ -163,16 +80,16 @@ nv40_graph_context_del(struct nouveau_channel *chan, int engine)
 	struct nouveau_gpuobj *grctx = chan->engctx[engine];
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 inst = 0x01000000 | (grctx->pinst >> 4);
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-	nv04_graph_fifo_access(dev, false);
-
-	/* Unload the context if it's the currently active one */
-	if (nv40_graph_channel(dev) == chan)
-		nv40_graph_unload_context(dev);
-
-	nv04_graph_fifo_access(dev, true);
+	nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
+	if (nv_rd32(dev, 0x40032c) == inst)
+		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+	if (nv_rd32(dev, 0x400330) == inst)
+		nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
+	nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 	/* Free the context resources */
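
Annotation (not part of the patch): the hunk above replaces the symbolic unload path with direct register pokes. Below is a commented sketch of the new nv40_graph_context_del() body; the register meanings are inferred from the helpers deleted in the first hunk, so the correspondences are assumptions rather than anything the patch itself states.

	u32 inst = 0x01000000 | (grctx->pinst >> 4);	/* bit 24 = "loaded" flag, low
							 * bits = instance, matching the
							 * deleted nv40_graph_channel() */

	/* The two nv_mask() calls on 0x400720 bracket the update, standing in
	 * for the nv04_graph_fifo_access(dev, false/true) pair they replace. */
	nv_mask(dev, 0x400720, 0x00000000, 0x00000001);

	/* 0x40032c is NV40_PGRAPH_CTXCTL_CUR in the deleted code; if it still
	 * points at this channel's context, clear bit 24 so PGRAPH never saves
	 * into memory that is about to be freed.  0x400330 appears to be the
	 * matching "next context" slot (it has no symbolic name in the removed
	 * code). */
	if (nv_rd32(dev, 0x40032c) == inst)
		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
	if (nv_rd32(dev, 0x400330) == inst)
		nv_mask(dev, 0x400330, 0x01000000, 0x00000000);

	nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
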
@@ -431,7 +348,18 @@ nv40_graph_init(struct drm_device *dev, int engine)
 static int
 nv40_graph_fini(struct drm_device *dev, int engine)
 {
-	nv40_graph_unload_context(dev);
+	u32 inst = nv_rd32(dev, 0x40032c);
+	if (inst & 0x01000000) {
+		nv_wr32(dev, 0x400720, 0x00000000);
+		nv_wr32(dev, 0x400784, inst);
+		nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
+		nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
+		if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
+			u32 insn = nv_rd32(dev, 0x400308);
+			NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
+		}
+		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+	}
 	return 0;
 }
 
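
Annotation (not part of the patch): nv40_graph_fini() now performs the forced context save inline instead of going through the deleted nv40_graph_unload_context() / nv40_graph_transfer_context(dev, inst, 1) pair. A commented sketch follows, with register meanings inferred from those removed helpers (assumptions, not confirmed by the patch itself):

	u32 inst = nv_rd32(dev, 0x40032c);	/* NV40_PGRAPH_CTXCTL_CUR in the
						 * deleted code */
	if (inst & 0x01000000) {		/* "loaded" bit: a context is live */
		nv_wr32(dev, 0x400720, 0x00000000);	/* block fifo access, cf. the
							 * old nv04_graph_fifo_access() */
		nv_wr32(dev, 0x400784, inst);	/* NV20_PGRAPH_CHANNEL_CTX_POINTER */
		/* 0x400310 |= 0x20 and 0x400304 |= 0x01 match the old
		 * CTXCTL_0310_XFER_SAVE and CTXCTL_0304_XFER_CTX writes:
		 * kick the ctxprog into saving the current context. */
		nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
		nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
		/* Poll for completion; 0x400300 bit 0 has no symbolic name in
		 * the removed code, which instead polled CTXCTL_030C. */
		if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
			/* On timeout, 0x400308 (CTXCTL_UCODE_STAT in the old
			 * error path) holds the ctxprog IP/opcode. */
			u32 insn = nv_rd32(dev, 0x400308);
			NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
		}
		nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);	/* clear "loaded" */
	}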