path: root/drivers/gpu/drm/nouveau/nouveau_mem.c
author    Ben Skeggs <bskeggs@redhat.com>    2010-11-14 20:54:21 -0500
committer Ben Skeggs <bskeggs@redhat.com>    2010-12-07 22:48:13 -0500
commit    4c1361429841344ce4d164492ee7620cf3286eb7 (patch)
tree      7cd23e9e99299b3265b2e59d49e3aa5b77a465f0 /drivers/gpu/drm/nouveau/nouveau_mem.c
parent    f869ef882382a4b6cb42d259e399aeec3781d4bb (diff)
drm/nv50: implement global channel address space on new VM code

As of this commit, it's guaranteed that if an object is in VRAM, its GPU virtual address will be constant.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 94
1 file changed, 0 insertions(+), 94 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2d02401e8227..4d2d3de97ee9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -145,100 +145,6 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 }
 
 /*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
-			uint32_t flags, uint64_t phys)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned block;
-	int i;
-
-	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
-	size = (size >> 16) << 1;
-
-	phys |= ((uint64_t)flags << 32);
-	phys |= 1;
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
-		phys |= 0x30;
-	}
-
-	while (size) {
-		unsigned offset_h = upper_32_bits(phys);
-		unsigned offset_l = lower_32_bits(phys);
-		unsigned pte, end;
-
-		for (i = 7; i >= 0; i--) {
-			block = 1 << (i + 1);
-			if (size >= block && !(virt & (block - 1)))
-				break;
-		}
-		offset_l |= (i << 7);
-
-		phys += block << 15;
-		size -= block;
-
-		while (block) {
-			pgt = dev_priv->vm_vram_pt[virt >> 14];
-			pte = virt & 0x3ffe;
-
-			end = pte + block;
-			if (end > 16384)
-				end = 16384;
-			block -= (end - pte);
-			virt  += (end - pte);
-
-			while (pte < end) {
-				nv_wo32(pgt, (pte * 4) + 0, offset_l);
-				nv_wo32(pgt, (pte * 4) + 4, offset_h);
-				pte += 2;
-			}
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush_engine(dev, 6);
-	return 0;
-}
-
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned pages, pte, end;
-
-	virt -= dev_priv->vm_vram_base;
-	pages = (size >> 16) << 1;
-
-	while (pages) {
-		pgt = dev_priv->vm_vram_pt[virt >> 29];
-		pte = (virt & 0x1ffe0000ULL) >> 15;
-
-		end = pte + pages;
-		if (end > 16384)
-			end = 16384;
-		pages -= (end - pte);
-		virt  += (end - pte) << 15;
-
-		while (pte < end) {
-			nv_wo32(pgt, (pte * 4), 0);
-			pte++;
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush_engine(dev, 6);
-}
-
-/*
  * Cleanup everything
  */
 void
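
A note on the address arithmetic in the removed nv50_mem_vm_bind_linear(): this VM uses 64KiB pages, each PTE is a pair of 32-bit words, and each page table in dev_priv->vm_vram_pt[] holds 16384 words (8192 PTEs, i.e. 512MiB of virtual address space). The for loop picks the largest power-of-two run of pages (up to 128) that fits both the remaining size and the alignment of the current position, and records that choice in bits 7-9 of the low PTE word (offset_l |= i << 7), evidently encoding the contiguous block size. The following is a minimal standalone sketch of just that index math; VM_VRAM_BASE and the demo range are made-up values for illustration, not real driver or hardware state.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the index math from the removed nv50_mem_vm_bind_linear():
 * 64KiB pages, 8-byte PTEs stored as two 32-bit words, 16384 words per
 * page table. VM_VRAM_BASE and the demo range below are hypothetical. */
#define VM_VRAM_BASE 0x20000000ULL	/* stands in for dev_priv->vm_vram_base */

int main(void)
{
	uint64_t virt = VM_VRAM_BASE + 0x12340000ULL;	/* 64KiB-aligned VA */
	uint32_t size = 0x00280000;			/* 2.5MiB mapping */

	/* Convert bytes to a word index: one 64KiB page == two words. */
	uint64_t v = ((virt - VM_VRAM_BASE) >> 16) << 1;
	uint32_t s = (size >> 16) << 1;

	while (s) {
		/* Largest power-of-two block (2..256 words, i.e. 1..128
		 * pages) that fits the remaining size and the alignment
		 * of the current position. */
		unsigned block;
		int i;
		for (i = 7; i >= 0; i--) {
			block = 1u << (i + 1);
			if (s >= block && !(v & (block - 1)))
				break;
		}

		/* Which page table, and which (even) word within it. */
		unsigned pt  = (unsigned)(v >> 14);	/* 16384 words per table */
		unsigned pte = (unsigned)(v & 0x3ffe);

		printf("pt[%u] pte 0x%04x: block of %u pages (size hint %d)\n",
		       pt, pte, block >> 1, i);

		v += block;
		s -= block;
	}
	return 0;
}

Per the commit title, these helpers could be dropped because the new VM code now owns this bookkeeping; that is also what backs the guarantee in the commit message that a VRAM object's GPU virtual address stays constant.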