author		Linus Torvalds <torvalds@linux-foundation.org>	2010-02-23 21:13:34 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-02-23 21:13:34 -0500
commit		34e3f91b4e66e52b3e189b2f778bd37d68963ca8 (patch)
tree		83e7a0a2d1865ad413ac76a7a0883d4ed3d52dc8 /drivers/gpu
parent		9f3a6284880ceea452903e2043c88d7226736318 (diff)
parent		4e4ddd47774313accc86b233d6ca2c6a9037a671 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/vmwgfx: Fix queries if no dma buffer thrashing is occuring.
  drm/nv50: fix vram ptes on IGPs to point at stolen system memory
  drm/nv50: fix instmem binding on IGPs to point at stolen system memory
  drm/nv50: improve vram page table construction
  drm/nv50: more efficient clearing of gpu page table entries
  drm/nv50: make nv50_mem_vm_{bind,unbind} operate only on vram
  drm/nouveau: Fix up pre-nv17 analog load detection.
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h		1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mem.c		113
-rw-r--r--	drivers/gpu/drm/nouveau/nv04_dac.c		6
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_instmem.c		58
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c		108
5 files changed, 210 insertions, 76 deletions
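
The nv50 patches below all revolve around one page table entry layout: bit 0 marks the entry present, the 0x30 bits added for IGP chipsets retarget the page at stolen system memory offset by vram_sys_base, and the upper 32 bits carry the memory-type flags. A minimal standalone sketch of that encoding; the helper name and test values are illustrative, not from the tree:

/* sketch only: mirrors the PTE assembly in the diffs below */
#include <stdint.h>
#include <stdio.h>

static uint64_t nv50_make_pte(uint64_t phys, uint32_t flags,
			      uint64_t vram_sys_base)
{
	uint64_t pte = phys | ((uint64_t)flags << 32);

	pte |= 1;			/* bit 0: entry is present */
	if (vram_sys_base) {		/* IGP: target stolen sysmem */
		pte += vram_sys_base;
		pte |= 0x30;
	}
	return pte;
}

int main(void)
{
	/* discrete board: plain VRAM address */
	printf("0x%016llx\n",
	       (unsigned long long)nv50_make_pte(0x100000, 0x7, 0));
	/* IGP: same page redirected into stolen system memory */
	printf("0x%016llx\n",
	       (unsigned long long)nv50_make_pte(0x100000, 0x7, 0x80000000ULL));
	return 0;
}
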
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5445cefdd03e..1c15ef37b71c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -583,6 +583,7 @@ struct drm_nouveau_private {
 	uint64_t vm_end;
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
+	uint64_t vram_sys_base;
 
 	/* the mtrr covering the FB */
 	int fb_mtrr;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 8f3a12f614ed..2dc09dbd817d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -285,53 +285,50 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 		     uint32_t flags, uint64_t phys)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj **pgt;
-	unsigned psz, pfl, pages;
-
-	if (virt >= dev_priv->vm_gart_base &&
-	    (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
-		psz = 12;
-		pgt = &dev_priv->gart_info.sg_ctxdma;
-		pfl = 0x21;
-		virt -= dev_priv->vm_gart_base;
-	} else
-	if (virt >= dev_priv->vm_vram_base &&
-	    (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
-		psz = 16;
-		pgt = dev_priv->vm_vram_pt;
-		pfl = 0x01;
-		virt -= dev_priv->vm_vram_base;
-	} else {
-		NV_ERROR(dev, "Invalid address: 0x%16llx-0x%16llx\n",
-			 virt, virt + size - 1);
-		return -EINVAL;
-	}
-
-	pages = size >> psz;
+	struct nouveau_gpuobj *pgt;
+	unsigned block;
+	int i;
+
+	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
+	size = (size >> 16) << 1;
+
+	phys |= ((uint64_t)flags << 32);
+	phys |= 1;
+	if (dev_priv->vram_sys_base) {
+		phys += dev_priv->vram_sys_base;
+		phys |= 0x30;
+	}
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
-	if (flags & 0x80000000) {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
+	while (size) {
+		unsigned offset_h = upper_32_bits(phys);
+		unsigned offset_l = lower_32_bits(phys);
+		unsigned pte, end;
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 1);
+			if (size >= block && !(virt & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
 
-			nv_wo32(dev, pt, pte++, 0x00000000);
-			nv_wo32(dev, pt, pte++, 0x00000000);
+		phys += block << 15;
+		size -= block;
 
-			virt += (1 << psz);
-		}
-	} else {
-		while (pages--) {
-			struct nouveau_gpuobj *pt = pgt[virt >> 29];
-			unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
-			unsigned offset_h = upper_32_bits(phys) & 0xff;
-			unsigned offset_l = lower_32_bits(phys);
+		while (block) {
+			pgt = dev_priv->vm_vram_pt[virt >> 14];
+			pte = virt & 0x3ffe;
 
-			nv_wo32(dev, pt, pte++, offset_l | pfl);
-			nv_wo32(dev, pt, pte++, offset_h | flags);
+			end = pte + block;
+			if (end > 16384)
+				end = 16384;
+			block -= (end - pte);
+			virt += (end - pte);
 
-			phys += (1 << psz);
-			virt += (1 << psz);
+			while (pte < end) {
+				nv_wo32(dev, pgt, pte++, offset_l);
+				nv_wo32(dev, pgt, pte++, offset_h);
+			}
 		}
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -356,7 +353,41 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 void
 nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 {
-	nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *pgt;
+	unsigned pages, pte, end;
+
+	virt -= dev_priv->vm_vram_base;
+	pages = (size >> 16) << 1;
+
+	dev_priv->engine.instmem.prepare_access(dev, true);
+	while (pages) {
+		pgt = dev_priv->vm_vram_pt[virt >> 29];
+		pte = (virt & 0x1ffe0000ULL) >> 15;
+
+		end = pte + pages;
+		if (end > 16384)
+			end = 16384;
+		pages -= (end - pte);
+		virt += (end - pte) << 15;
+
+		while (pte < end)
+			nv_wo32(dev, pgt, pte++, 0);
+	}
+	dev_priv->engine.instmem.finish_access(dev);
+
+	nv_wr32(dev, 0x100c80, 0x00050001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+		return;
+	}
+
+	nv_wr32(dev, 0x100c80, 0x00000001);
+	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
+		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
+	}
 }
 
 /*
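
The rewritten nv50_mem_vm_bind above walks the mapping in the largest naturally aligned power-of-two runs it can find (2 to 256 PTE words, i.e. 1 to 128 of the 64 KiB pages), recording the run order in bits 7 and up of the low PTE word so the hardware can treat the run as one large page. A standalone sketch of just that block-selection loop, with made-up inputs:

/* sketch only: virt/size are in PTE-word units (two words per page),
 * as in the driver after the ">> 16) << 1" conversion */
#include <stdio.h>

int main(void)
{
	unsigned virt = 6, size = 40, block;
	int i;

	while (size) {
		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 1);
			if (size >= block && !(virt & (block - 1)))
				break;
		}
		printf("virt=%u block=%u (order %d)\n", virt, block, i);
		virt += block;
		size -= block;
	}
	return 0;
}
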
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d0e038d28948..1d73b15d70da 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -119,7 +119,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 				       struct drm_connector *connector)
 {
 	struct drm_device *dev = encoder->dev;
-	uint8_t saved_seq1, saved_pi, saved_rpc1;
+	uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
 	uint8_t saved_palette0[3], saved_palette_mask;
 	uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
 	int i;
@@ -135,6 +135,9 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
 	/* only implemented for head A for now */
 	NVSetOwner(dev, 0);
 
+	saved_cr_mode = NVReadVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode | 0x80);
+
 	saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
 
@@ -203,6 +206,7 @@ out:
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
 	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
 	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
+	NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
 
 	if (blue == 0x18) {
 		NV_INFO(dev, "Load detected on head A\n");
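
The load-detection fix above is a plain save/modify/restore: bit 7 of the VGA CRTC mode-control register blanks the display while test levels are driven onto the DAC, and the saved value is put back on the way out. A self-contained sketch of that pattern; the faked register file and helper names stand in for the real hardware accessors:

/* sketch only: the array replaces real CRTC register I/O */
#include <stdint.h>
#include <stdio.h>

static uint8_t vga_crtc[0x100];		/* fake CRTC register file */
#define CR_MODE_INDEX 0x17		/* VGA mode-control register */

static uint8_t crtc_read(uint8_t idx)             { return vga_crtc[idx]; }
static void    crtc_write(uint8_t idx, uint8_t v) { vga_crtc[idx] = v; }

int main(void)
{
	uint8_t saved = crtc_read(CR_MODE_INDEX);

	crtc_write(CR_MODE_INDEX, saved | 0x80);	/* blank the screen */
	/* ... drive test levels onto the DAC and sample them here ... */
	crtc_write(CR_MODE_INDEX, saved);		/* restore on exit */

	printf("mode-control restored to 0x%02x\n", crtc_read(CR_MODE_INDEX));
	return 0;
}
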
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 94400f777e7f..f0dc4e36ef05 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -76,6 +76,11 @@ nv50_instmem_init(struct drm_device *dev)
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
 
+	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
+	else
+		dev_priv->vram_sys_base = 0;
+
 	/* Reserve the last MiB of VRAM, we should probably try to avoid
 	 * setting up the below tables over the top of the VBIOS image at
 	 * some point.
@@ -172,16 +177,28 @@ nv50_instmem_init(struct drm_device *dev)
 	 * We map the entire fake channel into the start of the PRAMIN BAR
 	 */
 	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
 				     0, &priv->pramin_pt);
 	if (ret)
 		return ret;
 
-	for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
-		if (v < (c_offset + c_size))
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
-		else
-			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+	v = c_offset | 1;
+	if (dev_priv->vram_sys_base) {
+		v += dev_priv->vram_sys_base;
+		v |= 0x30;
+	}
+
+	i = 0;
+	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		v += 0x1000;
+		i += 8;
+	}
+
+	while (i < pt_size) {
+		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
 		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+		i += 8;
 	}
 
 	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
@@ -416,7 +433,9 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end, vram;
+	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
+	uint32_t pte, pte_end;
+	uint64_t vram;
 
 	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
 		return -EINVAL;
@@ -424,20 +443,24 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
 		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 	vram    = gpuobj->im_backing_start;
 
 	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
 		 gpuobj->im_pramin->start, pte, pte_end);
 	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
 
+	vram |= 1;
+	if (dev_priv->vram_sys_base) {
+		vram += dev_priv->vram_sys_base;
+		vram |= 0x30;
+	}
+
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-
-		pte += 8;
+		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
+		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
 		vram += NV50_INSTMEM_PAGE_SIZE;
 	}
 	dev_priv->engine.instmem.finish_access(dev);
@@ -470,14 +493,13 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
 	if (gpuobj->im_bound == 0)
 		return -EINVAL;
 
-	pte     = (gpuobj->im_pramin->start >> 12) << 3;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+	pte     = (gpuobj->im_pramin->start >> 12) << 1;
+	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
 
 	dev_priv->engine.instmem.prepare_access(dev, true);
 	while (pte < pte_end) {
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
-		nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
-		pte += 8;
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
+		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
 	}
 	dev_priv->engine.instmem.finish_access(dev);
 
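
The instmem conversion above switches the PRAMIN page table from 8-byte offsets (the old << 3 with /4 on each write) to 32-bit word indices (<< 1) and writes each 64-bit entry as a lower/upper word pair, which is what lets vram widen to uint64_t. A standalone sketch of that split; the two macros here simply mirror the kernel's lower_32_bits/upper_32_bits helpers:

/* sketch only: pt[] stands in for the PRAMIN page table object */
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	uint32_t pt[8] = { 0 };			/* fake page table */
	uint64_t vram = 0x1ff00000, sys_base = 0x80000000ULL;
	unsigned pte = 0;

	vram |= 1;				/* present */
	if (sys_base) {				/* IGP stolen memory */
		vram += sys_base;
		vram |= 0x30;
	}
	pt[pte++] = lower_32_bits(vram);
	pt[pte++] = upper_32_bits(vram);

	printf("pte[0]=0x%08x pte[1]=0x%08x\n", pt[0], pt[1]);
	return 0;
}
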
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index d69caf92ffe7..0897359b3e4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -182,25 +182,19 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
-static int vmw_cmd_dma(struct vmw_private *dev_priv,
-		       struct vmw_sw_context *sw_context,
-		       SVGA3dCmdHeader *header)
+static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+				   struct vmw_sw_context *sw_context,
+				   SVGAGuestPtr *ptr,
+				   struct vmw_dma_buffer **vmw_bo_p)
 {
-	uint32_t handle;
 	struct vmw_dma_buffer *vmw_bo = NULL;
 	struct ttm_buffer_object *bo;
-	struct vmw_surface *srf = NULL;
-	struct vmw_dma_cmd {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdSurfaceDMA dma;
-	} *cmd;
+	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
-	int ret;
 	uint32_t cur_validate_node;
 	struct ttm_validate_buffer *val_buf;
+	int ret;
 
-	cmd = container_of(header, struct vmw_dma_cmd, header);
-	handle = cmd->dma.guest.ptr.gmrId;
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
@@ -209,14 +203,14 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	bo = &vmw_bo->base;
 
 	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
-		DRM_ERROR("Max number of DMA commands per submission"
+		DRM_ERROR("Max number relocations per submission"
 			  " exceeded\n");
 		ret = -EINVAL;
 		goto out_no_reloc;
 	}
 
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
-	reloc->location = &cmd->dma.guest.ptr;
+	reloc->location = ptr;
 
 	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
 	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
@@ -234,7 +228,89 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
+	*vmw_bo_p = vmw_bo;
+	return 0;
+
+out_no_reloc:
+	vmw_dmabuf_unreference(&vmw_bo);
+	vmw_bo_p = NULL;
+	return ret;
+}
+
+static int vmw_cmd_end_query(struct vmw_private *dev_priv,
+			     struct vmw_sw_context *sw_context,
+			     SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdEndQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
 
+static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
+			      struct vmw_sw_context *sw_context,
+			      SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	struct vmw_query_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery q;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_query_cmd, header);
+	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->q.guestResult,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+	return 0;
+}
+
+
+static int vmw_cmd_dma(struct vmw_private *dev_priv,
+		       struct vmw_sw_context *sw_context,
+		       SVGA3dCmdHeader *header)
+{
+	struct vmw_dma_buffer *vmw_bo = NULL;
+	struct ttm_buffer_object *bo;
+	struct vmw_surface *srf = NULL;
+	struct vmw_dma_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSurfaceDMA dma;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_dma_cmd, header);
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->dma.guest.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	bo = &vmw_bo->base;
 	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
 					     cmd->dma.host.sid, &srf);
 	if (ret) {
@@ -379,8 +455,8 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
-	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
+	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
+	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
 		    &vmw_cmd_blt_surf_screen_check)
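
The vmwgfx change factors guest-pointer translation out of vmw_cmd_dma() into vmw_translate_guest_ptr() so the new end-query and wait-query handlers can share it, then swaps those handlers into the command dispatch table in place of the bare context check. A standalone sketch of that designated-initializer dispatch pattern, with hypothetical names:

/* sketch only: stand-in handlers and command IDs */
#include <stdio.h>

typedef int (*cmd_func)(void *ctx);

static int cmd_cid_check(void *ctx)  { (void)ctx; return 0; }
static int cmd_end_query(void *ctx)  { (void)ctx; puts("end query");  return 0; }
static int cmd_wait_query(void *ctx) { (void)ctx; puts("wait query"); return 0; }

enum { CMD_BEGIN_QUERY, CMD_END_QUERY, CMD_WAIT_FOR_QUERY, CMD_MAX };

static cmd_func cmd_funcs[CMD_MAX] = {
	[CMD_BEGIN_QUERY]    = cmd_cid_check,	/* still just a context check */
	[CMD_END_QUERY]      = cmd_end_query,	/* previously cmd_cid_check */
	[CMD_WAIT_FOR_QUERY] = cmd_wait_query,	/* previously cmd_cid_check */
};

int main(void)
{
	return cmd_funcs[CMD_END_QUERY](NULL) |
	       cmd_funcs[CMD_WAIT_FOR_QUERY](NULL);
}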