author    Linus Torvalds <torvalds@linux-foundation.org>    2016-10-21 12:14:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-10-21 12:14:35 -0400
commit    ec366cd195d0f3dcfb851b5574901c0dbc51b75a
tree      c8b76fe4a2caa1534b565e9b34535056184afa20
parent    6f33d6458e35d6ba53c2635ee4b8a3177cbd912d
parent    26beaee9bb07be20cc641c1251152e280e80f54e
Merge tag 'drm-fixes-for-v4.9-rc2-part2' of git://people.freedesktop.org/~airlied/linux
Pull more drm fixes from Dave Airlie:
 "Mainly some vmwgfx fixes, but also some fixes for armada, etnaviv and
  fsl-dcu"

* tag 'drm-fixes-for-v4.9-rc2-part2' of git://people.freedesktop.org/~airlied/linux:
  drm/fsl-dcu: enable pixel clock when enabling CRTC
  drm/fsl-dcu: do not transfer registers in mode_set_nofb
  drm/fsl-dcu: do not transfer registers on plane init
  drm/fsl-dcu: enable TCON bypass mode by default
  drm/vmwgfx: Adjust checks for null pointers in 13 functions
  drm/vmwgfx: Use memdup_user() rather than duplicating its implementation
  drm/vmwgfx: Use kmalloc_array() in vmw_surface_define_ioctl()
  drm/vmwgfx: Avoid validating views on view destruction
  drm/vmwgfx: Limit the user-space command buffer size
  drm/vmwgfx: Remove a leftover debug printout
  drm/vmwgfx: Allow resource relocations on byte boundaries
  drm/vmwgfx: Enable SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
  drm/vmwgfx: Remove call to reservation_object_test_signaled_rcu before wait
  drm/vmwgfx: Replace numeric parameter like 0444 with macro
  drm/etnaviv: block 64K of address space behind each cmdstream
  drm/etnaviv: ensure write caches are flushed at end of user cmdstream
  drm/armada: fix clock counts
 drivers/gpu/drm/armada/armada_crtc.c        |  18
 drivers/gpu/drm/etnaviv/etnaviv_buffer.c    |  24
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c       |   3
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c  |   4
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c   |  23
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c |   5
 drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c   |  39
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |  10
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h         |   2
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     | 145
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    |   6
 drivers/gpu/drm/vmwgfx/vmwgfx_surface.c     |  56
 12 files changed, 187 insertions(+), 148 deletions(-)
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 2f58e9e2a59c..a51f8cbcfe26 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -332,17 +332,19 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
-	if (dcrtc->dpms != dpms) {
-		dcrtc->dpms = dpms;
-		if (!IS_ERR(dcrtc->clk) && !dpms_blanked(dpms))
-			WARN_ON(clk_prepare_enable(dcrtc->clk));
-		armada_drm_crtc_update(dcrtc);
-		if (!IS_ERR(dcrtc->clk) && dpms_blanked(dpms))
-			clk_disable_unprepare(dcrtc->clk);
+	if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
 		if (dpms_blanked(dpms))
 			armada_drm_vblank_off(dcrtc);
-		else
+		else if (!IS_ERR(dcrtc->clk))
+			WARN_ON(clk_prepare_enable(dcrtc->clk));
+		dcrtc->dpms = dpms;
+		armada_drm_crtc_update(dcrtc);
+		if (!dpms_blanked(dpms))
 			drm_crtc_vblank_on(&dcrtc->crtc);
+		else if (!IS_ERR(dcrtc->clk))
+			clk_disable_unprepare(dcrtc->clk);
+	} else if (dcrtc->dpms != dpms) {
+		dcrtc->dpms = dpms;
 	}
 }
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index cb86c7e5495c..d9230132dfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -329,20 +329,34 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 	/*
 	 * Append a LINK to the submitted command buffer to return to
 	 * the ring buffer. return_target is the ring target address.
-	 * We need three dwords: event, wait, link.
+	 * We need at most 7 dwords in the return target: 2 cache flush +
+	 * 2 semaphore stall + 1 event + 1 wait + 1 link.
 	 */
-	return_dwords = 3;
+	return_dwords = 7;
 	return_target = etnaviv_buffer_reserve(gpu, buffer, return_dwords);
 	CMD_LINK(cmdbuf, return_dwords, return_target);
 
 	/*
-	 * Append event, wait and link pointing back to the wait
-	 * command to the ring buffer.
+	 * Append a cache flush, stall, event, wait and link pointing back to
+	 * the wait command to the ring buffer.
 	 */
+	if (gpu->exec_state == ETNA_PIPE_2D) {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_PE2D);
+	} else {
+		CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
+			       VIVS_GL_FLUSH_CACHE_DEPTH |
+			       VIVS_GL_FLUSH_CACHE_COLOR);
+		CMD_LOAD_STATE(buffer, VIVS_TS_FLUSH_CACHE,
+			       VIVS_TS_FLUSH_CACHE_FLUSH);
+	}
+	CMD_SEM(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
+	CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
 	CMD_WAIT(buffer);
-	CMD_LINK(buffer, 2, return_target + 8);
+	CMD_LINK(buffer, 2, etnaviv_iommu_get_cmdbuf_va(gpu, buffer) +
+			    buffer->user_size - 4);
 
 	if (drm_debug & DRM_UT_DRIVER)
 		pr_info("stream link to 0x%08x @ 0x%08x %p\n",
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d3796ed8d8c5..169ac96e8f08 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -330,7 +330,8 @@ u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
 		return (u32)buf->vram_node.start;
 
 	mutex_lock(&mmu->lock);
-	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node, buf->size);
+	ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
+				      buf->size + SZ_64K);
 	if (ret < 0) {
 		mutex_unlock(&mmu->lock);
 		return 0;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 3371635cd4d7..b2d5e188b1b8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -51,6 +51,7 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
 			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
 	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
 		     DCU_UPDATE_MODE_READREG);
+	clk_disable_unprepare(fsl_dev->pix_clk);
 }
 
 static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
@@ -58,6 +59,7 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
 
+	clk_prepare_enable(fsl_dev->pix_clk);
 	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
 			   DCU_MODE_DCU_MODE_MASK,
 			   DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
@@ -116,8 +118,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 		     DCU_THRESHOLD_LS_BF_VS(BF_VS_VAL) |
 		     DCU_THRESHOLD_OUT_BUF_HIGH(BUF_MAX_VAL) |
 		     DCU_THRESHOLD_OUT_BUF_LOW(BUF_MIN_VAL));
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 	return;
 }
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0884c45aefe8..e04efbed1a54 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -267,12 +267,8 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 		return ret;
 	}
 
-	ret = clk_prepare_enable(fsl_dev->pix_clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable pix clk\n");
-		goto disable_dcu_clk;
-	}
-
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
 	fsl_dcu_drm_init_planes(fsl_dev->drm);
 	drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
 
@@ -284,10 +280,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
 	enable_irq(fsl_dev->irq);
 
 	return 0;
-
-disable_dcu_clk:
-	clk_disable_unprepare(fsl_dev->clk);
-	return ret;
 }
 #endif
 
@@ -401,18 +393,12 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 		goto disable_clk;
 	}
 
-	ret = clk_prepare_enable(fsl_dev->pix_clk);
-	if (ret < 0) {
-		dev_err(dev, "failed to enable pix clk\n");
-		goto unregister_pix_clk;
-	}
-
 	fsl_dev->tcon = fsl_tcon_init(dev);
 
 	drm = drm_dev_alloc(driver, dev);
 	if (IS_ERR(drm)) {
 		ret = PTR_ERR(drm);
-		goto disable_pix_clk;
+		goto unregister_pix_clk;
 	}
 
 	fsl_dev->dev = dev;
@@ -433,8 +419,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
 
 unref:
 	drm_dev_unref(drm);
-disable_pix_clk:
-	clk_disable_unprepare(fsl_dev->pix_clk);
 unregister_pix_clk:
 	clk_unregister(fsl_dev->pix_clk);
 disable_clk:
@@ -447,7 +431,6 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev)
 	struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
 
 	clk_disable_unprepare(fsl_dev->clk);
-	clk_disable_unprepare(fsl_dev->pix_clk);
 	clk_unregister(fsl_dev->pix_clk);
 	drm_put_dev(fsl_dev->drm);
 
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index a7e5486bd1e9..9e6f7d8112b3 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -211,11 +211,6 @@ void fsl_dcu_drm_init_planes(struct drm_device *dev)
 		for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
 			regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
 	}
-	regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
-			   DCU_MODE_DCU_MODE_MASK,
-			   DCU_MODE_DCU_MODE(DCU_MODE_OFF));
-	regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
-		     DCU_UPDATE_MODE_READREG);
 }
 
 struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 26edcc899712..e1dd75b18118 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -20,38 +20,6 @@
 #include "fsl_dcu_drm_drv.h"
 #include "fsl_tcon.h"
 
-static int
-fsl_dcu_drm_encoder_atomic_check(struct drm_encoder *encoder,
-				 struct drm_crtc_state *crtc_state,
-				 struct drm_connector_state *conn_state)
-{
-	return 0;
-}
-
-static void fsl_dcu_drm_encoder_disable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	if (fsl_dev->tcon)
-		fsl_tcon_bypass_disable(fsl_dev->tcon);
-}
-
-static void fsl_dcu_drm_encoder_enable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
-	if (fsl_dev->tcon)
-		fsl_tcon_bypass_enable(fsl_dev->tcon);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-	.atomic_check = fsl_dcu_drm_encoder_atomic_check,
-	.disable = fsl_dcu_drm_encoder_disable,
-	.enable = fsl_dcu_drm_encoder_enable,
-};
-
 static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
 {
 	drm_encoder_cleanup(encoder);
@@ -68,13 +36,16 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
 	int ret;
 
 	encoder->possible_crtcs = 1;
+
+	/* Use bypass mode for parallel RGB/LVDS encoder */
+	if (fsl_dev->tcon)
+		fsl_tcon_bypass_enable(fsl_dev->tcon);
+
 	ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
 			       DRM_MODE_ENCODER_LVDS, NULL);
 	if (ret < 0)
 		return ret;
 
-	drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e8ae3dc476d1..18061a4bc2f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -241,15 +241,15 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 			    void *ptr);
 
 MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
-module_param_named(enable_fbdev, enable_fbdev, int, 0600);
+module_param_named(enable_fbdev, enable_fbdev, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
-module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
+module_param_named(force_dma_api, vmw_force_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
-module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
+module_param_named(restrict_iommu, vmw_restrict_iommu, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
-module_param_named(force_coherent, vmw_force_coherent, int, 0600);
+module_param_named(force_coherent, vmw_force_coherent, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
 MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
 module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 070d750af16d..1e59a486bba8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,7 +43,7 @@
 
 #define VMWGFX_DRIVER_DATE "20160210"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 10
+#define VMWGFX_DRIVER_MINOR 11
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dc5beff2b4aa..c7b53d987f06 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -35,17 +35,37 @@
 #define VMW_RES_HT_ORDER 12
 
 /**
+ * enum vmw_resource_relocation_type - Relocation type for resources
+ *
+ * @vmw_res_rel_normal: Traditional relocation. The resource id in the
+ * command stream is replaced with the actual id after validation.
+ * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
+ * with a NOP.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
+ * after validation is -1, the command is replaced with a NOP. Otherwise no
+ * action.
+ */
+enum vmw_resource_relocation_type {
+	vmw_res_rel_normal,
+	vmw_res_rel_nop,
+	vmw_res_rel_cond_nop,
+	vmw_res_rel_max
+};
+
+/**
  * struct vmw_resource_relocation - Relocation info for resources
  *
  * @head: List head for the software context's relocation list.
  * @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of 4 byte entries into the command buffer where the
+ * @offset: Offset of single byte entries into the command buffer where the
  * id that needs fixup is located.
+ * @rel_type: Type of relocation.
  */
 struct vmw_resource_relocation {
 	struct list_head head;
 	const struct vmw_resource *res;
-	unsigned long offset;
+	u32 offset:29;
+	enum vmw_resource_relocation_type rel_type:3;
 };
 
 /**
@@ -109,7 +129,18 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 				   struct vmw_dma_buffer *vbo,
 				   bool validate_as_mob,
 				   uint32_t *p_val_node);
-
+/**
+ * vmw_ptr_diff - Compute the offset from a to b in bytes
+ *
+ * @a: A starting pointer.
+ * @b: A pointer offset in the same address space.
+ *
+ * Returns: The offset in bytes between the two pointers.
+ */
+static size_t vmw_ptr_diff(void *a, void *b)
+{
+	return (unsigned long) b - (unsigned long) a;
+}
 
 /**
  * vmw_resources_unreserve - unreserve resources previously reserved for
@@ -409,11 +440,14 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
  * @list: Pointer to head of relocation list.
  * @res: The resource.
  * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is 4 bytes.
+ * id that needs fixup is located. Granularity is one byte.
+ * @rel_type: Relocation type.
  */
 static int vmw_resource_relocation_add(struct list_head *list,
 				       const struct vmw_resource *res,
-				       unsigned long offset)
+				       unsigned long offset,
+				       enum vmw_resource_relocation_type
+				       rel_type)
 {
 	struct vmw_resource_relocation *rel;
 
@@ -425,6 +459,7 @@ static int vmw_resource_relocation_add(struct list_head *list,
 
 	rel->res = res;
 	rel->offset = offset;
+	rel->rel_type = rel_type;
 	list_add_tail(&rel->head, list);
 
 	return 0;
@@ -459,11 +494,24 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
+	/* Validate the struct vmw_resource_relocation member size */
+	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
+	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
+
 	list_for_each_entry(rel, list, head) {
-		if (likely(rel->res != NULL))
-			cb[rel->offset] = rel->res->id;
-		else
-			cb[rel->offset] = SVGA_3D_CMD_NOP;
+		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
+		switch (rel->rel_type) {
+		case vmw_res_rel_normal:
+			*addr = rel->res->id;
+			break;
+		case vmw_res_rel_nop:
+			*addr = SVGA_3D_CMD_NOP;
+			break;
+		default:
+			if (rel->res->id == -1)
+				*addr = SVGA_3D_CMD_NOP;
+			break;
+		}
 	}
 }
 
@@ -655,7 +703,9 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 	*p_val = NULL;
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id_loc - sw_context->buf_start);
+					  vmw_ptr_diff(sw_context->buf_start,
+						       id_loc),
+					  vmw_res_rel_normal);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -721,7 +771,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id_loc - sw_context->buf_start);
+			 vmw_ptr_diff(sw_context->buf_start, id_loc),
+			 vmw_res_rel_normal);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
@@ -2143,10 +2194,10 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
 		return ret;
 
 	return vmw_resource_relocation_add(&sw_context->res_relocations,
-					   NULL, &cmd->header.id -
-					   sw_context->buf_start);
-
-	return 0;
+					   NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
 }
 
 /**
@@ -2188,10 +2239,10 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
 		return ret;
 
 	return vmw_resource_relocation_add(&sw_context->res_relocations,
-					   NULL, &cmd->header.id -
-					   sw_context->buf_start);
-
-	return 0;
+					   NULL,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_nop);
 }
 
 /**
@@ -2848,8 +2899,7 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this
- * command batch, make sure it's validated (present in the device) so that
- * the remove command will not confuse the device.
+ * command batch, conditionally make this command a NOP.
 */
 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 				  struct vmw_sw_context *sw_context,
@@ -2877,10 +2927,16 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
 		return ret;
 
 	/*
-	 * Add view to the validate list iff it was not created using this
-	 * command batch.
+	 * If the view wasn't created during this command batch, it might
+	 * have been removed due to a context swapout, so add a
+	 * relocation to conditionally make this command a NOP to avoid
+	 * device errors.
 	 */
-	return vmw_view_res_val_add(sw_context, view);
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   view,
+					   vmw_ptr_diff(sw_context->buf_start,
+							&cmd->header.id),
+					   vmw_res_rel_cond_nop);
 }
 
 /**
@@ -3029,6 +3085,35 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
 					 cmd->body.shaderResourceViewId);
 }
 
+/**
+ * vmw_cmd_dx_transfer_from_buffer -
+ * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
+					   struct vmw_sw_context *sw_context,
+					   SVGA3dCmdHeader *header)
+{
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDXTransferFromBuffer body;
+	} *cmd = container_of(header, typeof(*cmd), header);
+	int ret;
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				user_surface_converter,
+				&cmd->body.srcSid, NULL);
+	if (ret != 0)
+		return ret;
+
+	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+				 user_surface_converter,
+				 &cmd->body.destSid, NULL);
+}
+
 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 				struct vmw_sw_context *sw_context,
 				void *buf, uint32_t *size)
@@ -3379,6 +3464,9 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    &vmw_cmd_buffer_copy_check, true, false, true),
 	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
 		    &vmw_cmd_pred_copy_check, true, false, true),
+	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
+		    &vmw_cmd_dx_transfer_from_buffer,
+		    true, false, true),
 };
 
 static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -3848,14 +3936,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
 	int ret;
 
 	*header = NULL;
-	if (!dev_priv->cman || kernel_commands)
-		return kernel_commands;
-
 	if (command_size > SVGA_CB_MAX_SIZE) {
 		DRM_ERROR("Command buffer is too large.\n");
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (!dev_priv->cman || kernel_commands)
+		return kernel_commands;
+
 	/* If possible, add a little space for fencing. */
 	cmdbuf_size = command_size + 512;
 	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
@@ -4232,9 +4320,6 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	ttm_bo_unref(&query_val.bo);
 	ttm_bo_unref(&pinned_val.bo);
 	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
-	DRM_INFO("Dummy query bo pin count: %d\n",
-		 dev_priv->dummy_query_bo->pin_count);
-
 out_unlock:
 	return;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6a328d507a28..52ca1c9d070e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -574,10 +574,8 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
 	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 	long lret;
 
-	if (nonblock)
-		return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;
-
-	lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
+	lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
+			nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 	if (!lret)
 		return -EBUSY;
 	else if (lret < 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c2a721a8cef9..b445ce9b9757 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -324,7 +324,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 	if (res->id != -1) {
 
 		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
-		if (unlikely(cmd == NULL)) {
+		if (unlikely(!cmd)) {
 			DRM_ERROR("Failed reserving FIFO space for surface "
 				  "destruction.\n");
 			return;
@@ -397,7 +397,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
 
 	submit_size = vmw_surface_define_size(srf);
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
 		ret = -ENOMEM;
@@ -446,11 +446,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
 	uint8_t *cmd;
 	struct vmw_private *dev_priv = res->dev_priv;
 
-	BUG_ON(val_buf->bo == NULL);
-
+	BUG_ON(!val_buf->bo);
 	submit_size = vmw_surface_dma_size(srf);
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "DMA.\n");
 		return -ENOMEM;
@@ -538,7 +537,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
 
 	submit_size = vmw_surface_destroy_size();
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "eviction.\n");
 		return -ENOMEM;
@@ -578,7 +577,7 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 	int ret;
 	struct vmw_resource *res = &srf->res;
 
-	BUG_ON(res_free == NULL);
+	BUG_ON(!res_free);
 	if (!dev_priv->has_mob)
 		vmw_fifo_resource_inc(dev_priv);
 	ret = vmw_resource_init(dev_priv, res, true, res_free,
@@ -700,7 +699,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_surface_create_req *req = &arg->req;
 	struct drm_vmw_surface_arg *rep = &arg->rep;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct drm_vmw_size __user *user_sizes;
 	int ret;
 	int i, j;
 	uint32_t cur_bo_offset;
@@ -748,7 +746,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	}
 
 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
+	if (unlikely(!user_srf)) {
 		ret = -ENOMEM;
 		goto out_no_user_srf;
 	}
@@ -763,29 +761,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
 	srf->num_sizes = num_sizes;
 	user_srf->size = size;
-
-	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
-	if (unlikely(srf->sizes == NULL)) {
-		ret = -ENOMEM;
+	srf->sizes = memdup_user((struct drm_vmw_size __user *)(unsigned long)
+				 req->size_addr,
+				 sizeof(*srf->sizes) * srf->num_sizes);
+	if (IS_ERR(srf->sizes)) {
+		ret = PTR_ERR(srf->sizes);
 		goto out_no_sizes;
 	}
-	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
-			       GFP_KERNEL);
-	if (unlikely(srf->offsets == NULL)) {
+	srf->offsets = kmalloc_array(srf->num_sizes,
+				     sizeof(*srf->offsets),
+				     GFP_KERNEL);
+	if (unlikely(!srf->offsets)) {
 		ret = -ENOMEM;
 		goto out_no_offsets;
 	}
 
-	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
-	    req->size_addr;
-
-	ret = copy_from_user(srf->sizes, user_sizes,
-			     srf->num_sizes * sizeof(*srf->sizes));
-	if (unlikely(ret != 0)) {
-		ret = -EFAULT;
-		goto out_no_copy;
-	}
-
 	srf->base_size = *srf->sizes;
 	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
 	srf->multisample_count = 0;
@@ -923,7 +913,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
 
 	ret = -EINVAL;
 	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
-	if (unlikely(base == NULL)) {
+	if (unlikely(!base)) {
 		DRM_ERROR("Could not find surface to reference.\n");
 		goto out_no_lookup;
 	}
@@ -1069,7 +1059,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
 
 	cmd = vmw_fifo_reserve(dev_priv, submit_len);
 	cmd2 = (typeof(cmd2))cmd;
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "creation.\n");
 		ret = -ENOMEM;
@@ -1135,7 +1125,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
 	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
 
 	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd1 == NULL)) {
+	if (unlikely(!cmd1)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "binding.\n");
 		return -ENOMEM;
@@ -1185,7 +1175,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
 
 	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
 	cmd = vmw_fifo_reserve(dev_priv, submit_size);
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "unbinding.\n");
 		return -ENOMEM;
@@ -1244,7 +1234,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 	vmw_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL)) {
+	if (unlikely(!cmd)) {
 		DRM_ERROR("Failed reserving FIFO space for surface "
 			  "destruction.\n");
 		mutex_unlock(&dev_priv->binding_mutex);
@@ -1410,7 +1400,7 @@ int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
 
 	user_srf = container_of(base, struct vmw_user_surface, prime.base);
 	srf = &user_srf->srf;
-	if (srf->res.backup == NULL) {
+	if (!srf->res.backup) {
 		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
 		goto out_bad_resource;
 	}
@@ -1524,7 +1514,7 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
 	}
 
 	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
-	if (unlikely(user_srf == NULL)) {
+	if (unlikely(!user_srf)) {
 		ret = -ENOMEM;
 		goto out_no_user_srf;
 	}