author		Thomas Hellstrom <thellstrom@vmware.com>	2018-09-26 09:34:50 -0400
committer	Thomas Hellstrom <thellstrom@vmware.com>	2018-09-27 09:21:36 -0400
commit		2724b2d54cdad7de12e53e7ff2666822bafeae2a (patch)
tree		d65388f9eab9731e94207038726426a95d2d9e09
parent		9c079b8ce8bf8e0394149eb39c78b04285644bcc (diff)
drm/vmwgfx: Use new validation interface for the modesetting code v2
Strip the old KMS helpers and use the new validation interface in the
modesetting code as well.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com> #v1
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
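Editor's note: the conversion gives every caller the same shape. A minimal
sketch of the new pattern, condensed from the hunks below (identifiers are
taken from the patch itself; error handling is abbreviated and no single
function looks exactly like this):

	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
	int ret;

	/* Collect what needs reserving/validating... */
	ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
	if (ret)
		return ret;

	/* ...then reserve and validate it all in one step. */
	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
	if (ret)
		goto out_unref;

	/* ... build and submit the KMS commands ... */

	/* Post-submission cleanup and fencing (see the new helper below). */
	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
					 NULL);
	return ret;

out_unref:
	vmw_validation_unref_lists(&val_ctx);
	return ret;

This single context replaces the old per-object prepare/revert/finish
helper pairs that the first hunk removes.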
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  | 199
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h  |  24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c |  48
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c |  31
4 files changed, 86 insertions(+), 216 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ab424358b8cb..05fb16733c5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2557,88 +2557,31 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
- * command submission.
- *
- * @dev_priv. Pointer to a device private structure.
- * @buf: The buffer object
- * @interruptible: Whether to perform waits as interruptible.
- * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
- * The buffer will be validated as a GMR. Already pinned buffers will not be
- * validated.
- *
- * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
- * interrupted by a signal.
+ * vmw_kms_helper_validation_finish - Helper for post KMS command submission
+ * cleanup and fencing
+ * @dev_priv: Pointer to the device-private struct
+ * @file_priv: Pointer identifying the client when user-space fencing is used
+ * @ctx: Pointer to the validation context
+ * @out_fence: If non-NULL, returned refcounted fence-pointer
+ * @user_fence_rep: If non-NULL, pointer to user-space address area
+ * in which to copy user-space fence info
  */
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-				  struct vmw_buffer_object *buf,
-				  bool interruptible,
-				  bool validate_as_mob,
-				  bool for_cpu_blit)
-{
-	struct ttm_operation_ctx ctx = {
-		.interruptible = interruptible,
-		.no_wait_gpu = false};
-	struct ttm_buffer_object *bo = &buf->base;
-	int ret;
-
-	ttm_bo_reserve(bo, false, false, NULL);
-	if (for_cpu_blit)
-		ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
-	else
-		ret = vmw_validation_bo_validate_single(bo, interruptible,
-							validate_as_mob);
-	if (ret)
-		ttm_bo_unreserve(bo);
-
-	return ret;
-}
-
-/**
- * vmw_kms_helper_buffer_revert - Undo the actions of
- * vmw_kms_helper_buffer_prepare.
- *
- * @res: Pointer to the buffer object.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_buffer_prepare.
- */
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf)
-{
-	if (buf)
-		ttm_bo_unreserve(&buf->base);
-}
-
-/**
- * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
- * kms command submission.
- *
- * @dev_priv: Pointer to a device private structure.
- * @file_priv: Pointer to a struct drm_file representing the caller's
- * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
- * if non-NULL, @user_fence_rep must be non-NULL.
- * @buf: The buffer object.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- * @user_fence_rep: Optional pointer to a user-space provided struct
- * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
- * function copies fence data to user-space in a fail-safe manner.
- */
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-				  struct drm_file *file_priv,
-				  struct vmw_buffer_object *buf,
-				  struct vmw_fence_obj **out_fence,
-				  struct drm_vmw_fence_rep __user *
-				  user_fence_rep)
-{
-	struct vmw_fence_obj *fence;
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+				      struct drm_file *file_priv,
+				      struct vmw_validation_context *ctx,
+				      struct vmw_fence_obj **out_fence,
+				      struct drm_vmw_fence_rep __user *
+				      user_fence_rep)
+{
+	struct vmw_fence_obj *fence = NULL;
 	uint32_t handle;
 	int ret;
 
-	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
-					 file_priv ? &handle : NULL);
-	if (buf)
-		vmw_bo_fence_single(&buf->base, fence);
+	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
+	    out_fence)
+		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+						 file_priv ? &handle : NULL);
+	vmw_validation_done(ctx, fence);
 	if (file_priv)
 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
 					    ret, user_fence_rep, fence,
@@ -2647,106 +2590,6 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
 		*out_fence = fence;
 	else
 		vmw_fence_obj_unreference(&fence);
-
-	vmw_kms_helper_buffer_revert(buf);
-}
-
-
-/**
- * vmw_kms_helper_resource_revert - Undo the actions of
- * vmw_kms_helper_resource_prepare.
- *
- * @res: Pointer to the resource. Typically a surface.
- *
- * Helper to be used if an error forces the caller to undo the actions of
- * vmw_kms_helper_resource_prepare.
- */
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
-{
-	struct vmw_resource *res = ctx->res;
-
-	vmw_kms_helper_buffer_revert(ctx->buf);
-	vmw_bo_unreference(&ctx->buf);
-	vmw_resource_unreserve(res, false, NULL, 0);
-	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-}
-
-/**
- * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
- * command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @interruptible: Whether to perform waits as interruptible.
- *
- * Reserves and validates also the backup buffer if a guest-backed resource.
- * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
- * interrupted by a signal.
- */
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible,
-				    struct vmw_validation_ctx *ctx)
-{
-	int ret = 0;
-
-	ctx->buf = NULL;
-	ctx->res = res;
-
-	if (interruptible)
-		ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
-	else
-		mutex_lock(&res->dev_priv->cmdbuf_mutex);
-
-	if (unlikely(ret != 0))
-		return -ERESTARTSYS;
-
-	ret = vmw_resource_reserve(res, interruptible, false);
-	if (ret)
-		goto out_unlock;
-
-	if (res->backup) {
-		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
-						    interruptible,
-						    res->dev_priv->has_mob,
-						    false);
-		if (ret)
-			goto out_unreserve;
-
-		ctx->buf = vmw_bo_reference(res->backup);
-	}
-	ret = vmw_resource_validate(res, interruptible);
-	if (ret)
-		goto out_revert;
-	return 0;
-
-out_revert:
-	vmw_kms_helper_buffer_revert(ctx->buf);
-out_unreserve:
-	vmw_resource_unreserve(res, false, NULL, 0);
-out_unlock:
-	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
-	return ret;
-}
-
-/**
- * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
- * kms command submission.
- *
- * @res: Pointer to the resource. Typically a surface.
- * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
- * ref-counted fence pointer is returned here.
- */
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-				    struct vmw_fence_obj **out_fence)
-{
-	struct vmw_resource *res = ctx->res;
-
-	if (ctx->buf || out_fence)
-		vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
-					     out_fence, NULL);
-
-	vmw_bo_unreference(&ctx->buf);
-	vmw_resource_unreserve(res, false, NULL, 0);
-	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
 
 /**
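Editor's note: in the new helper above, the fence is apparently only created
when something can consume it (a user-space fence rep, a kernel out_fence, or
buffer objects on the validation list), after which vmw_validation_done()
takes over the post-submission cleanup. Both calling flavors appear in the
hunks below; condensed from vmwgfx_scrn.c:

	/* Kernel fencing: hand a refcounted fence back to the caller. */
	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
					 NULL);

	/* User-space fencing: copy fence info to the client instead. */
	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
					 user_fence_rep);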
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 31311298ec0b..76ec570c0684 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -308,24 +308,12 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 			 int increment,
 			 struct vmw_kms_dirty *dirty);
 
-int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
-				  struct vmw_buffer_object *buf,
-				  bool interruptible,
-				  bool validate_as_mob,
-				  bool for_cpu_blit);
-void vmw_kms_helper_buffer_revert(struct vmw_buffer_object *buf);
-void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
-				  struct drm_file *file_priv,
-				  struct vmw_buffer_object *buf,
-				  struct vmw_fence_obj **out_fence,
-				  struct drm_vmw_fence_rep __user *
-				  user_fence_rep);
-int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
-				    bool interruptible,
-				    struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
-void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
-				    struct vmw_fence_obj **out_fence);
+void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
+				      struct drm_file *file_priv,
+				      struct vmw_validation_context *ctx,
+				      struct vmw_fence_obj **out_fence,
+				      struct drm_vmw_fence_rep __user *
+				      user_fence_rep);
 int vmw_kms_readback(struct vmw_private *dev_priv,
 		     struct drm_file *file_priv,
 		     struct vmw_framebuffer *vfb,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index ad0de7f0cd60..53316b1bda3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -946,16 +946,20 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_kms_sou_surface_dirty sdirty;
-	struct vmw_validation_ctx ctx;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+	ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
 	if (ret)
 		return ret;
 
+	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+	if (ret)
+		goto out_unref;
+
 	sdirty.base.fifo_commit = vmw_sou_surface_fifo_commit;
 	sdirty.base.clip = vmw_sou_surface_clip;
 	sdirty.base.dev_priv = dev_priv;
@@ -972,9 +976,14 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
-	vmw_kms_helper_resource_finish(&ctx, out_fence);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);
 
 	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+	return ret;
 }
 
 /**
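Editor's note: the converted functions in this file share a two-label unwind
convention, sketched here with editorial comments (the labels and calls are
from the hunks below; the comments are not from the patch):

out_revert:
	/* vmw_validation_prepare() succeeded but a later step failed:
	 * undo the reservation/validation before dropping the lists. */
	vmw_validation_revert(&val_ctx);
out_unref:
	/* Only the context's lists were populated: just drop them. */
	vmw_validation_unref_lists(&val_ctx);

	return ret;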
@@ -1051,13 +1060,17 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 		container_of(framebuffer, struct vmw_framebuffer_bo,
 			     base)->buffer;
 	struct vmw_kms_dirty dirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;
 
-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-					    false, false);
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
 	if (ret)
 		return ret;
 
+	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+	if (ret)
+		goto out_unref;
+
 	ret = do_bo_define_gmrfb(dev_priv, framebuffer);
 	if (unlikely(ret != 0))
 		goto out_revert;
@@ -1069,12 +1082,15 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
 		num_clips;
 	ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
 				   0, 0, num_clips, increment, &dirty);
-	vmw_kms_helper_buffer_finish(dev_priv, NULL, buf, out_fence, NULL);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);
 
 	return ret;
 
 out_revert:
-	vmw_kms_helper_buffer_revert(buf);
+	vmw_validation_revert(&val_ctx);
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
 
 	return ret;
 }
@@ -1150,13 +1166,17 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 	struct vmw_buffer_object *buf =
 		container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
 	struct vmw_kms_dirty dirty;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;
 
-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
-					    false);
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
 	if (ret)
 		return ret;
 
+	ret = vmw_validation_prepare(&val_ctx, NULL, true);
+	if (ret)
+		goto out_unref;
+
 	ret = do_bo_define_gmrfb(dev_priv, vfb);
 	if (unlikely(ret != 0))
 		goto out_revert;
@@ -1168,13 +1188,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 		num_clips;
 	ret = vmw_kms_helper_dirty(dev_priv, vfb, NULL, vclips,
 				   0, 0, num_clips, 1, &dirty);
-	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-				     user_fence_rep);
+	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+					 user_fence_rep);
 
 	return ret;
 
 out_revert:
-	vmw_kms_helper_buffer_revert(buf);
-
+	vmw_validation_revert(&val_ctx);
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
+
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 93f6b96ca7bb..d3a9eba12b0e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -759,17 +759,21 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 	struct vmw_stdu_dirty ddirty;
 	int ret;
 	bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 
 	/*
 	 * VMs without 3D support don't have the surface DMA command and
 	 * we'll be using a CPU blit, and the framebuffer should be moved out
 	 * of VRAM.
 	 */
-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-					    false, cpu_blit);
+	ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
 	if (ret)
 		return ret;
 
+	ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+	if (ret)
+		goto out_unref;
+
 	ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
 		SVGA3D_READ_HOST_VRAM;
 	ddirty.left = ddirty.top = S32_MAX;
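Editor's note on the hunk above: the old helper's validate_as_mob and
for_cpu_blit flags appear to map onto the last two vmw_validation_add_bo()
arguments. This reading is an assumption drawn from the removed
vmw_kms_helper_buffer_prepare(), where for_cpu_blit selected the
CPU-accessible vmw_nonfixed_placement:

	/* Assumed argument mapping (not stated explicitly in the patch):
	 *   third arg  <- validate_as_mob (false here)
	 *   fourth arg <- for_cpu_blit (cpu_blit: no 3D, CPU blit path)
	 */
	ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);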
@@ -796,9 +800,13 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
 
 	ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
 				   0, 0, num_clips, increment, &ddirty.base);
-	vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
-				     user_fence_rep);
 
+	vmw_kms_helper_validation_finish(dev_priv, file_priv, &val_ctx, NULL,
+					 user_fence_rep);
+	return ret;
+
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
 	return ret;
 }
 
@@ -924,16 +932,20 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
 	struct vmw_stdu_dirty sdirty;
-	struct vmw_validation_ctx ctx;
+	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
 	int ret;
 
 	if (!srf)
 		srf = &vfbs->surface->res;
 
-	ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
+	ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
 	if (ret)
 		return ret;
 
+	ret = vmw_validation_prepare(&val_ctx, &dev_priv->cmdbuf_mutex, true);
+	if (ret)
+		goto out_unref;
+
 	if (vfbs->is_bo_proxy) {
 		ret = vmw_kms_update_proxy(srf, clips, num_clips, inc);
 		if (ret)
@@ -954,8 +966,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
 				   dest_x, dest_y, num_clips, inc,
 				   &sdirty.base);
 out_finish:
-	vmw_kms_helper_resource_finish(&ctx, out_fence);
+	vmw_kms_helper_validation_finish(dev_priv, NULL, &val_ctx, out_fence,
+					 NULL);
+
+	return ret;
 
+out_unref:
+	vmw_validation_unref_lists(&val_ctx);
 	return ret;
 }
 