Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 163
1 file changed, 100 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 63a4cd794b73..419185f60278 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -316,69 +316,21 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
 				  struct drm_modeset_acquire_ctx *ctx)
 {
 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
-	struct vmw_fence_obj *fence = NULL;
-	struct drm_vmw_rect vclips;
 	int ret;
 
 	if (!vmw_kms_crtc_flippable(dev_priv, crtc))
 		return -EINVAL;
 
-	flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
-	ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
+	ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
 	if (ret) {
 		DRM_ERROR("Page flip error %d.\n", ret);
 		return ret;
 	}
 
-	/* do a full screen dirty update */
-	vclips.x = crtc->x;
-	vclips.y = crtc->y;
-	vclips.w = crtc->mode.hdisplay;
-	vclips.h = crtc->mode.vdisplay;
-
-	if (vfb->dmabuf)
-		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
-						  NULL, &vclips, 1, 1,
-						  true, &fence);
-	else
-		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
-						   NULL, &vclips, NULL,
-						   0, 0, 1, 1, &fence);
-
-
-	if (ret != 0)
-		goto out_no_fence;
-	if (!fence) {
-		ret = -EINVAL;
-		goto out_no_fence;
-	}
-
-	if (event) {
-		struct drm_file *file_priv = event->base.file_priv;
-
-		ret = vmw_event_fence_action_queue(file_priv, fence,
-						   &event->base,
-						   &event->event.vbl.tv_sec,
-						   &event->event.vbl.tv_usec,
-						   true);
-	}
-
-	/*
-	 * No need to hold on to this now. The only cleanup
-	 * we need to do if we fail is unref the fence.
-	 */
-	vmw_fence_obj_unreference(&fence);
-
 	if (vmw_crtc_to_du(crtc)->is_implicit)
 		vmw_kms_update_implicit_fb(dev_priv, crtc);
 
 	return ret;
-
-out_no_fence:
-	drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
-	return ret;
 }
 
 static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
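Note: after this hunk, vmw_sou_crtc_page_flip() only checks that the CRTC is flippable and hands the flip, including the userspace event, to drm_atomic_helper_page_flip(); the dirty update and fence/event queuing it used to do reappear in the atomic_update hunk further down. For orientation, the legacy userspace request that ends up here is a plain libdrm page flip; a minimal sketch, with fd, crtc_id, fb_id and user_data as placeholders:

	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Ask the kernel to flip crtc_id to fb_id and send a vblank event back. */
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_EVENT, user_data);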
@@ -453,7 +405,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
 			       struct drm_plane_state *old_state)
 {
 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+	struct drm_crtc *crtc = plane->state->crtc ?
+		plane->state->crtc : old_state->crtc;
 
+	if (vps->dmabuf)
+		vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
 	vmw_dmabuf_unreference(&vps->dmabuf);
 	vps->dmabuf_size = 0;
 
@@ -491,10 +447,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 	}
 
 	size = new_state->crtc_w * new_state->crtc_h * 4;
+	dev_priv = vmw_priv(crtc->dev);
 
 	if (vps->dmabuf) {
-		if (vps->dmabuf_size == size)
-			return 0;
+		if (vps->dmabuf_size == size) {
+			/*
+			 * Note that this might temporarily up the pin-count
+			 * to 2, until cleanup_fb() is called.
+			 */
+			return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+						      true);
+		}
 
 		vmw_dmabuf_unreference(&vps->dmabuf);
 		vps->dmabuf_size = 0;
@@ -504,7 +467,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 	if (!vps->dmabuf)
 		return -ENOMEM;
 
-	dev_priv = vmw_priv(crtc->dev);
 	vmw_svga_enable(dev_priv);
 
 	/* After we have alloced the backing store might not be able to
@@ -515,13 +477,16 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
 			      &vmw_vram_ne_placement,
 			      false, &vmw_dmabuf_bo_free);
 	vmw_overlay_resume_all(dev_priv);
-
-	if (ret != 0)
+	if (ret) {
 		vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
-	else
-		vps->dmabuf_size = size;
+		return ret;
+	}
 
-	return ret;
+	/*
+	 * TTM already thinks the buffer is pinned, but make sure the
+	 * pin_count is upped.
+	 */
+	return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
 }
 
 
@@ -530,9 +495,71 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
 				struct drm_plane_state *old_state)
 {
 	struct drm_crtc *crtc = plane->state->crtc;
+	struct drm_pending_vblank_event *event = NULL;
+	struct vmw_fence_obj *fence = NULL;
+	int ret;
+
+	if (crtc && plane->state->fb) {
+		struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+		struct vmw_framebuffer *vfb =
+			vmw_framebuffer_to_vfb(plane->state->fb);
+		struct drm_vmw_rect vclips;
+
+		vclips.x = crtc->x;
+		vclips.y = crtc->y;
+		vclips.w = crtc->mode.hdisplay;
+		vclips.h = crtc->mode.vdisplay;
+
+		if (vfb->dmabuf)
+			ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
+							  &vclips, 1, 1, true,
+							  &fence, crtc);
+		else
+			ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
+							   &vclips, NULL, 0, 0,
+							   1, 1, &fence, crtc);
+
+		/*
+		 * We cannot really fail this function, so if we do, then output
+		 * an error and maintain consistent atomic state.
+		 */
+		if (ret != 0)
+			DRM_ERROR("Failed to update screen.\n");
 
-	if (crtc)
 		crtc->primary->fb = plane->state->fb;
+	} else {
+		/*
+		 * When disabling a plane, CRTC and FB should always be NULL
+		 * together, otherwise it's an error.
+		 * Here primary plane is being disable so should really blank
+		 * the screen object display unit, if not already done.
+		 */
+		return;
+	}
+
+	event = crtc->state->event;
+	/*
+	 * In case of failure and other cases, vblank event will be sent in
+	 * vmw_du_crtc_atomic_flush.
+	 */
+	if (event && fence) {
+		struct drm_file *file_priv = event->base.file_priv;
+
+		ret = vmw_event_fence_action_queue(file_priv,
+						   fence,
+						   &event->base,
+						   &event->event.vbl.tv_sec,
+						   &event->event.vbl.tv_usec,
+						   true);
+
+		if (unlikely(ret != 0))
+			DRM_ERROR("Failed to queue event on fence.\n");
+		else
+			crtc->state->event = NULL;
+	}
+
+	if (fence)
+		vmw_fence_obj_unreference(&fence);
 }
 
 
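Note: per the comment added above, when no fence was produced (or queuing the event on the fence failed), the vblank event is left in crtc->state->event for vmw_du_crtc_atomic_flush() to deliver. That helper is outside this diff; as a hedged illustration only, the usual atomic_flush fallback in DRM drivers looks roughly like this (generic pattern, not a quote of the vmwgfx helper):

	if (crtc->state->event) {
		/* Nothing consumed the event, so send it from the flush hook. */
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}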
@@ -892,6 +919,7 @@ static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
  * @out_fence: If non-NULL, will return a ref-counted pointer to a
  * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  * case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
  *
  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  * interrupted.
@@ -904,7 +932,8 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 				 s32 dest_x,
 				 s32 dest_y,
 				 unsigned num_clips, int inc,
-				 struct vmw_fence_obj **out_fence)
+				 struct vmw_fence_obj **out_fence,
+				 struct drm_crtc *crtc)
 {
 	struct vmw_framebuffer_surface *vfbs =
 		container_of(framebuffer, typeof(*vfbs), base);
@@ -923,6 +952,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
 	sdirty.base.dev_priv = dev_priv;
 	sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
 		sizeof(SVGASignedRect) * num_clips;
+	sdirty.base.crtc = crtc;
 
 	sdirty.sid = srf->id;
 	sdirty.left = sdirty.top = S32_MAX;
@@ -994,6 +1024,7 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
  * @out_fence: If non-NULL, will return a ref-counted pointer to a
  * struct vmw_fence_obj. The returned fence pointer may be NULL in which
  * case the device has already synchronized.
+ * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
  *
  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  * interrupted.
@@ -1004,7 +1035,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 				struct drm_vmw_rect *vclips,
 				unsigned num_clips, int increment,
 				bool interruptible,
-				struct vmw_fence_obj **out_fence)
+				struct vmw_fence_obj **out_fence,
+				struct drm_crtc *crtc)
 {
 	struct vmw_dma_buffer *buf =
 		container_of(framebuffer, struct vmw_framebuffer_dmabuf,
@@ -1013,7 +1045,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 	int ret;
 
 	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
-					    false);
+					    false, false);
 	if (ret)
 		return ret;
 
@@ -1021,6 +1053,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto out_revert;
 
+	dirty.crtc = crtc;
 	dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
 	dirty.clip = vmw_sou_dmabuf_clip;
 	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
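Note: both dirty helpers now take a trailing struct drm_crtc *; per the updated kerneldoc, passing a crtc restricts the dirty to that crtc's screen object unit. Reassembled from the atomic_update hunk earlier in this diff, a full-CRTC update through the new signatures looks like this (excerpt, not self-contained; dev_priv, vfb, fence and ret are set up by the caller):

	struct drm_vmw_rect vclips = {
		.x = crtc->x,
		.y = crtc->y,
		.w = crtc->mode.hdisplay,
		.h = crtc->mode.vdisplay,
	};

	if (vfb->dmabuf)
		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
						  &vclips, 1, 1, true,
						  &fence, crtc);
	else
		ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
						   &vclips, NULL, 0, 0,
						   1, 1, &fence, crtc);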
@@ -1092,6 +1125,7 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
  * Must be set to non-NULL if @file_priv is non-NULL.
  * @vclips: Array of clip rects.
  * @num_clips: Number of clip rects in @vclips.
+ * @crtc: If crtc is passed, readback on that crtc only.
  *
  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
  * interrupted.
@@ -1101,14 +1135,16 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 			 struct vmw_framebuffer *vfb,
 			 struct drm_vmw_fence_rep __user *user_fence_rep,
 			 struct drm_vmw_rect *vclips,
-			 uint32_t num_clips)
+			 uint32_t num_clips,
+			 struct drm_crtc *crtc)
 {
 	struct vmw_dma_buffer *buf =
 		container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
 	struct vmw_kms_dirty dirty;
 	int ret;
 
-	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+	ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
+					    false);
 	if (ret)
 		return ret;
 
@@ -1116,6 +1152,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		goto out_revert;
 
+	dirty.crtc = crtc;
 	dirty.fifo_commit = vmw_sou_readback_fifo_commit;
 	dirty.clip = vmw_sou_readback_clip;
 	dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *