path: root/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_kms.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c  177
1 file changed, 114 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 34ecc27fc30a..3628a9fe705f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -393,13 +393,13 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
 	du->cursor_surface = vps->surf;
 	du->cursor_dmabuf = vps->dmabuf;
 
-	/* setup new image */
 	if (vps->surf) {
 		du->cursor_age = du->cursor_surface->snooper.age;
 
 		ret = vmw_cursor_update_image(dev_priv,
 					      vps->surf->snooper.image,
-					      64, 64, hotspot_x, hotspot_y);
+					      64, 64, hotspot_x,
+					      hotspot_y);
 	} else if (vps->dmabuf) {
 		ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
 					       plane->state->crtc_w,
@@ -497,11 +497,22 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
 	struct vmw_surface *surface = NULL;
 	struct drm_framebuffer *fb = new_state->fb;
 
+	struct drm_rect src = drm_plane_state_src(new_state);
+	struct drm_rect dest = drm_plane_state_dest(new_state);
 
 	/* Turning off */
 	if (!fb)
 		return ret;
 
+	ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
+					    &src, &dest,
+					    DRM_MODE_ROTATE_0,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    DRM_PLANE_HELPER_NO_SCALING,
+					    true, true, &new_state->visible);
+	if (!ret)
+		return ret;
+
 	/* A lot of the code assumes this */
 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
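
The hunk above routes the cursor plane's atomic check through the generic DRM helper before the driver's own constraints run. A minimal sketch of that pattern, assuming the 4.16-era helper API; this is illustrative, not the vmwgfx function itself:

/*
 * Clip src against dest, forbid rotation and scaling, and compute
 * new_state->visible for later stages, exactly as the hunk does.
 */
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int example_cursor_atomic_check(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_rect src = drm_plane_state_src(new_state);
	struct drm_rect dest = drm_plane_state_dest(new_state);

	/* Plane is being turned off: nothing to validate. */
	if (!new_state->fb)
		return 0;

	return drm_plane_helper_check_update(plane, new_state->crtc,
					     new_state->fb, &src, &dest,
					     DRM_MODE_ROTATE_0,
					     DRM_PLANE_HELPER_NO_SCALING,
					     DRM_PLANE_HELPER_NO_SCALING,
					     true, true, &new_state->visible);
}
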
@@ -566,13 +577,9 @@ void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
 		crtc->state->event = NULL;
 
 		spin_lock_irq(&crtc->dev->event_lock);
-		if (drm_crtc_vblank_get(crtc) == 0)
-			drm_crtc_arm_vblank_event(crtc, event);
-		else
-			drm_crtc_send_vblank_event(crtc, event);
+		drm_crtc_send_vblank_event(crtc, event);
 		spin_unlock_irq(&crtc->dev->event_lock);
 	}
-
 }
 
 
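
With the vblank_get/arm path gone, any pending event is delivered immediately while event_lock is held, which assumes the flip is effectively complete by the time the CRTC is flushed. A condensed sketch of the resulting pattern, with an illustrative function name:

#include <linux/spinlock.h>
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static void example_complete_flip(struct drm_crtc *crtc)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (!event)
		return;
	crtc->state->event = NULL;

	/* event_lock serializes event delivery against the vblank code. */
	spin_lock_irq(&crtc->dev->event_lock);
	drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irq(&crtc->dev->event_lock);
}
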
@@ -675,9 +682,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
 		return NULL;
 
 	vps->pinned = 0;
-
-	/* Mapping is managed by prepare_fb/cleanup_fb */
-	memset(&vps->host_map, 0, sizeof(vps->host_map));
 	vps->cpp = 0;
 
 	/* Each ref counted resource needs to be acquired again */
@@ -739,11 +743,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
 
 
 	/* Should have been freed by cleanup_fb */
-	if (vps->host_map.virtual) {
-		DRM_ERROR("Host mapping not freed\n");
-		ttm_bo_kunmap(&vps->host_map);
-	}
-
 	if (vps->surf)
 		vmw_surface_unreference(&vps->surf);
 
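
This hunk and the previous one drop the vps->host_map bookkeeping, so the plane state no longer carries a kernel mapping between prepare_fb and cleanup_fb. For context, a sketch of the ttm_bo_kmap()/ttm_bo_kunmap() pairing the deleted check was policing; the function names are illustrative, not vmwgfx code:

#include <drm/ttm/ttm_bo_api.h>

static int example_prepare_fb(struct ttm_buffer_object *bo,
			      struct ttm_bo_kmap_obj *map)
{
	/* Map the whole buffer object into kernel address space. */
	return ttm_bo_kmap(bo, 0, bo->num_pages, map);
}

static void example_cleanup_fb(struct ttm_bo_kmap_obj *map)
{
	/* map->virtual is non-NULL only while a mapping is live. */
	if (map->virtual)
		ttm_bo_kunmap(map);
}
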
@@ -888,11 +887,11 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	if (dev_priv->active_display_unit == vmw_du_screen_object)
 		ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
 						   clips, NULL, NULL, 0, 0,
-						   num_clips, inc, NULL);
+						   num_clips, inc, NULL, NULL);
 	else
 		ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
 						 clips, NULL, NULL, 0, 0,
-						 num_clips, inc, NULL);
+						 num_clips, inc, NULL, NULL);
 
 	vmw_fifo_flush(dev_priv, false);
 	ttm_read_unlock(&dev_priv->reservation_sem);
@@ -928,11 +927,12 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
 	switch (dev_priv->active_display_unit) {
 	case vmw_du_screen_object:
 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
-					    user_fence_rep, vclips, num_clips);
+					    user_fence_rep, vclips, num_clips,
+					    NULL);
 	case vmw_du_screen_target:
 		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
 					user_fence_rep, NULL, vclips, num_clips,
-					1, false, true);
+					1, false, true, NULL);
 	default:
 		WARN_ONCE(true,
 			  "Readback called with invalid display system.\n");
@@ -1090,12 +1090,12 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	case vmw_du_screen_target:
 		ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
 				       clips, NULL, num_clips, increment,
-				       true, true);
+				       true, true, NULL);
 		break;
 	case vmw_du_screen_object:
 		ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
 						  clips, NULL, num_clips,
-						  increment, true, NULL);
+						  increment, true, NULL, NULL);
 		break;
 	case vmw_du_legacy:
 		ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
@@ -1121,12 +1121,14 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 };
 
 /**
- * Pin the dmabuffer to the start of vram.
+ * Pin the dmabuffer in a location suitable for access by the
+ * display system.
  */
 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 	struct vmw_dma_buffer *buf;
+	struct ttm_placement *placement;
 	int ret;
 
 	buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
@@ -1143,12 +1145,24 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
 		break;
 	case vmw_du_screen_object:
 	case vmw_du_screen_target:
-		if (vfb->dmabuf)
-			return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
-							     false);
+		if (vfb->dmabuf) {
+			if (dev_priv->capabilities & SVGA_CAP_3D) {
+				/*
+				 * Use surface DMA to get content to
+				 * screen target surface.
+				 */
+				placement = &vmw_vram_gmr_placement;
+			} else {
+				/* Use CPU blit. */
+				placement = &vmw_sys_placement;
+			}
+		} else {
+			/* Use surface / image update */
+			placement = &vmw_mob_placement;
+		}
 
-		return vmw_dmabuf_pin_in_placement(dev_priv, buf,
-						   &vmw_mob_placement, false);
+		return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
+						   false);
 	default:
 		return -EINVAL;
 	}
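
The pin path now chooses a TTM placement per configuration instead of always pinning for MOB access: VRAM or GMR when the device supports surface DMA (SVGA_CAP_3D), plain system memory when content will be moved by a CPU blit, and MOB placement for surface-backed framebuffers. The same decision condensed into one hypothetical helper; the placement descriptors are the driver's existing globals, provided in-tree by "vmwgfx_drv.h":

#include "vmwgfx_drv.h"

static struct ttm_placement *
example_pick_fb_placement(struct vmw_private *dev_priv, bool is_dmabuf)
{
	if (!is_dmabuf)
		return &vmw_mob_placement;	/* surface / image update */

	if (dev_priv->capabilities & SVGA_CAP_3D)
		return &vmw_vram_gmr_placement;	/* surface DMA path */

	return &vmw_sys_placement;		/* CPU blit path */
}
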
@@ -1539,35 +1553,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
 	return drm_atomic_helper_check(dev, state);
 }
 
-
-/**
- * vmw_kms_atomic_commit - Perform an atomic state commit
- *
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: Whether nonblocking behaviour is requested
- *
- * This is a simple wrapper around drm_atomic_helper_commit() for
- * us to clear the nonblocking value.
- *
- * Nonblocking commits currently cause synchronization issues
- * for vmwgfx.
- *
- * RETURNS
- * Zero for success or negative error code on failure.
- */
-int vmw_kms_atomic_commit(struct drm_device *dev,
-			  struct drm_atomic_state *state,
-			  bool nonblock)
-{
-	return drm_atomic_helper_commit(dev, state, false);
-}
-
-
 static const struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 	.atomic_check = vmw_kms_atomic_check_modeset,
-	.atomic_commit = vmw_kms_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
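
Dropping the wrapper means vmwgfx now honours the caller's nonblock flag instead of forcing every commit to be blocking; presumably the synchronization issues noted in the removed comment were addressed elsewhere in this series. A generic sketch of pointing the mode_config funcs straight at the atomic helpers; fb_create is shown with the generic GEM helper here, while vmwgfx keeps its own vmw_kms_fb_create:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>

static const struct drm_mode_config_funcs example_kms_funcs = {
	.fb_create = drm_gem_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};
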
@@ -1581,7 +1570,7 @@ static int vmw_kms_generic_present(struct vmw_private *dev_priv,
 {
 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
 					    &surface->res, destX, destY,
-					    num_clips, 1, NULL);
+					    num_clips, 1, NULL, NULL);
 }
 
 
@@ -1600,7 +1589,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
 	case vmw_du_screen_target:
 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
 						 &surface->res, destX, destY,
-						 num_clips, 1, NULL);
+						 num_clips, 1, NULL, NULL);
 		break;
 	case vmw_du_screen_object:
 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
@@ -2328,10 +2317,16 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 
 	dirty->dev_priv = dev_priv;
 
-	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
-		if (crtc->primary->fb != &framebuffer->base)
-			continue;
-		units[num_units++] = vmw_crtc_to_du(crtc);
+	/* If crtc is passed, no need to iterate over other display units */
+	if (dirty->crtc) {
+		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
+	} else {
+		list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+				    head) {
+			if (crtc->primary->fb != &framebuffer->base)
+				continue;
+			units[num_units++] = vmw_crtc_to_du(crtc);
+		}
 	}
 
 	for (k = 0; k < num_units; k++) {
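
Together with the extra trailing argument threaded through the dirty and readback call sites earlier in this diff, this suggests struct vmw_kms_dirty gained a crtc field so a caller that already knows its CRTC (a page flip, say) can target a single display unit and skip the framebuffer scan. A hypothetical caller sketch; the vmw_kms_helper_dirty() signature is assumed from this era of the driver, and only the dirty->crtc field is taken from the hunk above:

static int example_flip_dirty(struct vmw_private *dev_priv,
			      struct vmw_framebuffer *framebuffer,
			      struct drm_crtc *crtc, u32 w, u32 h,
			      struct vmw_kms_dirty *dirty)
{
	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = w, .y2 = h };

	dirty->crtc = crtc;	/* Target just this display unit. */
	return vmw_kms_helper_dirty(dev_priv, framebuffer, &clip, NULL,
				    0, 0, 1, 1, dirty);
}
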
@@ -2430,14 +2425,21 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
 				  struct vmw_dma_buffer *buf,
 				  bool interruptible,
-				  bool validate_as_mob)
+				  bool validate_as_mob,
+				  bool for_cpu_blit)
 {
+	struct ttm_operation_ctx ctx = {
+		.interruptible = interruptible,
+		.no_wait_gpu = false};
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 
 	ttm_bo_reserve(bo, false, false, NULL);
-	ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
-					 validate_as_mob);
+	if (for_cpu_blit)
+		ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
+	else
+		ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+						 validate_as_mob);
 	if (ret)
 		ttm_bo_unreserve(bo);
 
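
The validate step now goes through struct ttm_operation_ctx, which replaced the loose interruptible/no_wait_gpu arguments in this kernel generation; vmw_nonfixed_placement (named in the hunk) presumably lets the buffer sit anywhere the CPU can reach for the blit. A self-contained sketch of the reserve/validate/unreserve pattern, with an illustrative function name:

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static int example_validate(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_validate(bo, placement, &ctx);
	ttm_bo_unreserve(bo);
	return ret;
}
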
@@ -2549,7 +2551,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
 	if (res->backup) {
 		ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
 						    interruptible,
-						    res->dev_priv->has_mob);
+						    res->dev_priv->has_mob,
+						    false);
 		if (ret)
 			goto out_unreserve;
 	}
@@ -2845,3 +2848,51 @@ int vmw_kms_set_config(struct drm_mode_set *set,
 
 	return drm_atomic_helper_set_config(set, ctx);
 }
+
+
+/**
+ * vmw_kms_suspend - Save modesetting state and turn modesetting off.
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ */
+int vmw_kms_suspend(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
+	if (IS_ERR(dev_priv->suspend_state)) {
+		int ret = PTR_ERR(dev_priv->suspend_state);
+
+		DRM_ERROR("Failed kms suspend: %d\n", ret);
+		dev_priv->suspend_state = NULL;
+
+		return ret;
+	}
+
+	return 0;
+}
+
+
+/**
+ * vmw_kms_resume - Re-enable modesetting and restore state
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ *
+ * State is resumed from a previous vmw_kms_suspend(). It's illegal
+ * to call this function without a previous vmw_kms_suspend().
+ */
+int vmw_kms_resume(struct drm_device *dev)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	int ret;
+
+	if (WARN_ON(!dev_priv->suspend_state))
+		return 0;
+
+	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
+	dev_priv->suspend_state = NULL;
+
+	return ret;
+}