Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 290 ++++++++++++------
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h |  13 +
 2 files changed, 234 insertions(+), 69 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a5ba1d6990cc..d429088ef7aa 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -989,45 +989,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
 };
 
 
-static struct drm_atomic_state *
-dm_atomic_state_alloc(struct drm_device *dev)
-{
-	struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
-	if (!state)
-		return NULL;
-
-	if (drm_atomic_state_init(dev, &state->base) < 0)
-		goto fail;
-
-	return &state->base;
-
-fail:
-	kfree(state);
-	return NULL;
-}
-
-static void
-dm_atomic_state_clear(struct drm_atomic_state *state)
-{
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-
-	if (dm_state->context) {
-		dc_release_state(dm_state->context);
-		dm_state->context = NULL;
-	}
-
-	drm_atomic_state_default_clear(state);
-}
-
-static void
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
-{
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-	drm_atomic_state_default_release(state);
-	kfree(dm_state);
-}
-
 /**
  * DOC: atomic
  *
@@ -1039,9 +1000,6 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = amdgpu_dm_atomic_check,
 	.atomic_commit = amdgpu_dm_atomic_commit,
-	.atomic_state_alloc = dm_atomic_state_alloc,
-	.atomic_state_clear = dm_atomic_state_clear,
-	.atomic_state_free = dm_atomic_state_alloc_free
 };
 
 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1563,8 +1521,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 }
 #endif
 
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+static int dm_atomic_get_state(struct drm_atomic_state *state,
+			       struct dm_atomic_state **dm_state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_state *priv_state;
+	int ret;
+
+	if (*dm_state)
+		return 0;
+
+	ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
+	if (ret)
+		return ret;
+
+	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+	if (IS_ERR(priv_state))
+		return PTR_ERR(priv_state);
+
+	*dm_state = to_dm_atomic_state(priv_state);
+
+	return 0;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_obj *obj;
+	struct drm_private_state *new_obj_state;
+	int i;
+
+	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+		if (obj->funcs == dm->atomic_obj.funcs)
+			return to_dm_atomic_state(new_obj_state);
+	}
+
+	return NULL;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_old_state(struct drm_atomic_state *state)
+{
+	struct drm_device *dev = state->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_display_manager *dm = &adev->dm;
+	struct drm_private_obj *obj;
+	struct drm_private_state *old_obj_state;
+	int i;
+
+	for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
+		if (obj->funcs == dm->atomic_obj.funcs)
+			return to_dm_atomic_state(old_obj_state);
+	}
+
+	return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+	struct dm_atomic_state *old_state, *new_state;
+
+	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+	if (!new_state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+	new_state->context = dc_create_state();
+	if (!new_state->context) {
+		kfree(new_state);
+		return NULL;
+	}
+
+	old_state = to_dm_atomic_state(obj->state);
+	if (old_state && old_state->context)
+		dc_resource_state_copy_construct(old_state->context,
+						 new_state->context);
+
+	return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+				    struct drm_private_state *state)
+{
+	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+	if (dm_state && dm_state->context)
+		dc_release_state(dm_state->context);
+
+	kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+	.atomic_duplicate_state = dm_atomic_duplicate_state,
+	.atomic_destroy_state = dm_atomic_destroy_state,
+};
+
 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 {
+	struct dm_atomic_state *state;
 	int r;
 
 	adev->mode_info.mode_config_initialized = true;
@@ -1582,6 +1649,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
+	drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->context = dc_create_state();
+	if (!state->context) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
+
+	drm_atomic_private_obj_init(&adev->dm.atomic_obj,
+				    &state->base,
+				    &dm_atomic_state_funcs);
+
 	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -1905,6 +1990,7 @@ fail:
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
 	drm_mode_config_cleanup(dm->ddev);
+	drm_atomic_private_obj_fini(&dm->atomic_obj);
 	return;
 }
 
@@ -4349,6 +4435,20 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
 			 acrtc->crtc_id);
 }
 
+struct dc_stream_status *dc_state_get_stream_status(
+	struct dc_state *state,
+	struct dc_stream_state *stream)
+{
+	uint8_t i;
+
+	for (i = 0; i < state->stream_count; i++) {
+		if (stream == state->streams[i])
+			return &state->stream_status[i];
+	}
+
+	return NULL;
+}
+
 /*
  * Executes flip
  *
@@ -4558,6 +4658,7 @@ static bool commit_planes_to_stream(
 }
 
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+				    struct dc_state *dc_state,
 				    struct drm_device *dev,
 				    struct amdgpu_display_manager *dm,
 				    struct drm_crtc *pcrtc,
@@ -4574,7 +4675,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
 	struct dm_crtc_state *dm_old_crtc_state =
 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	int planes_count = 0;
 	unsigned long flags;
 
@@ -4635,7 +4735,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 					crtc,
 					fb,
 					(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
-					dm_state->context);
+					dc_state);
 		}
 
 	}
@@ -4661,7 +4761,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				planes_count,
 				acrtc_state,
 				dm_old_crtc_state,
-				dm_state->context))
+				dc_state))
 			dm_error("%s: Failed to attach plane!\n", __func__);
 	} else {
 		/*TODO BUG Here should go disable planes on CRTC. */
@@ -4729,6 +4829,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_display_manager *dm = &adev->dm;
 	struct dm_atomic_state *dm_state;
+	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
 	uint32_t i, j;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4741,7 +4842,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
-	dm_state = to_dm_atomic_state(state);
+	dm_state = dm_atomic_get_new_state(state);
+	if (dm_state && dm_state->context) {
+		dc_state = dm_state->context;
+	} else {
+		/* No state changes, retain current state. */
+		dc_state_temp = dc_create_state();
+		ASSERT(dc_state_temp);
+		dc_state = dc_state_temp;
+		dc_resource_state_copy_construct_current(dm->dc, dc_state);
+	}
 
 	/* update changed items */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4814,9 +4924,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	} /* for_each_crtc_in_state() */
 
-	if (dm_state->context) {
-		dm_enable_per_frame_crtc_master_sync(dm_state->context);
-		WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+	if (dc_state) {
+		dm_enable_per_frame_crtc_master_sync(dc_state);
+		WARN_ON(!dc_commit_state(dm->dc, dc_state));
 	}
 
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4829,6 +4939,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			dc_stream_get_status(dm_new_crtc_state->stream);
 
 		if (!status)
+			status = dc_state_get_stream_status(dc_state,
+							    dm_new_crtc_state->stream);
+
+		if (!status)
 			DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
 		else
 			acrtc->otg_inst = status->primary_otg_inst;
@@ -4914,7 +5028,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
 		if (dm_new_crtc_state->stream)
-			amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+			amdgpu_dm_commit_planes(state, dc_state, dev,
+						dm, crtc, &wait_for_vblank);
 	}
 
 
@@ -4954,6 +5069,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	for (i = 0; i < crtc_disable_count; i++)
 		pm_runtime_put_autosuspend(dev->dev);
 	pm_runtime_mark_last_busy(dev->dev);
+
+	if (dc_state_temp)
+		dc_release_state(dc_state_temp);
 }
 
 
@@ -5140,11 +5258,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 				  bool enable,
 				  bool *lock_and_validation_needed)
 {
+	struct dm_atomic_state *dm_state = NULL;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct dc_stream_state *new_stream;
 	int ret = 0;
 
@@ -5245,6 +5363,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 			if (!dm_old_crtc_state->stream)
 				goto next_crtc;
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				goto fail;
+
 			DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
 					crtc->base.id);
 
@@ -5279,6 +5401,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
 
 			WARN_ON(dm_new_crtc_state->stream);
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				goto fail;
+
 			dm_new_crtc_state->stream = new_stream;
 
 			dc_stream_retain(new_stream);
@@ -5353,12 +5479,13 @@ static int dm_update_planes_state(struct dc *dc,
 				   bool enable,
 				   bool *lock_and_validation_needed)
 {
+
+	struct dm_atomic_state *dm_state = NULL;
 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
 	int i ;
 	/* TODO return page_flip_needed() function */
@@ -5396,6 +5523,10 @@ static int dm_update_planes_state(struct dc *dc,
 			DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
 					plane->base.id, old_plane_crtc->base.id);
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret)
+				return ret;
+
 			if (!dc_remove_plane_from_context(
 					dc,
 					dm_old_crtc_state->stream,
@@ -5450,6 +5581,12 @@ static int dm_update_planes_state(struct dc *dc,
 				return ret;
 			}
 
+			ret = dm_atomic_get_state(state, &dm_state);
+			if (ret) {
+				dc_plane_state_release(dc_new_plane_state);
+				return ret;
+			}
+
 			/*
 			 * Any atomic check errors that occur after this will
 			 * not need a release. The plane state will be attached
@@ -5481,11 +5618,14 @@ static int dm_update_planes_state(struct dc *dc,
 
 	return ret;
 }
-enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
-{
-
 
-	int i, j, num_plane;
+static int
+dm_determine_update_type_for_commit(struct dc *dc,
+				    struct drm_atomic_state *state,
+				    enum surface_update_type *out_type)
+{
+	struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
+	int i, j, num_plane, ret = 0;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5505,7 +5645,7 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 		DRM_ERROR("Plane or surface update failed to allocate");
 		/* Set type to FULL to avoid crashing in DC*/
 		update_type = UPDATE_TYPE_FULL;
-		goto ret;
+		goto cleanup;
 	}
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -5559,27 +5699,40 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
 			}
 
 			if (num_plane > 0) {
-				status = dc_stream_get_status(new_dm_crtc_state->stream);
+				ret = dm_atomic_get_state(state, &dm_state);
+				if (ret)
+					goto cleanup;
+
+				old_dm_state = dm_atomic_get_old_state(state);
+				if (!old_dm_state) {
+					ret = -EINVAL;
+					goto cleanup;
+				}
+
+				status = dc_state_get_stream_status(old_dm_state->context,
+								    new_dm_crtc_state->stream);
+
 				update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
 										  &stream_update, status);
 
 				if (update_type > UPDATE_TYPE_MED) {
 					update_type = UPDATE_TYPE_FULL;
-					goto ret;
+					goto cleanup;
 				}
 			}
 
 		} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
 			update_type = UPDATE_TYPE_FULL;
-			goto ret;
+			goto cleanup;
 		}
 	}
 
-ret:
+cleanup:
 	kfree(updates);
 	kfree(surface);
 
-	return update_type;
+	*out_type = update_type;
+	return ret;
 }
 
 /**
@@ -5611,8 +5764,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 				  struct drm_atomic_state *state)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct dm_atomic_state *dm_state = NULL;
 	struct dc *dc = adev->dm.dc;
-	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
 	struct drm_connector *connector;
 	struct drm_connector_state *old_con_state, *new_con_state;
 	struct drm_crtc *crtc;
@@ -5653,10 +5806,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		goto fail;
 	}
 
-	dm_state->context = dc_create_state();
-	ASSERT(dm_state->context);
-	dc_resource_state_copy_construct_current(dc, dm_state->context);
-
 	/* Remove existing planes if they are modified */
 	ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
 	if (ret) {
@@ -5709,7 +5858,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		lock_and_validation_needed = true;
 	}
 
-	update_type = dm_determine_update_type_for_commit(dc, state);
+	ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+	if (ret)
+		goto fail;
 
 	if (overall_update_type < update_type)
 		overall_update_type = update_type;
@@ -5727,6 +5878,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 
 	if (overall_update_type > UPDATE_TYPE_FAST) {
+		ret = dm_atomic_get_state(state, &dm_state);
+		if (ret)
+			goto fail;
 
 		ret = do_aquire_global_lock(dev, state);
 		if (ret)
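
Note: dm_atomic_get_state() above is an instance of the generic DRM private-object accessor pattern. A minimal, self-contained sketch of that pattern follows; my_obj_state and my_get_state are hypothetical names for illustration only and are not part of this patch.

#include <linux/err.h>
#include <drm/drm_atomic.h>

struct my_obj_state {
	struct drm_private_state base;
	/* driver-global data tracked atomically goes here */
};

static struct my_obj_state *
my_get_state(struct drm_atomic_state *state, struct drm_private_obj *obj)
{
	struct drm_private_state *priv;

	/* Duplicates obj->state into this transaction on the first call;
	 * repeat calls within the same transaction return the same copy. */
	priv = drm_atomic_get_private_obj_state(state, obj);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return container_of(priv, struct my_obj_state, base);
}
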
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 19cd4626c3c0..f727853e52cd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -124,6 +124,17 @@ struct amdgpu_display_manager {
 	u16 display_indexes_num;
 
 	/**
+	 * @atomic_obj:
+	 *
+	 * In combination with &dm_atomic_state it helps manage
+	 * global atomic state that doesn't map cleanly into existing
+	 * drm resources, like &dc_context.
+	 */
+	struct drm_private_obj atomic_obj;
+
+	struct drm_modeset_lock atomic_obj_lock;
+
+	/**
 	 * @irq_handler_list_low_tab:
 	 *
 	 * Low priority IRQ handler table.
@@ -254,7 +265,7 @@ struct dm_crtc_state {
 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
 
 struct dm_atomic_state {
-	struct drm_atomic_state base;
+	struct drm_private_state base;
 
 	struct dc_state *context;
 };
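
Usage note: the check-path helper is built for lazy acquisition, so atomic checks that never touch the global dc_state never take atomic_obj_lock or duplicate the state. A hedged sketch of that calling pattern, assuming the helpers added above; stream_changed is an illustrative stand-in for the real stream/plane add-remove conditions in the patch:

static int example_atomic_check(struct drm_atomic_state *state)
{
	struct dm_atomic_state *dm_state = NULL;
	bool stream_changed = true;	/* hypothetical condition */
	int ret;

	if (stream_changed) {
		/* The first call locks atomic_obj_lock and duplicates the
		 * dc_state; *dm_state is cached for any later calls in the
		 * same transaction. */
		ret = dm_atomic_get_state(state, &dm_state);
		if (ret)
			return ret;

		/* dm_state->context may now be modified safely. */
	}

	return 0;
}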