about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Dave Airlie <airlied@redhat.com> 2014-08-06 02:26:21 -0400
committer: Dave Airlie <airlied@redhat.com> 2014-10-13 00:40:53 -0400
commit: dfda0df3426483cf5fc7441f23f318edbabecb03 (patch)
tree: 240106fe1072aeae2917c3c3c5fca83ba4f8a641
parent: 1e99cfa8de0f0879091e33cd65fd60418d006ad9 (diff)
drm/mst: rework payload table allocation to conform better.
The old code has problems with the Dell MST monitors due to some assumptions I made that weren't true. I initially thought the Virtual Channel Payload IDs had to be in the DPCD table in ascending order; however, it appears that assumption is bogus. The old code also assumed it was possible to insert a member into the table and it would move other members up, like it does when you remove table entries; however, reality has shown this isn't true. So the new code allocates VCPIs separately from entries in the payload tracking table, and when we remove an entry from the DPCD table, I shuffle the tracking payload entries around in the struct. This appears to make VT switch more robust (still not perfect) with an MST enabled Dell monitor. Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c77
-rw-r--r--include/drm/drm_dp_mst_helper.h2
2 files changed, 59 insertions, 20 deletions
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index b3adf1445020..070f913d2dba 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -682,7 +682,7 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
682static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, 682static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
683 struct drm_dp_vcpi *vcpi) 683 struct drm_dp_vcpi *vcpi)
684{ 684{
685 int ret; 685 int ret, vcpi_ret;
686 686
687 mutex_lock(&mgr->payload_lock); 687 mutex_lock(&mgr->payload_lock);
688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); 688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
@@ -692,8 +692,16 @@ static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
692 goto out_unlock; 692 goto out_unlock;
693 } 693 }
694 694
695 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
696 if (vcpi_ret > mgr->max_payloads) {
697 ret = -EINVAL;
698 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
699 goto out_unlock;
700 }
701
695 set_bit(ret, &mgr->payload_mask); 702 set_bit(ret, &mgr->payload_mask);
696 vcpi->vcpi = ret; 703 set_bit(vcpi_ret, &mgr->vcpi_mask);
704 vcpi->vcpi = vcpi_ret + 1;
697 mgr->proposed_vcpis[ret - 1] = vcpi; 705 mgr->proposed_vcpis[ret - 1] = vcpi;
698out_unlock: 706out_unlock:
699 mutex_unlock(&mgr->payload_lock); 707 mutex_unlock(&mgr->payload_lock);
@@ -701,15 +709,23 @@ out_unlock:
701} 709}
702 710
703static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, 711static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
704 int id) 712 int vcpi)
705{ 713{
706 if (id == 0) 714 int i;
715 if (vcpi == 0)
707 return; 716 return;
708 717
709 mutex_lock(&mgr->payload_lock); 718 mutex_lock(&mgr->payload_lock);
710 DRM_DEBUG_KMS("putting payload %d\n", id); 719 DRM_DEBUG_KMS("putting payload %d\n", vcpi);
711 clear_bit(id, &mgr->payload_mask); 720 clear_bit(vcpi - 1, &mgr->vcpi_mask);
712 mgr->proposed_vcpis[id - 1] = NULL; 721
722 for (i = 0; i < mgr->max_payloads; i++) {
723 if (mgr->proposed_vcpis[i])
724 if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
725 mgr->proposed_vcpis[i] = NULL;
726 clear_bit(i + 1, &mgr->payload_mask);
727 }
728 }
713 mutex_unlock(&mgr->payload_lock); 729 mutex_unlock(&mgr->payload_lock);
714} 730}
715 731
@@ -1563,7 +1579,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1563 } 1579 }
1564 1580
1565 drm_dp_dpcd_write_payload(mgr, id, payload); 1581 drm_dp_dpcd_write_payload(mgr, id, payload);
1566 payload->payload_state = 0; 1582 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
1567 return 0; 1583 return 0;
1568} 1584}
1569 1585
@@ -1590,7 +1606,7 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1590 */ 1606 */
1591int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) 1607int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1592{ 1608{
1593 int i; 1609 int i, j;
1594 int cur_slots = 1; 1610 int cur_slots = 1;
1595 struct drm_dp_payload req_payload; 1611 struct drm_dp_payload req_payload;
1596 struct drm_dp_mst_port *port; 1612 struct drm_dp_mst_port *port;
@@ -1607,26 +1623,46 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1607 port = NULL; 1623 port = NULL;
1608 req_payload.num_slots = 0; 1624 req_payload.num_slots = 0;
1609 } 1625 }
1626
1627 if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1628 mgr->payloads[i].start_slot = req_payload.start_slot;
1629 }
1610 /* work out what is required to happen with this payload */ 1630 /* work out what is required to happen with this payload */
1611 if (mgr->payloads[i].start_slot != req_payload.start_slot || 1631 if (mgr->payloads[i].num_slots != req_payload.num_slots) {
1612 mgr->payloads[i].num_slots != req_payload.num_slots) {
1613 1632
1614 /* need to push an update for this payload */ 1633 /* need to push an update for this payload */
1615 if (req_payload.num_slots) { 1634 if (req_payload.num_slots) {
1616 drm_dp_create_payload_step1(mgr, i + 1, &req_payload); 1635 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1617 mgr->payloads[i].num_slots = req_payload.num_slots; 1636 mgr->payloads[i].num_slots = req_payload.num_slots;
1618 } else if (mgr->payloads[i].num_slots) { 1637 } else if (mgr->payloads[i].num_slots) {
1619 mgr->payloads[i].num_slots = 0; 1638 mgr->payloads[i].num_slots = 0;
1620 drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]); 1639 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
1621 req_payload.payload_state = mgr->payloads[i].payload_state; 1640 req_payload.payload_state = mgr->payloads[i].payload_state;
1622 } else 1641 mgr->payloads[i].start_slot = 0;
1623 req_payload.payload_state = 0; 1642 }
1624
1625 mgr->payloads[i].start_slot = req_payload.start_slot;
1626 mgr->payloads[i].payload_state = req_payload.payload_state; 1643 mgr->payloads[i].payload_state = req_payload.payload_state;
1627 } 1644 }
1628 cur_slots += req_payload.num_slots; 1645 cur_slots += req_payload.num_slots;
1629 } 1646 }
1647
1648 for (i = 0; i < mgr->max_payloads; i++) {
1649 if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1650 DRM_DEBUG_KMS("removing payload %d\n", i);
1651 for (j = i; j < mgr->max_payloads - 1; j++) {
1652 memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1653 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1654 if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1655 set_bit(j + 1, &mgr->payload_mask);
1656 } else {
1657 clear_bit(j + 1, &mgr->payload_mask);
1658 }
1659 }
1660 memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1661 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1662 clear_bit(mgr->max_payloads, &mgr->payload_mask);
1663
1664 }
1665 }
1630 mutex_unlock(&mgr->payload_lock); 1666 mutex_unlock(&mgr->payload_lock);
1631 1667
1632 return 0; 1668 return 0;
@@ -1657,9 +1693,9 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1657 1693
1658 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); 1694 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1659 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { 1695 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1660 ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]); 1696 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1661 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { 1697 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1662 ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]); 1698 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1663 } 1699 }
1664 if (ret) { 1700 if (ret) {
1665 mutex_unlock(&mgr->payload_lock); 1701 mutex_unlock(&mgr->payload_lock);
@@ -1861,6 +1897,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
1861 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); 1897 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
1862 mgr->payload_mask = 0; 1898 mgr->payload_mask = 0;
1863 set_bit(0, &mgr->payload_mask); 1899 set_bit(0, &mgr->payload_mask);
1900 mgr->vcpi_mask = 0;
1864 } 1901 }
1865 1902
1866out_unlock: 1903out_unlock:
@@ -2475,7 +2512,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
2475 mutex_unlock(&mgr->lock); 2512 mutex_unlock(&mgr->lock);
2476 2513
2477 mutex_lock(&mgr->payload_lock); 2514 mutex_lock(&mgr->payload_lock);
2478 seq_printf(m, "vcpi: %lx\n", mgr->payload_mask); 2515 seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
2479 2516
2480 for (i = 0; i < mgr->max_payloads; i++) { 2517 for (i = 0; i < mgr->max_payloads; i++) {
2481 if (mgr->proposed_vcpis[i]) { 2518 if (mgr->proposed_vcpis[i]) {
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 9b446ada2532..338fc1053835 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -388,6 +388,7 @@ struct drm_dp_payload {
388 int payload_state; 388 int payload_state;
389 int start_slot; 389 int start_slot;
390 int num_slots; 390 int num_slots;
391 int vcpi;
391}; 392};
392 393
393/** 394/**
@@ -454,6 +455,7 @@ struct drm_dp_mst_topology_mgr {
454 struct drm_dp_vcpi **proposed_vcpis; 455 struct drm_dp_vcpi **proposed_vcpis;
455 struct drm_dp_payload *payloads; 456 struct drm_dp_payload *payloads;
456 unsigned long payload_mask; 457 unsigned long payload_mask;
458 unsigned long vcpi_mask;
457 459
458 wait_queue_head_t tx_waitq; 460 wait_queue_head_t tx_waitq;
459 struct work_struct work; 461 struct work_struct work;