author    Mykola Lysenko <Mykola.Lysenko@amd.com>    2015-12-18 17:14:43 -0500
committer Alex Deucher <alexander.deucher@amd.com>   2016-01-04 12:05:55 -0500
commit    1f16ee7fa13649f4e55aa48ad31c3eb0722a62d3
tree      5eacb55daa657ca7cec731d390842a9245b26713
parent    bd9343208704fcc70a5b919f228a7d26ae472727
drm/dp/mst: always send reply for UP request
We should always send a reply for an UP request so that the downstream
device can clean up its resources appropriately. The issue was that the
reply for an UP request was sent only once: after the first send,
tx_up_in_progress remained true and was never cleared, so subsequent
replies were queued on tx_msg_upq but never transmitted.

Acked-by: Dave Airlie <airlied@gmail.com>
Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
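The failure mode is visible in the first hunk below: process_single_up_tx_qlock() unconditionally set mgr->tx_up_in_progress = true after a send, nothing ever cleared it, and drm_dp_send_up_ack_reply() only kicked the send path when the flag was false, so every reply after the first just accumulated on tx_msg_upq. A toy model of that control flow, as a standalone C program rather than kernel code (a counter stands in for the tx_msg_upq list, and queueing and sending are collapsed into one function):

	#include <stdbool.h>
	#include <stdio.h>

	static bool tx_up_in_progress;	/* mirrors mgr->tx_up_in_progress */
	static int queued;		/* stands in for the tx_msg_upq list */

	static void old_send_up_ack_reply(void)
	{
		queued++;			/* list_add_tail(&txmsg->next, ...) */
		if (!tx_up_in_progress) {	/* only ever taken on the first call */
			queued--;		/* first msg is sent and freed */
			printf("reply sent\n");
			tx_up_in_progress = true;	/* set, but never cleared again */
		}
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			old_send_up_ack_reply();	/* three UP requests arrive */
		printf("%d replies stuck on queue\n", queued);	/* prints 2 */
		return 0;
	}

Run against three incoming UP requests, only the first produces a reply; the other two sit on the queue forever, which is the behavior this patch removes.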
Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  30
1 file changed, 11 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 7710de6e8a55..ca92a3217465 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1494,26 +1494,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_sideband_msg_tx *txmsg)
 {
-	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
 	/* construct a chunk from the first msg in the tx_msg queue */
-	if (list_empty(&mgr->tx_msg_upq)) {
-		mgr->tx_up_in_progress = false;
-		return;
-	}
-
-	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
 	ret = process_single_tx_qlock(mgr, txmsg, true);
-	if (ret == 1) {
-		/* up txmsgs aren't put in slots - so free after we send it */
-		list_del(&txmsg->next);
-		kfree(txmsg);
-	} else if (ret)
+
+	if (ret != 1)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-	mgr->tx_up_in_progress = true;
+
+	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1907,11 +1899,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_encode_up_ack_reply(txmsg, req_type);
 
 	mutex_lock(&mgr->qlock);
-	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
-	if (!mgr->tx_up_in_progress) {
-		process_single_up_tx_qlock(mgr);
-	}
+
+	process_single_up_tx_qlock(mgr, txmsg);
+
 	mutex_unlock(&mgr->qlock);
+
+	kfree(txmsg);
 	return 0;
 }
 
@@ -2843,7 +2836,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
 	mutex_init(&mgr->destroy_connector_lock);
-	INIT_LIST_HEAD(&mgr->tx_msg_upq);
 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
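
Taken together, the three hunks replace the queue-and-flag machinery with a fully synchronous send: drm_dp_send_up_ack_reply() now builds the reply, transmits it under qlock, and frees it itself, so every UP request gets its ACK. A compilable mock of that resulting shape, using stand-in types and invented mock_* names rather than the real DRM structures:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct mock_mgr {
		pthread_mutex_t qlock;		/* stands in for mgr->qlock */
	};

	struct mock_txmsg {
		int req_type;
	};

	/* Stand-in for process_single_up_tx_qlock(): send one message. */
	static void mock_send_up_tx_qlock(struct mock_mgr *mgr,
					  struct mock_txmsg *txmsg)
	{
		printf("ack sent for req_type %d\n", txmsg->req_type);
	}

	/* Shape of the fixed drm_dp_send_up_ack_reply(): no queue, no flag. */
	static int mock_send_up_ack_reply(struct mock_mgr *mgr, int req_type)
	{
		struct mock_txmsg *txmsg = calloc(1, sizeof(*txmsg));

		if (!txmsg)
			return -1;
		txmsg->req_type = req_type;

		pthread_mutex_lock(&mgr->qlock);
		mock_send_up_tx_qlock(mgr, txmsg);	/* sent synchronously, every time */
		pthread_mutex_unlock(&mgr->qlock);

		free(txmsg);		/* freed by the caller, not by a queue drain */
		return 0;
	}

	int main(void)
	{
		struct mock_mgr mgr;

		pthread_mutex_init(&mgr.qlock, NULL);
		for (int i = 0; i < 3; i++)
			mock_send_up_ack_reply(&mgr, i);	/* three requests, three replies */
		pthread_mutex_destroy(&mgr.qlock);
		return 0;
	}

With the flag gone, the only remaining per-message state is the sequence-number slot on the destination branch device; judging from the new txmsg->dst->tx_slots[txmsg->seqno] = NULL line, it is released by hand because an ACK reply never receives a response of its own.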