Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 946
1 file changed, 755 insertions(+), 191 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 2ab16c9e6243..196ebba8af5f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -46,7 +46,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
 				  char *buf);
 static int test_calc_pbn_mode(void);
 
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
 
 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
 				     int id,
@@ -850,46 +850,212 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
 	if (lct > 1)
 		memcpy(mstb->rad, rad, lct / 2);
 	INIT_LIST_HEAD(&mstb->ports);
-	kref_init(&mstb->kref);
+	kref_init(&mstb->topology_kref);
+	kref_init(&mstb->malloc_kref);
 	return mstb;
 }
 
-static void drm_dp_free_mst_port(struct kref *kref);
-
 static void drm_dp_free_mst_branch_device(struct kref *kref)
 {
-	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
-	if (mstb->port_parent) {
-		if (list_empty(&mstb->port_parent->next))
-			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
-	}
+	struct drm_dp_mst_branch *mstb =
+		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
+
+	if (mstb->port_parent)
+		drm_dp_mst_put_port_malloc(mstb->port_parent);
+
 	kfree(mstb);
 }
 
+/**
+ * DOC: Branch device and port refcounting
+ *
+ * Topology refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The refcounting schemes for &struct drm_dp_mst_branch and &struct
+ * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
+ * two different kinds of refcounts: topology refcounts, and malloc refcounts.
+ *
+ * Topology refcounts are not exposed to drivers, and are handled internally
+ * by the DP MST helpers. The helpers use them in order to prevent the
+ * in-memory topology state from being changed in the middle of critical
+ * operations like changing the internal state of payload allocations. This
+ * means each branch and port will be considered to be connected to the rest
+ * of the topology until its topology refcount reaches zero. Additionally,
+ * for ports this means that their associated &struct drm_connector will stay
+ * registered with userspace until the port's refcount reaches 0.
+ *
+ * Malloc refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
+ * drm_dp_mst_branch allocated even after all of its topology references have
+ * been dropped, so that the driver or MST helpers can safely access each
+ * branch's last known state before it was disconnected from the topology.
+ * When the malloc refcount of a port or branch reaches 0, the memory
+ * allocation containing the &struct drm_dp_mst_branch or &struct
+ * drm_dp_mst_port respectively will be freed.
+ *
+ * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
+ * to drivers. As of writing this documentation, there are no drivers that
+ * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
+ * helpers. Exposing this API to drivers in a race-free manner would take more
+ * tweaking of the refcounting scheme, however patches are welcome provided
+ * there is a legitimate driver usecase for this.
+ *
+ * Refcount relationships in a topology
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Let's take a look at why the relationship between topology and malloc
+ * refcounts is designed the way it is.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-1.dot
+ *
+ *    An example of topology and malloc refs in a DP MST topology with two
+ *    active payloads. Topology refcount increments are indicated by solid
+ *    lines, and malloc refcount increments are indicated by dashed lines.
+ *    Each starts from the branch which incremented the refcount, and ends at
+ *    the branch to which the refcount belongs, i.e. the arrow points the
+ *    same way as the C pointers used to reference a structure.
+ *
+ * As you can see in the above figure, every branch increments the topology
+ * refcount of its children, and increments the malloc refcount of its
+ * parent. Additionally, every payload increments the malloc refcount of its
+ * assigned port by 1.
+ *
+ * So, what would happen if MSTB #3 from the above figure was unplugged from
+ * the system, but the driver hadn't yet removed payload #2 from port #3? The
+ * topology would start to look like the figure below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-2.dot
+ *
+ *    Ports and branch devices which have been released from memory are
+ *    colored grey, and references which have been removed are colored red.
+ *
+ * Whenever a port or branch device's topology refcount reaches zero, it will
+ * decrement the topology refcounts of all its children, the malloc refcount
+ * of its parent, and finally its own malloc refcount. For MSTB #4 and port
+ * #4, this means they both have been disconnected from the topology and freed
+ * from memory. But, because payload #2 is still holding a reference to port
+ * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
+ * is still accessible from memory. This also means port #3 has not yet
+ * decremented the malloc refcount of MSTB #3, so its &struct
+ * drm_dp_mst_branch will also stay allocated in memory until port #3's
+ * malloc refcount reaches 0.
+ *
+ * This relationship is necessary because in order to release payload #2, we
+ * need to be able to figure out the last relative of port #3 that's still
+ * connected to the topology. In this case, we would travel up the topology as
+ * shown below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-3.dot
+ *
+ * And finally, remove payload #2 by communicating with port #2 through
+ * sideband transactions.
+ */
+
+/**
+ * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_put_mstb_malloc()
+ */
+static void
+drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+	kref_get(&mstb->malloc_kref);
+	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
+}
+
+/**
+ * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_get_mstb_malloc()
+ */
+static void
+drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
+	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
+}
+
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+	struct drm_dp_mst_port *port =
+		container_of(kref, struct drm_dp_mst_port, malloc_kref);
+
+	drm_dp_mst_put_mstb_malloc(port->parent);
+	kfree(port);
+}
+
+/**
+ * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * Because @port could potentially be freed at any time by the DP MST helpers
+ * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
+ * function, drivers that wish to make use of &struct drm_dp_mst_port should
+ * ensure that they grab at least one main malloc reference to their MST ports
+ * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
+ * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
+ *
+ * See also: drm_dp_mst_put_port_malloc()
+ */
+void
+drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
+{
+	kref_get(&port->malloc_kref);
+	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
+}
+EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
+
+/**
+ * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * See also: drm_dp_mst_get_port_malloc()
+ */
+void
+drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
+{
+	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
+	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
+}
+EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
-	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+	struct drm_dp_mst_branch *mstb =
+		container_of(kref, struct drm_dp_mst_branch, topology_kref);
+	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
 	struct drm_dp_mst_port *port, *tmp;
 	bool wake_tx = false;
 
-	/*
-	 * init kref again to be used by ports to remove mst branch when it is
-	 * not needed anymore
-	 */
-	kref_init(kref);
-
-	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
-		kref_get(&mstb->port_parent->kref);
-
-	/*
-	 * destroy all ports - don't need lock
-	 * as there are no more references to the mst branch
-	 * device at this point.
-	 */
+	mutex_lock(&mgr->lock);
 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
 		list_del(&port->next);
-		drm_dp_put_port(port);
+		drm_dp_mst_topology_put_port(port);
 	}
+	mutex_unlock(&mgr->lock);
 
 	/* drop any tx slots msg */
 	mutex_lock(&mstb->mgr->qlock);
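To make the new malloc refcounting concrete, here is a minimal driver-side sketch (an illustration, not part of the patch; the my_* names are hypothetical) of pinning a port's allocation for as long as some driver-private state points at it, using only the two helpers exported above:

struct my_port_ref {
	struct drm_dp_mst_port *port; /* pinned by a malloc reference */
};

static void my_port_ref_init(struct my_port_ref *ref,
			     struct drm_dp_mst_port *port)
{
	drm_dp_mst_get_port_malloc(port); /* keep the allocation alive */
	ref->port = port;
}

static void my_port_ref_fini(struct my_port_ref *ref)
{
	drm_dp_mst_put_port_malloc(ref->port); /* may free the port */
	ref->port = NULL;
}

Note that a malloc reference only keeps the memory valid; it says nothing about whether the port is still connected, which is what the topology refcounts below track.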
@@ -908,14 +1074,83 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 	if (wake_tx)
 		wake_up_all(&mstb->mgr->tx_waitq);
 
-	kref_put(kref, drm_dp_free_mst_branch_device);
+	drm_dp_mst_put_mstb_malloc(mstb);
 }
 
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+/**
+ * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
+ * branch device unless it's zero
+ * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @mstb, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
+ * reached 0). Holding a topology reference implies that a malloc reference
+ * will be held to @mstb as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @mstb. If you already have a topology reference to @mstb, you
+ * should use drm_dp_mst_topology_get_mstb() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @mstb is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
 {
-	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+	int ret = kref_get_unless_zero(&mstb->topology_kref);
+
+	if (ret)
+		DRM_DEBUG("mstb %p (%d)\n", mstb,
+			  kref_read(&mstb->topology_kref));
+
+	return ret;
 }
 
+/**
+ * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
+ * branch device
+ * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_branch.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ */
+static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
+{
+	WARN_ON(kref_read(&mstb->topology_kref) == 0);
+	kref_get(&mstb->topology_kref);
+	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
+ *
+ * Releases a topology reference from @mstb by decrementing
+ * &drm_dp_mst_branch.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_get_mstb()
+ */
+static void
+drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
+{
+	DRM_DEBUG("mstb %p (%d)\n",
+		  mstb, kref_read(&mstb->topology_kref) - 1);
+	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
+}
 
 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
 {
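The try-get helpers are thin wrappers around kref_get_unless_zero(), so a lookup that races with the final topology unref simply fails instead of resurrecting a dying object. The general pattern, sketched outside of the MST code with hypothetical types (a list of refcounted entries protected by a mutex):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct entry {
	struct list_head node;
	struct kref kref;
	int id;
};

static struct entry *entry_lookup(struct list_head *head,
				  struct mutex *lock, int id)
{
	struct entry *e, *found = NULL;

	mutex_lock(lock);
	list_for_each_entry(e, head, node) {
		/* Only take a reference if the entry is still alive */
		if (e->id == id && kref_get_unless_zero(&e->kref)) {
			found = e;
			break;
		}
	}
	mutex_unlock(lock);

	return found; /* NULL if absent or already being destroyed */
}

This is exactly what drm_dp_mst_topology_get_mstb_validated() and friends do below: walk the topology under the manager's lock and only hand out an mstb or port after a successful try-get.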
@@ -930,19 +1165,18 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
 	case DP_PEER_DEVICE_MST_BRANCHING:
 		mstb = port->mstb;
 		port->mstb = NULL;
-		drm_dp_put_mst_branch_device(mstb);
+		drm_dp_mst_topology_put_mstb(mstb);
 		break;
 	}
 }
 
 static void drm_dp_destroy_port(struct kref *kref)
 {
-	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+	struct drm_dp_mst_port *port =
+		container_of(kref, struct drm_dp_mst_port, topology_kref);
 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 
 	if (!port->input) {
-		port->vcpi.num_slots = 0;
-
 		kfree(port->cached_edid);
 
 		/*
@@ -956,7 +1190,6 @@ static void drm_dp_destroy_port(struct kref *kref)
 		 * from an EDID retrieval */
 
 		mutex_lock(&mgr->destroy_connector_lock);
-		kref_get(&port->parent->kref);
 		list_add(&port->next, &mgr->destroy_connector_list);
 		mutex_unlock(&mgr->destroy_connector_lock);
 		schedule_work(&mgr->destroy_connector_work);
@@ -967,25 +1200,95 @@ static void drm_dp_destroy_port(struct kref *kref)
 		drm_dp_port_teardown_pdt(port, port->pdt);
 		port->pdt = DP_PEER_DEVICE_NONE;
 	}
-	kfree(port);
+	drm_dp_mst_put_port_malloc(port);
 }
 
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
+ * port unless it's zero
+ * @port: &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @port, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
+ * 0). Holding a topology reference implies that a malloc reference will be
+ * held to @port as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @port. If you already have a topology reference to @port, you
+ * should use drm_dp_mst_topology_get_port() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_port()
+ * drm_dp_mst_topology_put_port()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @port is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
 {
-	kref_put(&port->kref, drm_dp_destroy_port);
+	int ret = kref_get_unless_zero(&port->topology_kref);
+
+	if (ret)
+		DRM_DEBUG("port %p (%d)\n", port,
+			  kref_read(&port->topology_kref));
+
+	return ret;
 }
 
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+/**
+ * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
+ * @port: The &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_port.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_put_port()
+ */
+static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
+{
+	WARN_ON(kref_read(&port->topology_kref) == 0);
+	kref_get(&port->topology_kref);
+	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_port() - release a topology reference to a port
+ * @port: The &struct drm_dp_mst_port to release the topology reference from
+ *
+ * Releases a topology reference from @port by decrementing
+ * &drm_dp_mst_port.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_get_port()
+ */
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
+{
+	DRM_DEBUG("port %p (%d)\n",
+		  port, kref_read(&port->topology_kref) - 1);
+	kref_put(&port->topology_kref, drm_dp_destroy_port);
+}
+
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
+					      struct drm_dp_mst_branch *to_find)
 {
 	struct drm_dp_mst_port *port;
 	struct drm_dp_mst_branch *rmstb;
-	if (to_find == mstb) {
-		kref_get(&mstb->kref);
+
+	if (to_find == mstb)
 		return mstb;
-	}
+
 	list_for_each_entry(port, &mstb->ports, next) {
 		if (port->mstb) {
-			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+			    port->mstb, to_find);
 			if (rmstb)
 				return rmstb;
 		}
@@ -993,27 +1296,37 @@ static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct
 	return NULL;
 }
 
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_mst_branch *mstb)
 {
 	struct drm_dp_mst_branch *rmstb = NULL;
+
 	mutex_lock(&mgr->lock);
-	if (mgr->mst_primary)
-		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+	if (mgr->mst_primary) {
+		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+		    mgr->mst_primary, mstb);
+
+		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
+			rmstb = NULL;
+	}
 	mutex_unlock(&mgr->lock);
 	return rmstb;
 }
 
-static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
+					      struct drm_dp_mst_port *to_find)
 {
 	struct drm_dp_mst_port *port, *mport;
 
 	list_for_each_entry(port, &mstb->ports, next) {
-		if (port == to_find) {
-			kref_get(&port->kref);
+		if (port == to_find)
 			return port;
-		}
+
 		if (port->mstb) {
-			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+			mport = drm_dp_mst_topology_get_port_validated_locked(
+			    port->mstb, to_find);
 			if (mport)
 				return mport;
 		}
@@ -1021,12 +1334,20 @@ static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_
 	return NULL;
 }
 
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_mst_port *port)
 {
 	struct drm_dp_mst_port *rport = NULL;
+
 	mutex_lock(&mgr->lock);
-	if (mgr->mst_primary)
-		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+	if (mgr->mst_primary) {
+		rport = drm_dp_mst_topology_get_port_validated_locked(
+		    mgr->mst_primary, port);
+
+		if (rport && !drm_dp_mst_topology_try_get_port(rport))
+			rport = NULL;
+	}
 	mutex_unlock(&mgr->lock);
 	return rport;
 }
@@ -1034,11 +1355,12 @@ static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_t
 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
 {
 	struct drm_dp_mst_port *port;
+	int ret;
 
 	list_for_each_entry(port, &mstb->ports, next) {
 		if (port->port_num == port_num) {
-			kref_get(&port->kref);
-			return port;
+			ret = drm_dp_mst_topology_try_get_port(port);
+			return ret ? port : NULL;
 		}
 	}
 
@@ -1087,6 +1409,11 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
 	if (port->mstb) {
 		port->mstb->mgr = port->mgr;
 		port->mstb->port_parent = port;
+		/*
+		 * Make sure this port's memory allocation stays
+		 * around until its child MSTB releases it
+		 */
+		drm_dp_mst_get_port_malloc(port);
 
 		send_link = true;
 	}
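Together with the malloc reference that drm_dp_add_port() takes on the parent branch in the next hunk, this produces a strictly upward-pointing ownership chain, so dropping the last reference on a leaf cascades toward the root. A sketch of the cascade (an editorial illustration of the patch's own functions, assuming no other references are held):

/*
 * drm_dp_mst_put_port_malloc(port)        - last malloc ref on a leaf port
 *   -> drm_dp_free_mst_port(port)
 *     -> drm_dp_mst_put_mstb_malloc(port->parent)
 *       -> drm_dp_free_mst_branch_device()  - if that was the last ref
 *         -> drm_dp_mst_put_port_malloc(mstb->port_parent)
 *           ... and so on, up to the primary branch device.
 */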
@@ -1147,17 +1474,26 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 	bool created = false;
 	int old_pdt = 0;
 	int old_ddps = 0;
+
 	port = drm_dp_get_port(mstb, port_msg->port_number);
 	if (!port) {
 		port = kzalloc(sizeof(*port), GFP_KERNEL);
 		if (!port)
 			return;
-		kref_init(&port->kref);
+		kref_init(&port->topology_kref);
+		kref_init(&port->malloc_kref);
 		port->parent = mstb;
 		port->port_num = port_msg->port_number;
 		port->mgr = mstb->mgr;
 		port->aux.name = "DPMST";
 		port->aux.dev = dev->dev;
+
+		/*
+		 * Make sure the memory allocation for our parent branch stays
+		 * around until our own memory allocation is released
+		 */
+		drm_dp_mst_get_mstb_malloc(mstb);
+
 		created = true;
 	} else {
 		old_pdt = port->pdt;
@@ -1177,18 +1513,20 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 	   for this list */
 	if (created) {
 		mutex_lock(&mstb->mgr->lock);
-		kref_get(&port->kref);
+		drm_dp_mst_topology_get_port(port);
 		list_add(&port->next, &mstb->ports);
 		mutex_unlock(&mstb->mgr->lock);
 	}
 
 	if (old_ddps != port->ddps) {
 		if (port->ddps) {
-			if (!port->input)
-				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+			if (!port->input) {
+				drm_dp_send_enum_path_resources(mstb->mgr,
+								mstb, port);
+			}
 		} else {
 			port->available_pbn = 0;
 		}
 	}
 
 	if (old_pdt != port->pdt && !port->input) {
@@ -1202,21 +1540,25 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 	if (created && !port->input) {
 		char proppath[255];
 
-		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
-		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+		build_mst_prop_path(mstb, port->port_num, proppath,
+				    sizeof(proppath));
+		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
+								   port,
+								   proppath);
 		if (!port->connector) {
 			/* remove it from the port list */
 			mutex_lock(&mstb->mgr->lock);
 			list_del(&port->next);
 			mutex_unlock(&mstb->mgr->lock);
 			/* drop port list reference */
-			drm_dp_put_port(port);
+			drm_dp_mst_topology_put_port(port);
 			goto out;
 		}
 		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
 		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
 		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
-			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+			port->cached_edid = drm_get_edid(port->connector,
+							 &port->aux.ddc);
 			drm_connector_set_tile_property(port->connector);
 		}
 		(*mstb->mgr->cbs->register_connector)(port->connector);
@@ -1224,7 +1566,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
 out:
 	/* put reference to this port */
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 }
 
 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
@@ -1259,7 +1601,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
 		dowork = true;
 	}
 
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 	if (dowork)
 		queue_work(system_long_wq, &mstb->mgr->work);
 
@@ -1270,7 +1612,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
 {
 	struct drm_dp_mst_branch *mstb;
 	struct drm_dp_mst_port *port;
-	int i;
+	int i, ret;
 	/* find the port by iterating down */
 
 	mutex_lock(&mgr->lock);
@@ -1295,7 +1637,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
 			}
 		}
 	}
-	kref_get(&mstb->kref);
+	ret = drm_dp_mst_topology_try_get_mstb(mstb);
+	if (!ret)
+		mstb = NULL;
 out:
 	mutex_unlock(&mgr->lock);
 	return mstb;
@@ -1325,19 +1669,22 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
 	return NULL;
 }
 
-static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
-	struct drm_dp_mst_topology_mgr *mgr,
-	uint8_t *guid)
+static struct drm_dp_mst_branch *
+drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
+				     uint8_t *guid)
 {
 	struct drm_dp_mst_branch *mstb;
+	int ret;
 
 	/* find the port by iterating down */
 	mutex_lock(&mgr->lock);
 
 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
-
-	if (mstb)
-		kref_get(&mstb->kref);
+	if (mstb) {
+		ret = drm_dp_mst_topology_try_get_mstb(mstb);
+		if (!ret)
+			mstb = NULL;
+	}
 
 	mutex_unlock(&mgr->lock);
 	return mstb;
@@ -1362,10 +1709,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
 			drm_dp_send_enum_path_resources(mgr, mstb, port);
 
 		if (port->mstb) {
-			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+			mstb_child = drm_dp_mst_topology_get_mstb_validated(
+			    mgr, port->mstb);
 			if (mstb_child) {
 				drm_dp_check_and_send_link_address(mgr, mstb_child);
-				drm_dp_put_mst_branch_device(mstb_child);
+				drm_dp_mst_topology_put_mstb(mstb_child);
 			}
 		}
 	}
@@ -1375,16 +1723,19 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
 {
 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
 	struct drm_dp_mst_branch *mstb;
+	int ret;
 
 	mutex_lock(&mgr->lock);
 	mstb = mgr->mst_primary;
 	if (mstb) {
-		kref_get(&mstb->kref);
+		ret = drm_dp_mst_topology_try_get_mstb(mstb);
+		if (!ret)
+			mstb = NULL;
 	}
 	mutex_unlock(&mgr->lock);
 	if (mstb) {
 		drm_dp_check_and_send_link_address(mgr, mstb);
-		drm_dp_put_mst_branch_device(mstb);
+		drm_dp_mst_topology_put_mstb(mstb);
 	}
 }
 
@@ -1695,22 +2046,40 @@ static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm
 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
 }
 
-static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
-									 struct drm_dp_mst_branch *mstb,
-									 int *port_num)
+/*
+ * Searches upwards in the topology starting from mstb to try to find the
+ * closest available parent of mstb that's still connected to the rest of the
+ * topology. This can be used in order to perform operations like releasing
+ * payloads, where the branch device which owned the payload may no longer be
+ * around and thus would require that the payload on the last living relative
+ * be freed instead.
+ */
+static struct drm_dp_mst_branch *
+drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+					struct drm_dp_mst_branch *mstb,
+					int *port_num)
 {
 	struct drm_dp_mst_branch *rmstb = NULL;
 	struct drm_dp_mst_port *found_port;
+
 	mutex_lock(&mgr->lock);
-	if (mgr->mst_primary) {
+	if (!mgr->mst_primary)
+		goto out;
+
+	do {
 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+		if (!found_port)
+			break;
 
-		if (found_port) {
+		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
 			rmstb = found_port->parent;
-			kref_get(&rmstb->kref);
 			*port_num = found_port->port_num;
+		} else {
+			/* Search again, starting from this parent */
+			mstb = found_port->parent;
 		}
-	}
+	} while (!rmstb);
+out:
 	mutex_unlock(&mgr->lock);
 	return rmstb;
 }
@@ -1726,19 +2095,15 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
-	if (!port)
-		return -EINVAL;
-
 	port_num = port->port_num;
-	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
 	if (!mstb) {
-		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
+							       port->parent,
+							       &port_num);
 
-		if (!mstb) {
-			drm_dp_put_port(port);
+		if (!mstb)
 			return -EINVAL;
-		}
 	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1757,17 +2122,24 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 
 	drm_dp_queue_down_tx(mgr, txmsg);
 
+	/*
+	 * FIXME: there is a small chance that between getting the last
+	 * connected mstb and sending the payload message, the last connected
+	 * mstb could also be removed from the topology. In the future, this
+	 * needs to be fixed by restarting the
+	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
+	 * timeout if the topology is still connected to the system.
+	 */
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
 	if (ret > 0) {
-		if (txmsg->reply.reply_type == 1) {
+		if (txmsg->reply.reply_type == 1)
 			ret = -EINVAL;
-		} else
+		else
 			ret = 0;
 	}
 	kfree(txmsg);
 fail_put:
-	drm_dp_put_mst_branch_device(mstb);
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 }
 
@@ -1777,13 +2149,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 	struct drm_dp_sideband_msg_tx *txmsg;
 	int len, ret;
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return -EINVAL;
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 	if (!txmsg) {
-		drm_dp_put_port(port);
+		drm_dp_mst_topology_put_port(port);
 		return -ENOMEM;
 	}
 
@@ -1799,7 +2171,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
 		ret = 0;
 	}
 	kfree(txmsg);
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 
 	return ret;
 }
@@ -1872,15 +2244,16 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
  */
 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 {
-	int i, j;
-	int cur_slots = 1;
 	struct drm_dp_payload req_payload;
 	struct drm_dp_mst_port *port;
+	int i, j;
+	int cur_slots = 1;
 
 	mutex_lock(&mgr->payload_lock);
 	for (i = 0; i < mgr->max_payloads; i++) {
 		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
 		struct drm_dp_payload *payload = &mgr->payloads[i];
+		bool put_port = false;
 
 		/* solve the current payloads - compare to the hw ones
 		   - update the hw view */
@@ -1888,11 +2261,20 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		if (vcpi) {
 			port = container_of(vcpi, struct drm_dp_mst_port,
 					    vcpi);
-			port = drm_dp_get_validated_port_ref(mgr, port);
-			if (!port) {
-				mutex_unlock(&mgr->payload_lock);
-				return -EINVAL;
+
+			/* Validated ports don't matter if we're releasing
+			 * VCPI
+			 */
+			if (vcpi->num_slots) {
+				port = drm_dp_mst_topology_get_port_validated(
+				    mgr, port);
+				if (!port) {
+					mutex_unlock(&mgr->payload_lock);
+					return -EINVAL;
+				}
+				put_port = true;
 			}
+
 			req_payload.num_slots = vcpi->num_slots;
 			req_payload.vcpi = vcpi->vcpi;
 		} else {
@@ -1924,8 +2306,8 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		}
 		cur_slots += req_payload.num_slots;
 
-		if (port)
-			drm_dp_put_port(port);
+		if (put_port)
+			drm_dp_mst_topology_put_port(port);
 	}
 
 	for (i = 0; i < mgr->max_payloads; i++) {
@@ -2024,7 +2406,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 	struct drm_dp_sideband_msg_tx *txmsg;
 	struct drm_dp_mst_branch *mstb;
 
-	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
 	if (!mstb)
 		return -EINVAL;
 
@@ -2048,7 +2430,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 	}
 	kfree(txmsg);
 fail_put:
-	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 }
 
@@ -2158,7 +2540,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 
 	/* give this the main reference */
 	mgr->mst_primary = mstb;
-	kref_get(&mgr->mst_primary->kref);
+	drm_dp_mst_topology_get_mstb(mgr->mst_primary);
 
 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
 				 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
@@ -2192,7 +2574,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
 out_unlock:
 	mutex_unlock(&mgr->lock);
 	if (mstb)
-		drm_dp_put_mst_branch_device(mstb);
+		drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 
 }
@@ -2357,7 +2739,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 			      mgr->down_rep_recv.initial_hdr.lct,
 			      mgr->down_rep_recv.initial_hdr.rad[0],
 			      mgr->down_rep_recv.msg[0]);
-		drm_dp_put_mst_branch_device(mstb);
+		drm_dp_mst_topology_put_mstb(mstb);
 		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 		return 0;
 	}
@@ -2368,7 +2750,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 		}
 
 		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
-		drm_dp_put_mst_branch_device(mstb);
+		drm_dp_mst_topology_put_mstb(mstb);
 
 		mutex_lock(&mgr->qlock);
 		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
@@ -2441,7 +2823,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 		}
 
 		if (mstb)
-			drm_dp_put_mst_branch_device(mstb);
+			drm_dp_mst_topology_put_mstb(mstb);
 
 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
 	}
@@ -2501,7 +2883,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
 	enum drm_connector_status status = connector_status_disconnected;
 
 	/* we need to search for the port in the mgr in case its gone */
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return connector_status_disconnected;
 
@@ -2526,7 +2908,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
 		break;
 	}
 out:
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 	return status;
 }
 EXPORT_SYMBOL(drm_dp_mst_detect_port);
@@ -2543,11 +2925,11 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
 {
 	bool ret = false;
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return ret;
 	ret = port->has_audio;
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 	return ret;
 }
 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
@@ -2567,7 +2949,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 	struct edid *edid = NULL;
 
 	/* we need to search for the port in the mgr in case its gone */
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return NULL;
 
@@ -2578,7 +2960,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 		drm_connector_set_tile_property(connector);
 	}
 	port->has_audio = drm_detect_monitor_audio(edid);
-	drm_dp_put_port(port);
+	drm_dp_mst_topology_put_port(port);
 	return edid;
 }
 EXPORT_SYMBOL(drm_dp_mst_get_edid);
@@ -2629,43 +3011,98 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
- * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
  * @state: global atomic state
  * @mgr: MST topology manager for the port
  * @port: port to find vcpi slots for
  * @pbn: bandwidth required for the mode in PBN
  *
- * RETURNS:
- * Total slots in the atomic state assigned for this port or error
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
+ * may have had. Any atomic drivers which support MST must call this function
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
+ * current VCPI allocation for the new state, but only when
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
+ * to ensure compatibility with userspace applications that still use the
+ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+ *
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ *
+ * See also:
+ * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * Total slots in the atomic state assigned for this port, or a negative error
+ * code if the port no longer exists
  */
 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 				  struct drm_dp_mst_topology_mgr *mgr,
 				  struct drm_dp_mst_port *port, int pbn)
 {
 	struct drm_dp_mst_topology_state *topology_state;
-	int req_slots;
+	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
+	int prev_slots, req_slots, ret;
 
 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
 	if (IS_ERR(topology_state))
 		return PTR_ERR(topology_state);
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (port == NULL)
 		return -EINVAL;
-	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
-		      req_slots, topology_state->avail_slots);
 
-	if (req_slots > topology_state->avail_slots) {
-		drm_dp_put_port(port);
-		return -ENOSPC;
+	/* Find the current allocation for this port, if any */
+	list_for_each_entry(pos, &topology_state->vcpis, next) {
+		if (pos->port == port) {
+			vcpi = pos;
+			prev_slots = vcpi->vcpi;
+
+			/*
+			 * This should never happen, unless the driver tries
+			 * releasing and allocating the same VCPI allocation,
+			 * which is an error
+			 */
+			if (WARN_ON(!prev_slots)) {
+				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+					  port);
+				return -EINVAL;
+			}
+
+			break;
+		}
 	}
+	if (!vcpi)
+		prev_slots = 0;
+
+	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
 
-	topology_state->avail_slots -= req_slots;
-	DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
+	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+			 port->connector->base.id, port->connector->name,
+			 port, prev_slots, req_slots);
+
+	/* Add the new allocation to the state */
+	if (!vcpi) {
+		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
+		if (!vcpi) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		drm_dp_mst_get_port_malloc(port);
+		vcpi->port = port;
+		list_add(&vcpi->next, &topology_state->vcpis);
+	}
+	vcpi->vcpi = req_slots;
 
-	drm_dp_put_port(port);
-	return req_slots;
+	ret = req_slots;
+out:
+	drm_dp_mst_topology_put_port(port);
+	return ret;
 }
 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
 
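Given the kernel-doc above, a driver's encoder atomic_check would now look roughly like this (a hedged sketch, not taken from any driver; my_connector, to_my_connector(), and the mst_mgr/mst_port fields are hypothetical driver state, and 24 is just an example bpp):

static int my_mst_encoder_atomic_check(struct drm_encoder *encoder,
				       struct drm_crtc_state *crtc_state,
				       struct drm_connector_state *conn_state)
{
	struct my_connector *conn = to_my_connector(conn_state->connector);
	int pbn, slots;

	/* Only touch the VCPI allocation when a full modeset may happen */
	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
					      conn->mst_mgr,
					      conn->mst_port, pbn);
	if (slots < 0)
		return slots;

	return 0;
}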
@@ -2673,31 +3110,57 @@ EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
  * @state: global atomic state
  * @mgr: MST topology manager for the port
- * @slots: number of vcpi slots to release
+ * @port: The port to release the VCPI slots from
  *
- * RETURNS:
- * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
- * negative error code
+ * Releases any VCPI slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function in
+ * their &drm_connector_helper_funcs.atomic_check() callback when the
+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
+ * removed) when it had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * phase.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * 0 if all slots for this port were added back to
+ * &drm_dp_mst_topology_state.avail_slots or negative error code
  */
 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 				     struct drm_dp_mst_topology_mgr *mgr,
-				     int slots)
+				     struct drm_dp_mst_port *port)
 {
 	struct drm_dp_mst_topology_state *topology_state;
+	struct drm_dp_vcpi_allocation *pos;
+	bool found = false;
 
 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
 	if (IS_ERR(topology_state))
 		return PTR_ERR(topology_state);
 
-	/* We cannot rely on port->vcpi.num_slots to update
-	 * topology_state->avail_slots as the port may not exist if the parent
-	 * branch device was unplugged. This should be fixed by tracking
-	 * per-port slot allocation in drm_dp_mst_topology_state instead of
-	 * depending on the caller to tell us how many slots to release.
-	 */
-	topology_state->avail_slots += slots;
-	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
-		      slots, topology_state->avail_slots);
+	list_for_each_entry(pos, &topology_state->vcpis, next) {
+		if (pos->port == port) {
+			found = true;
+			break;
+		}
+	}
+	if (WARN_ON(!found)) {
+		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
+			  port, &topology_state->base);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+	if (pos->vcpi) {
+		drm_dp_mst_put_port_malloc(port);
+		pos->vcpi = 0;
+	}
 
 	return 0;
 }
@@ -2715,7 +3178,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 {
 	int ret;
 
-	port = drm_dp_get_validated_port_ref(mgr, port);
+	port = drm_dp_mst_topology_get_port_validated(mgr, port);
 	if (!port)
 		return false;
 
@@ -2723,9 +3186,10 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 		return false;
 
 	if (port->vcpi.vcpi > 0) {
-		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
 		if (pbn == port->vcpi.pbn) {
-			drm_dp_put_port(port);
+			drm_dp_mst_topology_put_port(port);
 			return true;
 		}
 	}
@@ -2733,13 +3197,15 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
 			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
 		goto out;
 	}
 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
 		      pbn, port->vcpi.num_slots);
 
-	drm_dp_put_port(port);
+	/* Keep port allocated until its payload has been removed */
+	drm_dp_mst_get_port_malloc(port);
+	drm_dp_mst_topology_put_port(port);
 	return true;
 out:
 	return false;
@@ -2749,12 +3215,12 @@ EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
2749int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 3215int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2750{ 3216{
2751 int slots = 0; 3217 int slots = 0;
2752 port = drm_dp_get_validated_port_ref(mgr, port); 3218 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2753 if (!port) 3219 if (!port)
2754 return slots; 3220 return slots;
2755 3221
2756 slots = port->vcpi.num_slots; 3222 slots = port->vcpi.num_slots;
2757 drm_dp_put_port(port); 3223 drm_dp_mst_topology_put_port(port);
2758 return slots; 3224 return slots;
2759} 3225}
2760EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); 3226EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
@@ -2768,11 +3234,12 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
2768 */ 3234 */
2769void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 3235void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2770{ 3236{
2771 port = drm_dp_get_validated_port_ref(mgr, port); 3237 /*
2772 if (!port) 3238 * A port with VCPI will remain allocated until it's VCPI is
2773 return; 3239 * released, no verified ref needed
3240 */
3241
2774 port->vcpi.num_slots = 0; 3242 port->vcpi.num_slots = 0;
2775 drm_dp_put_port(port);
2776} 3243}
2777EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); 3244EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2778 3245
@@ -2781,18 +3248,20 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2781 * @mgr: manager for this port 3248 * @mgr: manager for this port
2782 * @port: unverified port to deallocate vcpi for 3249 * @port: unverified port to deallocate vcpi for
2783 */ 3250 */
2784void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 3251void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3252 struct drm_dp_mst_port *port)
2785{ 3253{
2786 port = drm_dp_get_validated_port_ref(mgr, port); 3254 /*
2787 	if (!port) 3255 	 * A port with VCPI will remain allocated until its VCPI is
2788 	return; 3256 	 * released, so no validated ref is needed
3257 */
2789 3258
2790 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); 3259 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2791 port->vcpi.num_slots = 0; 3260 port->vcpi.num_slots = 0;
2792 port->vcpi.pbn = 0; 3261 port->vcpi.pbn = 0;
2793 port->vcpi.aligned_pbn = 0; 3262 port->vcpi.aligned_pbn = 0;
2794 port->vcpi.vcpi = 0; 3263 port->vcpi.vcpi = 0;
2795 drm_dp_put_port(port); 3264 drm_dp_mst_put_port_malloc(port);
2796} 3265}
2797EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); 3266EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
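Since reset and deallocate now rely on the malloc reference to keep the port alive, the expected driver-side ordering is sketched below in condensed form. Real drivers split this across their disable/post_disable hooks and wait for the ACT event between the two payload updates; the wrapper function name here is hypothetical.

	#include <drm/drm_dp_mst_helper.h>

	static void example_mst_teardown(struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_dp_mst_port *port)
	{
		/* Zero the slot count so the payload gets removed from the table */
		drm_dp_mst_reset_vcpi_slots(mgr, port);
		/* Write the shrunken payload table to the branch devices */
		drm_dp_update_payload_part1(mgr);
		/* ... ACT handling elided ... */
		drm_dp_update_payload_part2(mgr);
		/* Free the VCPI ID and drop the malloc ref taken at allocation */
		drm_dp_mst_deallocate_vcpi(mgr, port);
	}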
2798 3267
@@ -3076,13 +3545,6 @@ static void drm_dp_tx_work(struct work_struct *work)
3076 mutex_unlock(&mgr->qlock); 3545 mutex_unlock(&mgr->qlock);
3077} 3546}
3078 3547
3079static void drm_dp_free_mst_port(struct kref *kref)
3080{
3081 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
3082 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
3083 kfree(port);
3084}
3085
3086static void drm_dp_destroy_connector_work(struct work_struct *work) 3548static void drm_dp_destroy_connector_work(struct work_struct *work)
3087{ 3549{
3088 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); 3550 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -3103,7 +3565,6 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3103 list_del(&port->next); 3565 list_del(&port->next);
3104 mutex_unlock(&mgr->destroy_connector_lock); 3566 mutex_unlock(&mgr->destroy_connector_lock);
3105 3567
3106 kref_init(&port->kref);
3107 INIT_LIST_HEAD(&port->next); 3568 INIT_LIST_HEAD(&port->next);
3108 3569
3109 mgr->cbs->destroy_connector(mgr, port->connector); 3570 mgr->cbs->destroy_connector(mgr, port->connector);
@@ -3111,13 +3572,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3111 drm_dp_port_teardown_pdt(port, port->pdt); 3572 drm_dp_port_teardown_pdt(port, port->pdt);
3112 port->pdt = DP_PEER_DEVICE_NONE; 3573 port->pdt = DP_PEER_DEVICE_NONE;
3113 3574
3114 if (!port->input && port->vcpi.vcpi > 0) { 3575 drm_dp_mst_put_port_malloc(port);
3115 drm_dp_mst_reset_vcpi_slots(mgr, port);
3116 drm_dp_update_payload_part1(mgr);
3117 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3118 }
3119
3120 kref_put(&port->kref, drm_dp_free_mst_port);
3121 send_hotplug = true; 3576 send_hotplug = true;
3122 } 3577 }
3123 if (send_hotplug) 3578 if (send_hotplug)
@@ -3127,15 +3582,41 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3127static struct drm_private_state * 3582static struct drm_private_state *
3128drm_dp_mst_duplicate_state(struct drm_private_obj *obj) 3583drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
3129{ 3584{
3130 struct drm_dp_mst_topology_state *state; 3585 struct drm_dp_mst_topology_state *state, *old_state =
3586 to_dp_mst_topology_state(obj->state);
3587 struct drm_dp_vcpi_allocation *pos, *vcpi;
3131 3588
3132 state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL); 3589 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
3133 if (!state) 3590 if (!state)
3134 return NULL; 3591 return NULL;
3135 3592
3136 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); 3593 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
3137 3594
3595 INIT_LIST_HEAD(&state->vcpis);
3596
3597 list_for_each_entry(pos, &old_state->vcpis, next) {
3598 /* Prune leftover freed VCPI allocations */
3599 if (!pos->vcpi)
3600 continue;
3601
3602 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
3603 if (!vcpi)
3604 goto fail;
3605
3606 drm_dp_mst_get_port_malloc(vcpi->port);
3607 list_add(&vcpi->next, &state->vcpis);
3608 }
3609
3138 return &state->base; 3610 return &state->base;
3611
3612fail:
3613 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
3614 drm_dp_mst_put_port_malloc(pos->port);
3615 kfree(pos);
3616 }
3617 kfree(state);
3618
3619 return NULL;
3139} 3620}
3140 3621
3141static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, 3622static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
@@ -3143,14 +3624,99 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
3143{ 3624{
3144 struct drm_dp_mst_topology_state *mst_state = 3625 struct drm_dp_mst_topology_state *mst_state =
3145 to_dp_mst_topology_state(state); 3626 to_dp_mst_topology_state(state);
3627 struct drm_dp_vcpi_allocation *pos, *tmp;
3628
3629 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
3630 /* We only keep references to ports with non-zero VCPIs */
3631 if (pos->vcpi)
3632 drm_dp_mst_put_port_malloc(pos->port);
3633 kfree(pos);
3634 }
3146 3635
3147 kfree(mst_state); 3636 kfree(mst_state);
3148} 3637}
3149 3638
3150static const struct drm_private_state_funcs mst_state_funcs = { 3639static inline int
3640drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
3641 struct drm_dp_mst_topology_state *mst_state)
3642{
3643 struct drm_dp_vcpi_allocation *vcpi;
3644 int avail_slots = 63, payload_count = 0;
3645
3646 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
3647 		/* Releasing VCPI is always OK, even if the port is gone */
3648 if (!vcpi->vcpi) {
3649 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
3650 vcpi->port);
3651 continue;
3652 }
3653
3654 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
3655 vcpi->port, vcpi->vcpi);
3656
3657 avail_slots -= vcpi->vcpi;
3658 if (avail_slots < 0) {
3659 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
3660 vcpi->port, mst_state,
3661 avail_slots + vcpi->vcpi);
3662 return -ENOSPC;
3663 }
3664
3665 if (++payload_count > mgr->max_payloads) {
3666 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
3667 mgr, mst_state, mgr->max_payloads);
3668 return -EINVAL;
3669 }
3670 }
3671 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
3672 mgr, mst_state, avail_slots,
3673 63 - avail_slots);
3674
3675 return 0;
3676}
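For a sense of the numbers this check works with, a back-of-the-envelope example (not from this patch, and assuming the usual DP MST constants):

	/*
	 * An HBR2 x4 link yields mgr->pbn_div = 40, so a mode needing
	 * 1063 PBN - one of the test_calc_pbn_mode() vectors in this
	 * file - costs DIV_ROUND_UP(1063, 40) = 27 of the 63 usable
	 * time slots, leaving 36 for the remaining payloads.
	 */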
3677
3678/**
3679 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
3680 * atomic update is valid
3681 * @state: Pointer to the new &struct drm_dp_mst_topology_state
3682 *
3683 * Checks the given topology state for an atomic update to ensure that it's
3684 * valid. This includes checking whether there's enough bandwidth to support
3685 * the new VCPI allocations in the atomic update.
3686 *
3687 * Any atomic drivers supporting DP MST must make sure to call this after
3688 * checking the rest of their state in their
3689 * &drm_mode_config_funcs.atomic_check() callback.
3690 *
3691 * See also:
3692 * drm_dp_atomic_find_vcpi_slots()
3693 * drm_dp_atomic_release_vcpi_slots()
3694 *
3695 * Returns:
3696 *
3697 * 0 if the new state is valid, negative error code otherwise.
3698 */
3699int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
3700{
3701 struct drm_dp_mst_topology_mgr *mgr;
3702 struct drm_dp_mst_topology_state *mst_state;
3703 int i, ret = 0;
3704
3705 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
3706 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
3707 if (ret)
3708 break;
3709 }
3710
3711 return ret;
3712}
3713EXPORT_SYMBOL(drm_dp_mst_atomic_check);
3714
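The kernel-doc above asks atomic drivers to run this check after the rest of their state validation. A minimal sketch of a driver's &drm_mode_config_funcs.atomic_check doing so; only the final call is from this patch, the rest is a generic helper-based driver skeleton.

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_dp_mst_helper.h>

	static int example_atomic_check(struct drm_device *dev,
					struct drm_atomic_state *state)
	{
		int ret;

		/* Validate CRTCs, planes and connectors first */
		ret = drm_atomic_helper_check(dev, state);
		if (ret)
			return ret;

		/* Then make sure the resulting VCPI allocations still fit */
		return drm_dp_mst_atomic_check(state);
	}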
3715const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
3151 .atomic_duplicate_state = drm_dp_mst_duplicate_state, 3716 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
3152 .atomic_destroy_state = drm_dp_mst_destroy_state, 3717 .atomic_destroy_state = drm_dp_mst_destroy_state,
3153}; 3718};
3719EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
3154 3720
3155/** 3721/**
3156 * drm_atomic_get_mst_topology_state: get MST topology state 3722 * drm_atomic_get_mst_topology_state: get MST topology state
@@ -3228,13 +3794,11 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3228 return -ENOMEM; 3794 return -ENOMEM;
3229 3795
3230 mst_state->mgr = mgr; 3796 mst_state->mgr = mgr;
3231 3797 INIT_LIST_HEAD(&mst_state->vcpis);
3232 /* max. time slots - one slot for MTP header */
3233 mst_state->avail_slots = 63;
3234 3798
3235 drm_atomic_private_obj_init(dev, &mgr->base, 3799 drm_atomic_private_obj_init(dev, &mgr->base,
3236 &mst_state->base, 3800 &mst_state->base,
3237 &mst_state_funcs); 3801 &drm_dp_mst_topology_state_funcs);
3238 3802
3239 return 0; 3803 return 0;
3240} 3804}
@@ -3292,7 +3856,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
3292 struct drm_dp_sideband_msg_tx *txmsg = NULL; 3856 struct drm_dp_sideband_msg_tx *txmsg = NULL;
3293 int ret; 3857 int ret;
3294 3858
3295 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); 3859 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3296 if (!mstb) 3860 if (!mstb)
3297 return -EREMOTEIO; 3861 return -EREMOTEIO;
3298 3862
@@ -3342,7 +3906,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
3342 } 3906 }
3343out: 3907out:
3344 kfree(txmsg); 3908 kfree(txmsg);
3345 drm_dp_put_mst_branch_device(mstb); 3909 drm_dp_mst_topology_put_mstb(mstb);
3346 return ret; 3910 return ret;
3347} 3911}
3348 3912