path: root/drivers/gpu/drm/drm_dp_mst_topology.c
author     Lyude Paul <lyude@redhat.com>  2019-01-10 19:53:32 -0500
committer  Lyude Paul <lyude@redhat.com>  2019-01-10 20:12:23 -0500
commit     cfe9f90358d97a83253443b0182bff1222d3a7ac (patch)
tree       e842d8293846d1319a3a305e2dbbd4c7205f0d63 /drivers/gpu/drm/drm_dp_mst_topology.c
parent     a68f9917721bba02de43f728e31426f2384cb961 (diff)
drm/dp_mst: Fix payload deallocation on hotplugs using malloc refs
Up until now, freeing payloads on remote MST hubs that just had ports removed has almost never worked. We have been relying on port validation to stop us from accessing ports that have already been freed from memory, but a port which needs its payload released because it was removed will never again be a valid part of the topology.

Since we've introduced malloc refs, we can replace all of the validation logic in the payload helpers used for deallocation with some well-placed malloc krefs. This ensures that, regardless of whether or not a port is still valid and in the topology, any port which has an allocated payload will remain allocated in memory until its payloads have been removed - finally allowing us to actually release said payloads correctly.

Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Harry Wentland <harry.wentland@amd.com>
Cc: David Airlie <airlied@redhat.com>
Cc: Jerry Zuo <Jerry.Zuo@amd.com>
Cc: Juston Li <juston.li@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190111005343.17443-10-lyude@redhat.com
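[Editor's note] A minimal sketch of the intended reference lifetime, for context only. It uses the real exported helpers (drm_dp_mst_allocate_vcpi(), drm_dp_update_payload_part1(), drm_dp_mst_reset_vcpi_slots(), drm_dp_mst_deallocate_vcpi()); the example_* wrapper functions and the simplified call order are hypothetical and not part of this commit:

#include <drm/drm_dp_mst_helper.h>

/*
 * Illustrative only: after this patch, drm_dp_mst_allocate_vcpi() takes a
 * malloc ref on the port, so the structure stays in memory even if the port
 * is later unplugged, and teardown no longer needs a validated topology ref.
 */
static int example_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int pbn, int slots)
{
	/* Takes a malloc ref on @port internally (see the hunk in
	 * drm_dp_mst_allocate_vcpi() below). */
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
		return -EINVAL;

	return drm_dp_update_payload_part1(mgr);
}

static void example_disable_stream(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port)
{
	/* Safe even after the port has left the topology: only the malloc
	 * ref is required here, no validation. */
	drm_dp_mst_reset_vcpi_slots(mgr, port);
	drm_dp_update_payload_part1(mgr);

	/* Drops the malloc ref taken at allocation time. */
	drm_dp_mst_deallocate_vcpi(mgr, port);
}

The key point the diff below implements is that only the allocation path needs a validated topology reference; the deallocation path needs only the malloc ref, so it can run after a hotplug removal.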
Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c  54
1 file changed, 30 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index bb9107852fed..b5976f8c318c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2095,10 +2095,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
-	port = drm_dp_mst_topology_get_port_validated(mgr, port);
-	if (!port)
-		return -EINVAL;
-
 	port_num = port->port_num;
 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
 	if (!mstb) {
@@ -2106,10 +2102,8 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 							       port->parent,
 							       &port_num);
 
-		if (!mstb) {
-			drm_dp_mst_topology_put_port(port);
+		if (!mstb)
 			return -EINVAL;
-		}
 	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2146,7 +2140,6 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	kfree(txmsg);
 fail_put:
 	drm_dp_mst_topology_put_mstb(mstb);
-	drm_dp_mst_topology_put_port(port);
 	return ret;
 }
 
@@ -2251,15 +2244,16 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
  */
 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 {
-	int i, j;
-	int cur_slots = 1;
 	struct drm_dp_payload req_payload;
 	struct drm_dp_mst_port *port;
+	int i, j;
+	int cur_slots = 1;
 
 	mutex_lock(&mgr->payload_lock);
 	for (i = 0; i < mgr->max_payloads; i++) {
 		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
 		struct drm_dp_payload *payload = &mgr->payloads[i];
+		bool put_port = false;
 
 		/* solve the current payloads - compare to the hw ones
 		   - update the hw view */
@@ -2267,12 +2261,20 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		if (vcpi) {
 			port = container_of(vcpi, struct drm_dp_mst_port,
 					    vcpi);
-			port = drm_dp_mst_topology_get_port_validated(mgr,
-								      port);
-			if (!port) {
-				mutex_unlock(&mgr->payload_lock);
-				return -EINVAL;
+
+			/* Validated ports don't matter if we're releasing
+			 * VCPI
+			 */
+			if (vcpi->num_slots) {
+				port = drm_dp_mst_topology_get_port_validated(
+				    mgr, port);
+				if (!port) {
+					mutex_unlock(&mgr->payload_lock);
+					return -EINVAL;
+				}
+				put_port = true;
 			}
+
 			req_payload.num_slots = vcpi->num_slots;
 			req_payload.vcpi = vcpi->vcpi;
 		} else {
@@ -2304,7 +2306,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 		}
 		cur_slots += req_payload.num_slots;
 
-		if (port)
+		if (put_port)
 			drm_dp_mst_topology_put_port(port);
 	}
 
@@ -3120,6 +3122,8 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
 		      pbn, port->vcpi.num_slots);
 
+	/* Keep port allocated until it's payload has been removed */
+	drm_dp_mst_get_port_malloc(port);
 	drm_dp_mst_topology_put_port(port);
 	return true;
 out:
@@ -3149,11 +3153,12 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
  */
 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
 {
-	port = drm_dp_mst_topology_get_port_validated(mgr, port);
-	if (!port)
-		return;
+	/*
+	 * A port with VCPI will remain allocated until it's VCPI is
+	 * released, no verified ref needed
+	 */
+
 	port->vcpi.num_slots = 0;
-	drm_dp_mst_topology_put_port(port);
 }
 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
 
@@ -3165,16 +3170,17 @@ EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 				struct drm_dp_mst_port *port)
 {
-	port = drm_dp_mst_topology_get_port_validated(mgr, port);
-	if (!port)
-		return;
+	/*
+	 * A port with VCPI will remain allocated until it's VCPI is
+	 * released, no verified ref needed
+	 */
 
 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
 	port->vcpi.num_slots = 0;
 	port->vcpi.pbn = 0;
 	port->vcpi.aligned_pbn = 0;
 	port->vcpi.vcpi = 0;
-	drm_dp_mst_topology_put_port(port);
+	drm_dp_mst_put_port_malloc(port);
 }
 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
 