Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 164
1 file changed, 157 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d3fc7e4e85b7..222eb1a8549b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -737,16 +737,16 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
 			      struct drm_dp_sideband_msg_tx *txmsg)
 {
-	bool ret;
+	unsigned int state;
 
 	/*
 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
 	 * cases we check here are terminal states. For those the barriers
 	 * provided by the wake_up/wait_event pair are enough.
 	 */
-	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
-	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
-	return ret;
+	state = READ_ONCE(txmsg->state);
+	return (state == DRM_DP_SIDEBAND_TX_RX ||
+		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
 }
 
 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
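
The READ_ONCE() matters because check_txmsg_state() is evaluated as a wait_event condition without holding mgr->qlock, so the compiler must not tear or refetch the txmsg->state load. A minimal sketch of the waiter side, modelled on drm_dp_mst_wait_tx_reply() in this file (the helper name and error handling below are illustrative, not part of the patch):

	/* Waiter side: the condition is re-evaluated lockless on every
	 * wakeup, which is why check_txmsg_state() now loads txmsg->state
	 * exactly once via READ_ONCE().
	 */
	static int wait_for_tx_reply(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_sideband_msg_tx *txmsg)
	{
		long ret;

		ret = wait_event_timeout(mgr->tx_waitq,
					 check_txmsg_state(mgr, txmsg),
					 4 * HZ); /* timeout is illustrative */
		return ret ? 0 : -EIO;
	}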
@@ -855,7 +855,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 	mutex_unlock(&mstb->mgr->qlock);
 
 	if (wake_tx)
-		wake_up(&mstb->mgr->tx_waitq);
+		wake_up_all(&mstb->mgr->tx_waitq);
 
 	kref_put(kref, drm_dp_free_mst_branch_device);
 }
@@ -1510,7 +1510,7 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 		if (txmsg->seqno != -1)
 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
-		wake_up(&mgr->tx_waitq);
+		wake_up_all(&mgr->tx_waitq);
 	}
 }
 
@@ -2258,7 +2258,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
 		mstb->tx_slots[slot] = NULL;
 		mutex_unlock(&mgr->qlock);
 
-		wake_up(&mgr->tx_waitq);
+		wake_up_all(&mgr->tx_waitq);
 	}
 	return ret;
 }
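
All three wake_up() -> wake_up_all() conversions target mgr->tx_waitq, which can have several sleepers at once: each in-flight sideband transaction parks its own task in drm_dp_mst_wait_tx_reply(). Since wait_event_timeout() adds non-exclusive waiters, wake_up() and wake_up_all() behave identically here today; the change makes the all-waiters intent explicit and stays correct if an exclusive waiter is ever introduced. A sketch of the multi-waiter situation (txmsg_a/txmsg_b are illustrative names):

	/* Task A, waiting for its own transaction to reach a terminal state: */
	wait_event_timeout(mgr->tx_waitq,
			   check_txmsg_state(mgr, txmsg_a), 4 * HZ);

	/* Task B, concurrently, waiting on a different transaction: */
	wait_event_timeout(mgr->tx_waitq,
			   check_txmsg_state(mgr, txmsg_b), 4 * HZ);

	/* A down-reply for txmsg_b must wake both tasks so that each one
	 * re-evaluates its own condition; task A simply goes back to sleep.
	 */
	wake_up_all(&mgr->tx_waitq);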
@@ -2498,6 +2498,81 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
+ * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: port to find vcpi slots for
+ * @pbn: bandwidth required for the mode in PBN
+ *
+ * RETURNS:
+ * Total slots in the atomic state assigned for this port, or a negative error code
+ */
+int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+				  struct drm_dp_mst_topology_mgr *mgr,
+				  struct drm_dp_mst_port *port, int pbn)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+	int req_slots;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (topology_state == NULL)
+		return -ENOMEM;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (port == NULL)
+		return -EINVAL;
+	req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+	DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
+		      req_slots, topology_state->avail_slots);
+
+	if (req_slots > topology_state->avail_slots) {
+		drm_dp_put_port(port);
+		return -ENOSPC;
+	}
+
+	topology_state->avail_slots -= req_slots;
+	DRM_DEBUG_KMS("vcpi slots avail=%d\n", topology_state->avail_slots);
+
+	drm_dp_put_port(port);
+	return req_slots;
+}
+EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
+
+/**
+ * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @slots: number of vcpi slots to release
+ *
+ * RETURNS:
+ * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or a
+ * negative error code
+ */
+int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+				     struct drm_dp_mst_topology_mgr *mgr,
+				     int slots)
+{
+	struct drm_dp_mst_topology_state *topology_state;
+
+	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+	if (topology_state == NULL)
+		return -ENOMEM;
+
+	/* We cannot rely on port->vcpi.num_slots to update
+	 * topology_state->avail_slots as the port may not exist if the parent
+	 * branch device was unplugged. This should be fixed by tracking
+	 * per-port slot allocation in drm_dp_mst_topology_state instead of
+	 * depending on the caller to tell us how many slots to release.
+	 */
+	topology_state->avail_slots += slots;
+	DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
+		      slots, topology_state->avail_slots);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
+
+/**
  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
  * @mgr: manager for this port
  * @port: port to allocate a virtual channel for.
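
A driver consumes these helpers from its encoder ->atomic_check() hook, reserving slots purely in the atomic state before anything touches the hardware. A hedged sketch of that usage (the my_mst_* names and fields are illustrative, not part of this patch; i915's MST code follows this pattern):

	static int my_mst_encoder_atomic_check(struct drm_encoder *encoder,
					       struct drm_crtc_state *crtc_state,
					       struct drm_connector_state *conn_state)
	{
		struct my_mst_encoder *enc = to_my_mst_encoder(encoder);
		int pbn, slots;

		/* Convert the mode's bandwidth needs to PBN, then reserve
		 * time slots in the atomic state only.
		 */
		pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock, 24);
		slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state,
						      enc->mst_mgr, enc->port, pbn);
		if (slots < 0)
			return slots; /* -ENOSPC: link cannot carry this mode */

		return 0;
	}

If the check fails, the whole atomic transaction is thrown away and the committed avail_slots count is untouched, which is the point of doing the accounting in duplicated state.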
@@ -2936,6 +3011,69 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 		(*mgr->cbs->hotplug)(mgr);
 }
 
+void *drm_dp_mst_duplicate_state(struct drm_atomic_state *state, void *obj)
+{
+	struct drm_dp_mst_topology_mgr *mgr = obj;
+	struct drm_dp_mst_topology_state *new_mst_state;
+
+	if (WARN_ON(!mgr->state))
+		return NULL;
+
+	new_mst_state = kmemdup(mgr->state, sizeof(*new_mst_state), GFP_KERNEL);
+	if (new_mst_state)
+		new_mst_state->state = state;
+	return new_mst_state;
+}
+
+void drm_dp_mst_swap_state(void *obj, void **obj_state_ptr)
+{
+	struct drm_dp_mst_topology_mgr *mgr = obj;
+	struct drm_dp_mst_topology_state **topology_state_ptr;
+
+	topology_state_ptr = (struct drm_dp_mst_topology_state **)obj_state_ptr;
+
+	mgr->state->state = (*topology_state_ptr)->state;
+	swap(*topology_state_ptr, mgr->state);
+	mgr->state->state = NULL;
+}
+
+void drm_dp_mst_destroy_state(void *obj_state)
+{
+	kfree(obj_state);
+}
+
+static const struct drm_private_state_funcs mst_state_funcs = {
+	.duplicate_state = drm_dp_mst_duplicate_state,
+	.swap_state = drm_dp_mst_swap_state,
+	.destroy_state = drm_dp_mst_destroy_state,
+};
+
+/**
+ * drm_atomic_get_mst_topology_state: get MST topology state
+ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_private_obj_state() passing in the MST
+ * atomic state vtable so that the private object state returned is that of an
+ * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
+ * caller to take care of the locking, so warn if we don't hold the
+ * connection_mutex.
+ *
+ * RETURNS:
+ *
+ * The MST topology state or error pointer.
+ */
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+								    struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_device *dev = mgr->dev;
+
+	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+	return drm_atomic_get_private_obj_state(state, mgr,
+						&mst_state_funcs);
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+
 /**
  * drm_dp_mst_topology_mgr_init - initialise a topology manager
  * @mgr: manager struct to initialise
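
The three callbacks implement the standard duplicate/swap/destroy lifecycle of a DRM private object. The backpointer dance in drm_dp_mst_swap_state() exists because mgr->state, the committed copy, must never reference a transient drm_atomic_state. A hedged lifecycle sketch using the names from this patch (the mutation line is illustrative):

	/* Check phase: the first lookup duplicates the committed state
	 * into the transaction via mst_state_funcs.duplicate_state().
	 */
	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	mst_state->avail_slots -= req_slots;	/* mutate only the copy */

	/* Commit phase: the core swaps the mutated copy into mgr->state
	 * through mst_state_funcs.swap_state(); the old committed copy now
	 * hangs off @state and is freed with it by
	 * mst_state_funcs.destroy_state().
	 */
	drm_atomic_commit(state);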
@@ -2980,6 +3118,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	if (test_calc_pbn_mode() < 0)
 		DRM_ERROR("MST PBN self-test failed\n");
 
+	mgr->state = kzalloc(sizeof(*mgr->state), GFP_KERNEL);
+	if (mgr->state == NULL)
+		return -ENOMEM;
+	mgr->state->mgr = mgr;
+
+	/* max. time slots - one slot for MTP header */
+	mgr->state->avail_slots = 63;
+	mgr->funcs = &mst_state_funcs;
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
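
To make the 63-slot budget concrete: a multi-stream transport packet carries 64 time slots, one of which is the MTP header, leaving 63 for payload. A worked example with illustrative numbers (the pbn_div value assumes a 4-lane HBR2 link, where each time slot carries roughly 40 PBN):

	/* 2560x1440@60 is a ~241.5 MHz pixel clock; at 24 bpp that comes to
	 * roughly 864 PBN, so on 4-lane HBR2:
	 *
	 *   req_slots = DIV_ROUND_UP(864, 40) = 22 of the 63 available slots.
	 */
	int pbn = drm_dp_calc_pbn_mode(241500, 24);	  /* ~864 PBN */
	int req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);  /* 22 on HBR2 x4 */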
@@ -3000,6 +3147,9 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_unlock(&mgr->payload_lock);
 	mgr->dev = NULL;
 	mgr->aux = NULL;
+	kfree(mgr->state);
+	mgr->state = NULL;
+	mgr->funcs = NULL;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
 