drm/dp/mst: always send reply for UP request

We should always send a reply to an UP request so that the
downstream device can clean up its resources appropriately.

The issue was that the reply to an UP request was sent only once:
after the first reply went out, tx_up_in_progress stayed set, so
later replies were queued on tx_msg_upq but never transmitted.

Acked-by: Dave Airlie <airlied@gmail.com>
Signed-off-by: Mykola Lysenko <Mykola.Lysenko@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
commit 1f16ee7fa1 (parent bd93432087)
Author:    Mykola Lysenko <Mykola.Lysenko@amd.com>
Date:      2015-12-18 17:14:43 -0500
Committer: Alex Deucher <alexander.deucher@amd.com>

 2 files changed, 11 insertions(+), 21 deletions(-)
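Before the hunks, a condensed sketch of the send path this commit produces: the up reply is encoded, transmitted synchronously under qlock, and freed by the caller, with no queue and no in-progress flag in between. This is a paraphrase assembled from the hunks below, not the verbatim kernel source; the name send_up_ack_reply_sketch is invented for illustration, and parameters not visible in the hunks (such as the broadcast flag) are omitted.

/* Condensed sketch (paraphrased from the hunks below, not verbatim
 * kernel source): after this change the up reply is sent synchronously
 * and freed by the caller -- there is no tx_msg_upq queue and no
 * tx_up_in_progress flag, so every reply actually goes out. */
static int send_up_ack_reply_sketch(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	/* sends the chunk and clears the tx slot; see the first hunk */
	process_single_up_tx_qlock(mgr, txmsg);
	mutex_unlock(&mgr->qlock);

	kfree(txmsg);	/* caller owns the reply; free unconditionally */
	return 0;
}

The design point is that ownership of txmsg never leaves the sending function, which is what allows the kfree() to be unconditional.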

--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c

@@ -1494,26 +1494,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
 }
 
 /* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
+				       struct drm_dp_sideband_msg_tx *txmsg)
 {
-	struct drm_dp_sideband_msg_tx *txmsg;
 	int ret;
 
 	/* construct a chunk from the first msg in the tx_msg queue */
-	if (list_empty(&mgr->tx_msg_upq)) {
-		mgr->tx_up_in_progress = false;
-		return;
-	}
-
-	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
 	ret = process_single_tx_qlock(mgr, txmsg, true);
-	if (ret == 1) {
-		/* up txmsgs aren't put in slots - so free after we send it */
-		list_del(&txmsg->next);
-		kfree(txmsg);
-	} else if (ret)
+
+	if (ret != 1)
 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-	mgr->tx_up_in_progress = true;
+
+	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
 }
 
 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
@@ -1907,11 +1899,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_encode_up_ack_reply(txmsg, req_type);
 
 	mutex_lock(&mgr->qlock);
-	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
-	if (!mgr->tx_up_in_progress) {
-		process_single_up_tx_qlock(mgr);
-	}
+
+	process_single_up_tx_qlock(mgr, txmsg);
+
 	mutex_unlock(&mgr->qlock);
+
+	kfree(txmsg);
 	return 0;
 }
@@ -2843,7 +2836,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
 	mutex_init(&mgr->destroy_connector_lock);
-	INIT_LIST_HEAD(&mgr->tx_msg_upq);
 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);

--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h

@@ -451,9 +451,7 @@ struct drm_dp_mst_topology_mgr {
 	   the mstb tx_slots and txmsg->state once they are queued */
 	struct mutex qlock;
 	struct list_head tx_msg_downq;
-	struct list_head tx_msg_upq;
 	bool tx_down_in_progress;
-	bool tx_up_in_progress;
 
 	/* payload info + lock for it */
 	struct mutex payload_lock;
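For context on who benefits: drm_dp_mst_handle_up_req() sends one of these ack replies for every UP request it receives. A rough, abridged sketch of that call site, reconstructed from memory of the surrounding code rather than taken from this diff (identifiers such as msg, seqno, and drm_dp_update_port are assumptions about that handler's locals):

	/* Abridged sketch of the caller (not part of this diff): each
	 * incoming UP request -- connection or resource status notify --
	 * now results in an ack reply that is sent and freed immediately. */
	if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
		drm_dp_send_up_ack_reply(mgr, mgr->mst_primary,
					 msg.req_type, seqno, false);
		drm_dp_update_port(mstb, &msg.u.conn_stat);
	} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
		drm_dp_send_up_ack_reply(mgr, mgr->mst_primary,
					 msg.req_type, seqno, false);
	}

With the old code, only the first branch of traffic ever got its ack; every later notify sat on tx_msg_upq, which is why hotplug notifications from the hub stalled until the fields removed above were dropped.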