Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 04:40:51 +07:00)
xhci: add xhci_get_virt_ep() helper
[commit b1adc42d440df3233255e313a45ab7e9b2b74096 upstream]

In several event handlers we need to find the right endpoint structure
from slot_id and ep_index in the event. Add a helper for this, check
that slot_id and ep_index are valid.

Cc: stable@vger.kernel.org
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20210129130044.206855-6-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Carsten Schmid <carsten_schmid@mentor.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 624290f368
Commit: ba28765d33
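The patch replaces open-coded &xhci->devs[slot_id]->eps[ep_index] lookups in the command and transfer event handlers with the validated helper. A minimal sketch of the resulting caller pattern, using a hypothetical handler name (example_handle_ep_event below is illustrative only; the real call sites are xhci_handle_cmd_stop_ep(), xhci_handle_cmd_set_deq(), xhci_handle_cmd_reset_ep() and handle_tx_event() in the diff that follows):

/*
 * Illustrative sketch only (not part of the patch), written against the
 * xhci driver's internal types in drivers/usb/host/xhci-ring.c.
 */
static void example_handle_ep_event(struct xhci_hcd *xhci, int slot_id,
                                    union xhci_trb *trb)
{
        struct xhci_virt_ep *ep;
        unsigned int ep_index;

        /* The endpoint index comes straight from the (untrusted) event TRB. */
        ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));

        /*
         * The helper bounds-checks slot_id and ep_index and verifies that a
         * virt device exists before returning the endpoint, so the handler
         * can bail out early instead of dereferencing a bad pointer.
         */
        ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
        if (!ep)
                return;

        /* Safe to touch the endpoint now, e.g. clear internal halted state. */
        ep->ep_state &= ~EP_HALTED;
}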
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -446,6 +446,26 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
+static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
+					     unsigned int slot_id,
+					     unsigned int ep_index)
+{
+	if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
+		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+		return NULL;
+	}
+	if (ep_index >= EP_CTX_PER_DEV) {
+		xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
+		return NULL;
+	}
+	if (!xhci->devs[slot_id]) {
+		xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
+		return NULL;
+	}
+
+	return &xhci->devs[slot_id]->eps[ep_index];
+}
+
 /* Get the right ring for the given slot_id, ep_index and stream_id.
  * If the endpoint supports streams, boundary check the URB's stream ID.
  * If the endpoint doesn't support streams, return the singular endpoint ring.
@@ -456,7 +476,10 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
 {
 	struct xhci_virt_ep *ep;
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return NULL;
+
 	/* Common case: no streams */
 	if (!(ep->ep_state & EP_HAS_STREAMS))
 		return ep->ring;
@@ -747,11 +770,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 	memset(&deq_state, 0, sizeof(deq_state));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return;
+
 	vdev = xhci->devs[slot_id];
 	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
 	trace_xhci_handle_cmd_stop_ep(ep_ctx);
 
-	ep = &xhci->devs[slot_id]->eps[ep_index];
 	last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
 			struct xhci_td, cancelled_td_list);
 
@@ -1076,9 +1102,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
-	dev = xhci->devs[slot_id];
-	ep = &dev->eps[ep_index];
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return;
 
+	dev = xhci->devs[slot_id];
 	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
 	if (!ep_ring) {
 		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
@@ -1151,9 +1179,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
 	}
 
 cleanup:
-	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
-	dev->eps[ep_index].queued_deq_seg = NULL;
-	dev->eps[ep_index].queued_deq_ptr = NULL;
+	ep->ep_state &= ~SET_DEQ_PENDING;
+	ep->queued_deq_seg = NULL;
+	ep->queued_deq_ptr = NULL;
 	/* Restart any rings with pending URBs */
 	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
@@ -1162,10 +1190,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		union xhci_trb *trb, u32 cmd_comp_code)
 {
 	struct xhci_virt_device *vdev;
+	struct xhci_virt_ep *ep;
 	struct xhci_ep_ctx *ep_ctx;
 	unsigned int ep_index;
 
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep)
+		return;
+
 	vdev = xhci->devs[slot_id];
 	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
 	trace_xhci_handle_cmd_reset_ep(ep_ctx);
@@ -1195,7 +1228,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
 		xhci_ring_cmd_db(xhci);
 	} else {
 		/* Clear our internal halted state */
-		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
+		ep->ep_state &= ~EP_HALTED;
 	}
 
 	/* if this was a soft reset, then restart */
@@ -2364,14 +2397,13 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
 	ep_trb_dma = le64_to_cpu(event->buffer);
 
-	xdev = xhci->devs[slot_id];
-	if (!xdev) {
-		xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
-			 slot_id);
+	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+	if (!ep) {
+		xhci_err(xhci, "ERROR Invalid Transfer event\n");
 		goto err_out;
 	}
 
-	ep = &xdev->eps[ep_index];
+	xdev = xhci->devs[slot_id];
 	ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
 	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -993,6 +993,7 @@ struct xhci_interval_bw_table {
 	unsigned int ss_bw_out;
 };
 
+#define EP_CTX_PER_DEV	31
 
 struct xhci_virt_device {
 	struct usb_device *udev;
@@ -1007,7 +1008,7 @@ struct xhci_virt_device {
 	struct xhci_container_ctx *out_ctx;
 	/* Used for addressing devices and configuration changes */
 	struct xhci_container_ctx *in_ctx;
-	struct xhci_virt_ep eps[31];
+	struct xhci_virt_ep eps[EP_CTX_PER_DEV];
 	u8 fake_port;
 	u8 real_port;
 	struct xhci_interval_bw_table *bw_table;
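A note on the new bound, not stated in the patch itself but following from the xHCI device context layout: EP_CTX_PER_DEV is 31 because each device slot carries one context for the bidirectional control endpoint (EP0) plus IN and OUT contexts for endpoints 1 through 15, and xhci_get_virt_ep() rejects any ep_index at or beyond that bound. A standalone arithmetic sketch (illustrative only, not kernel code):

#include <assert.h>

#define EP_CTX_PER_DEV 31	/* the value added to xhci.h by this patch */

int main(void)
{
	/* EP0 (bidirectional control) + endpoints 1..15, each with IN and OUT */
	assert(EP_CTX_PER_DEV == 1 + 15 * 2);
	return 0;
}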