mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-15 02:26:39 +07:00)
xhci: Giveback urb in finish_td directly
Mostly refactoring and code cleanup.

While handling transfer events we used to check the return value of various functions to see if we could give back the URB. It turns out those return values are only set when finish_td() notices we are completing the last TD in the URB, so give back the URB directly in finish_td() instead.

The only functional change is that we now increase the event ring dequeue pointer in software after giving back the URB. This should not matter, as we are in hardware interrupt context and the time when the new event ring dequeue pointer is written to hardware remains the same; hardware is the only one that can put event TRBs on the event ring.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 0c03d89d0c (parent 446b31419c)
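As a rough illustration of the control-flow change described in the commit message, here is a minimal standalone sketch in plain C. It is not the driver code: td_complete() and giveback_urb() are hypothetical stand-ins for finish_td() and xhci_giveback_urb_locked(), and struct urb_priv is reduced to the two counters the pattern needs. The point is only that the completion helper gives the URB back itself once the last TD finishes, so the caller no longer inspects a return value to decide.

/*
 * Minimal sketch of the new pattern, not the driver code.
 * td_complete() stands in for finish_td(), giveback_urb() for
 * xhci_giveback_urb_locked().
 */
#include <stdio.h>

struct urb_priv {
	int td_cnt;	/* TDs completed so far */
	int length;	/* total TDs in the URB */
};

/* Stand-in for the giveback call: hand the URB back to the core. */
static void giveback_urb(struct urb_priv *priv, int status)
{
	printf("URB given back, status %d, after %d TDs\n", status, priv->td_cnt);
}

/* New style: the completion helper gives back the URB itself... */
static void td_complete(struct urb_priv *priv, int status)
{
	priv->td_cnt++;
	if (priv->td_cnt == priv->length)	/* last TD of this URB */
		giveback_urb(priv, status);
}

/* ...so the event handler no longer tracks a 'ret' flag to decide on giveback. */
int main(void)
{
	struct urb_priv priv = { .td_cnt = 0, .length = 3 };
	int i;

	for (i = 0; i < 3; i++)
		td_complete(&priv, 0);	/* one call per transfer event */
	return 0;
}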
@@ -1844,7 +1844,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	int ep_index;
 	struct urb *urb = NULL;
 	struct xhci_ep_ctx *ep_ctx;
-	int ret = 0;
 	struct urb_priv *urb_priv;
 	u32 trb_comp_code;
 
@@ -1914,7 +1913,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	urb_priv->td_cnt++;
 	/* Giveback the urb when all the tds are completed */
 	if (urb_priv->td_cnt == urb_priv->length) {
-		ret = 1;
 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
 			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
 			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
@@ -1922,9 +1920,10 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 				usb_amd_quirk_pll_enable();
 			}
 		}
+		xhci_giveback_urb_locked(xhci, td, *status);
 	}
 
-	return ret;
+	return 0;
 }
 
 /* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
@@ -2234,7 +2233,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 	struct list_head *tmp;
 	u32 trb_comp_code;
-	int ret = 0;
 	int td_num = 0;
 	bool handling_skipped_tds = false;
 
@@ -2412,7 +2410,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			xhci_dbg(xhci, "td_list is empty while skip "
 					"flag set. Clear skip flag.\n");
 		}
-		ret = 0;
 		goto cleanup;
 	}
 
@@ -2421,7 +2418,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			ep->skip = false;
 			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
 				"Clear skip flag.\n");
-			ret = 0;
 			goto cleanup;
 		}
 
@@ -2443,7 +2439,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		 */
 		if (!ep_seg && (trb_comp_code == COMP_STOP ||
 				   trb_comp_code == COMP_STOP_INVAL)) {
-			ret = 0;
 			goto cleanup;
 		}
 
@@ -2457,7 +2452,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
 					ep_ring->last_td_was_short) {
 				ep_ring->last_td_was_short = false;
-				ret = 0;
 				goto cleanup;
 			}
 			/* HC is busted, give up! */
@@ -2472,7 +2466,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 				return -ESHUTDOWN;
 			}
 
-			ret = skip_isoc_td(xhci, td, event, ep, &status);
+			skip_isoc_td(xhci, td, event, ep, &status);
 			goto cleanup;
 		}
 		if (trb_comp_code == COMP_SHORT_TX)
@@ -2498,22 +2492,15 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 			goto cleanup;
 		}
 
-		/* Now update the urb's actual_length and give back to
-		 * the core
-		 */
+		/* update the urb's actual_length and give back to the core */
 		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
-			ret = process_ctrl_td(xhci, td, ep_trb, event, ep,
-				&status);
+			process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
 		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
-			ret = process_isoc_td(xhci, td, ep_trb, event, ep,
-				&status);
+			process_isoc_td(xhci, td, ep_trb, event, ep, &status);
 		else
-			ret = process_bulk_intr_td(xhci, td, ep_trb, event,
-				ep, &status);
-
+			process_bulk_intr_td(xhci, td, ep_trb, event, ep,
+					     &status);
 cleanup:
-
-
 		handling_skipped_tds = ep->skip &&
 			trb_comp_code != COMP_MISSED_INT &&
 			trb_comp_code != COMP_PING_ERR;
@@ -2525,8 +2512,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
 		if (!handling_skipped_tds)
 			inc_deq(xhci, xhci->event_ring);
 
-		if (ret)
-			xhci_giveback_urb_locked(xhci, td, status);
 		/*
 		 * If ep->skip is set, it means there are missed tds on the
 		 * endpoint ring need to take care of.