xhci: Don't pass struct xhci_hcd pointer to xhci_link_seg()

It's only used to dig out if we need to set a chain flag for specific
hosts. Pass the flag directly as a parameter instead.

No functional changes.

xhci_link_seg() is also used by DbC code, so this change helps decouple
xhci and DbC.

Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Link: https://lore.kernel.org/r/20200723144530.9992-4-mathias.nyman@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Mathias Nyman 2020-07-23 17:45:06 +03:00 committed by Greg Kroah-Hartman
parent 5b43a2a84b
commit e3bc8004bd

View File

@@ -96,8 +96,9 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
  * DMA address of the next segment. The caller needs to set any Link TRB
  * related flags, such as End TRB, Toggle Cycle, and no snoop.
  */
-static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
-		struct xhci_segment *next, enum xhci_ring_type type)
+static void xhci_link_segments(struct xhci_segment *prev,
+			       struct xhci_segment *next,
+			       enum xhci_ring_type type, bool chain_links)
 {
 	u32 val;
@@ -112,11 +113,7 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
 	val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control);
 	val &= ~TRB_TYPE_BITMASK;
 	val |= TRB_TYPE(TRB_LINK);
-	/* Always set the chain bit with 0.95 hardware */
-	/* Set chain bit for isoc rings on AMD 0.96 host */
-	if (xhci_link_trb_quirk(xhci) ||
-			(type == TYPE_ISOC &&
-			 (xhci->quirks & XHCI_AMD_0x96_HOST)))
+	if (chain_links)
 		val |= TRB_CHAIN;
 	prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
 }
@@ -131,13 +128,19 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		unsigned int num_segs)
 {
 	struct xhci_segment *next;
+	bool chain_links;
 
 	if (!ring || !first || !last)
 		return;
 
+	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
+	chain_links = !!(xhci_link_trb_quirk(xhci) ||
+			 (ring->type == TYPE_ISOC &&
+			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
+
 	next = ring->enq_seg->next;
-	xhci_link_segments(xhci, ring->enq_seg, first, ring->type);
-	xhci_link_segments(xhci, last, next, ring->type);
+	xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
+	xhci_link_segments(last, next, ring->type, chain_links);
 	ring->num_segs += num_segs;
 	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
@@ -321,6 +324,12 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
+	bool chain_links;
+
+	/* Set chain bit for 0.95 hosts, and for isoc rings on AMD 0.96 host */
+	chain_links = !!(xhci_link_trb_quirk(xhci) ||
+			 (type == TYPE_ISOC &&
+			  (xhci->quirks & XHCI_AMD_0x96_HOST)));
 
 	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 	if (!prev)
@@ -341,12 +350,12 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 			}
 			return -ENOMEM;
 		}
-		xhci_link_segments(xhci, prev, next, type);
+		xhci_link_segments(prev, next, type, chain_links);
 		prev = next;
 		num_segs--;
 	}
-	xhci_link_segments(xhci, prev, *first, type);
+	xhci_link_segments(prev, *first, type, chain_links);
 	*last = prev;
 
 	return 0;