net: stmmac: prepare dma interrupt treatment for multiple queues
This patch prepares the DMA interrupt treatment for multiple queues.

Signed-off-by: Joao Pinto <jpinto@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit d62a107a4f
parent 4e59326229
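For orientation, below is a minimal user-space sketch of the pattern the patch moves toward: the dma_interrupt callback gains a channel argument, and the generic handler iterates over every channel instead of hard-coding channel 0. All identifiers in the sketch (demo_stats, demo_dma_ops, demo_dma_interrupt, demo_handle_irq, DEMO_CHANNELS) are hypothetical stand-ins for illustration, not symbols from the stmmac driver.

/*
 * Minimal sketch, not the driver's actual code: a per-channel interrupt
 * callback plus a dispatch loop over all channels.
 */
#include <stdio.h>

#define DEMO_CHANNELS 4

struct demo_stats {
	unsigned long rx_irq_n[DEMO_CHANNELS];
};

struct demo_dma_ops {
	/* per-channel interrupt handler, mirroring the new u32 chan argument */
	int (*dma_interrupt)(struct demo_stats *x, unsigned int chan);
};

static int demo_dma_interrupt(struct demo_stats *x, unsigned int chan)
{
	/* pretend every channel raised a receive interrupt */
	x->rx_irq_n[chan]++;
	return 0;
}

static const struct demo_dma_ops demo_ops = {
	.dma_interrupt = demo_dma_interrupt,
};

/* analogue of the per-channel dispatch loop: walk every channel */
static void demo_handle_irq(struct demo_stats *x, unsigned int channel_count)
{
	unsigned int chan;

	for (chan = 0; chan < channel_count; chan++)
		demo_ops.dma_interrupt(x, chan);
}

int main(void)
{
	struct demo_stats stats = { { 0 } };
	unsigned int i;

	demo_handle_irq(&stats, DEMO_CHANNELS);
	for (i = 0; i < DEMO_CHANNELS; i++)
		printf("chan %u: rx irqs %lu\n", i, stats.rx_irq_n[i]);
	return 0;
}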
@@ -438,7 +438,7 @@ struct stmmac_dma_ops {
 	void (*start_rx)(void __iomem *ioaddr, u32 chan);
 	void (*stop_rx)(void __iomem *ioaddr, u32 chan);
 	int (*dma_interrupt) (void __iomem *ioaddr,
-			      struct stmmac_extra_stats *x);
+			      struct stmmac_extra_stats *x, u32 chan);
 	/* If supported then get the optional core features */
 	void (*get_hw_feature)(void __iomem *ioaddr,
 			       struct dma_features *dma_cap);
@@ -193,7 +193,7 @@ void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
 void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-			 struct stmmac_extra_stats *x);
+			 struct stmmac_extra_stats *x, u32 chan);
 void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
 void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
@@ -122,11 +122,11 @@ void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
 }
 
 int dwmac4_dma_interrupt(void __iomem *ioaddr,
-			 struct stmmac_extra_stats *x)
+			 struct stmmac_extra_stats *x, u32 chan)
 {
 	int ret = 0;
 
-	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+	u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
 
 	/* ABNORMAL interrupts */
 	if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
@@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 	if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
 		u32 value;
 
-		value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+		value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
 		/* to schedule NAPI on real RIE event. */
 		if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
 			x->rx_normal_irq_n++;
@@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
 	 * status [21-0] expect reserved bits [5-3]
 	 */
 	writel((intr_status & 0x3fffc7),
-	       ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+	       ioaddr + DMA_CHAN_STATUS(chan));
 
 	return ret;
 }
@@ -143,7 +143,8 @@ void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
 void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
-int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+			u32 chan);
 int dwmac_dma_reset(void __iomem *ioaddr);
 
 #endif /* __DWMAC_DMA_H__ */
@@ -156,7 +156,7 @@ static void show_rx_process_state(unsigned int status)
 #endif
 
 int dwmac_dma_interrupt(void __iomem *ioaddr,
-			struct stmmac_extra_stats *x)
+			struct stmmac_extra_stats *x, u32 chan)
 {
 	int ret = 0;
 	/* read the status register (CSR5) */
@@ -1591,32 +1591,41 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
  */
 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 {
-	u32 chan = STMMAC_CHAN0;
+	u32 tx_channel_count = priv->plat->tx_queues_to_use;
 	int status;
+	u32 chan;
 
-	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
-	if (likely((status & handle_rx)) || (status & handle_tx)) {
-		if (likely(napi_schedule_prep(&priv->napi))) {
-			stmmac_disable_dma_irq(priv, chan);
-			__napi_schedule(&priv->napi);
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		status = priv->hw->dma->dma_interrupt(priv->ioaddr,
+						      &priv->xstats, chan);
+		if (likely((status & handle_rx)) || (status & handle_tx)) {
+			if (likely(napi_schedule_prep(&priv->napi))) {
+				stmmac_disable_dma_irq(priv, chan);
+				__napi_schedule(&priv->napi);
+			}
 		}
-	}
 
-	if (unlikely(status & tx_hard_error_bump_tc)) {
-		/* Try to bump up the dma threshold on this failure */
-		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
-		    (tc <= 256)) {
-			tc += 64;
-			if (priv->plat->force_thresh_dma_mode)
-				stmmac_set_dma_operation_mode(priv->ioaddr,
-							      tc, tc, chan);
-			else
-				stmmac_set_dma_operation_mode(priv->ioaddr, tc,
-							      SF_DMA_MODE, chan);
-			priv->xstats.threshold = tc;
+		if (unlikely(status & tx_hard_error_bump_tc)) {
+			/* Try to bump up the dma threshold on this failure */
+			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
+			    (tc <= 256)) {
+				tc += 64;
+				if (priv->plat->force_thresh_dma_mode)
+					stmmac_set_dma_operation_mode(priv,
+								      tc,
+								      tc,
+								      chan);
+				else
+					stmmac_set_dma_operation_mode(priv,
								      tc,
+								      SF_DMA_MODE,
+								      chan);
+				priv->xstats.threshold = tc;
+			}
+		} else if (unlikely(status == tx_hard_error)) {
+			stmmac_tx_err(priv, chan);
 		}
-	} else if (unlikely(status == tx_hard_error))
-		stmmac_tx_err(priv, chan);
+	}
 }
 
 /**