mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-27 01:40:56 +07:00
sfc: Use rmb() to ensure reads occur in order
Enabling write-combining may also enable read reordering. The BIU is only guaranteed to read from a 128-bit CSR or 64-bit SRAM word when the host reads from its lowest address; otherwise the BIU may use the latched value. Therefore we need to reinstate the read memory barriers after the first read operation for each CSR or SRAM word. Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
This commit is contained in:
parent
d4fabcc8e8
commit
fcfa060468
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
|
|||||||
|
|
||||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||||
value->u32[0] = _efx_readd(efx, reg + 0);
|
value->u32[0] = _efx_readd(efx, reg + 0);
|
||||||
|
rmb();
|
||||||
value->u32[1] = _efx_readd(efx, reg + 4);
|
value->u32[1] = _efx_readd(efx, reg + 4);
|
||||||
value->u32[2] = _efx_readd(efx, reg + 8);
|
value->u32[2] = _efx_readd(efx, reg + 8);
|
||||||
value->u32[3] = _efx_readd(efx, reg + 12);
|
value->u32[3] = _efx_readd(efx, reg + 12);
|
||||||
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
|
|||||||
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
|
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
|
||||||
#else
|
#else
|
||||||
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
|
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
|
||||||
|
rmb();
|
||||||
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
|
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
|
||||||
#endif
|
#endif
|
||||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||||
|
Loading…
Reference in New Issue
Block a user