Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-02-17 10:06:56 +07:00).
VT-d: cleanup iommu_flush_iotlb_psi and flush_unmaps
Make iommu_flush_iotlb_psi() and flush_unmaps() more readable. Signed-off-by: Yu Zhao <yu.zhao@intel.com> Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
This commit is contained in:
parent
6ba6c3a4ca
commit
9dd2fe8906
@ -948,28 +948,23 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
|
|||||||
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
|
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did,
|
||||||
u64 addr, unsigned int pages)
|
u64 addr, unsigned int pages)
|
||||||
{
|
{
|
||||||
unsigned int mask;
|
unsigned int mask = ilog2(__roundup_pow_of_two(pages));
|
||||||
|
|
||||||
BUG_ON(addr & (~VTD_PAGE_MASK));
|
BUG_ON(addr & (~VTD_PAGE_MASK));
|
||||||
BUG_ON(pages == 0);
|
BUG_ON(pages == 0);
|
||||||
|
|
||||||
/* Fallback to domain selective flush if no PSI support */
|
|
||||||
if (!cap_pgsel_inv(iommu->cap))
|
|
||||||
return iommu->flush.flush_iotlb(iommu, did, 0, 0,
|
|
||||||
DMA_TLB_DSI_FLUSH);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
* Fallback to domain selective flush if no PSI support or the size is
|
||||||
|
* too big.
|
||||||
* PSI requires page size to be 2 ^ x, and the base address is naturally
|
* PSI requires page size to be 2 ^ x, and the base address is naturally
|
||||||
* aligned to the size
|
* aligned to the size
|
||||||
*/
|
*/
|
||||||
mask = ilog2(__roundup_pow_of_two(pages));
|
if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
|
||||||
/* Fallback to domain selective flush if size is too big */
|
iommu->flush.flush_iotlb(iommu, did, 0, 0,
|
||||||
if (mask > cap_max_amask_val(iommu->cap))
|
|
||||||
return iommu->flush.flush_iotlb(iommu, did, 0, 0,
|
|
||||||
DMA_TLB_DSI_FLUSH);
|
DMA_TLB_DSI_FLUSH);
|
||||||
|
else
|
||||||
return iommu->flush.flush_iotlb(iommu, did, addr, mask,
|
iommu->flush.flush_iotlb(iommu, did, addr, mask,
|
||||||
DMA_TLB_PSI_FLUSH);
|
DMA_TLB_PSI_FLUSH);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
|
static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
|
||||||
@ -2260,15 +2255,16 @@ static void flush_unmaps(void)
|
|||||||
if (!iommu)
|
if (!iommu)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (deferred_flush[i].next) {
|
if (!deferred_flush[i].next)
|
||||||
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
|
continue;
|
||||||
DMA_TLB_GLOBAL_FLUSH);
|
|
||||||
for (j = 0; j < deferred_flush[i].next; j++) {
|
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
|
||||||
__free_iova(&deferred_flush[i].domain[j]->iovad,
|
DMA_TLB_GLOBAL_FLUSH, 0);
|
||||||
deferred_flush[i].iova[j]);
|
for (j = 0; j < deferred_flush[i].next; j++) {
|
||||||
}
|
__free_iova(&deferred_flush[i].domain[j]->iovad,
|
||||||
deferred_flush[i].next = 0;
|
deferred_flush[i].iova[j]);
|
||||||
}
|
}
|
||||||
|
deferred_flush[i].next = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
list_size = 0;
|
list_size = 0;
|
||||||
|
Loading…
Reference in New Issue
Block a user