vfio iommu: Cache pgsize_bitmap in struct vfio_iommu
Calculate and cache pgsize_bitmap when iommu->domain_list is updated or when iommu->external_domain is set for an mdev device. Add iommu->lock protection where the cached pgsize_bitmap is accessed.

Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Reviewed-by: Neo Jia <cjia@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 6581708586
commit cade075f26
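The patch replaces the on-demand vfio_pgsize_bitmap() helper with a cached iommu->pgsize_bitmap: the bitmap is recomputed only when a domain is attached or detached, and callers read the cached value while holding iommu->lock instead of recomputing it on every map/unmap or ioctl. The following is a minimal, self-contained userspace sketch of that recompute-and-consume pattern; the model_* names, the constants, and the example bitmaps are stand-ins for illustration (locking is omitted), not the kernel code itself.

/*
 * Minimal userspace model of the caching scheme, for illustration only.
 * The domain list, PAGE_SIZE/PAGE_MASK and __ffs are replaced by local
 * stand-ins so the example compiles on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL
#define MODEL_PAGE_MASK (~(MODEL_PAGE_SIZE - 1))

struct model_iommu {
	uint64_t pgsize_bitmap;	/* cached instead of recomputed per call */
};

/* Recompute the cache whenever the set of attached domains changes. */
static void model_update_pgsize_bitmap(struct model_iommu *iommu,
				       const uint64_t *domain_bitmaps, int n)
{
	iommu->pgsize_bitmap = UINT64_MAX;

	for (int i = 0; i < n; i++)
		iommu->pgsize_bitmap &= domain_bitmaps[i];

	/* Hide page sizes smaller than PAGE_SIZE; vfio pins whole pages. */
	if (iommu->pgsize_bitmap & ~MODEL_PAGE_MASK) {
		iommu->pgsize_bitmap &= MODEL_PAGE_MASK;
		iommu->pgsize_bitmap |= MODEL_PAGE_SIZE;
	}
}

/* Callers derive the minimum-page-size alignment mask from the cached value. */
static uint64_t model_pgsize_mask(const struct model_iommu *iommu)
{
	return ((uint64_t)1 << __builtin_ctzll(iommu->pgsize_bitmap)) - 1;
}

int main(void)
{
	struct model_iommu iommu;
	uint64_t domains[] = { 0x20201000, 0x40201000 };	/* example bitmaps */

	model_update_pgsize_bitmap(&iommu, domains, 2);
	printf("pgsize_bitmap=0x%llx mask=0x%llx\n",
	       (unsigned long long)iommu.pgsize_bitmap,
	       (unsigned long long)model_pgsize_mask(&iommu));
	return 0;
}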
@@ -69,6 +69,7 @@ struct vfio_iommu {
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
+	uint64_t		pgsize_bitmap;
 	bool			v2;
 	bool			nesting;
 };
@@ -835,15 +836,14 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	iommu->dma_avail++;
 }
 
-static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
 {
 	struct vfio_domain *domain;
-	unsigned long bitmap = ULONG_MAX;
 
-	mutex_lock(&iommu->lock);
+	iommu->pgsize_bitmap = ULONG_MAX;
+
 	list_for_each_entry(domain, &iommu->domain_list, next)
-		bitmap &= domain->domain->pgsize_bitmap;
-	mutex_unlock(&iommu->lock);
+		iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
 
 	/*
 	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -853,12 +853,10 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 	 * granularity while iommu driver can use the sub-PAGE_SIZE size
 	 * to map the buffer.
 	 */
-	if (bitmap & ~PAGE_MASK) {
-		bitmap &= PAGE_MASK;
-		bitmap |= PAGE_SIZE;
+	if (iommu->pgsize_bitmap & ~PAGE_MASK) {
+		iommu->pgsize_bitmap &= PAGE_MASK;
+		iommu->pgsize_bitmap |= PAGE_SIZE;
 	}
-
-	return bitmap;
 }
 
 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
@@ -869,19 +867,28 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 	size_t unmapped = 0;
 	int ret = 0, retries = 0;
 
-	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+	mutex_lock(&iommu->lock);
 
-	if (unmap->iova & mask)
-		return -EINVAL;
-	if (!unmap->size || unmap->size & mask)
-		return -EINVAL;
+	mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
+
+	if (unmap->iova & mask) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (!unmap->size || unmap->size & mask) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
 	if (unmap->iova + unmap->size - 1 < unmap->iova ||
-	    unmap->size > SIZE_MAX)
-		return -EINVAL;
+	    unmap->size > SIZE_MAX) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
 	WARN_ON(mask & PAGE_MASK);
 again:
-	mutex_lock(&iommu->lock);
 
 	/*
 	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
@@ -960,6 +967,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 			blocking_notifier_call_chain(&iommu->notifier,
 						    VFIO_IOMMU_NOTIFY_DMA_UNMAP,
 						    &nb_unmap);
+			mutex_lock(&iommu->lock);
 			goto again;
 		}
 		unmapped += dma->size;
@@ -1075,24 +1083,28 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
 		return -EINVAL;
 
-	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
-
-	WARN_ON(mask & PAGE_MASK);
-
 	/* READ/WRITE from device perspective */
 	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
 		prot |= IOMMU_WRITE;
 	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
 		prot |= IOMMU_READ;
 
-	if (!prot || !size || (size | iova | vaddr) & mask)
-		return -EINVAL;
+	mutex_lock(&iommu->lock);
+
+	mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
+
+	WARN_ON(mask & PAGE_MASK);
+
+	if (!prot || !size || (size | iova | vaddr) & mask) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
 	/* Don't allow IOVA or virtual address wrap */
-	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
-		return -EINVAL;
-
-	mutex_lock(&iommu->lock);
+	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
 	if (vfio_find_dma(iommu, iova, size)) {
 		ret = -EEXIST;
@@ -1698,6 +1710,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 		if (!iommu->external_domain) {
 			INIT_LIST_HEAD(&domain->group_list);
 			iommu->external_domain = domain;
+			vfio_update_pgsize_bitmap(iommu);
 		} else {
 			kfree(domain);
 		}
@@ -1823,6 +1836,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	}
 
 	list_add(&domain->next, &iommu->domain_list);
+	vfio_update_pgsize_bitmap(iommu);
 done:
 	/* Delete the old one and insert new iova list */
 	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
@@ -2034,6 +2048,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 			list_del(&domain->next);
 			kfree(domain);
 			vfio_iommu_aper_expand(iommu, &iova_copy);
+			vfio_update_pgsize_bitmap(iommu);
 		}
 		break;
 	}
@@ -2166,8 +2181,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	size_t size;
 	int iovas = 0, i = 0, ret;
 
-	mutex_lock(&iommu->lock);
-
 	list_for_each_entry(iova, &iommu->iova_list, list)
 		iovas++;
 
@@ -2176,17 +2189,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 		 * Return 0 as a container with a single mdev device
 		 * will have an empty list
 		 */
-		ret = 0;
-		goto out_unlock;
+		return 0;
 	}
 
 	size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
 
 	cap_iovas = kzalloc(size, GFP_KERNEL);
-	if (!cap_iovas) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (!cap_iovas)
+		return -ENOMEM;
 
 	cap_iovas->nr_iovas = iovas;
 
@@ -2199,8 +2209,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
 
 	kfree(cap_iovas);
-out_unlock:
-	mutex_unlock(&iommu->lock);
 	return ret;
 }
 
@@ -2245,11 +2253,13 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 			info.cap_offset = 0; /* output, no-recopy necessary */
 		}
 
+		mutex_lock(&iommu->lock);
 		info.flags = VFIO_IOMMU_INFO_PGSIZES;
 
-		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+		info.iova_pgsizes = iommu->pgsize_bitmap;
 
 		ret = vfio_iommu_iova_build_caps(iommu, &caps);
+		mutex_unlock(&iommu->lock);
 		if (ret)
 			return ret;
 
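From userspace, the cached value is what VFIO_IOMMU_GET_INFO reports; the ioctl handler above now copies it out of iommu->pgsize_bitmap under iommu->lock. Below is a hedged sketch of how a VFIO user reads the bitmap and derives the minimum mapping granularity; it assumes an already-configured container file descriptor (container_fd, setup not shown) and keeps error handling to the ioctl return value.

/* Hedged userspace sketch: query the page-size bitmap via VFIO_IOMMU_GET_INFO. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int print_min_pgsize(int container_fd)
{
	struct vfio_iommu_type1_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);

	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, &info))
		return -1;

	if (info.flags & VFIO_IOMMU_INFO_PGSIZES) {
		/* lowest set bit of iova_pgsizes is the minimum mapping granularity */
		uint64_t min_pgsize = info.iova_pgsizes & ~(info.iova_pgsizes - 1);

		printf("supported page sizes: 0x%llx, minimum: 0x%llx\n",
		       (unsigned long long)info.iova_pgsizes,
		       (unsigned long long)min_pgsize);
	}
	return 0;
}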