drm: fix up mmap locking in preparation for ttm changes
This change is needed to protect against disappearing maps, which aren't common. The map lists are protected using struct_mutex but drm_mmap never locked it.

Signed-off-by: Dave Airlie <airlied@linux.ie>
parent 040ac32048
commit d7d8aac79d
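The diff below applies the usual *_locked split: the public entry point takes dev->struct_mutex and then calls a *_locked helper that assumes the mutex is already held, so paths that already hold the lock can call the helper directly without deadlocking. The following is a minimal, self-contained sketch of that shape, with illustrative names and a userspace pthread mutex standing in for struct_mutex; it is not DRM code.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins; the real code uses drm_device_t and struct_mutex. */
struct device {
	pthread_mutex_t struct_mutex;
	int vma_count;			/* stands in for the vmalist bookkeeping */
};

/* Caller must already hold dev->struct_mutex. */
static void vm_open_locked(struct device *dev)
{
	dev->vma_count++;		/* mutate the shared state under the lock */
}

/* Unlocked entry point: takes the lock, then calls the _locked helper. */
static void vm_open(struct device *dev)
{
	pthread_mutex_lock(&dev->struct_mutex);
	vm_open_locked(dev);
	pthread_mutex_unlock(&dev->struct_mutex);
}

/* An mmap-style path that already holds the lock calls the _locked variant
 * directly, so the mutex is never taken twice on the same path. */
static int do_mmap(struct device *dev)
{
	int ret = 0;

	pthread_mutex_lock(&dev->struct_mutex);
	vm_open_locked(dev);		/* not vm_open(): the mutex is already held */
	pthread_mutex_unlock(&dev->struct_mutex);
	return ret;
}

int main(void)
{
	struct device dev = { .struct_mutex = PTHREAD_MUTEX_INITIALIZER };

	vm_open(&dev);
	do_mmap(&dev);
	printf("open count: %d\n", dev.vma_count);
	return 0;
}

In the patch itself the same shape appears twice, as drm_vm_open()/drm_vm_open_locked() and drm_mmap()/drm_mmap_locked(); drm_mmap_dma() now runs with struct_mutex held (taken in drm_mmap()), so it calls drm_vm_open_locked() and drops its lock_kernel()/unlock_kernel() calls.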
@@ -413,7 +413,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open(struct vm_area_struct *vma)
+static void drm_vm_open_locked(struct vm_area_struct *vma)
 {
 	drm_file_t *priv = vma->vm_file->private_data;
 	drm_device_t *dev = priv->head->dev;
@@ -425,15 +425,23 @@ static void drm_vm_open(struct vm_area_struct *vma)
 
 	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
 	if (vma_entry) {
-		mutex_lock(&dev->struct_mutex);
 		vma_entry->vma = vma;
 		vma_entry->next = dev->vmalist;
 		vma_entry->pid = current->pid;
 		dev->vmalist = vma_entry;
-		mutex_unlock(&dev->struct_mutex);
 	}
 }
 
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+	drm_file_t *priv = vma->vm_file->private_data;
+	drm_device_t *dev = priv->head->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 /**
  * \c close method for all virtual memory types.
  *
@@ -484,7 +492,6 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	drm_device_dma_t *dma;
 	unsigned long length = vma->vm_end - vma->vm_start;
 
-	lock_kernel();
 	dev = priv->head->dev;
 	dma = dev->dma;
 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
@@ -492,10 +499,8 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 
 	/* Length must match exact page count */
 	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
-		unlock_kernel();
 		return -EINVAL;
 	}
-	unlock_kernel();
 
 	if (!capable(CAP_SYS_ADMIN) &&
 	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
@@ -518,7 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open(vma);
+	drm_vm_open_locked(vma);
 	return 0;
 }
 
@@ -553,7 +558,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
-int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->head->dev;
@@ -667,8 +672,20 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
 
 	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open(vma);
+	drm_vm_open_locked(vma);
 	return 0;
 }
 
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mmap_locked(filp, vma);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
 EXPORT_SYMBOL(drm_mmap);