linux_dsm_epyc7002/drivers/xen/privcmd-buf.c

// SPDX-License-Identifier: GPL-2.0 OR MIT

/******************************************************************************
 * privcmd-buf.c
 *
 * Mmap of hypercall buffers.
 *
 * Copyright (c) 2018 Juergen Gross
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");
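/*
 * Per-file state: a lock and the list of buffer mappings created via mmap().
 * Each mapping is tracked by a privcmd_buf_vma_private holding its pages.
 */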
struct privcmd_buf_private {
	struct mutex lock;
	struct list_head list;
};

struct privcmd_buf_vma_private {
	struct privcmd_buf_private *file_priv;
	struct list_head list;
	unsigned int users;
	unsigned int n_pages;
	struct page *pages[];
};
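/* Allocate and initialize per-file state when the device is opened. */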
static int privcmd_buf_open(struct inode *ino, struct file *file)
{
	struct privcmd_buf_private *file_priv;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	mutex_init(&file_priv->lock);
	INIT_LIST_HEAD(&file_priv->list);

	file->private_data = file_priv;

	return 0;
}
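/* Unlink a buffer and free its pages; caller holds file_priv->lock. */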
static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
{
	unsigned int i;

	list_del(&vma_priv->list);

	for (i = 0; i < vma_priv->n_pages; i++)
		__free_page(vma_priv->pages[i]);

	kfree(vma_priv);
}
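/* On final close, release any buffers still tracked for this file. */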
static int privcmd_buf_release(struct inode *ino, struct file *file)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;

	mutex_lock(&file_priv->lock);

	while (!list_empty(&file_priv->list)) {
		vma_priv = list_first_entry(&file_priv->list,
					    struct privcmd_buf_vma_private,
					    list);
		privcmd_buf_vmapriv_free(vma_priv);
	}

	mutex_unlock(&file_priv->lock);

	kfree(file_priv);

	return 0;
}
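/* The VMA was duplicated (e.g. across fork()): take another reference. */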
static void privcmd_buf_vma_open(struct vm_area_struct *vma)
{
	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;

	if (!vma_priv)
		return;

	mutex_lock(&vma_priv->file_priv->lock);
	vma_priv->users++;
	mutex_unlock(&vma_priv->file_priv->lock);
}
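/* Drop a reference; free the buffer once its last mapping is gone. */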
static void privcmd_buf_vma_close(struct vm_area_struct *vma)
{
	struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
	struct privcmd_buf_private *file_priv;

	if (!vma_priv)
		return;

	file_priv = vma_priv->file_priv;

	mutex_lock(&file_priv->lock);

	vma_priv->users--;
	if (!vma_priv->users)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);
}
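/*
 * All pages are mapped up front in privcmd_buf_mmap(), so a fault here is
 * unexpected: report it via pr_debug() and raise SIGBUS.
 */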
static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
{
	pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
		 vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
		 vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_buf_vm_ops = {
	.open = privcmd_buf_vma_open,
	.close = privcmd_buf_vma_close,
	.fault = privcmd_buf_vma_fault,
};
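/*
 * Back the VMA with freshly allocated, zeroed pages, map them into user space
 * with vm_map_pages_zero() and track the buffer on the file's list.
 */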
static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct privcmd_buf_private *file_priv = file->private_data;
	struct privcmd_buf_vma_private *vma_priv;
	unsigned long count = vma_pages(vma);
	unsigned int i;
	int ret = 0;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
	if (!vma_priv)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!vma_priv->pages[i])
			break;
		vma_priv->n_pages++;
	}

	mutex_lock(&file_priv->lock);

	vma_priv->file_priv = file_priv;
	vma_priv->users = 1;

	vma->vm_flags |= VM_IO | VM_DONTEXPAND;
	vma->vm_ops = &privcmd_buf_vm_ops;
	vma->vm_private_data = vma_priv;

	list_add(&vma_priv->list, &file_priv->list);

	if (vma_priv->n_pages != count)
		ret = -ENOMEM;
	else
		ret = vm_map_pages_zero(vma, vma_priv->pages,
					vma_priv->n_pages);

	if (ret)
		privcmd_buf_vmapriv_free(vma_priv);

	mutex_unlock(&file_priv->lock);

	return ret;
}
const struct file_operations xen_privcmdbuf_fops = {
	.owner = THIS_MODULE,
	.open = privcmd_buf_open,
	.release = privcmd_buf_release,
	.mmap = privcmd_buf_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
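/* Misc device for the hypercall buffer interface; appears as /dev/xen/hypercall. */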
struct miscdevice xen_privcmdbuf_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/hypercall",
	.fops = &xen_privcmdbuf_fops,
};