linux_dsm_epyc7002/samples/vfio-mdev/mdpy.c

// SPDX-License-Identifier: GPL-2.0
/*
* Mediated virtual PCI display host device driver
*
* See mdpy-defs.h for device specs
*
* (c) Gerd Hoffmann <kraxel@redhat.com>
*
* based on mtty driver which is:
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Author: Neo Jia <cjia@nvidia.com>
* Kirti Wankhede <kwankhede@nvidia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"
#define MDPY_NAME "mdpy"
#define MDPY_CLASS_NAME "mdpy"
#define MDPY_CONFIG_SPACE_SIZE 0xff
#define MDPY_MEMORY_BAR_OFFSET PAGE_SIZE
#define MDPY_DISPLAY_REGION 16
#define STORE_LE16(addr, val) (*(u16 *)addr = val)
#define STORE_LE32(addr, val) (*(u32 *)addr = val)
MODULE_LICENSE("GPL v2");
static int max_devices = 4;
module_param_named(count, max_devices, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");
#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"
static const struct mdpy_type {
const char *name;
u32 format;
u32 bytepp;
u32 width;
u32 height;
} mdpy_types[] = {
{
.name = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
.format = DRM_FORMAT_XRGB8888,
.bytepp = 4,
.width = 640,
.height = 480,
}, {
.name = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
.format = DRM_FORMAT_XRGB8888,
.bytepp = 4,
.width = 1024,
.height = 768,
}, {
.name = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
.format = DRM_FORMAT_XRGB8888,
.bytepp = 4,
.width = 1920,
.height = 1080,
},
};
static dev_t mdpy_devt;
static struct class *mdpy_class;
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static u32 mdpy_count;
/* State of each mdev device */
struct mdev_state {
u8 *vconfig;
u32 bar_mask;
struct mutex ops_lock;
struct mdev_device *mdev;
struct vfio_device_info dev_info;
const struct mdpy_type *type;
u32 memsize;
void *memblk;
};
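/* Look up the mdpy_type whose name matches the requested mdev type (the kobject name). */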
static const struct mdpy_type *mdpy_find_type(struct kobject *kobj)
{
int i;
for (i = 0; i < ARRAY_SIZE(mdpy_types); i++)
if (strcmp(mdpy_types[i].name, kobj->name) == 0)
return mdpy_types + i;
return NULL;
}
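/*
 * Build the virtual PCI config space: vendor/device/subsystem IDs,
 * command/status, display class code, a 32-bit prefetchable memory BAR0
 * sized to the framebuffer, and a vendor-specific capability that
 * exposes the DRM format, width and height of the display.
 */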
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
MDPY_PCI_VENDOR_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
MDPY_PCI_DEVICE_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
MDPY_PCI_SUBVENDOR_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
MDPY_PCI_SUBDEVICE_ID);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
PCI_STATUS_CAP_LIST);
STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
PCI_CLASS_DISPLAY_OTHER);
mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;
STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_32 |
PCI_BASE_ADDRESS_MEM_PREFETCH);
mdev_state->bar_mask = ~(mdev_state->memsize) + 1;
/* vendor specific capability for the config registers */
mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
mdev_state->type->format);
STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
mdev_state->type->width);
STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
mdev_state->type->height);
}
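/*
 * Handle guest writes to PCI config space.  Only BAR0 is emulated:
 * a write of 0xffffffff (BAR sizing) is answered with the size mask,
 * any other value programs the BAR address.
 */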
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
char *buf, u32 count)
{
struct device *dev = mdev_dev(mdev_state->mdev);
u32 cfg_addr;
switch (offset) {
case PCI_BASE_ADDRESS_0:
cfg_addr = *(u32 *)buf;
if (cfg_addr == 0xffffffff) {
cfg_addr = (cfg_addr & mdev_state->bar_mask);
} else {
cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
if (cfg_addr)
dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
}
cfg_addr |= (mdev_state->vconfig[offset] &
~PCI_BASE_ADDRESS_MEM_MASK);
STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
break;
}
}
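/*
 * Common read/write handler: offsets below MDPY_CONFIG_SPACE_SIZE go to
 * the virtual config space, offsets inside the memory BAR window go to
 * the vmalloc'ed framebuffer, everything else is rejected.  Runs under
 * ops_lock.
 */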
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
loff_t pos, bool is_write)
{
struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
struct device *dev = mdev_dev(mdev);
int ret = 0;
mutex_lock(&mdev_state->ops_lock);
if (pos < MDPY_CONFIG_SPACE_SIZE) {
if (is_write)
handle_pci_cfg_write(mdev_state, pos, buf, count);
else
memcpy(buf, (mdev_state->vconfig + pos), count);
} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
(pos + count <=
MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
pos -= MDPY_MEMORY_BAR_OFFSET;
if (is_write)
memcpy(mdev_state->memblk + pos, buf, count);
else
memcpy(buf, mdev_state->memblk + pos, count);
} else {
dev_info(dev, "%s: %s @0x%llx (unhandled)\n",
__func__, is_write ? "WR" : "RD", pos);
ret = -1;
goto accessfailed;
}
ret = count;
accessfailed:
mutex_unlock(&mdev_state->ops_lock);
return ret;
}
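/* Device reset: repaint the framebuffer with a vertical gray gradient. */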
static int mdpy_reset(struct mdev_device *mdev)
{
struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
u32 stride, i;
/* initialize with gray gradient */
stride = mdev_state->type->width * mdev_state->type->bytepp;
for (i = 0; i < mdev_state->type->height; i++)
memset(mdev_state->memblk + i * stride,
i * 255 / mdev_state->type->height,
stride);
return 0;
}
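/*
 * mdev create callback: allocate per-device state, the virtual config
 * space and the framebuffer (rounded up to a power of two, as a PCI BAR
 * size must be), then populate the config space and paint the initial
 * test pattern.
 */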
static int mdpy_create(struct kobject *kobj, struct mdev_device *mdev)
{
const struct mdpy_type *type = mdpy_find_type(kobj);
struct device *dev = mdev_dev(mdev);
struct mdev_state *mdev_state;
u32 fbsize;
if (mdpy_count >= max_devices)
return -ENOMEM;
mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
if (mdev_state == NULL)
return -ENOMEM;
mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
if (mdev_state->vconfig == NULL) {
kfree(mdev_state);
return -ENOMEM;
}
if (!type)
type = &mdpy_types[0];
fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);
mdev_state->memblk = vmalloc_user(fbsize);
if (!mdev_state->memblk) {
kfree(mdev_state->vconfig);
kfree(mdev_state);
return -ENOMEM;
}
dev_info(dev, "%s: %s (%dx%d)\n",
__func__, kobj->name, type->width, type->height);
mutex_init(&mdev_state->ops_lock);
mdev_state->mdev = mdev;
mdev_set_drvdata(mdev, mdev_state);
mdev_state->type = type;
mdev_state->memsize = fbsize;
mdpy_create_config_space(mdev_state);
mdpy_reset(mdev);
mdpy_count++;
return 0;
}
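/* mdev remove callback: free the framebuffer, config space and device state. */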
static int mdpy_remove(struct mdev_device *mdev)
{
struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
struct device *dev = mdev_dev(mdev);
dev_info(dev, "%s\n", __func__);
mdev_set_drvdata(mdev, NULL);
vfree(mdev_state->memblk);
kfree(mdev_state->vconfig);
kfree(mdev_state);
mdpy_count--;
return 0;
}
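/*
 * VFIO read: split the user buffer into naturally aligned 4-, 2- and
 * 1-byte accesses and forward each one to mdev_access().
 */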
static ssize_t mdpy_read(struct mdev_device *mdev, char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = mdev_access(mdev, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
ret = mdev_access(mdev, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 2;
} else {
u8 val;
ret = mdev_access(mdev, (char *)&val, sizeof(val),
*ppos, false);
if (ret <= 0)
goto read_err;
if (copy_to_user(buf, &val, sizeof(val)))
goto read_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
read_err:
return -EFAULT;
}
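/*
 * VFIO write: same chunking as mdpy_read(), but data is copied from
 * userspace first and then forwarded to mdev_access().
 */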
static ssize_t mdpy_write(struct mdev_device *mdev, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned int done = 0;
int ret;
while (count) {
size_t filled;
if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 4;
} else if (count >= 2 && !(*ppos % 2)) {
u16 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 2;
} else {
u8 val;
if (copy_from_user(&val, buf, sizeof(val)))
goto write_err;
ret = mdev_access(mdev, (char *)&val, sizeof(val),
*ppos, true);
if (ret <= 0)
goto write_err;
filled = 1;
}
count -= filled;
done += filled;
*ppos += filled;
buf += filled;
}
return done;
write_err:
return -EFAULT;
}
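/*
 * Map the framebuffer into userspace.  Only shared mappings that start
 * at the memory BAR offset and do not exceed the framebuffer size are
 * allowed.
 */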
static int mdpy_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
return -EINVAL;
if (vma->vm_end < vma->vm_start)
return -EINVAL;
if (vma->vm_end - vma->vm_start > mdev_state->memsize)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
return remap_vmalloc_range_partial(vma, vma->vm_start,
mdev_state->memblk, 0,
vma->vm_end - vma->vm_start);
}
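/*
 * Report VFIO region info: the PCI config space region, plus BAR0 and
 * the dedicated display region, which both describe the same
 * framebuffer and support mmap.
 */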
static int mdpy_get_region_info(struct mdev_device *mdev,
struct vfio_region_info *region_info,
u16 *cap_type_id, void **cap_type)
{
struct mdev_state *mdev_state;
mdev_state = mdev_get_drvdata(mdev);
if (!mdev_state)
return -EINVAL;
if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
region_info->index != MDPY_DISPLAY_REGION)
return -EINVAL;
switch (region_info->index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
region_info->offset = 0;
region_info->size = MDPY_CONFIG_SPACE_SIZE;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE);
break;
case VFIO_PCI_BAR0_REGION_INDEX:
case MDPY_DISPLAY_REGION:
region_info->offset = MDPY_MEMORY_BAR_OFFSET;
region_info->size = mdev_state->memsize;
region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
VFIO_REGION_INFO_FLAG_WRITE |
VFIO_REGION_INFO_FLAG_MMAP);
break;
default:
region_info->size = 0;
region_info->offset = 0;
region_info->flags = 0;
}
return 0;
}
static int mdpy_get_irq_info(struct mdev_device *mdev,
struct vfio_irq_info *irq_info)
{
irq_info->count = 0;
return 0;
}
static int mdpy_get_device_info(struct mdev_device *mdev,
struct vfio_device_info *dev_info)
{
dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
return 0;
}
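/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: describe the framebuffer as a
 * region-backed plane (format, size, stride, region index) so that
 * userspace (e.g. QEMU's vfio display support) can show it without
 * needing dma-buf support.
 */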
static int mdpy_query_gfx_plane(struct mdev_device *mdev,
struct vfio_device_gfx_plane_info *plane)
{
struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
VFIO_GFX_PLANE_TYPE_REGION))
return 0;
return -EINVAL;
}
if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
return -EINVAL;
plane->drm_format = mdev_state->type->format;
plane->width = mdev_state->type->width;
plane->height = mdev_state->type->height;
plane->stride = (mdev_state->type->width *
mdev_state->type->bytepp);
plane->size = mdev_state->memsize;
plane->region_index = MDPY_DISPLAY_REGION;
/* unused */
plane->drm_format_mod = 0;
plane->x_pos = 0;
plane->y_pos = 0;
plane->x_hot = 0;
plane->y_hot = 0;
return 0;
}
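/*
 * VFIO ioctl dispatcher: device, region, irq and gfx plane queries plus
 * device reset; VFIO_DEVICE_SET_IRQS is rejected as no interrupts are
 * provided.
 */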
static long mdpy_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg)
{
int ret = 0;
unsigned long minsz;
struct mdev_state *mdev_state;
mdev_state = mdev_get_drvdata(mdev);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
{
struct vfio_device_info info;
minsz = offsetofend(struct vfio_device_info, num_irqs);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mdpy_get_device_info(mdev, &info);
if (ret)
return ret;
memcpy(&mdev_state->dev_info, &info, sizeof(info));
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
struct vfio_region_info info;
u16 cap_type_id = 0;
void *cap_type = NULL;
minsz = offsetofend(struct vfio_region_info, offset);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if (info.argsz < minsz)
return -EINVAL;
ret = mdpy_get_region_info(mdev, &info, &cap_type_id,
&cap_type);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
{
struct vfio_irq_info info;
minsz = offsetofend(struct vfio_irq_info, count);
if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT;
if ((info.argsz < minsz) ||
(info.index >= mdev_state->dev_info.num_irqs))
return -EINVAL;
ret = mdpy_get_irq_info(mdev, &info);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &info, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_QUERY_GFX_PLANE:
{
struct vfio_device_gfx_plane_info plane;
minsz = offsetofend(struct vfio_device_gfx_plane_info,
region_index);
if (copy_from_user(&plane, (void __user *)arg, minsz))
return -EFAULT;
if (plane.argsz < minsz)
return -EINVAL;
ret = mdpy_query_gfx_plane(mdev, &plane);
if (ret)
return ret;
if (copy_to_user((void __user *)arg, &plane, minsz))
return -EFAULT;
return 0;
}
case VFIO_DEVICE_SET_IRQS:
return -EINVAL;
case VFIO_DEVICE_RESET:
return mdpy_reset(mdev);
}
return -ENOTTY;
}
static int mdpy_open(struct mdev_device *mdev)
{
if (!try_module_get(THIS_MODULE))
return -ENODEV;
return 0;
}
static void mdpy_close(struct mdev_device *mdev)
{
module_put(THIS_MODULE);
}
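/* Per-device sysfs attribute, exposed in the "vendor" group, showing the display size. */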
static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct mdev_device *mdev = mdev_from_dev(dev);
struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
return sprintf(buf, "%dx%d\n",
mdev_state->type->width,
mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);
static struct attribute *mdev_dev_attrs[] = {
&dev_attr_resolution.attr,
NULL,
};
static const struct attribute_group mdev_dev_group = {
.name = "vendor",
.attrs = mdev_dev_attrs,
};
const struct attribute_group *mdev_dev_groups[] = {
&mdev_dev_group,
NULL,
};
static ssize_t
name_show(struct kobject *kobj, struct device *dev, char *buf)
{
return sprintf(buf, "%s\n", kobj->name);
}
MDEV_TYPE_ATTR_RO(name);
static ssize_t
description_show(struct kobject *kobj, struct device *dev, char *buf)
{
const struct mdpy_type *type = mdpy_find_type(kobj);
return sprintf(buf, "virtual display, %dx%d framebuffer\n",
type ? type->width : 0,
type ? type->height : 0);
}
MDEV_TYPE_ATTR_RO(description);
static ssize_t
available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
{
return sprintf(buf, "%d\n", max_devices - mdpy_count);
}
MDEV_TYPE_ATTR_RO(available_instances);
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
char *buf)
{
return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
MDEV_TYPE_ATTR_RO(device_api);
static struct attribute *mdev_types_attrs[] = {
&mdev_type_attr_name.attr,
&mdev_type_attr_description.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_available_instances.attr,
NULL,
};
static struct attribute_group mdev_type_group1 = {
.name = MDPY_TYPE_1,
.attrs = mdev_types_attrs,
};
static struct attribute_group mdev_type_group2 = {
.name = MDPY_TYPE_2,
.attrs = mdev_types_attrs,
};
static struct attribute_group mdev_type_group3 = {
.name = MDPY_TYPE_3,
.attrs = mdev_types_attrs,
};
static struct attribute_group *mdev_type_groups[] = {
&mdev_type_group1,
&mdev_type_group2,
&mdev_type_group3,
NULL,
};
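/*
 * mdev_parent_ops: wires the supported type groups, per-device
 * attributes and the create/remove/open/release/read/write/ioctl/mmap
 * callbacks into the mdev core.
 */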
static const struct mdev_parent_ops mdev_fops = {
.owner = THIS_MODULE,
.mdev_attr_groups = mdev_dev_groups,
.supported_type_groups = mdev_type_groups,
.create = mdpy_create,
.remove = mdpy_remove,
.open = mdpy_open,
.release = mdpy_close,
.read = mdpy_read,
.write = mdpy_write,
.ioctl = mdpy_ioctl,
.mmap = mdpy_mmap,
};
static const struct file_operations vd_fops = {
.owner = THIS_MODULE,
};
static void mdpy_device_release(struct device *dev)
{
/* nothing */
}
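/*
 * Module init: allocate a char device region and cdev, create the
 * "mdpy" class and parent device, then register that device with the
 * mdev core so mediated devices can be created via sysfs.
 */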
static int __init mdpy_dev_init(void)
{
int ret = 0;
ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
if (ret < 0) {
pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
return ret;
}
cdev_init(&mdpy_cdev, &vd_fops);
cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));
mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
if (IS_ERR(mdpy_class)) {
pr_err("Error: failed to register mdpy_dev class\n");
ret = PTR_ERR(mdpy_class);
goto failed1;
}
mdpy_dev.class = mdpy_class;
mdpy_dev.release = mdpy_device_release;
dev_set_name(&mdpy_dev, "%s", MDPY_NAME);
ret = device_register(&mdpy_dev);
if (ret)
goto failed2;
ret = mdev_register_device(&mdpy_dev, &mdev_fops);
if (ret)
goto failed3;
return 0;
failed3:
device_unregister(&mdpy_dev);
failed2:
class_destroy(mdpy_class);
failed1:
cdev_del(&mdpy_cdev);
unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
return ret;
}
static void __exit mdpy_dev_exit(void)
{
mdpy_dev.bus = NULL;
mdev_unregister_device(&mdpy_dev);
device_unregister(&mdpy_dev);
cdev_del(&mdpy_cdev);
unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
class_destroy(mdpy_class);
mdpy_class = NULL;
}
module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)