linux_dsm_epyc7002/include/uapi/linux/vfio.h


/*
* VFIO API definition
*
* Copyright (C) 2012 Red Hat, Inc. All rights reserved.
* Author: Alex Williamson <alex.williamson@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef _UAPIVFIO_H
#define _UAPIVFIO_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define VFIO_API_VERSION 0
/* Kernel & User level defines for VFIO IOCTLs. */
/* Extensions */
#define VFIO_TYPE1_IOMMU 1
#define VFIO_SPAPR_TCE_IOMMU 2
#define VFIO_TYPE1v2_IOMMU 3
/*
* IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping). This
* capability is subject to change as groups are added or removed.
*/
#define VFIO_DMA_CC_IOMMU 4
/* Check if EEH is supported */
#define VFIO_EEH 5
/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
#define VFIO_SPAPR_TCE_v2_IOMMU 7
/*
* The No-IOMMU IOMMU offers no translation or isolation for devices and
* supports no ioctls outside of VFIO_CHECK_EXTENSION. Use of VFIO's No-IOMMU
* code will taint the host kernel and should be used with extreme caution.
*/
#define VFIO_NOIOMMU_IOMMU 8
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
* kernel and userspace. We therefore use the _IO() macro for these
* defines to avoid implicitly embedding a size into the ioctl request.
* As structure fields are added, argsz will increase to match and flag
* bits will be defined to indicate additional fields with valid data.
* It's *always* the caller's responsibility to indicate the size of
* the structure passed by setting argsz appropriately.
*/
#define VFIO_TYPE (';')
#define VFIO_BASE 100
/*
* For extension of INFO ioctls, VFIO makes use of a capability chain
* designed after PCIe capabilities. A flag bit indicates whether
* this capability chain is supported and a field defined in the fixed
* structure defines the offset of the first capability in the chain.
* This field is only valid when the corresponding bit in the flags
* bitmap is set. This offset field is relative to the start of the
* INFO buffer, as is the next field within each capability header.
* The id within the header is a shared address space per INFO ioctl,
* while the version field is specific to the capability id. The
* contents following the header are specific to the capability id.
*/
struct vfio_info_cap_header {
__u16 id; /* Identifies capability */
__u16 version; /* Version specific to the capability ID */
__u32 next; /* Offset of next capability */
};
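/*
* Illustrative userspace sketch, not part of the UAPI: walk a capability
* chain from an INFO ioctl buffer and locate a capability by id. The
* VFIO_USAGE_EXAMPLE guard is hypothetical and never defined, so these
* example blocks do not affect consumers of this header; later sketches
* assume the includes below.
*/
#ifdef VFIO_USAGE_EXAMPLE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static struct vfio_info_cap_header *
example_find_cap(void *info_buf, __u32 cap_offset, __u16 id)
{
        /* Offsets are relative to the start of the INFO buffer */
        while (cap_offset) {
                struct vfio_info_cap_header *hdr = (struct vfio_info_cap_header *)
                        ((char *)info_buf + cap_offset);

                if (hdr->id == id)
                        return hdr;
                cap_offset = hdr->next; /* a zero next offset ends the chain */
        }
        return NULL;
}
#endif /* VFIO_USAGE_EXAMPLE */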
/*
* Callers of INFO ioctls passing insufficiently sized buffers will see
* the capability chain flag bit set, a zero value for the first capability
* offset (if available within the provided argsz), and argsz will be
* updated to report the necessary buffer size. For compatibility, the
* INFO ioctl will not report error in this case, but the capability chain
* will not be available.
*/
/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */
/**
* VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
*
* Report the version of the VFIO API. This allows us to bump the entire
* API version should we later need to add or change features in incompatible
* ways.
* Return: VFIO_API_VERSION
* Availability: Always
*/
#define VFIO_GET_API_VERSION _IO(VFIO_TYPE, VFIO_BASE + 0)
/**
* VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
*
* Check whether an extension is supported.
* Return: 0 if not supported, 1 (or some other positive integer) if supported.
* Availability: Always
*/
#define VFIO_CHECK_EXTENSION _IO(VFIO_TYPE, VFIO_BASE + 1)
/**
* VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
*
* Set the iommu to the given type. The type must be supported by an
* iommu driver as verified by calling CHECK_EXTENSION using the same
* type. A group must be set to this file descriptor before this
* ioctl is available. The IOMMU interfaces enabled by this call are
* specific to the value set.
* Return: 0 on success, -errno on failure
* Availability: When VFIO group attached
*/
#define VFIO_SET_IOMMU _IO(VFIO_TYPE, VFIO_BASE + 2)
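/*
* Illustrative sketch: open a container, validate the API version, and
* confirm the Type1 backend is available. VFIO_SET_IOMMU itself must
* wait until a group is attached; see the group sketch further below.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_open_container(void)
{
        int container = open("/dev/vfio/vfio", O_RDWR);

        if (container < 0)
                return -1;

        /* Refuse to continue against an unknown API revision */
        if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
                goto err;

        /* A positive return value means the extension is supported */
        if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) <= 0)
                goto err;

        return container;
err:
        close(container);
        return -1;
}
#endif /* VFIO_USAGE_EXAMPLE */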
/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */
/**
* VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
* struct vfio_group_status)
*
* Retrieve information about the group. Fills in provided
* struct vfio_group_status. Caller sets argsz.
* Return: 0 on success, -errno on failure.
* Availability: Always
*/
struct vfio_group_status {
__u32 argsz;
__u32 flags;
#define VFIO_GROUP_FLAGS_VIABLE (1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET (1 << 1)
};
#define VFIO_GROUP_GET_STATUS _IO(VFIO_TYPE, VFIO_BASE + 3)
/**
* VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
*
* Set the container for the VFIO group to the open VFIO file
* descriptor provided. Groups may only belong to a single
* container. Containers may, at their discretion, support multiple
* groups. Only when a container is set are all of the interfaces
* of the VFIO file descriptor and the VFIO group file descriptor
* available to the user.
* Return: 0 on success, -errno on failure.
* Availability: Always
*/
#define VFIO_GROUP_SET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 4)
/**
* VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
*
* Remove the group from the attached container. This is the
* opposite of the SET_CONTAINER call and returns the group to
* an initial state. All device file descriptors must be released
* prior to calling this interface. When removing the last group
* from a container, the IOMMU will be disabled and all state lost,
* effectively also returning the VFIO file descriptor to an initial
* state.
* Return: 0 on success, -errno on failure.
* Availability: When attached to container
*/
#define VFIO_GROUP_UNSET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 5)
/**
* VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
*
* Return a new file descriptor for the device object described by
* the provided string. The string should match a device listed in
* the devices subdirectory of the IOMMU group sysfs entry. The
* group containing the device must already be added to this context.
* Return: new file descriptor on success, -errno on failure.
* Availability: When attached to container
*/
#define VFIO_GROUP_GET_DEVICE_FD _IO(VFIO_TYPE, VFIO_BASE + 6)
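/*
* Illustrative sketch of the whole group flow: check viability, attach
* the group to a container, select the IOMMU backend, then fetch a
* device fd. The group path and device name are example values.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_get_device_fd(int container, const char *group_path,
                                 const char *device_name)
{
        struct vfio_group_status status = { .argsz = sizeof(status) };
        int device = -1;
        int group = open(group_path, O_RDWR); /* e.g. "/dev/vfio/26" */

        if (group < 0)
                return -1;

        /* Viable only when every device in the group is bound to vfio */
        if (ioctl(group, VFIO_GROUP_GET_STATUS, &status) ||
            !(status.flags & VFIO_GROUP_FLAGS_VIABLE))
                goto out;

        if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &container))
                goto out;

        /* With a group attached, the IOMMU backend can now be selected */
        if (ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU))
                goto out;

        /* The name matches sysfs, e.g. "0000:06:0d.0" for a PCI device */
        device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, device_name);
out:
        close(group); /* a returned device fd remains valid */
        return device;
}
#endif /* VFIO_USAGE_EXAMPLE */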
/* --------------- IOCTLs for DEVICE file descriptors --------------- */
/**
* VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
* struct vfio_device_info)
*
* Retrieve information about the device. Fills in provided
* struct vfio_device_info. Caller sets argsz.
* Return: 0 on success, -errno on failure.
*/
struct vfio_device_info {
__u32 argsz;
__u32 flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
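/*
* Illustrative sketch: num_regions and num_irqs bound the index spaces
* used by the region and IRQ queries below.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_device_info(int device)
{
        struct vfio_device_info info = { .argsz = sizeof(info) };

        if (ioctl(device, VFIO_DEVICE_GET_INFO, &info))
                return -1;

        printf("regions: %u, irqs: %u%s\n", info.num_regions, info.num_irqs,
               (info.flags & VFIO_DEVICE_FLAGS_RESET) ? ", supports reset" : "");
        return 0;
}
#endif /* VFIO_USAGE_EXAMPLE */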
/*
* A vendor driver using the mediated device framework should provide a
* device_api attribute in its supported type attribute groups. The device
* API string should be one of the following, corresponding to the device
* flags in struct vfio_device_info.
*/
#define VFIO_DEVICE_API_PCI_STRING "vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
* struct vfio_region_info)
*
* Retrieve information about a device region. Caller provides
* struct vfio_region_info with index value set. Caller sets argsz.
* Implementation of region mapping is bus driver specific. This is
* intended to describe MMIO, I/O port, as well as bus specific
* regions (ex. PCI config space). Zero sized regions may be used
* to describe unimplemented regions (ex. unimplemented PCI BARs).
* Return: 0 on success, -errno on failure.
*/
struct vfio_region_info {
__u32 argsz;
__u32 flags;
#define VFIO_REGION_INFO_FLAG_READ (1 << 0) /* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE (1 << 1) /* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP (1 << 2) /* Region supports mmap */
#define VFIO_REGION_INFO_FLAG_CAPS (1 << 3) /* Info supports caps */
__u32 index; /* Region index */
__u32 cap_offset; /* Offset within info struct of first cap */
__u64 size; /* Region size (bytes) */
__u64 offset; /* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
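/*
* Illustrative sketch: query a region, retrying with a larger buffer
* when a capability chain does not fit in the fixed structure (the
* kernel updates argsz to the required size without reporting an
* error). The caller frees the result.
*/
#ifdef VFIO_USAGE_EXAMPLE
static struct vfio_region_info *example_get_region_info(int device, __u32 index)
{
        struct vfio_region_info *info, *bigger;
        __u32 argsz = sizeof(*info);

        info = calloc(1, argsz);
        if (!info)
                return NULL;
        info->argsz = argsz;
        info->index = index;

        if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info))
                goto err;

        if (info->argsz > argsz) {
                /* Capabilities present; re-issue with the reported size */
                argsz = info->argsz;
                bigger = realloc(info, argsz);
                if (!bigger)
                        goto err;
                info = bigger;
                memset(info, 0, argsz);
                info->argsz = argsz;
                info->index = index;
                if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, info))
                        goto err;
        }
        return info;
err:
        free(info);
        return NULL;
}
#endif /* VFIO_USAGE_EXAMPLE */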
/*
* The sparse mmap capability allows finer granularity of specifying areas
* within a region with mmap support. When specified, the user should only
* mmap the offset ranges specified by the areas array. mmaps outside of the
* areas specified may fail (such as the range covering a PCI MSI-X table) or
* may result in improper device behavior.
*
* The structures below define version 1 of this capability.
*/
#define VFIO_REGION_INFO_CAP_SPARSE_MMAP 1
struct vfio_region_sparse_mmap_area {
__u64 offset; /* Offset of mmap'able area within region */
__u64 size; /* Size of mmap'able area */
};
struct vfio_region_info_cap_sparse_mmap {
struct vfio_info_cap_header header;
__u32 nr_areas;
__u32 reserved;
struct vfio_region_sparse_mmap_area areas[];
};
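/*
* Illustrative sketch building on example_get_region_info() and
* example_find_cap() above: list the mmap'able areas of a region that
* advertises the sparse mmap capability.
*/
#ifdef VFIO_USAGE_EXAMPLE
static void example_list_sparse_areas(int device, __u32 index)
{
        struct vfio_region_info *info = example_get_region_info(device, index);
        struct vfio_region_info_cap_sparse_mmap *sparse;
        struct vfio_info_cap_header *hdr;
        __u32 i;

        if (!info)
                return;

        if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && info->cap_offset) {
                hdr = example_find_cap(info, info->cap_offset,
                                       VFIO_REGION_INFO_CAP_SPARSE_MMAP);
                if (hdr) {
                        sparse = (struct vfio_region_info_cap_sparse_mmap *)hdr;
                        /* Only these (offset, size) ranges should be mmap'd */
                        for (i = 0; i < sparse->nr_areas; i++)
                                printf("area %u: offset 0x%llx, size 0x%llx\n", i,
                                       (unsigned long long)sparse->areas[i].offset,
                                       (unsigned long long)sparse->areas[i].size);
                }
        }
        free(info);
}
#endif /* VFIO_USAGE_EXAMPLE */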
/*
* The device specific type capability allows regions unique to a specific
* device or class of devices to be exposed. This helps solve the problem for
* vfio bus drivers of defining which region indexes correspond to which region
* on the device, without needing to resort to static indexes, as done by
* vfio-pci. For instance, if we were to go back in time, we might remove
* VFIO_PCI_VGA_REGION_INDEX and let vfio-pci simply define that all indexes
* greater than or equal to VFIO_PCI_NUM_REGIONS are device specific and we'd
* make a "VGA" device specific type to describe the VGA access space. This
* means that non-VGA devices wouldn't need to waste this index, nor the
* address space associated with it by vfio-pci's implementation of
* device file descriptor offsets.
*
* The current implementation is now part of the user ABI, so we can't use this
* for VGA, but there are other upcoming use cases, such as opregions for Intel
* IGD devices and framebuffers for vGPU devices. We missed VGA, but we'll
* use this for future additions.
*
* The structure below defines version 1 of this capability.
*/
#define VFIO_REGION_INFO_CAP_TYPE 2
struct vfio_region_info_cap_type {
struct vfio_info_cap_header header;
__u32 type; /* global per bus driver */
__u32 subtype; /* type specific */
};
#define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31)
#define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff)
/* 8086 Vendor sub-types */
#define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2)
#define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3)
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
*
* Retrieve information about a device IRQ. Caller provides
* struct vfio_irq_info with index value set. Caller sets argsz.
* Implementation of IRQ mapping is bus driver specific. Indexes
* using multiple IRQs are primarily intended to support MSI-like
* interrupt blocks. Zero count irq blocks may be used to describe
* unimplemented interrupt types.
*
* The EVENTFD flag indicates the interrupt index supports eventfd based
* signaling.
*
* The MASKABLE flag indicates the index supports MASK and UNMASK
* actions described below.
*
* AUTOMASKED indicates that after signaling, the interrupt line is
* automatically masked by VFIO and the user needs to unmask the line
* to receive new interrupts. This is primarily intended to distinguish
* level triggered interrupts.
*
* The NORESIZE flag indicates that the interrupt lines within the index
* are set up as a set and new subindexes cannot be enabled without first
* disabling the entire index. This is used for interrupts like PCI MSI
* and MSI-X where the driver may only use a subset of the available
* indexes, but VFIO needs to enable a specific number of vectors
* upfront. In the case of MSI-X, where the user can enable MSI-X and
* then add and unmask vectors, it's up to userspace to decide whether
* to allocate the maximum supported number of vectors or to tear down
* the setup and incrementally increase the vectors as each is enabled.
*/
struct vfio_irq_info {
__u32 argsz;
__u32 flags;
#define VFIO_IRQ_INFO_EVENTFD (1 << 0)
#define VFIO_IRQ_INFO_MASKABLE (1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED (1 << 2)
#define VFIO_IRQ_INFO_NORESIZE (1 << 3)
__u32 index; /* IRQ index */
__u32 count; /* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO _IO(VFIO_TYPE, VFIO_BASE + 9)
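/*
* Illustrative sketch: query an IRQ index; a zero count marks an
* unimplemented interrupt type.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_irq_count(int device, __u32 index, __u32 *count)
{
        struct vfio_irq_info info = { .argsz = sizeof(info), .index = index };

        if (ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &info))
                return -1;

        *count = info.count;
        return 0;
}
#endif /* VFIO_USAGE_EXAMPLE */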
/**
* VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
*
* Set signaling, masking, and unmasking of interrupts. Caller provides
* struct vfio_irq_set with all fields set. 'start' and 'count' indicate
* the range of subindexes being specified.
*
* The DATA flags specify the type of data provided. If DATA_NONE, the
* operation performs the specified action immediately on the specified
* interrupt(s). For example, to unmask AUTOMASKED interrupt [0,0]:
* flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
*
* DATA_BOOL allows the same actions to be applied sparsely to arrays of
* interrupts.
* For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
* flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
* data = {1,0,1}
*
* DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
* A value of -1 can be used to either de-assign interrupts if already
* assigned or skip un-assigned interrupts. For example, to set an eventfd
* to be triggered for interrupts [0,0] and [0,2]:
* flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
* data = {fd1, -1, fd2}
* If index [0,1] was previously set, two count = 1 ioctl calls would be
* required to set [0,0] and [0,2] without changing [0,1].
*
* Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
* with ACTION_TRIGGER to perform kernel level interrupt loopback testing
* from userspace (i.e. simulate hardware triggering).
*
* Setting of an event triggering mechanism to userspace for ACTION_TRIGGER
* enables the interrupt index for the device. Individual subindex interrupts
* can be disabled using the -1 value for DATA_EVENTFD or the index can be
* disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
*
* Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
* ACTION_TRIGGER specifies kernel->user signaling.
*/
struct vfio_irq_set {
__u32 argsz;
__u32 flags;
#define VFIO_IRQ_SET_DATA_NONE (1 << 0) /* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL (1 << 1) /* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD (1 << 2) /* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK (1 << 3) /* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK (1 << 4) /* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER (1 << 5) /* Trigger interrupt */
__u32 index;
__u32 start;
__u32 count;
__u8 data[];
};
#define VFIO_DEVICE_SET_IRQS _IO(VFIO_TYPE, VFIO_BASE + 10)
#define VFIO_IRQ_SET_DATA_TYPE_MASK (VFIO_IRQ_SET_DATA_NONE | \
VFIO_IRQ_SET_DATA_BOOL | \
VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK (VFIO_IRQ_SET_ACTION_MASK | \
VFIO_IRQ_SET_ACTION_UNMASK | \
VFIO_IRQ_SET_ACTION_TRIGGER)
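/*
* Illustrative sketch: bind one eventfd per subindex using
* DATA_EVENTFD|ACTION_TRIGGER, which enables the index (pass e.g.
* VFIO_PCI_MSIX_IRQ_INDEX, defined further below). The data[] payload
* is variable length, so the structure is sized for one __s32 per
* vector; a -1 entry would leave that vector unassigned.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_enable_trigger_eventfds(int device, __u32 index,
                                           const __s32 *eventfds, __u32 count)
{
        struct vfio_irq_set *set;
        __u32 argsz = sizeof(*set) + count * sizeof(__s32);
        int ret;

        set = malloc(argsz);
        if (!set)
                return -1;

        set->argsz = argsz;
        set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
        set->index = index;
        set->start = 0;
        set->count = count;
        memcpy(set->data, eventfds, count * sizeof(__s32));

        ret = ioctl(device, VFIO_DEVICE_SET_IRQS, set);
        free(set);
        return ret;
}
#endif /* VFIO_USAGE_EXAMPLE */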
/**
* VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
*
* Reset a device.
*/
#define VFIO_DEVICE_RESET _IO(VFIO_TYPE, VFIO_BASE + 11)
/*
* The VFIO-PCI bus driver makes use of the following fixed region and
* IRQ index mapping. Unimplemented regions return a size of zero.
* Unimplemented IRQ types return a count of zero.
*/
enum {
VFIO_PCI_BAR0_REGION_INDEX,
VFIO_PCI_BAR1_REGION_INDEX,
VFIO_PCI_BAR2_REGION_INDEX,
VFIO_PCI_BAR3_REGION_INDEX,
VFIO_PCI_BAR4_REGION_INDEX,
VFIO_PCI_BAR5_REGION_INDEX,
VFIO_PCI_ROM_REGION_INDEX,
VFIO_PCI_CONFIG_REGION_INDEX,
/*
* Expose VGA regions defined for PCI base class 03, subclass 00.
* This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
* as well as the MMIO range 0xa0000 to 0xbffff. Each implemented
* range is found at its identity mapped offset from the region
* offset, for example 0x3b0 is region_info.offset + 0x3b0. Areas
* between described ranges are unimplemented.
*/
VFIO_PCI_VGA_REGION_INDEX,
VFIO_PCI_NUM_REGIONS = 9 /* Fixed user ABI, region indexes >=9 use */
/* device specific cap to define content. */
};
enum {
VFIO_PCI_INTX_IRQ_INDEX,
VFIO_PCI_MSI_IRQ_INDEX,
VFIO_PCI_MSIX_IRQ_INDEX,
VFIO_PCI_ERR_IRQ_INDEX,
VFIO_PCI_REQ_IRQ_INDEX,
VFIO_PCI_NUM_IRQS
};
/**
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
* Return: 0 on success, -errno on failure:
* -ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
*/
struct vfio_pci_dependent_device {
__u32 group_id;
__u16 segment;
__u8 bus;
__u8 devfn; /* Use PCI_SLOT/PCI_FUNC */
};
struct vfio_pci_hot_reset_info {
__u32 argsz;
__u32 flags;
__u32 count;
struct vfio_pci_dependent_device devices[];
};
#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
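/*
* Illustrative sketch: the first call is expected to fail with ENOSPC
* while still reporting count, which sizes the retry. The caller frees
* the result; devices[0..count) then lists the affected devices.
*/
#ifdef VFIO_USAGE_EXAMPLE
static struct vfio_pci_hot_reset_info *example_hot_reset_info(int device)
{
        struct vfio_pci_hot_reset_info *info, *bigger;
        __u32 argsz = sizeof(*info);

        info = calloc(1, argsz);
        if (!info)
                return NULL;
        info->argsz = argsz;

        if (ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info)) {
                if (errno != ENOSPC)
                        goto err;

                argsz = sizeof(*info) + info->count *
                        sizeof(struct vfio_pci_dependent_device);
                bigger = realloc(info, argsz);
                if (!bigger)
                        goto err;
                info = bigger;
                memset(info, 0, argsz);
                info->argsz = argsz;
                if (ioctl(device, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info))
                        goto err;
        }
        return info;
err:
        free(info);
        return NULL;
}
#endif /* VFIO_USAGE_EXAMPLE */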
/**
* VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
* struct vfio_pci_hot_reset)
*
* Return: 0 on success, -errno on failure.
*/
struct vfio_pci_hot_reset {
__u32 argsz;
__u32 flags;
__u32 count;
__s32 group_fds[];
};
#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
/* -------- API for Type1 VFIO IOMMU -------- */
/**
* VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12, struct vfio_iommu_type1_info)
*
* Retrieve information about the IOMMU object. Fills in provided
* struct vfio_iommu_type1_info. Caller sets argsz.
*
* XXX Should we do these by CHECK_EXTENSION too?
*/
struct vfio_iommu_type1_info {
__u32 argsz;
__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */
__u64 iova_pgsizes; /* Bitmap of supported page sizes */
};
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
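/*
* Illustrative sketch: read the bitmap of supported IOVA page sizes;
* each set bit is one supported size.
*/
#ifdef VFIO_USAGE_EXAMPLE
static __u64 example_iommu_pgsizes(int container)
{
        struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };

        if (ioctl(container, VFIO_IOMMU_GET_INFO, &info))
                return 0;

        return (info.flags & VFIO_IOMMU_INFO_PGSIZES) ? info.iova_pgsizes : 0;
}
#endif /* VFIO_USAGE_EXAMPLE */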
/**
* VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13, struct vfio_iommu_type1_dma_map)
*
* Map process virtual addresses to IO virtual addresses using the
* provided struct vfio_iommu_type1_dma_map. Caller sets argsz. READ and/or
* WRITE required.
*/
struct vfio_iommu_type1_dma_map {
__u32 argsz;
__u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */
__u64 vaddr; /* Process virtual address */
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
};
#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
/**
* VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
* struct vfio_iommu_type1_dma_unmap)
*
* Unmap IO virtual addresses using the provided
* struct vfio_iommu_type1_dma_unmap.
* Caller sets argsz. The actual unmapped size is returned in the size
* field. No guarantee is made to the user that arbitrary unmaps of iova
* or size different from those used in the original mapping call will
* succeed.
*/
struct vfio_iommu_type1_dma_unmap {
__u32 argsz;
__u32 flags;
__u64 iova; /* IO virtual address */
__u64 size; /* Size of mapping (bytes) */
};
#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
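/*
* Illustrative sketch: map one page of anonymous memory at IOVA 0 for
* device DMA, then unmap it and check the rewritten size field.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_dma_map_unmap(int container)
{
        struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
        struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap) };
        int ret = -1;
        void *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (mem == MAP_FAILED)
                return -1;

        map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
        map.vaddr = (__u64)(unsigned long)mem;
        map.iova = 0;
        map.size = 4096;
        if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map))
                goto out;

        unmap.iova = 0;
        unmap.size = 4096;
        if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap))
                goto out;

        /* unmap.size now reports how much was actually unmapped */
        ret = (unmap.size == 4096) ? 0 : -1;
out:
        munmap(mem, 4096);
        return ret;
}
#endif /* VFIO_USAGE_EXAMPLE */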
/*
* IOCTLs to enable/disable IOMMU container usage.
* No parameters are supported.
*/
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)
/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
/*
* The SPAPR TCE DDW info struct provides the information about
* the details of Dynamic DMA window capability.
*
* @pgsizes contains a page size bitmask; 4K, 64K and 16M pages are supported.
* @max_dynamic_windows_supported tells the maximum number of windows
* which the platform can create.
* @levels tells the maximum number of levels in multi-level IOMMU tables;
* this allows splitting a table into smaller chunks which reduces
* the amount of physically contiguous memory required for the table.
*/
struct vfio_iommu_spapr_tce_ddw_info {
__u64 pgsizes; /* Bitmap of supported page sizes */
__u32 max_dynamic_windows_supported;
__u32 levels;
};
/*
* The SPAPR TCE info struct provides information about the PCI bus
* address ranges available for DMA. These values are programmed into
* the hardware, so the guest has to know them.
*
* The DMA 32 bit window start is an absolute PCI bus address.
* The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
* addresses too so the window works as a filter rather than an offset
* for IOVA addresses.
*
* Flags supported:
* - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
* (DDW) support is present. @ddw is only supported when DDW is present.
*/
struct vfio_iommu_spapr_tce_info {
__u32 argsz;
__u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0) /* DDW supported */
__u32 dma32_window_start; /* 32 bit window start (bytes) */
__u32 dma32_window_size; /* 32 bit window size (bytes) */
struct vfio_iommu_spapr_tce_ddw_info ddw;
};
#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
/*
* EEH PE operation struct provides ways to:
* - enable/disable EEH functionality;
* - unfreeze IO/DMA for frozen PE;
* - read PE state;
* - reset PE;
* - configure PE;
* - inject EEH error.
*/
struct vfio_eeh_pe_err {
__u32 type;
__u32 func;
__u64 addr;
__u64 mask;
};
struct vfio_eeh_pe_op {
__u32 argsz;
__u32 flags;
__u32 op;
union {
struct vfio_eeh_pe_err err;
};
};
#define VFIO_EEH_PE_DISABLE 0 /* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE 1 /* Enable EEH functionality */
#define VFIO_EEH_PE_UNFREEZE_IO 2 /* Enable IO for frozen PE */
#define VFIO_EEH_PE_UNFREEZE_DMA 3 /* Enable DMA for frozen PE */
#define VFIO_EEH_PE_GET_STATE 4 /* PE state retrieval */
#define VFIO_EEH_PE_STATE_NORMAL 0 /* PE in functional state */
#define VFIO_EEH_PE_STATE_RESET 1 /* PE reset in progress */
#define VFIO_EEH_PE_STATE_STOPPED 2 /* Stopped DMA and IO */
#define VFIO_EEH_PE_STATE_STOPPED_DMA 4 /* Stopped DMA only */
#define VFIO_EEH_PE_STATE_UNAVAIL 5 /* State unavailable */
#define VFIO_EEH_PE_RESET_DEACTIVATE 5 /* Deassert PE reset */
#define VFIO_EEH_PE_RESET_HOT 6 /* Assert hot reset */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7 /* Assert fundamental reset */
#define VFIO_EEH_PE_CONFIGURE 8 /* PE configuration */
#define VFIO_EEH_PE_INJECT_ERR 9 /* Inject EEH error */
#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
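/*
* Illustrative sketch: enable EEH for the PE backing the container and
* read back the PE state (GET_STATE returns the state as the ioctl
* return value).
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_eeh_enable_and_check(int container)
{
        struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };

        op.op = VFIO_EEH_PE_ENABLE;
        if (ioctl(container, VFIO_EEH_PE_OP, &op))
                return -1;

        op.op = VFIO_EEH_PE_GET_STATE;
        return ioctl(container, VFIO_EEH_PE_OP, &op); /* state, or -1 */
}
#endif /* VFIO_USAGE_EXAMPLE */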
/**
* VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
*
* Registers user space memory where DMA is allowed. It pins
* user pages and does the locked memory accounting so that
* subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
* run faster.
*/
struct vfio_iommu_spapr_register_memory {
__u32 argsz;
__u32 flags;
__u64 vaddr; /* Process virtual address */
__u64 size; /* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
/**
* VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
*
* Unregisters user space memory registered with
* VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
* Uses vfio_iommu_spapr_register_memory for parameters.
*/
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)
/**
* VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
*
* Creates an additional TCE table and programs it (sets a new DMA window)
* to every IOMMU group in the container. It receives page shift, window
* size and number of levels in the TCE table being created.
*
* It allocates the new DMA window and returns its offset on the PCI bus.
*/
struct vfio_iommu_spapr_tce_create {
__u32 argsz;
__u32 flags;
/* in */
__u32 page_shift;
__u32 __resv1;
__u64 window_size;
__u32 levels;
__u32 __resv2;
/* out */
__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
/**
* VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
*
* Unprograms a TCE table from all groups in the container and destroys it.
* It receives a PCI bus offset as a window id.
*/
struct vfio_iommu_spapr_tce_remove {
__u32 argsz;
__u32 flags;
/* in */
__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)
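/*
* Illustrative sketch for a SPAPR v2 container: preregister memory,
* create a DMA window, and tear both down again. The 64K page shift,
* 1GB window size and single level are arbitrary example values.
*/
#ifdef VFIO_USAGE_EXAMPLE
static int example_spapr_window(int container, void *mem, __u64 size)
{
        struct vfio_iommu_spapr_register_memory reg = { .argsz = sizeof(reg) };
        struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
        struct vfio_iommu_spapr_tce_remove remove = { .argsz = sizeof(remove) };

        reg.vaddr = (__u64)(unsigned long)mem;
        reg.size = size;
        if (ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg))
                return -1;

        create.page_shift = 16;          /* 64K IOMMU pages */
        create.window_size = 1ULL << 30; /* 1GB window */
        create.levels = 1;               /* single-level TCE table */
        if (ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
                return -1;

        /* DMA may now be mapped within [start_addr, start_addr + 1GB) */

        remove.start_addr = create.start_addr;
        if (ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove))
                return -1;

        return ioctl(container, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
}
#endif /* VFIO_USAGE_EXAMPLE */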
/* ***************************************************************** */
#endif /* _UAPIVFIO_H */