f180bf12ac
This adds an ioctl to migrate a range of process address space to device memory. On platforms without a cache-coherent bus (x86, ARM, ...) this means the CPU cannot access that range directly; instead, a CPU access faults, which migrates the memory back to system memory. This is gated behind a staging flag so that the API can still evolve. Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
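As a sketch only (not part of this commit), userspace would drive the new interface roughly as follows; this assumes an open nouveau render-node fd on which DRM_IOCTL_NOUVEAU_SVM_INIT has already succeeded:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	/* Migrate [start, end) of our address space to GPU VRAM. */
	static int migrate_to_vram(int fd, uint64_t start, uint64_t end)
	{
		struct drm_nouveau_svm_bind bind;

		memset(&bind, 0, sizeof(bind));
		/* Pack command and target into the header bit-field. */
		bind.header  = (uint64_t)NOUVEAU_SVM_BIND_COMMAND__MIGRATE
			       << NOUVEAU_SVM_BIND_COMMAND_SHIFT;
		bind.header |= (uint64_t)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM
			       << NOUVEAU_SVM_BIND_TARGET_SHIFT;
		bind.va_start = start;
		bind.va_end   = end;

		/* Synchronous: on return, bind.result holds the number of
		 * pages that actually reached the target memory. */
		return ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_BIND, &bind);
	}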
/*
 * Copyright 2005 Stephane Marchesin.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__

#define DRM_NOUVEAU_EVENT_NVIF 0x80000000

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define NOUVEAU_GEM_DOMAIN_CPU       (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM      (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART      (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE  (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT  (1 << 4)

#define NOUVEAU_GEM_TILE_COMP        0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
#define NOUVEAU_GEM_TILE_16BPP       0x00000001
#define NOUVEAU_GEM_TILE_32BPP       0x00000002
#define NOUVEAU_GEM_TILE_ZETA        0x00000004
#define NOUVEAU_GEM_TILE_NONCONTIG   0x00000008

struct drm_nouveau_gem_info {
	__u32 handle;
	__u32 domain;
	__u64 size;
	__u64 offset;
	__u64 map_handle;
	__u32 tile_mode;
	__u32 tile_flags;
};

struct drm_nouveau_gem_new {
	struct drm_nouveau_gem_info info;
	__u32 channel_hint;
	__u32 align;
};
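
/*
 * Illustrative sketch, not part of the upstream header: a 1 MiB VRAM
 * allocation request would fill drm_nouveau_gem_new roughly like this
 * ("fd" is an open nouveau device file descriptor):
 *
 *	struct drm_nouveau_gem_new req = {};
 *
 *	req.info.size   = 1 << 20;
 *	req.info.domain = NOUVEAU_GEM_DOMAIN_VRAM;
 *	req.align       = 0x1000;
 *	if (ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_NEW, &req) == 0)
 *		// req.info.handle now names the new GEM object
 *
 * channel_hint may name a channel the buffer is expected to be used on.
 */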

#define NOUVEAU_GEM_MAX_BUFFERS 1024
struct drm_nouveau_gem_pushbuf_bo_presumed {
	__u32 valid;
	__u32 domain;
	__u64 offset;
};

struct drm_nouveau_gem_pushbuf_bo {
	__u64 user_priv;
	__u32 handle;
	__u32 read_domains;
	__u32 write_domains;
	__u32 valid_domains;
	struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
};

#define NOUVEAU_GEM_RELOC_LOW  (1 << 0)
#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
#define NOUVEAU_GEM_RELOC_OR   (1 << 2)
#define NOUVEAU_GEM_MAX_RELOCS 1024
struct drm_nouveau_gem_pushbuf_reloc {
	__u32 reloc_bo_index;
	__u32 reloc_bo_offset;
	__u32 bo_index;
	__u32 flags;
	__u32 data;
	__u32 vor;
	__u32 tor;
};

#define NOUVEAU_GEM_MAX_PUSH 512
struct drm_nouveau_gem_pushbuf_push {
	__u32 bo_index;
	__u32 pad;
	__u64 offset;
	__u64 length;
};

struct drm_nouveau_gem_pushbuf {
	__u32 channel;
	__u32 nr_buffers;
	__u64 buffers;
	__u32 nr_relocs;
	__u32 nr_push;
	__u64 relocs;
	__u64 push;
	__u32 suffix0;
	__u32 suffix1;
	__u64 vram_available;
	__u64 gart_available;
};
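
/*
 * Illustrative sketch, not part of the upstream header: a minimal single-
 * buffer submission points "buffers" and "push" at userspace arrays; the
 * kernel validates the buffer list and patches presumed offsets as needed:
 *
 *	struct drm_nouveau_gem_pushbuf_bo bo = {
 *		.handle        = handle,
 *		.valid_domains = NOUVEAU_GEM_DOMAIN_VRAM,
 *		.read_domains  = NOUVEAU_GEM_DOMAIN_VRAM,
 *	};
 *	struct drm_nouveau_gem_pushbuf_push push = {
 *		.bo_index = 0,		// index into the bo array
 *		.offset   = 0,
 *		.length   = push_len,	// bytes of commands in the buffer
 *	};
 *	struct drm_nouveau_gem_pushbuf req = {
 *		.channel    = channel,
 *		.nr_buffers = 1, .buffers = (__u64)(uintptr_t)&bo,
 *		.nr_push    = 1, .push    = (__u64)(uintptr_t)&push,
 *	};
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_PUSHBUF, &req);
 */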

#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
#define NOUVEAU_GEM_CPU_PREP_WRITE  0x00000004
struct drm_nouveau_gem_cpu_prep {
	__u32 handle;
	__u32 flags;
};

struct drm_nouveau_gem_cpu_fini {
	__u32 handle;
};
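
/*
 * Illustrative sketch, not part of the upstream header: CPU access to a
 * buffer is bracketed by the prep/fini ioctls so the kernel can wait for
 * outstanding GPU activity before the CPU touches the memory:
 *
 *	struct drm_nouveau_gem_cpu_prep prep = {
 *		.handle = handle,
 *		.flags  = NOUVEAU_GEM_CPU_PREP_WRITE,
 *	};
 *	struct drm_nouveau_gem_cpu_fini fini = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep);
 *	// ... access the buffer through its mmap() ...
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);
 */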

#define DRM_NOUVEAU_GETPARAM           0x00 /* deprecated */
#define DRM_NOUVEAU_SETPARAM           0x01 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_ALLOC      0x02 /* deprecated */
#define DRM_NOUVEAU_CHANNEL_FREE       0x03 /* deprecated */
#define DRM_NOUVEAU_GROBJ_ALLOC        0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC  0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE        0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF               0x07
#define DRM_NOUVEAU_SVM_INIT           0x08
#define DRM_NOUVEAU_SVM_BIND           0x09
#define DRM_NOUVEAU_GEM_NEW            0x40
#define DRM_NOUVEAU_GEM_PUSHBUF        0x41
#define DRM_NOUVEAU_GEM_CPU_PREP       0x42
#define DRM_NOUVEAU_GEM_CPU_FINI       0x43
#define DRM_NOUVEAU_GEM_INFO           0x44

struct drm_nouveau_svm_init {
	__u64 unmanaged_addr;
	__u64 unmanaged_size;
};
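
/*
 * Illustrative sketch, not part of the upstream header: SVM must be
 * initialised once per file descriptor before any bind; the unmanaged
 * window describes a range the driver keeps outside of SVM management
 * (the values here are hypothetical):
 *
 *	struct drm_nouveau_svm_init init = {
 *		.unmanaged_addr = unmanaged_base,
 *		.unmanaged_size = unmanaged_len,
 *	};
 *	ioctl(fd, DRM_IOCTL_NOUVEAU_SVM_INIT, &init);
 */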

struct drm_nouveau_svm_bind {
	__u64 header;
	__u64 va_start;
	__u64 va_end;
	__u64 npages;
	__u64 stride;
	__u64 result;
	__u64 reserved0;
	__u64 reserved1;
};

#define NOUVEAU_SVM_BIND_COMMAND_SHIFT   0
#define NOUVEAU_SVM_BIND_COMMAND_BITS    8
#define NOUVEAU_SVM_BIND_COMMAND_MASK    ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_PRIORITY_SHIFT  8
#define NOUVEAU_SVM_BIND_PRIORITY_BITS   8
#define NOUVEAU_SVM_BIND_PRIORITY_MASK   ((1 << 8) - 1)
#define NOUVEAU_SVM_BIND_TARGET_SHIFT    16
#define NOUVEAU_SVM_BIND_TARGET_BITS     32
#define NOUVEAU_SVM_BIND_TARGET_MASK     0xffffffff
/*
 * The following is used to validate the ioctl argument; userspace can also
 * use it to make sure that no bits are set beyond the fields known to a
 * given kernel version.
 */
#define NOUVEAU_SVM_BIND_VALID_BITS 48
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
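
/*
 * Illustrative sketch, not part of the upstream header: a header built as
 *
 *	((__u64)NOUVEAU_SVM_BIND_COMMAND__MIGRATE <<
 *	 NOUVEAU_SVM_BIND_COMMAND_SHIFT) |
 *	((__u64)NOUVEAU_SVM_BIND_TARGET__GPU_VRAM <<
 *	 NOUVEAU_SVM_BIND_TARGET_SHIFT)
 *
 * sets only bit 47, so (header & ~NOUVEAU_SVM_BIND_VALID_MASK) == 0 and it
 * passes the validity check on a kernel that knows these 48 bits.
 */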

/*
 * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migration to the target
 * memory.
 * result: number of pages successfully migrated to the target memory.
 */
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE               0

/*
 * NOUVEAU_SVM_BIND_TARGET__GPU_VRAM: target the GPU VRAM memory.
 */
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM               (1UL << 31)

#define DRM_IOCTL_NOUVEAU_SVM_INIT           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)

#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)

#if defined(__cplusplus)
}
#endif

#endif /* __NOUVEAU_DRM_H__ */