Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-28 11:18:45 +07:00.
9a5788c615
At present, on Power systems with Protected Execution Facility hardware and an ultravisor, a KVM guest can transition to being a secure guest at will. Userspace (QEMU) has no way of knowing whether a host system is capable of running secure guests. This will present a problem in future when the ultravisor is capable of migrating secure guests from one host to another, because virtualization management software will have no way to ensure that secure guests only run in domains where all of the hosts can support secure guests. This adds a VM capability which has two functions: (a) userspace can query it to find out whether the host can support secure guests, and (b) userspace can enable it for a guest, which allows that guest to become a secure guest. If userspace does not enable it, KVM will return an error when the ultravisor does the hypercall that indicates that the guest is starting to transition to a secure guest. The ultravisor will then abort the transition and the guest will terminate. Signed-off-by: Paul Mackerras <paulus@ozlabs.org> Reviewed-by: David Gibson <david@gibson.dropbear.id.au> Reviewed-by: Ram Pai <linuxram@us.ibm.com>
87 lines
2.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __ASM_KVM_BOOK3S_UVMEM_H__
|
|
#define __ASM_KVM_BOOK3S_UVMEM_H__
|
|
|
|
#ifdef CONFIG_PPC_UV
|
|
int kvmppc_uvmem_init(void);
|
|
void kvmppc_uvmem_free(void);
|
|
bool kvmppc_uvmem_available(void);
|
|
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
|
|
void kvmppc_uvmem_slot_free(struct kvm *kvm,
|
|
const struct kvm_memory_slot *slot);
|
|
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
|
|
unsigned long gra,
|
|
unsigned long flags,
|
|
unsigned long page_shift);
|
|
unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
|
|
unsigned long gra,
|
|
unsigned long flags,
|
|
unsigned long page_shift);
|
|
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
|
|
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
|
|
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
|
|
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
|
|
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
|
struct kvm *kvm, bool skip_page_out);
|
|
#else
|
|
static inline int kvmppc_uvmem_init(void)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline void kvmppc_uvmem_free(void) { }
|
|
|
|
static inline bool kvmppc_uvmem_available(void)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
static inline int
|
|
kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline void
|
|
kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { }
|
|
|
|
static inline unsigned long
|
|
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
|
|
unsigned long flags, unsigned long page_shift)
|
|
{
|
|
return H_UNSUPPORTED;
|
|
}
|
|
|
|
static inline unsigned long
|
|
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
|
|
unsigned long flags, unsigned long page_shift)
|
|
{
|
|
return H_UNSUPPORTED;
|
|
}
|
|
|
|
static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
|
|
{
|
|
return H_UNSUPPORTED;
|
|
}
|
|
|
|
static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
|
|
{
|
|
return H_UNSUPPORTED;
|
|
}
|
|
|
|
static inline unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
|
|
{
|
|
return H_UNSUPPORTED;
|
|
}
|
|
|
|
static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
|
|
{
|
|
return -EFAULT;
|
|
}
|
|
|
|
static inline void
|
|
kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
|
|
struct kvm *kvm, bool skip_page_out) { }
|
|
#endif /* CONFIG_PPC_UV */
|
|
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
|