KVM: s390: clean up cmma_enable check
As we already only enable CMMA when userspace requests it, we can safely move the additional checks to the request handler and avoid doing them multiple times. This also tells userspace if CMMA is available.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
commit e6db1d61c7
parent 0df30abcd6
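For context, userspace requests CMMA through the KVM device-attribute interface on the VM fd; after this change the machine checks live in that request handler, so the ioctl's return value is what tells userspace whether CMMA is actually available. Below is a minimal userspace sketch for s390, assuming a VM file descriptor obtained via KVM_CREATE_VM; the helper name enable_cmma and the variable vm_fd are illustrative, not part of this patch.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>	/* on s390 this pulls in the KVM_S390_VM_* attribute defines */

/*
 * Illustrative helper, not part of this patch: ask KVM to enable CMMA
 * for the VM behind vm_fd.  Must be called before any vcpu is created.
 */
static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
	};

	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0) {
		/*
		 * With this patch the availability check happens here:
		 * EINVAL means the host cannot do CMMA (not an LPAR, or
		 * no EDAT-1); EBUSY means vcpus already exist.
		 */
		fprintf(stderr, "CMMA not enabled: %s\n", strerror(errno));
		return -1;
	}
	return 0;
}

Calling this before creating any vcpu mirrors the online_vcpus == 0 check in the kvm_s390_set_mem_control hunk below.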
arch/s390/kvm/kvm-s390.c

@@ -330,6 +330,11 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
 	unsigned int idx;
 	switch (attr->attr) {
 	case KVM_S390_VM_MEM_ENABLE_CMMA:
+		/* enable CMMA only for z10 and later (EDAT_1) */
+		ret = -EINVAL;
+		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
+			break;
+
 		ret = -EBUSY;
 		mutex_lock(&kvm->lock);
 		if (atomic_read(&kvm->online_vcpus) == 0) {
@@ -1133,7 +1138,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (kvm_is_ucontrol(vcpu->kvm))
 		gmap_free(vcpu->arch.gmap);
 
-	if (kvm_s390_cmma_enabled(vcpu->kvm))
+	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
 	free_page((unsigned long)(vcpu->arch.sie_block));
 
@@ -1344,7 +1349,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	}
 	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 
-	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
+	if (vcpu->kvm->arch.use_cmma) {
 		rc = kvm_s390_vcpu_setup_cmma(vcpu);
 		if (rc)
 			return rc;
@@ -1725,18 +1730,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	return rc;
 }
 
-bool kvm_s390_cmma_enabled(struct kvm *kvm)
-{
-	if (!MACHINE_IS_LPAR)
-		return false;
-	/* only enable for z10 and later */
-	if (!MACHINE_HAS_EDAT1)
-		return false;
-	if (!kvm->arch.use_cmma)
-		return false;
-	return true;
-}
-
 static bool ibs_enabled(struct kvm_vcpu *vcpu)
 {
 	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;

arch/s390/kvm/kvm-s390.h

@@ -217,8 +217,6 @@ void exit_sie(struct kvm_vcpu *vcpu);
 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
-/* is cmma enabled */
-bool kvm_s390_cmma_enabled(struct kvm *kvm);
 unsigned long kvm_s390_fac_list_mask_size(void);
 extern unsigned long kvm_s390_fac_list_mask[];
 

arch/s390/kvm/priv.c

@@ -761,7 +761,7 @@ static int handle_essa(struct kvm_vcpu *vcpu)
 	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
 	gmap = vcpu->arch.gmap;
 	vcpu->stat.instruction_essa++;
-	if (!kvm_s390_cmma_enabled(vcpu->kvm))
+	if (!vcpu->kvm->arch.use_cmma)
 		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
 
 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)