mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-25 21:20:51 +07:00)
Merge tag 'kvm-s390-next-4.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

KVM: s390 features and fixes for 4.5 (kvm/next)

Some small cleanups
- use assignment instead of memcpy
- use %pK for kernel pointers

Changes regarding guest memory size
- Fix an off-by-one error in our guest memory interface (we might use
  unnecessarily big page tables, e.g. 3 levels for a 2 GB guest instead
  of 2 levels)
- We now ask the machine about the max. supported guest address and
  limit accordingly.
commit da3f7ca3e8
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virtual/kvm/devices/vm.txt
@@ -37,7 +37,8 @@ Returns: -EFAULT if the given address is not accessible
 Allows userspace to query the actual limit and set a new limit for
 the maximum guest memory size. The limit will be rounded up to
 2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
-the number of page table levels.
+the number of page table levels. In the case that there is no limit we will set
+the limit to KVM_S390_NO_MEM_LIMIT (U64_MAX).
 
 2. GROUP: KVM_S390_VM_CPU_MODEL
 Architectures: s390
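As an aside on this interface: a minimal userspace sketch of querying and setting the limit through the KVM_S390_VM_MEM_CTRL attribute group, assuming a vm_fd obtained via KVM_CREATE_VM (error handling trimmed; a sketch, not reference code):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: query the current guest memory limit, then request a new one.
 * Setting only succeeds before the first vcpu exists (-EBUSY otherwise). */
static int change_mem_limit(int vm_fd, uint64_t new_limit)
{
        uint64_t limit;
        struct kvm_device_attr attr = {
                .group = KVM_S390_VM_MEM_CTRL,
                .attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
                .addr  = (uint64_t)(uintptr_t)&limit,
        };

        if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
                return -1;
        /* KVM_S390_NO_MEM_LIMIT (U64_MAX) now means "no limit" */
        printf("current limit: 0x%llx\n", (unsigned long long)limit);

        limit = new_limit;
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}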
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -627,6 +627,7 @@ struct kvm_arch{
         struct kvm_s390_float_interrupt float_int;
         struct kvm_device *flic;
         struct gmap *gmap;
+        unsigned long mem_limit;
         int css_support;
         int use_irqchip;
         int use_cmma;
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -66,6 +66,8 @@ struct kvm_s390_io_adapter_req {
 #define KVM_S390_VM_MEM_CLR_CMMA        1
 #define KVM_S390_VM_MEM_LIMIT_SIZE      2
 
+#define KVM_S390_NO_MEM_LIMIT           U64_MAX
+
 /* kvm attributes for KVM_S390_VM_TOD */
 #define KVM_S390_VM_TOD_LOW             0
 #define KVM_S390_VM_TOD_HIGH            1
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -378,8 +378,8 @@ static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
         case KVM_S390_VM_MEM_LIMIT_SIZE:
                 ret = 0;
                 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
-                         kvm->arch.gmap->asce_end);
-                if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
+                         kvm->arch.mem_limit);
+                if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
                         ret = -EFAULT;
                 break;
         default:
@@ -431,9 +431,17 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
                 if (get_user(new_limit, (u64 __user *)attr->addr))
                         return -EFAULT;
 
-                if (new_limit > kvm->arch.gmap->asce_end)
+                if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
+                    new_limit > kvm->arch.mem_limit)
                         return -E2BIG;
 
+                if (!new_limit)
+                        return -EINVAL;
+
+                /* gmap_alloc takes last usable address */
+                if (new_limit != KVM_S390_NO_MEM_LIMIT)
+                        new_limit -= 1;
+
                 ret = -EBUSY;
                 mutex_lock(&kvm->lock);
                 if (atomic_read(&kvm->online_vcpus) == 0) {
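The documentation hunk earlier says the limit is rounded up to 2048 MB, 4096 GB or 8192 TB depending on how many page-table levels are needed; a sketch of that rule (an illustration of the documented behaviour, not the kernel's code):

#include <stdint.h>

/* Round a requested limit up to the boundary implied by the number of
 * page-table levels, per the documented 2048 MB / 4096 GB / 8192 TB steps. */
static uint64_t round_mem_limit(uint64_t limit)
{
        if (limit <= (1ULL << 31))      /* 2048 MB: 2-level page table */
                return 1ULL << 31;
        if (limit <= (1ULL << 42))      /* 4096 GB: 3-level page table */
                return 1ULL << 42;
        return 1ULL << 53;              /* 8192 TB: 4-level page table */
}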
@@ -450,7 +458,9 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
                         }
                 }
                 mutex_unlock(&kvm->lock);
-                VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
+                VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
+                VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
+                         (void *) kvm->arch.gmap->asce);
                 break;
         }
         default:
@@ -1172,8 +1182,14 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
         if (type & KVM_VM_S390_UCONTROL) {
                 kvm->arch.gmap = NULL;
+                kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
         } else {
-                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
+                if (sclp.hamax == U64_MAX)
+                        kvm->arch.mem_limit = TASK_MAX_SIZE;
+                else
+                        kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
+                                                    sclp.hamax + 1);
+                kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
                 if (!kvm->arch.gmap)
                         goto out_err;
                 kvm->arch.gmap->private = kvm;
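A worked example of the initialisation above, assuming the machine reports hamaxpow = 43 and using a stand-in value for TASK_MAX_SIZE (the 4-level boundary; the constant here is not the kernel macro):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_TASK_MAX_SIZE (1ULL << 53)      /* stand-in, not the kernel macro */

int main(void)
{
        uint64_t hamax = (1ULL << 43) - 1;      /* assumed SCLP report */
        uint64_t mem_limit = hamax + 1 < EXAMPLE_TASK_MAX_SIZE ?
                             hamax + 1 : EXAMPLE_TASK_MAX_SIZE;

        /* gmap_alloc() then receives mem_limit - 1, the last usable address */
        printf("mem_limit = 0x%llx\n", (unsigned long long)mem_limit);
        return 0;
}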
@@ -1185,7 +1201,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
         kvm->arch.epoch = 0;
 
         spin_lock_init(&kvm->arch.start_stop_lock);
-        KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
+        KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
 
         return 0;
 out_err:
@@ -1245,7 +1261,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                 gmap_free(kvm->arch.gmap);
         kvm_s390_destroy_adapters(kvm);
         kvm_s390_clear_float_irqs(kvm);
-        KVM_EVENT(3, "vm 0x%p destroyed", kvm);
+        KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
 }
 
 /* Section: vcpu related */
@@ -1349,7 +1365,8 @@ static int sca_switch_to_extended(struct kvm *kvm)
 
         free_page((unsigned long)old_sca);
 
-        VM_EVENT(kvm, 2, "Switched to ESCA (%p -> %p)", old_sca, kvm->arch.sca);
+        VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
+                 old_sca, kvm->arch.sca);
         return 0;
 }
 
@@ -1624,7 +1641,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
         rc = kvm_vcpu_init(vcpu, kvm, id);
         if (rc)
                 goto out_free_sie_block;
-        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+        VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
                 vcpu->arch.sie_block);
         trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
 
@@ -2120,7 +2137,8 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
          */
         kvm_check_async_pf_completion(vcpu);
 
-        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
+        vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
+        vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
 
         if (need_resched())
                 schedule();
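The memcpy being replaced copied 16 bytes, i.e. exactly the two adjacent u64 fields gg14 and gg15; the assignments state that explicitly. A standalone illustration with hypothetical stand-in structs (the real layouts live in the s390 KVM headers):

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-ins: the point is that gg14/gg15 are adjacent
 * u64 fields, so a 16-byte memcpy and two assignments do the same work. */
struct regs { uint64_t gprs[16]; };
struct sie  { uint64_t gg14, gg15; };

static void copy_old(struct sie *s, const struct regs *r)
{
        memcpy(&s->gg14, &r->gprs[14], 16);     /* copies gprs[14..15] */
}

static void copy_new(struct sie *s, const struct regs *r)
{
        s->gg14 = r->gprs[14];                  /* same effect, explicit */
        s->gg15 = r->gprs[15];
}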
@@ -2185,7 +2203,8 @@ static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
         if (guestdbg_enabled(vcpu))
                 kvm_s390_restore_guest_per_regs(vcpu);
 
-        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
+        vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
+        vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
 
         if (vcpu->arch.sie_block->icptcode > 0) {
                 int rc = kvm_handle_sie_intercept(vcpu);
@@ -2826,6 +2845,9 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
         if (mem->memory_size & 0xffffful)
                 return -EINVAL;
 
+        if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
+                return -EINVAL;
+
         return 0;
 }
 
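With this check in place, a memslot crossing the VM's mem_limit is rejected up front. A hedged userspace sketch of registering a slot that respects both the 1 MB alignment (the 0xfffff mask above) and the limit:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: register guest memory. On s390, memory_size must be 1 MB
 * aligned, and guest_phys_addr + memory_size must not exceed the VM's
 * memory limit, or the ioctl now fails with -EINVAL. */
static int add_slot(int vm_fd, uint64_t gpa, uint64_t size, void *hva)
{
        struct kvm_userspace_memory_region region = {
                .slot            = 0,
                .guest_phys_addr = gpa,
                .memory_size     = size,
                .userspace_addr  = (uint64_t)(uintptr_t)hva,
        };
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}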
--- a/arch/s390/kvm/trace-s390.h
+++ b/arch/s390/kvm/trace-s390.h
@@ -55,8 +55,8 @@ TRACE_EVENT(kvm_s390_create_vcpu,
                 __entry->sie_block = sie_block;
                 ),
 
-        TP_printk("create cpu %d at %p, sie block at %p", __entry->id,
-                  __entry->vcpu, __entry->sie_block)
+        TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
+                  __entry->id, __entry->vcpu, __entry->sie_block)
         );
 
 TRACE_EVENT(kvm_s390_destroy_vcpu,
@@ -254,7 +254,7 @@ TRACE_EVENT(kvm_s390_enable_css,
                 __entry->kvm = kvm;
                 ),
 
-        TP_printk("enabling channel I/O support (kvm @ %p)\n",
+        TP_printk("enabling channel I/O support (kvm @ %pK)\n",
                   __entry->kvm)
         );
 
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -133,7 +133,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 /**
  * gmap_alloc - allocate a guest address space
  * @mm: pointer to the parent mm_struct
- * @limit: maximum size of the gmap address space
+ * @limit: maximum address of the gmap address space
  *
  * Returns a guest address space structure.
  */
@@ -402,7 +402,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
         if ((from | to | len) & (PMD_SIZE - 1))
                 return -EINVAL;
         if (len == 0 || from + len < from || to + len < to ||
-            from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
+            from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
                 return -EINVAL;
 
         flush = 0;
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -40,7 +40,8 @@ struct read_info_sccb {
         u8      fac85;                  /* 85 */
         u8      _pad_86[91 - 86];       /* 86-90 */
         u8      flags;                  /* 91 */
-        u8      _pad_92[100 - 92];      /* 92-99 */
+        u8      _pad_92[99 - 92];       /* 92-98 */
+        u8      hamaxpow;               /* 99 */
         u32     rnsize2;                /* 100-103 */
         u64     rnmax2;                 /* 104-111 */
         u8      _pad_112[116 - 112];    /* 112-115 */
@@ -120,6 +121,11 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
         sclp.rzm <<= 20;
         sclp.ibc = sccb->ibc;
 
+        if (sccb->hamaxpow && sccb->hamaxpow < 64)
+                sclp.hamax = (1UL << sccb->hamaxpow) - 1;
+        else
+                sclp.hamax = U64_MAX;
+
         if (!sccb->hcpua) {
                 if (MACHINE_IS_VM)
                         sclp.max_cores = 64;
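One note on the guard above: a hamaxpow of 64 or more would make the shift undefined in C, hence the < 64 test. A self-contained sketch of the same parsing rule:

#include <stdint.h>

/* Derive the highest addressable guest byte from the SCLP-reported
 * power of two; mirrors the logic in the hunk above. */
static uint64_t parse_hamax(uint8_t hamaxpow)
{
        /* 0 means "not reported"; >= 64 would be an undefined shift */
        if (hamaxpow && hamaxpow < 64)
                return ((uint64_t)1 << hamaxpow) - 1;
        return UINT64_MAX;      /* treat as "no usable maximum" */
}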