#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_MAIN_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
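/*
 * ERSN() pairs an exit-reason constant with its printable name using the
 * ## paste and # stringify operators; e.g. ERSN(IO) expands to
 * { KVM_EXIT_IO, "KVM_EXIT_IO" }, which __print_symbolic() below uses to
 * render numeric exit reasons as readable strings.
 */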

#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH)

TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);
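/*
 * Each TRACE_EVENT() expands into a trace_<name>() helper that the rest of
 * KVM calls at the point of interest. As a hedged sketch (the call site is
 * illustrative, not part of this header), the userspace-exit event fires
 * when a vcpu returns to its host thread:
 *
 *	r = vcpu_run(vcpu);
 *	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
 *
 * A negative errno selects the "restart"/"error" strings in TP_printk();
 * otherwise the reason is printed symbolically via kvm_trace_exit_reason.
 */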

TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
		__entry->valid		= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
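/*
 * Hedged sketch of how the halt-polling path in kvm_vcpu_block() might
 * emit this event once polling or waiting finishes (the variable and
 * helper names are assumptions, not definitions in this header):
 *
 *	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 *	trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
 *
 * "waited" distinguishes a real schedule-out from a successful poll, and
 * "valid" records whether the wakeup was one the guest actually wanted.
 */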

#if defined(CONFIG_HAVE_KVM_IRQFD)
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode		\
	{0x0, "Fixed"},			\
	{0x1, "LowPrio"},		\
	{0x2, "SMI"},			\
	{0x3, "Res3"},			\
	{0x4, "NMI"},			\
	{0x5, "INIT"},			\
	{0x6, "SIPI"},			\
	{0x7, "ExtINT"}

TRACE_EVENT(kvm_ioapic_set_irq,
	    TP_PROTO(__u64 e, int pin, bool coalesced),
	    TP_ARGS(e, pin, coalesced),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
		__field(	int,		pin		)
		__field(	bool,		coalesced	)
	),

	TP_fast_assign(
		__entry->e		= e;
		__entry->pin		= pin;
		__entry->coalesced	= coalesced;
	),

	TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
		  __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "",
		  __entry->coalesced ? " (coalesced)" : "")
);
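/*
 * The TP_printk() above decodes the 64-bit IOAPIC redirection entry 'e'
 * field by field: bits 63:56 are the destination APIC ID, bits 7:0 the
 * vector, bits 10:8 the delivery mode (see kvm_deliver_mode), bit 11 the
 * destination mode (logical/physical), bit 15 the trigger mode
 * (level/edge) and bit 16 the mask bit.
 */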

TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
	    TP_PROTO(__u64 e),
	    TP_ARGS(e),

	TP_STRUCT__entry(
		__field(	__u64,		e		)
	),

	TP_fast_assign(
		__entry->e		= e;
	),

	TP_printk("dst %x vec %u (%s|%s|%s%s)",
		  (u8)(__entry->e >> 56), (u8)__entry->e,
		  __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->e & (1<<11)) ? "logical" : "physical",
		  (__entry->e & (1<<15)) ? "level" : "edge",
		  (__entry->e & (1<<16)) ? "|masked" : "")
);

TRACE_EVENT(kvm_msi_set_irq,
	    TP_PROTO(__u64 address, __u64 data),
	    TP_ARGS(address, data),

	TP_STRUCT__entry(
		__field(	__u64,		address		)
		__field(	__u64,		data		)
	),

	TP_fast_assign(
		__entry->address	= address;
		__entry->data		= data;
	),

	TP_printk("dst %llx vec %u (%s|%s|%s%s)",
		  (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
		  (u8)__entry->data,
		  __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
		  (__entry->address & (1<<2)) ? "logical" : "physical",
		  (__entry->data & (1<<15)) ? "level" : "edge",
		  (__entry->address & (1<<3)) ? "|rh" : "")
);
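/*
 * The destination printed above is reassembled from two places: MSI
 * address bits 19:12 hold the classic 8-bit destination ID, and (an
 * assumption based on the shift used here, matching KVM's extended
 * encoding for x2APIC targets) the upper address bits supply the
 * remaining destination bits via the (address >> 32) term. Address bit 2
 * selects logical vs physical destination mode and bit 3 is the
 * redirection hint; the data word carries the vector (bits 7:0),
 * delivery mode (10:8) and trigger mode (bit 15).
 */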

#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* defined(__KVM_HAVE_IOAPIC) */

#if defined(CONFIG_HAVE_KVM_IRQFD)

#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif

TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);

#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */

#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, u64 val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= val;
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
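/*
 * Hedged sketch of an emitting call site (illustrative; the real callers
 * live in the MMIO emulation paths, not in this header): a guest write
 * intercepted at guest physical address gpa would be logged as
 *
 *	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, gpa, val);
 *
 * while an "unsatisfied-read" marks a read that had to exit to userspace
 * before a value was available.
 */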

#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	load		)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);

TRACE_EVENT(kvm_age_page,
	TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
	TP_ARGS(gfn, level, slot, ref),

	TP_STRUCT__entry(
		__field(	u64,	hva		)
		__field(	u64,	gfn		)
		__field(	u8,	level		)
		__field(	u8,	referenced	)
	),

	TP_fast_assign(
		__entry->gfn		= gfn;
		__entry->level		= level;
		__entry->hva		= ((gfn - slot->base_gfn) <<
					    PAGE_SHIFT) + slot->userspace_addr;
		__entry->referenced	= ref;
	),

	TP_printk("hva %llx gfn %llx level %u %s",
		  __entry->hva, __entry->gfn, __entry->level,
		  __entry->referenced ? "YOUNG" : "OLD")
);

#ifdef CONFIG_KVM_ASYNC_PF
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
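/*
 * DECLARE_EVENT_CLASS() factors the entry layout, assignment and format
 * string out once; each DEFINE_EVENT() below then stamps out a named
 * tracepoint sharing that code, so kvm_try_async_get_page and
 * kvm_async_pf_doublefault cost one event-class body instead of two.
 */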

DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
	),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
	),

	TP_printk("gva %#llx address %#lx", __entry->gva,
		  __entry->address)
);

#endif /* CONFIG_KVM_ASYNC_PF */

TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow		= grow;
		__entry->vcpu_id	= vcpu_id;
		__entry->new		= new;
		__entry->old		= old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
		  __entry->vcpu_id,
		  __entry->new,
		  __entry->grow ? "grow" : "shrink",
		  __entry->old)
);

#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
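/*
 * These wrappers keep call sites readable by encoding the direction in
 * the name rather than in a bool argument. Hedged usage sketch (the
 * variable names are assumptions, not defined here): when the adaptive
 * halt-polling logic raises a vcpu's polling window it would report
 *
 *	trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, new_ns, old_ns);
 *
 * and the shrink path mirrors it with trace_kvm_halt_poll_ns_shrink().
 */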

#endif /* _TRACE_KVM_MAIN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>