2010-08-30 17:24:10 +07:00
|
|
|
/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
|
|
|
|
|
2010-08-30 17:26:33 +07:00
|
|
|
#include <linux/ratelimit.h>
|
|
|
|
|
2011-11-30 16:43:24 +07:00
|
|
|
/*
 * Human-readable names for the AUDIT_* audit points; indexed by
 * kvm->arch.audit_point when audit_printk() formats a report.
 */
char const *audit_point_name[] = {
	"pre page fault",
	"post page fault",
	"pre pte write",
	"post pte write",
	"pre sync",
	"post sync"
};
|
|
|
|
|
2010-12-23 15:08:35 +07:00
|
|
|
/*
 * Report an audit failure, prefixed with the name of the audit point
 * that was active (kvm->arch.audit_point) when the problem was found.
 */
#define audit_printk(kvm, fmt, args...)		\
	printk(KERN_ERR "audit: (%s) error: "	\
		fmt, audit_point_name[kvm->arch.audit_point], ##args)
|
2010-08-30 17:24:10 +07:00
|
|
|
|
2010-08-30 17:25:51 +07:00
|
|
|
typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
|
2010-08-30 17:24:10 +07:00
|
|
|
|
2010-08-30 17:25:51 +07:00
|
|
|
static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
|
|
|
|
inspect_spte_fn fn, int level)
|
2010-08-30 17:24:10 +07:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
|
2010-08-30 17:25:51 +07:00
|
|
|
u64 *ent = sp->spt;
|
|
|
|
|
|
|
|
fn(vcpu, ent + i, level);
|
|
|
|
|
|
|
|
if (is_shadow_present_pte(ent[i]) &&
|
|
|
|
!is_last_spte(ent[i], level)) {
|
|
|
|
struct kvm_mmu_page *child;
|
|
|
|
|
|
|
|
child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
|
|
|
|
__mmu_spte_walk(vcpu, child, fn, level - 1);
|
2010-08-30 17:24:10 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Walk every SPTE reachable from @vcpu's current MMU root(s), applying
 * @fn to each entry.
 *
 * With a 4-level (or deeper) paging root, the single root page covers
 * the whole tree; otherwise the four PAE root entries are each walked
 * at level 2.  Does nothing when no root is currently valid.
 *
 * Fix: dropped the redundant trailing "return;" at the end of this
 * void function.
 */
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		/* PAE root entries may be absent; walk only valid ones. */
		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}
}
|
|
|
|
|
2010-08-30 17:25:03 +07:00
|
|
|
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);
|
|
|
|
|
|
|
|
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
|
|
|
|
{
|
|
|
|
struct kvm_mmu_page *sp;
|
|
|
|
|
|
|
|
list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
|
|
|
|
fn(kvm, sp);
|
|
|
|
}
|
|
|
|
|
2010-08-30 17:25:51 +07:00
|
|
|
/*
 * Verify that a leaf SPTE maps the host physical address that its guest
 * frame number currently resolves to, reporting any mismatch.  Also
 * reports an unsync shadow page seen above the last level, which must
 * never happen.
 *
 * Fix: the mismatch report's format string ended in "%llxn" — the
 * newline escape was missing a backslash, so consecutive reports ran
 * together on one log line.  Now "%llx\n".
 */
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	kvm_pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync) {
		/* Only last-level shadow pages may legitimately be unsync. */
		if (level != PT_PAGE_TABLE_LEVEL) {
			audit_printk(vcpu->kvm, "unsync sp: %p "
				     "level = %d\n", sp, level);
			return;
		}
	}

	/* Only present leaf SPTEs carry a translation worth checking. */
	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);

	if (is_error_pfn(pfn))
		return;

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
			     hpa, *sptep);
}
|
|
|
|
|
2010-08-30 17:25:51 +07:00
|
|
|
/*
 * Check that the gfn mapped by @sptep has a memslot and that the slot's
 * rmap chain is non-empty (i.e. this mapping is reachable via rmap).
 * Failures are reported (rate-limited) with a stack dump.
 */
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	/* At most 10 reports every 5 seconds, shared across all callers. */
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *rev_sp;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;
	gfn_t gfn;

	/* Recover the shadow page and the gfn this SPTE translates. */
	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	slots = kvm_memslots_for_spte_role(kvm, rev_sp->role);
	slot = __gfn_to_memslot(slots, gfn);
	if (!slot) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
		dump_stack();
		return;
	}

	/* An empty rmap head means this mapping is untracked — report it. */
	rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);
	if (!rmap_head->val) {
		if (!__ratelimit(&ratelimit_state))
			return;
		audit_printk(kvm, "no rmap for writable spte %llx\n",
			     *sptep);
		dump_stack();
	}
}
|
|
|
|
|
2010-08-30 17:25:51 +07:00
|
|
|
/*
 * Per-SPTE callback: verify rmap coverage for present leaf SPTEs only.
 */
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (!is_shadow_present_pte(*sptep))
		return;
	if (!is_last_spte(*sptep, level))
		return;

	inspect_spte_has_rmap(vcpu->kvm, sptep);
}
|
|
|
|
|
2010-09-27 17:09:29 +07:00
|
|
|
/*
 * After a root sync (AUDIT_POST_SYNC) no shadow page should still be
 * marked unsync; report any that are.
 */
static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));

	if (vcpu->kvm->arch.audit_point != AUDIT_POST_SYNC)
		return;
	if (!sp->unsync)
		return;

	audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync root.\n", sp);
}
|
|
|
|
|
2010-08-30 17:25:03 +07:00
|
|
|
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
|
2010-08-30 17:24:10 +07:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2010-08-30 17:25:03 +07:00
|
|
|
if (sp->role.level != PT_PAGE_TABLE_LEVEL)
|
|
|
|
return;
|
2010-08-30 17:24:10 +07:00
|
|
|
|
2010-08-30 17:25:03 +07:00
|
|
|
for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
|
2015-11-20 15:44:55 +07:00
|
|
|
if (!is_shadow_present_pte(sp->spt[i]))
|
2010-08-30 17:24:10 +07:00
|
|
|
continue;
|
|
|
|
|
2010-08-30 17:25:03 +07:00
|
|
|
inspect_spte_has_rmap(kvm, sp->spt + i);
|
2010-08-30 17:24:10 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-09-27 17:09:29 +07:00
|
|
|
/*
 * A synced, indirect, valid shadow page must be write-protected: report
 * any writable host mapping of its gfn found via the rmap chain.
 */
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_rmap_head *rmap_head;
	u64 *sptep;
	struct rmap_iterator iter;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *slot;

	/* Direct, unsync or invalid pages are exempt from write protection. */
	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slots = kvm_memslots_for_spte_role(kvm, sp->role);
	slot = __gfn_to_memslot(slots, sp->gfn);
	rmap_head = __gfn_to_rmap(sp->gfn, PT_PAGE_TABLE_LEVEL, slot);

	/* Scan every SPTE mapping this gfn; none may be writable. */
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		if (is_writable_pte(*sptep))
			audit_printk(kvm, "shadow page has writable "
				     "mappings: gfn %llx role %x\n",
				     sp->gfn, sp->role.word);
	}
}
|
|
|
|
|
2010-08-30 17:25:03 +07:00
|
|
|
/* Run all per-shadow-page audits on @sp. */
static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}
|
|
|
|
|
|
|
|
/* Audit every shadow page currently on the active list. */
static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}
|
|
|
|
|
2010-08-30 17:25:51 +07:00
|
|
|
/* Per-SPTE audit callback: run every SPTE-level check in turn. */
static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
	audit_spte_after_sync(vcpu, sptep, level);
}
|
|
|
|
|
|
|
|
/* Audit all SPTEs reachable from @vcpu's current MMU roots. */
static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}
|
|
|
|
|
2011-11-28 19:41:00 +07:00
|
|
|
/* Whether MMU auditing is currently enabled (set via the module param). */
static bool mmu_audit;
/* Static key so kvm_mmu_audit() is a patched-out no-op when disabled. */
static struct static_key mmu_audit_key;
|
2011-11-28 19:41:00 +07:00
|
|
|
|
2011-11-30 16:43:24 +07:00
|
|
|
/*
 * Slow path of kvm_mmu_audit(): rate-limited full audit at audit point
 * @point.  Records the point in kvm->arch.audit_point (so audit_printk()
 * can name it), then audits all active shadow pages and every SPTE
 * reachable from the vcpu's roots.
 */
static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	/* At most 10 full audits every 5 seconds. */
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!__ratelimit(&ratelimit_state))
		return;

	vcpu->kvm->arch.audit_point = point;
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}
|
|
|
|
|
|
|
|
/*
 * Audit entry point: the static key keeps this a near-free no-op unless
 * auditing has been enabled via the mmu_audit module parameter.
 */
static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	if (static_key_false((&mmu_audit_key)))
		__kvm_mmu_audit(vcpu, point);
}
|
|
|
|
|
|
|
|
static void mmu_audit_enable(void)
|
|
|
|
{
|
|
|
|
if (mmu_audit)
|
|
|
|
return;
|
|
|
|
|
2012-02-24 14:31:31 +07:00
|
|
|
static_key_slow_inc(&mmu_audit_key);
|
2010-08-30 17:24:10 +07:00
|
|
|
mmu_audit = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mmu_audit_disable(void)
|
|
|
|
{
|
|
|
|
if (!mmu_audit)
|
|
|
|
return;
|
|
|
|
|
2012-02-24 14:31:31 +07:00
|
|
|
static_key_slow_dec(&mmu_audit_key);
|
2010-08-30 17:24:10 +07:00
|
|
|
mmu_audit = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
unsigned long enable;
|
|
|
|
|
2014-08-09 04:24:03 +07:00
|
|
|
ret = kstrtoul(val, 10, &enable);
|
2010-08-30 17:24:10 +07:00
|
|
|
if (ret < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
switch (enable) {
|
|
|
|
case 0:
|
|
|
|
mmu_audit_disable();
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
mmu_audit_enable();
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-27 08:39:38 +07:00
|
|
|
/* Parameter ops: custom setter (validates 0/1), standard bool getter. */
static const struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

/* Expose "mmu_audit" as a writable (0644) module parameter. */
arch_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
|