/* SPDX-License-Identifier: GPL-2.0 */
|
|
|
|
#ifndef _ASM_X86_INVPCID
|
|
|
|
#define _ASM_X86_INVPCID
|
|
|
|
|
|
|
|
/*
 * Execute the INVPCID instruction.
 *
 * INVPCID takes its PCID and linear address through a 16-byte descriptor
 * in memory ({ pcid, addr } as two u64s here) and the operation type in
 * a register.
 *
 * @pcid: process-context identifier the flush applies to (callers pass 0
 *        for the whole-TLB flush types below).
 * @addr: linear address to invalidate (callers pass 0 for types that do
 *        not target a single address).
 * @type: one of the INVPCID_TYPE_* operation types.
 */
static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
	/* The in-memory descriptor operand: d[0] = pcid, d[1] = addr. */
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 */
	asm volatile("invpcid %[desc], %[type]"
		     :: [desc] "m" (desc), [type] "r" (type) : "memory");
}
|
|
|
|
|
|
|
|
/*
 * INVPCID operation types (the @type argument to __invpcid()).
 * The comments match the flush semantics of the helpers below.
 */
#define INVPCID_TYPE_INDIV_ADDR		0	/* one addr in one PCID, no globals */
#define INVPCID_TYPE_SINGLE_CTXT	1	/* all mappings of one PCID, no globals */
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2	/* all PCIDs, including globals */
#define INVPCID_TYPE_ALL_NON_GLOBAL	3	/* all PCIDs, except globals */
|
|
|
|
|
|
|
|
/*
 * Flush all mappings for a given pcid and addr, not including globals
 * (individual-address invalidation).
 */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}
|
|
|
|
|
|
|
|
/*
 * Flush all mappings for a given PCID, not including globals.
 * The address operand plays no role here, so 0 is passed.
 */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}
|
|
|
|
|
|
|
|
/*
 * Flush all mappings, including globals, for all PCIDs.
 * Neither the pcid nor the address operand is meaningful here, so both
 * are passed as 0.
 */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}
|
|
|
|
|
|
|
|
/*
 * Flush all mappings for all PCIDs except globals.
 * Neither the pcid nor the address operand is meaningful here, so both
 * are passed as 0.
 */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}
|
|
|
|
|
|
|
|
#endif /* _ASM_X86_INVPCID */
|