mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 09:25:19 +07:00
7beaa24ba4
- x86: miscellaneous fixes, AVIC support (local APIC virtualization, AMD version) - s390: polling for interrupts after a VCPU goes to halted state is now enabled for s390; use hardware provided information about facility bits that do not need any hypervisor activity, and other fixes for cpu models and facilities; improve perf output; floating interrupt controller improvements. - MIPS: miscellaneous fixes - PPC: bugfixes only - ARM: 16K page size support, generic firmware probing layer for timer and GIC Christoffer Dall (KVM-ARM maintainer) says: "There are a few changes in this pull request touching things outside KVM, but they should all carry the necessary acks and it made the merge process much easier to do it this way." though actually the irqchip maintainers' acks didn't make it into the patches. Marc Zyngier, who is both irqchip and KVM-ARM maintainer, later acked at http://mid.gmane.org/573351D1.4060303@arm.com "more formally and for documentation purposes". -----BEGIN PGP SIGNATURE----- Version: GnuPG v2.0.22 (GNU/Linux) iQEcBAABAgAGBQJXPJjyAAoJEL/70l94x66DhioH/j4fwQ0FmfPSM9PArzaFHQdx LNE3tU4+bobbsy1BJr4DiAaOUQn3DAgwUvGLWXdeLiOXtoWXBiFHKaxlqEsCA6iQ xcTH1TgfxsVoqGQ6bT9X/2GCx70heYpcWG3f+zqBy7ZfFmQykLAC/HwOr52VQL8f hUFi3YmTHcnorp0n5Xg+9r3+RBS4D/kTbtdn6+KCLnPJ0RcgNkI3/NcafTemoofw Tkv8+YYFNvKV13qlIfVqxMa0GwWI3pP6YaNKhaS5XO8Pu16HuuF1JthJsUBDzwBa RInp8R9MoXgsBYhLpz3jc9vWG7G9yDl5LehsD9KOUGOaFYJ7sQN+QZOusa6jFgA= =llO5 -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm Pull KVM updates from Paolo Bonzini: "Small release overall. x86: - miscellaneous fixes - AVIC support (local APIC virtualization, AMD version) s390: - polling for interrupts after a VCPU goes to halted state is now enabled for s390 - use hardware provided information about facility bits that do not need any hypervisor activity, and other fixes for cpu models and facilities - improve perf output - floating interrupt controller improvements. 
MIPS: - miscellaneous fixes PPC: - bugfixes only ARM: - 16K page size support - generic firmware probing layer for timer and GIC Christoffer Dall (KVM-ARM maintainer) says: "There are a few changes in this pull request touching things outside KVM, but they should all carry the necessary acks and it made the merge process much easier to do it this way." though actually the irqchip maintainers' acks didn't make it into the patches. Marc Zyngier, who is both irqchip and KVM-ARM maintainer, later acked at http://mid.gmane.org/573351D1.4060303@arm.com ('more formally and for documentation purposes')" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (82 commits) KVM: MTRR: remove MSR 0x2f8 KVM: x86: make hwapic_isr_update and hwapic_irr_update look the same svm: Manage vcpu load/unload when enable AVIC svm: Do not intercept CR8 when enable AVIC svm: Do not expose x2APIC when enable AVIC KVM: x86: Introducing kvm_x86_ops.apicv_post_state_restore svm: Add VMEXIT handlers for AVIC svm: Add interrupt injection via AVIC KVM: x86: Detect and Initialize AVIC support svm: Introduce new AVIC VMCB registers KVM: split kvm_vcpu_wake_up from kvm_vcpu_kick KVM: x86: Introducing kvm_x86_ops VCPU blocking/unblocking hooks KVM: x86: Introducing kvm_x86_ops VM init/destroy hooks KVM: x86: Rename kvm_apic_get_reg to kvm_lapic_get_reg KVM: x86: Misc LAPIC changes to expose helper functions KVM: shrink halt polling even more for invalid wakeups KVM: s390: set halt polling to 80 microseconds KVM: halt_polling: provide a way to qualify wakeups during poll KVM: PPC: Book3S HV: Re-enable XICS fast path for irqfd-generated interrupts kvm: Conditionally register IRQ bypass consumer ...
279 lines
9.2 KiB
C
279 lines
9.2 KiB
C
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_HWDEF_H
#define __ASM_PGTABLE_HWDEF_H

/*
 * Number of page-table levels required to address 'va_bits' wide
 * address, without section mapping. We resolve the top (va_bits - PAGE_SHIFT)
 * bits with (PAGE_SHIFT - 3) bits at each page table level. Hence:
 *
 *  levels = DIV_ROUND_UP((va_bits - PAGE_SHIFT), (PAGE_SHIFT - 3))
 *
 * where DIV_ROUND_UP(n, d) => (((n) + (d) - 1) / (d))
 *
 * We cannot include linux/kernel.h which defines DIV_ROUND_UP here
 * due to build issues. So we open code DIV_ROUND_UP here:
 *
 *	((((va_bits) - PAGE_SHIFT) + (PAGE_SHIFT - 3) - 1) / (PAGE_SHIFT - 3))
 *
 * which gets simplified as :
 */
#define ARM64_HW_PGTABLE_LEVELS(va_bits)	(((va_bits) - 4) / (PAGE_SHIFT - 3))

/*
 * Size mapped by an entry at level n ( 0 <= n <= 3)
 * We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
 * in the final page. The maximum number of translation levels supported by
 * the architecture is 4. Hence, starting at level n, we have further
 * ((4 - n) - 1) levels of translation excluding the offset within the page.
 * So, the total number of bits mapped by an entry at level n is :
 *
 *  ((4 - n) - 1) * (PAGE_SHIFT - 3) + PAGE_SHIFT
 *
 * Rearranging it a bit we get :
 *   (4 - n) * (PAGE_SHIFT - 3) + 3
 */
#define ARM64_HW_PGTABLE_LEVEL_SHIFT(n)	((PAGE_SHIFT - 3) * (4 - (n)) + 3)

/* Number of entries in a level-3 (PTE) table: 2^(PAGE_SHIFT - 3). */
#define PTRS_PER_PTE		(1 << (PAGE_SHIFT - 3))

/*
 * PMD_SHIFT determines the size a level 2 page table entry can map.
 * Only defined when a level-2 table actually exists (more than 2 levels).
 */
#if CONFIG_PGTABLE_LEVELS > 2
#define PMD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
#define PMD_SIZE		(_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PTRS_PER_PMD		PTRS_PER_PTE
#endif

/*
 * PUD_SHIFT determines the size a level 1 page table entry can map.
 * Only defined when a level-1 table actually exists (4 levels).
 */
#if CONFIG_PGTABLE_LEVELS > 3
#define PUD_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
#define PUD_SIZE		(_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK		(~(PUD_SIZE-1))
#define PTRS_PER_PUD		PTRS_PER_PTE
#endif

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map
 * (depending on the configuration, this level can be 0, 1 or 2).
 */
#define PGDIR_SHIFT		ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE		(_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
/* The top-level table resolves the remaining (VA_BITS - PGDIR_SHIFT) bits. */
#define PTRS_PER_PGD		(1 << (VA_BITS - PGDIR_SHIFT))

/*
 * Section address mask and size definitions.
 * A "section" is a block mapping at the PMD level.
 */
#define SECTION_SHIFT		PMD_SHIFT
#define SECTION_SIZE		(_AC(1, UL) << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))

/*
 * Contiguous page definitions.
 * CONT_*_SHIFT is log2 of the number of adjacent entries that may carry
 * the "contiguous" hint, per configured page size.
 */
#ifdef CONFIG_ARM64_64K_PAGES
#define CONT_PTE_SHIFT		5
#define CONT_PMD_SHIFT		5
#elif defined(CONFIG_ARM64_16K_PAGES)
#define CONT_PTE_SHIFT		7
#define CONT_PMD_SHIFT		5
#else
#define CONT_PTE_SHIFT		4
#define CONT_PMD_SHIFT		4
#endif

#define CONT_PTES		(1 << CONT_PTE_SHIFT)
#define CONT_PTE_SIZE		(CONT_PTES * PAGE_SIZE)
#define CONT_PTE_MASK		(~(CONT_PTE_SIZE - 1))
#define CONT_PMDS		(1 << CONT_PMD_SHIFT)
#define CONT_PMD_SIZE		(CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK		(~(CONT_PMD_SIZE - 1))
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))

/*
 * Hardware page table definitions.
 *
 * Level 1 descriptor (PUD).
 * All fields are typed pudval_t (earlier versions mistakenly used
 * pgdval_t for some of them; the values are identical either way).
 */
#define PUD_TYPE_TABLE		(_AT(pudval_t, 3) << 0)
#define PUD_TABLE_BIT		(_AT(pudval_t, 1) << 1)
#define PUD_TYPE_MASK		(_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT		(_AT(pudval_t, 1) << 0)

/*
 * Level 2 descriptor (PMD).
 */
#define PMD_TYPE_MASK		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT		(_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE		(_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
#define PMD_TABLE_BIT		(_AT(pmdval_t, 1) << 1)

/*
 * Section (block-mapping) attribute bits within the PMD descriptor.
 */
#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
#define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
#define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG		(_AT(pmdval_t, 1) << 11)
#define PMD_SECT_CONT		(_AT(pmdval_t, 1) << 52)
#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN		(_AT(pmdval_t, 1) << 54)

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PMD_ATTRINDX(t)		(_AT(pmdval_t, (t)) << 2)
#define PMD_ATTRINDX_MASK	(_AT(pmdval_t, 7) << 2)

/*
 * Level 3 descriptor (PTE).
 */
#define PTE_TYPE_MASK		(_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT		(_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE		(_AT(pteval_t, 3) << 0)
#define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
#define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
#define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
#define PTE_CONT		(_AT(pteval_t, 1) << 52)	/* Contiguous range */
#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		(_AT(pteval_t, (t)) << 2)
#define PTE_ATTRINDX_MASK	(_AT(pteval_t, 7) << 2)

/*
 * 2nd stage PTE definitions
 */
#define PTE_S2_RDONLY		(_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
#define PTE_S2_RDWR		(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define PMD_S2_RDONLY		(_AT(pmdval_t, 1) << 6)   /* HAP[2:1] */
#define PMD_S2_RDWR		(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Memory Attribute override for Stage-2 (MemAttr[3:0])
 */
#define PTE_S2_MEMATTR(t)	(_AT(pteval_t, (t)) << 2)
#define PTE_S2_MEMATTR_MASK	(_AT(pteval_t, 0xf) << 2)

/*
 * EL2/HYP PTE/PMD definitions
 */
#define PMD_HYP			PMD_SECT_USER
#define PTE_HYP			PTE_USER

/*
 * Highest possible physical address supported.
 */
#define PHYS_MASK_SHIFT		(48)
#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)

/*
 * TCR flags.
 * TnSZ encodes 64 minus the virtual address width for TTBRn.
 */
#define TCR_T0SZ_OFFSET		0
#define TCR_T1SZ_OFFSET		16
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ(x)		((UL(64) - (x)) << TCR_T1SZ_OFFSET)
#define TCR_TxSZ(x)		(TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH		6

/* Inner cacheability attributes for TTBR0/TTBR1 walks (IRGN0/IRGN1). */
#define TCR_IRGN0_SHIFT		8
#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)

#define TCR_IRGN1_SHIFT		24
#define TCR_IRGN1_MASK		(UL(3) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_NC		(UL(0) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WBWA		(UL(1) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WT		(UL(2) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WBnWA		(UL(3) << TCR_IRGN1_SHIFT)

/* Combined helpers applying the same attribute to both IRGN fields. */
#define TCR_IRGN_NC		(TCR_IRGN0_NC | TCR_IRGN1_NC)
#define TCR_IRGN_WBWA		(TCR_IRGN0_WBWA | TCR_IRGN1_WBWA)
#define TCR_IRGN_WT		(TCR_IRGN0_WT | TCR_IRGN1_WT)
#define TCR_IRGN_WBnWA		(TCR_IRGN0_WBnWA | TCR_IRGN1_WBnWA)
#define TCR_IRGN_MASK		(TCR_IRGN0_MASK | TCR_IRGN1_MASK)

/* Outer cacheability attributes for TTBR0/TTBR1 walks (ORGN0/ORGN1). */
#define TCR_ORGN0_SHIFT		10
#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)

#define TCR_ORGN1_SHIFT		26
#define TCR_ORGN1_MASK		(UL(3) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_NC		(UL(0) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WBWA		(UL(1) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WT		(UL(2) << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WBnWA		(UL(3) << TCR_ORGN1_SHIFT)

/* Combined helpers applying the same attribute to both ORGN fields. */
#define TCR_ORGN_NC		(TCR_ORGN0_NC | TCR_ORGN1_NC)
#define TCR_ORGN_WBWA		(TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)
#define TCR_ORGN_WT		(TCR_ORGN0_WT | TCR_ORGN1_WT)
#define TCR_ORGN_WBnWA		(TCR_ORGN0_WBnWA | TCR_ORGN1_WBnWA)
#define TCR_ORGN_MASK		(TCR_ORGN0_MASK | TCR_ORGN1_MASK)

/* Shareability attributes for TTBR0/TTBR1 walks (SH0/SH1). */
#define TCR_SH0_SHIFT		12
#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)

#define TCR_SH1_SHIFT		28
#define TCR_SH1_MASK		(UL(3) << TCR_SH1_SHIFT)
#define TCR_SH1_INNER		(UL(3) << TCR_SH1_SHIFT)
#define TCR_SHARED		(TCR_SH0_INNER | TCR_SH1_INNER)

/*
 * Translation granule size for TTBR0/TTBR1 (TG0/TG1).
 * Note the TG0 and TG1 fields use different encodings for the same
 * granule sizes.
 */
#define TCR_TG0_SHIFT		14
#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)

#define TCR_TG1_SHIFT		30
#define TCR_TG1_MASK		(UL(3) << TCR_TG1_SHIFT)
#define TCR_TG1_16K		(UL(1) << TCR_TG1_SHIFT)
#define TCR_TG1_4K		(UL(2) << TCR_TG1_SHIFT)
#define TCR_TG1_64K		(UL(3) << TCR_TG1_SHIFT)

/* Miscellaneous TCR feature bits. */
#define TCR_ASID16		(UL(1) << 36)	/* 16-bit ASIDs */
#define TCR_TBI0		(UL(1) << 37)	/* top-byte ignore, TTBR0 */
#define TCR_HA			(UL(1) << 39)	/* hardware Access flag update */
#define TCR_HD			(UL(1) << 40)	/* hardware Dirty flag update */

#endif	/* __ASM_PGTABLE_HWDEF_H */