powerpc fixes for 4.20 #3
Merge tag 'powerpc-4.20-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
 "Two weeks worth of fixes since rc1.

   - I broke 16-byte alignment of the stack when we moved PPR into
     pt_regs. Despite being required by the ABI this broke almost
     nothing, we eventually hit it in code where GCC does arithmetic on
     the stack pointer assuming the bottom 4 bits are clear. Fix it by
     padding the in-kernel pt_regs by 8 bytes.

   - A couple of commits fixing minor bugs in the recent SLB rewrite.

   - A build fix related to tracepoints in KVM in some configurations.

   - Our old "IO workarounds" code written for Cell couldn't coexist in
     a kernel that runs on Power9 with the Radix MMU, fix that.

   - Remove the NPU DMA ops, these just printed a warning and should
     never have been called.

   - Suppress an overly chatty message triggered by CPU hotplug in some
     configs.

   - Two small selftest fixes.

  Thanks to: Alistair Popple, Gustavo Romero, Nicholas Piggin, Satheesh
  Rajendran, Scott Wood"

* tag 'powerpc-4.20-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
  selftests/powerpc: Adjust wild_bctr to build with old binutils
  powerpc/64: Fix kernel stack 16-byte alignment
  powerpc/numa: Suppress "VPHN is not supported" messages
  selftests/powerpc: Fix wild_bctr test to work on ppc64
  powerpc/io: Fix the IO workarounds code to work with Radix
  powerpc/mm/64s: Fix preempt warning in slb_allocate_kernel()
  KVM: PPC: Move and undef TRACE_INCLUDE_PATH/FILE
  powerpc/mm/64s: Only use slbfee on CPUs that support it
  powerpc/mm/64s: Use PPC_SLBFEE macro
  powerpc/mm/64s: Consolidate SLB assertions
  powerpc/powernv/npu: Remove NPU DMA ops
commit ef268de197
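A note on the first item in the list above: the ELF ABI requires the stack pointer to stay 16-byte aligned, so everything carved out of the stack on interrupt entry (pt_regs plus the frame built around it) has to keep a size that is a multiple of 16 bytes. A minimal userspace sketch of the invariant being restored; the struct and names here are illustrative, not the kernel's definitions:

	struct fake_regs {
		unsigned long gpr[32];
		unsigned long ppr;	/* the newly added field that broke alignment */
		unsigned long __pad;	/* the 8-byte pad that restores it */
	};

	/* An odd number of 8-byte fields would make this assertion fail. */
	_Static_assert(sizeof(struct fake_regs) % 16 == 0,
		       "interrupt frame must stay 16-byte aligned");

The in-tree equivalent of this check is the BUILD_BUG_ON() added to alloc_stack() in one of the hunks below.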
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * their hooks, a bitfield is reserved for use by the platform near the
  * top of MMIO addresses (not PIO, those have to cope the hard way).
  *
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
- *
- * The kernel virtual space is thus:
+ * The highest address in the kernel virtual space are:
  *
- *  0xD000000000000000 : vmalloc
- *  0xD000080000000000 : PCI PHB IO space
- *  0xD000080080000000 : ioremap
- *  0xD0000fffffffffff : end of ioremap region
+ *  d0003fffffffffff # with Hash MMU
+ *  c00fffffffffffff # with Radix MMU
  *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  */

 #ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK	0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT	48
+#define PCI_IO_IND_TOKEN_SHIFT	52
+#define PCI_IO_IND_TOKEN_MASK	(0xfful << PCI_IO_IND_TOKEN_SHIFT)
 #define PCI_FIX_ADDR(addr) \
 	((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
 #define PCI_GET_ADDR_TOKEN(addr) \
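The hunk above (presumably arch/powerpc/include/asm/io.h) moves the IO-workaround token field: the token is a small per-bus identifier stashed in otherwise-unused high bits of the ioremapped virtual address, and every accessor strips it again before dereferencing. With Radix the kernel addresses reach up to 0xc00fffffffffffff, so bits 48-51 can carry real address bits and the old 12-bit field at bit 48 no longer fits; the new field is 8 bits wide at bit 52. A standalone sketch of the arithmetic, independent of the kernel macros:

	#define IND_TOKEN_SHIFT	52
	#define IND_TOKEN_MASK	(0xfful << IND_TOKEN_SHIFT)

	/* Tag an ioremapped address with a bus token (illustrative only). */
	static unsigned long set_token(unsigned long addr, unsigned long token)
	{
		return (addr & ~IND_TOKEN_MASK) | (token << IND_TOKEN_SHIFT);
	}

	/* Recover the token, and the real address to dereference. */
	static unsigned long get_token(unsigned long addr)
	{
		return (addr & IND_TOKEN_MASK) >> IND_TOKEN_SHIFT;
	}

	static unsigned long fix_addr(unsigned long addr)
	{
		return addr & ~IND_TOKEN_MASK;
	}

fix_addr() mirrors what PCI_FIX_ADDR() does above; set_token() and get_token() stand in for the corresponding token macros.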
@@ -493,6 +493,8 @@
 					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
 					__PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
+					___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)		stringify_in_c(.long PPC_INST_ICBT | \
 					__PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
 /* PASemi instructions */
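The added __PPC_SLBFEE_DOT() (presumably arch/powerpc/include/asm/ppc-opcode.h) builds the slbfee. instruction from bare register numbers via ___PPC_RT()/___PPC_RB(), which is what the %0/%1 operands of inline asm expand to, and emits it as a raw .long so it assembles even on toolchains that don't know the mnemonic. A rough standalone sketch of what such an encoding boils down to; the opcode layout follows the ISA, but treat it as an illustration rather than a copy of the kernel macros:

	/* slbfee. RT,RB - primary opcode 31, extended opcode 979, Rc = 1 */
	#define MY_INST_SLBFEE		0x7c0007a7
	#define MY_SLBFEE_DOT(rt, rb)	\
		(MY_INST_SLBFEE | (((rt) & 0x1f) << 21) | (((rb) & 0x1f) << 11))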
@@ -54,6 +54,7 @@ struct pt_regs

 #ifdef CONFIG_PPC64
 	unsigned long ppr;
+	unsigned long __pad;	/* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
 	unsigned long pa;

+	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
 	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
 				     early_cpu_to_node(cpu), MEMBLOCK_NONE);
 	if (!pa) {
@@ -6,8 +6,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace

 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */

 /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>
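This header (and the trace_booke/hv/pr variants below) follows the standard ftrace layout: TRACE_INCLUDE_PATH and TRACE_INCLUDE_FILE are consumed only by <trace/define_trace.h>, so they belong immediately before that include, outside the _TRACE_*_H guard, and are #undef'd first so that including more than one trace header from the same C file cannot trip a "TRACE_INCLUDE_PATH redefined" error, which is the kind of breakage this fixes in some configurations. A minimal skeleton of the pattern, illustrative rather than one of the kernel's actual headers:

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM mysubsys

	#if !defined(_TRACE_MYSUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_MYSUBSYS_H

	#include <linux/tracepoint.h>

	TRACE_EVENT(mysubsys_event,
		TP_PROTO(int val),
		TP_ARGS(val),
		TP_STRUCT__entry(__field(int, val)),
		TP_fast_assign(__entry->val = val;),
		TP_printk("val=%d", __entry->val)
	);

	#endif /* _TRACE_MYSUBSYS_H */

	/* This part must be outside protection */
	#undef TRACE_INCLUDE_PATH
	#undef TRACE_INCLUDE_FILE

	#define TRACE_INCLUDE_PATH .
	#define TRACE_INCLUDE_FILE mysubsys_trace

	#include <trace/define_trace.h>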
@@ -6,8 +6,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke

 #define kvm_trace_symbol_exit \
 	{0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif

 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>
@@ -9,8 +9,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv

 #define kvm_trace_symbol_hcall \
 	{H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */

 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>
@@ -8,8 +8,6 @@

 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr

 TRACE_EVENT(kvm_book3s_reenter,
 	TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */

 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,

 	switch (rc) {
 	case H_FUNCTION:
-		printk(KERN_INFO
+		printk_once(KERN_INFO
 			"VPHN is not supported. Disabling polling...\n");
 		stop_topology_update();
 		break;
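The switch to printk_once() (presumably arch/powerpc/mm/numa.c) matters because vphn_get_associativity() runs for every CPU that is brought online, so on machines without VPHN the same line was printed once per hotplugged CPU. printk_once() behaves roughly like the sketch below, a paraphrase for illustration rather than the actual include/linux/printk.h definition:

	#define my_printk_once(fmt, ...)			\
	({							\
		static bool __already_printed;			\
								\
		if (!__already_printed) {			\
			__already_printed = true;		\
			printk(fmt, ##__VA_ARGS__);		\
		}						\
	})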
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
 	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }

-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
 	unsigned long tmp;

 	WARN_ON_ONCE(mfmsr() & MSR_EE);

-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-	unsigned long tmp;
-
-	WARN_ON_ONCE(mfmsr() & MSR_EE);
-
-	asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-	WARN_ON(tmp != 0);
+	if (!cpu_has_feature(CPU_FTR_ARCH_206))
+		return;
+
+	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
+
+	WARN_ON(present == (tmp == 0));
 #endif
 }

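Because the interleaved rendering above is hard to follow, here is the consolidated helper as it should read once the hunk is applied (reassembled from the fragments above, presumably arch/powerpc/mm/slb.c, with explanatory comments added):

	static void assert_slb_presence(bool present, unsigned long ea)
	{
	#ifdef CONFIG_DEBUG_VM
		unsigned long tmp;

		WARN_ON_ONCE(mfmsr() & MSR_EE);

		/* Older CPUs don't implement slbfee., so skip the check there. */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return;

		/* tmp is non-zero iff an SLB entry translating ea exists. */
		asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

		/* Warn when reality disagrees with what the caller expected. */
		WARN_ON(present == (tmp == 0));
	#endif
	}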
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 	 */
 	slb_shadow_update(ea, ssize, flags, index);

-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, ssize, flags)),
 		       "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
 			       "r" (be64_to_cpu(p->save_area[index].esid)));
 	}

-	assert_slb_exists(local_paca->kstack);
+	assert_slb_presence(true, local_paca->kstack);
 }

 /*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
 		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
 			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
 		     : "memory");
-	assert_slb_exists(get_paca()->kstack);
+	assert_slb_presence(true, get_paca()->kstack);

 	get_paca()->slb_cache_ptr = 0;

@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 			ea = (unsigned long)
 				get_paca()->slb_cache[i] << SID_SHIFT;
 			/*
-			 * Could assert_slb_exists here, but hypervisor
-			 * or machine check could have come in and
-			 * removed the entry at this point.
+			 * Could assert_slb_presence(true) here, but
+			 * hypervisor or machine check could have come
+			 * in and removed the entry at this point.
 			 */

 			slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
 	 * User preloads should add isync afterwards in case the kernel
 	 * accesses user memory before it returns to userspace with rfid.
 	 */
-	assert_slb_notexists(ea);
+	assert_slb_presence(false, ea);
 	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

 	barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
 			return -EFAULT;

 		if (ea < H_VMALLOC_END)
-			flags = get_paca()->vmalloc_sllp;
+			flags = local_paca->vmalloc_sllp;
 		else
 			flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
 	} else {
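The preempt warning fixed here came from get_paca() itself rather than from the data access: on CONFIG_DEBUG_PREEMPT builds get_paca() expands roughly as below (a paraphrase of asm/paca.h, not a verbatim copy), so its debug check can complain, whereas reading vmalloc_sllp through the raw r13-based local_paca pointer sidesteps the check:

	register struct paca_struct *local_paca asm("r13");

	#ifdef CONFIG_DEBUG_PREEMPT
	#define get_paca()	((void) debug_smp_processor_id(), local_paca)
	#else
	#define get_paca()	local_paca
	#endif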
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 }
 EXPORT_SYMBOL(pnv_pci_get_npu_dev);

-#define NPU_DMA_OP_UNSUPPORTED()	\
-	dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
-		__func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag,
-			   unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle,
-			 unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
-				   unsigned long offset, size_t size,
-				   enum dma_data_direction direction,
-				   unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
-			  int nelems, enum dma_data_direction direction,
-			  unsigned long attrs)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
-	NPU_DMA_OP_UNSUPPORTED();
-	return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
-	.map_page = dma_npu_map_page,
-	.map_sg = dma_npu_map_sg,
-	.alloc = dma_npu_alloc,
-	.free = dma_npu_free,
-	.dma_supported = dma_npu_dma_supported,
-	.get_required_mask = dma_npu_get_required_mask,
-};
-
 /*
  * Returns the PE assoicated with the PCI device of the given
  * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
 	rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);

 	/*
-	 * We don't initialise npu_pe->tce32_table as we always use
-	 * dma_npu_ops which are nops.
+	 * NVLink devices use the same TCE table configuration as
+	 * their parent device so drivers shouldn't be doing DMA
+	 * operations directly on these devices.
 	 */
-	set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+	set_dma_ops(&npe->pdev->dev, NULL);
 }

 /*
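Setting the per-device pointer to NULL works because the DMA mapping core treats a NULL pointer as "use the default ops"; roughly, paraphrasing the 4.20-era include/linux/dma-mapping.h rather than quoting it:

	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		if (dev->dma_ops)
			return dev->dma_ops;
		return get_arch_dma_ops(dev->bus);
	}

so a driver that does attempt DMA on the NVLink stub device now falls back to whatever the platform provides, instead of hitting stubs whose only job was to print an error and return 0/NULL.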
@@ -47,8 +47,9 @@ static int ok(void)
 	return 0;
 }

-#define REG_POISON	0x5a5aUL
-#define POISONED_REG(n)	((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n))
+#define REG_POISON	0x5a5a
+#define POISONED_REG(n)	((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
+			 (((unsigned long)REG_POISON) << 16) | (n))

 static inline void poison_regs(void)
 {
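The UL suffix is dropped from REG_POISON presumably because the constant is also expanded into assembler text via stringification, where older binutils reject C-style suffixes; the C-side macro therefore gains explicit unsigned long casts so the 48- and 16-bit shifts still happen at 64-bit width. A small standalone illustration of why the casts matter (a hypothetical test program, not part of the selftest):

	#include <stdio.h>

	#define REG_POISON	0x5a5a
	#define POISONED_REG(n)	((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
				 (((unsigned long)REG_POISON) << 16) | (n))

	int main(void)
	{
		/* Pass an unsigned long so (n) << 32 is also a 64-bit shift. */
		printf("%#lx\n", POISONED_REG(1UL));	/* prints 0x5a5a00015a5a0001 */
		return 0;
	}

Without the casts REG_POISON is a plain int, and shifting an int left by 48 bits is undefined behaviour.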
@@ -105,6 +106,20 @@ static void dump_regs(void)
 	}
 }

+#ifdef _CALL_AIXDESC
+struct opd {
+	unsigned long ip;
+	unsigned long toc;
+	unsigned long env;
+};
+static struct opd bad_opd = {
+	.ip = BAD_NIP,
+};
+#define BAD_FUNC (&bad_opd)
+#else
+#define BAD_FUNC BAD_NIP
+#endif
+
 int test_wild_bctr(void)
 {
 	int (*func_ptr)(void);
@@ -133,7 +148,7 @@ int test_wild_bctr(void)

 	poison_regs();

-	func_ptr = (int (*)(void))BAD_NIP;
+	func_ptr = (int (*)(void))BAD_FUNC;
 	func_ptr();

 	FAIL_IF(1); /* we didn't segv? */
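The _CALL_AIXDESC block above deals with the big-endian ELFv1 ABI, where a function pointer does not point at code but at a three-word function descriptor (entry address, TOC pointer, environment). Casting a raw address to a function pointer and calling it would therefore load the target NIP from that address instead of branching to it, so the test wraps BAD_NIP in a fake descriptor. A tiny sketch of how an ELFv1 indirect call resolves (illustrative only):

	/* On ELFv1 a function pointer really points at a descriptor like this,
	 * and an indirect call loads ip (and toc) from it before the bctrl. */
	struct opd {
		unsigned long ip;	/* code address the caller branches to */
		unsigned long toc;	/* TOC pointer loaded into r2 */
		unsigned long env;
	};

	static int target(void) { return 42; }
	static int (*fp)(void) = target;

	int call_indirect(void)
	{
		return fp();	/* branches to the descriptor's ip field */
	}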