Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-16 01:36:42 +07:00

Commit d7cceda96b
Today we have:

	config PPC_BOOK3S_32
		bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
		[depends on PPC32 within a choice]

	config PPC_BOOK3S
		def_bool y
		depends on PPC_BOOK3S_32 || PPC_BOOK3S_64

	config 6xx
		def_bool y
		depends on PPC32 && PPC_BOOK3S

6xx is therefore redundant with PPC_BOOK3S_32. To make the code clearer, prefer PPC_BOOK3S_32; this will allow CONFIG_6xx to be removed in a later patch.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
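The conversion this enables is a mechanical substitution in preprocessor guards (and the equivalent Kconfig/Makefile conditions). A minimal sketch of the pattern, with an illustrative body rather than a specific patch hunk:

	/* before: guard on the redundant symbol */
	#ifdef CONFIG_6xx
		/* 6xx / Book3S-32 specific path */
	#endif

	/* after: the equivalent guard on the preferred symbol */
	#ifdef CONFIG_PPC_BOOK3S_32
		/* 6xx / Book3S-32 specific path */
	#endif

The pmc.c listing below already reflects this: its #elif tests CONFIG_PPC_BOOK3S_32 rather than CONFIG_6xx.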
103 lines · 2.4 KiB · C
/*
 * arch/powerpc/kernel/pmc.c
 *
 * Copyright (C) 2004 David Gibson, IBM Corporation.
 * Includes code formerly from arch/ppc/kernel/perfmon.c:
 *   Author: Andy Fleming
 *   Copyright (c) 2004 Freescale Semiconductor, Inc
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/pmc.h>

#ifndef MMCR0_PMAO
#define MMCR0_PMAO 0
#endif

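/*
 * Default performance-monitor exception handler, installed while no
 * subsystem owns the PMCs: it masks further performance monitor
 * interrupts for the current CPU family (PMGC0_PMIE on Freescale
 * embedded, MMCR0_PMXE/MMCR0_PMAO on 64-bit and classic Book3S-32 parts).
 */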
static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_EMB_PERFMON)
	mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
#elif defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
	if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
		mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
	mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_PMXE);
#endif
}

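/*
 * PMC ownership state: pmc_owner_lock protects both the recorded owner
 * (pmc_owner_caller) and the active interrupt handler (perf_irq), which
 * reserve_pmc_hardware()/release_pmc_hardware() below swap in and out.
 */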
static DEFINE_RAW_SPINLOCK(pmc_owner_lock);
static void *pmc_owner_caller; /* mostly for debugging */
perf_irq_t perf_irq = dummy_perf;

int reserve_pmc_hardware(perf_irq_t new_perf_irq)
{
	int err = 0;

	raw_spin_lock(&pmc_owner_lock);

	if (pmc_owner_caller) {
		printk(KERN_WARNING "reserve_pmc_hardware: "
		       "PMC hardware busy (reserved by caller %p)\n",
		       pmc_owner_caller);
		err = -EBUSY;
		goto out;
	}

	pmc_owner_caller = __builtin_return_address(0);
	perf_irq = new_perf_irq ? new_perf_irq : dummy_perf;

 out:
	raw_spin_unlock(&pmc_owner_lock);
	return err;
}
EXPORT_SYMBOL_GPL(reserve_pmc_hardware);

void release_pmc_hardware(void)
{
	raw_spin_lock(&pmc_owner_lock);

	WARN_ON(! pmc_owner_caller);

	pmc_owner_caller = NULL;
	perf_irq = dummy_perf;

	raw_spin_unlock(&pmc_owner_lock);
}
EXPORT_SYMBOL_GPL(release_pmc_hardware);

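/*
 * Usage sketch (not part of this file): a hypothetical PMC consumer is
 * expected to install its own handler via reserve_pmc_hardware() and put
 * the hardware back with release_pmc_hardware() when done. The handler
 * type perf_irq_t matches dummy_perf() above: void (*)(struct pt_regs *).
 *
 *	static void my_pmc_interrupt(struct pt_regs *regs)
 *	{
 *		// read/reset counters, re-arm MMCR0, etc.
 *	}
 *
 *	int my_pmc_init(void)
 *	{
 *		int err = reserve_pmc_hardware(my_pmc_interrupt);
 *		if (err)
 *			return err;	// someone else owns the PMCs (-EBUSY)
 *		// ... program the counters ...
 *		return 0;
 *	}
 *
 *	void my_pmc_exit(void)
 *	{
 *		release_pmc_hardware();	// restores dummy_perf as the handler
 *	}
 */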
#ifdef CONFIG_PPC64
void power4_enable_pmcs(void)
{
	unsigned long hid0;

	hid0 = mfspr(SPRN_HID0);
	hid0 |= 1UL << (63 - 20);

	/* POWER4 requires the following sequence */
	asm volatile(
		"sync\n"
		"mtspr %1, %0\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"mfspr %0, %1\n"
		"isync" : "=&r" (hid0) : "i" (SPRN_HID0), "0" (hid0):
		"memory");
}
#endif /* CONFIG_PPC64 */