linux_dsm_epyc7002/arch/powerpc/platforms/powernv/opal-tracepoints.c
Masahiro Yamada e9666d10a5 jump_label: move 'asm goto' support test to Kconfig
Currently, CONFIG_JUMP_LABEL just means "I _want_ to use jump label".

The jump label is controlled by HAVE_JUMP_LABEL, which is defined
like this:

  #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
  # define HAVE_JUMP_LABEL
  #endif

We can improve this by testing 'asm goto' support in Kconfig, then
making JUMP_LABEL depend on CC_HAS_ASM_GOTO.

The ugly #ifdef HAVE_JUMP_LABEL will go away, and CONFIG_JUMP_LABEL will
match the real kernel capability.

Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
2019-01-06 09:46:51 +09:00

89 lines
1.6 KiB
C

// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_JUMP_LABEL
/*
 * With jump labels available, tracepoint enablement for OPAL calls is
 * controlled by a static key, toggled as probes register/unregister.
 */
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

/* Probe registration hook: enable the static key. Always succeeds. */
int opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
	return 0;
}

/* Probe unregistration hook: drop one enable count on the static key. */
void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * Without jump labels, OPAL calls are optimised by keeping
 * opal_tracepoint_refcount directly in the TOC, so a single load is
 * enough to check whether the opal tracepoints are enabled.
 *
 * NB: reg/unreg are called while guarded with the tracepoints_mutex,
 * so the plain (non-atomic) updates below are safe.
 */
extern long opal_tracepoint_refcount;

/* Probe registration hook: bump the TOC-resident refcount. */
int opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount += 1;
	return 0;
}

/* Probe unregistration hook: drop the TOC-resident refcount. */
void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount -= 1;
}
#endif
/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion. opal_trace_depth is a per-CPU re-entrancy counter: it is
 * checked and updated (with interrupts disabled) by __trace_opal_entry()
 * and __trace_opal_exit() below, which skip emitting an event whenever
 * the counter is already non-zero on this CPU.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);
/*
 * Emit the opal_entry tracepoint for an OPAL call.
 * @opcode: OPAL token about to be invoked
 * @args: the call's argument array
 *
 * The tracing machinery may itself perform OPAL calls; the per-CPU
 * depth counter (sampled with interrupts off) suppresses such
 * recursive events. The preempt_disable() taken here is balanced by
 * the preempt_enable() in __trace_opal_exit().
 */
void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned int *depth;
	unsigned long flags;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);
	if (!*depth) {
		(*depth)++;
		/* Held across the OPAL call; released in __trace_opal_exit(). */
		preempt_disable();
		trace_opal_entry(opcode, args);
		(*depth)--;
	}

	local_irq_restore(flags);
}
/*
 * Emit the opal_exit tracepoint after an OPAL call returns.
 * @opcode: OPAL token that was invoked
 * @retval: value the OPAL call returned
 *
 * Mirrors __trace_opal_entry(): the per-CPU depth counter (sampled
 * with interrupts off) suppresses recursive events, and the
 * preempt_enable() here balances the preempt_disable() taken in
 * __trace_opal_entry().
 */
void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned int *depth;
	unsigned long flags;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);
	if (!*depth) {
		(*depth)++;
		trace_opal_exit(opcode, retval);
		/* Balances preempt_disable() in __trace_opal_entry(). */
		preempt_enable();
		(*depth)--;
	}

	local_irq_restore(flags);
}