/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

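/* Per-context raw sample scratch buffers, indexed by perf recursion context. */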
static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

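/*
 * Check whether the calling task is allowed to attach this perf event
 * to the given trace event (raw tracepoint data can leak kernel state).
 */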
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * The parent was already checked and allowed to be created,
	 * so allow children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because the code below is called only via the perf_event_open
	 * syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		if (!is_sampling_event(p_event))
			return 0;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler, and its overall
		 * trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * so only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

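/*
 * Register the trace event for perf use: allocate the per-CPU event list
 * and, for the first event system-wide, the per-context trace buffers.
 */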
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

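/*
 * Drop the perf reference on the trace event; the last user tears down
 * the per-CPU lists and, once no trace events remain, the buffers.
 */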
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

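/* Forward the perf open/close of a single event to the event class. */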
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

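/* Permission check, then register and open; undo the register if open fails. */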
static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

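/*
 * Entry point from the perf core: find the trace event whose id matches
 * attr.config and set it up for this perf event.
 */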
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

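/*
 * Called when the perf core adds the event on a CPU: queue it on this
 * CPU's list and notify the event class.
 */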
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

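/*
 * Hand out this CPU's raw buffer for the current recursion context and
 * report that context in *rctxp so the caller can submit or back out.
 */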
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* Zero the dead bytes from alignment so stack data can't leak to user space. */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
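/*
 * ftrace callback: record a function entry (ip, parent_ip) as a perf
 * raw sample for the events on this CPU's list.
 */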
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

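/*
 * ->reg() callback for the function trace event: perf open/close map to
 * registering/unregistering the ftrace ops, add/del to per-CPU enable.
 */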
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */