perf_counter: rework ioctl()s

Corey noticed that ioctl()s on grouped counters didn't work on
the whole group. This extends the ioctl() interface to take a
second argument that is interpreted as a flags field. We then
provide PERF_IOC_FLAG_GROUP to select whether the operation
applies to a single counter or to its whole group.

Having this flag gives the greatest flexibility, allowing you
to enable/disable/reset counters in a group either individually
or all at once.
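
E.g., from userspace the new calling convention could look like the
sketch below (the group_fd variable and the toggle_group() helper are
illustrative only; the ioctl commands and PERF_IOC_FLAG_GROUP are the
ones introduced here):

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* group_fd is assumed to be the fd of a group leader counter. */
static void toggle_group(int group_fd)
{
	/* With PERF_IOC_FLAG_GROUP the operation covers the group
	 * leader and all of its sibling counters: */
	ioctl(group_fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
	ioctl(group_fd, PERF_COUNTER_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);

	/* With flags == 0 only this counter (and its inherited
	 * children) is affected, not its siblings: */
	ioctl(group_fd, PERF_COUNTER_IOC_RESET, 0);
}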

[ Impact: fix group counter enable/disable semantics ]

Reported-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20090508170028.837558214@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 3df5edad87 (parent 7fc23a5380)
Authored by Peter Zijlstra on 2009-05-08 18:52:22 +02:00; committed by Ingo Molnar
2 files changed, 65 insertions(+), 49 deletions(-)

--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h

@@ -157,10 +157,14 @@ struct perf_counter_hw_event {
 /*
  * Ioctls that can be done on a perf counter fd:
  */
-#define PERF_COUNTER_IOC_ENABLE		_IO ('$', 0)
-#define PERF_COUNTER_IOC_DISABLE	_IO ('$', 1)
+#define PERF_COUNTER_IOC_ENABLE		_IOW('$', 0, u32)
+#define PERF_COUNTER_IOC_DISABLE	_IOW('$', 1, u32)
 #define PERF_COUNTER_IOC_REFRESH	_IOW('$', 2, u32)
-#define PERF_COUNTER_IOC_RESET		_IO ('$', 3)
+#define PERF_COUNTER_IOC_RESET		_IOW('$', 3, u32)
+
+enum perf_counter_ioc_flags {
+	PERF_IOC_FLAG_GROUP		= 1U << 0,
+};
 
 /*
  * Structure of the page that can be mapped via mmap

--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c

@@ -82,7 +82,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	 * add it straight to the context's counter list, or to the group
 	 * leader's sibling list:
 	 */
-	if (counter->group_leader == counter)
+	if (group_leader == counter)
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 	else {
 		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
@@ -385,24 +385,6 @@ static void perf_counter_disable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
-/*
- * Disable a counter and all its children.
- */
-static void perf_counter_disable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_disable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_disable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 static int
 counter_sched_in(struct perf_counter *counter,
 		 struct perf_cpu_context *cpuctx,
@@ -753,24 +735,6 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 	return 0;
 }
 
-/*
- * Enable a counter and all its children.
- */
-static void perf_counter_enable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_enable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_enable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
@@ -1307,31 +1271,79 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void perf_counter_reset(struct perf_counter *counter)
 {
+	(void)perf_counter_read(counter);
 	atomic_set(&counter->count, 0);
+	perf_counter_update_userpage(counter);
+}
+
+static void perf_counter_for_each_sibling(struct perf_counter *counter,
+					  void (*func)(struct perf_counter *))
+{
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
+
+	spin_lock_irq(&ctx->lock);
+	counter = counter->group_leader;
+
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		func(sibling);
+	spin_unlock_irq(&ctx->lock);
+}
+
+static void perf_counter_for_each_child(struct perf_counter *counter,
+					void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	func(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		func(child);
+	mutex_unlock(&counter->mutex);
+}
+
+static void perf_counter_for_each(struct perf_counter *counter,
+				  void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	perf_counter_for_each_sibling(counter, func);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		perf_counter_for_each_sibling(child, func);
+	mutex_unlock(&counter->mutex);
 }
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct perf_counter *counter = file->private_data;
-	int err = 0;
+	void (*func)(struct perf_counter *);
+	u32 flags = arg;
 
 	switch (cmd) {
 	case PERF_COUNTER_IOC_ENABLE:
-		perf_counter_enable_family(counter);
+		func = perf_counter_enable;
 		break;
 	case PERF_COUNTER_IOC_DISABLE:
-		perf_counter_disable_family(counter);
-		break;
-	case PERF_COUNTER_IOC_REFRESH:
-		err = perf_counter_refresh(counter, arg);
+		func = perf_counter_disable;
 		break;
 	case PERF_COUNTER_IOC_RESET:
-		perf_counter_reset(counter);
+		func = perf_counter_reset;
 		break;
+
+	case PERF_COUNTER_IOC_REFRESH:
+		return perf_counter_refresh(counter, arg);
 	default:
-		err = -ENOTTY;
+		return -ENOTTY;
 	}
-	return err;
+
+	if (flags & PERF_IOC_FLAG_GROUP)
+		perf_counter_for_each(counter, func);
+	else
+		perf_counter_for_each_child(counter, func);
+
+	return 0;
 }
 
 /*