[POWERPC] spufs: add context switch notification log

There are userspace instrumentation tools that need to monitor spu
context switches. This patch adds a new file called 'switch_log' to
each spufs context directory that can be used to monitor the context
switches.

Context switch in/out events and exits from spu_run are logged once the
file has been opened for the first time, and the entries can then be
read from it.
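Each record is a single text line of the form
"<sec>.<nsec> <spu_id> <type> <val> <timebase>", matching the format
string used by switch_log_sprint() below. As a rough illustration only
(not part of this patch), a userspace monitor might read the log like
this minimal sketch; the /spu mount point and the "ctx" directory name
are placeholders:

/*
 * Illustrative sketch, not part of the patch: assumes spufs is mounted
 * at /spu and "ctx" is the context directory being traced (both names
 * are placeholders).  Field order matches switch_log_sprint().
 */
#include <stdio.h>

int main(void)
{
	unsigned int sec, nsec, type, val;
	unsigned long long timebase;
	int spu_id;
	FILE *f = fopen("/spu/ctx/switch_log", "r");

	if (!f)
		return 1;

	/* reads block until an entry is available (no O_NONBLOCK here) */
	while (fscanf(f, "%u.%u %d %u %u %llu", &sec, &nsec, &spu_id,
		      &type, &val, &timebase) == 6)
		printf("spu %d: type %u val %u at %u.%09u (tb %llu)\n",
		       spu_id, type, val, sec, nsec, timebase);

	fclose(f);
	return 0;
}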

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
commit 5158e9b521 (parent 14b3ca4022)
Author:    Christoph Hellwig <hch@lst.de>
Date:      2008-04-29 17:08:38 +10:00
Committer: Jeremy Kerr <jk@ozlabs.org>

 5 files changed, 200 insertions(+), 0 deletions(-)

arch/powerpc/platforms/cell/spufs/context.c

@@ -88,6 +88,7 @@ void destroy_spu_context(struct kref *kref)
kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
BUG_ON(!list_empty(&ctx->rq));
atomic_dec(&nr_spu_contexts);
kfree(ctx->switch_log);
kfree(ctx);
}

arch/powerpc/platforms/cell/spufs/file.c

@@ -2387,6 +2387,171 @@ static const struct file_operations spufs_stat_fops = {
.release = single_release,
};
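
/* entries currently queued in the switch log ring buffer */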
static inline int spufs_switch_log_used(struct spu_context *ctx)
{
return (ctx->switch_log->head - ctx->switch_log->tail) %
SWITCH_LOG_BUFSIZE;
}
static inline int spufs_switch_log_avail(struct spu_context *ctx)
{
return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
}
static int spufs_switch_log_open(struct inode *inode, struct file *file)
{
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
/*
* We (ab-)use the mapping_lock here because it serves a similar
* purpose of synchronizing open/close elsewhere. Maybe it should
* be renamed eventually.
*/
mutex_lock(&ctx->mapping_lock);
if (ctx->switch_log) {
spin_lock(&ctx->switch_log->lock);
ctx->switch_log->head = 0;
ctx->switch_log->tail = 0;
spin_unlock(&ctx->switch_log->lock);
} else {
/*
* We allocate the switch log data structures on first open.
* They will never be freed because we assume a context will
* be traced until it goes away.
*/
ctx->switch_log = kzalloc(sizeof(struct switch_log) +
SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
GFP_KERNEL);
if (!ctx->switch_log)
goto out;
spin_lock_init(&ctx->switch_log->lock);
init_waitqueue_head(&ctx->switch_log->wait);
}
mutex_unlock(&ctx->mapping_lock);
return 0;
out:
mutex_unlock(&ctx->mapping_lock);
return -ENOMEM;
}
static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
{
struct switch_log_entry *p;
p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
(unsigned int) p->tstamp.tv_sec,
(unsigned int) p->tstamp.tv_nsec,
p->spu_id,
(unsigned int) p->type,
(unsigned int) p->val,
(unsigned long long) p->timebase);
}
static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
int error = 0, cnt = 0;
if (!buf || len < 0)
return -EINVAL;
while (cnt < len) {
char tbuf[128];
int width;
if (file->f_flags & O_NONBLOCK) {
if (spufs_switch_log_used(ctx) <= 0)
return cnt ? cnt : -EAGAIN;
} else {
/* Wait for data in buffer */
error = wait_event_interruptible(ctx->switch_log->wait,
spufs_switch_log_used(ctx) > 0);
if (error)
break;
}
spin_lock(&ctx->switch_log->lock);
if (ctx->switch_log->head == ctx->switch_log->tail) {
/* multiple readers race? */
spin_unlock(&ctx->switch_log->lock);
continue;
}
width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
if (width < len) {
ctx->switch_log->tail =
(ctx->switch_log->tail + 1) %
SWITCH_LOG_BUFSIZE;
}
spin_unlock(&ctx->switch_log->lock);
/*
* If the record does not fit in the remaining space, return the
* partial buffer read so far.
*/
if (width >= len)
break;
if (copy_to_user(buf + cnt, tbuf, width)) {
	error = -EFAULT;
	break;
}
cnt += width;
}
return cnt == 0 ? error : cnt;
}
static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
unsigned int mask = 0;
poll_wait(file, &ctx->switch_log->wait, wait);
if (spufs_switch_log_used(ctx) > 0)
mask |= POLLIN;
return mask;
}
static const struct file_operations spufs_switch_log_fops = {
.owner = THIS_MODULE,
.open = spufs_switch_log_open,
.read = spufs_switch_log_read,
.poll = spufs_switch_log_poll,
};
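
/*
 * Log one context switch event. Entries are only recorded once the
 * switch_log file has been opened (which allocates the log), and are
 * dropped when the ring buffer is full.
 */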
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val)
{
if (!ctx->switch_log)
return;
spin_lock(&ctx->switch_log->lock);
if (spufs_switch_log_avail(ctx) > 1) {
struct switch_log_entry *p;
p = ctx->switch_log->log + ctx->switch_log->head;
ktime_get_ts(&p->tstamp);
p->timebase = get_tb();
p->spu_id = spu ? spu->number : -1;
p->type = type;
p->val = val;
ctx->switch_log->head =
(ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
}
spin_unlock(&ctx->switch_log->lock);
wake_up(&ctx->switch_log->wait);
}
struct tree_descr spufs_dir_contents[] = {
{ "capabilities", &spufs_caps_fops, 0444, },
@@ -2423,6 +2588,7 @@ struct tree_descr spufs_dir_contents[] = {
{ "proxydma_info", &spufs_proxydma_info_fops, 0444, },
{ "tid", &spufs_tid_fops, 0444, },
{ "stat", &spufs_stat_fops, 0444, },
{ "switch_log", &spufs_switch_log_fops, 0444 },
{},
};

arch/powerpc/platforms/cell/spufs/run.c

@@ -405,6 +405,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
ret = spu_run_fini(ctx, npc, &status);
spu_yield(ctx);
spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
(((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
ctx->stats.libassist++;

arch/powerpc/platforms/cell/spufs/sched.c

@@ -240,6 +240,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu->mfc_callback = spufs_mfc_callback;
mb();
spu_unmap_mappings(ctx);
spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
spu_cpu_affinity_set(spu, raw_smp_processor_id());
@@ -419,6 +420,7 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
spu->timestamp = jiffies;
ctx->state = SPU_STATE_SAVED;
spu->ibox_callback = NULL;

arch/powerpc/platforms/cell/spufs/spufs.h

@@ -47,6 +47,30 @@ enum {
SPU_SCHED_SPU_RUN, /* context is within spu_run */
};
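
/* size of the switch log ring buffer, in entries */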
enum {
SWITCH_LOG_BUFSIZE = 4096,
};
enum {
SWITCH_LOG_START,
SWITCH_LOG_STOP,
SWITCH_LOG_EXIT,
};
struct switch_log {
spinlock_t lock;
wait_queue_head_t wait;
unsigned long head;
unsigned long tail;
struct switch_log_entry {
struct timespec tstamp;
s32 spu_id;
u32 type;
u32 val;
u64 timebase;
} log[];
};
struct spu_context {
struct spu *spu; /* pointer to a physical SPU */
struct spu_state csa; /* SPU context save area. */
@@ -116,6 +140,9 @@ struct spu_context {
unsigned long long libassist;
} stats;
/* context switch log */
struct switch_log *switch_log;
struct list_head aff_list;
int aff_head;
int aff_offset;
@@ -256,6 +283,8 @@ int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
void spu_update_sched_info(struct spu_context *ctx);
void __spu_update_sched_info(struct spu_context *ctx);