2012-11-28 01:33:25 +07:00
|
|
|
#ifndef _LINUX_CONTEXT_TRACKING_H
|
|
|
|
#define _LINUX_CONTEXT_TRACKING_H
|
|
|
|
|
|
|
|
#include <linux/sched.h>
|
2013-01-08 00:12:14 +07:00
|
|
|
#include <linux/percpu.h>
|
2013-05-16 06:21:38 +07:00
|
|
|
#include <linux/vtime.h>
|
2013-02-24 06:23:25 +07:00
|
|
|
#include <asm/ptrace.h>
|
2013-01-08 00:12:14 +07:00
|
|
|
|
|
|
|
struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	/* Context this CPU is currently running in (kernel vs. user). */
	enum ctx_state {
		IN_KERNEL = 0,
		IN_USER,
	} state;
};
|
|
|
|
|
2013-05-16 06:21:38 +07:00
|
|
|
|
2013-02-24 07:19:14 +07:00
|
|
|
#ifdef CONFIG_CONTEXT_TRACKING
|
2013-01-08 00:12:14 +07:00
|
|
|
DECLARE_PER_CPU(struct context_tracking, context_tracking);
|
|
|
|
|
|
|
|
static inline bool context_tracking_in_user(void)
|
|
|
|
{
|
|
|
|
return __this_cpu_read(context_tracking.state) == IN_USER;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool context_tracking_active(void)
|
|
|
|
{
|
|
|
|
return __this_cpu_read(context_tracking.active);
|
|
|
|
}
|
2012-11-28 01:33:25 +07:00
|
|
|
|
|
|
|
extern void user_enter(void);
|
|
|
|
extern void user_exit(void);
|
2013-02-24 06:23:25 +07:00
|
|
|
|
2013-02-24 07:19:14 +07:00
|
|
|
static inline enum ctx_state exception_enter(void)
|
2013-02-24 06:23:25 +07:00
|
|
|
{
|
2013-02-24 07:19:14 +07:00
|
|
|
enum ctx_state prev_ctx;
|
|
|
|
|
|
|
|
prev_ctx = this_cpu_read(context_tracking.state);
|
2013-02-24 06:23:25 +07:00
|
|
|
user_exit();
|
2013-02-24 07:19:14 +07:00
|
|
|
|
|
|
|
return prev_ctx;
|
2013-02-24 06:23:25 +07:00
|
|
|
}
|
|
|
|
|
2013-02-24 07:19:14 +07:00
|
|
|
static inline void exception_exit(enum ctx_state prev_ctx)
|
2013-02-24 06:23:25 +07:00
|
|
|
{
|
2013-02-24 07:19:14 +07:00
|
|
|
if (prev_ctx == IN_USER)
|
2013-02-24 06:23:25 +07:00
|
|
|
user_enter();
|
|
|
|
}
|
|
|
|
|
2012-11-28 01:33:25 +07:00
|
|
|
extern void context_tracking_task_switch(struct task_struct *prev,
|
|
|
|
struct task_struct *next);
|
|
|
|
#else
|
2013-01-08 00:12:14 +07:00
|
|
|
/*
 * Context tracking compiled out: every hook is a no-op and the CPU is
 * always reported as running in kernel context (exception_enter()
 * returns 0, i.e. IN_KERNEL).
 */
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
						struct task_struct *next) { }
|
|
|
|
#endif /* !CONFIG_CONTEXT_TRACKING */
|
2013-05-16 06:21:38 +07:00
|
|
|
|
2013-07-13 00:02:30 +07:00
|
|
|
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
|
|
|
|
extern void guest_enter(void);
|
|
|
|
extern void guest_exit(void);
|
|
|
|
#else
|
2013-05-16 06:21:38 +07:00
|
|
|
/* Mark the current task as entering guest mode. */
static inline void guest_enter(void)
{
	/*
	 * This is running in ioctl context so its safe
	 * to assume that it's the stime pending cputime
	 * to flush.
	 *
	 * NOTE(review): the flush must happen before PF_VCPU is set,
	 * so the elapsed slice is charged as system time rather than
	 * guest time — keep this statement order.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
}
|
|
|
|
|
|
|
|
/* Mark the current task as leaving guest mode. */
static inline void guest_exit(void)
{
	/*
	 * Flush the guest cputime we spent on the guest.
	 * PF_VCPU is still set here, so this slice is accounted
	 * to the guest; only clear the flag afterwards.
	 */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
|
2013-07-13 00:02:30 +07:00
|
|
|
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
|
2012-11-28 01:33:25 +07:00
|
|
|
|
|
|
|
#endif
|