mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-02 19:36:42 +07:00
commit f9fd8914c1
On systems with a large number of cpus, with even a modest rate of tasks exiting per cpu, the volume of taskstats data sent on thread exit can overflow a userspace listener's buffers. One approach to avoiding overflow is to allow listeners to get data for a limited and specific set of cpus. By scaling the number of listeners and/or the cpus they monitor, userspace can handle the statistical data overload more gracefully.

In this patch, each listener registers to listen to a specific set of cpus by specifying a cpumask. The interest is recorded per-cpu. When a task exits on a cpu, its taskstats data is unicast to each listener interested in that cpu.

Thanks to Andrew Morton for pointing out the various scalability and general concerns of previous attempts and for suggesting this design.

[akpm@osdl.org: build fix]
Signed-off-by: Shailabh Nagar <nagar@watson.ibm.com>
Signed-off-by: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
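The registration described above is driven from userspace over the taskstats genetlink family. Below is a minimal, untested sketch of such a listener, modeled on the kernel's Documentation/accounting/getdelays.c. It assumes the genetlink family id has already been resolved via CTRL_CMD_GETFAMILY (left as a placeholder here) and that the cpumask is passed as an ascii cpu-list string through TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, as in the mainline interface; error handling is elided.

/*
 * Sketch of a per-cpu taskstats listener. The family_id value is a
 * placeholder: resolve it via CTRL_CMD_GETFAMILY for "TASKSTATS".
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/taskstats.h>

#define GENLMSG_DATA(glh) ((void *)((char *)NLMSG_DATA(glh) + GENL_HDRLEN))

struct msgtemplate {
        struct nlmsghdr n;
        struct genlmsghdr g;
        char buf[1024];
};

/* Build and send one genetlink request carrying a single attribute. */
static int send_cmd(int sd, __u16 family_id, __u8 cmd,
                    __u16 attr_type, const void *attr, int attr_len)
{
        struct msgtemplate msg;
        struct nlattr *na;
        struct sockaddr_nl dst = { .nl_family = AF_NETLINK };

        memset(&msg, 0, sizeof(msg));
        msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
        msg.n.nlmsg_type = family_id;
        msg.n.nlmsg_flags = NLM_F_REQUEST;
        msg.n.nlmsg_pid = getpid();
        msg.g.cmd = cmd;
        msg.g.version = TASKSTATS_GENL_VERSION;

        na = GENLMSG_DATA(&msg);
        na->nla_type = attr_type;
        na->nla_len = attr_len + NLA_HDRLEN;
        memcpy((char *)na + NLA_HDRLEN, attr, attr_len);
        msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);

        return sendto(sd, &msg, msg.n.nlmsg_len, 0,
                      (struct sockaddr *)&dst, sizeof(dst));
}

int main(void)
{
        char cpumask[] = "0-3"; /* this listener wants exit data from cpus 0-3 */
        __u16 family_id = 0;    /* placeholder: resolve the TASKSTATS family id */
        struct sockaddr_nl local = { .nl_family = AF_NETLINK };
        char reply[4096];
        int sd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);

        bind(sd, (struct sockaddr *)&local, sizeof(local));

        /* Register: the kernel records this socket against each cpu in the mask. */
        send_cmd(sd, family_id, TASKSTATS_CMD_GET,
                 TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
                 cpumask, strlen(cpumask) + 1);

        /* Each task exit on cpus 0-3 is now unicast to this socket. */
        while (recv(sd, reply, sizeof(reply), 0) > 0)
                ; /* parse struct taskstats out of the netlink attributes */

        /* Deregister before exiting. */
        send_cmd(sd, family_id, TASKSTATS_CMD_GET,
                 TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
                 cpumask, strlen(cpumask) + 1);
        close(sd);
        return 0;
}

Scaling out is then a matter of starting several such listeners, each registered for a disjoint cpu range.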
90 lines · 2.3 KiB · C
/* taskstats_kern.h - kernel header for per-task statistics interface
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 */

#ifndef _LINUX_TASKSTATS_KERN_H
#define _LINUX_TASKSTATS_KERN_H

#include <linux/taskstats.h>
#include <linux/sched.h>
#include <net/genetlink.h>

#ifdef CONFIG_TASKSTATS
extern kmem_cache_t *taskstats_cache;
extern struct mutex taskstats_exit_mutex;

/* Free a task's exit-time stats buffer, if one was allocated. */
static inline void taskstats_exit_free(struct taskstats *tidstats)
{
        if (tidstats)
                kmem_cache_free(taskstats_cache, tidstats);
}

/* Initialize a new thread group's (still empty) per-tgid stats state. */
static inline void taskstats_tgid_init(struct signal_struct *sig)
{
        spin_lock_init(&sig->stats_lock);
        sig->stats = NULL;
}

/*
 * Allocate the shared per-tgid stats structure on first use. More than
 * one thread can race in here, so allocate optimistically outside the
 * lock, install only if we are first, and free the loser's copy.
 */
static inline void taskstats_tgid_alloc(struct signal_struct *sig)
{
        struct taskstats *stats;
        unsigned long flags;

        stats = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
        if (!stats)
                return;

        spin_lock_irqsave(&sig->stats_lock, flags);
        if (!sig->stats) {
                sig->stats = stats;
                stats = NULL;
        }
        spin_unlock_irqrestore(&sig->stats_lock, flags);

        if (stats)
                kmem_cache_free(taskstats_cache, stats);
}

/* Detach the per-tgid stats under the lock, then free outside it. */
static inline void taskstats_tgid_free(struct signal_struct *sig)
{
        struct taskstats *stats = NULL;
        unsigned long flags;

        spin_lock_irqsave(&sig->stats_lock, flags);
        if (sig->stats) {
                stats = sig->stats;
                sig->stats = NULL;
        }
        spin_unlock_irqrestore(&sig->stats_lock, flags);
        if (stats)
                kmem_cache_free(taskstats_cache, stats);
}

extern void taskstats_exit_alloc(struct taskstats **, unsigned int *);
extern void taskstats_exit_send(struct task_struct *, struct taskstats *, int, unsigned int);
extern void taskstats_init_early(void);
extern void taskstats_tgid_alloc(struct signal_struct *);

#else
static inline void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{}
static inline void taskstats_exit_free(struct taskstats *ptidstats)
{}
static inline void taskstats_exit_send(struct task_struct *tsk,
                                       struct taskstats *tidstats,
                                       int group_dead, unsigned int cpu)
{}
static inline void taskstats_tgid_init(struct signal_struct *sig)
{}
static inline void taskstats_tgid_alloc(struct signal_struct *sig)
{}
static inline void taskstats_tgid_free(struct signal_struct *sig)
{}
static inline void taskstats_init_early(void)
{}
#endif /* CONFIG_TASKSTATS */

#endif
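
For context, a rough sketch of how the exit path is expected to drive the hooks declared above. The real call sites live in kernel/exit.c; this fragment is illustrative only and shows the intended ordering (allocate, fill, unicast to the per-cpu listeners, free):

/* Illustrative only -- not the actual kernel/exit.c code. */
static void example_exit_path(struct task_struct *tsk, int group_dead)
{
        struct taskstats *tidstats;
        unsigned int mycpu;

        /* Reserve an exit record and note the cpu this task exits on. */
        taskstats_exit_alloc(&tidstats, &mycpu);

        /* ... delay-accounting fields are filled in here ... */

        /* Unicast the record to every listener registered for mycpu. */
        taskstats_exit_send(tsk, tidstats, group_dead, mycpu);
        taskstats_exit_free(tidstats);
}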