linux_dsm_epyc7002/include/linux/flex_proportions.h
Jan Kara f3109a51f8 lib: Proportions with flexible period
Implement code computing proportions of events of different types (like the
code in lib/proportions.c) but allowing periods to have different lengths.
This allows us to have aging periods of fixed wallclock time, which gives
better proportion estimates given the hugely varying throughput of different
devices - the previous approach of measuring the aging period by number of
events has the problem that a reasonable period length for a system with a
low-end USB stick is not a reasonable period length for a system with a
high-end storage array, resulting in proportion updates that are either too
slow or too fluctuating.

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
2012-06-09 08:37:55 +09:00
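
As a rough usage sketch (not part of the patch itself; the names completions,
writer_events and the example_* helpers are made up for illustration, and the
timer or work item that drives aging is only hinted at), a user of this API
keeps one fprop_global per event "universe" and one fprop_local_percpu per
tracked object, bumps the local counter on each event, periodically declares a
new period, and reads an object's share back as a numerator/denominator pair:

#include <linux/flex_proportions.h>
#include <linux/printk.h>

/* Hypothetical names for illustration only. */
static struct fprop_global completions;		/* one per event "universe" */
static struct fprop_local_percpu writer_events;	/* one per tracked object */

static int example_init(void)
{
	int err;

	err = fprop_global_init(&completions);
	if (err)
		return err;
	err = fprop_local_init_percpu(&writer_events);
	if (err)
		fprop_global_destroy(&completions);
	return err;
}

/* Called whenever the tracked object generates an event. */
static void example_account_event(void)
{
	fprop_inc_percpu(&completions, &writer_events);
}

/* Called periodically (e.g. from a timer) once per fixed wallclock interval. */
static void example_age(void)
{
	fprop_new_period(&completions, 1);
}

/* The fraction of recent events attributed to this object is *num / *den. */
static void example_report(void)
{
	unsigned long num, den;

	fprop_fraction_percpu(&completions, &writer_events, &num, &den);
	pr_info("recent share: %lu / %lu\n", num, den);
}

Because the aging period is wallclock-based, it is the caller who decides how
often fprop_new_period() runs; the library itself does not impose a period
length.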

/*
 * Floating proportions with flexible aging period
 *
 * Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 */
#ifndef _LINUX_FLEX_PROPORTIONS_H
#define _LINUX_FLEX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>

/*
 * When maximum proportion of some event type is specified, this is the
 * precision with which we allow limiting. Note that this creates an upper
 * bound on the number of events per period like
 * ULLONG_MAX >> FPROP_FRAC_SHIFT.
 */
#define FPROP_FRAC_SHIFT 10
#define FPROP_FRAC_BASE (1UL << FPROP_FRAC_SHIFT)

/*
 * ---- Global proportion definitions ----
 */
struct fprop_global {
	/* Number of events in the current period */
	struct percpu_counter events;
	/* Current period */
	unsigned int period;
	/* Synchronization with period transitions */
	seqcount_t sequence;
};
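
/*
 * The global state is aged by calling fprop_new_period(), typically from a
 * timer, to declare that @periods fixed-length aging periods have elapsed
 * since the last call.
 */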
int fprop_global_init(struct fprop_global *p);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);

/*
 * ---- SINGLE ----
 */
struct fprop_local_single {
	/* the local events counter */
	unsigned long events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};
#define INIT_FPROP_LOCAL_SINGLE(name) \
{ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
}
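/*
 * INIT_FPROP_LOCAL_SINGLE() allows static initialization, e.g. (with a
 * hypothetical name):
 *	static struct fprop_local_single ev = INIT_FPROP_LOCAL_SINGLE(ev);
 */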
int fprop_local_init_single(struct fprop_local_single *pl);
void fprop_local_destroy_single(struct fprop_local_single *pl);
void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl);
void fprop_fraction_single(struct fprop_global *p,
	struct fprop_local_single *pl, unsigned long *numerator,
	unsigned long *denominator);
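
/*
 * fprop_inc_single() counts one event of this type; it wraps
 * __fprop_inc_single() with local_irq_save()/local_irq_restore() so it can be
 * used when interrupts may be enabled, while the __ variant is for callers
 * that already run with interrupts disabled.
 */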
static inline
void fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_inc_single(p, pl);
	local_irq_restore(flags);
}

/*
 * ---- PERCPU ----
 */
struct fprop_local_percpu {
	/* the local events counter */
	struct percpu_counter events;
	/* Period in which we last updated events */
	unsigned int period;
	raw_spinlock_t lock;	/* Protect period and numerator */
};
int fprop_local_init_percpu(struct fprop_local_percpu *pl);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
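
/*
 * __fprop_inc_percpu_max() counts the event only if the current proportion of
 * this event type does not already exceed max_frac / FPROP_FRAC_BASE.
 */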
void __fprop_inc_percpu_max(struct fprop_global *p,
	struct fprop_local_percpu *pl, int max_frac);
void fprop_fraction_percpu(struct fprop_global *p,
	struct fprop_local_percpu *pl, unsigned long *numerator,
	unsigned long *denominator);

static inline
void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_inc_percpu(p, pl);
	local_irq_restore(flags);
}

#endif