commit 49f0ce5f92
mm: add overcommit_kbytes sysctl variable

Some applications that run on HPC clusters are designed around the
availability of RAM, and the overcommit ratio is fine-tuned to get the
maximum use of memory without swapping.  With growing memory sizes, the
1%-of-all-RAM grain provided by overcommit_ratio has become too coarse
for these workloads (on a 2TB machine it represents no less than 20GB).

This patch adds the new overcommit_kbytes sysctl variable, which allows
a much finer grain.

[akpm@linux-foundation.org: coding-style fixes]
[akpm@linux-foundation.org: fix nommu build]
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Alan Cox <alan@lxorguk.ukuu.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
94 lines
2.3 KiB
C
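The commit limit only matters in strict overcommit mode (vm.overcommit_memory = 2), where the kernel refuses mappings that would push committed memory past the limit. A minimal userspace sketch of using the new knob to pin the limit at an exact size, assuming it is exposed at the conventional path /proc/sys/vm/overcommit_kbytes (the path itself is not shown on this page):

#include <stdio.h>

int main(void)
{
	/* Assumed sysctl path; follows the usual /proc/sys/vm/ convention. */
	FILE *f = fopen("/proc/sys/vm/overcommit_kbytes", "w");

	if (!f) {
		perror("overcommit_kbytes");
		return 1;
	}
	/* Cap committed memory at exactly 64 GiB (expressed in KiB), rather
	 * than the nearest 1%-of-RAM step overcommit_ratio would allow. */
	fprintf(f, "%llu\n", 64ULL * 1024 * 1024);
	fclose(f);
	return 0;
}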
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
#else
#define vm_committed_as_batch 0
#endif

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	__percpu_counter_add(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
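
/*
 * Usage sketch (illustrative, not part of the original header): callers
 * charge and release commit space in pairs, e.g. for a 16-page mapping:
 *
 *	vm_acct_memory(16);
 *	...
 *	vm_unacct_memory(16);
 *
 * The batch argument (vm_committed_as_batch) lets small updates stay in
 * per-CPU counters; a CPU folds its local delta into the global
 * vm_committed_as only once the delta exceeds the batch, which keeps
 * cache-line contention low on SMP.
 */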

/*
 * Allow architectures to handle additional protection bits
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot) 0
#endif

#ifndef arch_vm_get_page_prot
#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline int arch_validate_prot(unsigned long prot)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
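
/*
 * Worked example (illustrative, not part of the original header): with
 * the generic fallback above, arch_validate_prot(PROT_READ | PROT_WRITE)
 * returns 1, while a request carrying an undefined bit such as 0x100
 * returns 0, so mprotect() fails with -EINVAL.  Architectures with extra
 * protection bits (e.g. powerpc's PROT_SAO) override this hook instead.
 */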

/*
 * Optimisation macro.  It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
	((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
	 : ((x) & (bit1)) / ((bit1) / (bit2)))
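
/*
 * Worked example (illustrative, not part of the original header): take
 * bit1 = 0x1 and bit2 = 0x8.  Since bit1 <= bit2, the multiply branch is
 * chosen:
 *
 *	((x) & 0x1) * (0x8 / 0x1)  ==  (x & 0x1) * 8
 *
 * which yields 0x8 when bit 0 of x is set and 0 otherwise -- exactly
 * (x & bit1) ? bit2 : 0, but branch-free: with single-bit constants the
 * comparison, the division and the multiplication all fold away at
 * compile time into a mask and a shift.
 */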

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot);
}
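
/*
 * Usage sketch (illustrative, not part of the original header): for the
 * common mmap(..., PROT_READ | PROT_WRITE, ...) case this reduces to
 *
 *	calc_vm_prot_bits(PROT_READ | PROT_WRITE) == VM_READ | VM_WRITE
 *
 * because the generic PROT_* and VM_* values use matching low bits, so
 * each _calc_vm_trans() folds to a masked copy.  calc_vm_flag_bits()
 * below applies the same translation to the mmap "flags" argument.
 */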

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    );
}

unsigned long vm_commit_limit(void);

#endif /* _LINUX_MMAN_H */
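
For context, the vm_commit_limit() declared near the end of the header is where overcommit_kbytes takes effect. Below is a hedged sketch of the computation the commit message implies (the real body lives under mm/ and is not shown on this page; totalram_pages, hugetlb_total_pages() and total_swap_pages are existing kernel symbols, but the exact form here is an assumption):

/* Sketch only -- not verbatim kernel code. */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		/* Exact kilobyte count requested by the admin, in pages. */
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		/* Legacy grain: a percentage of non-hugetlb RAM. */
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}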