mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 05:45:09 +07:00
8676af1ff2
I've noticed that there is no interface exposed by CMA which would let me declare contiguous memory on a particular NUMA node. This patchset adds the ability to try to allocate contiguous memory on a specific node. It will fall back to other nodes if the specified one doesn't work. Implement a new method for declaring contiguous memory on a particular node and keep cma_declare_contiguous() as a wrapper. [akpm@linux-foundation.org: build fix] Signed-off-by: Aslan Bakirov <aslan@fb.com> Signed-off-by: Roman Gushchin <guro@fb.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: Michal Hocko <mhocko@kernel.org> Cc: Andreas Schaufler <andreas.schaufler@gmx.de> Cc: Mike Kravetz <mike.kravetz@oracle.com> Cc: Rik van Riel <riel@surriel.com> Cc: Joonsoo Kim <js1304@gmail.com> Link: http://lkml.kernel.org/r/20200407163840.92263-2-guro@fb.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
51 lines
1.6 KiB
C
51 lines
1.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef __CMA_H__
|
|
#define __CMA_H__
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/types.h>
|
|
#include <linux/numa.h>
|
|
|
|
/*
|
|
* There is always at least global CMA area and a few optional
|
|
* areas configured in kernel .config.
|
|
*/
|
|
#ifdef CONFIG_CMA_AREAS
|
|
#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
|
|
|
|
#else
|
|
#define MAX_CMA_AREAS (0)
|
|
|
|
#endif
|
|
|
|
struct cma;
|
|
|
|
extern unsigned long totalcma_pages;
|
|
extern phys_addr_t cma_get_base(const struct cma *cma);
|
|
extern unsigned long cma_get_size(const struct cma *cma);
|
|
extern const char *cma_get_name(const struct cma *cma);
|
|
|
|
extern int __init cma_declare_contiguous_nid(phys_addr_t base,
|
|
phys_addr_t size, phys_addr_t limit,
|
|
phys_addr_t alignment, unsigned int order_per_bit,
|
|
bool fixed, const char *name, struct cma **res_cma,
|
|
int nid);
|
|
/**
 * cma_declare_contiguous() - reserve a contiguous memory area for the
 * Contiguous Memory Allocator with no NUMA node preference.
 * @base: base address of the reserved area, or 0 to let the allocator choose
 * @size: size of the reserved area in bytes
 * @limit: physical address above which the area must not be placed
 * @alignment: requested alignment of the area, or 0 for the default
 * @order_per_bit: order of pages represented by one bit in the CMA bitmap
 * @fixed: if true, reserve exactly at @base; fail rather than relocate
 * @name: name of the area, exposed for debugging purposes
 * @res_cma: out parameter; on success points to the created cma struct
 *
 * Thin wrapper around cma_declare_contiguous_nid() that passes
 * NUMA_NO_NODE, i.e. the reservation is not restricted to any node.
 * Returns 0 on success or a negative errno-style value on failure
 * (semantics come from cma_declare_contiguous_nid(), defined in mm/cma.c).
 */
static inline int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma)
{
	/* NUMA_NO_NODE: no node preference; any node may satisfy the request. */
	return cma_declare_contiguous_nid(base, size, limit, alignment,
			order_per_bit, fixed, name, res_cma, NUMA_NO_NODE);
}
|
|
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
|
|
unsigned int order_per_bit,
|
|
const char *name,
|
|
struct cma **res_cma);
|
|
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
|
|
bool no_warn);
|
|
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
|
|
|
|
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
|
|
#endif
|