commit 7f184275aa
This version of the gen_pool memory allocator supports lockless operation.

This makes it safe to use in NMI handlers and other special unblockable contexts that could otherwise deadlock on locks. This is implemented by using atomic operations and retries on any conflicts. The disadvantage is that there may be livelocks in extreme cases. For better scalability, one gen_pool allocator can be used for each CPU.

The lockless operation only works if there is enough memory available. If new memory is added to the pool, a lock still has to be taken. So any user relying on locklessness has to ensure that sufficient memory is preallocated.

The basic atomic operation of this allocator is cmpxchg on long. On architectures that don't have an NMI-safe cmpxchg implementation, the allocator can NOT be used in NMI handlers. So code that uses the allocator in an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Len Brown <len.brown@intel.com>
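In practice that means splitting setup and use, as in the minimal sketch below (hypothetical names nmi_pool, nmi_pool_init and nmi_alloc; error handling abbreviated): the pool is created and filled from process context, where taking the pool lock is fine, and NMI-context code only ever calls gen_pool_alloc()/gen_pool_free() on the preallocated memory. Per the message above, such a user would also add a Kconfig dependency on ARCH_HAVE_NMI_SAFE_CMPXCHG.

#include <linux/genalloc.h>
#include <linux/slab.h>

static struct gen_pool *nmi_pool;	/* hypothetical example pool */

/* Process context, at init time: adding memory still takes the pool lock. */
static int nmi_pool_init(void)
{
	void *buf = kmalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	nmi_pool = gen_pool_create(5, -1);	/* 32-byte minimum allocations */
	if (!nmi_pool)
		return -ENOMEM;
	return gen_pool_add(nmi_pool, (unsigned long)buf, 4096, -1);
}

/* NMI context: allocation is lockless, so it cannot deadlock on the lock. */
static void *nmi_alloc(size_t size)
{
	return (void *)gen_pool_alloc(nmi_pool, size);	/* NULL when exhausted */
}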
82 lines
3.1 KiB
C
/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg
 * implementation, the allocator can NOT be used in NMI handlers. So
 * code that uses the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#ifndef __GENALLOC_H__
#define __GENALLOC_H__

/*
 * General purpose special memory pool descriptor.
 */
struct gen_pool {
	spinlock_t lock;
	struct list_head chunks;	/* list of chunks in this pool */
	int min_alloc_order;		/* minimum allocation order */
};

/*
 * General purpose special memory pool chunk descriptor.
 */
struct gen_pool_chunk {
	struct list_head next_chunk;	/* next chunk in pool */
	atomic_t avail;			/* bytes still available in this chunk */
	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
	unsigned long start_addr;	/* starting address of memory chunk */
	unsigned long end_addr;		/* ending address of memory chunk */
	unsigned long bits[0];		/* bitmap for allocating memory chunk */
};
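
/*
 * Illustrative note (not part of the original header): min_alloc_order
 * is the log2 of the allocation granularity, so gen_pool_create(5, -1)
 * yields a pool that hands out memory in 32-byte units, and a chunk of
 * S bytes is tracked by (S >> min_alloc_order) bits in bits[].
 */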

extern struct gen_pool *gen_pool_create(int, int);
extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
		size_t, int);

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
		size_t size, int nid)
{
	return gen_pool_add_virt(pool, addr, -1, size, nid);
}
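
/*
 * Illustrative note (not part of the original header): when the chunk's
 * physical address matters, e.g. so that gen_pool_virt_to_phys() can
 * translate later, register it with gen_pool_add_virt() directly:
 *
 *	rc = gen_pool_add_virt(pool, (unsigned long)vaddr, paddr, size, -1);
 *
 * gen_pool_add() is the shorthand for memory with no physical mapping
 * of interest; it passes -1 as the physical address.
 */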
extern void gen_pool_destroy(struct gen_pool *);
extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
extern void gen_pool_for_each_chunk(struct gen_pool *,
	void (*)(struct gen_pool *, struct gen_pool_chunk *, void *), void *);
extern size_t gen_pool_avail(struct gen_pool *);
extern size_t gen_pool_size(struct gen_pool *);

#endif /* __GENALLOC_H__ */
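For completeness, a full lifecycle of the API above, as a sketch under the same assumptions (hypothetical function name, kmalloc()ed RAM standing in for real special-purpose memory):

#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int genalloc_lifecycle_example(void)
{
	struct gen_pool *pool;
	unsigned long addr;
	void *backing;
	int rc;

	pool = gen_pool_create(4, -1);		/* 16-byte granularity, any node */
	if (!pool)
		return -ENOMEM;

	backing = kmalloc(4096, GFP_KERNEL);
	if (!backing) {
		gen_pool_destroy(pool);
		return -ENOMEM;
	}

	rc = gen_pool_add(pool, (unsigned long)backing, 4096, -1);
	if (rc)
		goto out;

	addr = gen_pool_alloc(pool, 256);	/* returns 0 on failure */
	if (addr) {
		pr_info("256 bytes at %#lx, %zu of %zu bytes still free\n",
			addr, gen_pool_avail(pool), gen_pool_size(pool));
		gen_pool_free(pool, addr, 256);
	}
	rc = 0;
out:
	gen_pool_destroy(pool);	/* all allocations must have been freed first */
	kfree(backing);		/* the pool never owns the backing memory */
	return rc;
}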