mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 02:35:16 +07:00
a254129e86
Currently, there are two users of CMA functionality, one is the DMA subsystem and the other is the KVM on powerpc. They have their own code to manage CMA reserved area even though they look really similar. From my guess, it is caused by some needs on bitmap management. KVM side wants to maintain bitmap not for 1 page, but for more size. Eventually it uses a bitmap where one bit represents 64 pages. When I implement CMA related patches, I should change those two places to apply my change and it seems painful to me. I want to change this situation and reduce future code management overhead through this patch. This change could also help developers who want to use CMA in their new feature development, since they can use CMA easily without copying & pasting this reserved area management code. In previous patches, we have prepared some features to generalize CMA reserved area management and now it's time to do it. This patch moves core functions to mm/cma.c and changes DMA APIs to use these functions. There is no functional change in DMA APIs. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Michal Nazarewicz <mina86@mina86.com> Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com> Acked-by: Minchan Kim <minchan@kernel.org> Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Alexander Graf <agraf@suse.de> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> Cc: Gleb Natapov <gleb@kernel.org> Acked-by: Marek Szyprowski <m.szyprowski@samsung.com> Tested-by: Marek Szyprowski <m.szyprowski@samsung.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Paul Mackerras <paulus@samba.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
165 lines
4.4 KiB
C
165 lines
4.4 KiB
C
#ifndef __LINUX_CMA_H
|
|
#define __LINUX_CMA_H
|
|
|
|
/*
|
|
* Contiguous Memory Allocator for DMA mapping framework
|
|
* Copyright (c) 2010-2011 by Samsung Electronics.
|
|
* Written by:
|
|
* Marek Szyprowski <m.szyprowski@samsung.com>
|
|
* Michal Nazarewicz <mina86@mina86.com>
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License as
|
|
* published by the Free Software Foundation; either version 2 of the
|
|
 * License, or (at your option) any later version of the license.
|
|
*/
|
|
|
|
/*
|
|
* Contiguous Memory Allocator
|
|
*
|
|
* The Contiguous Memory Allocator (CMA) makes it possible to
|
|
* allocate big contiguous chunks of memory after the system has
|
|
* booted.
|
|
*
|
|
* Why is it needed?
|
|
*
|
|
 * Various devices on embedded systems have no scatter-gather and/or
|
|
* IO map support and require contiguous blocks of memory to
|
|
* operate. They include devices such as cameras, hardware video
|
|
* coders, etc.
|
|
*
|
|
* Such devices often require big memory buffers (a full HD frame
|
|
 * is, for instance, more than 2 megapixels large, i.e. more than 6
|
|
* MB of memory), which makes mechanisms such as kmalloc() or
|
|
* alloc_page() ineffective.
|
|
*
|
|
* At the same time, a solution where a big memory region is
|
|
* reserved for a device is suboptimal since often more memory is
|
|
 * reserved than strictly required and, moreover, the memory is
|
|
* inaccessible to page system even if device drivers don't use it.
|
|
*
|
|
* CMA tries to solve this issue by operating on memory regions
|
|
* where only movable pages can be allocated from. This way, kernel
|
|
* can use the memory for pagecache and when device driver requests
|
|
* it, allocated pages can be migrated.
|
|
*
|
|
* Driver usage
|
|
*
|
|
* CMA should not be used by the device drivers directly. It is
|
|
* only a helper framework for dma-mapping subsystem.
|
|
*
|
|
* For more information, see kernel-docs in drivers/base/dma-contiguous.c
|
|
*/
|
|
|
|
#ifdef __KERNEL__
|
|
|
|
#include <linux/device.h>
|
|
|
|
struct cma;
|
|
struct page;
|
|
|
|
#ifdef CONFIG_DMA_CMA
|
|
|
|
extern struct cma *dma_contiguous_default_area;
|
|
|
|
static inline struct cma *dev_get_cma_area(struct device *dev)
|
|
{
|
|
if (dev && dev->cma_area)
|
|
return dev->cma_area;
|
|
return dma_contiguous_default_area;
|
|
}
|
|
|
|
static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
|
|
{
|
|
if (dev)
|
|
dev->cma_area = cma;
|
|
}
|
|
|
|
/*
 * dma_contiguous_set_default() - install @cma as the system-wide default
 * CMA area, used by dev_get_cma_area() for devices without a private one.
 */
static inline void dma_contiguous_set_default(struct cma *cma)
{
	dma_contiguous_default_area = cma;
}
|
|
|
|
/* Reserve the default contiguous memory area, limited to @addr_limit. */
void dma_contiguous_reserve(phys_addr_t addr_limit);

/*
 * Reserve a contiguous area of @size bytes starting at @base (0 for any
 * address) below @limit; on success *@res_cma refers to the new area.
 * NOTE(review): @fixed presumably demands the exact @base placement —
 * confirm against the definition in drivers/base/dma-contiguous.c.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed);
|
|
|
|
/**
|
|
* dma_declare_contiguous() - reserve area for contiguous memory handling
|
|
* for particular device
|
|
* @dev: Pointer to device structure.
|
|
* @size: Size of the reserved memory.
|
|
* @base: Start address of the reserved memory (optional, 0 for any).
|
|
* @limit: End address of the reserved memory (optional, 0 for any).
|
|
*
|
|
* This function reserves memory for specified device. It should be
|
|
* called by board specific code when early allocator (memblock or bootmem)
|
|
 * is still active.
|
|
*/
|
|
|
|
static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
					 phys_addr_t base, phys_addr_t limit)
{
	struct cma *area;
	/* fixed=true: the reservation must honour the requested base. */
	int err = dma_contiguous_reserve_area(size, base, limit, &area, true);

	if (err == 0)
		dev_set_cma_area(dev, area);

	return err;
}
|
|
|
|
/*
 * Allocate @count contiguous pages from @dev's CMA area.
 * NOTE(review): @order presumably specifies the alignment order of the
 * allocation — confirm in drivers/base/dma-contiguous.c.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int order);
/* Return @count pages at @pages to @dev's CMA area; false if not from CMA. */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count);
|
|
|
|
#else
|
|
|
|
/* CONFIG_DMA_CMA disabled: no CMA area ever exists for any device. */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
	return NULL;
}
|
|
|
|
/* No-op stub when CONFIG_DMA_CMA is disabled. */
static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
|
|
|
|
/* No-op stub when CONFIG_DMA_CMA is disabled. */
static inline void dma_contiguous_set_default(struct cma *cma) { }
|
|
|
|
/* No-op stub when CONFIG_DMA_CMA is disabled. */
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
|
|
|
|
/* Stub: CMA reservation is unsupported when CONFIG_DMA_CMA is disabled. */
static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
					      phys_addr_t limit, struct cma **res_cma,
					      bool fixed)
{
	return -ENOSYS;
}
|
|
|
|
/* Stub: per-device CMA declaration always fails without CONFIG_DMA_CMA. */
static inline
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
			   phys_addr_t base, phys_addr_t limit)
{
	return -ENOSYS;
}
|
|
|
|
/* Stub: no contiguous allocation possible without CONFIG_DMA_CMA. */
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int order)
{
	return NULL;
}
|
|
|
|
/* Stub: nothing was ever allocated from CMA, so releasing always fails. */
static inline
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return false;
}
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
#endif
|