dma-direct: always align allocation size in dma_direct_alloc_pages()

dma_alloc_contiguous() does size >> PAGE_SHIFT and set_memory_decrypted()
works at page granularity.  It's necessary to page align the allocation
size in dma_direct_alloc_pages() for consistent behavior.
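
To see the mismatch concretely: with 4 KiB pages, a 6144-byte (1.5-page) request truncates to one page on the CMA path but rounds up to two pages on the buddy fallback. A minimal userspace sketch, not kernel code (PAGE_SHIFT and the get_order() helper below are simplified stand-ins for the kernel's definitions):

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* stand-in for the kernel's get_order(): smallest order whose
     * block of pages covers size */
    static int get_order(unsigned long size)
    {
            int order = 0;

            while ((PAGE_SIZE << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long size = 6144;  /* 1.5 pages: an unaligned request */

            /* dma_alloc_contiguous() derives its page count by truncation... */
            printf("CMA path pages:      %lu\n", size >> PAGE_SHIFT);      /* 1 */
            /* ...while the buddy fallback rounds the order up... */
            printf("fallback path pages: %lu\n", 1UL << get_order(size));  /* 2 */
            /* ...so page-granular operations such as set_memory_decrypted()
             * see different extents unless size is aligned up front */
            printf("aligned pages:       %lu\n", PAGE_ALIGN(size) >> PAGE_SHIFT);
            return 0;
    }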

This also fixes an issue when arch_dma_prep_coherent() is called on an
unaligned allocation size for dma_alloc_need_uncached() when
CONFIG_DMA_DIRECT_REMAP is disabled but CONFIG_ARCH_HAS_DMA_SET_UNCACHED
is enabled.
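
For reference, an abridged sketch of the affected branch (paraphrased from kernel/dma/direct.c of this era; the exact surrounding code and error handling are elided and should be treated as an assumption):

    if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
        dma_alloc_need_uncached(dev, attrs)) {
            /* with an unaligned size, this cache maintenance stops short
             * of the end of the last page actually allocated... */
            arch_dma_prep_coherent(page, size);
            /* ...yet the kernel alias is switched to uncached anyway, so
             * dirty lines left in the tail can later corrupt the buffer */
            ret = arch_dma_set_uncached(ret, size);
    }

With size page-aligned at the top of dma_direct_alloc_pages(), both calls cover the full allocation.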

Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit 633d5fce78
parent 26749b3201
Author: David Rientjes <rientjes@google.com>
Date:   2020-06-11 12:20:28 -07:00
Committer: Christoph Hellwig <hch@lst.de>

kernel/dma/direct.c

@@ -112,11 +112,12 @@ static inline bool dma_should_free_from_pool(struct device *dev,
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
-	size_t alloc_size = PAGE_ALIGN(size);
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	u64 phys_limit;
 
+	WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
 	if (attrs & DMA_ATTR_NO_WARN)
 		gfp |= __GFP_NOWARN;
 
@@ -124,14 +125,14 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp &= ~__GFP_ZERO;
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 			&phys_limit);
-	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, alloc_size);
+		dma_free_contiguous(dev, page, size);
 		page = NULL;
 	}
 again:
 	if (!page)
-		page = alloc_pages_node(node, gfp, get_order(alloc_size));
+		page = alloc_pages_node(node, gfp, get_order(size));
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
@@ -158,8 +159,10 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
+	size = PAGE_ALIGN(size);
+
 	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+		ret = dma_alloc_from_pool(dev, size, &page, gfp);
 		if (!ret)
 			return NULL;
 		goto done;
@@ -183,10 +186,10 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	    dma_alloc_need_uncached(dev, attrs)) ||
 	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
 		/* remove any dirty cache lines on the kernel alias */
-		arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+		arch_dma_prep_coherent(page, size);
 
 		/* create a coherent mapping */
-		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+		ret = dma_common_contiguous_remap(page, size,
 				dma_pgprot(dev, PAGE_KERNEL, attrs),
 				__builtin_return_address(0));
 		if (!ret)