mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
8a7f97b902
Add check for the return value of memblock_alloc*() functions and call panic() in case of error. The panic message repeats the one used by panicing memblock allocators with adjustment of parameters to include only relevant ones. The replacement was mostly automated with semantic patches like the one below with manual massaging of format strings. @@ expression ptr, size, align; @@ ptr = memblock_alloc(size, align); + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, size, align); [anders.roxell@linaro.org: use '%pa' with 'phys_addr_t' type] Link: http://lkml.kernel.org/r/20190131161046.21886-1-anders.roxell@linaro.org [rppt@linux.ibm.com: fix format strings for panics after memblock_alloc] Link: http://lkml.kernel.org/r/1548950940-15145-1-git-send-email-rppt@linux.ibm.com [rppt@linux.ibm.com: don't panic if the allocation in sparse_buffer_init fails] Link: http://lkml.kernel.org/r/20190131074018.GD28876@rapoport-lnx [akpm@linux-foundation.org: fix xtensa printk warning] Link: http://lkml.kernel.org/r/1548057848-15136-20-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Anders Roxell <anders.roxell@linaro.org> Reviewed-by: Guo Ren <ren_guo@c-sky.com> [c-sky] Acked-by: Paul Burton <paul.burton@mips.com> [MIPS] Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com> [s390] Reviewed-by: Juergen Gross <jgross@suse.com> [Xen] Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k] Acked-by: Max Filippov <jcmvbkbc@gmail.com> [xtensa] Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christophe Leroy <christophe.leroy@c-s.fr> Cc: Christoph Hellwig <hch@lst.de> Cc: "David S. 
Miller" <davem@davemloft.net> Cc: Dennis Zhou <dennis@kernel.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Mark Salter <msalter@redhat.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Simek <monstr@monstr.eu> Cc: Petr Mladek <pmladek@suse.com> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Rob Herring <robh+dt@kernel.org> Cc: Rob Herring <robh@kernel.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Tony Luck <tony.luck@intel.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
178 lines
4.0 KiB
C
178 lines
4.0 KiB
C
/*
|
|
* Port on Texas Instruments TMS320C6x architecture
|
|
*
|
|
* Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
|
|
* Author: Aurelien Jacquiot <aurelien.jacquiot@ti.com>
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* DMA uncached mapping support.
|
|
*
|
|
* Using code pulled from ARM
|
|
* Copyright (C) 2000-2004 Russell King
|
|
*
|
|
*/
|
|
#include <linux/slab.h>
|
|
#include <linux/bitmap.h>
|
|
#include <linux/bitops.h>
|
|
#include <linux/module.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/dma-noncoherent.h>
|
|
#include <linux/memblock.h>
|
|
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/page.h>
|
|
#include <asm/setup.h>
|
|
|
|
/*
|
|
* DMA coherent memory management, can be redefined using the memdma=
|
|
* kernel command line
|
|
*/
|
|
|
|
/* none by default */
|
|
static phys_addr_t dma_base;
|
|
static u32 dma_size;
|
|
static u32 dma_pages;
|
|
|
|
static unsigned long *dma_bitmap;
|
|
|
|
/* bitmap lock */
|
|
static DEFINE_SPINLOCK(dma_lock);
|
|
|
|
/*
|
|
* Return a DMA coherent and contiguous memory chunk from the DMA memory
|
|
*/
|
|
static inline u32 __alloc_dma_pages(int order)
|
|
{
|
|
unsigned long flags;
|
|
u32 pos;
|
|
|
|
spin_lock_irqsave(&dma_lock, flags);
|
|
pos = bitmap_find_free_region(dma_bitmap, dma_pages, order);
|
|
spin_unlock_irqrestore(&dma_lock, flags);
|
|
|
|
return dma_base + (pos << PAGE_SHIFT);
|
|
}
|
|
|
|
/*
 * Release a chunk previously handed out by __alloc_dma_pages().
 *
 * @addr:  physical address of the chunk (must lie within the DMA pool)
 * @order: same order that was passed to __alloc_dma_pages()
 */
static void __free_dma_pages(u32 addr, int order)
{
	unsigned long flags;
	u32 pos = (addr - dma_base) >> PAGE_SHIFT;

	/*
	 * A region ending exactly at dma_pages is the last valid chunk of
	 * the pool, so only reject regions extending *past* it.  The old
	 * ">=" test wrongly BUG()ed when freeing that final chunk.
	 */
	if (addr < dma_base || (pos + (1 << order)) > dma_pages) {
		printk(KERN_ERR "%s: freeing outside range.\n", __func__);
		BUG();
	}

	spin_lock_irqsave(&dma_lock, flags);
	bitmap_release_region(dma_bitmap, pos, order);
	spin_unlock_irqrestore(&dma_lock, flags);
}
|
|
|
|
/*
|
|
* Allocate DMA coherent memory space and return both the kernel
|
|
* virtual and DMA address for that space.
|
|
*/
|
|
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
|
|
gfp_t gfp, unsigned long attrs)
|
|
{
|
|
void *ret;
|
|
u32 paddr;
|
|
int order;
|
|
|
|
if (!dma_size || !size)
|
|
return NULL;
|
|
|
|
order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
|
|
|
|
paddr = __alloc_dma_pages(order);
|
|
|
|
if (handle)
|
|
*handle = paddr;
|
|
|
|
if (!paddr)
|
|
return NULL;
|
|
|
|
ret = phys_to_virt(paddr);
|
|
memset(ret, 0, 1 << order);
|
|
return ret;
|
|
}
|
|
|
|
/*
|
|
* Free DMA coherent memory as defined by the above mapping.
|
|
*/
|
|
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
|
|
dma_addr_t dma_handle, unsigned long attrs)
|
|
{
|
|
int order;
|
|
|
|
if (!dma_size || !size)
|
|
return;
|
|
|
|
order = get_count_order(((size - 1) >> PAGE_SHIFT) + 1);
|
|
|
|
__free_dma_pages(virt_to_phys(vaddr), order);
|
|
}
|
|
|
|
/*
|
|
* Initialise the coherent DMA memory allocator using the given uncached region.
|
|
*/
|
|
/*
 * Initialise the coherent DMA memory allocator using the given uncached
 * region.
 *
 * @start: physical base address of the uncached region
 * @size:  region size in bytes; 0 leaves coherent DMA disabled
 */
void __init coherent_mem_init(phys_addr_t start, u32 size)
{
	if (!size)
		return;

	/*
	 * phys_addr_t must be printed with %pa (by reference); the old
	 * %x specifier is wrong when phys_addr_t is 64-bit.
	 */
	printk(KERN_INFO
	       "Coherent memory (DMA) region start=%pa size=0x%x\n",
	       &start, size);

	dma_base = start;
	dma_size = size;

	/* One bitmap bit per page; round a partial trailing page up. */
	dma_pages = dma_size >> PAGE_SHIFT;
	if (dma_size & (PAGE_SIZE - 1))
		++dma_pages;

	dma_bitmap = memblock_alloc(BITS_TO_LONGS(dma_pages) * sizeof(long),
				    sizeof(long));
	if (!dma_bitmap)
		panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
		      __func__, BITS_TO_LONGS(dma_pages) * sizeof(long),
		      sizeof(long));
}
|
|
|
|
/*
 * Perform the L2 cache maintenance required for a DMA transfer over
 * [paddr, paddr + size) in the given direction.
 */
static void c6x_dma_sync(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	phys_addr_t end = paddr + size;

	BUG_ON(!valid_dma_direction(dir));

	if (dir == DMA_FROM_DEVICE)
		L2_cache_block_invalidate(paddr, end);
	else if (dir == DMA_TO_DEVICE)
		L2_cache_block_writeback(paddr, end);
	else if (dir == DMA_BIDIRECTIONAL)
		L2_cache_block_writeback_invalidate(paddr, end);
	/* other directions: nothing to do (matches old default: break) */
}
|
|
|
|
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
|
|
size_t size, enum dma_data_direction dir)
|
|
{
|
|
return c6x_dma_sync(dev, paddr, size, dir);
|
|
}
|
|
|
|
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
|
|
size_t size, enum dma_data_direction dir)
|
|
{
|
|
return c6x_dma_sync(dev, paddr, size, dir);
|
|
}
|