mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-03 04:06:43 +07:00)
a6eb9fe105
Now each architecture has its own dma_get_cache_alignment implementation. dma_get_cache_alignment returns the minimum DMA alignment. Architectures define it as ARCH_KMALLOC_MINALIGN (it's used to make sure that a kmalloc'ed buffer is DMA-safe; the buffer doesn't share a cache line with others). So we can unify the dma_get_cache_alignment implementations.

This patch: dma_get_cache_alignment() needs to know whether an architecture defines ARCH_KMALLOC_MINALIGN or not (i.e., whether the architecture has a DMA alignment restriction). However, slab.h defines ARCH_KMALLOC_MINALIGN itself when an architecture doesn't. So let's rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN; ARCH_KMALLOC_MINALIGN is used only in the internals of slab/slob/slub (except for crypto).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
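For context, a minimal sketch of what the unified helper described above can look like after the rename. The placement (include/linux/dma-mapping.h) and the slab.h fallback shown here are assumptions drawn from the commit description, not a quote of the patch:

/*
 * Sketch of a unified dma_get_cache_alignment(), assuming a generic
 * definition (e.g. in include/linux/dma-mapping.h) that keys off
 * ARCH_DMA_MINALIGN, the renamed ARCH_KMALLOC_MINALIGN.
 */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;	/* arch has a DMA alignment restriction */
#endif
	return 1;			/* no restriction */
}

/*
 * slab.h can then keep kmalloc's minimum alignment DMA-safe on such
 * architectures while choosing its own default elsewhere (sketch):
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

Architectures with a restriction, like Blackfin in the file below, simply define ARCH_DMA_MINALIGN in their asm/cache.h.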
69 lines · 1.4 KiB · C
/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __ARCH_BLACKFIN_CACHE_H
#define __ARCH_BLACKFIN_CACHE_H

/*
 * Bytes per L1 cache line
 * Blackfin loads 32 bytes for cache
 */
#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
#define SMP_CACHE_BYTES	L1_CACHE_BYTES

#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SMP
#define __cacheline_aligned
#else
#define ____cacheline_aligned

/*
 * Put cacheline_aligned data to L1 data memory
 */
#ifdef CONFIG_CACHELINE_ALIGNED_L1
#define __cacheline_aligned				\
	__attribute__((__aligned__(L1_CACHE_BYTES),	\
		__section__(".data_l1.cacheline_aligned")))
#endif

#endif
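
/*
 * Usage sketch (illustrative, not part of the original header): with
 * CONFIG_CACHELINE_ALIGNED_L1=y, a hypothetical declaration like
 *
 *	static struct foo shared_state __cacheline_aligned;
 *
 * is both aligned to L1_CACHE_BYTES and placed in the
 * .data_l1.cacheline_aligned section, i.e. in on-chip L1 data memory.
 */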

/*
 * largest L1 which this arch supports
 */
#define L1_CACHE_SHIFT_MAX	5

#if defined(CONFIG_SMP) && \
    !defined(CONFIG_BFIN_CACHE_COHERENT)
# if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) || defined(CONFIG_BFIN_L2_ICACHEABLE)
# define __ARCH_SYNC_CORE_ICACHE
# endif
# if defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) || defined(CONFIG_BFIN_L2_DCACHEABLE)
# define __ARCH_SYNC_CORE_DCACHE
# endif

#ifndef __ASSEMBLY__
asmlinkage void __raw_smp_mark_barrier_asm(void);
asmlinkage void __raw_smp_check_barrier_asm(void);

static inline void smp_mark_barrier(void)
{
	__raw_smp_mark_barrier_asm();
}

static inline void smp_check_barrier(void)
{
	__raw_smp_check_barrier_asm();
}

void resync_core_dcache(void);
void resync_core_icache(void);
#endif /* __ASSEMBLY__ */

#endif /* CONFIG_SMP && !CONFIG_BFIN_CACHE_COHERENT */

#endif /* __ARCH_BLACKFIN_CACHE_H */