Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-12 23:46:40 +07:00
a6eb9fe105
Each architecture currently has its own dma_get_cache_alignment() implementation. dma_get_cache_alignment() returns the minimum DMA alignment, and architectures define it as ARCH_KMALLOC_MINALIGN (which is used to make sure that a malloc'ed buffer is DMA-safe; the buffer doesn't share a cache line with others). So the dma_get_cache_alignment() implementations can be unified.

This patch: dma_get_cache_alignment() needs to know whether an architecture defines ARCH_KMALLOC_MINALIGN (i.e. whether the architecture has a DMA alignment restriction). However, slab.h defines ARCH_KMALLOC_MINALIGN itself if an architecture doesn't. Let's rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN; ARCH_KMALLOC_MINALIGN is used only in the internals of slab/slob/slub (except for crypto).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
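For context, a minimal sketch (not taken from this commit) of the kind of unified generic helper this rename makes possible once architectures export ARCH_DMA_MINALIGN; the fallback of 1 for architectures without a DMA alignment restriction is assumed here for illustration:

/* sketch of a unified dma_get_cache_alignment(), assuming a fallback of 1 */
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	/* architecture has a DMA alignment restriction */
	return ARCH_DMA_MINALIGN;
#endif
	return 1;	/* no restriction: byte alignment is enough */
}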
57 lines
2.0 KiB
C
/* MN10300 cache management registers
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_CACHE_H
#define _ASM_CACHE_H

#include <asm/cpu-regs.h>
#include <proc/cache.h>

#ifndef __ASSEMBLY__
#define L1_CACHE_DISPARITY	(L1_CACHE_NENTRIES * L1_CACHE_BYTES)
#else
#define L1_CACHE_DISPARITY	L1_CACHE_NENTRIES * L1_CACHE_BYTES
#endif

#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

/* data cache purge registers
 * - read from the register to unconditionally purge that cache line
 * - write address & 0xffffff00 to conditionally purge that cache line
 *   - clear LSB to request invalidation as well
 */
#define DCACHE_PURGE(WAY, ENTRY) \
	__SYSREG(0xc8400000 + (WAY) * L1_CACHE_WAYDISP + \
		 (ENTRY) * L1_CACHE_BYTES, u32)

#define DCACHE_PURGE_WAY0(ENTRY) \
	__SYSREG(0xc8400000 + 0 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY1(ENTRY) \
	__SYSREG(0xc8400000 + 1 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY2(ENTRY) \
	__SYSREG(0xc8400000 + 2 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)
#define DCACHE_PURGE_WAY3(ENTRY) \
	__SYSREG(0xc8400000 + 3 * L1_CACHE_WAYDISP + (ENTRY) * L1_CACHE_BYTES, u32)

/* instruction cache access registers */
#define ICACHE_DATA(WAY, ENTRY, OFF) \
	__SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
#define ICACHE_TAG(WAY, ENTRY) \
	__SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)

/* data cache access registers */
#define DCACHE_DATA(WAY, ENTRY, OFF) \
	__SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
#define DCACHE_TAG(WAY, ENTRY) \
	__SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)

#endif /* _ASM_CACHE_H */
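To illustrate the purge-register comment above, here is a hypothetical sketch (not part of cache.h) of driving the purge registers from C, using only the documented read-to-purge behaviour. It assumes L1_CACHE_NWAYS is provided by <proc/cache.h> and that __SYSREG expands to a volatile MMIO access, as the register macros in this header imply; the architecture's real cache-flush routines are the authoritative users.

/*
 * Hypothetical illustration only, not part of this header:
 * unconditionally purge every data cache line by reading each
 * purge register once ("read from the register to unconditionally
 * purge that cache line"). L1_CACHE_NWAYS is assumed to come from
 * <proc/cache.h>.
 */
static inline void dcache_purge_all_lines(void)
{
	unsigned int way, entry;

	for (way = 0; way < L1_CACHE_NWAYS; way++)
		for (entry = 0; entry < L1_CACHE_NENTRIES; entry++)
			(void) DCACHE_PURGE(way, entry);
}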