linux_dsm_epyc7002/arch/arm/mm/cache-fa.S
Mika Westerberg c8c90860cd ARM: 6466/1: implement flush_icache_all for the rest of the CPUs
Commit 81d11955bf ("ARM: 6405/1: Handle __flush_icache_all for
CONFIG_SMP_ON_UP") added a new function to struct cpu_cache_fns:
flush_icache_all(). It also implemented this for v6 and v7 but not
for v5 and earlier. Without the function pointer in place, we
end up calling the wrong cache functions.
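
To see why, here is a simplified sketch of how the function-pointer
table is laid out once flush_icache_all() becomes the first member
(abbreviated from asm/cacheflush.h of this era):

    struct cpu_cache_fns {
            void (*flush_icache_all)(void);         /* the new entry */
            void (*flush_kern_all)(void);
            void (*flush_user_all)(void);
            void (*flush_user_range)(unsigned long, unsigned long,
                                     unsigned int);
            void (*coherent_kern_range)(unsigned long, unsigned long);
            void (*coherent_user_range)(unsigned long, unsigned long);
            void (*flush_kern_dcache_area)(void *, size_t);
            void (*dma_map_area)(const void *, size_t, int);
            void (*dma_unmap_area)(const void *, size_t, int);
            void (*dma_flush_range)(const void *, const void *);
    };

A per-CPU table that omits the first entry fills each slot with the
pointer that belongs one slot later, leaving the final slot holding
whatever word follows the table in memory.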

For example, with ep93xx we get the following:

    Unable to handle kernel paging request at virtual address ee070f38
    pgd = c0004000
    [ee070f38] *pgd=00000000
    Internal error: Oops: 80000005 [#1] PREEMPT
    last sysfs file:
    Modules linked in:
    CPU: 0    Not tainted  (2.6.36+ #1)
    PC is at 0xee070f38
    LR is at __dma_alloc+0x11c/0x2d0
    pc : [<ee070f38>]    lr : [<c0032c8c>]    psr: 60000013
    sp : c581bde0  ip : 00000000  fp : c0472000
    r10: c0472000  r9 : 000000d0  r8 : 00020000
    r7 : 0001ffff  r6 : 00000000  r5 : c0472400  r4 : c5980000
    r3 : c03ab7e0  r2 : 00000000  r1 : c59a0000  r0 : c5980000
    Flags: nZCv  IRQs on  FIQs on  Mode SVC_32  ISA ARM  Segment kernel
    Control: c000717f  Table: c0004000  DAC: 00000017
    Process swapper (pid: 1, stack limit = 0xc581a270)
    [<c0032c8c>] (__dma_alloc+0x11c/0x2d0)
    [<c0032e5c>] (dma_alloc_writecombine+0x1c/0x24)
    [<c0204148>] (ep93xx_pcm_preallocate_dma_buffer+0x44/0x60)
    [<c02041c0>] (ep93xx_pcm_new+0x5c/0x88)
    [<c01ff188>] (snd_soc_instantiate_cards+0x8a8/0xbc0)
    [<c01ff59c>] (soc_probe+0xfc/0x134)
    [<c01adafc>] (platform_drv_probe+0x18/0x1c)
    [<c01acca4>] (driver_probe_device+0xb0/0x16c)
    [<c01ac284>] (bus_for_each_drv+0x48/0x84)
    [<c01ace90>] (device_attach+0x50/0x68)
    [<c01ac0f8>] (bus_probe_device+0x24/0x44)
    [<c01aad7c>] (device_add+0x2fc/0x44c)
    [<c01adfa8>] (platform_device_add+0x104/0x15c)
    [<c0015eb8>] (simone_init+0x60/0x94)
    [<c0021410>] (do_one_initcall+0xd0/0x1a4)

__dma_alloc() calls the inlined __dma_alloc_buffer(), which ends up
calling dmac_flush_range(). Since the entries in arm920_cache_fns
are shifted by one, we jump to address 0xee070f38, which is actually
the next instruction after the arm920_cache_fns structure.
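
With multiple cache types compiled in, dmac_flush_range() is just a
dispatch through that table; roughly (simplified from
asm/cacheflush.h):

    extern struct cpu_cache_fns cpu_cache;

    #define dmac_flush_range        cpu_cache.dma_flush_range

Because arm920_cache_fns lacked the new first entry, its
dma_flush_range slot held the word just past the end of the table,
which is why the PC in the oops lands immediately after
arm920_cache_fns.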

So implement flush_icache_all() for the rest of the supported CPUs
using a generic 'invalidate I cache' instruction.

Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-10-28 13:54:28 +01:00

/*
 * linux/arch/arm/mm/cache-fa.S
 *
 * Copyright (C) 2005 Faraday Corp.
 * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * Based on cache-v4wb.S:
 * Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Processors: FA520 FA526 FA626
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 16

/*
 * The total size of the data cache.
 */
#ifdef CONFIG_ARCH_GEMINI
#define CACHE_DSIZE 8192
#else
#define CACHE_DSIZE 16384
#endif

/* FIXME: put optimal value here. Current one is just estimation */
#define CACHE_DLIMIT (CACHE_DSIZE * 2)

/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.
 */
ENTRY(fa_flush_icache_all)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        mov     pc, lr
ENDPROC(fa_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular address
 * space.
 */
ENTRY(fa_flush_user_cache_all)
        /* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(fa_flush_kern_cache_all)
        mov     ip, #0
        mov     r2, #VM_EXEC                    @ also flush I cache below
__flush_whole_cache:
        mcr     p15, 0, ip, c7, c14, 0          @ clean/invalidate D cache
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcrne   p15, 0, ip, c7, c5, 6           @ invalidate BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ drain write buffer
        mcrne   p15, 0, ip, c7, c5, 4           @ prefetch flush
        mov     pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(fa_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #CACHE_DLIMIT               @ total size >= limit?
        bhs     __flush_whole_cache             @ flush whole D cache
1:      tst     r2, #VM_EXEC
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I line
        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 6           @ invalidate BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ drain write buffer
        mcrne   p15, 0, ip, c7, c5, 4           @ prefetch flush
        mov     pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start and end. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(fa_coherent_kern_range)
        /* FALLTHROUGH */
/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start and end. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(fa_coherent_user_range)
        bic     r0, r0, #CACHE_DLINESIZE - 1    @ align start to a cache line
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
        mcr     p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 6           @ invalidate BTB
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mcr     p15, 0, r0, c7, c5, 4           @ prefetch flush
        mov     pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure that the data held in the region described by addr and
 * size is written back to memory.
 *
 * - addr - kernel address
 * - size - size of region
 */
ENTRY(fa_flush_kern_dcache_area)
        add     r1, r0, r1                      @ end = addr + size
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries. If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
fa_dma_inv_range:
        tst     r0, #CACHE_DLINESIZE - 1        @ start unaligned?
        bic     r0, r0, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D entry
        tst     r1, #CACHE_DLINESIZE - 1        @ end unaligned?
        bic     r1, r1, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D entry
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean (write back) the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
fa_dma_clean_range:
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(fa_dma_flush_range)
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer
        mov     pc, lr

/*
 * dma_map_area(start, size, dir)
 *
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(fa_dma_map_area)
        add     r1, r1, r0                      @ end = start + size
        cmp     r2, #DMA_TO_DEVICE
        beq     fa_dma_clean_range              @ DMA_TO_DEVICE: clean only
        bcs     fa_dma_inv_range                @ DMA_FROM_DEVICE: invalidate only
        b       fa_dma_flush_range              @ DMA_BIDIRECTIONAL: clean + invalidate
ENDPROC(fa_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 *
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(fa_dma_unmap_area)
        mov     pc, lr                          @ nothing to do on unmap
ENDPROC(fa_dma_unmap_area)

        __INITDATA

        .type   fa_cache_fns, #object
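/*
 * The order of these entries must match the layout of struct
 * cpu_cache_fns exactly; a missing or misplaced entry shifts every
 * later pointer into the wrong slot (see the commit log above for
 * the resulting oops).
 */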
ENTRY(fa_cache_fns)
        .long   fa_flush_icache_all
        .long   fa_flush_kern_cache_all
        .long   fa_flush_user_cache_all
        .long   fa_flush_user_cache_range
        .long   fa_coherent_kern_range
        .long   fa_coherent_user_range
        .long   fa_flush_kern_dcache_area
        .long   fa_dma_map_area
        .long   fa_dma_unmap_area
        .long   fa_dma_flush_range
        .size   fa_cache_fns, . - fa_cache_fns