4d46a67a3e
The lazy cache flushing implemented in the MIPS kernel suffers from a
race condition that is exposed by do_set_pte() in mm/memory.c.

A pre-condition is a file system that writes to the page from the CPU
in its readpage method and then calls flush_dcache_page(). One example
is ubifs. Another pre-condition is that the dcache flush is postponed
in __flush_dcache_page().

Upon a page fault for an executable mapping not existing in the page
cache, the following will happen:

1. Write to the page
2. flush_dcache_page
3. flush_icache_page
4. set_pte_at
5. update_mmu_cache (commits the flush of a dcache-dirty page)

Between steps 4 and 5, another thread can hit the same page and it will
encounter a valid pte. Because the data is still in the L1 dcache, the
CPU will fetch stale data from L2 into the icache and execute garbage.

This fix moves the commit of the cache flush to step 3 to close the
race window. It also reduces the number of flushes on non-executable
mappings, because we never enter __flush_dcache_page() for non-aliasing
CPUs.

Regressions can occur in drivers that mistakenly rely on the
flush_dcache_page() in get_user_pages() for DMA operations.

[ralf@linux-mips.org: Folded in patch 9346 to fix highmem issue.]

Signed-off-by: Lars Persson <larper@axis.com>
Cc: linux-mips@linux-mips.org
Cc: paul.burton@imgtec.com
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/9346/
Patchwork: https://patchwork.linux-mips.org/patch/9738/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
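To make the ordering concrete, here is a minimal sketch of the fault
path the message describes. It is illustrative only, not the actual
mm/memory.c code: the helper name fault_in_exec_page and its argument
list are hypothetical, it assumes kernel-internal headers and types,
and only the five numbered steps and the call names follow the commit
text.

/* Hypothetical, simplified sketch of the do_set_pte() flow above. */
static void fault_in_exec_page(struct vm_area_struct *vma, unsigned long addr,
			       struct page *page, pte_t *ptep, pte_t entry)
{
	/* Steps 1-2: the filesystem (e.g. ubifs) has written the page and
	 * called flush_dcache_page(); on non-aliasing CPUs this only
	 * marked the page dcache-dirty. */

	/* Step 3: with this fix, the deferred dcache flush is committed
	 * here, while the pte is still invalid. */
	flush_icache_page(vma, page);

	/* Step 4: only now does the pte become visible to other threads. */
	set_pte_at(vma->vm_mm, addr, ptep, entry);

	/* Step 5: previously the flush was committed here; between steps 4
	 * and 5 another thread could execute stale icache contents. */
	update_mmu_cache(vma, addr, ptep);
}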
156 lines
4.9 KiB
C
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_cache_sigtramp() flushes the signal trampoline
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 */

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);

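/*
 * Lazy flushing: on aliasing CPUs __flush_dcache_page() may postpone
 * the actual flush; on non-aliasing CPUs the page is merely marked
 * dcache-dirty. Either way, the deferred flush is committed by
 * flush_icache_page() before an executable pte becomes valid (see the
 * commit message above).
 */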
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

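/*
 * Anonymous pages can be mapped at a user virtual address whose cache
 * color differs from the kernel mapping, so they need flushing only
 * when dcache aliases are possible.
 */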
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

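/*
 * This is where the race fix lands: a dcache-dirty page backing an
 * executable mapping is flushed here, before set_pte_at() publishes
 * the pte, rather than later in update_mmu_cache().
 */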
static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
	    Page_dcache_dirty(page)) {
		__flush_icache_page(vma, page);
		ClearPageDcacheDirty(page);
	}
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);

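/*
 * vmalloc/vmap mappings may alias a page's other mappings when the
 * dcache is virtually indexed, hence the flush on aliasing CPUs.
 */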
extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void *addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

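/*
 * kmap_coherent() establishes a temporary kernel mapping chosen so it
 * does not alias the page's user mapping; kmap_noncoherent() makes no
 * such guarantee.
 */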
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}

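/*
 * No writeback is done here; the BUG_ON documents that highmem pages
 * cannot be handled through this path on CPUs with dcache aliases.
 */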
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

#endif /* _ASM_CACHEFLUSH_H */