Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-23 17:22:01 +07:00
Commit 00a9730e10: This patch adds the cache and TLB sync code for abiv1 & abiv2. Signed-off-by: Guo Ren <ren_guo@c-sky.com> Reviewed-by: Arnd Bergmann <arnd@arndb.de>
53 lines · 1.2 KiB · C
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
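
/*
 * Arch hook called by the core mm after the kernel writes to a
 * page-cache page. If the page has no user mappings yet, the
 * writeback is deferred: the page is only tagged with PG_arch_1
 * here, and the cache is synchronized later, once the page gets
 * mapped into userspace (see update_mmu_cache() below).
 */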
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long addr;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_arch_1, &(page)->flags);
                return;
        }

        /*
         * We could delay the flush for the !page_mapping case too. But that
         * case is for exec env/arg pages and those are %99 certainly going to
         * get faulted into the tlb (and thus flushed) anyways.
         */
        addr = (unsigned long) page_address(page);
        dcache_wb_range(addr, addr + PAGE_SIZE);
}
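
/*
 * Arch hook called after a PTE for @address has been installed. This
 * is where a flush deferred by flush_dcache_page() gets resolved: if
 * the mapping is executable, or the kernel and user addresses of the
 * page may alias in the (virtually indexed) data cache, write back
 * and invalidate the whole cache, then drop the PG_arch_1 mark.
 */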
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *pte)
{
        unsigned long addr;
        struct page *page;
        unsigned long pfn;

        pfn = pte_pfn(*pte);
        if (unlikely(!pfn_valid(pfn)))
                return;

        page = pfn_to_page(pfn);
        addr = (unsigned long) page_address(page);

        if (vma->vm_flags & VM_EXEC ||
            pages_do_alias(addr, address & PAGE_MASK))
                cache_wbinv_all();

        clear_bit(PG_arch_1, &(page)->flags);
}
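
For reference, pages_do_alias() comes from the arch headers and is not shown in this file. Below is a minimal sketch of the usual shape of such a check on a virtually indexed cache; CACHE_WAY_SIZE and ALIAS_MASK are illustrative assumptions, not the csky definitions. Two page-aligned addresses alias when they differ in the cache-index bits above the page offset.

/* Illustrative sketch only, not the csky header. Assume each cache
 * way spans 16 KiB, i.e. four 4 KiB pages share the same index range. */
#define CACHE_WAY_SIZE  0x4000UL
#define ALIAS_MASK      (CACHE_WAY_SIZE - 1UL)

/* Nonzero result: the two virtual addresses index different cache
 * sets for the same physical page, so stale copies can coexist and
 * the page must be flushed. */
#define pages_do_alias(addr1, addr2) \
        ((((addr1) ^ (addr2)) & ALIAS_MASK & PAGE_MASK) != 0)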