4b3073e1c5
MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself

On VIVT ARM, when we have multiple shared mappings of the same file in the same MM, we need to ensure that we have coherency across all copies. We do this via make_coherent() by making the pages uncacheable.

This used to work fine, until we allowed highmem with highpte - we now have a page table which is mapped as required, and is not available for modification via update_mmu_cache().

Ralf Baechle suggested getting rid of the PTE value passed to update_mmu_cache():

    On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables
    to construct a pointer to the pte again. Passing a pte_t * is much
    more elegant. Maybe we might even replace the pte argument with the
    pte_t?

Ben Herrenschmidt would also like the pte pointer for PowerPC:

    Passing the ptep in there is exactly what I want. I want that
    -instead- of the PTE value, because I have issue on some ppc cases,
    for I$/D$ coherency, where set_pte_at() may decide to mask out the
    _PAGE_EXEC.

So, pass in the mapped page table pointer into update_mmu_cache(), and remove the PTE value, updating all implementations and call sites to suit.

Includes a fix from Stephen Rothwell:

    sparc: fix fallout from update_mmu_cache API change

    Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>

Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
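In API terms, the change can be sketched like this (a minimal sketch; the parameter name "address" stands in for whatever name each architecture's implementation actually uses):

        /* before: the hook received the PTE by value, which breaks with
         * highmem + highpte because the page table holding the entry may
         * not be mapped at the time the hook runs */
        void update_mmu_cache(struct vm_area_struct *vma,
                              unsigned long address, pte_t pte);

        /* after: the hook receives a pointer into the still-mapped page
         * table, so implementations can read the live PTE directly
         * instead of re-walking the page tables or trusting a stale copy */
        void update_mmu_cache(struct vm_area_struct *vma,
                              unsigned long address, pte_t *ptep);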
82 lines · 1.9 KiB · C
/* MN10300 MMU context allocation and management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/*
 * list of the MMU contexts last allocated on each CPU
 */
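/* (each entry holds the context last allocated on that CPU; the initial
 *  value appears to be the final context of the first version, forcing a
 *  version roll-over -- and hence a fresh context -- on first allocation) */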
unsigned long mmu_context_cache[NR_CPUS] = {
        [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
};

/*
 * flush the specified TLB entry
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long pteu, cnx, flags;

        addr &= PAGE_MASK;

        /* make sure the context doesn't migrate and defend against
         * interference from vmalloc'd regions */
        local_irq_save(flags);

        cnx = mm_context(vma->vm_mm);

        if (cnx != MMU_NO_CONTEXT) {
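                /* writing the address + context into the instruction and
                 * data PTE-upper registers makes each TLB look up the
                 * matching entry; a set valid bit in the corresponding
                 * PTE-lower register indicates a hit, and clearing that
                 * register invalidates the entry */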
                pteu = addr | (cnx & 0x000000ffUL);
                IPTEU = pteu;
                DPTEU = pteu;
                if (IPTEL & xPTEL_V)
                        IPTEL = 0;
                if (DPTEL & xPTEL_V)
                        DPTEL = 0;
        }

        local_irq_restore(flags);
}

/*
 * preemptively set a TLB entry
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        unsigned long pteu, ptel, cnx, flags;
        pte_t pte = *ptep;

        addr &= PAGE_MASK;
        ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);

        /* make sure the context doesn't migrate and defend against
         * interference from vmalloc'd regions */
        local_irq_save(flags);

        cnx = mm_context(vma->vm_mm);

        if (cnx != MMU_NO_CONTEXT) {
                pteu = addr | (cnx & 0x000000ffUL);
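                /* only prime the instruction TLB when the page is
                 * executable (no-execute bit clear); the data TLB is
                 * primed either way */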
                if (!(pte_val(pte) & _PAGE_NX)) {
                        IPTEU = pteu;
                        if (IPTEL & xPTEL_V)
                                IPTEL = ptel;
                }
                DPTEU = pteu;
                if (DPTEL & xPTEL_V)
                        DPTEL = ptel;
        }

        local_irq_restore(flags);
}
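For context, a typical call site after this change looks roughly like the following sketch, modelled on the generic fault path of this era; the names page_table, entry, and ptl are illustrative, not taken from this file:

        set_pte_at(mm, address, page_table, entry);  /* install the PTE */
        update_mmu_cache(vma, address, page_table);  /* pass the pte_t *, not the value */
        pte_unmap_unlock(page_table, ptl);           /* unmap only after the hook ran */

The ordering is the point of the commit: update_mmu_cache() runs while the page table is still mapped, so implementations like the one above can safely dereference ptep.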