mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
b0e551313e
PA-RISC doesn't have atomic instructions to modify page table entries, so the TLB handler takes a spinlock and modifies the page table entry non-atomically. If you modify a page table entry without that spinlock, you may race with the TLB handler on another CPU and your modification may be lost. Protect against that by using purge_tlb_start() and purge_tlb_end(), which take and release the TLB spinlock.

Signed-off-by: Helge Deller <deller@gmx.de>
Cc: stable@vger.kernel.org # v4.4
74 lines
1.6 KiB
C
#ifndef _ASM_PARISC64_HUGETLB_H
#define _ASM_PARISC64_HUGETLB_H

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);

static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep);

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}

#endif /* _ASM_PARISC64_HUGETLB_H */
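
The header only declares set_huge_pte_at(), huge_ptep_get_and_clear(), huge_ptep_set_wrprotect() and huge_ptep_set_access_flags(); their definitions live out of line (in arch/parisc/mm/hugetlbpage.c) so they can take the TLB spinlock described in the commit message. The sketch below illustrates that locking pattern for one of them. It is a minimal sketch, not the exact upstream code: __set_huge_pte_at() is a hypothetical helper standing in for whatever code actually writes the entry, and purge_tlb_start()/purge_tlb_end() are assumed to be the irqsave/irqrestore wrappers around the TLB spinlock that the commit message refers to.

/*
 * Sketch of the locking pattern from the commit message, shown for
 * set_huge_pte_at().  purge_tlb_start()/purge_tlb_end() are assumed to
 * expand to spin_lock_irqsave()/spin_unlock_irqrestore() on the TLB
 * spinlock shared with the software TLB handler; __set_huge_pte_at()
 * is a hypothetical helper that performs the non-atomic page table
 * update itself.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);		/* lock out the TLB handler */
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);		/* drop the lock, restore IRQs */
}

The other out-of-line helpers would follow the same take-lock / modify / drop-lock shape around their respective page table updates.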