/*
 * mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
 * synced 2025-01-21 21:42:06 +07:00, commit aaa2cc56c1
 *
 * Add API for nested write locks and convert the few call sites doing that.
 *
 * Signed-off-by: Michel Lespinasse <walken@google.com>
 * Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 * Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
 * Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
 * Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
 * Cc: Davidlohr Bueso <dbueso@suse.de>
 * Cc: David Rientjes <rientjes@google.com>
 * Cc: Hugh Dickins <hughd@google.com>
 * Cc: Jason Gunthorpe <jgg@ziepe.ca>
 * Cc: Jerome Glisse <jglisse@redhat.com>
 * Cc: John Hubbard <jhubbard@nvidia.com>
 * Cc: Liam Howlett <Liam.Howlett@oracle.com>
 * Cc: Matthew Wilcox <willy@infradead.org>
 * Cc: Peter Zijlstra <peterz@infradead.org>
 * Cc: Ying Han <yinghan@google.com>
 * Link: http://lkml.kernel.org/r/20200520052908.204642-7-walken@google.com
 * Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */
#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

#include <asm/mmu.h>
extern void uml_setup_stubs(struct mm_struct *mm);

/*
 * The hooks below are needed because this arch does not pull in
 * asm-generic/mm_hooks.h:
 */
|
|
/* fork()-time hook: install the UML stub mappings in the new mm. */
static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);

	/* Always succeeds. */
	return 0;
}
|
|
extern void arch_exit_mmap(struct mm_struct *mm);

/* Unmap hook: no per-arch work is required here. */
static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
}
|
|
/* Access-permission hook: every access is allowed by default. */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	return true;
}

/*
 * end asm-generic/mm_hooks.h functions
 */
|
|
|
|
/* Nothing to do when an mm is deactivated. */
#define deactivate_mm(tsk, mm)	do { } while (0)

extern void force_flush_all(void);
|
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
|
|
{
|
|
/*
|
|
* This is called by fs/exec.c and sys_unshare()
|
|
* when the new ->mm is used for the first time.
|
|
*/
|
|
__switch_mm(&new->context.id);
|
|
mmap_write_lock_nested(new, SINGLE_DEPTH_NESTING);
|
|
uml_setup_stubs(new);
|
|
mmap_write_unlock(new);
|
|
}
|
|
|
|
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
|
|
struct task_struct *tsk)
|
|
{
|
|
unsigned cpu = smp_processor_id();
|
|
|
|
if(prev != next){
|
|
cpumask_clear_cpu(cpu, mm_cpumask(prev));
|
|
cpumask_set_cpu(cpu, mm_cpumask(next));
|
|
if(next != &init_mm)
|
|
__switch_mm(&next->context.id);
|
|
}
|
|
}
|
|
|
|
/* Lazy-TLB hook: nothing to do on this arch. */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
|
extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif /* __UM_MMU_CONTEXT_H */