Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-29 20:46:41 +07:00
Commit 11805bcfa4
The current ASID allocation algorithm doesn't ensure the notification of the other CPUs when the ASID rolls over. This may lead to two processes using the same ASID (but a different generation), or to multiple threads of the same process using different ASIDs.

This patch adds the broadcasting of the ASID rollover event to the other CPUs. To avoid a race on multiple CPUs modifying "cpu_last_asid" during the handling of the broadcast, the ASID numbering now starts at "smp_processor_id() + 1". At rollover, the cpu_last_asid will be set to NR_CPUS.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
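For illustration, below is a minimal user-space sketch of the allocation scheme this commit message describes: a generation number kept above ASID_BITS, a shared cpu_last_asid counter, and a rollover path that restarts numbering at the current CPU's index plus one and then advances cpu_last_asid by NR_CPUS. This is not the kernel code itself; struct mm_context, new_context(), the fixed NR_CPUS value, and main() are simplifications invented for the example, and the locking and the IPI broadcast to the other CPUs are only indicated by comments.

/*
 * Simplified, hypothetical model of ASID allocation with generation
 * rollover. Single-threaded: no spinlock, no TLB flush, and the real
 * smp_call_function() broadcast is only described in a comment.
 */
#include <stdio.h>

#define ASID_BITS          8
#define ASID_MASK          ((~0u) << ASID_BITS)
#define ASID_FIRST_VERSION (1u << ASID_BITS)
#define NR_CPUS            4

static unsigned int cpu_last_asid = ASID_FIRST_VERSION;

/* One "mm" context: generation in the upper bits, ASID in the low 8 bits. */
struct mm_context {
        unsigned int id;
};

static void new_context(struct mm_context *mm, unsigned int this_cpu)
{
        unsigned int asid = ++cpu_last_asid;

        if ((asid & ~ASID_MASK) == 0) {
                /*
                 * Rollover: a new generation starts. The real code would
                 * flush the TLB and broadcast the event to the other CPUs
                 * here, each of which takes its own cpu-indexed slot in the
                 * new generation. Restarting at this_cpu + 1 keeps this
                 * CPU's value disjoint from those slots, and cpu_last_asid
                 * then advances by NR_CPUS past all of them.
                 */
                asid = cpu_last_asid + this_cpu + 1;
                cpu_last_asid += NR_CPUS;
        }
        mm->id = asid;
}

/* Before using a context: allocate a fresh ASID if its generation is stale. */
static void check_context(struct mm_context *mm, unsigned int this_cpu)
{
        if ((mm->id ^ cpu_last_asid) >> ASID_BITS)
                new_context(mm, this_cpu);
}

int main(void)
{
        struct mm_context a = { 0 }, b = { 0 };

        check_context(&a, 0);   /* stale generation 0 -> gets a new ASID */
        check_context(&b, 0);
        printf("a: generation %u, asid %u\n",
               a.id >> ASID_BITS, a.id & ~ASID_MASK);
        printf("b: generation %u, asid %u\n",
               b.id >> ASID_BITS, b.id & ~ASID_MASK);
        return 0;
}

The per-CPU offset is the point of the change: even if several CPUs process the rollover concurrently, each hands out values from its own reserved slot in the new generation, so no two of them can assign the same ASID.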
138 lines · 3.6 KiB · C
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>

void __check_kvm_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_BITS		8
#define ASID_MASK		((~0) << ASID_BITS)
#define ASID_FIRST_VERSION	(1 << ASID_BITS)

extern unsigned int cpu_last_asid;
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct mm_struct *, current_mm);
#endif

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

static inline void check_context(struct mm_struct *mm)
{
	/*
	 * This code is executed with interrupts enabled. Therefore,
	 * mm->context.id cannot be updated to the latest ASID version
	 * on a different CPU (and condition below not triggered)
	 * without first getting an IPI to reset the context. The
	 * alternative is to take a read_lock on mm->context.id_lock
	 * (after changing its type to rwlock_t).
	 */
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);

	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)

#else

static inline void check_context(struct mm_struct *mm)
{
#ifdef CONFIG_MMU
	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
#endif
}

#define init_new_context(tsk,mm)	0

#endif

#define destroy_context(mm)		do { } while(0)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/* check for possible thread migration */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
#ifdef CONFIG_SMP
		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
		*crt_mm = next;
#endif
		check_context(next);
		cpu_switch_mm(next->pgd, next);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

#endif