/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
|
|
|
|
#include <linux/preempt.h>
|
|
|
|
#include <linux/smp.h>
|
|
|
|
|
|
|
|
#include <asm/smp_plat.h>
|
|
|
|
#include <asm/tlbflush.h>
|
2013-03-27 05:35:04 +07:00
|
|
|
#include <asm/mmu_context.h>
|
2010-12-20 21:44:32 +07:00
|
|
|
|
|
|
|
/**********************************************************************/

/*
 * TLB operations
 */
|
|
|
|
/*
 * Bundle of flush parameters: smp_call_function()-style IPIs only carry
 * a single void * argument, so the handlers below unpack one of these.
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;	/* VMA being flushed (user flushes) */
	unsigned long ta_start;		/* first address, or the single page */
	unsigned long ta_end;		/* end of the range (range flushes) */
};
|
|
|
|
|
|
|
|
/*
 * IPI handler: invalidate the entire TLB on the receiving CPU.
 * @ignored: unused, present to match the smp_call_function() prototype.
 */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}
|
|
|
|
|
|
|
|
/*
 * IPI handler: flush the TLB entries of one address space on this CPU.
 * @arg: the struct mm_struct * to flush, passed through the IPI.
 */
static inline void ipi_flush_tlb_mm(void *arg)
{
	/* void * converts implicitly; no cast needed. */
	local_flush_tlb_mm(arg);
}
|
|
|
|
|
|
|
|
static inline void ipi_flush_tlb_page(void *arg)
|
|
|
|
{
|
|
|
|
struct tlb_args *ta = (struct tlb_args *)arg;
|
|
|
|
|
|
|
|
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ipi_flush_tlb_kernel_page(void *arg)
|
|
|
|
{
|
|
|
|
struct tlb_args *ta = (struct tlb_args *)arg;
|
|
|
|
|
|
|
|
local_flush_tlb_kernel_page(ta->ta_start);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ipi_flush_tlb_range(void *arg)
|
|
|
|
{
|
|
|
|
struct tlb_args *ta = (struct tlb_args *)arg;
|
|
|
|
|
|
|
|
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void ipi_flush_tlb_kernel_range(void *arg)
|
|
|
|
{
|
|
|
|
struct tlb_args *ta = (struct tlb_args *)arg;
|
|
|
|
|
|
|
|
local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
|
|
|
|
}
|
|
|
|
|
2013-02-28 23:48:11 +07:00
|
|
|
/*
 * IPI handler: invalidate the branch predictor on the receiving CPU.
 * @ignored: unused, present to match the smp_call_function() prototype.
 */
static inline void ipi_flush_bp_all(void *ignored)
{
	local_flush_bp_all();
}
|
|
|
|
|
2013-10-09 23:26:44 +07:00
|
|
|
#ifdef CONFIG_ARM_ERRATA_798181
/*
 * Workaround hook for Cortex-A15/Brahma-B15 erratum 798181, selected at
 * boot by erratum_a15_798181_init() below.  Left NULL when this CPU is
 * not affected.  The handler performs the extra TLB maintenance and
 * returns true when a dummy IPI broadcast is additionally required.
 */
bool (*erratum_a15_798181_handler)(void);
|
|
|
|
|
|
|
|
/*
 * Partial erratum 798181 workaround: issue a dummy inner-shareable
 * TLB invalidate by MVA (CP15 c8, c3, 1 with address 0) followed by a
 * DSB.  Returns false: no dummy IPI broadcast is required on top.
 */
static bool erratum_a15_798181_partial(void)
{
	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
	dsb(ish);
	return false;
}
|
|
|
|
|
|
|
|
/*
 * Full erratum 798181 workaround: same dummy inner-shareable TLB
 * invalidate and DSB as the partial variant, but returns true so the
 * caller also broadcasts a dummy IPI to the other CPUs.
 */
static bool erratum_a15_798181_broadcast(void)
{
	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
	dsb(ish);
	return true;
}
|
|
|
|
|
|
|
|
/*
 * Select the erratum 798181 workaround variant for this CPU based on
 * the MIDR (part number/revision) and REVIDR (applied ECO fixes).
 * NOTE: the else-if chain below is order-dependent — each branch's
 * upper-bound comparison relies on the earlier branches having matched
 * lower revisions first.
 */
void erratum_a15_798181_init(void)
{
	unsigned int midr = read_cpuid_id();
	unsigned int revidr = read_cpuid(CPUID_REVIDR);

	/* Brahma-B15 r0p0..r0p2 affected
	 * Cortex-A15 r0p0..r3p3 w/o ECO fix affected
	 * Fixes applied to A15 with respect to the revision and revidr are:
	 *
	 * r0p0-r2p1: No fixes applied
	 * r2p2,r2p3:
	 *	REVIDR[4]: 798181 Moving a virtual page that is being accessed
	 *		   by an active process can lead to unexpected behavior
	 *	REVIDR[9]: Not defined
	 * r2p4,r3p0,r3p1,r3p2:
	 *	REVIDR[4]: 798181 Moving a virtual page that is being accessed
	 *		   by an active process can lead to unexpected behavior
	 *	REVIDR[9]: 798181 Moving a virtual page that is being accessed
	 *		   by an active process can lead to unexpected behavior
	 *		   - This is an update to a previously released ECO.
	 * r3p3:
	 *	REVIDR[4]: Reserved
	 *	REVIDR[9]: 798181 Moving a virtual page that is being accessed
	 *		   by an active process can lead to unexpected behavior
	 *		   - This is an update to a previously released ECO.
	 *
	 * Handling:
	 *	REVIDR[9] set -> No WA
	 *	REVIDR[4] set, REVIDR[9] cleared -> Partial WA
	 *	Both cleared -> Full WA
	 */
	if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2) {
		/* Brahma-B15 r0p0..r0p2: full workaround */
		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f2) {
		/* Cortex-A15 r0p0..r2p1: no ECO fixes, full workaround */
		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x412fc0f4) {
		/* Cortex-A15 r2p2,r2p3: REVIDR[4] set means partial WA */
		if (revidr & 0x10)
			erratum_a15_798181_handler =
				erratum_a15_798181_partial;
		else
			erratum_a15_798181_handler =
				erratum_a15_798181_broadcast;
	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x413fc0f3) {
		/* Cortex-A15 r2p4..r3p2: REVIDR[9] -> no WA, [4] -> partial */
		if ((revidr & 0x210) == 0)
			erratum_a15_798181_handler =
				erratum_a15_798181_broadcast;
		else if (revidr & 0x10)
			erratum_a15_798181_handler =
				erratum_a15_798181_partial;
	} else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr < 0x414fc0f0) {
		/* Cortex-A15 r3p3: partial WA unless REVIDR[9] is set */
		if ((revidr & 0x200) == 0)
			erratum_a15_798181_handler =
				erratum_a15_798181_partial;
	}
}
|
|
|
|
#endif
|
|
|
|
|
2013-03-27 05:35:04 +07:00
|
|
|
/*
 * Handler for the erratum 798181 dummy IPI.  Only a memory barrier is
 * executed; the delivery of the interrupt itself is what matters to the
 * workaround (see the broadcast_tlb_*_a15_erratum() callers below).
 */
static void ipi_flush_tlb_a15_erratum(void *arg)
{
	dmb();
}
|
|
|
|
|
|
|
|
static void broadcast_tlb_a15_erratum(void)
|
|
|
|
{
|
|
|
|
if (!erratum_a15_798181())
|
|
|
|
return;
|
|
|
|
|
2013-04-24 20:41:37 +07:00
|
|
|
smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
|
2013-03-27 05:35:04 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Erratum 798181: after flushing entries of @mm, send the dummy IPI to
 * the CPUs that may still hold stale entries for this address space.
 */
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int this_cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!erratum_a15_798181())
		return;

	/* get_cpu() pins us to this CPU while the mask is computed/used. */
	this_cpu = get_cpu();
	a15_erratum_get_cpumask(this_cpu, mm, &mask);
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
	put_cpu();
}
|
|
|
|
|
2010-12-20 21:44:32 +07:00
|
|
|
void flush_tlb_all(void)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast())
|
|
|
|
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
|
|
|
|
else
|
2013-02-11 20:47:48 +07:00
|
|
|
__flush_tlb_all();
|
2013-03-27 05:35:04 +07:00
|
|
|
broadcast_tlb_a15_erratum();
|
2010-12-20 21:44:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void flush_tlb_mm(struct mm_struct *mm)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast())
|
smp: introduce a generic on_each_cpu_mask() function
We have lots of infrastructure in place to partition multi-core systems
such that we have a group of CPUs that are dedicated to specific task:
cgroups, scheduler and interrupt affinity, and cpuisol= boot parameter.
Still, kernel code will at times interrupt all CPUs in the system via IPIs
for various needs. These IPIs are useful and cannot be avoided
altogether, but in certain cases it is possible to interrupt only specific
CPUs that have useful work to do and not the entire system.
This patch set, inspired by discussions with Peter Zijlstra and Frederic
Weisbecker when testing the nohz task patch set, is a first stab at trying
to explore doing this by locating the places where such global IPI calls
are being made and turning the global IPI into an IPI for a specific group
of CPUs. The purpose of the patch set is to get feedback if this is the
right way to go for dealing with this issue and indeed, if the issue is
even worth dealing with at all. Based on the feedback from this patch set
I plan to offer further patches that address similar issue in other code
paths.
This patch creates an on_each_cpu_mask() and on_each_cpu_cond()
infrastructure API (the former derived from existing arch specific
versions in Tile and Arm) and uses them to turn several global IPI
invocation to per CPU group invocations.
Core kernel:
on_each_cpu_mask() calls a function on processors specified by cpumask,
which may or may not include the local processor.
You must not call this function with disabled interrupts or from a
hardware interrupt handler or from a bottom half handler.
arch/arm:
Note that the generic version is a little different then the Arm one:
1. It has the mask as first parameter
2. It calls the function on the calling CPU with interrupts disabled,
but this should be OK since the function is called on the other CPUs
with interrupts disabled anyway.
arch/tile:
The API is the same as the tile private one, but the generic version
also calls the function on the with interrupts disabled in UP case
This is OK since the function is called on the other CPUs
with interrupts disabled.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Sasha Levin <levinsasha928@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Michal Nazarewicz <mina86@mina86.org>
Cc: Kosaki Motohiro <kosaki.motohiro@gmail.com>
Cc: Milton Miller <miltonm@bga.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-03-29 04:42:43 +07:00
|
|
|
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
|
2010-12-20 21:44:32 +07:00
|
|
|
else
|
2013-02-11 20:47:48 +07:00
|
|
|
__flush_tlb_mm(mm);
|
2013-03-27 05:35:04 +07:00
|
|
|
broadcast_tlb_mm_a15_erratum(mm);
|
2010-12-20 21:44:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast()) {
|
|
|
|
struct tlb_args ta;
|
|
|
|
ta.ta_vma = vma;
|
|
|
|
ta.ta_start = uaddr;
|
smp: introduce a generic on_each_cpu_mask() function
We have lots of infrastructure in place to partition multi-core systems
such that we have a group of CPUs that are dedicated to specific task:
cgroups, scheduler and interrupt affinity, and cpuisol= boot parameter.
Still, kernel code will at times interrupt all CPUs in the system via IPIs
for various needs. These IPIs are useful and cannot be avoided
altogether, but in certain cases it is possible to interrupt only specific
CPUs that have useful work to do and not the entire system.
This patch set, inspired by discussions with Peter Zijlstra and Frederic
Weisbecker when testing the nohz task patch set, is a first stab at trying
to explore doing this by locating the places where such global IPI calls
are being made and turning the global IPI into an IPI for a specific group
of CPUs. The purpose of the patch set is to get feedback if this is the
right way to go for dealing with this issue and indeed, if the issue is
even worth dealing with at all. Based on the feedback from this patch set
I plan to offer further patches that address similar issue in other code
paths.
This patch creates an on_each_cpu_mask() and on_each_cpu_cond()
infrastructure API (the former derived from existing arch specific
versions in Tile and Arm) and uses them to turn several global IPI
invocation to per CPU group invocations.
Core kernel:
on_each_cpu_mask() calls a function on processors specified by cpumask,
which may or may not include the local processor.
You must not call this function with disabled interrupts or from a
hardware interrupt handler or from a bottom half handler.
arch/arm:
Note that the generic version is a little different then the Arm one:
1. It has the mask as first parameter
2. It calls the function on the calling CPU with interrupts disabled,
but this should be OK since the function is called on the other CPUs
with interrupts disabled anyway.
arch/tile:
The API is the same as the tile private one, but the generic version
also calls the function on the with interrupts disabled in UP case
This is OK since the function is called on the other CPUs
with interrupts disabled.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Sasha Levin <levinsasha928@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Michal Nazarewicz <mina86@mina86.org>
Cc: Kosaki Motohiro <kosaki.motohiro@gmail.com>
Cc: Milton Miller <miltonm@bga.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-03-29 04:42:43 +07:00
|
|
|
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
|
|
|
|
&ta, 1);
|
2010-12-20 21:44:32 +07:00
|
|
|
} else
|
2013-02-11 20:47:48 +07:00
|
|
|
__flush_tlb_page(vma, uaddr);
|
2013-03-27 05:35:04 +07:00
|
|
|
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
|
2010-12-20 21:44:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void flush_tlb_kernel_page(unsigned long kaddr)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast()) {
|
|
|
|
struct tlb_args ta;
|
|
|
|
ta.ta_start = kaddr;
|
|
|
|
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
|
|
|
|
} else
|
2013-02-11 20:47:48 +07:00
|
|
|
__flush_tlb_kernel_page(kaddr);
|
2013-03-27 05:35:04 +07:00
|
|
|
broadcast_tlb_a15_erratum();
|
2010-12-20 21:44:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void flush_tlb_range(struct vm_area_struct *vma,
|
|
|
|
unsigned long start, unsigned long end)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast()) {
|
|
|
|
struct tlb_args ta;
|
|
|
|
ta.ta_vma = vma;
|
|
|
|
ta.ta_start = start;
|
|
|
|
ta.ta_end = end;
|
smp: introduce a generic on_each_cpu_mask() function
We have lots of infrastructure in place to partition multi-core systems
such that we have a group of CPUs that are dedicated to specific task:
cgroups, scheduler and interrupt affinity, and cpuisol= boot parameter.
Still, kernel code will at times interrupt all CPUs in the system via IPIs
for various needs. These IPIs are useful and cannot be avoided
altogether, but in certain cases it is possible to interrupt only specific
CPUs that have useful work to do and not the entire system.
This patch set, inspired by discussions with Peter Zijlstra and Frederic
Weisbecker when testing the nohz task patch set, is a first stab at trying
to explore doing this by locating the places where such global IPI calls
are being made and turning the global IPI into an IPI for a specific group
of CPUs. The purpose of the patch set is to get feedback if this is the
right way to go for dealing with this issue and indeed, if the issue is
even worth dealing with at all. Based on the feedback from this patch set
I plan to offer further patches that address similar issue in other code
paths.
This patch creates an on_each_cpu_mask() and on_each_cpu_cond()
infrastructure API (the former derived from existing arch specific
versions in Tile and Arm) and uses them to turn several global IPI
invocation to per CPU group invocations.
Core kernel:
on_each_cpu_mask() calls a function on processors specified by cpumask,
which may or may not include the local processor.
You must not call this function with disabled interrupts or from a
hardware interrupt handler or from a bottom half handler.
arch/arm:
Note that the generic version is a little different then the Arm one:
1. It has the mask as first parameter
2. It calls the function on the calling CPU with interrupts disabled,
but this should be OK since the function is called on the other CPUs
with interrupts disabled anyway.
arch/tile:
The API is the same as the tile private one, but the generic version
also calls the function on the with interrupts disabled in UP case
This is OK since the function is called on the other CPUs
with interrupts disabled.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Sasha Levin <levinsasha928@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Michal Nazarewicz <mina86@mina86.org>
Cc: Kosaki Motohiro <kosaki.motohiro@gmail.com>
Cc: Milton Miller <miltonm@bga.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2012-03-29 04:42:43 +07:00
|
|
|
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
|
|
|
|
&ta, 1);
|
2010-12-20 21:44:32 +07:00
|
|
|
} else
|
|
|
|
local_flush_tlb_range(vma, start, end);
|
2013-03-27 05:35:04 +07:00
|
|
|
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
|
2010-12-20 21:44:32 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast()) {
|
|
|
|
struct tlb_args ta;
|
|
|
|
ta.ta_start = start;
|
|
|
|
ta.ta_end = end;
|
|
|
|
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
|
|
|
|
} else
|
|
|
|
local_flush_tlb_kernel_range(start, end);
|
2013-03-27 05:35:04 +07:00
|
|
|
broadcast_tlb_a15_erratum();
|
2010-12-20 21:44:32 +07:00
|
|
|
}
|
|
|
|
|
2013-02-28 23:48:11 +07:00
|
|
|
void flush_bp_all(void)
|
|
|
|
{
|
|
|
|
if (tlb_ops_need_broadcast())
|
|
|
|
on_each_cpu(ipi_flush_bp_all, NULL, 1);
|
|
|
|
else
|
2013-02-19 05:07:47 +07:00
|
|
|
__flush_bp_all();
|
2013-02-28 23:48:11 +07:00
|
|
|
}
|