/*
 * S390 version
 *
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
#include <asm-generic/mm_hooks.h>
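
/*
 * Initialize the mmu context of a newly created mm: set up the lock,
 * the page table and gmap lists, the CPU attach mask and flush state,
 * and build the address space control element (asce) that matches the
 * page table layout this mm starts out with.
 */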
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	spin_lock_init(&mm->context.lock);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	mm->context.gmap_asce = 0;
	mm->context.flush_mm = 0;
#ifdef CONFIG_PGSTE
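	/*
	 * pgstes (page table extensions) are only needed when the mm may
	 * back a KVM guest. Page tables are allocated with pgstes if that
	 * was requested globally, via TIF_PGSTE, or by the forking
	 * process; the remaining KVM related state starts out disabled.
	 */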
	mm->context.alloc_pgste = page_table_allocate_pgste ||
		test_thread_flag(TIF_PGSTE) ||
		current->mm->context.alloc_pgste;
	mm->context.has_pgste = 0;
	mm->context.use_skey = 0;
	mm->context.use_cmma = 0;
#endif
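	/*
	 * asce_limit reflects the page table geometry of the new mm: it is
	 * 0 for a context freshly created by exec and is inherited from
	 * the parent on fork. Build the asce for mm->pgd accordingly.
	 */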
	switch (mm->context.asce_limit) {
	case _REGION2_SIZE:
		/*
		 * forked 3-level task, fall through to set new asce with new
		 * mm->pgd
		 */
	case 0:
		/* context created by exec, set asce limit to 4TB */
		mm->context.asce_limit = STACK_TOP_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
		break;
	case -PAGE_SIZE:
		/* forked 5-level task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		break;
	case _REGION1_SIZE:
		/* forked 4-level task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		break;
	case _REGION3_SIZE:
		/* forked 2-level compat task, set new asce with new mm->pgd */
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
		/* pgd_alloc() did not increase mm->nr_pmds */
		mm_inc_nr_pmds(mm);
	}
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}
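
/* Nothing to tear down on s390 when a context is destroyed. */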
#define destroy_context(mm) do { } while (0)
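
/*
 * Make the asce of @mm the current user asce: remember it in lowcore,
 * load it into control register 7 if the current mm_segment requires
 * it, and set CIF_ASCE_PRIMARY so that the primary asce is updated as
 * well.
 */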
static inline void set_user_asce(struct mm_struct *mm)
{
	S390_lowcore.user_asce = mm->context.asce;
	if (current->thread.mm_segment.ar4)
		__ctl_load(S390_lowcore.user_asce, 7, 7);
	set_cpu_flag(CIF_ASCE_PRIMARY);
}
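
/*
 * Detach the user asce by loading the kernel asce into control
 * registers 1 and 7.
 */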
static inline void clear_user_asce(void)
{
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.user_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
}
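
/*
 * Make sure control register 1 points to the kernel asce again, e.g.
 * after a uaccess operation or KVM temporarily switched to a different
 * address space.
 */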
static inline void load_kernel_asce(void)
{
	unsigned long asce;

	__ctl_store(asce, 1, 1);
	if (asce != S390_lowcore.kernel_asce)
		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	set_cpu_flag(CIF_ASCE_PRIMARY);
}
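
/*
 * Switch from prev to next: publish next's asce in lowcore and move
 * this cpu from prev's to next's attach mask. Control registers 1 and
 * 7 are pointed at the kernel asce here; the user asce is re-installed
 * later, on the way back to user space (see set_user_asce() and
 * finish_arch_post_lock_switch()).
 */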
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	S390_lowcore.user_asce = next->context.asce;
	if (prev == next)
		return;
	cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
	/* Clear old ASCE by loading the kernel ASCE. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
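
/*
 * Runs after the scheduler has dropped its locks: restore the kernel
 * asce, wait until concurrent TLB flushes of the new mm have finished,
 * attach this cpu to the mm, perform any pending lazy TLB flush and
 * restore the task's address space mode.
 */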
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;

	load_kernel_asce();
	if (mm) {
		preempt_disable();
		while (atomic_read(&mm->context.flush_count))
			cpu_relax();
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		__tlb_flush_mm_lazy(mm);
		preempt_enable();
	}
	set_fs(current->thread.mm_segment);
}
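
/* Lazy TLB handling and deactivate_mm need no extra work on s390. */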
#define enter_lazy_tlb(mm,tsk) do { } while (0)
#define deactivate_mm(tsk,mm) do { } while (0)
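
/*
 * Activate a different mm for the current task: do the full mm switch,
 * attach this cpu to the new mm and make its user asce active right
 * away.
 */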
static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	set_user_asce(next);
}
#endif /* __S390_MMU_CONTEXT_H */