#ifndef __MMU_H
#define __MMU_H
#include <linux/cpumask.h>
#include <linux/errno.h>
typedef struct {
	spinlock_t lock;
	cpumask_t cpu_attach_mask;
	atomic_t flush_count;
	unsigned int flush_mm;
	struct list_head pgtable_list;
	struct list_head gmap_list;
	unsigned long gmap_asce;
	/*
	 * The complete asce is stored here, not just the asce bits, so
	 * that switch_mm() can read it atomically. A concurrent pagetable
	 * upgrade in crst_table_upgrade() then lets switch_mm() see either
	 * the old or the new value, but never a mixture of new asce bits
	 * with the old mm->pgd, which would be a corrupt asce and end in
	 * a kernel panic from a translation exception. Reading the stale
	 * value is harmless: the racing thread takes a page fault on the
	 * higher level memory, and the fault handler sees the new mm->pgd;
	 * the worst case is a page fault loop until the next time slice.
	 */
	unsigned long asce;
	unsigned long asce_limit;
	unsigned long vdso_base;
	/* The mmu context allocates 4K page tables. */
	unsigned int alloc_pgste:1;
	/* The mmu context uses extended page tables. */
	unsigned int has_pgste:1;
	/* The mmu context uses storage keys. */
	unsigned int use_skey:1;
	/* The mmu context uses CMMA. */
	unsigned int use_cmma:1;
} mm_context_t;
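
/*
 * Illustrative sketch only, not part of this header: the payoff of
 * keeping the complete asce in mm_context_t is that the context switch
 * path needs no lock. Assuming the helper names used in this era's
 * arch/s390/include/asm/mmu_context.h (treat the exact calls as an
 * assumption, not a quote), switch_mm() boils down to:
 *
 *	static inline void switch_mm(struct mm_struct *prev,
 *				     struct mm_struct *next,
 *				     struct task_struct *tsk)
 *	{
 *		int cpu = smp_processor_id();
 *
 *		S390_lowcore.user_asce = next->context.asce;
 *		if (prev == next)
 *			return;
 *		cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
 *		__ctl_load(S390_lowcore.kernel_asce, 1, 1);
 *		__ctl_load(S390_lowcore.kernel_asce, 7, 7);
 *		cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
 *	}
 *
 * The single aligned read of next->context.asce is what provides the
 * old-or-new-but-never-a-mixture guarantee described above.
 */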
#define INIT_MM_CONTEXT(name) \
	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
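
/*
 * Usage sketch (abridged, following the generic mm/init-mm.c of this
 * era): the macro supplies the arch-specific tail of the static
 * initializer of init_mm, e.g.
 *
 *	struct mm_struct init_mm = {
 *		.mm_rb		= RB_ROOT,
 *		.pgd		= swapper_pg_dir,
 *		...
 *		INIT_MM_CONTEXT(init_mm)
 *	};
 */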
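
/*
 * Test the protection of a page with the TPROT instruction. Returns
 * the condition code: 0 = fetch and store permitted, 1 = fetch
 * permitted but store protected, 2 = fetch and store protected,
 * 3 = translation not available. If TPROT itself raises a program
 * exception, the EX_TABLE fixup branches past the ipm, so the -EFAULT
 * initialisation of rc is returned instead.
 */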
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
#endif