2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* linux/arch/arm/mm/fault-armv.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 1995 Linus Torvalds
|
|
|
|
* Modifications for ARM processor (c) 1995-2002 Russell King
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/bitops.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/pagemap.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers, which should be easily discoverable on most builds of the
specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 15:04:11 +07:00
|
|
|
#include <linux/gfp.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2008-09-05 20:08:44 +07:00
|
|
|
#include <asm/bugs.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/cacheflush.h>
|
2008-08-11 00:10:19 +07:00
|
|
|
#include <asm/cachetype.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <asm/pgtable.h>
|
|
|
|
#include <asm/tlbflush.h>
|
|
|
|
|
2009-10-24 20:11:59 +07:00
|
|
|
#include "mm.h"
|
|
|
|
|
2010-11-16 07:22:09 +07:00
|
|
|
/*
 * PTE memory type applied to aliased shared mappings.  Defaults to
 * bufferable; check_writebuffer_bugs() downgrades it to fully uncached
 * at boot if the write buffer fails the coherency test below.
 */
static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2010-09-13 21:58:06 +07:00
|
|
|
#if __LINUX_ARM_ARCH__ < 6
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* We take the easy way out of this problem - we make the
|
|
|
|
* PTE uncacheable. However, we leave the write buffer on.
|
[PATCH] mm: arm ready for split ptlock
Prepare arm for the split page_table_lock: three issues.
Signal handling's preserve and restore of iwmmxt context currently involves
reading and writing that context to and from user space, while holding
page_table_lock to secure the user page(s) against kswapd. If we split the
lock, then the structure might span two pages, secured by to read into and
write from a kernel stack buffer, copying that out and in without locking (the
structure is 160 bytes in size, and here we're near the top of the kernel
stack). Or would the overhead be noticeable?
arm_syscall's cmpxchg emulation use pte_offset_map_lock, instead of
pte_offset_map and mm-wide page_table_lock; and strictly, it should now also
take mmap_sem before descending to pmd, to guard against another thread
munmapping, and the page table pulled out beneath this thread.
Updated two comments in fault-armv.c. adjust_pte is interesting, since its
modification of a pte in one part of the mm depends on the lock held when
calling update_mmu_cache for a pte in some other part of that mm. This can't
be done with a split page_table_lock (and we've already taken the lowest lock
in the hierarchy here): so we'll have to disable split on arm, unless
CONFIG_CPU_CACHE_VIPT ensures adjust_pte is never used.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 08:16:36 +07:00
|
|
|
*
|
|
|
|
* Note that the pte lock held when calling update_mmu_cache must also
|
|
|
|
* guard the pte (somewhere else in the same mm) that we modify here.
|
|
|
|
* Therefore those configurations which might call adjust_pte (those
|
|
|
|
* without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2009-12-18 23:21:35 +07:00
|
|
|
/*
 * Rewrite the memory-type bits of a present, mapped pte to
 * shared_pte_mask so that aliased shared mappings stay coherent.
 *
 * The caller must hold the appropriate pte lock and have @ptep mapped.
 * Returns non-zero if the pte was present (i.e. an alias existed and
 * may have been adjusted).
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		/*
		 * Write back any dirty lines for this page (L1 and any
		 * outer cache) before the mapping becomes less cacheable.
		 */
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		/* Swap in the shared (uncached/bufferable) memory type. */
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		/* Drop any stale TLB entry carrying the old attributes. */
		flush_tlb_page(vma, address);
	}

	return ret;
}
|
|
|
|
|
ARM: 6464/2: fix spinlock recursion in adjust_pte()
When running following code in a machine which has VIVT caches and
USE_SPLIT_PTLOCKS is not defined:
fd = open("/etc/passwd", O_RDONLY);
addr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
addr2 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
v = *((int *)addr);
we will hang in spinlock recursion in the page fault handler:
BUG: spinlock recursion on CPU#0, mmap_test/717
lock: c5e295d8, .magic: dead4ead, .owner: mmap_test/717,
.owner_cpu: 0
[<c0026604>] (unwind_backtrace+0x0/0xec)
[<c014ee48>] (do_raw_spin_lock+0x40/0x140)
[<c0027f68>] (update_mmu_cache+0x208/0x250)
[<c0079db4>] (__do_fault+0x320/0x3ec)
[<c007af7c>] (handle_mm_fault+0x2f0/0x6d8)
[<c0027834>] (do_page_fault+0xdc/0x1cc)
[<c00202d0>] (do_DataAbort+0x34/0x94)
This comes from the fact that when USE_SPLIT_PTLOCKS is not defined,
the only lock protecting the page tables is mm->page_table_lock
which is already locked before update_mmu_cache() is called.
Signed-off-by: Mika Westerberg <mika.westerberg@iki.fi>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2010-10-28 17:45:22 +07:00
|
|
|
#if USE_SPLIT_PTLOCKS
/*
 * If we are using split PTE locks, then we need to take the page
 * lock here.  Otherwise we are using shared mm->page_table_lock
 * which is already locked, thus cannot take it.
 */
static inline void do_pte_lock(spinlock_t *ptl)
{
	/*
	 * Use nested version here to indicate that we are already
	 * holding one similar spinlock.
	 */
	spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
}

static inline void do_pte_unlock(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
#else /* !USE_SPLIT_PTLOCKS */
/*
 * Without split ptlocks, mm->page_table_lock protects all page tables
 * and is already held by the caller of update_mmu_cache(): taking it
 * again would recurse, so these are deliberately no-ops.
 */
static inline void do_pte_lock(spinlock_t *ptl) {}
static inline void do_pte_unlock(spinlock_t *ptl) {}
#endif /* USE_SPLIT_PTLOCKS */
|
|
|
|
|
2009-12-18 23:31:38 +07:00
|
|
|
/*
 * Walk the page tables of @vma's mm down to the pte for @address and,
 * if it maps @pfn and is present, adjust its memory type via
 * do_adjust_pte().
 *
 * Returns the do_adjust_pte() result, or 0 if any level of the page
 * table walk is absent/bad.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map(pmd, address);
	do_pte_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	do_pte_unlock(ptl);
	pte_unmap(pte);

	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Ensure VIVT cache coherency for a page which has multiple shared
 * mappings in the same mm: walk all other VMAs of @mapping in this mm
 * that cover the same page offset, mark their ptes with
 * shared_pte_mask, and if any alias was found, adjust the faulting
 * pte (@ptep) as well.
 */
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;	/* number of other mappings adjusted */

	/* File page offset of @addr within @vma. */
	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	/* Only adjust the faulting pte if a real alias exists. */
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Take care of architecture specific things when placing a new PTE into
|
|
|
|
* a page table, or changing an existing PTE. Basically, there are two
|
|
|
|
* things that we need to take care of:
|
|
|
|
*
|
2010-09-13 21:57:36 +07:00
|
|
|
* 1. If PG_dcache_clean is not set for the page, we need to ensure
|
2005-04-17 05:20:36 +07:00
|
|
|
* that any cache entries for the kernels virtual memory
|
|
|
|
* range are written back to the page.
|
|
|
|
* 2. If we have multiple shared mappings of the same space in
|
|
|
|
* an object, we need to deal with the cache aliasing issues.
|
|
|
|
*
|
[PATCH] mm: arm ready for split ptlock
Prepare arm for the split page_table_lock: three issues.
Signal handling's preserve and restore of iwmmxt context currently involves
reading and writing that context to and from user space, while holding
page_table_lock to secure the user page(s) against kswapd. If we split the
lock, then the structure might span two pages, secured by to read into and
write from a kernel stack buffer, copying that out and in without locking (the
structure is 160 bytes in size, and here we're near the top of the kernel
stack). Or would the overhead be noticeable?
arm_syscall's cmpxchg emulation use pte_offset_map_lock, instead of
pte_offset_map and mm-wide page_table_lock; and strictly, it should now also
take mmap_sem before descending to pmd, to guard against another thread
munmapping, and the page table pulled out beneath this thread.
Updated two comments in fault-armv.c. adjust_pte is interesting, since its
modification of a pte in one part of the mm depends on the lock held when
calling update_mmu_cache for a pte in some other part of that mm. This can't
be done with a split page_table_lock (and we've already taken the lowest lock
in the hierarchy here): so we'll have to disable split on arm, unless
CONFIG_CPU_CACHE_VIPT to ensures adjust_pte never used.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-10-30 08:16:36 +07:00
|
|
|
* Note that the pte lock will be held.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
On VIVT ARM, when we have multiple shared mappings of the same file
in the same MM, we need to ensure that we have coherency across all
copies. We do this via make_coherent() by making the pages
uncacheable.
This used to work fine, until we allowed highmem with highpte - we
now have a page table which is mapped as required, and is not available
for modification via update_mmu_cache().
Ralf Beache suggested getting rid of the PTE value passed to
update_mmu_cache():
On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables
to construct a pointer to the pte again. Passing a pte_t * is much
more elegant. Maybe we might even replace the pte argument with the
pte_t?
Ben Herrenschmidt would also like the pte pointer for PowerPC:
Passing the ptep in there is exactly what I want. I want that
-instead- of the PTE value, because I have issue on some ppc cases,
for I$/D$ coherency, where set_pte_at() may decide to mask out the
_PAGE_EXEC.
So, pass in the mapped page table pointer into update_mmu_cache(), and
remove the PTE value, updating all implementations and call sites to
suit.
Includes a fix from Stephen Rothwell:
sparc: fix fallout from update_mmu_cache API change
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2009-12-18 23:40:18 +07:00
|
|
|
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	/* Ignore special/non-RAM mappings. */
	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
	/*
	 * Writeback kernel-side cache lines once per page: the first
	 * mapper clears the "dirty in kernel cache" state and flushes.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	if (mapping) {
		/* VIVT caches may alias shared file mappings; fix ptes. */
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}
|
2010-09-13 21:58:06 +07:00
|
|
|
#endif /* __LINUX_ARM_ARCH__ < 6 */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether the write buffer has physical address aliasing
|
|
|
|
* issues. If it has, we need to avoid them for the case where
|
|
|
|
* we have several shared mappings of the same object in user
|
|
|
|
* space.
|
|
|
|
*/
|
|
|
|
/*
 * Given two kernel virtual aliases (@p1, @p2) of the same physical
 * page, write 1 through one alias and 0 through the other, then read
 * back through the first.  With a coherent write buffer the read sees
 * the final 0; a stale 1 means the write buffer aliases physical
 * addresses incorrectly.
 *
 * Returns non-zero if the write buffer is NOT coherent.
 * IRQs are disabled and barriers inserted so nothing reorders or
 * interleaves with the probe sequence.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}
|
|
|
|
|
|
|
|
/*
 * Boot-time probe for write buffer physical-address aliasing bugs.
 *
 * Maps one page at two kernel virtual addresses with the bufferable
 * memory type and runs check_writebuffer() on the pair.  If the test
 * fails (or cannot be run), shared_pte_mask is downgraded from
 * bufferable to fully uncached so shared user mappings avoid the
 * broken write buffer.
 *
 * Fix: the "unable to map memory" / "unable to grab page" reason
 * strings previously embedded a trailing '\n' even though they are
 * emitted via printk("failed, %s\n", reason), which printed a stray
 * blank line; the newlines are removed so all three reasons format
 * consistently.
 */
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;	/* assume broken until proven otherwise */

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		/* Same attributes that shared_pte_mask applies by default. */
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		/* Two virtual aliases of the one physical page. */
		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		/* vunmap() tolerates a NULL address, so no need to guard. */
		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}
|