/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
#include <asm/paravirt.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;      /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
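
/*
 * Lazily make the i-cache coherent with the d-cache for a page that is
 * about to be mapped executable.  PG_arch_1 serves as the "already
 * coherent" marker, so the flush is done at most once per page.
 */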
void
__ia64_sync_icache_dcache (pte_t pte)
{
        unsigned long addr;
        struct page *page;

        page = pte_page(pte);
        addr = (unsigned long) page_address(page);

        if (test_bit(PG_arch_1, &page->flags))
                return;                         /* i-cache is already coherent with d-cache */

        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
        set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
        unsigned long pg_addr, end;

        pg_addr = PAGE_ALIGN((unsigned long) addr);
        end = (unsigned long) addr + size;
        while (pg_addr + PAGE_SIZE <= end) {
                struct page *page = virt_to_page(pg_addr);
                set_bit(PG_arch_1, &page->flags);
                pg_addr += PAGE_SIZE;
        }
}
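
/*
 * Compute the bottom of the register backing store.  The RBS grows upward
 * from the low end of the RLIMIT_STACK-sized (at most MAX_USER_STACK_SIZE)
 * region that ends at the top of the memory stack.
 */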
inline void
ia64_set_rbs_bot (void)
{
        unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
        current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
        struct vm_area_struct *vma;

        ia64_set_rbs_bot();

        /*
         * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
        vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (vma) {
                INIT_LIST_HEAD(&vma->anon_vma_chain);
                vma->vm_mm = current->mm;
                vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
                        kmem_cache_free(vm_area_cachep, vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
        }

        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
                vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
                if (vma) {
                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                        vma->vm_mm = current->mm;
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                        VM_DONTEXPAND | VM_DONTDUMP;
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
                                kmem_cache_free(vm_area_cachep, vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
                }
        }
}

void
free_initmem (void)
{
        free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
                           -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
        /*
         * EFI uses 4KB pages while the kernel can use 4KB or bigger.
         * Thus EFI and the kernel may have different page sizes. It is
         * therefore possible to have the initrd share the same page as
         * the end of the kernel (given current setup).
         *
         * To avoid freeing/using the wrong page (kernel sized) we:
         *      - align up the beginning of initrd
         *      - align down the end of initrd
         *
         *  |             |
         *  |=============| a000
         *  |             |
         *  |             |
         *  |             | 9000
         *  |/////////////|
         *  |/////////////|
         *  |=============| 8000
         *  |///INITRD////|
         *  |/////////////|
         *  |/////////////| 7000
         *  |             |
         *  |KKKKKKKKKKKKK|
         *  |=============| 6000
         *  |KKKKKKKKKKKKK|
         *  |KKKKKKKKKKKKK|
         *  K=kernel using 8KB pages
         *
         * In this example, we must free page 8000 ONLY. So we must align up
         * initrd_start and keep initrd_end as is.
         */
        start = PAGE_ALIGN(start);
        end = end & PAGE_MASK;

        if (start < end)
                printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                if (!virt_addr_valid(start))
                        continue;
                free_reserved_page(virt_to_page(start));
        }
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (!PageReserved(page))
                printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n",
                       page_address(page));

        pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */

        {
                pud = pud_alloc(&init_mm, pgd, address);
                if (!pud)
                        goto out;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        goto out;
                if (!pte_none(*pte))
                        goto out;
                set_pte(pte, mk_pte(page, pgprot));
        }
  out:
        /* no need for flush_tlb */
        return page;
}
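
/*
 * Install the gate page (signal trampolines, EPC-based fsyscall entry) in
 * the kernel page table and apply the gate-related code patches.
 */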
static void __init
setup_gate (void)
{
        void *gate_section;
        struct page *page;

        /*
         * Map the gate page twice: once read-only to export the ELF
         * headers etc. and once execute-only page to enable
         * privilege-promotion via "epc":
         */
        gate_section = paravirt_get_gate_section();
        page = virt_to_page(ia64_imva(gate_section));
        put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
        page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
        put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
        put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
        /* Fill in the holes (if any) with read-only zero pages: */
        {
                unsigned long addr;

                for (addr = GATE_ADDR + PAGE_SIZE;
                     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
                     addr += PAGE_SIZE)
                {
                        put_kernel_page(ZERO_PAGE(0), addr,
                                        PAGE_READONLY);
                        put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
                                        PAGE_READONLY);
                }
        }
#endif
        ia64_patch_gate();
}
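
/*
 * User-visible gate area: a synthetic VMA, attached to no mm, that simply
 * describes the fixmapped range FIXADDR_USER_START..FIXADDR_USER_END.
 */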
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
        gate_vma.vm_mm = NULL;
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
        gate_vma.vm_page_prot = __P101;

        return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
        if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
                return 1;
        return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return in_gate_area_no_mm(addr);
}
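
/*
 * Per-CPU MMU setup: size the virtually mapped linear page table (VMLPT)
 * from the implemented virtual address bits, program the PTA register so
 * the VHPT walker uses it, then initialize the TLB and, if configured,
 * the huge-page region register.
 */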
void ia64_mmu_init(void *my_cpu_data)
{
        unsigned long pta, impl_va_bits;
        extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#       define VHPT_ENABLE_BIT  0
#else
#       define VHPT_ENABLE_BIT  1
#endif

        /*
         * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
         * address space.  The IA-64 architecture guarantees that at least 50 bits of
         * virtual address space are implemented but if we pick a large enough page size
         * (e.g., 64KB), the mapped address space is big enough that it will overlap with
         * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
         * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
         * problem in practice.  Alternatively, we could truncate the top of the mapped
         * address space to not permit mappings that would overlap with the VMLPT.
         * --davidm 00/12/06
         */
#       define pte_bits                 3
#       define mapped_space_bits        (3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
        /*
         * The virtual page table has to cover the entire implemented address space within
         * a region even though not all of this space may be mappable.  The reason for
         * this is that the Access bit and Dirty bit fault handlers perform
         * non-speculative accesses to the virtual page table, so the address range of the
         * virtual page table itself needs to be covered by virtual page table.
         */
#       define vmlpt_bits               (impl_va_bits - PAGE_SHIFT + pte_bits)
#       define POW2(n)                  (1ULL << (n))

        impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

        if (impl_va_bits < 51 || impl_va_bits > 61)
                panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
        /*
         * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
         * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
         * the test makes sure that our mapped space doesn't overlap the
         * unimplemented hole in the middle of the region.
         */
        if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
            (mapped_space_bits > impl_va_bits - 1))
                panic("Cannot build a big enough virtual-linear page table"
                      " to cover mapped address space.\n"
                      " Try using a smaller page size.\n");

        /* place the VMLPT at the end of each page-table mapped region: */
        pta = POW2(61) - POW2(vmlpt_bits);

        /*
         * Set the (virtually mapped linear) page table address.  Bit
         * 8 selects between the short and long format, bits 2-7 the
         * size of the table, and bit 0 whether the VHPT walker is
         * enabled.
         */
        ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

        ia64_tlb_init();

#ifdef  CONFIG_HUGETLB_PAGE
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
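/*
 * Starting at offset "i" into node "node"'s pfn range, walk the kernel
 * page tables backing the virtual mem_map and return the offset of the
 * next pfn whose struct page is actually mapped (skipping any hole).
 */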
int vmemmap_find_next_valid_pfn(int node, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;
        pg_data_t *pgdat = NODE_DATA(node);

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);
        stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}
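
/*
 * Callback with the efi_memmap_walk() signature: allocate and wire up the
 * kernel page-table levels that map the slice of the virtual mem_map
 * covering physical range [start, end), using boot-time allocations from
 * that range's node.
 */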
int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
        unsigned long address, start_page, end_page;
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        start_page = (unsigned long) map_start & PAGE_MASK;
        end_page = PAGE_ALIGN((unsigned long) map_end);
        node = paddr_to_nid(__pa(start));

        for (address = start_page; address < end_page; address += PAGE_SIZE) {
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pud = pud_offset(pgd, address);

                if (pud_none(*pud))
                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pmd = pmd_offset(pud, address);

                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
                pte = pte_offset_kernel(pmd, address);

                if (pte_none(*pte))
                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
                                             PAGE_KERNEL));
        }
        return 0;
}

struct memmap_init_callback_data {
        struct page *start;
        struct page *end;
        int nid;
        unsigned long zone;
};
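
/*
 * efi_memmap_walk() callback used by memmap_init() below: initialize the
 * vmem_map entries corresponding to physical range [start, end), clipped
 * to the window described by the callback argument.
 */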
static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
        struct memmap_init_callback_data *args;
        struct page *map_start, *map_end;

        args = (struct memmap_init_callback_data *) arg;
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

        if (map_start < args->start)
                map_start = args->start;
        if (map_end > args->end)
                map_end = args->end;

        /*
         * We have to initialize "out of bounds" struct page elements that fit completely
         * on the same pages that were allocated for the "in bounds" elements because they
         * may be referenced later (and found to be "reserved").
         */
        map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
        map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
                    / sizeof(struct page));

        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
                                 MEMMAP_EARLY);
        return 0;
}
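
/*
 * Arch-specific memmap_init(): with a virtual mem_map, only initialize
 * the struct pages that are backed by real memory, by walking the EFI
 * memory map; otherwise fall back to the generic memmap_init_zone().
 */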
void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
             unsigned long start_pfn)
{
        if (!vmem_map)
                memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
        else {
                struct page *start;
                struct memmap_init_callback_data args;

                start = pfn_to_page(start_pfn);
                args.start = start;
                args.end = start + size;
                args.nid = nid;
                args.zone = zone;

                efi_memmap_walk(virtual_memmap_init, &args);
        }
}
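
/*
 * A pfn is valid when its vmem_map entry is actually mapped: probe the
 * first and (if it spans a page boundary) last byte of the struct page
 * with __get_user(), which fails gracefully instead of faulting when the
 * backing page is absent.
 */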
int
ia64_pfn_valid (unsigned long pfn)
{
        char byte;
        struct page *pg = pfn_to_page(pfn);

        return     (__get_user(byte, (char __user *) pg) == 0)
                && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
                        || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);
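
/*
 * efi_memmap_walk() callback: remember the largest gap seen between
 * consecutive memory descriptors in *arg (the EFI memmap is assumed to
 * be sorted, as noted below).
 */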
int __init find_largest_hole(u64 start, u64 end, void *arg)
{
        u64 *max_gap = arg;

        static u64 last_end = PAGE_OFFSET;

        /* NOTE: this algorithm assumes efi memmap table is ordered */

        if (*max_gap < (start - last_end))
                *max_gap = start - last_end;
        last_end = end;
        return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */
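
/*
 * Register one usable physical memory range with memblock, trimming any
 * overlap with the crash-kernel reservation when kexec is configured.
 */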
int __init register_active_ranges(u64 start, u64 len, int nid)
{
        u64 end = start + len;

#ifdef CONFIG_KEXEC
        if (start > crashk_res.start && start < crashk_res.end)
                start = crashk_res.end;
        if (end > crashk_res.start && end < crashk_res.end)
                end = crashk_res.start;
#endif

        if (start < end)
                memblock_add_node(__pa(start), end - start, nid);
        return 0;
}
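
/*
 * Update the global min_low_pfn/max_low_pfn from one memory range, rounded
 * to page granularity under FLATMEM and to granule granularity otherwise,
 * so both memory models compute the limits consistently.
 */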
int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
        unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
        pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
        pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
        pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
        pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
        min_low_pfn = min(min_low_pfn, pfn_start);
        max_low_pfn = max(max_low_pfn, pfn_end);
        return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */
static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
        nolwsys = 1;
        return 1;
}

__setup("nolwsys", nolwsys_setup);
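
/*
 * Late memory initialization: sanity-check the page-table geometry, set up
 * platform DMA, release all boot memory to the page allocator, mark
 * fsyscall entries that lack a light-weight handler, and install the gate
 * page.
 */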
void __init
mem_init (void)
{
        int i;

        BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
        BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
        /*
         * This needs to be called _after_ the command line has been parsed but _before_
         * any drivers that may need the PCI DMA interface are initialized or bootmem has
         * been freed.
         */
        platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        set_max_mapnr(max_low_pfn);
        high_memory = __va(max_low_pfn * PAGE_SIZE);
        free_all_bootmem();
        mem_init_print_info(NULL);

        /*
         * For fsyscall entry points with no light-weight handler, use the ordinary
         * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
         * code can tell them apart.
         */
        for (i = 0; i < NR_syscalls; ++i) {
                extern unsigned long sys_call_table[NR_syscalls];
                unsigned long *fsyscall_table = paravirt_get_fsyscall_table();

                if (!fsyscall_table[i] || nolwsys)
                        fsyscall_table[i] = sys_call_table[i] | 1;
        }
        setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
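/*
 * Memory hot-add: hand the new range to the generic __add_pages(), using
 * the zone selected by zone_for_memory(..., ZONE_NORMAL) on the target
 * node.  arch_remove_memory() below is the hot-remove counterpart.
 */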
int arch_add_memory(int nid, u64 start, u64 size)
{
        pg_data_t *pgdat;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        pgdat = NODE_DATA(nid);

        zone = pgdat->node_zones +
                zone_for_memory(nid, start, size, ZONE_NORMAL);
        ret = __add_pages(nid, zone, start_pfn, nr_pages);

        if (ret)
                printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                       __func__,  ret);

        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;
        int ret;

        zone = page_zone(pfn_to_page(start_pfn));
        ret = __remove_pages(zone, start_pfn, nr_pages);
        if (ret)
                pr_warn("%s: Problem encountered in __remove_pages() as"
                        " ret=%d\n", __func__, ret);

        return ret;
}
#endif
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(unsigned int filter)
{
        int total_reserved = 0;
        unsigned long total_present = 0;
        pg_data_t *pgdat;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas(filter);
        printk(KERN_INFO "Node memory in pages:\n");
        for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int reserved = 0;
                int nid = pgdat->node_id;
                int zoneid;

                if (skip_free_areas_node(filter, nid))
                        continue;
                pgdat_resize_lock(pgdat, &flags);

                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                        struct zone *zone = &pgdat->node_zones[zoneid];
                        if (!populated_zone(zone))
                                continue;

                        reserved += zone->present_pages - zone->managed_pages;
                }
                present = pgdat->node_present_pages;

                pgdat_resize_unlock(pgdat, &flags);
                total_present += present;
                total_reserved += reserved;
                printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, ",
                       nid, present, reserved);
        }
        printk(KERN_INFO "%ld pages of RAM\n", total_present);
        printk(KERN_INFO "%d reserved pages\n", total_reserved);
        printk(KERN_INFO "Total of %ld pages in page table cache\n",
               quicklist_total_size());
        printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}