/*
 * mm/debug.c
 *
 * mm/ specific debug routines.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>

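/*
 * Human-readable names for the bits in page->flags, consumed by
 * dump_flags().  The BUILD_BUG_ON() in dump_page_badflags() ensures
 * this table stays in sync with the pageflags enum: every flag below
 * __NR_PAGEFLAGS must have an entry here.
 */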
static const struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
	{1UL << PG_head,		"head"		},
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	{1UL << PG_young,		"young"		},
	{1UL << PG_idle,		"idle"		},
#endif
};
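
/*
 * Print @flags as a '|'-separated list of the symbolic names given in
 * @names.  Bits at or above NR_PAGEFLAGS (the zone/node/section ids
 * stored at the top of page->flags) are masked off first; any set bit
 * that has no name is printed in hex at the end of the list.
 */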
static void dump_flags(unsigned long flags,
			const struct trace_print_flags *names, int count)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	pr_emerg("flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; i < count && flags; i++) {

		mask = names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		pr_cont("%s%s", delim, names[i].name);
		delim = "|";
	}

	/* check for left over flags */
	if (flags)
		pr_cont("%s%#lx", delim, flags);

	pr_cont(")\n");
}
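
/*
 * Dump the state of @page: reference count, mapcount, mapping, index
 * and the decoded page flags.  Flags that are also set in @badflags
 * are listed a second time under "bad because of flags:" so the
 * offending bits stand out in the log.
 */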
void dump_page_badflags(struct page *page, const char *reason,
		unsigned long badflags)
{
	pr_emerg("page:%p count:%d mapcount:%d mapping:%p index:%#lx",
		  page, atomic_read(&page->_count), page_mapcount(page),
		  page->mapping, page->index);
	if (PageCompound(page))
		pr_cont(" compound_mapcount: %d", compound_mapcount(page));
	pr_cont("\n");
	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
	dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names));
	if (reason)
		pr_alert("page dumped because: %s\n", reason);
	if (page->flags & badflags) {
		pr_alert("bad because of flags:\n");
		dump_flags(page->flags & badflags,
				pageflag_names, ARRAY_SIZE(pageflag_names));
	}
#ifdef CONFIG_MEMCG
	if (page->mem_cgroup)
		pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup);
#endif
}
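
/*
 * Wrapper around dump_page_badflags() with no flags marked bad; this
 * is what e.g. VM_BUG_ON_PAGE() uses to report the page that tripped
 * an assertion.
 */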
void dump_page(struct page *page, const char *reason)
{
	dump_page_badflags(page, reason, 0);
}
EXPORT_SYMBOL(dump_page);

#ifdef CONFIG_DEBUG_VM
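
/*
 * Symbolic names for vma->vm_flags, used when dumping the flags of a
 * vm_area_struct.
 */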
static const struct trace_print_flags vmaflags_names[] = {
	{VM_READ,			"read"		},
	{VM_WRITE,			"write"		},
	{VM_EXEC,			"exec"		},
	{VM_SHARED,			"shared"	},
	{VM_MAYREAD,			"mayread"	},
	{VM_MAYWRITE,			"maywrite"	},
	{VM_MAYEXEC,			"mayexec"	},
	{VM_MAYSHARE,			"mayshare"	},
	{VM_GROWSDOWN,			"growsdown"	},
	{VM_PFNMAP,			"pfnmap"	},
	{VM_DENYWRITE,			"denywrite"	},
	{VM_LOCKONFAULT,		"lockonfault"	},
	{VM_LOCKED,			"locked"	},
	{VM_IO,				"io"		},
	{VM_SEQ_READ,			"seqread"	},
	{VM_RAND_READ,			"randread"	},
	{VM_DONTCOPY,			"dontcopy"	},
	{VM_DONTEXPAND,			"dontexpand"	},
	{VM_ACCOUNT,			"account"	},
	{VM_NORESERVE,			"noreserve"	},
	{VM_HUGETLB,			"hugetlb"	},
#if defined(CONFIG_X86)
	{VM_PAT,			"pat"		},
#elif defined(CONFIG_PPC)
	{VM_SAO,			"sao"		},
#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64)
	{VM_GROWSUP,			"growsup"	},
#elif !defined(CONFIG_MMU)
	{VM_MAPPED_COPY,		"mappedcopy"	},
#else
	{VM_ARCH_1,			"arch_1"	},
#endif
	{VM_DONTDUMP,			"dontdump"	},
#ifdef CONFIG_MEM_SOFT_DIRTY
	{VM_SOFTDIRTY,			"softdirty"	},
#endif
	{VM_MIXEDMAP,			"mixedmap"	},
	{VM_HUGEPAGE,			"hugepage"	},
	{VM_NOHUGEPAGE,			"nohugepage"	},
	{VM_MERGEABLE,			"mergeable"	},
};
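
/*
 * Dump the addresses, links, protection and flags of @vma.  Like the
 * rest of this #ifdef block, only built when CONFIG_DEBUG_VM is set.
 */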
void dump_vma(const struct vm_area_struct *vma)
{
	pr_emerg("vma %p start %p end %p\n"
		"next %p prev %p mm %p\n"
		"prot %lx anon_vma %p vm_ops %p\n"
		"pgoff %lx file %p private_data %p\n",
		vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
		vma->vm_prev, vma->vm_mm,
		(unsigned long)pgprot_val(vma->vm_page_prot),
		vma->anon_vma, vma->vm_ops, vma->vm_pgoff,
		vma->vm_file, vma->vm_private_data);
	dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names));
}
EXPORT_SYMBOL(dump_vma);
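
/*
 * Dump the fields of @mm in a single pr_emerg() call.  The format
 * string and the argument list follow the same #ifdef order, and both
 * end with a dummy "%s"/"" pair so that every real argument line can
 * end in a comma no matter which config options are enabled.
 */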
void dump_mm(const struct mm_struct *mm)
{
	pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
#ifdef CONFIG_MMU
		"get_unmapped_area %p\n"
#endif
		"mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n"
		"pgd %p mm_users %d mm_count %d nr_ptes %lu nr_pmds %lu map_count %d\n"
		"hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n"
		"pinned_vm %lx data_vm %lx exec_vm %lx stack_vm %lx\n"
		"start_code %lx end_code %lx start_data %lx end_data %lx\n"
		"start_brk %lx brk %lx start_stack %lx\n"
		"arg_start %lx arg_end %lx env_start %lx env_end %lx\n"
		"binfmt %p flags %lx core_state %p\n"
#ifdef CONFIG_AIO
		"ioctx_table %p\n"
#endif
#ifdef CONFIG_MEMCG
		"owner %p "
#endif
		"exe_file %p\n"
#ifdef CONFIG_MMU_NOTIFIER
		"mmu_notifier_mm %p\n"
#endif
#ifdef CONFIG_NUMA_BALANCING
		"numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n"
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		"tlb_flush_pending %d\n"
#endif
		"%s",	/* This is here to hold the comma */

		mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
		mm->get_unmapped_area,
#endif
		mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end,
		mm->pgd, atomic_read(&mm->mm_users),
		atomic_read(&mm->mm_count),
		atomic_long_read((atomic_long_t *)&mm->nr_ptes),
		mm_nr_pmds((struct mm_struct *)mm),
		mm->map_count,
		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
		mm->pinned_vm, mm->data_vm, mm->exec_vm, mm->stack_vm,
		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
		mm->start_brk, mm->brk, mm->start_stack,
		mm->arg_start, mm->arg_end, mm->env_start, mm->env_end,
		mm->binfmt, mm->flags, mm->core_state,
#ifdef CONFIG_AIO
		mm->ioctx_table,
#endif
#ifdef CONFIG_MEMCG
		mm->owner,
#endif
		mm->exe_file,
#ifdef CONFIG_MMU_NOTIFIER
		mm->mmu_notifier_mm,
#endif
#ifdef CONFIG_NUMA_BALANCING
		mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq,
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
		mm->tlb_flush_pending,
#endif
		""		/* This is here to not have a comma! */
		);

	dump_flags(mm->def_flags, vmaflags_names,
				ARRAY_SIZE(vmaflags_names));
}

#endif /* CONFIG_DEBUG_VM */