commit abaf3787ac
PROC_FS is a bool, so this code is either present or absent; it will never be modular. Using module_init() as an alias for __initcall() is therefore misleading. Fix this up now, so that module_init can be relocated from init.h into module.h in the future. Without this change, we would have to add module.h to obviously non-modular code, and that would be ugly at best.

Note that direct use of __initcall() is discouraged in favour of one of the priority-categorized subgroups. As __initcall() gets mapped onto device_initcall(), switching to fs_initcall() (which makes sense for fs code) moves these registrations from level 6 (device) to level 5 (fs), i.e. slightly earlier. No impact from that small difference was observed during testing, nor is any expected.

Also note that this change uncovers a missing-semicolon bug in the registration of vmcore_init as an initcall.

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
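As an illustration, the conversion pattern this commit applies (sketched here against this file's own registration; the other fs/proc call sites follow the same shape):

/* Before: on built-in code, module_init() is just an alias for
 * __initcall(), which maps onto device_initcall() -- level 6. */
module_init(proc_meminfo_init);

/* After: fs_initcall() registers at level 5, slightly earlier, and is
 * the descriptive choice for filesystem code. */
fs_initcall(proc_meminfo_init);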
224 lines · 5.7 KiB · C
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/mmzone.h>
#include <linux/proc_fs.h>
#include <linux/quicklist.h>
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
#include <linux/atomic.h>
#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "internal.h"

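/*
 * Weak no-op default; architectures that track extra counters (x86's
 * DirectMap breakdown, for example) override this to append their own
 * lines to the output.
 */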
void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
{
}

static int meminfo_proc_show(struct seq_file *m, void *v)
{
        struct sysinfo i;
        unsigned long committed;
        struct vmalloc_info vmi;
        long cached;
        long available;
        unsigned long pagecache;
        unsigned long wmark_low = 0;
        unsigned long pages[NR_LRU_LISTS];
        struct zone *zone;
        int lru;

        /*
         * display in kilobytes.
         */
#define K(x) ((x) << (PAGE_SHIFT - 10))
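        /*
         * The shift converts a page count to kB: a page is 1 << PAGE_SHIFT
         * bytes and 1 kB is 1 << 10 bytes, so K(x) multiplies by
         * PAGE_SIZE / 1024 (a factor of 4 with 4 KiB pages).
         */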
        si_meminfo(&i);
        si_swapinfo(&i);
        committed = percpu_counter_read_positive(&vm_committed_as);

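        /*
         * NR_FILE_PAGES includes the swap cache and buffer pages, so both
         * are subtracted to leave plain page cache. The per-cpu vmstat
         * deltas make the counters approximate, hence the clamp below.
         */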
        cached = global_page_state(NR_FILE_PAGES) -
                        total_swapcache_pages() - i.bufferram;
        if (cached < 0)
                cached = 0;

        get_vmalloc_info(&vmi);

        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
                pages[lru] = global_page_state(NR_LRU_BASE + lru);

        for_each_zone(zone)
                wmark_low += zone->watermark[WMARK_LOW];

        /*
         * Estimate the amount of memory available for userspace allocations,
         * without causing swapping.
         *
         * Free memory cannot be taken below the low watermark, before the
         * system starts swapping.
         */
        available = i.freeram - wmark_low;

        /*
         * Not all the page cache can be freed, otherwise the system will
         * start swapping. Assume at least half of the page cache, or the
         * low watermark worth of cache, needs to stay.
         */
        pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
        pagecache -= min(pagecache / 2, wmark_low);
        available += pagecache;

        /*
         * Part of the reclaimable slab consists of items that are in use,
         * and cannot be freed. Cap this estimate at the low watermark.
         */
        available += global_page_state(NR_SLAB_RECLAIMABLE) -
                     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);

        if (available < 0)
                available = 0;

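        /*
         * In full, the estimate printed as MemAvailable is therefore:
         *   free - wmark_low
         *   + pagecache - min(pagecache / 2, wmark_low)
         *   + slab_reclaimable - min(slab_reclaimable / 2, wmark_low)
         * clamped at zero.
         */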
        /*
         * Tagged format, for easy grepping and expansion.
         */
        seq_printf(m,
                "MemTotal:       %8lu kB\n"
                "MemFree:        %8lu kB\n"
                "MemAvailable:   %8lu kB\n"
                "Buffers:        %8lu kB\n"
                "Cached:         %8lu kB\n"
                "SwapCached:     %8lu kB\n"
                "Active:         %8lu kB\n"
                "Inactive:       %8lu kB\n"
                "Active(anon):   %8lu kB\n"
                "Inactive(anon): %8lu kB\n"
                "Active(file):   %8lu kB\n"
                "Inactive(file): %8lu kB\n"
                "Unevictable:    %8lu kB\n"
                "Mlocked:        %8lu kB\n"
#ifdef CONFIG_HIGHMEM
                "HighTotal:      %8lu kB\n"
                "HighFree:       %8lu kB\n"
                "LowTotal:       %8lu kB\n"
                "LowFree:        %8lu kB\n"
#endif
#ifndef CONFIG_MMU
                "MmapCopy:       %8lu kB\n"
#endif
                "SwapTotal:      %8lu kB\n"
                "SwapFree:       %8lu kB\n"
                "Dirty:          %8lu kB\n"
                "Writeback:      %8lu kB\n"
                "AnonPages:      %8lu kB\n"
                "Mapped:         %8lu kB\n"
                "Shmem:          %8lu kB\n"
                "Slab:           %8lu kB\n"
                "SReclaimable:   %8lu kB\n"
                "SUnreclaim:     %8lu kB\n"
                "KernelStack:    %8lu kB\n"
                "PageTables:     %8lu kB\n"
#ifdef CONFIG_QUICKLIST
                "Quicklists:     %8lu kB\n"
#endif
                "NFS_Unstable:   %8lu kB\n"
                "Bounce:         %8lu kB\n"
                "WritebackTmp:   %8lu kB\n"
                "CommitLimit:    %8lu kB\n"
                "Committed_AS:   %8lu kB\n"
                "VmallocTotal:   %8lu kB\n"
                "VmallocUsed:    %8lu kB\n"
                "VmallocChunk:   %8lu kB\n"
#ifdef CONFIG_MEMORY_FAILURE
                "HardwareCorrupted: %5lu kB\n"
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                "AnonHugePages:  %8lu kB\n"
#endif
                ,
                K(i.totalram),
                K(i.freeram),
                K(available),
                K(i.bufferram),
                K(cached),
                K(total_swapcache_pages()),
                K(pages[LRU_ACTIVE_ANON]   + pages[LRU_ACTIVE_FILE]),
                K(pages[LRU_INACTIVE_ANON] + pages[LRU_INACTIVE_FILE]),
                K(pages[LRU_ACTIVE_ANON]),
                K(pages[LRU_INACTIVE_ANON]),
                K(pages[LRU_ACTIVE_FILE]),
                K(pages[LRU_INACTIVE_FILE]),
                K(pages[LRU_UNEVICTABLE]),
                K(global_page_state(NR_MLOCK)),
#ifdef CONFIG_HIGHMEM
                K(i.totalhigh),
                K(i.freehigh),
                K(i.totalram-i.totalhigh),
                K(i.freeram-i.freehigh),
#endif
#ifndef CONFIG_MMU
                K((unsigned long) atomic_long_read(&mmap_pages_allocated)),
#endif
                K(i.totalswap),
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
                K(global_page_state(NR_ANON_PAGES)),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
                                global_page_state(NR_SLAB_UNRECLAIMABLE)),
                K(global_page_state(NR_SLAB_RECLAIMABLE)),
                K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
                global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
                K(global_page_state(NR_PAGETABLE)),
#ifdef CONFIG_QUICKLIST
                K(quicklist_total_size()),
#endif
                K(global_page_state(NR_UNSTABLE_NFS)),
                K(global_page_state(NR_BOUNCE)),
                K(global_page_state(NR_WRITEBACK_TEMP)),
                K(vm_commit_limit()),
                K(committed),
                (unsigned long)VMALLOC_TOTAL >> 10,
                vmi.used >> 10,
                vmi.largest_chunk >> 10
#ifdef CONFIG_MEMORY_FAILURE
                , atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
                    HPAGE_PMD_NR)
#endif
                );

        hugetlb_report_meminfo(m);

        arch_report_meminfo(m);

        return 0;
#undef K
}

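/*
 * seq_file plumbing: single_open() arranges for meminfo_proc_show() to
 * generate the whole file in one pass; seq_read(), seq_lseek() and
 * single_release() below handle the rest of the file_operations.
 */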
static int meminfo_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, meminfo_proc_show, NULL);
}

static const struct file_operations meminfo_proc_fops = {
        .open           = meminfo_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

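/*
 * Note: a mode of 0 passed to proc_create() is promoted to the default
 * 0444, so /proc/meminfo ends up world-readable.
 */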
static int __init proc_meminfo_init(void)
{
        proc_create("meminfo", 0, NULL, &meminfo_proc_fops);
        return 0;
}
fs_initcall(proc_meminfo_init);
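For reference, a minimal userspace consumer of the tagged format emitted above. This is a sketch, not part of the kernel source; it assumes only the "Tag: value kB" layout that meminfo_proc_show() produces:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/meminfo", "r");
        char line[128];
        unsigned long kb;

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Each line is "Tag:  <value> kB", as emitted by seq_printf() above. */
        while (fgets(line, sizeof(line), f)) {
                if (sscanf(line, "MemAvailable: %lu kB", &kb) == 1) {
                        printf("MemAvailable = %lu kB\n", kb);
                        break;
                }
        }
        fclose(f);
        return 0;
}

This runs unprivileged, since /proc/meminfo is world-readable (see the registration note above).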