cbc65df240
Patch series "mm, swap: VMA based swap readahead", v4. The swap readahead is an important mechanism to reduce the swap in latency. Although pure sequential memory access pattern isn't very popular for anonymous memory, the space locality is still considered valid. In the original swap readahead implementation, the consecutive blocks in swap device are readahead based on the global space locality estimation. But the consecutive blocks in swap device just reflect the order of page reclaiming, don't necessarily reflect the access pattern in virtual memory space. And the different tasks in the system may have different access patterns, which makes the global space locality estimation incorrect. In this patchset, when page fault occurs, the virtual pages near the fault address will be readahead instead of the swap slots near the fault swap slot in swap device. This avoid to readahead the unrelated swap slots. At the same time, the swap readahead is changed to work on per-VMA from globally. So that the different access patterns of the different VMAs could be distinguished, and the different readahead policy could be applied accordingly. The original core readahead detection and scaling algorithm is reused, because it is an effect algorithm to detect the space locality. In addition to the swap readahead changes, some new sysfs interface is added to show the efficiency of the readahead algorithm and some other swap statistics. This new implementation will incur more small random read, on SSD, the improved correctness of estimation and readahead target should beat the potential increased overhead, this is also illustrated in the test results below. But on HDD, the overhead may beat the benefit, so the original implementation will be used by default. The test and result is as follow, Common test condition ===================== Test Machine: Xeon E5 v3 (2 sockets, 72 threads, 32G RAM) Swap device: NVMe disk Micro-benchmark with combined access pattern ============================================ vm-scalability, sequential swap test case, 4 processes to eat 50G virtual memory space, repeat the sequential memory writing until 300 seconds. The first round writing will trigger swap out, the following rounds will trigger sequential swap in and out. At the same time, run vm-scalability random swap test case in background, 8 processes to eat 30G virtual memory space, repeat the random memory write until 300 seconds. This will trigger random swap-in in the background. This is a combined workload with sequential and random memory accessing at the same time. The result (for sequential workload) is as follow, Base Optimized ---- --------- throughput 345413 KB/s 414029 KB/s (+19.9%) latency.average 97.14 us 61.06 us (-37.1%) latency.50th 2 us 1 us latency.60th 2 us 1 us latency.70th 98 us 2 us latency.80th 160 us 2 us latency.90th 260 us 217 us latency.95th 346 us 369 us latency.99th 1.34 ms 1.09 ms ra_hit% 52.69% 99.98% The original swap readahead algorithm is confused by the background random access workload, so readahead hit rate is lower. The VMA-base readahead algorithm works much better. Linpack ======= The test memory size is bigger than RAM to trigger swapping. Base Optimized ---- --------- elapsed_time 393.49 s 329.88 s (-16.2%) ra_hit% 86.21% 98.82% The score of base and optimized kernel hasn't visible changes. But the elapsed time reduced and readahead hit rate improved, so the optimized kernel runs better for startup and tear down stages. 
And the absolute value of readahead hit rate is high, shows that the space locality is still valid in some practical workloads. This patch (of 5): The statistics for total readahead pages and total readahead hits are recorded and exported via the following sysfs interface. /sys/kernel/mm/swap/ra_hits /sys/kernel/mm/swap/ra_total With them, the efficiency of the swap readahead could be measured, so that the swap readahead algorithm and parameters could be tuned accordingly. [akpm@linux-foundation.org: don't display swap stats if CONFIG_SWAP=n] Link: http://lkml.kernel.org/r/20170807054038.1843-2-ying.huang@intel.com Signed-off-by: "Huang, Ying" <ying.huang@intel.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Rik van Riel <riel@redhat.com> Cc: Shaohua Li <shli@kernel.org> Cc: Hugh Dickins <hughd@google.com> Cc: Fengguang Wu <fengguang.wu@intel.com> Cc: Tim Chen <tim.c.chen@intel.com> Cc: Dave Hansen <dave.hansen@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
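With these two counters, the readahead hit rate is simply ra_hits /
ra_total.  A minimal userspace sketch of computing it (assuming the
sysfs paths above exist and are readable; this program is not part of
the patch):

#include <stdio.h>

/* Read a single decimal counter from a sysfs file; -1 on failure. */
static long read_counter(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long hits = read_counter("/sys/kernel/mm/swap/ra_hits");
	long total = read_counter("/sys/kernel/mm/swap/ra_total");

	if (hits < 0 || total <= 0) {
		fprintf(stderr, "swap readahead stats unavailable\n");
		return 1;
	}
	printf("ra_hit%%: %.2f\n", 100.0 * (double)hits / (double)total);
	return 0;
}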
122 lines
2.9 KiB
C
#ifndef VM_EVENT_ITEM_H_INCLUDED
#define VM_EVENT_ITEM_H_INCLUDED

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) xx##_HIGH,
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
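
/*
 * Illustrative expansion (comment added for this edit, not in the
 * original file): if, of the optional zones, only CONFIG_ZONE_DMA is
 * enabled, then FOR_ALL_ZONES(PGALLOC) expands to
 *	PGALLOC_DMA, PGALLOC_NORMAL, PGALLOC_MOVABLE,
 * i.e. one event counter per compiled-in zone type.
 */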

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		FOR_ALL_ZONES(ALLOCSTALL),
		FOR_ALL_ZONES(PGSCAN_SKIP),
		PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
		PGFAULT, PGMAJFAULT,
		PGLAZYFREED,
		PGREFILL,
		PGSTEAL_KSWAPD,
		PGSTEAL_DIRECT,
		PGSCAN_KSWAPD,
		PGSCAN_DIRECT,
		PGSCAN_DIRECT_THROTTLE,
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		PAGEOUTRUN, PGROTATED,
		DROP_PAGECACHE, DROP_SLAB,
		OOM_KILL,
#ifdef CONFIG_NUMA_BALANCING
		NUMA_PTE_UPDATES,
		NUMA_HUGE_PTE_UPDATES,
		NUMA_HINT_FAULTS,
		NUMA_HINT_FAULTS_LOCAL,
		NUMA_PAGE_MIGRATE,
#endif
#ifdef CONFIG_MIGRATION
		PGMIGRATE_SUCCESS, PGMIGRATE_FAIL,
#endif
#ifdef CONFIG_COMPACTION
		COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
		COMPACTISOLATED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
		KCOMPACTD_WAKE,
		KCOMPACTD_MIGRATE_SCANNED, KCOMPACTD_FREE_SCANNED,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_FILE_ALLOC,
		THP_FILE_MAPPED,
		THP_SPLIT_PAGE,
		THP_SPLIT_PAGE_FAILED,
		THP_DEFERRED_SPLIT_PAGE,
		THP_SPLIT_PMD,
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
		THP_SPLIT_PUD,
#endif
		THP_ZERO_PAGE_ALLOC,
		THP_ZERO_PAGE_ALLOC_FAILED,
		THP_SWPOUT,
		THP_SWPOUT_FALLBACK,
#endif
#ifdef CONFIG_MEMORY_BALLOON
		BALLOON_INFLATE,
		BALLOON_DEFLATE,
#ifdef CONFIG_BALLOON_COMPACTION
		BALLOON_MIGRATE,
#endif
#endif
#ifdef CONFIG_DEBUG_TLBFLUSH
		NR_TLB_REMOTE_FLUSH,	/* cpu tried to flush others' tlbs */
		NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
		NR_TLB_LOCAL_FLUSH_ALL,
		NR_TLB_LOCAL_FLUSH_ONE,
#endif /* CONFIG_DEBUG_TLBFLUSH */
#ifdef CONFIG_DEBUG_VM_VMACACHE
		VMACACHE_FIND_CALLS,
		VMACACHE_FIND_HITS,
		VMACACHE_FULL_FLUSHES,
#endif
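		/*
		 * Note (comment added for this edit): SWAP_RA counts pages
		 * read ahead by swap readahead and SWAP_RA_HIT counts those
		 * later hit; per the commit message above, they back
		 * /sys/kernel/mm/swap/ra_total and /sys/kernel/mm/swap/ra_hits.
		 */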
#ifdef CONFIG_SWAP
		SWAP_RA,
		SWAP_RA_HIT,
#endif
		NR_VM_EVENT_ITEMS
};
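
/*
 * Note (comment added for this edit): when THP is compiled out, any use
 * of the two THP_FILE_* events below becomes a compile-time error via
 * BUILD_BUG() rather than silently compiling against a wrong counter.
 */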
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define THP_FILE_ALLOC ({ BUILD_BUG(); 0; })
#define THP_FILE_MAPPED ({ BUILD_BUG(); 0; })
#endif

#endif	/* VM_EVENT_ITEM_H_INCLUDED */
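
For context, the enum above only declares counter slots; kernel code
bumps them with count_vm_event() from <linux/vmstat.h>.  A minimal
sketch of how the swap readahead path might account these events (the
helper names are hypothetical, not the patch's actual call sites):

#include <linux/vmstat.h>

/* Hypothetical helpers, for illustration only. */
static inline void swap_ra_account_submitted(void)
{
	count_vm_event(SWAP_RA);	/* one page submitted for readahead */
}

static inline void swap_ra_account_hit(void)
{
	count_vm_event(SWAP_RA_HIT);	/* a readahead page was actually used */
}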