mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-21 11:37:47 +07:00
2813b9c029
Tag-based KASAN doesn't check memory accesses through pointers tagged with 0xff. When page_address is used to get pointer to memory that corresponds to some page, the tag of the resulting pointer gets set to 0xff, even though the allocated memory might have been tagged differently. For slab pages it's impossible to recover the correct tag to return from page_address, since the page might contain multiple slab objects tagged with different values, and we can't know in advance which one of them is going to get accessed. For non slab pages however, we can recover the tag in page_address, since the whole page was marked with the same tag. This patch adds tagging to non slab memory allocated with pagealloc. To set the tag of the pointer returned from page_address, the tag gets stored to page->flags when the memory gets allocated. Link: http://lkml.kernel.org/r/d758ddcef46a5abc9970182b9137e2fbee202a2c.1544099024.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov <andreyknvl@google.com> Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com> Reviewed-by: Dmitry Vyukov <dvyukov@google.com> Acked-by: Will Deacon <will.deacon@arm.com> Cc: Christoph Lameter <cl@linux.com> Cc: Mark Rutland <mark.rutland@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
108 lines
3.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PAGE_FLAGS_LAYOUT_H
#define PAGE_FLAGS_LAYOUT_H

#include <linux/numa.h>
#include <generated/bounds.h>

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#elif MAX_NR_ZONES <= 8
#define ZONES_SHIFT 3
#else
#error ZONES_SHIFT -- too many zones configured adjust calculation
#endif

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>

/* SECTION_SHIFT	#bits space required to store a section # */
#define SECTIONS_SHIFT	(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#endif /* CONFIG_SPARSEMEM */

/*
 * page->flags layout:
 *
 * There are five possibilities for how page->flags get laid out.  The first
 * pair is for the normal case without sparsemem. The second pair is for
 * sparsemem when there is plenty of space for node and section information.
 * The last is when there is insufficient space in page->flags and a separate
 * lookup is necessary.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE |          ... | FLAGS |
 *      " plus space for last_cpupid: |       NODE     | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE |          ... | FLAGS |
 *      " plus space for last_cpupid: | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

/*
 * The node number is stored in page->flags only when it fits alongside the
 * section and zone fields; otherwise NODES_WIDTH collapses to 0 and a
 * separate lookup (NODE_NOT_IN_PAGE_FLAGS below) is required.
 */
#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

#ifdef CONFIG_NUMA_BALANCING
/* last_cpupid packs an 8-bit PID hash together with the last CPU number. */
#define LAST__PID_SHIFT 8
#define LAST__PID_MASK  ((1 << LAST__PID_SHIFT)-1)

#define LAST__CPU_SHIFT NR_CPUS_BITS
#define LAST__CPU_MASK  ((1 << LAST__CPU_SHIFT)-1)

#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
#else
#define LAST_CPUPID_SHIFT 0
#endif

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif

#ifdef CONFIG_KASAN_SW_TAGS
/*
 * Software tag-based KASAN stores the pointer tag of non-slab pages in
 * page->flags, so that page_address() can recover the correct tag.  The
 * tag occupies one byte; if it no longer fits next to the other fields,
 * fail the build rather than silently dropping tags.
 */
#define KASAN_TAG_WIDTH 8
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
	> BITS_PER_LONG - NR_PAGEFLAGS
#error "KASAN: not enough bits in page flags for tag"
#endif
#else
#define KASAN_TAG_WIDTH 0
#endif

/*
 * We are going to use the flags for the page to node mapping if its in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif

#endif /* PAGE_FLAGS_LAYOUT_H */