mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-26 12:35:17 +07:00
32d6bd9059
This is the third version of the patchset previously sent [1]. I have basically only rebased it on top of 4.7-rc1 tree and dropped "dm: get rid of superfluous gfp flags" which went through dm tree. I am sending it now because it is tree wide and chances for conflicts are reduced considerably when we want to target rc2. I plan to send the next step and rename the flag and move to a better semantic later during this release cycle so we will have a new semantic ready for 4.8 merge window hopefully. Motivation: While working on something unrelated I've checked the current usage of __GFP_REPEAT in the tree. It seems that a majority of the usage is and always has been bogus because __GFP_REPEAT has always been about costly high order allocations while we are using it for order-0 or very small orders very often. It seems that a big pile of them is just a copy&paste when a code has been adopted from one arch to another. I think it makes some sense to get rid of them because they are just making the semantic more unclear. Please note that GFP_REPEAT is documented as * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt * _might_ fail. This depends upon the particular VM implementation. while !costly requests have basically nofail semantic. So one could reasonably expect that order-0 request with __GFP_REPEAT will not loop for ever. This is not implemented right now though. I would like to move on with __GFP_REPEAT and define a better semantic for it. $ git grep __GFP_REPEAT origin/master | wc -l 111 $ git grep __GFP_REPEAT | wc -l 36 So we are down to the third after this patch series. The remaining places really seem to be relying on __GFP_REPEAT due to large allocation requests. This still needs some double checking which I will do later after all the simple ones are sorted out. 
I am touching a lot of arch specific code here and I hope I got it right but as a matter of fact I even didn't compile test for some archs as I do not have cross compiler for them. Patches should be quite trivial to review for stupid compile mistakes though. The tricky parts are usually hidden by macro definitions and thats where I would appreciate help from arch maintainers. [1] http://lkml.kernel.org/r/1461849846-27209-1-git-send-email-mhocko@kernel.org This patch (of 19): __GFP_REPEAT has a rather weak semantic but since it has been introduced around 2.6.12 it has been ignored for low order allocations. Yet we have the full kernel tree with its usage for apparently order-0 allocations. This is really confusing because __GFP_REPEAT is explicitly documented to allow allocation failures which is a weaker semantic than the current order-0 has (basically nofail). Let's simply drop __GFP_REPEAT from those places. This would allow to identify place which really need allocator to retry harder and formulate a more specific semantic for what the flag is supposed to do actually. Link: http://lkml.kernel.org/r/1464599699-30131-2-git-send-email-mhocko@kernel.org Signed-off-by: Michal Hocko <mhocko@suse.com> Cc: "David S. Miller" <davem@davemloft.net> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: "James E.J. 
Bottomley" <jejb@parisc-linux.org> Cc: "Theodore Ts'o" <tytso@mit.edu> Cc: Andy Lutomirski <luto@kernel.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chen Liqin <liqin.linux@gmail.com> Cc: Chris Metcalf <cmetcalf@mellanox.com> [for tile] Cc: Guan Xuetao <gxt@mprc.pku.edu.cn> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: John Crispin <blogic@openwrt.org> Cc: Lennox Wu <lennox.wu@gmail.com> Cc: Ley Foon Tan <lftan@altera.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Matt Fleming <matt@codeblueprint.co.uk> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@arm.linux.org.uk> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Will Deacon <will.deacon@arm.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
241 lines
5.5 KiB
C
241 lines
5.5 KiB
C
/*
|
|
* Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
|
|
* Licensed under the GPL
|
|
*/
|
|
|
|
#include <linux/stddef.h>
|
|
#include <linux/module.h>
|
|
#include <linux/bootmem.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/slab.h>
|
|
#include <asm/fixmap.h>
|
|
#include <asm/page.h>
|
|
#include <as-layout.h>
|
|
#include <init.h>
|
|
#include <kern.h>
|
|
#include <kern_util.h>
|
|
#include <mem_user.h>
|
|
#include <os.h>
|
|
|
|
/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/* allocated in paging_init and unchanged thereafter */
static unsigned long *empty_bad_page = NULL;

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;

/* Nonzero once kmalloc may be used; set at the end of mem_init() */
int kmalloc_ok = 0;

/* Used during early boot; set in mem_init() from the host's brk */
static unsigned long brk_end;
|
|
|
|
/*
 * Finish early-boot memory setup: zero the shared zero page, map and
 * release to bootmem the area between the end of the host brk and the
 * start of the reserved region, then hand all bootmem pages to the
 * page allocator and mark kmalloc as usable.
 */
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	free_bootmem(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	free_all_bootmem();
	max_low_pfn = totalram_pages;
	max_pfn = totalram_pages;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}
|
|
|
|
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.  No-op if *pmd is already populated.  Runs only at
 * boot, so the pte page comes from the bootmem allocator.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		/* sanity: the table we just installed must be the one
		 * pte_offset_kernel() resolves to */
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}
|
|
|
|
/*
 * With 3-level page tables, allocate a pmd table from bootmem and hook
 * it into *pud.  With 2-level page tables the middle level is folded
 * away and there is nothing to do.
 */
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	/* sanity: the installed table must be what pmd_offset() finds */
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}
|
|
|
|
/*
 * Walk [start, end) under pgd_base and allocate any missing pmd/pte
 * levels, so the range can later be populated (e.g. by set_fixmap()).
 * Only the page-table structure is built here; no leaf mappings are
 * installed.
 */
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;		/* i: pgd slot, j: pmd slot within a pgd */
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		/* later pgd entries start at their first pmd slot */
		j = 0;
	}
}
|
|
|
|
/*
 * Reuse the host's vsyscall area: build the page-table structure for
 * FIXADDR_USER_START..FIXADDR_USER_END, copy the host area's contents
 * into fresh bootmem pages, and map the copy read-only at the same
 * virtual addresses.  No-op unless
 * CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA is enabled and the range is
 * non-empty.
 */
static void __init fixaddr_user_init( void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	/* make sure pmd/pte levels exist for the whole range */
	fixrange_init( FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	/* take a private copy of the host area */
	v = (unsigned long) alloc_bootmem_low_pages(size);
	memcpy((void *) v , (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	/* point each pte at the matching page of the copy, read-only */
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
		      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		pud = pud_offset(pgd, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}
|
|
|
|
/*
 * Set up the kernel's initial paging state: allocate the zero and bad
 * pages from bootmem, put all memory between uml_physmem and end_iomem
 * into ZONE_NORMAL, and pre-build the page-table structure for the
 * fixmap range and the host vsyscall area.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}
|
|
|
|
/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
	/* intentionally empty -- see comment above */
}
|
|
|
|
#ifdef CONFIG_BLK_DEV_INITRD
|
|
/* Return the initrd image's pages [start, end) to the page allocator. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
|
|
#endif
|
|
|
|
/* Allocate and free page tables. */
|
|
|
|
/*
 * Allocate a pgd for a new address space: the user portion is cleared
 * and the kernel portion is copied from swapper_pg_dir so kernel
 * mappings are shared by every mm.  Returns NULL on allocation
 * failure.
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}
|
|
|
|
/* Release a pgd previously obtained from pgd_alloc(). */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}
|
|
|
|
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
|
|
{
|
|
pte_t *pte;
|
|
|
|
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
|
|
return pte;
|
|
}
|
|
|
|
/*
 * Allocate a zeroed page for a user page table and run the page-table
 * page constructor on it.  Returns NULL if either the allocation or
 * the constructor fails; in the latter case the page is freed again.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		/* ctor failed: undo the allocation before bailing out */
		__free_page(pte);
		return NULL;
	}
	return pte;
}
|
|
|
|
#ifdef CONFIG_3_LEVEL_PGTABLES
/*
 * Allocate a zeroed page to use as a pmd table.  Have the page
 * allocator zero it via __GFP_ZERO -- consistent with
 * pte_alloc_one_kernel()/pte_alloc_one() above -- instead of a
 * separate memset() after a plain GFP_KERNEL allocation.  Returns
 * NULL on allocation failure, exactly as before.
 */
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_ZERO);
}
#endif
|
|
|
|
/*
 * Trivial kmalloc() pass-through.  NOTE(review): presumably exported
 * so os-side (host userspace) code can allocate kernel memory without
 * including kernel headers -- confirm against callers.
 */
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}
|