Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "4 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: fix has_unmovable_pages for HugePages
  fork,memcg: fix crash in free_thread_stack on memcg charge fail
  mm: thp: fix flags for pmd migration when split
  mm, memory_hotplug: initialize struct pages for the full memory section
commit 23203e3f34
kernel/fork.c
@@ -240,8 +240,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * free_thread_stack() can be called in interrupt context,
 	 * so cache the vm_struct.
 	 */
-	if (stack)
+	if (stack) {
 		tsk->stack_vm_area = find_vm_area(stack);
+		tsk->stack = stack;
+	}
 	return stack;
 #else
 	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
@@ -288,7 +290,10 @@ static struct kmem_cache *thread_stack_cache;
 static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
 						  int node)
 {
-	return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+	unsigned long *stack;
+	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+	tsk->stack = stack;
+	return stack;
 }
 
 static void free_thread_stack(struct task_struct *tsk)
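Taken together, the two hunks above make alloc_thread_stack_node() record the freshly allocated stack in tsk->stack itself, so free_thread_stack(), reached when the subsequent memcg charge for the stack fails (per the patch title), no longer works with an unset pointer. Below is a minimal userspace sketch of that pattern, not kernel code; the struct and helper names are stand-ins. The point is that the allocator caches the resource on the owning object before any failure path tries to free it through that object.

/* Userspace sketch of the fork,memcg fix (hypothetical types, not the
 * kernel API): the allocator records the stack on the task before any
 * later failure path frees it via the task. */
#include <stdio.h>
#include <stdlib.h>

struct task {
	void *stack;	/* the cleanup path frees through this field */
};

static void *alloc_thread_stack(struct task *tsk)
{
	void *stack = malloc(4096);

	/* The fix: cache the pointer on the task here, so a failure
	 * later in setup can still find and release the stack. */
	if (stack)
		tsk->stack = stack;
	return stack;
}

static void free_thread_stack(struct task *tsk)
{
	free(tsk->stack);	/* relies on tsk->stack being set */
	tsk->stack = NULL;
}

int main(void)
{
	struct task tsk = { 0 };

	if (!alloc_thread_stack(&tsk))
		return 1;
	/* Pretend a later charge/accounting step failed ... */
	free_thread_stack(&tsk);	/* safe: the stack was cached above */
	printf("stack released via task pointer\n");
	return 0;
}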
mm/huge_memory.c
@@ -2144,23 +2144,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	 */
 	old_pmd = pmdp_invalidate(vma, haddr, pmd);
 
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 	pmd_migration = is_pmd_migration_entry(old_pmd);
-	if (pmd_migration) {
+	if (unlikely(pmd_migration)) {
 		swp_entry_t entry;
 
 		entry = pmd_to_swp_entry(old_pmd);
 		page = pfn_to_page(swp_offset(entry));
-	} else
-#endif
+		write = is_write_migration_entry(entry);
+		young = false;
+		soft_dirty = pmd_swp_soft_dirty(old_pmd);
+	} else {
 		page = pmd_page(old_pmd);
+		if (pmd_dirty(old_pmd))
+			SetPageDirty(page);
+		write = pmd_write(old_pmd);
+		young = pmd_young(old_pmd);
+		soft_dirty = pmd_soft_dirty(old_pmd);
+	}
 	VM_BUG_ON_PAGE(!page_count(page), page);
 	page_ref_add(page, HPAGE_PMD_NR - 1);
-	if (pmd_dirty(old_pmd))
-		SetPageDirty(page);
-	write = pmd_write(old_pmd);
-	young = pmd_young(old_pmd);
-	soft_dirty = pmd_soft_dirty(old_pmd);
 
 	/*
 	 * Withdraw the table only after we mark the pmd entry invalid.
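This hunk restructures __split_huge_pmd_locked() so that write, young and soft_dirty are decoded from the migration swap entry (is_write_migration_entry(), pmd_swp_soft_dirty()) when old_pmd is a migration entry, and from the pmd accessors only when it is a present pmd; previously the flags were always taken from the pmd, even for a migration entry. The following userspace sketch models only that branch structure; every type and helper name below is a made-up stand-in, not the kernel's.

/* Userspace sketch of the control flow in the THP-split fix (all types
 * and helpers are stand-ins): page flags must be decoded from the
 * migration entry when the pmd is a migration entry, and from the pmd
 * itself only when it is present. */
#include <stdbool.h>
#include <stdio.h>

struct fake_pmd {
	bool is_migration;	/* models is_pmd_migration_entry() */
	bool entry_write;	/* models is_write_migration_entry() */
	bool entry_soft_dirty;	/* models pmd_swp_soft_dirty() */
	bool pmd_write;		/* models pmd_write() */
	bool pmd_young;		/* models pmd_young() */
	bool pmd_soft_dirty;	/* models pmd_soft_dirty() */
};

static void split_flags(const struct fake_pmd *old_pmd,
			bool *write, bool *young, bool *soft_dirty)
{
	if (old_pmd->is_migration) {
		/* Migration entry: the pmd bits are meaningless, so read
		 * the information carried by the swap entry instead. */
		*write = old_pmd->entry_write;
		*young = false;
		*soft_dirty = old_pmd->entry_soft_dirty;
	} else {
		/* Present pmd: the usual accessors are valid. */
		*write = old_pmd->pmd_write;
		*young = old_pmd->pmd_young;
		*soft_dirty = old_pmd->pmd_soft_dirty;
	}
}

int main(void)
{
	struct fake_pmd migration = { .is_migration = true, .entry_write = true };
	bool write, young, soft_dirty;

	split_flags(&migration, &write, &young, &soft_dirty);
	printf("migration entry: write=%d young=%d soft_dirty=%d\n",
	       write, young, soft_dirty);
	return 0;
}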
mm/page_alloc.c
@@ -5542,6 +5542,18 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 			cond_resched();
 		}
 	}
+#ifdef CONFIG_SPARSEMEM
+	/*
+	 * If the zone does not span the rest of the section then
+	 * we should at least initialize those pages. Otherwise we
+	 * could blow up on a poisoned page in some paths which depend
+	 * on full sections being initialized (e.g. memory hotplug).
+	 */
+	while (end_pfn % PAGES_PER_SECTION) {
+		__init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
+		end_pfn++;
+	}
+#endif
 }
 
 #ifdef CONFIG_ZONE_DEVICE
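The added CONFIG_SPARSEMEM block keeps initializing struct pages past the zone's last pfn until end_pfn reaches a section boundary, because paths such as memory hotplug assume full sections are initialized. A small userspace sketch of the rounding arithmetic follows; PAGES_PER_SECTION and the pfn values are arbitrary stand-ins chosen for illustration.

/* Userspace sketch of the section-rounding loop added to
 * memmap_init_zone(): keep initializing pfns until end_pfn reaches a
 * section boundary. PAGES_PER_SECTION here is an arbitrary stand-in. */
#include <stdio.h>

#define PAGES_PER_SECTION 512UL	/* the kernel value is arch-dependent */

static void init_single_page(unsigned long pfn)
{
	/* Stand-in for __init_single_page(pfn_to_page(pfn), ...). */
	(void)pfn;
}

int main(void)
{
	unsigned long end_pfn = 1000;	/* zone ends mid-section */
	unsigned long first_extra = end_pfn;

	while (end_pfn % PAGES_PER_SECTION) {
		init_single_page(end_pfn);
		end_pfn++;
	}
	printf("initialized pfns %lu..%lu to reach section boundary %lu\n",
	       first_extra, end_pfn - 1, end_pfn);
	return 0;
}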
mm/page_alloc.c
@@ -7802,11 +7814,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
 		 * handle each tail page individually in migration.
 		 */
 		if (PageHuge(page)) {
+			struct page *head = compound_head(page);
+			unsigned int skip_pages;
 
-			if (!hugepage_migration_supported(page_hstate(page)))
+			if (!hugepage_migration_supported(page_hstate(head)))
 				goto unmovable;
 
-			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+			skip_pages = (1 << compound_order(head)) - (page - head);
+			iter += skip_pages - 1;
 			continue;
 		}
 
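Here the number of pages left to skip inside the huge page is computed from its head page: skip_pages = (1 << compound_order(head)) - (page - head). The old expression based the skip on compound_order(page), which is only meaningful for a head page, so a scan that entered a huge page part-way through could mis-advance. A userspace sketch of the new arithmetic, with made-up pfn values:

/* Userspace sketch of the skip computation in the has_unmovable_pages
 * fix: advance the iterator past the remainder of a huge page, counting
 * from the huge page's head. The values below are made up. */
#include <stdio.h>

int main(void)
{
	unsigned long head_pfn = 0x40000;	/* first pfn of a 1GB huge page */
	unsigned int order = 18;		/* 1GB page = 2^18 4KB pages */
	unsigned long page_pfn = head_pfn + 100;	/* scan entered mid-page */
	unsigned long iter = 0;			/* offset of page_pfn in the block */

	/* New calculation: pages left in the huge page from page_pfn onward. */
	unsigned long skip_pages = (1UL << order) - (page_pfn - head_pfn);

	iter += skip_pages - 1;	/* the loop's own iter++ then steps past the huge page */
	printf("skip %lu pages, iterator lands at offset %lu\n", skip_pages, iter);
	return 0;
}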