mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
abca2500c0
Every single architecture (including !CONFIG_HIGHMEM) calls...

	pagefault_enable();
	preempt_enable();

... before returning from __kunmap_atomic().  Lift this code into the
kunmap_atomic() macro.

While we are at it rename __kunmap_atomic() to kunmap_atomic_high() to
be consistent.

[ira.weiny@intel.com: don't enable pagefault/preempt twice]
  Link: http://lkml.kernel.org/r/20200518184843.3029640-1-ira.weiny@intel.com
[akpm@linux-foundation.org: coding style fixes]
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian König <christian.koenig@amd.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Guenter Roeck <linux@roeck-us.net>
Link: http://lkml.kernel.org/r/20200507150004.1423069-8-ira.weiny@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
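For reference, this is the shape the consolidation gives the generic header: a minimal sketch of the kunmap_atomic() macro after the lift, assuming the upstream definition in include/linux/highmem.h at this commit; treat it as an illustration of the commit's intent rather than a verbatim copy:

	#define kunmap_atomic(addr)                                     \
	do {                                                            \
		BUILD_BUG_ON(__same_type((addr), struct page *));       \
		kunmap_atomic_high(addr);                               \
		pagefault_enable();                                     \
		preempt_enable();                                       \
	} while (0)

With the enables centralized in the macro, each architecture's kunmap_atomic_high() (such as the x86-32 one in this file) only has to tear down its own mapping.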
93 lines
2.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>

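/*
 * Illustrative note (not part of the upstream file): each CPU owns
 * KM_TYPE_NR consecutive fixmap slots starting at FIX_KMAP_BEGIN.
 * kmap_atomic_idx_push() hands out this CPU's next free slot, and the
 * kernel pte for that slot is pointed at the page being mapped.
 */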
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);

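/*
 * Illustrative usage sketch (not part of the upstream file): callers pair
 * the mapping calls with kunmap_atomic(), which after this change
 * re-enables pagefaults and preemption itself and calls
 * kunmap_atomic_high() below to tear down the pte:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	memcpy(vaddr, buf, len);
 *	kunmap_atomic(vaddr);
 *
 * Sleeping between the two calls is not allowed; the mapping is per-CPU
 * and held with preemption disabled.
 */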
void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif
}
EXPORT_SYMBOL(kunmap_atomic_high);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}
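As a usage example, here is a hedged sketch of a typical caller of this API. copy_to_page() is a hypothetical helper, not something defined in this file; it shows the kmap_atomic()/kunmap_atomic() pairing for a page that may live in highmem:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy a buffer into a possibly-highmem page. */
	static void copy_to_page(struct page *page, const void *buf, size_t len)
	{
		void *vaddr = kmap_atomic(page);	/* disables pagefaults and preemption */

		memcpy(vaddr, buf, len);
		kunmap_atomic(vaddr);			/* re-enables both via the lifted macro */
	}

On !CONFIG_HIGHMEM configurations kmap_atomic() degenerates to page_address(), so the same caller works unchanged there; that uniformity is what lets the commit lift the pagefault/preempt enables into the one generic macro.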