linux_dsm_epyc7002/arch/x86/include/asm/page_32.h
Alexander Duyck 7d74275d39 x86: Make it so that __pa_symbol can only process kernel symbols on x86_64
I submitted an earlier patch that made __phys_addr an inline function.  This
obviously results in an increase in code size.  One step I can take to reduce
that is to make __pa_symbol do a direct translation for kernel symbol
addresses instead of covering all of virtual memory.

On my system this reduced the size of __pa_symbol from 5 instructions
totalling 30 bytes to 3 instructions totalling 16 bytes.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Link: http://lkml.kernel.org/r/20121116215356.8521.92472.stgit@ahduyck-cp1.jf.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
2012-11-16 16:42:09 -08:00
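
For reference, the x86_64 side of this change lives in
arch/x86/include/asm/page_64.h.  The sketch below paraphrases that header to
illustrate the point of the commit: __phys_addr() has to handle both
direct-mapped addresses and kernel text addresses, while __pa_symbol()
(via __phys_addr_symbol) can assume a kernel-text address and translate it
with a single offset.  This is an illustrative approximation, not a verbatim
copy of the patch.

/*
 * Sketch of the x86_64 counterpart (paraphrased, not verbatim).
 *
 * Generic case: the address may be in the direct map (PAGE_OFFSET based)
 * or in the kernel text mapping (__START_KERNEL_map based), so the
 * translation has to pick the right offset.
 */
static inline unsigned long __phys_addr_nodebug(unsigned long x)
{
	unsigned long y = x - __START_KERNEL_map;

	/* use the carry flag to determine if x was < __START_KERNEL_map */
	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

	return x;
}

/*
 * Symbol case: kernel symbols are known to live in the kernel text
 * mapping, so no range check is needed and the translation is direct.
 */
#define __phys_addr_symbol(x) \
	((unsigned long)(x) - __START_KERNEL_map + phys_base)

On 32-bit, as the file below shows, kernel symbols already live in the
direct-mapped region, so __phys_addr_symbol() simply falls back to
__phys_addr().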

#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H

#include <asm/page_32_types.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#define __phys_addr_nodebug(x)	((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#endif
#define __phys_addr_symbol(x)	__phys_addr(x)
#define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */

#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
#else  /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
#endif /* CONFIG_X86_USE_3DNOW */

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_H */