/*
 *  linux/arch/arm/kernel/armksyms.c
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/cryptohash.h>
#include <linux/delay.h>
#include <linux/in6.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/arm-smccc.h>

#include <asm/checksum.h>
#include <asm/ftrace.h>

/*
 * libgcc functions - functions that are used internally by the
 * compiler... (prototypes are not correct though, but that
 * doesn't really matter since they're not versioned).
 */
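/*
 * Illustrative example: 64-bit arithmetic written in plain C in a
 * loadable module, e.g. a (hypothetical) helper like
 *
 *	u64 shift_up(u64 v, unsigned int n)
 *	{
 *		return v << n;
 *	}
 *
 * is typically compiled into a call to __ashldi3() (or __aeabi_llsl()
 * under the EABI) rather than open-coded, which is why these helpers
 * need to be exported for modules to link at load time.
 */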
extern void __ashldi3(void);
extern void __ashrdi3(void);
extern void __divsi3(void);
extern void __lshrdi3(void);
extern void __modsi3(void);
extern void __muldi3(void);
extern void __ucmpdi2(void);
extern void __udivsi3(void);
extern void __umodsi3(void);
extern void __do_div64(void);
extern void __bswapsi2(void);
extern void __bswapdi2(void);

extern void __aeabi_idiv(void);
extern void __aeabi_idivmod(void);
extern void __aeabi_lasr(void);
extern void __aeabi_llsl(void);
extern void __aeabi_llsr(void);
extern void __aeabi_lmul(void);
extern void __aeabi_uidiv(void);
extern void __aeabi_uidivmod(void);
extern void __aeabi_ulcmp(void);

extern void fpundefinstr(void);

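/*
 * mmioset()/mmiocpy() are believed to be the memset/memcpy flavours
 * backing memset_io(), memcpy_fromio() and memcpy_toio() on ARM; they
 * are declared here only so they can be exported further down.
 */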
void mmioset(void *, unsigned int, size_t);
void mmiocpy(void *, const void *, size_t);

/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);

/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_from_user);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_ipv6_magic);

/* io */
#ifndef __raw_readsb
EXPORT_SYMBOL(__raw_readsb);
#endif
#ifndef __raw_readsw
EXPORT_SYMBOL(__raw_readsw);
#endif
#ifndef __raw_readsl
EXPORT_SYMBOL(__raw_readsl);
#endif
#ifndef __raw_writesb
EXPORT_SYMBOL(__raw_writesb);
#endif
#ifndef __raw_writesw
EXPORT_SYMBOL(__raw_writesw);
#endif
#ifndef __raw_writesl
EXPORT_SYMBOL(__raw_writesl);
#endif

/* string / mem functions */
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);

EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);

#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);

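/*
 * Out-of-line user-space accessors: arm_copy_{from,to}_user() and
 * arm_clear_user() are the helpers behind the raw copy_{from,to}_user()
 * and clear_user() paths, and the __get_user_*()/__put_user_*() entry
 * points below are emitted by the get_user()/put_user() macros, so
 * modules need all of them exported.
 */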
EXPORT_SYMBOL(arm_copy_from_user);
EXPORT_SYMBOL(arm_copy_to_user);
EXPORT_SYMBOL(arm_clear_user);

EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
EXPORT_SYMBOL(__get_user_8);

#ifdef __ARMEB__
EXPORT_SYMBOL(__get_user_64t_1);
EXPORT_SYMBOL(__get_user_64t_2);
EXPORT_SYMBOL(__get_user_64t_4);
EXPORT_SYMBOL(__get_user_32t_8);
#endif

EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
#endif

/* gcc lib functions */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__divsi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__modsi3);
EXPORT_SYMBOL(__muldi3);
EXPORT_SYMBOL(__ucmpdi2);
EXPORT_SYMBOL(__udivsi3);
EXPORT_SYMBOL(__umodsi3);
EXPORT_SYMBOL(__do_div64);
EXPORT_SYMBOL(__bswapsi2);
EXPORT_SYMBOL(__bswapdi2);

#ifdef CONFIG_AEABI
EXPORT_SYMBOL(__aeabi_idiv);
EXPORT_SYMBOL(__aeabi_idivmod);
EXPORT_SYMBOL(__aeabi_lasr);
EXPORT_SYMBOL(__aeabi_llsl);
EXPORT_SYMBOL(__aeabi_llsr);
EXPORT_SYMBOL(__aeabi_lmul);
EXPORT_SYMBOL(__aeabi_uidiv);
EXPORT_SYMBOL(__aeabi_uidivmod);
EXPORT_SYMBOL(__aeabi_ulcmp);
#endif

/* bitops */
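/*
 * The _set_bit()/_find_*_bit() entry points are believed to be the
 * assembly implementations under arch/arm/lib/ that the generic
 * set_bit()/find_*_bit() interfaces in asm/bitops.h resolve to, hence
 * the leading-underscore names; modules using those interfaces need
 * them exported.
 */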
EXPORT_SYMBOL(_set_bit);
EXPORT_SYMBOL(_test_and_set_bit);
EXPORT_SYMBOL(_clear_bit);
EXPORT_SYMBOL(_test_and_clear_bit);
EXPORT_SYMBOL(_change_bit);
EXPORT_SYMBOL(_test_and_change_bit);
EXPORT_SYMBOL(_find_first_zero_bit_le);
EXPORT_SYMBOL(_find_next_zero_bit_le);
EXPORT_SYMBOL(_find_first_bit_le);
EXPORT_SYMBOL(_find_next_bit_le);

#ifdef __ARMEB__
EXPORT_SYMBOL(_find_first_zero_bit_be);
EXPORT_SYMBOL(_find_next_zero_bit_be);
EXPORT_SYMBOL(_find_first_bit_be);
EXPORT_SYMBOL(_find_next_bit_be);
#endif

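/*
 * With CONFIG_FUNCTION_TRACER the compiler's -pg instrumentation inserts
 * a call to mcount (old ABI) or __gnu_mcount_nc (EABI) in function
 * prologues; modules built the same way need these ftrace entry points
 * exported.
 */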
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_OLD_MCOUNT
EXPORT_SYMBOL(mcount);
#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif

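/*
 * With CONFIG_ARM_PATCH_PHYS_VIRT the phys/virt translation offset is
 * patched into the code at boot; __pv_phys_pfn_offset and __pv_offset
 * are understood to be the variables behind that runtime patching, and
 * modules built with the patched __virt_to_phys()/__phys_to_virt()
 * reference them.
 */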
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
EXPORT_SYMBOL(__pv_phys_pfn_offset);
EXPORT_SYMBOL(__pv_offset);
#endif
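/*
 * arm_smccc_smc()/arm_smccc_hvc() are the SMC Calling Convention
 * backends (used, for instance, by PSCI and other firmware drivers);
 * exporting them lets such drivers be built as modules.
 */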
#ifdef CONFIG_HAVE_ARM_SMCCC
EXPORT_SYMBOL(arm_smccc_smc);
EXPORT_SYMBOL(arm_smccc_hvc);
#endif