/*
 * arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * modification for nommu, Hyok S. Choi, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif

/*
 * Allow for constants defined here to be used from assembly code
 * by prepending the UL suffix only with actual C code compilation.
 */
#define UL(x) _AC(x, UL)
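
/*
 * For example, UL(0x8000) expands to 0x8000UL in C code, but to plain
 * 0x8000 when this header is pulled into assembly, where the UL suffix
 * would not assemble.
 */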

/* PAGE_OFFSET - the virtual address of the start of the kernel image */
#define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
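
/*
 * As an illustration, with the common CONFIG_PAGE_OFFSET of 0xC0000000
 * (3G/1G split), TASK_SIZE is 0xBF000000 and TASK_UNMAPPED_BASE is
 * 0x40000000 (0xBF000000 / 3 = 0x3FAAAAAA, aligned up to 16MB).
 */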

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26		(UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbols relocation (2^24) */
#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END		(PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr)	(MODULES_VADDR + ((physaddr) & 0x000fffff))
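
/*
 * For example (hypothetical flash layout), an XIP kernel stored at
 * physical 0x00208000 would run at XIP_VIRT_ADDR(0x00208000) ==
 * MODULES_VADDR + 0x8000: the low 20 bits preserve the offset within
 * the 1MB base section.
 */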

/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER	24

#else /* CONFIG_MMU */

/*
 * The user task size can grow up to the end of the free RAM region.
 * It is difficult to pick a fixed limit here, and this value will
 * probably never match the original meaning of the define.
 * Fortunately, there is no reference to it in noMMU mode, for now.
 */
#define TASK_SIZE		UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE	UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM			(UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif

/*
 * The module can be at any place in ram in nommu mode.
 */
#define MODULES_END		(END_MEM)
#define MODULES_VADDR		PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr)	(physaddr)

#endif /* !CONFIG_MMU */

/*
 * We fix the TCM memories (max 32 KiB ITCM and 32 KiB DTCM,
 * respectively) at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET	UL(0xfffe0000)
#define DTCM_OFFSET	UL(0xfffe8000)
#endif

/*
 * Convert a physical address to a Page Frame Number and back
 */
#define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
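
/*
 * A quick sketch, assuming the usual 4 KiB pages (PAGE_SHIFT == 12):
 *
 *	unsigned long pfn  = __phys_to_pfn(0x80042000);	// 0x80042
 *	phys_addr_t   phys = __pfn_to_phys(pfn);	// 0x80042000
 */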

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET	UL(CONFIG_PHYS_OFFSET)

#ifndef __ASSEMBLY__

/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */
#if defined(__virt_to_phys)
#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)

#elif defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_7_0	0x81
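
/*
 * 0x81000000 is the 8-bit immediate 0x81 rotated right by 8, so the
 * boot-time fixup only has to rewrite that 8-bit field while the
 * rotation is preserved. As an illustration, with PAGE_OFFSET
 * 0xC0000000 and RAM at physical 0x10000000 the offset is 0x50000000,
 * and a patched stub ends up as e.g. "add r0, r1, #0x50000000"
 * (imm8 0x50, same rotation).
 */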

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#define __pv_stub(from,to,instr,type)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (type))

#define __pv_stub_mov_hi(t)				\
	__asm__ volatile("@ __pv_stub_mov\n"		\
	"1:	mov	%R0, %1\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "=r" (t)					\
	: "I" (__PV_BITS_7_0))

#define __pv_add_carry_stub(x, y)			\
	__asm__ volatile("@ __pv_add_carry_stub\n"	\
	"1:	adds	%Q0, %1, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.popsection\n"				\
	: "+r" (y)					\
	: "r" (x), "I" (__PV_BITS_31_24)		\
	: "cc")

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add", __PV_BITS_31_24);
	} else {
		__pv_stub_mov_hi(t);
		__pv_add_carry_stub(x, t);
	}
	return t;
}
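
/*
 * On LPAE (64-bit phys_addr_t) the translation above is done in two
 * patched steps: __pv_stub_mov_hi() seeds the high word of the result
 * and __pv_add_carry_stub() adds the low 32-bit offset, propagating
 * any carry into the high word.
 */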

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64 bit, and makes sure that the inline
	 * assembler expression receives a 32 bit argument in the
	 * place where a 32 bit 'r' operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
	return t;
}

#else

#define PHYS_OFFSET	PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET	((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)

#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}

/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((phys_addr_t)(pfn) << PAGE_SHIFT)
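
/*
 * A minimal usage sketch: for a direct-mapped (lowmem) pointer the
 * translations round-trip, e.g.:
 *
 *	void *p = kmalloc(64, GFP_KERNEL);
 *	phys_addr_t pa = __pa(p);
 *	BUG_ON(__va(pa) != p);
 */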

extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);

/*
 * These are for systems that have a hardware interconnect supported alias of
 * physical memory for idmap purposes.  Most cases should leave these
 * untouched.
 */
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
		return arch_virt_to_idmap(x);
	else
		return __virt_to_phys(x);
}

#define virt_to_idmap(x)	__virt_to_idmap((unsigned long)(x))
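
/*
 * An illustrative sketch (hypothetical platform hook, not part of this
 * header): board code for an SoC with an interconnect alias of DRAM
 * could install its own translation, e.g.:
 *
 *	static phys_addr_t board_virt_to_idmap(unsigned long x)
 *	{
 *		return __virt_to_phys(x) + BOARD_IDMAP_ALIAS_OFFSET;
 *	}
 *	...
 *	arch_virt_to_idmap = board_virt_to_idmap;
 */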

/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *deprecated* (and that doesn't mean
 * use the __ prefixed forms instead.)  See dma-mapping.h.
 */
#ifndef __virt_to_bus
#define __virt_to_bus	__virt_to_phys
#define __bus_to_virt	__phys_to_virt
#define __pfn_to_bus(x)	__pfn_to_phys(x)
#define __bus_to_pfn(x)	__phys_to_pfn(x)
#endif

#ifdef CONFIG_VIRT_TO_BUS
#define virt_to_bus virt_to_bus
static inline __deprecated unsigned long virt_to_bus(void *x)
{
	return __virt_to_bus((unsigned long)x);
}

#define bus_to_virt bus_to_virt
static inline __deprecated void *bus_to_virt(unsigned long x)
{
	return (void *)__bus_to_virt(x);
}
#endif

/*
 * Conversion between a struct page and a physical address.
 *
 *  page_to_pfn(page)	convert a struct page * to a PFN number
 *  pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
					&& pfn_valid(virt_to_pfn(kaddr)))

#endif

#include <asm-generic/memory_model.h>

#endif