2006-11-30 20:53:54 +07:00
|
|
|
#ifdef CONFIG_MMU
|
ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
In current implementation, we used ARM-specific flag, that is,
VM_ARM_STATIC_MAPPING, for distinguishing ARM specific static mapped area.
The purpose of static mapped area is to re-use static mapped area when
entire physical address range of the ioremap request can be covered
by this area.
This implementation causes needless overhead for some cases.
For example, assume that there is only one static mapped area and
vmlist has 300 areas. Every time we call ioremap, we check 300 areas for
deciding whether it is matched or not. Moreover, even if there is
no static mapped area and vmlist has 300 areas, every time we call
ioremap, we check 300 areas in now.
If we construct a extra list for static mapped area, we can eliminate
above mentioned overhead.
With a extra list, if there is one static mapped area,
we just check only one area and proceed next operation quickly.
In fact, it is not a critical problem, because ioremap is not frequently
used. But reducing overhead is better idea.
Another reason for doing this work is for removing architecture dependency
on vmalloc layer. I think that vmlist and vmlist_lock is internal data
structure for vmalloc layer. Some codes for debugging and stat inevitably
use vmlist and vmlist_lock. But it is preferable that they are used
as least as possible in outside of vmalloc.c
Now, I introduce an ARM-specific infrastructure for static mapped area. In
the following patch, we will use this and resolve above mentioned problem.
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2013-02-09 12:28:05 +07:00
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/vmalloc.h>
|
2006-11-30 20:53:54 +07:00
|
|
|
|
2008-09-17 00:05:53 +07:00
|
|
|
/*
 * The upper-most page table pointer: the pmd covering the top of the
 * kernel address space (the 0xffffxxxx window used by the copypage and
 * alias-flush helpers below, via set_top_pte()/get_top_pte()).
 */
extern pmd_t *top_pmd;
|
|
|
|
|
2011-07-02 20:46:27 +07:00
|
|
|
/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently, while 0xffff4000
 * is reserved for VIPT aliasing flushing by generic code.
 *
 * Note that we don't allow VIPT aliasing caches with SMP.
 */
/* minicache-assisted copy_user_page() window */
#define COPYPAGE_MINICACHE 0xffff8000
/* ARMv6 copypage: source and destination kernel alias windows */
#define COPYPAGE_V6_FROM 0xffff8000
#define COPYPAGE_V6_TO 0xffffc000
/* PFN alias flushing, for VIPT caches */
#define FLUSH_ALIAS_START 0xffff4000
|
|
|
|
|
2011-07-02 21:20:44 +07:00
|
|
|
/*
 * Install @pte at virtual address @va in the top-of-memory page table
 * (top_pmd), then flush the stale TLB entry on the local CPU so the new
 * mapping takes effect immediately.
 */
static inline void set_top_pte(unsigned long va, pte_t pte)
{
	set_pte_ext(pte_offset_kernel(top_pmd, va), pte, 0);
	local_flush_tlb_kernel_page(va);
}
|
|
|
|
|
2011-07-04 17:22:27 +07:00
|
|
|
static inline pte_t get_top_pte(unsigned long va)
|
|
|
|
{
|
2011-07-04 17:25:53 +07:00
|
|
|
pte_t *ptep = pte_offset_kernel(top_pmd, va);
|
|
|
|
return *ptep;
|
2011-07-04 17:22:27 +07:00
|
|
|
}
|
|
|
|
|
2006-08-21 23:06:38 +07:00
|
|
|
/*
 * Walk the kernel (init_mm) page tables and return the pmd entry
 * covering kernel virtual address @virt: pgd -> pud -> pmd.
 */
static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
|
|
|
|
|
2007-04-21 16:47:29 +07:00
|
|
|
/*
 * Page-table attribute set for one kind of kernel mapping; looked up
 * by type via get_mem_type() below.
 */
struct mem_type {
	pteval_t prot_pte;	/* PTE-level protection bits */
	pmdval_t prot_l1;	/* L1 entry bits when pointing to a PTE table */
	pmdval_t prot_sect;	/* L1 bits when mapped as a (super)section */
	unsigned int domain;	/* ARM domain number for this mapping */
};
|
|
|
|
|
|
|
|
/* Look up the mem_type attribute set for mapping type @type. */
const struct mem_type *get_mem_type(unsigned int type);

/* Flush @page's kernel-space dcache alias(es) for @mapping. */
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
|
|
|
|
|
2011-09-16 12:14:23 +07:00
|
|
|
/*
 * ARM specific vm_struct->flags bits.
 */

/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
#define VM_ARM_SECTION_MAPPING 0x80000000

/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000

/* empty mapping */
#define VM_ARM_EMPTY_MAPPING 0x20000000

/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)

/* consistent regions used by dma_alloc_attrs() */
/*
 * NOTE(review): VM_ARM_DMA_CONSISTENT shares bit 0x20000000 with
 * VM_ARM_EMPTY_MAPPING above. That is only safe if the two flags can
 * never be tested on the same vm_struct — verify all users before
 * adding new tests of either bit.
 */
#define VM_ARM_DMA_CONSISTENT 0x20000000
|
|
|
|
|
ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
In current implementation, we used ARM-specific flag, that is,
VM_ARM_STATIC_MAPPING, for distinguishing ARM specific static mapped area.
The purpose of static mapped area is to re-use static mapped area when
entire physical address range of the ioremap request can be covered
by this area.
This implementation causes needless overhead for some cases.
For example, assume that there is only one static mapped area and
vmlist has 300 areas. Every time we call ioremap, we check 300 areas for
deciding whether it is matched or not. Moreover, even if there is
no static mapped area and vmlist has 300 areas, every time we call
ioremap, we check 300 areas in now.
If we construct a extra list for static mapped area, we can eliminate
above mentioned overhead.
With a extra list, if there is one static mapped area,
we just check only one area and proceed next operation quickly.
In fact, it is not a critical problem, because ioremap is not frequently
used. But reducing overhead is better idea.
Another reason for doing this work is for removing architecture dependency
on vmalloc layer. I think that vmlist and vmlist_lock is internal data
structure for vmalloc layer. Some codes for debugging and stat inevitably
use vmlist and vmlist_lock. But it is preferable that they are used
as least as possible in outside of vmalloc.c
Now, I introduce an ARM-specific infrastructure for static mapped area. In
the following patch, we will use this and resolve above mentioned problem.
Reviewed-by: Nicolas Pitre <nico@linaro.org>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2013-02-09 12:28:05 +07:00
|
|
|
|
|
|
|
/*
 * One permanent static mapping (e.g. from iotable_init()), kept on
 * static_vmlist so ioremap() can re-use it without scanning every
 * vmalloc area.
 */
struct static_vm {
	struct vm_struct vm;	/* the underlying vmalloc-area descriptor */
	struct list_head list;	/* entry on static_vmlist */
};
|
|
|
|
|
|
|
|
/* List of all permanent static mappings (struct static_vm entries). */
extern struct list_head static_vmlist;
/* Find the static mapping whose virtual range contains @vaddr, or NULL. */
extern struct static_vm *find_static_vm_vaddr(void *vaddr);
/* Register @svm on static_vmlist; boot-time only (__init). */
extern __init void add_static_vm_early(struct static_vm *svm);
|
|
|
|
|
2006-11-30 20:53:54 +07:00
|
|
|
#endif	/* CONFIG_MMU — presumably closes the #ifdef at the top; confirm no nested conditional */
|
|
|
|
|
2011-07-09 03:26:59 +07:00
|
|
|
#ifdef CONFIG_ZONE_DMA
/* Highest physical address usable for DMA allocations */
extern phys_addr_t arm_dma_limit;
/* Same limit expressed as a page frame number */
extern unsigned long arm_dma_pfn_limit;
#else
/* No DMA zone: all of physical memory is DMA-able */
#define arm_dma_limit ((phys_addr_t)~0)
#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif
|
|
|
|
|
2011-12-29 19:09:51 +07:00
|
|
|
/* Highest physical address covered by the direct (lowmem) mapping */
extern phys_addr_t arm_lowmem_limit;

/* Boot-time zone/memmap initialisation */
void __init bootmem_init(void);
/* Reserve the kernel's page-table memory in memblock */
void arm_mm_memblock_reserve(void);
/* Remap CMA regions after the linear map is set up */
void dma_contiguous_remap(void);
|