linux_dsm_epyc7002/arch/arm/mm/ioremap.c

/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
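
/*
 * Illustrative usage (added note, not in the original file): a driver
 * maps its registers and then uses only the MMIO accessors on the
 * result. The physical base, size and register offsets below are
 * hypothetical.
 *
 *	void __iomem *regs = __ioremap(0x80000000, 0x1000, 0);
 *
 *	if (regs) {
 *		writel(1, regs + 0x04);
 *		(void)readl(regs + 0x00);
 *		__iounmap(regs);
 *	}
 */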
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
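
/*
 * Note added for exposition: remap_area_pages() walks init_mm one pgd
 * entry at a time, remap_area_pmd() handles one pmd entry at a time,
 * and remap_area_pte() fills in the individual ptes. Each level first
 * masks the address down to its offset within the enclosing entry and
 * clamps 'end' to that entry's boundary, so no loop ever crosses a
 * pgd/pmd boundary.
 */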
static inline void
remap_area_pte(pte_t *pte, unsigned long address, unsigned long size,
               unsigned long phys_addr, pgprot_t pgprot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        BUG_ON(address >= end);
        do {
                if (!pte_none(*pte))
                        goto bad;
                set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, pgprot));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
        return;

 bad:
        printk("remap_area_pte: page already exists\n");
        BUG();
}

static inline int
remap_area_pmd(pmd_t *pmd, unsigned long address, unsigned long size,
               unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        pgprot_t pgprot;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        BUG_ON(address >= end);

        pgprot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_WRITE | flags);
        do {
                pte_t *pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, pgprot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int
remap_area_pages(unsigned long start, unsigned long pfn,
                 unsigned long size, unsigned long flags)
{
        unsigned long address = start;
        unsigned long end = start + size;
        unsigned long phys_addr = __pfn_to_phys(pfn);
        int err = 0;
        pgd_t *dir;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        BUG_ON(address >= end);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags)) {
                        err = -ENOMEM;
                        break;
                }
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        flush_cache_vmap(start, end);
        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
              unsigned long flags)
{
        unsigned long addr;
        struct vm_struct *area;

        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
        if (remap_area_pages(addr, pfn, size, flags)) {
                vunmap((void *)addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap_pfn);

void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
        unsigned long last_addr;
        unsigned long offset = phys_addr & ~PAGE_MASK;
        unsigned long pfn = __phys_to_pfn(phys_addr);

        /*
         * Don't allow wraparound or zero size.
         */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Page align the mapping size.
         */
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);
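
/*
 * Worked example (added for exposition; the physical address is made
 * up): __ioremap(0x40000004, 8, 0) gives offset = 4, pfn = 0x40000 and
 * last_addr = 0x4000000b. With 4K pages the size becomes
 * PAGE_ALIGN(0x4000000c) - 0x40000004 = 0xffc, so one page of physical
 * memory starting at 0x40000000 is mapped and the caller gets the new
 * virtual base plus 4, i.e. a pointer aimed exactly at 0x40000004.
 */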

void __iounmap(void __iomem *addr)
{
        vunmap((void *)(PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(__iounmap);