linux_dsm_epyc7002/arch/csky/mm/ioremap.c
Guo Ren 013de2d667 csky: MMU and page table management
This patch adds the files related to memory management; here is our
memory layout (an illustrative set of matching address constants follows the table):

   Fixmap       : 0xffc02000 – 0xfffff000       (4 MB - 12 KB)
   Pkmap        : 0xff800000 – 0xffc00000       (4 MB)
   Vmalloc      : 0xf0200000 – 0xff000000       (238 MB)
   Lowmem       : 0x80000000 – 0xc0000000       (1 GB)
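
For illustration only, the layout above could be expressed as address
constants like the ones below; the macro names (PAGE_OFFSET, VMALLOC_START,
PKMAP_BASE, FIXADDR_TOP, ...) follow common kernel conventions and are
assumptions here, not the exact csky header definitions:

#define PAGE_OFFSET	0x80000000UL	/* start of the lowmem direct map */
#define LOWMEM_END	0xc0000000UL	/* 1 GB of directly mapped RAM    */

#define VMALLOC_START	0xf0200000UL
#define VMALLOC_END	0xff000000UL	/* 238 MB vmalloc window          */

#define PKMAP_BASE	0xff800000UL	/* 4 MB of persistent kmap        */

#define FIXADDR_START	0xffc02000UL
#define FIXADDR_TOP	0xfffff000UL	/* 4 MB - 12 KB of fixmap         */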

The abiv1 CPU (CK610) has a VIPT cache and doesn't support highmem.
The abiv2 CPUs all have PIPT caches and can support highmem.

Lowmem is directly mapped by the msa0 & msa1 registers, so we needn't set up
page tables for it (a minimal __pa/__va sketch follows).
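
As a minimal sketch (assuming PHYS_OFFSET is the physical base of RAM and
PAGE_OFFSET is the 0x80000000 start of lowmem shown above, not the exact csky
definitions), such a fixed-offset mapping makes virt<->phys conversion plain
arithmetic that never walks a page table:

#define __pa(vaddr)	((unsigned long)(vaddr) - PAGE_OFFSET + PHYS_OFFSET)
#define __va(paddr)	((void *)((unsigned long)(paddr) - PHYS_OFFSET + PAGE_OFFSET))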

Link: https://lore.kernel.org/lkml/20180518215548.GH17671@n2100.armlinux.org.uk/
Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Christoph Hellwig <hch@infradead.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
2018-10-25 23:36:19 +08:00


// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/pgtable.h>

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	phys_addr_t last_addr;
	unsigned long offset, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Reject zero-sized requests and ranges that wrap around. */
	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	/* Page-align the physical address; remember the sub-page offset. */
	offset = addr & (~PAGE_MASK);
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/* Grab a virtual window from the vmalloc area. */
	area = get_vm_area_caller(size, VM_ALLOC, __builtin_return_address(0));
	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;

	/* MMIO mappings must be uncached. */
	prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
			_PAGE_GLOBAL | _CACHE_UNCACHED);

	/* Build the page-table entries covering the new window. */
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap);

void iounmap(void __iomem *addr)
{
	/* Drop the mapping; mask off the sub-page offset added by ioremap(). */
	vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
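
A usage sketch, not taken from this patch: a hypothetical driver maps its
register block with ioremap(), accesses it through the standard readl()/writel()
accessors, and releases it with iounmap(). DEV_REG_BASE, DEV_REG_SIZE and
DEV_CTRL are made-up values.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>

#define DEV_REG_BASE	0xfe000000	/* hypothetical device base address */
#define DEV_REG_SIZE	0x1000		/* hypothetical register block size */
#define DEV_CTRL	0x04		/* hypothetical control register    */

static void __iomem *regs;

static int example_probe(void)
{
	regs = ioremap(DEV_REG_BASE, DEV_REG_SIZE);	/* uncached MMIO window */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + DEV_CTRL);			/* poke a device register */
	pr_info("ctrl: %08x\n", readl(regs + DEV_CTRL));	/* read it back */
	return 0;
}

static void example_remove(void)
{
	iounmap(regs);					/* tear the mapping down */
}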