mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-15 01:36:46 +07:00
de73b6b1bd
Provide an arch_get_unmapped_area function that aligns shared memory mapping addresses to the larger of the page size and the cache way size. This guarantees that corresponding virtual addresses of shared mappings are cached by the same cache sets.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
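A minimal user-space sketch of the alignment rule described above: round a candidate address up to an SHMLBA boundary, then add the colour offset implied by the mapping's page offset, so that every shared mapping of the same pages is congruent modulo SHMLBA and therefore hits the same cache sets. The arithmetic mirrors the COLOUR_ALIGN() macro in the file below; SHMLBA_EXAMPLE, PAGE_SHIFT_EXAMPLE, and the sample addresses are illustrative assumptions, not values taken from the xtensa headers.

#include <stdio.h>

/* Illustrative values only: on xtensa, SHMLBA is the larger of the page size
 * and the cache way size; 16 KiB with a 4 KiB page is assumed here. */
#define SHMLBA_EXAMPLE		0x4000UL
#define PAGE_SHIFT_EXAMPLE	12

/* Same arithmetic as the kernel's COLOUR_ALIGN(): round addr up to an SHMLBA
 * boundary, then add the colour implied by the mapping's page offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHMLBA_EXAMPLE - 1) & ~(SHMLBA_EXAMPLE - 1)) +
	       ((pgoff << PAGE_SHIFT_EXAMPLE) & (SHMLBA_EXAMPLE - 1));
}

int main(void)
{
	/* Two different candidate addresses for a shared mapping at pgoff 3
	 * both end up congruent modulo SHMLBA (offset 0x3000), i.e. they are
	 * cached by the same cache sets. */
	printf("%#lx\n", colour_align(0x10001000UL, 3));	/* 0x10007000 */
	printf("%#lx\n", colour_align(0x2000a000UL, 3));	/* 0x2000f000 */
	return 0;
}

The same congruence is what arch_get_unmapped_area() in the file below enforces for MAP_SHARED requests, both when validating a MAP_FIXED address and when searching for a free range.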
96 lines
2.4 KiB
C
/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
#include <asm/uaccess.h>
#include <asm/syscall.h>
#include <asm/unistd.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/shm.h>

typedef void (*syscall_t)(void);
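/* Syscall dispatch table, indexed by syscall number: every slot defaults to
 * sys_ni_syscall, and the real entries are filled in from the __SYSCALL()
 * definitions generated by <uapi/asm/unistd.h>. */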
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
	[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,

#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
#include <uapi/asm/unistd.h>
};

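/* Round addr up to an SHMLBA boundary, then add the cache colour implied by
 * the mapping's page offset, so that shared mappings of the same pages always
 * fall into the same cache sets. */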
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

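/* shmat() wrapper: do_shmat() returns the attach address through a pointer,
 * while the syscall must return it directly (or a negative error). */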
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	return (long)ret;
}

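/* fadvise64_64() wrapper: this entry point takes 'advice' as its second
 * argument and reorders the arguments into the order expected by the
 * generic sys_fadvise64_64(). */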
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
		unsigned long long offset, unsigned long long len)
{
	return sys_fadvise64_64(fd, offset, len, advice);
}

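/* Pick an unmapped address for mmap(): MAP_FIXED requests are only sanity
 * checked (shared mappings must not break cache aliasing); otherwise do a
 * first-fit walk of the VMA list, keeping MAP_SHARED candidates colour
 * aligned via COLOUR_ALIGN(). */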
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vmm->vm_start)
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}