linux_dsm_epyc7002/arch/arm64/kernel/sys_compat.c
Thomas Gleixner caab277b1d treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 234
Based on 1 normalized pattern(s):

  this program is free software you can redistribute it and or modify
  it under the terms of the gnu general public license version 2 as
  published by the free software foundation this program is
  distributed in the hope that it will be useful but without any
  warranty without even the implied warranty of merchantability or
  fitness for a particular purpose see the gnu general public license
  for more details you should have received a copy of the gnu general
  public license along with this program if not see http www gnu org
  licenses

extracted by the scancode license scanner the SPDX license identifier

  GPL-2.0-only

has been chosen to replace the boilerplate/reference in 503 file(s).

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexios Zavras <alexios.zavras@intel.com>
Reviewed-by: Allison Randal <allison@lohutok.net>
Reviewed-by: Enrico Weigelt <info@metux.net>
Cc: linux-spdx@vger.kernel.org
Link: https://lkml.kernel.org/r/20190602204653.811534538@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2019-06-19 17:09:07 +02:00


// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/sys_arm.c
 *
 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
 * Copyright (C) 1995, 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/unistd.h>
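/*
 * Flush the user address range [start, end) in PAGE_SIZE chunks: bail out
 * early if a fatal signal is pending, and call cond_resched() between
 * chunks so that a large range cannot monopolise the CPU.
 */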
static long
__do_compat_cache_op(unsigned long start, unsigned long end)
{
	long ret;

	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		ret = __flush_cache_user_range(start, start + chunk);
		if (ret)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}
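/*
 * Sanity-check the user-supplied arguments before flushing: the flags
 * word is reserved and must be zero, and the whole range must lie in
 * accessible user address space.
 */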
static inline long
do_compat_cache_op(unsigned long start, unsigned long end, int flags)
{
	if (end < start || flags)
		return -EINVAL;

	if (!access_ok((const void __user *)start, end - start))
		return -EFAULT;

	return __do_compat_cache_op(start, end);
}
/*
 * Handle all unrecognised system calls.
 */
long compat_arm_syscall(struct pt_regs *regs, int scno)
{
	void __user *addr;

	switch (scno) {
	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_. There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags. It should ALWAYS be passed as ZERO until it
	 * is defined to be something else. For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache. Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case __ARM_NR_compat_cacheflush:
		return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);

	case __ARM_NR_compat_set_tls:
		current->thread.uw.tp_value = regs->regs[0];

		/*
		 * Protect against register corruption from context switch.
		 * See comment in tls_thread_flush.
		 */
		barrier();
		write_sysreg(regs->regs[0], tpidrro_el0);
		return 0;

	default:
		/*
		 * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
		 * if not implemented, rather than raising SIGILL. This
		 * way the calling program can gracefully determine whether
		 * a feature is supported.
		 */
		if (scno < __ARM_NR_COMPAT_END)
			return -ENOSYS;
		break;
	}

	addr = (void __user *)instruction_pointer(regs) -
	       (compat_thumb_mode(regs) ? 2 : 4);

	arm64_notify_die("Oops - bad compat syscall(2)", regs,
			 SIGILL, ILL_ILLTRP, addr, scno);

	return 0;
}
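For reference, here is a minimal sketch of how a 32-bit user-space program would reach the __ARM_NR_compat_cacheflush case above, for example after generating code at run time: it issues the ARM-private cacheflush system call with the start address, the exclusive end address, and a zero flags word, as the comment in compat_arm_syscall() requires. This is illustrative only: it assumes the glibc syscall(2) wrapper, the `code` buffer is a stand-in, and the fallback numeric value of __ARM_NR_cacheflush is an assumption taken from the AArch32 private-syscall range rather than from this file.

/* Build as a 32-bit (AArch32) binary and run under an arm64 kernel. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>			/* AArch32 header defining __ARM_NR_cacheflush */

#ifndef __ARM_NR_cacheflush
#define __ARM_NR_cacheflush	0x0f0002	/* assumed: __ARM_NR_BASE + 2 */
#endif

int main(void)
{
	static unsigned int code[64];	/* stand-in for freshly written instructions */
	unsigned long start = (unsigned long)code;
	unsigned long end   = start + sizeof(code);	/* end is exclusive */

	/* r0 = start, r1 = end, r2 = flags (must be zero). */
	long ret = syscall(__ARM_NR_cacheflush, start, end, 0);

	printf("cacheflush returned %ld\n", ret);
	return ret ? 1 : 0;
}

In practice most code reaches this path indirectly, e.g. via the compiler's __builtin___clear_cache(), rather than issuing the raw system call itself.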