/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/uaccess.h>
#include <linux/export.h>
/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */

/**
 * copy_from_user_nmi - copy a block of memory from user space, NMI-safe
 * @to:   kernel destination buffer
 * @from: user-space source address
 * @n:    number of bytes to copy
 *
 * Safe to call from NMI (and IRQ/process) context: the copy is performed
 * with pagefaults disabled, so a fault never sleeps and never takes the
 * full pagefault path.
 *
 * Returns the number of bytes that could NOT be copied: 0 on complete
 * success, @n when the range fails the user-address check up front.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	/* Reject ranges that are not entirely within the user address space;
	 * report all @n bytes as uncopied. */
	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
|