mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit f0b89d3958:
atomic_t variables are currently used to implement reference counters with the following properties:

- counter is initialized to 1 using atomic_set()
- a resource is freed upon counter reaching zero
- once counter reaches zero, its further increments aren't allowed
- counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.)

Such atomic variables should be converted to the newly provided refcount_t type and API, which prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situations and be exploitable.

The variable task_struct.stack_refcount is used as a pure reference counter. Convert it to refcount_t and fix up the operations.

** Important note for maintainers:

Some functions from the refcount_t API defined in lib/refcount.c have different memory ordering guarantees than their atomic counterparts. The full comparison can be seen in https://lkml.org/lkml/2017/11/15/57 and is hopefully soon in a state to be merged into the documentation tree. Normally the differences should not matter, since refcount_t provides enough guarantees to satisfy the refcounting use cases, but in some rare cases they might. Please double check that you don't have undocumented memory ordering guarantees for this variable's usage.

For task_struct.stack_refcount it might make a difference in the following places:

- try_get_task_stack(): the increment in refcount_inc_not_zero() only guarantees a control dependency on success, vs. the fully ordered atomic counterpart
- put_task_stack(): the decrement in refcount_dec_and_test() only provides RELEASE ordering and a control dependency on success, vs. the fully ordered atomic counterpart

Suggested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: David Windsor <dwindsor@gmail.com>
Reviewed-by: Hans Liljestrand <ishkamiel@gmail.com>
Reviewed-by: Andrea Parri <andrea.parri@amarulasolutions.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: viro@zeniv.linux.org.uk
Link: https://lkml.kernel.org/r/1547814450-18902-6-git-send-email-elena.reshetova@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
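For reference, the pattern the note above is concerned with is the caller-side pairing of try_get_task_stack() and put_task_stack(), which is unchanged by the conversion. A minimal, hypothetical caller sketch follows; the helper name read_remote_stack_word() is illustrative only and is not part of this commit or of the header below.

#include <linux/sched.h>
#include <linux/sched/task_stack.h>

/* Illustrative only: safely peek at the first word of another task's stack. */
static unsigned long read_remote_stack_word(struct task_struct *tsk)
{
	unsigned long word;
	void *stack;

	/*
	 * try_get_task_stack() succeeds only if stack_refcount was still
	 * non-zero (refcount_inc_not_zero()); otherwise the stack may
	 * already have been freed and must not be dereferenced.
	 */
	stack = try_get_task_stack(tsk);
	if (!stack)
		return 0;

	word = *(unsigned long *)stack;

	/* Drop the reference; the final put may free the stack. */
	put_task_stack(tsk);

	return word;
}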
123 lines · 2.9 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_STACK_H
#define _LINUX_SCHED_TASK_STACK_H

/*
 * task->stack (kernel stack) handling interfaces:
 */

#include <linux/sched.h>
#include <linux/magic.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK

/*
 * When accessing the stack of a non-current task that might exit, use
 * try_get_task_stack() instead. task_stack_page will return a pointer
 * that could get freed out from under you.
 */
static inline void *task_stack_page(const struct task_struct *task)
{
	return task->stack;
}

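/*
 * With thread_info embedded in task_struct there is no thread_info at
 * the base of the new stack to set up, so this is a no-op.
 */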
#define setup_thread_stack(new,old)	do { } while(0)

static inline unsigned long *end_of_stack(const struct task_struct *task)
{
	return task->stack;
}

#elif !defined(__HAVE_THREAD_FUNCTIONS)

#define task_stack_page(task)	((void *)(task)->stack)

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
	return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
	return (unsigned long *)(task_thread_info(p) + 1);
#endif
}

#endif

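/*
 * Pin/unpin a task's stack. try_get_task_stack() elevates
 * tsk->stack_refcount so the stack cannot be freed while it is being
 * inspected, and returns NULL if the stack has already been released;
 * put_task_stack() drops that reference again. Without
 * CONFIG_THREAD_INFO_IN_TASK the stack is only freed together with the
 * task itself, so no reference counting is needed and the pair
 * degenerates to a plain task_stack_page() and a no-op.
 */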
#ifdef CONFIG_THREAD_INFO_IN_TASK
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return refcount_inc_not_zero(&tsk->stack_refcount) ?
		task_stack_page(tsk) : NULL;
}

extern void put_task_stack(struct task_struct *tsk);
#else
static inline void *try_get_task_stack(struct task_struct *tsk)
{
	return task_stack_page(tsk);
}

static inline void put_task_stack(struct task_struct *tsk) {}
#endif

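/* True if the STACK_END_MAGIC canary at end_of_stack() has been overwritten. */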
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

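/* True if @obj points into the kernel stack of the current task. */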
static inline int object_is_on_stack(const void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}

extern void thread_stack_cache_init(void);

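/*
 * Stack usage debugging: stack_not_used() counts the bytes adjacent to
 * end_of_stack() that are still zero, i.e. (for a zero-initialized
 * stack) space that has never been written to.
 */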
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {	/* Skip over canary */
# ifdef CONFIG_STACK_GROWSUP
		n--;
# else
		n++;
# endif
	} while (!*n);

# ifdef CONFIG_STACK_GROWSUP
	return (unsigned long)end_of_stack(p) - (unsigned long)n;
# else
	return (unsigned long)n - (unsigned long)end_of_stack(p);
# endif
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

#endif /* _LINUX_SCHED_TASK_STACK_H */