mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 14:30:58 +07:00
Merge branch 'uprobes/core' of git://git.kernel.org/pub/scm/linux/kernel/git/oleg/misc into perf/core
Fix uprobes bugs that happen if fork() is called with pending ret-probes, from Oleg Nesterov.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 0e73453e17
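A quick summary of what the series below does (a reading of the diff, not part of the original commit message): a pending ret-probe means uprobes has replaced a return address on the user stack with the address of its trampoline slot in the per-mm XOL area, and has recorded the original address in the task's return_instance chain. A child created by fork() inherits that rewritten stack, but before this series uprobe_copy_process() only cleared t->utask, so the child had neither the recorded original return addresses nor, for a private mm, its own XOL mapping. The fix deep-copies the chain in dup_utask() and recreates the XOL area from task_work in dup_xol_work(). A minimal userspace-only sketch of the deep-copy pattern follows; ret_instance, dup_chain and the sample addresses are invented for illustration, not kernel APIs.

/* sketch: duplicate a chain of pending ret-probe records for a forked child */
#include <stdio.h>
#include <stdlib.h>

struct ret_instance {
        unsigned long orig_ret_vaddr;           /* where the probed function really returns */
        struct ret_instance *next;
};

/* Deep-copy the chain, preserving order, the way dup_utask() does below. */
static struct ret_instance *dup_chain(const struct ret_instance *src)
{
        struct ret_instance *head = NULL, **p = &head;
        const struct ret_instance *o;

        for (o = src; o; o = o->next) {
                struct ret_instance *n = malloc(sizeof(*n));

                if (!n) {                       /* undo the partial copy on failure */
                        while (head) {
                                struct ret_instance *tmp = head->next;
                                free(head);
                                head = tmp;
                        }
                        return NULL;
                }
                *n = *o;
                n->next = NULL;
                *p = n;                         /* append, keeping the original order */
                p = &n->next;
        }
        return head;
}

int main(void)
{
        struct ret_instance c = { 0x400a10, NULL };
        struct ret_instance b = { 0x4007f3, &c };
        struct ret_instance a = { 0x400512, &b };
        struct ret_instance *copy, *i;

        copy = dup_chain(&a);
        for (i = copy; i; i = i->next)
                printf("pending return to %#lx\n", i->orig_ret_vaddr);

        while (copy) {                          /* free the duplicated chain */
                i = copy->next;
                free(copy);
                copy = i;
        }
        return 0;
}

Unlike this sketch, the kernel version also takes a reference on each uprobe (atomic_inc(&n->uprobe->ref)) so the duplicated records keep their uprobes alive.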
include/linux/uprobes.h

@@ -117,13 +117,13 @@ extern void uprobe_start_dup_mmap(void);
 extern void uprobe_end_dup_mmap(void);
 extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm);
 extern void uprobe_free_utask(struct task_struct *t);
-extern void uprobe_copy_process(struct task_struct *t);
+extern void uprobe_copy_process(struct task_struct *t, unsigned long flags);
 extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs);
 extern int uprobe_post_sstep_notifier(struct pt_regs *regs);
 extern int uprobe_pre_sstep_notifier(struct pt_regs *regs);
 extern void uprobe_notify_resume(struct pt_regs *regs);
 extern bool uprobe_deny_signal(void);
-extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
+extern bool arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs);
 extern void uprobe_clear_state(struct mm_struct *mm);
 #else /* !CONFIG_UPROBES */
 struct uprobes_state {
@@ -174,7 +174,7 @@ static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 static inline void uprobe_free_utask(struct task_struct *t)
 {
 }
-static inline void uprobe_copy_process(struct task_struct *t)
+static inline void uprobe_copy_process(struct task_struct *t, unsigned long flags)
 {
 }
 static inline void uprobe_clear_state(struct mm_struct *mm)
kernel/events/uprobes.c

@@ -35,6 +35,7 @@
 #include <linux/kdebug.h>              /* notifier mechanism */
 #include "../../mm/internal.h"         /* munlock_vma_page */
 #include <linux/percpu-rwsem.h>
+#include <linux/task_work.h>
 
 #include <linux/uprobes.h>
@@ -1096,21 +1097,22 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 }
 
 /* Slot allocation for XOL */
-static int xol_add_vma(struct xol_area *area)
+static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
 {
-        struct mm_struct *mm = current->mm;
         int ret = -EALREADY;
 
         down_write(&mm->mmap_sem);
         if (mm->uprobes_state.xol_area)
                 goto fail;
 
-        ret = -ENOMEM;
-        /* Try to map as high as possible, this is only a hint. */
-        area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
-        if (area->vaddr & ~PAGE_MASK) {
-                ret = area->vaddr;
-                goto fail;
+        if (!area->vaddr) {
+                /* Try to map as high as possible, this is only a hint. */
+                area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
+                                                PAGE_SIZE, 0, 0);
+                if (area->vaddr & ~PAGE_MASK) {
+                        ret = area->vaddr;
+                        goto fail;
+                }
         }
 
         ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
@@ -1120,13 +1122,49 @@ static int xol_add_vma(struct xol_area *area)
         smp_wmb();      /* pairs with get_xol_area() */
         mm->uprobes_state.xol_area = area;
         ret = 0;
  fail:
         up_write(&mm->mmap_sem);
 
         return ret;
 }
 
+static struct xol_area *__create_xol_area(unsigned long vaddr)
+{
+        struct mm_struct *mm = current->mm;
+        uprobe_opcode_t insn = UPROBE_SWBP_INSN;
+        struct xol_area *area;
+
+        area = kmalloc(sizeof(*area), GFP_KERNEL);
+        if (unlikely(!area))
+                goto out;
+
+        area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
+        if (!area->bitmap)
+                goto free_area;
+
+        area->page = alloc_page(GFP_HIGHUSER);
+        if (!area->page)
+                goto free_bitmap;
+
+        area->vaddr = vaddr;
+        init_waitqueue_head(&area->wq);
+        /* Reserve the 1st slot for get_trampoline_vaddr() */
+        set_bit(0, area->bitmap);
+        atomic_set(&area->slot_count, 1);
+        copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
+
+        if (!xol_add_vma(mm, area))
+                return area;
+
+        __free_page(area->page);
+ free_bitmap:
+        kfree(area->bitmap);
+ free_area:
+        kfree(area);
+ out:
+        return NULL;
+}
+
 /*
  * get_xol_area - Allocate process's xol_area if necessary.
  * This area will be used for storing instructions for execution out of line.
@@ -1137,42 +1175,12 @@ static struct xol_area *get_xol_area(void)
 {
         struct mm_struct *mm = current->mm;
         struct xol_area *area;
-        uprobe_opcode_t insn = UPROBE_SWBP_INSN;
 
-        area = mm->uprobes_state.xol_area;
-        if (area)
-                goto ret;
+        if (!mm->uprobes_state.xol_area)
+                __create_xol_area(0);
 
-        area = kzalloc(sizeof(*area), GFP_KERNEL);
-        if (unlikely(!area))
-                goto out;
-
-        area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
-        if (!area->bitmap)
-                goto free_area;
-
-        area->page = alloc_page(GFP_HIGHUSER);
-        if (!area->page)
-                goto free_bitmap;
-
-        /* allocate first slot of task's xol_area for the return probes */
-        set_bit(0, area->bitmap);
-        copy_to_page(area->page, 0, &insn, UPROBE_SWBP_INSN_SIZE);
-        atomic_set(&area->slot_count, 1);
-        init_waitqueue_head(&area->wq);
-
-        if (!xol_add_vma(area))
-                return area;
-
-        __free_page(area->page);
- free_bitmap:
-        kfree(area->bitmap);
- free_area:
-        kfree(area);
- out:
-        area = mm->uprobes_state.xol_area;
- ret:
-        smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
+        area = mm->uprobes_state.xol_area;
+        smp_read_barrier_depends();     /* pairs with wmb in xol_add_vma() */
         return area;
 }
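A note on the barrier pairing visible above (not from the commit): xol_add_vma() initialises the area and issues smp_wmb() before publishing the pointer in mm->uprobes_state.xol_area, and get_xol_area() pairs that with smp_read_barrier_depends() so a lockless reader that observes the pointer also observes the initialised contents. Roughly the same publish/consume pattern in portable userspace C11, with a release store and an acquire load; struct xol_like, published, writer and reader are invented names for the sketch.

/* sketch: publish a fully initialised object to a lockless reader */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct xol_like {
        unsigned long vaddr;
        unsigned long slots;
};

/* plays the role of mm->uprobes_state.xol_area */
static _Atomic(struct xol_like *) published;

static void *writer(void *arg)
{
        static struct xol_like area;    /* storage outlives the reader */

        (void)arg;
        area.vaddr = 0x7f0000000000UL;
        area.slots = 128;
        /* release store: like smp_wmb() before the pointer assignment */
        atomic_store_explicit(&published, &area, memory_order_release);
        return NULL;
}

static void *reader(void *arg)
{
        struct xol_like *area;

        (void)arg;
        /* spin until the pointer appears, then dereference safely */
        while (!(area = atomic_load_explicit(&published, memory_order_acquire)))
                ;
        printf("saw area at %#lx with %lu slots\n", area->vaddr, area->slots);
        return NULL;
}

int main(void)
{
        pthread_t r, w;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(r, NULL);
        pthread_join(w, NULL);
        return 0;
}

The kernel read side relies on address-dependency ordering (smp_read_barrier_depends() is a no-op on most architectures) rather than a full acquire; the sketch uses acquire because compilers promote memory_order_consume to acquire anyway.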
@@ -1344,14 +1352,6 @@ void uprobe_free_utask(struct task_struct *t)
         t->utask = NULL;
 }
 
-/*
- * Called in context of a new clone/fork from copy_process.
- */
-void uprobe_copy_process(struct task_struct *t)
-{
-        t->utask = NULL;
-}
-
 /*
  * Allocate a uprobe_task object for the task if if necessary.
  * Called when the thread hits a breakpoint.
@@ -1367,6 +1367,90 @@ static struct uprobe_task *get_utask(void)
         return current->utask;
 }
 
+static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
+{
+        struct uprobe_task *n_utask;
+        struct return_instance **p, *o, *n;
+
+        n_utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
+        if (!n_utask)
+                return -ENOMEM;
+        t->utask = n_utask;
+
+        p = &n_utask->return_instances;
+        for (o = o_utask->return_instances; o; o = o->next) {
+                n = kmalloc(sizeof(struct return_instance), GFP_KERNEL);
+                if (!n)
+                        return -ENOMEM;
+
+                *n = *o;
+                atomic_inc(&n->uprobe->ref);
+                n->next = NULL;
+
+                *p = n;
+                p = &n->next;
+                n_utask->depth++;
+        }
+
+        return 0;
+}
+
+static void uprobe_warn(struct task_struct *t, const char *msg)
+{
+        pr_warn("uprobe: %s:%d failed to %s\n",
+                        current->comm, current->pid, msg);
+}
+
+static void dup_xol_work(struct callback_head *work)
+{
+        kfree(work);
+
+        if (current->flags & PF_EXITING)
+                return;
+
+        if (!__create_xol_area(current->utask->vaddr))
+                uprobe_warn(current, "dup xol area");
+}
+
+/*
+ * Called in context of a new clone/fork from copy_process.
+ */
+void uprobe_copy_process(struct task_struct *t, unsigned long flags)
+{
+        struct uprobe_task *utask = current->utask;
+        struct mm_struct *mm = current->mm;
+        struct callback_head *work;
+        struct xol_area *area;
+
+        t->utask = NULL;
+
+        if (!utask || !utask->return_instances)
+                return;
+
+        if (mm == t->mm && !(flags & CLONE_VFORK))
+                return;
+
+        if (dup_utask(t, utask))
+                return uprobe_warn(t, "dup ret instances");
+
+        /* The task can fork() after dup_xol_work() fails */
+        area = mm->uprobes_state.xol_area;
+        if (!area)
+                return uprobe_warn(t, "dup xol area");
+
+        if (mm == t->mm)
+                return;
+
+        /* TODO: move it into the union in uprobe_task */
+        work = kmalloc(sizeof(*work), GFP_KERNEL);
+        if (!work)
+                return uprobe_warn(t, "dup xol area");
+
+        utask->vaddr = area->vaddr;
+        init_task_work(work, dup_xol_work);
+        task_work_add(t, work, true);
+}
+
 /*
  * Current area->vaddr notion assume the trampoline address is always
  * equal area->vaddr.
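One reading of the early returns in the new uprobe_copy_process() above (an interpretation of the checks, not text from the commit): nothing needs to be done if the parent has no pending ret-probes, or if the child shares the parent's mm without CLONE_VFORK (a plain thread gets its own stack, so the parent's pending returns do not apply to it); a vfork() child shares the mm, so only the return_instance chain needs duplicating; a full fork() additionally needs its own XOL area, which dup_xol_work() creates once the child runs. Restated as a stand-alone sketch with invented names (dup_action, fork_dup_action):

/* sketch: which uprobe state a new child needs, per the checks above */
#include <stdio.h>

enum dup_action {
        DUP_NOTHING,            /* no uprobe state to copy */
        DUP_RET_CHAIN,          /* copy return_instances only */
        DUP_RET_AND_XOL,        /* copy return_instances and rebuild an XOL area */
};

static enum dup_action fork_dup_action(int shares_mm, int is_vfork, int has_pending_ret)
{
        if (!has_pending_ret)
                return DUP_NOTHING;             /* !utask || !utask->return_instances */
        if (shares_mm && !is_vfork)
                return DUP_NOTHING;             /* plain thread: new stack, shared mm */
        if (shares_mm)
                return DUP_RET_CHAIN;           /* vfork: the shared mm already has the XOL area */
        return DUP_RET_AND_XOL;                 /* fork: private mm, schedule dup_xol_work() */
}

int main(void)
{
        printf("thread=%d vfork=%d fork=%d\n",
               fork_dup_action(1, 0, 1),
               fork_dup_action(1, 1, 1),
               fork_dup_action(0, 0, 1));
        return 0;
}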
kernel/fork.c

@@ -1373,7 +1373,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         INIT_LIST_HEAD(&p->pi_state_list);
         p->pi_state_cache = NULL;
 #endif
-        uprobe_copy_process(p);
         /*
          * sigaltstack should be cleared when sharing the same VM
          */
@@ -1490,6 +1489,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         perf_event_fork(p);
 
         trace_task_newtask(p, clone_flags);
+        uprobe_copy_process(p, clone_flags);
 
         return p;