mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
5695be142e
PM freezer relies on having all tasks frozen by the time devices are
getting frozen so that no task will touch them while they are getting
frozen. But OOM killer is allowed to kill an already frozen task in
order to handle an OOM situation. In order to protect from late wake-ups
OOM killer is disabled after all tasks are frozen. This, however, still
keeps a window open when a killed task didn't manage to die by the time
freeze_processes finishes.
Reduce the race window by checking all tasks after OOM killer has been
disabled. This is still not completely race free, unfortunately, because
oom_killer_disable cannot stop an already ongoing OOM killer so a task
might still wake up from the fridge and get killed without
freeze_processes noticing. Full synchronization of OOM and freezer is,
however, too heavy weight for this highly unlikely case.
Introduce and check oom_kills counter which gets incremented early when
the allocator enters __alloc_pages_may_oom path and only check all the
tasks if the counter changes during the freezing attempt. The counter
is updated so early to reduce the race window since allocator checked
oom_killer_disabled which is set by PM-freezing code. A false positive
will push the PM-freezer into a slow path but that is not a big deal.
Changes since v1
- push the re-check loop out of freeze_processes into
check_frozen_processes and invert the condition to make the code more
readable as per Rafael
Fixes: f660daac47
(oom: thaw threads if oom killed thread is frozen before deferring)
Cc: 3.2+ <stable@vger.kernel.org> # 3.2+
Signed-off-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
100 lines
2.7 KiB
C
100 lines
2.7 KiB
C
#ifndef __INCLUDE_LINUX_OOM_H
|
|
#define __INCLUDE_LINUX_OOM_H
|
|
|
|
|
|
#include <linux/sched.h>
|
|
#include <linux/types.h>
|
|
#include <linux/nodemask.h>
|
|
#include <uapi/linux/oom.h>
|
|
|
|
struct zonelist;
|
|
struct notifier_block;
|
|
struct mem_cgroup;
|
|
struct task_struct;
|
|
|
|
/*
|
|
* Types of limitations to the nodes from which allocations may occur
|
|
*/
|
|
enum oom_constraint {
	CONSTRAINT_NONE,		/* no allocation constraint */
	CONSTRAINT_CPUSET,		/* constrained by cpuset */
	CONSTRAINT_MEMORY_POLICY,	/* constrained by memory policy */
	CONSTRAINT_MEMCG,		/* constrained by memory cgroup */
};
|
|
|
|
/*
 * Verdict returned by oom_scan_process_thread() for each task examined
 * while looking for an OOM-kill victim.
 */
enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};
|
|
|
|
/* Thread is the potential origin of an oom condition; kill first on oom */
|
|
#define OOM_FLAG_ORIGIN ((__force oom_flags_t)0x1)
|
|
|
|
static inline void set_current_oom_origin(void)
|
|
{
|
|
current->signal->oom_flags |= OOM_FLAG_ORIGIN;
|
|
}
|
|
|
|
static inline void clear_current_oom_origin(void)
|
|
{
|
|
current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
|
|
}
|
|
|
|
static inline bool oom_task_origin(const struct task_struct *p)
|
|
{
|
|
return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
|
|
}
|
|
|
|
extern unsigned long oom_badness(struct task_struct *p,
|
|
struct mem_cgroup *memcg, const nodemask_t *nodemask,
|
|
unsigned long totalpages);
|
|
|
|
/*
 * oom_kills counter: note_oom_kill() increments it early when the allocator
 * enters the __alloc_pages_may_oom path; oom_kills_count() reads it so the
 * PM freezer can detect an OOM kill racing with freeze_processes.
 */
extern int oom_kills_count(void);
extern void note_oom_kill(void);
|
|
extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
|
|
unsigned int points, unsigned long totalpages,
|
|
struct mem_cgroup *memcg, nodemask_t *nodemask,
|
|
const char *message);
|
|
|
|
extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
|
|
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);
|
|
|
|
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
|
|
int order, const nodemask_t *nodemask);
|
|
|
|
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
|
|
unsigned long totalpages, const nodemask_t *nodemask,
|
|
bool force_kill);
|
|
|
|
extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
|
|
int order, nodemask_t *mask, bool force_kill);
|
|
extern int register_oom_notifier(struct notifier_block *nb);
|
|
extern int unregister_oom_notifier(struct notifier_block *nb);
|
|
|
|
extern bool oom_killer_disabled;
|
|
|
|
/*
 * Forbid further OOM kills by setting the oom_killer_disabled flag that
 * the allocator checks on its OOM path (set by PM-freezing code once all
 * tasks are frozen).
 * NOTE(review): plain, non-atomic store — presumably the caller provides
 * the necessary ordering; confirm against the allocator side.
 */
static inline void oom_killer_disable(void)
{
	oom_killer_disabled = true;
}
|
|
|
|
/*
 * Re-allow OOM kills by clearing oom_killer_disabled (counterpart of
 * oom_killer_disable(), e.g. after thawing).
 * NOTE(review): plain, non-atomic store — see oom_killer_disable().
 */
static inline void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}
|
|
|
|
/*
 * An allocation may invoke the OOM killer only when it is allowed to
 * perform filesystem operations and has not asked to fail early.
 */
static inline bool oom_gfp_allowed(gfp_t gfp_mask)
{
	bool fs_allowed = (gfp_mask & __GFP_FS) != 0;
	bool no_retry = (gfp_mask & __GFP_NORETRY) != 0;

	return fs_allowed && !no_retry;
}
|
|
|
|
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
|
|
|
|
/* sysctls */
|
|
extern int sysctl_oom_dump_tasks;
|
|
extern int sysctl_oom_kill_allocating_task;
|
|
extern int sysctl_panic_on_oom;
|
|
#endif /* __INCLUDE_LINUX_OOM_H */
|