#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is the preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
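/*
 * Example (an illustrative sketch, not part of the original header):
 * a typical mmgrab()/mmdrop() pairing. The pin keeps the mm_struct
 * allocated via mm_count, but the address space itself may still be
 * torn down, so mmget_not_zero() would be needed before touching any
 * mappings.
 *
 *	static void mmgrab_example(struct mm_struct *mm)
 *	{
 *		mmgrab(mm);
 *		... do work that must outlive the owning task ...
 *		mmdrop(mm);
 *	}
 */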
static inline void mmdrop_async_fn(struct work_struct *work)
{
	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
	__mmdrop(mm);
}

static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}

/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);
}

static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/*
 * Same as above, but performs the slow path from the async context.
 * Can be called from atomic context as well.
 */
void mmput_async(struct mm_struct *);
#endif
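/*
 * Example (an illustrative sketch, not part of the original header):
 * the common pattern for safely accessing an address space through a
 * possibly-dying mm. mmget_not_zero() fails once mm_users has already
 * dropped to zero, so the body only runs while the address space is
 * still alive.
 *
 *	static void mmget_example(struct mm_struct *mm)
 *	{
 *		if (mmget_not_zero(mm)) {
 *			... walk or map something in the address space ...
 *			mmput(mm);
 *		}
 *	}
 */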
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
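/*
 * Example (an illustrative sketch, not part of the original header):
 * get_task_mm() combines the "not already going away" check with taking
 * the reference, so the result is either NULL or a pinned mm that must
 * be released with mmput().
 *
 *	static void task_mm_example(struct task_struct *task)
 *	{
 *		struct mm_struct *mm = get_task_mm(task);
 *
 *		if (mm) {
 *			... inspect the task's address space ...
 *			mmput(mm);
 *		}
 *	}
 */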
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif

static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
/*
 * Applies per-task gfp context to the given allocation flags.
 * PF_MEMALLOC_NOIO implies GFP_NOIO
 * PF_MEMALLOC_NOFS implies GFP_NOFS
 */
static inline gfp_t current_gfp_context(gfp_t flags)
{
	/*
	 * NOIO implies both NOIO and NOFS and it is a weaker context
	 * so always make sure it takes precedence
	 */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
		flags &= ~__GFP_FS;
	return flags;
}
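/*
 * For instance (an illustrative note, not part of the original header),
 * while a task is inside a memalloc_nofs_save()/memalloc_nofs_restore()
 * scope:
 *
 *	current_gfp_context(GFP_KERNEL) == (GFP_KERNEL & ~__GFP_FS)
 *
 * which equals GFP_NOFS, so the page allocator sees the restricted
 * semantics without every callsite passing restricted flags explicitly.
 */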
#ifdef CONFIG_LOCKDEP
extern void fs_reclaim_acquire(gfp_t gfp_mask);
extern void fs_reclaim_release(gfp_t gfp_mask);
#else
static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
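/*
 * Example (an illustrative sketch, not part of the original header):
 * a GFP_NOIO scope around code that must not recurse into the I/O
 * layer. All allocations in between behave as if __GFP_IO was cleared.
 *
 *	unsigned int noio_flags;
 *
 *	noio_flags = memalloc_noio_save();
 *	... allocations here implicitly get GFP_NOIO semantics ...
 *	memalloc_noio_restore(noio_flags);
 *
 * The save/restore pair nests correctly: restoring puts back whatever
 * PF_MEMALLOC_NOIO state the matching save observed.
 */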
/*
 * memalloc_nofs_{save,restore}: scope API for the GFP_NOFS allocation
 * context, modelled on memalloc_noio_{save,restore} for GFP_NOIO.
 *
 * GFP_NOFS context is currently used for five reasons:
 * - to prevent deadlocks when a lock held by the allocation context
 *   would be needed during memory reclaim
 * - to prevent stack overflows during reclaim, because the allocation
 *   is performed from an already deep context
 * - to prevent lockups when the allocation context depends on other
 *   reclaimers to indirectly make forward progress
 * - just in case, because it is safe from the fs POV
 * - to silence lockdep false positives
 *
 * Overuse of this allocation context hurts the MM: memory reclaim
 * becomes much weaker (especially during heavy FS metadata workloads)
 * and the OOM killer cannot be invoked, because the MM layer lacks
 * information about how much memory the FS layer could free.
 *
 * In many cases it is far from clear why the weaker context is used at
 * all, so it may be used unnecessarily. The way out is to use the flag
 * for scopes rather than for isolated allocations: a scope is declared
 * only where really necessary, is tracked per task, and every
 * allocation request issued from within it simply inherits the
 * GFP_NOFS semantic. This is easier to understand and maintain (there
 * are far fewer problematic contexts than individual allocation
 * requests) and it also helps code paths where the FS layer interacts
 * with other layers (e.g. crypto, security modules, MM) and there is
 * no easy way to convey the allocation context between the layers.
 *
 * PF_MEMALLOC_NOFS is checked in the MM layer by current_gfp_context()
 * and drops __GFP_FS implicitly, the same way PF_MEMALLOC_NOIO drops
 * __GFP_IO. Filesystems should prefer properly documented
 * memalloc_nofs_{save,restore} checkpoints over direct GFP_NOFS
 * (resp. ~__GFP_FS) usage.
 */
static inline unsigned int memalloc_nofs_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
	current->flags |= PF_MEMALLOC_NOFS;
	return flags;
}

static inline void memalloc_nofs_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
}
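/*
 * Example (an illustrative sketch, not part of the original header):
 * how a filesystem would declare a reclaim-unsafe scope, e.g. around a
 * transaction, instead of sprinkling GFP_NOFS over individual
 * allocations. The memalloc_noreclaim_{save,restore} pair below follows
 * the same pattern for PF_MEMALLOC.
 *
 *	unsigned int nofs_flags;
 *
 *	nofs_flags = memalloc_nofs_save();
 *	... allocations here implicitly get GFP_NOFS semantics ...
 *	memalloc_nofs_restore(nofs_flags);
 */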
static inline unsigned int memalloc_noreclaim_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC;
	current->flags |= PF_MEMALLOC;
	return flags;
}

static inline void memalloc_noreclaim_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
#ifdef CONFIG_MEMBARRIER
enum {
	MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY	= (1U << 0),
	MEMBARRIER_STATE_SWITCH_MM			= (1U << 1),
};

static inline void membarrier_execve(struct task_struct *t)
{
	atomic_set(&t->mm->membarrier_state, 0);
}
#else
static inline void membarrier_execve(struct task_struct *t)
{
}
#endif

#endif /* _LINUX_SCHED_MM_H */