/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)
#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
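
/*
 * Illustrative example (an editorial sketch, not part of the original
 * header; "buf" and "len" are hypothetical): a caller in transaction
 * context that must not recurse into the filesystem and can tolerate
 * allocation failure would typically combine flags as
 *
 *	buf = kmem_zalloc(len, KM_NOFS | KM_MAYFAIL);
 *
 * whereas an ordinary blocking allocation simply passes KM_SLEEP.
 */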

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions. We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (flags & KM_NOFS)
			lflags &= ~__GFP_FS;
	}

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior with
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}
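
/*
 * Worked example (editorial, not from the original source): with the
 * definitions above, kmem_flags_convert(KM_NOFS | KM_MAYFAIL | KM_ZERO)
 * starts from GFP_KERNEL | __GFP_NOWARN, clears __GFP_FS for KM_NOFS,
 * then ORs in __GFP_RETRY_MAYFAIL and __GFP_ZERO, while
 * kmem_flags_convert(KM_NOSLEEP) yields just GFP_ATOMIC | __GFP_NOWARN.
 */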

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}
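
/*
 * Usage sketch (editorial, not part of the original header; "nbytes" is
 * hypothetical): kmem_alloc_large() may fall back to vmalloc for large
 * requests, so both variants are released through kmem_free(), whose
 * kvfree() handles kmalloc'd and vmalloc'd memory alike:
 *
 *	void *p = kmem_zalloc_large(nbytes, KM_SLEEP);
 *	if (p)
 *		kmem_free(p);
 */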

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}
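
/*
 * Usage sketch (editorial, not part of the original header; "my_zone" and
 * "struct my_item" are hypothetical): a zone is typically created once at
 * module init time and destroyed on unload, e.g.
 *
 *	kmem_zone_t *my_zone;
 *
 *	my_zone = kmem_zone_init_flags(sizeof(struct my_item), "my_zone",
 *				       KM_ZONE_HWALIGN, NULL);
 *	...
 *	kmem_zone_destroy(my_zone);
 */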

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
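
/*
 * Usage sketch (editorial, not part of the original header; "item" and
 * "my_zone" are hypothetical): objects are carved out of the zone and
 * returned to it with the pair above, e.g.
 *
 *	item = kmem_zone_zalloc(my_zone, KM_SLEEP);
 *	...
 *	kmem_zone_free(my_zone, item);
 */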

#endif /* __XFS_SUPPORT_KMEM_H__ */