/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this with
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as
	 * long as it is feasible, but to fail rather than retry forever,
	 * for all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}

extern void *kmem_alloc(size_t, xfs_km_flags_t);
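
/*
 * Example (illustration only; "buf" and "len" are hypothetical, not part
 * of this header): an allocation inside a transaction that must not
 * recurse into the filesystem and is allowed to fail.  kmem_flags_convert()
 * maps these flags to (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
 * with __GFP_FS cleared:
 *
 *	buf = kmem_alloc(len, KM_NOFS | KM_MAYFAIL);
 *	if (!buf)
 *		return -ENOMEM;
 */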

/*
 * Memory we use to submit for IO needs strict alignment to the
 * underlying driver constraints; worst case, this is 512 bytes.  Since
 * all allocations for IO are a power-of-2 multiple of 512 bytes, the
 * kernel heap normally provides natural alignment for objects of these
 * sizes.  Once memory debugging of some kind is turned on (e.g. red
 * zones, poisoning, KASAN), however, the alignment of heap objects is
 * thrown out the window, and drivers that don't validate alignment do
 * the wrong thing when passed unaligned memory buffers in bios, causing
 * weird IO errors and data corruption.
 *
 * To fix this, kmem_alloc_io() guarantees at least 512 byte alignment
 * of buffers for IO, even if memory debugging options are turned on.
 * It assumes the minimum allocation size is 512 bytes and that sizes
 * are power-of-2 multiples of 512 bytes.  Use this everywhere buffers
 * are allocated for IO.
 */
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
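
/*
 * Sketch of use (illustration only; the values are hypothetical and assume
 * align_mask is the required alignment minus one, as with other kernel
 * alignment masks): a 4k IO buffer for a device with 512 byte sectors:
 *
 *	buf = kmem_alloc_io(4096, 511, KM_MAYFAIL);
 */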
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
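
/*
 * Note that kmem_free() below releases memory from any of the allocation
 * interfaces above: kvfree() copes with both heap pointers from
 * kmem_alloc() and vmalloc pointers that kmem_alloc_large() may hand back.
 */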
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}

/*
 * Zone interfaces
 */

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}
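
/*
 * Illustration only ("example_zone" and "struct foo" are hypothetical): a
 * zone is simply a slab cache, so it is created with kmem_cache_create()
 * and objects are taken from it with kmem_zone_alloc()/kmem_zone_zalloc():
 *
 *	kmem_zone_t *example_zone = kmem_cache_create("example",
 *			sizeof(struct foo), 0, 0, NULL);
 *	struct foo *foo = kmem_zone_zalloc(example_zone, KM_NOFS);
 */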

static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
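
/*
 * Illustration only (hypothetical caller, not part of this header):
 * kmem_to_page() lets code that builds bios treat heap and vmalloc
 * buffers uniformly:
 *
 *	struct page	*page = kmem_to_page(buf + offset);
 *
 *	bio_add_page(bio, page, size, offset_in_page(buf + offset));
 */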

#endif /* __XFS_SUPPORT_KMEM_H__ */