mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 02:50:53 +07:00)
Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm fixes from Dan Williams:
 "Incremental fixes and a small feature addition on top of the main
  libnvdimm 4.12 pull request:

   - Geert noticed that tinyconfig was bloated by BLOCK selecting DAX.
     The size regression is fixed by moving all dax helpers into the
     dax-core and only specifying "select DAX" for FS_DAX and
     dax-capable drivers. He also asked for clarification of the
     NR_DEV_DAX config option which, on closer look, does not need to
     be a config option at all. Mike also throws in a DEV_DAX_PMEM
     fixup for good measure.

   - Ben's attention to detail on -stable patch submissions caught a
     case where the recent fixes to arch_copy_from_iter_pmem() missed
     a condition where we strand dirty data in the cache. This is
     tagged for -stable and will also be included in the rework of the
     pmem api to a proposed {memcpy,copy_user}_flushcache() interface
     for 4.13.

   - Vishal adds a feature that missed the initial pull due to pending
     review feedback. It allows the kernel to clear media errors when
     initializing a BTT (atomic sector update driver) instance on a
     pmem namespace.

   - Ross noticed that the dax_device + dax_operations conversion
     broke __dax_zero_page_range(). The nvdimm unit tests fail to
     check this path, but xfstests immediately trips over it. No
     excuse for missing this before submitting the 4.12 pull request.

  These all pass the nvdimm unit tests and an xfstests spot check. The
  set has received a build success notification from the kbuild robot"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  filesystem-dax: fix broken __dax_zero_page_range() conversion
  libnvdimm, btt: ensure that initializing metadata clears poison
  libnvdimm: add an atomic vs process context flag to rw_bytes
  x86, pmem: Fix cache flushing for iovec write < 8 bytes
  device-dax: kill NR_DEV_DAX
  block, dax: move "select DAX" from BLOCK to FS_DAX
  device-dax: Tell kbuild DEV_DAX_PMEM depends on DEV_DAX
This commit is contained in: commit 0fcc3ab23d
arch/x86/include/asm/pmem.h
@@ -98,7 +98,7 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 	if (bytes < 8) {
 		if (!IS_ALIGNED(dest, 4) || (bytes != 4))
-			arch_wb_cache_pmem(addr, 1);
+			arch_wb_cache_pmem(addr, bytes);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
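The one-line change above is subtle: for a misaligned write shorter than 8 bytes, the old code wrote back only a single byte of cache, stranding the rest of the dirty data. A minimal user-space sketch of the corrected rule (illustrative only; the kernel uses arch_wb_cache_pmem(), and the aligned 4-byte case is already durable via non-temporal stores):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static size_t wb_len;	/* bytes the fake write-back hook was asked to flush */

static void fake_wb_cache(void *addr, size_t n)
{
	(void)addr;
	wb_len = n;
}

#define IS_ALIGNED(x, a) (((uintptr_t)(x) & ((a) - 1)) == 0)

/* Corrected rule from the hunk: unless the copy is a single aligned
 * 4-byte store, the full 'bytes' range must be written back. */
static void copy_flush(void *addr, size_t bytes)
{
	uintptr_t dest = (uintptr_t)addr;

	if (bytes < 8) {
		if (!IS_ALIGNED(dest, 4) || (bytes != 4))
			fake_wb_cache(addr, bytes);	/* was: ..., 1 */
	}
	/* bytes >= 8 takes the clflush-aligned path, elided here */
}

int main(void)
{
	char buf[16];

	copy_flush(buf + 1, 7);	/* misaligned 7-byte write */
	printf("flushed %zu bytes (old code: 1)\n", wb_len);
	return 0;
}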
block/Kconfig
@@ -6,7 +6,6 @@ menuconfig BLOCK
 	default y
 	select SBITMAP
 	select SRCU
-	select DAX
 	help
 	 Provide block layer support for the kernel.
 
drivers/dax/Kconfig
@@ -19,7 +19,7 @@ config DEV_DAX
 
 config DEV_DAX_PMEM
 	tristate "PMEM DAX: direct access to persistent memory"
-	depends on LIBNVDIMM && NVDIMM_DAX
+	depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
 	default DEV_DAX
 	help
 	  Support raw access to persistent memory.  Note that this
@@ -28,9 +28,4 @@ config DEV_DAX_PMEM
 
 	  Say Y if unsure
 
-config NR_DEV_DAX
-	int "Maximum number of Device-DAX instances"
-	default 32768
-	range 256 2147483647
-
 endif
drivers/dax/super.c
@@ -14,16 +14,13 @@
 #include <linux/module.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/genhd.h>
 #include <linux/cdev.h>
 #include <linux/hash.h>
 #include <linux/slab.h>
 #include <linux/dax.h>
 #include <linux/fs.h>
 
-static int nr_dax = CONFIG_NR_DEV_DAX;
-module_param(nr_dax, int, S_IRUGO);
-MODULE_PARM_DESC(nr_dax, "max number of dax device instances");
-
 static dev_t dax_devt;
 DEFINE_STATIC_SRCU(dax_srcu);
 static struct vfsmount *dax_mnt;
@@ -47,6 +44,75 @@ void dax_read_unlock(int id)
 }
 EXPORT_SYMBOL_GPL(dax_read_unlock);
 
+int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
+		pgoff_t *pgoff)
+{
+	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
+
+	if (pgoff)
+		*pgoff = PHYS_PFN(phys_off);
+	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
+		return -EINVAL;
+	return 0;
+}
+EXPORT_SYMBOL(bdev_dax_pgoff);
+
+/**
+ * __bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @sb: The superblock of the device
+ * @blocksize: The block size of the device
+ *
+ * This is a library function for filesystems to check if the block device
+ * can be mounted with dax option.
+ *
+ * Return: negative errno if unsupported, 0 if supported.
+ */
+int __bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+	struct block_device *bdev = sb->s_bdev;
+	struct dax_device *dax_dev;
+	pgoff_t pgoff;
+	int err, id;
+	void *kaddr;
+	pfn_t pfn;
+	long len;
+
+	if (blocksize != PAGE_SIZE) {
+		pr_err("VFS (%s): error: unsupported blocksize for dax\n",
+				sb->s_id);
+		return -EINVAL;
+	}
+
+	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
+	if (err) {
+		pr_err("VFS (%s): error: unaligned partition for dax\n",
+				sb->s_id);
+		return err;
+	}
+
+	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
+	if (!dax_dev) {
+		pr_err("VFS (%s): error: device does not support dax\n",
+				sb->s_id);
+		return -EOPNOTSUPP;
+	}
+
+	id = dax_read_lock();
+	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+	dax_read_unlock(id);
+
+	put_dax(dax_dev);
+
+	if (len < 1) {
+		pr_err("VFS (%s): error: dax access failed (%ld)",
+				sb->s_id, len);
+		return len < 0 ? len : -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__bdev_dax_supported);
+
 /**
  * struct dax_device - anchor object for dax services
  * @inode: core vfs
@@ -261,7 +327,7 @@ struct dax_device *alloc_dax(void *private, const char *__host,
 	if (__host && !host)
 		return NULL;
 
-	minor = ida_simple_get(&dax_minor_ida, 0, nr_dax, GFP_KERNEL);
+	minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
 	if (minor < 0)
 		goto err_minor;
 
@@ -405,8 +471,7 @@ static int __init dax_fs_init(void)
 	if (rc)
 		return rc;
 
-	nr_dax = max(nr_dax, 256);
-	rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax");
+	rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
 	if (rc)
 		__dax_fs_exit();
 	return rc;
@@ -414,7 +479,7 @@ static int __init dax_fs_init(void)
 
 static void __exit dax_fs_exit(void)
 {
-	unregister_chrdev_region(dax_devt, nr_dax);
+	unregister_chrdev_region(dax_devt, MINORMASK+1);
 	ida_destroy(&dax_minor_ida);
 	__dax_fs_exit();
 }
drivers/nvdimm/blk.c
@@ -218,7 +218,8 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
 }
 
 static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *iobuf, size_t n, int rw)
+		resource_size_t offset, void *iobuf, size_t n, int rw,
+		unsigned long flags)
 {
 	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
 	struct nd_blk_region *ndbr = to_ndbr(nsblk);
drivers/nvdimm/btt.c
@@ -32,45 +32,53 @@ enum log_ent_request {
 };
 
 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
-		void *buf, size_t n)
+		void *buf, size_t n, unsigned long flags)
 {
 	struct nd_btt *nd_btt = arena->nd_btt;
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* arena offsets are 4K from the base of the device */
 	offset += SZ_4K;
-	return nvdimm_read_bytes(ndns, offset, buf, n);
+	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
 }
 
 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
-		void *buf, size_t n)
+		void *buf, size_t n, unsigned long flags)
 {
 	struct nd_btt *nd_btt = arena->nd_btt;
 	struct nd_namespace_common *ndns = nd_btt->ndns;
 
 	/* arena offsets are 4K from the base of the device */
 	offset += SZ_4K;
-	return nvdimm_write_bytes(ndns, offset, buf, n);
+	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
 }
 
 static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
 {
 	int ret;
 
+	/*
+	 * infooff and info2off should always be at least 512B aligned.
+	 * We rely on that to make sure rw_bytes does error clearing
+	 * correctly, so make sure that is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->infooff, 512));
+	WARN_ON_ONCE(!IS_ALIGNED(arena->info2off, 512));
+
 	ret = arena_write_bytes(arena, arena->info2off, super,
-			sizeof(struct btt_sb));
+			sizeof(struct btt_sb), 0);
 	if (ret)
 		return ret;
 
 	return arena_write_bytes(arena, arena->infooff, super,
-			sizeof(struct btt_sb));
+			sizeof(struct btt_sb), 0);
 }
 
 static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
 {
 	WARN_ON(!super);
 	return arena_read_bytes(arena, arena->infooff, super,
-			sizeof(struct btt_sb));
+			sizeof(struct btt_sb), 0);
 }
 
 /*
@@ -79,16 +87,17 @@ static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
  * mapping is in little-endian
  * mapping contains 'E' and 'Z' flags as desired
  */
-static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping)
+static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
+		unsigned long flags)
 {
 	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
 
 	WARN_ON(lba >= arena->external_nlba);
-	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE);
+	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
 }
 
 static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
-		u32 z_flag, u32 e_flag)
+		u32 z_flag, u32 e_flag, unsigned long rwb_flags)
 {
 	u32 ze;
 	__le32 mapping_le;
@@ -127,11 +136,11 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
 	}
 
 	mapping_le = cpu_to_le32(mapping);
-	return __btt_map_write(arena, lba, mapping_le);
+	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
 }
 
 static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
-		int *trim, int *error)
+		int *trim, int *error, unsigned long rwb_flags)
 {
 	int ret;
 	__le32 in;
@@ -140,7 +149,7 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
 
 	WARN_ON(lba >= arena->external_nlba);
 
-	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE);
+	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
 	if (ret)
 		return ret;
 
@@ -189,7 +198,7 @@ static int btt_log_read_pair(struct arena_info *arena, u32 lane,
 	WARN_ON(!ent);
 	return arena_read_bytes(arena,
 			arena->logoff + (2 * lane * LOG_ENT_SIZE), ent,
-			2 * LOG_ENT_SIZE);
+			2 * LOG_ENT_SIZE, 0);
 }
 
 static struct dentry *debugfs_root;
@@ -335,7 +344,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
  * btt_flog_write is the wrapper for updating the freelist elements
  */
 static int __btt_log_write(struct arena_info *arena, u32 lane,
-			u32 sub, struct log_entry *ent)
+			u32 sub, struct log_entry *ent, unsigned long flags)
 {
 	int ret;
 	/*
@@ -350,13 +359,13 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
 	void *src = ent;
 
 	/* split the 16B write into atomic, durable halves */
-	ret = arena_write_bytes(arena, ns_off, src, log_half);
+	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
 	if (ret)
 		return ret;
 
 	ns_off += log_half;
 	src += log_half;
-	return arena_write_bytes(arena, ns_off, src, log_half);
+	return arena_write_bytes(arena, ns_off, src, log_half, flags);
 }
 
 static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
@@ -364,7 +373,7 @@ static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
 {
 	int ret;
 
-	ret = __btt_log_write(arena, lane, sub, ent);
+	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
 	if (ret)
 		return ret;
 
@@ -393,11 +402,19 @@ static int btt_map_init(struct arena_info *arena)
 	if (!zerobuf)
 		return -ENOMEM;
 
+	/*
+	 * mapoff should always be at least 512B aligned. We rely on that to
+	 * make sure rw_bytes does error clearing correctly, so make sure that
+	 * is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->mapoff, 512));
+
 	while (mapsize) {
 		size_t size = min(mapsize, chunk_size);
 
+		WARN_ON_ONCE(size < 512);
 		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
-				size);
+				size, 0);
 		if (ret)
 			goto free;
 
@@ -417,26 +434,50 @@ static int btt_map_init(struct arena_info *arena)
  */
 static int btt_log_init(struct arena_info *arena)
 {
+	size_t logsize = arena->info2off - arena->logoff;
+	size_t chunk_size = SZ_4K, offset = 0;
+	struct log_entry log;
+	void *zerobuf;
+	int ret;
 	u32 i;
-	struct log_entry log, zerolog;
 
-	memset(&zerolog, 0, sizeof(zerolog));
+	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
+	if (!zerobuf)
+		return -ENOMEM;
+	/*
+	 * logoff should always be at least 512B aligned. We rely on that to
+	 * make sure rw_bytes does error clearing correctly, so make sure that
+	 * is the case.
+	 */
+	WARN_ON_ONCE(!IS_ALIGNED(arena->logoff, 512));
+
+	while (logsize) {
+		size_t size = min(logsize, chunk_size);
+
+		WARN_ON_ONCE(size < 512);
+		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
+				size, 0);
+		if (ret)
+			goto free;
+
+		offset += size;
+		logsize -= size;
+		cond_resched();
+	}
 
 	for (i = 0; i < arena->nfree; i++) {
 		log.lba = cpu_to_le32(i);
 		log.old_map = cpu_to_le32(arena->external_nlba + i);
 		log.new_map = cpu_to_le32(arena->external_nlba + i);
 		log.seq = cpu_to_le32(LOG_SEQ_INIT);
-		ret = __btt_log_write(arena, i, 0, &log);
+		ret = __btt_log_write(arena, i, 0, &log, 0);
 		if (ret)
-			return ret;
-		ret = __btt_log_write(arena, i, 1, &zerolog);
-		if (ret)
-			return ret;
+			goto free;
 	}
 
-	return 0;
+ free:
+	kfree(zerobuf);
+	return ret;
 }
 
 static int btt_freelist_init(struct arena_info *arena)
@@ -470,7 +511,7 @@ static int btt_freelist_init(struct arena_info *arena)
 
 		/* Check if map recovery is needed */
 		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
-				NULL, NULL);
+				NULL, NULL, 0);
 		if (ret)
 			return ret;
 		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
@@ -480,7 +521,7 @@ static int btt_freelist_init(struct arena_info *arena)
 			 * to complete the map write. So fix up the map.
 			 */
 			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
-					le32_to_cpu(log_new.new_map), 0, 0);
+					le32_to_cpu(log_new.new_map), 0, 0, 0);
 			if (ret)
 				return ret;
 		}
@@ -875,7 +916,7 @@ static int btt_data_read(struct arena_info *arena, struct page *page,
 	u64 nsoff = to_namespace_offset(arena, lba);
 	void *mem = kmap_atomic(page);
 
-	ret = arena_read_bytes(arena, nsoff, mem + off, len);
+	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
 	kunmap_atomic(mem);
 
 	return ret;
@@ -888,7 +929,7 @@ static int btt_data_write(struct arena_info *arena, u32 lba,
 	u64 nsoff = to_namespace_offset(arena, lba);
 	void *mem = kmap_atomic(page);
 
-	ret = arena_write_bytes(arena, nsoff, mem + off, len);
+	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
 	kunmap_atomic(mem);
 
 	return ret;
@@ -931,10 +972,12 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
 		mem = kmap_atomic(bv.bv_page);
 		if (rw)
 			ret = arena_write_bytes(arena, meta_nsoff,
-					mem + bv.bv_offset, cur_len);
+					mem + bv.bv_offset, cur_len,
+					NVDIMM_IO_ATOMIC);
 		else
 			ret = arena_read_bytes(arena, meta_nsoff,
-					mem + bv.bv_offset, cur_len);
+					mem + bv.bv_offset, cur_len,
+					NVDIMM_IO_ATOMIC);
 
 		kunmap_atomic(mem);
 		if (ret)
@@ -976,7 +1019,8 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
 
 		cur_len = min(btt->sector_size, len);
 
-		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag);
+		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
+				NVDIMM_IO_ATOMIC);
 		if (ret)
 			goto out_lane;
 
@@ -1006,7 +1050,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
 			barrier();
 
 			ret = btt_map_read(arena, premap, &new_map, &t_flag,
-						&e_flag);
+						&e_flag, NVDIMM_IO_ATOMIC);
 			if (ret)
 				goto out_rtt;
 
@@ -1093,7 +1137,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		}
 
 		lock_map(arena, premap);
-		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL);
+		ret = btt_map_read(arena, premap, &old_postmap, NULL, NULL,
+				NVDIMM_IO_ATOMIC);
 		if (ret)
 			goto out_map;
 		if (old_postmap >= arena->internal_nlba) {
@@ -1110,7 +1155,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
 		if (ret)
 			goto out_map;
 
-		ret = btt_map_write(arena, premap, new_postmap, 0, 0);
+		ret = btt_map_write(arena, premap, new_postmap, 0, 0, 0);
 		if (ret)
 			goto out_map;
 
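btt_map_init() and btt_log_init() above now zero the BTT map and log regions up front, in chunks that stay at least 512 bytes long and 512B-aligned, because that is the granularity at which the rw_bytes path is permitted to clear known media errors. A condensed user-space sketch of the chunked zero-fill pattern (write_bytes() is a stand-in for arena_write_bytes() with flags = 0; the kernel also calls cond_resched() between chunks):

#include <stdlib.h>
#include <stddef.h>

#define SZ_4K 4096

/* Stand-in for arena_write_bytes(..., 0): in the kernel, a 512B-aligned
 * process-context write may clear poison for known badblocks. */
static int write_bytes(size_t off, const void *buf, size_t n)
{
	(void)off; (void)buf; (void)n;
	return 0;
}

/* Zero an on-media region in 4K chunks so every write is >= 512B and
 * 512B-aligned, letting the underlying write path clear errors. */
static int zero_region(size_t off, size_t len)
{
	void *zerobuf = calloc(1, SZ_4K);
	int ret = 0;

	if (!zerobuf)
		return -1;
	while (len) {
		size_t size = len < SZ_4K ? len : SZ_4K;

		ret = write_bytes(off, zerobuf, size);
		if (ret)
			break;
		off += size;
		len -= size;
	}
	free(zerobuf);
	return ret;
}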
drivers/nvdimm/btt_devs.c
@@ -273,7 +273,7 @@ static int __nd_btt_probe(struct nd_btt *nd_btt,
 	if (!btt_sb || !ndns || !nd_btt)
 		return -ENODEV;
 
-	if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb)))
+	if (nvdimm_read_bytes(ndns, SZ_4K, btt_sb, sizeof(*btt_sb), 0))
 		return -ENXIO;
 
 	if (nvdimm_namespace_capacity(ndns) < SZ_16M)
drivers/nvdimm/claim.c
@@ -228,7 +228,8 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
 EXPORT_SYMBOL(nd_sb_checksum);
 
 static int nsio_rw_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size, int rw)
+		resource_size_t offset, void *buf, size_t size, int rw,
+		unsigned long flags)
 {
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
@@ -259,7 +260,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 		 * work around this collision.
 		 */
 		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
-				&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
+				&& !(flags & NVDIMM_IO_ATOMIC)
+				&& !ndns->claim) {
 			long cleared;
 
 			cleared = nvdimm_clear_poison(&ndns->dev,
drivers/nvdimm/nd.h
@@ -31,6 +31,7 @@ enum {
 	ND_MAX_LANES = 256,
 	SECTOR_SHIFT = 9,
 	INT_LBASIZE_ALIGNMENT = 64,
+	NVDIMM_IO_ATOMIC = 1,
 };
 
 struct nd_poison {
drivers/nvdimm/pfn_devs.c
@@ -357,7 +357,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 	if (!is_nd_pmem(nd_pfn->dev.parent))
 		return -ENODEV;
 
-	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)))
+	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
 		return -ENXIO;
 
 	if (memcmp(pfn_sb->signature, sig, PFN_SIG_LEN) != 0)
@@ -662,7 +662,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
 	pfn_sb->checksum = cpu_to_le64(checksum);
 
-	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
 }
 
 /*
fs/Kconfig
@@ -39,6 +39,7 @@ config FS_DAX
 	depends on MMU
 	depends on !(ARM || MIPS || SPARC)
 	select FS_IOMAP
+	select DAX
 	help
 	  Direct Access (DAX) can be used on memory-backed block devices.
 	  If the block device supports DAX and the filesystem supports DAX,
fs/block_dev.c
@@ -717,72 +717,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(bdev_write_page);
 
-int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
-		pgoff_t *pgoff)
-{
-	phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;
-
-	if (pgoff)
-		*pgoff = PHYS_PFN(phys_off);
-	if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
-		return -EINVAL;
-	return 0;
-}
-EXPORT_SYMBOL(bdev_dax_pgoff);
-
-/**
- * bdev_dax_supported() - Check if the device supports dax for filesystem
- * @sb: The superblock of the device
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: negative errno if unsupported, 0 if supported.
- */
-int bdev_dax_supported(struct super_block *sb, int blocksize)
-{
-	struct block_device *bdev = sb->s_bdev;
-	struct dax_device *dax_dev;
-	pgoff_t pgoff;
-	int err, id;
-	void *kaddr;
-	pfn_t pfn;
-	long len;
-
-	if (blocksize != PAGE_SIZE) {
-		vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
-		return -EINVAL;
-	}
-
-	err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
-	if (err) {
-		vfs_msg(sb, KERN_ERR, "error: unaligned partition for dax");
-		return err;
-	}
-
-	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
-	if (!dax_dev) {
-		vfs_msg(sb, KERN_ERR, "error: device does not support dax");
-		return -EOPNOTSUPP;
-	}
-
-	id = dax_read_lock();
-	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
-	dax_read_unlock(id);
-
-	put_dax(dax_dev);
-
-	if (len < 1) {
-		vfs_msg(sb, KERN_ERR,
-				"error: dax access failed (%ld)", len);
-		return len < 0 ? len : -EIO;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(bdev_dax_supported);
-
 /*
  * pseudo-fs
  */
fs/dax.c
@@ -993,12 +993,12 @@ int __dax_zero_page_range(struct block_device *bdev,
 		void *kaddr;
 		pfn_t pfn;
 
-		rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
+		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 		if (rc)
 			return rc;
 
 		id = dax_read_lock();
-		rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr,
+		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
 				&pfn);
 		if (rc < 0) {
 			dax_read_unlock(id);
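The two-line fix above matters because __dax_zero_page_range() is used for sub-page zeroing, so size can be well under a page: a non-page-multiple size made the relocated bdev_dax_pgoff() return -EINVAL, and PHYS_PFN(size) rounded down to a request for zero pages. A tiny sketch of the arithmetic, assuming 4K pages:

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumption: 4K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PHYS_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long size = 512;	/* partial-block zeroing request */

	/* the old code asked dax_direct_access() for PHYS_PFN(size) pages */
	printf("pages requested: %lu\n", PHYS_PFN(size));	/* prints 0 */
	/* and a 512-byte size fails bdev_dax_pgoff()'s page-multiple check */
	printf("page multiple: %s\n", size % PAGE_SIZE ? "no" : "yes");
	return 0;
}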
fs/ext2/super.c
@@ -32,6 +32,7 @@
 #include <linux/log2.h>
 #include <linux/quotaops.h>
 #include <linux/uaccess.h>
+#include <linux/dax.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
fs/ext4/super.c
@@ -37,6 +37,7 @@
 #include <linux/ctype.h>
 #include <linux/log2.h>
 #include <linux/crc16.h>
+#include <linux/dax.h>
 #include <linux/cleancache.h>
 #include <linux/uaccess.h>
 
fs/xfs/xfs_super.c
@@ -52,6 +52,7 @@
 #include "xfs_reflink.h"
 
 #include <linux/namei.h>
+#include <linux/dax.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/mount.h>
include/linux/blkdev.h
@@ -1947,8 +1947,6 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
-extern int bdev_dax_supported(struct super_block *, int);
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
 #else /* CONFIG_BLOCK */
 
 struct block_device;
include/linux/dax.h
@@ -18,12 +18,38 @@ struct dax_operations {
 			void **, pfn_t *);
 };
 
+int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
+#if IS_ENABLED(CONFIG_FS_DAX)
+int __bdev_dax_supported(struct super_block *sb, int blocksize);
+static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+	return __bdev_dax_supported(sb, blocksize);
+}
+#else
+static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_DAX)
+struct dax_device *dax_get_by_host(const char *host);
+void put_dax(struct dax_device *dax_dev);
+#else
+static inline struct dax_device *dax_get_by_host(const char *host)
+{
+	return NULL;
+}
+
+static inline void put_dax(struct dax_device *dax_dev)
+{
+}
+#endif
+
 int dax_read_lock(void);
 void dax_read_unlock(int id);
-struct dax_device *dax_get_by_host(const char *host);
 struct dax_device *alloc_dax(void *private, const char *host,
 		const struct dax_operations *ops);
-void put_dax(struct dax_device *dax_dev);
 bool dax_alive(struct dax_device *dax_dev);
 void kill_dax(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
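With the header split above, filesystems can call bdev_dax_supported() unconditionally: under CONFIG_FS_DAX it forwards to __bdev_dax_supported() in the dax core, otherwise it compiles down to -EOPNOTSUPP. A hedged sketch of the mount-time check this enables (the function name is illustrative; the ext2/ext4/xfs callers that gain <linux/dax.h> includes in this merge follow the same pattern):

#include <linux/dax.h>
#include <linux/fs.h>

/* Sketch: validate a "-o dax" mount option against the backing device. */
static int example_validate_dax(struct super_block *sb, bool want_dax)
{
	if (!want_dax)
		return 0;

	/* 0 when the device supports page-sized direct access; a negative
	 * errno otherwise, including -EOPNOTSUPP when FS_DAX is off. */
	return bdev_dax_supported(sb, PAGE_SIZE);
}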
include/linux/nd.h
@@ -48,7 +48,7 @@ struct nd_namespace_common {
 	struct device dev;
 	struct device *claim;
 	int (*rw_bytes)(struct nd_namespace_common *, resource_size_t offset,
-			void *buf, size_t size, int rw);
+			void *buf, size_t size, int rw, unsigned long flags);
 };
 
 static inline struct nd_namespace_common *to_ndns(struct device *dev)
@@ -134,9 +134,10 @@ static inline struct nd_namespace_blk *to_nd_namespace_blk(const struct device *
 * @buf is up-to-date upon return from this routine.
 */
 static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size)
+		resource_size_t offset, void *buf, size_t size,
+		unsigned long flags)
 {
-	return ndns->rw_bytes(ndns, offset, buf, size, READ);
+	return ndns->rw_bytes(ndns, offset, buf, size, READ, flags);
 }
 
 /**
@@ -152,9 +153,10 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
 * to media is handled internal to the @ndns driver, if at all.
 */
 static inline int nvdimm_write_bytes(struct nd_namespace_common *ndns,
-		resource_size_t offset, void *buf, size_t size)
+		resource_size_t offset, void *buf, size_t size,
+		unsigned long flags)
 {
-	return ndns->rw_bytes(ndns, offset, buf, size, WRITE);
+	return ndns->rw_bytes(ndns, offset, buf, size, WRITE, flags);
 }
 
 #define MODULE_ALIAS_ND_DEVICE(type) \
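The nd.h changes above make every rw_bytes caller declare its execution context. A short sketch of the resulting convention (the wrapper functions are illustrative, not kernel API; note that NVDIMM_IO_ATOMIC is defined in the private drivers/nvdimm/nd.h header rather than include/linux/nd.h):

#include <linux/nd.h>
#include <linux/sizes.h>
#include "nd.h"	/* NVDIMM_IO_ATOMIC */

/* Process context, e.g. writing a superblock during init: flags = 0
 * permits the slow path that clears known media errors (may sleep). */
static int example_write_sb(struct nd_namespace_common *ndns,
		void *sb, size_t len)
{
	return nvdimm_write_bytes(ndns, SZ_4K, sb, len, 0);
}

/* Atomic context, e.g. under kmap_atomic() or a BTT lane lock:
 * NVDIMM_IO_ATOMIC makes the I/O skip error clearing and never sleep. */
static int example_read_atomic(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t len)
{
	return nvdimm_read_bytes(ndns, offset, buf, len, NVDIMM_IO_ATOMIC);
}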