/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
	const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
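
/*
 * A minimal sketch of the actor contract described above (illustrative only,
 * not used by anything in this file): the actor is handed one mapped extent,
 * consumes as much of it as it wants, and returns the number of bytes it
 * handled or a negative errno.  The names example_count_actor and
 * example_count_bytes below are hypothetical.
 *
 *	static loff_t
 *	example_count_actor(struct inode *inode, loff_t pos, loff_t length,
 *			void *data, struct iomap *iomap)
 *	{
 *		loff_t *total = data;
 *
 *		*total += length;	// claim the whole extent we were given
 *		return length;		// tell iomap_apply how much was consumed
 *	}
 *
 *	static loff_t
 *	example_count_bytes(struct inode *inode, loff_t pos, loff_t len,
 *			const struct iomap_ops *ops)
 *	{
 *		loff_t total = 0, ret;
 *
 *		while (len > 0) {
 *			ret = iomap_apply(inode, pos, len, IOMAP_REPORT, ops,
 *					&total, example_count_actor);
 *			if (ret <= 0)
 *				return ret;
 *			pos += ret;
 *			len -= ret;
 *		}
 *		return total;
 *	}
 */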

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
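
/*
 * Hedged usage sketch: a filesystem's ->write_iter method would typically
 * wrap iomap_file_buffered_write under the inode lock, roughly as below.
 * example_iomap_ops and example_file_write_iter are hypothetical names;
 * real callers (e.g. XFS) also handle O_DIRECT, permissions and timestamps
 * around this call.
 *
 *	static ssize_t
 *	example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&example_iomap_ops);
 *		inode_unlock(inode);
 *
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */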

static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
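
/*
 * Hedged usage sketch: a filesystem truncate/setattr path would typically
 * zero the partial block at the new end of file before changing i_size,
 * roughly as below.  example_iomap_ops and example_setsize are hypothetical
 * names; a real implementation (e.g. xfs_setattr_size) does much more work
 * around these calls.
 *
 *	static int
 *	example_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		loff_t oldsize = i_size_read(inode);
 *		bool did_zero = false;
 *		int error;
 *
 *		if (newsize > oldsize)
 *			error = iomap_zero_range(inode, oldsize,
 *					newsize - oldsize, &did_zero,
 *					&example_iomap_ops);
 *		else
 *			error = iomap_truncate_page(inode, newsize, &did_zero,
 *					&example_iomap_ops);
 *		if (error)
 *			return error;
 *
 *		truncate_setsize(inode, newsize);
 *		return 0;
 *	}
 */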

static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
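
/*
 * Hedged usage sketch: a filesystem wires this up through its
 * vm_operations_struct, usually via a small wrapper.  The example_* names
 * are hypothetical, and a real caller would normally also take filesystem
 * freeze protection (sb_start_pagefault/sb_end_pagefault) around the call.
 *
 *	static int
 *	example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &example_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= example_page_mkwrite,
 *	};
 */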

struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
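
/*
 * Hedged usage sketch: an inode_operations ->fiemap handler simply forwards
 * to iomap_fiemap with the filesystem's iomap_ops.  example_iomap_ops and
 * example_fiemap are hypothetical names.
 *
 *	static int
 *	example_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 *			u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&example_iomap_ops);
 *	}
 */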

static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_HOLE);
		if (offset < 0)
			return length;
		/* fall through */
	case IOMAP_HOLE:
		*(loff_t *)data = offset;
		return 0;
	default:
		return length;
	}
}

loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_hole_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);

static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
		void *data, struct iomap *iomap)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		return length;
	case IOMAP_UNWRITTEN:
		offset = page_cache_seek_hole_data(inode, offset, length,
						   SEEK_DATA);
		if (offset < 0)
			return length;
		/*FALLTHRU*/
	default:
		*(loff_t *)data = offset;
		return 0;
	}
}

loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
	loff_t size = i_size_read(inode);
	loff_t length = size - offset;
	loff_t ret;

	/* Nothing to be found before or beyond the end of the file. */
	if (offset < 0 || offset >= size)
		return -ENXIO;

	while (length > 0) {
		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
				  &offset, iomap_seek_data_actor);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		offset += ret;
		length -= ret;
	}

	if (length <= 0)
		return -ENXIO;
	return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
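
/*
 * Hedged usage sketch: a ->llseek implementation dispatches SEEK_HOLE and
 * SEEK_DATA to the helpers above and falls back to generic_file_llseek for
 * the other whence values.  The example_* names are hypothetical; a real
 * implementation may also take locks around the iomap calls.
 *
 *	static loff_t
 *	example_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file_inode(file);
 *
 *		switch (whence) {
 *		case SEEK_HOLE:
 *			offset = iomap_seek_hole(inode, offset,
 *					&example_iomap_ops);
 *			break;
 *		case SEEK_DATA:
 *			offset = iomap_seek_data(inode, offset,
 *					&example_iomap_ops);
 *			break;
 *		default:
 *			return generic_file_llseek(file, offset, whence);
 *		}
 *
 *		if (offset < 0)
 *			return offset;
 *		return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 *	}
 */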

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, start, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	WARN_ON_ONCE(ret);
	ret = 0;

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing. Either
	 * one is a pretty crazy thing to do, so we don't support it 100%. If
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE) {
		int err = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
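
/*
 * Hedged usage sketch: a filesystem's direct-I/O read path calls iomap_dio_rw
 * with the inode lock held (the lockdep assertion above requires i_rwsem) and
 * may pass a NULL end_io callback.  example_iomap_ops and example_dio_read
 * are hypothetical names; a direct write would additionally pass an end_io
 * callback so it can convert unwritten extents on completion.
 *
 *	static ssize_t
 *	example_dio_read(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!iov_iter_count(to))
 *			return 0;
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *
 *		return ret;
 *	}
 */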